hexsha stringlengths 40 40 | size int64 2 1.02M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 245 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 245 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 245 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 2 1.02M | avg_line_length float64 1 417k | max_line_length int64 1 987k | alphanum_fraction float64 0 1 | content_no_comment stringlengths 0 1.01M | is_comment_constant_removed bool 1
class | is_sharp_comment_removed bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f72f43ba26b309b9c8f2aba4f79737b3f52bf7d2 | 931 | py | Python | tasks.py | glyph/cryptography | 43cf688e885668198bc966b1cf3a4a425a60f1a6 | [
"Apache-2.0"
] | null | null | null | tasks.py | glyph/cryptography | 43cf688e885668198bc966b1cf3a4a425a60f1a6 | [
"Apache-2.0"
] | null | null | null | tasks.py | glyph/cryptography | 43cf688e885668198bc966b1cf3a4a425a60f1a6 | [
"Apache-2.0"
] | null | null | null | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function
import invoke
@invoke.task
def release(version):
"""
``version`` should be a string like '0.4' or '1.0'.
"""
invoke.run("git tag -s {0}".format(version))
invoke.run("git push --tags")
invoke.run("python setup.py sdist")
invoke.run("twine upload -s dist/cryptography-{0}*".format(version))
| 33.25 | 72 | 0.721805 |
from __future__ import absolute_import, division, print_function
import invoke
@invoke.task
def release(version):
invoke.run("git tag -s {0}".format(version))
invoke.run("git push --tags")
invoke.run("python setup.py sdist")
invoke.run("twine upload -s dist/cryptography-{0}*".format(version))
| true | true |
f72f446798e695312ccfcdbcfe2f24b2c49bcbdc | 47,910 | py | Python | desktop/core/ext-py/SQLAlchemy-1.3.17/test/sql/test_defaults.py | yetsun/hue | 2e48f0cc70e233ee0e1b40733d4b2a18d8836c66 | [
"Apache-2.0"
] | 5,079 | 2015-01-01T03:39:46.000Z | 2022-03-31T07:38:22.000Z | desktop/core/ext-py/SQLAlchemy-1.3.17/test/sql/test_defaults.py | yetsun/hue | 2e48f0cc70e233ee0e1b40733d4b2a18d8836c66 | [
"Apache-2.0"
] | 1,623 | 2015-01-01T08:06:24.000Z | 2022-03-30T19:48:52.000Z | desktop/core/ext-py/SQLAlchemy-1.3.17/test/sql/test_defaults.py | yetsun/hue | 2e48f0cc70e233ee0e1b40733d4b2a18d8836c66 | [
"Apache-2.0"
] | 2,033 | 2015-01-04T07:18:02.000Z | 2022-03-28T19:55:47.000Z | import datetime
import itertools
import sqlalchemy as sa
from sqlalchemy import Boolean
from sqlalchemy import cast
from sqlalchemy import DateTime
from sqlalchemy import exc
from sqlalchemy import ForeignKey
from sqlalchemy import func
from sqlalchemy import Integer
from sqlalchemy import literal
from sqlalchemy import MetaData
from sqlalchemy import Sequence
from sqlalchemy import String
from sqlalchemy import testing
from sqlalchemy import Unicode
from sqlalchemy.schema import CreateTable
from sqlalchemy.sql import literal_column
from sqlalchemy.sql import select
from sqlalchemy.sql import text
from sqlalchemy.testing import assert_raises_message
from sqlalchemy.testing import AssertsCompiledSQL
from sqlalchemy.testing import engines
from sqlalchemy.testing import eq_
from sqlalchemy.testing import expect_warnings
from sqlalchemy.testing import fixtures
from sqlalchemy.testing import mock
from sqlalchemy.testing.schema import Column
from sqlalchemy.testing.schema import Table
from sqlalchemy.types import TypeDecorator
from sqlalchemy.types import TypeEngine
from sqlalchemy.util import b
from sqlalchemy.util import u
class DDLTest(fixtures.TestBase, AssertsCompiledSQL):
    """Compilation tests: how the various kinds of ``server_default``
    values render inside CREATE TABLE DDL."""
    __dialect__ = "default"
    def test_string(self):
        # The column's datatype is irrelevant here: a plain-string
        # server_default renders as a quoted SQL literal regardless of
        # the column being an Integer.
        metadata = MetaData()
        tbl = Table("t", metadata, Column("x", Integer, server_default="5"))
        self.assert_compile(
            CreateTable(tbl), "CREATE TABLE t (x INTEGER DEFAULT '5')"
        )
    def test_string_w_quotes(self):
        """A single quote embedded in a string default is doubled."""
        metadata = MetaData()
        tbl = Table("t", metadata, Column("x", Integer, server_default="5'6"))
        self.assert_compile(
            CreateTable(tbl), "CREATE TABLE t (x INTEGER DEFAULT '5''6')"
        )
    def test_text(self):
        """A text() construct renders verbatim, without quoting."""
        metadata = MetaData()
        tbl = Table(
            "t", metadata, Column("x", Integer, server_default=text("5 + 8"))
        )
        self.assert_compile(
            CreateTable(tbl), "CREATE TABLE t (x INTEGER DEFAULT 5 + 8)"
        )
    def test_text_w_quotes(self):
        """Quote characters inside a text() default pass through as-is."""
        metadata = MetaData()
        tbl = Table(
            "t", metadata, Column("x", Integer, server_default=text("5 ' 8"))
        )
        self.assert_compile(
            CreateTable(tbl), "CREATE TABLE t (x INTEGER DEFAULT 5 ' 8)"
        )
    def test_literal_binds_w_quotes(self):
        """A literal() default renders as an escaped literal bind."""
        metadata = MetaData()
        tbl = Table(
            "t",
            metadata,
            Column("x", Integer, server_default=literal("5 ' 8")),
        )
        self.assert_compile(
            CreateTable(tbl), """CREATE TABLE t (x INTEGER DEFAULT '5 '' 8')"""
        )
    def test_text_literal_binds(self):
        """Bound parameters attached to a text() default are inlined."""
        metadata = MetaData()
        tbl = Table(
            "t",
            metadata,
            Column(
                "x", Integer, server_default=text("q + :x1").bindparams(x1=7)
            ),
        )
        self.assert_compile(
            CreateTable(tbl), "CREATE TABLE t (x INTEGER DEFAULT q + 7)"
        )
    def test_sqlexpr(self):
        """A SQL expression default compiles as the raw expression."""
        metadata = MetaData()
        tbl = Table(
            "t",
            metadata,
            Column(
                "x",
                Integer,
                server_default=literal_column("a") + literal_column("b"),
            ),
        )
        self.assert_compile(
            CreateTable(tbl), "CREATE TABLE t (x INTEGER DEFAULT a + b)"
        )
    def test_literal_binds_plain(self):
        """Concatenated literal() values render as quoted literals."""
        metadata = MetaData()
        tbl = Table(
            "t",
            metadata,
            Column("x", Integer, server_default=literal("a") + literal("b")),
        )
        self.assert_compile(
            CreateTable(tbl), "CREATE TABLE t (x INTEGER DEFAULT 'a' || 'b')"
        )
    def test_literal_binds_pgarray(self):
        """A PostgreSQL array() default renders as an ARRAY[...] literal."""
        from sqlalchemy.dialects.postgresql import ARRAY, array
        metadata = MetaData()
        tbl = Table(
            "t",
            metadata,
            Column("x", ARRAY(Integer), server_default=array([1, 2, 3])),
        )
        self.assert_compile(
            CreateTable(tbl),
            "CREATE TABLE t (x INTEGER[] DEFAULT ARRAY[1, 2, 3])",
            dialect="postgresql",
        )
class DefaultObjectTest(fixtures.TestBase):
    """Tests for sa.ColumnDefault construction: which Python callable
    signatures are accepted, and how Column distributes its arguments
    among the 'default', 'onupdate', 'server_default' and
    'server_onupdate' slots."""
    def test_bad_arg_signature(self):
        # A default callable may take zero positional args or one (the
        # execution context); anything requiring two must be rejected.
        ex_msg = (
            "ColumnDefault Python function takes zero "
            "or one positional arguments"
        )
        def fn1(x, y):
            pass
        def fn2(x, y, z=3):
            pass
        class fn3(object):
            def __init__(self, x, y):
                pass
        class FN4(object):
            def __call__(self, x, y):
                pass
        fn4 = FN4()
        for fn in fn1, fn2, fn3, fn4:
            assert_raises_message(
                sa.exc.ArgumentError, ex_msg, sa.ColumnDefault, fn
            )
    def test_arg_signature(self):
        # All of these are acceptable: zero-arg callables, callables
        # with defaulted extras, builtins, and context-taking callables
        # (which receive the string "context" via c.arg below).
        def fn1():
            pass
        def fn2():
            pass
        def fn3(x=1):
            eq_(x, 1)
        def fn4(x=1, y=2, z=3):
            eq_(x, 1)
        fn5 = list
        class fn6a(object):
            def __init__(self, x):
                eq_(x, "context")
        class fn6b(object):
            def __init__(self, x, y=3):
                eq_(x, "context")
        class FN7(object):
            def __call__(self, x):
                eq_(x, "context")
        fn7 = FN7()
        class FN8(object):
            def __call__(self, x, y=3):
                eq_(x, "context")
        fn8 = FN8()
        for fn in fn1, fn2, fn3, fn4, fn5, fn6a, fn6b, fn7, fn8:
            c = sa.ColumnDefault(fn)
            c.arg("context")
    def _check_default_slots(self, tbl, name, *wanted):
        # Assert that exactly the slots named in *wanted are populated
        # on column 'name' and that every remaining slot is None.
        slots = [
            "default",
            "onupdate",
            "server_default",
            "server_onupdate",
        ]
        col = tbl.c[name]
        for slot in wanted:
            slots.remove(slot)
            assert getattr(col, slot) is not None, getattr(col, slot)
        for slot in slots:
            assert getattr(col, slot) is None, getattr(col, slot)
    def test_py_vs_server_default_detection_one(self):
        # Keyword-style default arguments land in the expected slots.
        has_ = self._check_default_slots
        metadata = MetaData()
        tbl = Table(
            "default_test",
            metadata,
            # python function
            Column("col1", Integer, primary_key=True, default="1"),
            # python literal
            Column(
                "col2",
                String(20),
                default="imthedefault",
                onupdate="im the update",
            ),
            # preexecute expression
            Column(
                "col3",
                Integer,
                default=func.length("abcdef"),
                onupdate=func.length("abcdefghijk"),
            ),
            # SQL-side default from sql expression
            Column("col4", Integer, server_default="1"),
            # SQL-side default from literal expression
            Column("col5", Integer, server_default="1"),
            # preexecute + update timestamp
            Column(
                "col6",
                sa.Date,
                default=datetime.datetime.today,
                onupdate=datetime.datetime.today,
            ),
            Column("boolcol1", sa.Boolean, default=True),
            Column("boolcol2", sa.Boolean, default=False),
            # python function which uses ExecutionContext
            Column("col7", Integer, default=lambda: 5, onupdate=lambda: 10,),
            # python builtin
            Column(
                "col8",
                sa.Date,
                default=datetime.date.today,
                onupdate=datetime.date.today,
            ),
            Column("col9", String(20), default="py", server_default="ddl"),
        )
        has_(tbl, "col1", "default")
        has_(tbl, "col2", "default", "onupdate")
        has_(tbl, "col3", "default", "onupdate")
        has_(tbl, "col4", "server_default")
        has_(tbl, "col5", "server_default")
        has_(tbl, "col6", "default", "onupdate")
        has_(tbl, "boolcol1", "default")
        has_(tbl, "boolcol2", "default")
        has_(tbl, "col7", "default", "onupdate")
        has_(tbl, "col8", "default", "onupdate")
        has_(tbl, "col9", "default", "server_default")
    def test_py_vs_server_default_detection_two(self):
        # Positional ColumnDefault / DefaultClause / Sequence objects
        # are likewise sorted into the correct slots.
        has_ = self._check_default_slots
        metadata = MetaData()
        ColumnDefault, DefaultClause = sa.ColumnDefault, sa.DefaultClause
        tbl = Table(
            "t2",
            metadata,
            Column("col1", Integer, Sequence("foo")),
            Column(
                "col2", Integer, default=Sequence("foo"), server_default="y"
            ),
            Column("col3", Integer, Sequence("foo"), server_default="x"),
            Column("col4", Integer, ColumnDefault("x"), DefaultClause("y")),
            # NOTE(review): "col4" is defined twice; the following Column
            # replaces the one above inside the Table, so only the
            # for_update variant is actually checked below — confirm
            # whether the duplicate name is intentional.
            Column(
                "col4",
                Integer,
                ColumnDefault("x"),
                DefaultClause("y"),
                DefaultClause("y", for_update=True),
            ),
            Column(
                "col5",
                Integer,
                ColumnDefault("x"),
                DefaultClause("y"),
                onupdate="z",
            ),
            Column(
                "col6",
                Integer,
                ColumnDefault("x"),
                server_default="y",
                onupdate="z",
            ),
            Column(
                "col7", Integer, default="x", server_default="y", onupdate="z"
            ),
            Column(
                "col8",
                Integer,
                server_onupdate="u",
                default="x",
                server_default="y",
                onupdate="z",
            ),
        )
        has_(tbl, "col1", "default")
        has_(tbl, "col2", "default", "server_default")
        has_(tbl, "col3", "default", "server_default")
        has_(tbl, "col4", "default", "server_default", "server_onupdate")
        has_(tbl, "col5", "default", "server_default", "onupdate")
        has_(tbl, "col6", "default", "server_default", "onupdate")
        has_(tbl, "col7", "default", "server_default", "onupdate")
        has_(
            tbl,
            "col8",
            "default",
            "server_default",
            "onupdate",
            "server_onupdate",
        )
    def test_no_embed_in_sql(self):
        """Using a DefaultGenerator, Sequence, DefaultClause
        in the columns, where clause of a select, or in the values
        clause of insert, update, raises an informative error"""
        t = Table(
            "some_table",
            MetaData(),
            Column("id", Integer),
            Column("col4", String()),
        )
        for const in (
            sa.Sequence("y"),
            sa.ColumnDefault("y"),
            sa.DefaultClause("y"),
        ):
            assert_raises_message(
                sa.exc.ArgumentError,
                "SQL expression object expected, got object of type "
                "<.* 'list'> instead",
                t.select,
                [const],
            )
            assert_raises_message(
                sa.exc.InvalidRequestError,
                "cannot be used directly as a column expression.",
                str,
                t.insert().values(col4=const),
            )
            assert_raises_message(
                sa.exc.InvalidRequestError,
                "cannot be used directly as a column expression.",
                str,
                t.update().values(col4=const),
            )
class DefaultRoundTripTest(fixtures.TablesTest):
    """Round-trip tests: INSERT and UPDATE against a table exercising
    every flavor of default (Python scalar, Python callable,
    context-aware callable, SQL expression, server-side DDL default,
    and a bound-processor type) and verifying the values read back."""
    __backend__ = True
    @classmethod
    def define_tables(cls, metadata):
        # Monotonically increasing python-side primary-key default;
        # teardown() resets the counter so each test starts at 51.
        default_generator = cls.default_generator = {"x": 50}
        def mydefault():
            default_generator["x"] += 1
            return default_generator["x"]
        def myupdate_with_ctx(ctx):
            # Context-aware onupdate: uses the statement's connection.
            conn = ctx.connection
            return conn.execute(sa.select([sa.text("13")])).scalar()
        def mydefault_using_connection(ctx):
            # Context-aware default: uses the statement's connection.
            conn = ctx.connection
            return conn.execute(sa.select([sa.text("12")])).scalar()
        use_function_defaults = testing.against("postgresql", "mssql")
        is_oracle = testing.against("oracle")
        class MyClass(object):
            @classmethod
            def gen_default(cls, ctx):
                return "hi"
        class MyType(TypeDecorator):
            # Prefixes "BIND" on the way in, proving the bind processor
            # runs for python-side defaults too.
            impl = String(50)
            def process_bind_param(self, value, dialect):
                if value is not None:
                    value = "BIND" + value
                return value
        cls.f = 6
        cls.f2 = 11
        # Work out per-backend "current date" expressions up front so the
        # expected server-side values (cls.ts) can be captured for
        # comparison in the tests below.
        with testing.db.connect() as conn:
            currenttime = cls.currenttime = func.current_date(type_=sa.Date)
            if is_oracle:
                ts = conn.scalar(
                    sa.select(
                        [
                            func.trunc(
                                func.current_timestamp(),
                                sa.literal_column("'DAY'"),
                                type_=sa.Date,
                            )
                        ]
                    )
                )
                currenttime = cls.currenttime = func.trunc(
                    currenttime, sa.literal_column("'DAY'"), type_=sa.Date
                )
                def1 = currenttime
                def2 = func.trunc(
                    sa.text("current_timestamp"),
                    sa.literal_column("'DAY'"),
                    type_=sa.Date,
                )
                deftype = sa.Date
            elif use_function_defaults:
                def1 = currenttime
                deftype = sa.Date
                if testing.against("mssql"):
                    def2 = sa.text("getdate()")
                else:
                    def2 = sa.text("current_date")
                ts = conn.scalar(func.current_date())
            else:
                # Backends without usable function defaults fall back to
                # plain integer literals.
                def1 = def2 = "3"
                ts = 3
                deftype = Integer
            cls.ts = ts
        Table(
            "default_test",
            metadata,
            # python function
            Column("col1", Integer, primary_key=True, default=mydefault),
            # python literal
            Column(
                "col2",
                String(20),
                default="imthedefault",
                onupdate="im the update",
            ),
            # preexecute expression
            Column(
                "col3",
                Integer,
                default=func.length("abcdef"),
                onupdate=func.length("abcdefghijk"),
            ),
            # SQL-side default from sql expression
            Column("col4", deftype, server_default=def1),
            # SQL-side default from literal expression
            Column("col5", deftype, server_default=def2),
            # preexecute + update timestamp
            Column("col6", sa.Date, default=currenttime, onupdate=currenttime),
            Column("boolcol1", sa.Boolean, default=True),
            Column("boolcol2", sa.Boolean, default=False),
            # python function which uses ExecutionContext
            Column(
                "col7",
                Integer,
                default=mydefault_using_connection,
                onupdate=myupdate_with_ctx,
            ),
            # python builtin
            Column(
                "col8",
                sa.Date,
                default=datetime.date.today,
                onupdate=datetime.date.today,
            ),
            # combo
            Column("col9", String(20), default="py", server_default="ddl"),
            # python method w/ context
            Column("col10", String(20), default=MyClass.gen_default),
            # fixed default w/ type that has bound processor
            Column("col11", MyType(), default="foo"),
        )
    def teardown(self):
        # Reset the shared counter so col1 defaults restart at 51.
        self.default_generator["x"] = 50
        super(DefaultRoundTripTest, self).teardown()
    def test_standalone(self, connection):
        # Default objects can be executed directly on a connection.
        t = self.tables.default_test
        x = connection.execute(t.c.col1.default)
        y = connection.execute(t.c.col2.default)
        z = connection.execute(t.c.col3.default)
        assert 50 <= x <= 57
        eq_(y, "imthedefault")
        eq_(z, self.f)
    def test_insert(self, connection):
        t = self.tables.default_test
        r = connection.execute(t.insert())
        assert r.lastrow_has_defaults()
        # Server-generated columns require a post-fetch.
        eq_(
            set(r.context.postfetch_cols),
            set([t.c.col3, t.c.col5, t.c.col4, t.c.col6]),
        )
        r = connection.execute(t.insert(inline=True))
        assert r.lastrow_has_defaults()
        eq_(
            set(r.context.postfetch_cols),
            set([t.c.col3, t.c.col5, t.c.col4, t.c.col6]),
        )
        connection.execute(t.insert())
        ctexec = connection.execute(
            sa.select([self.currenttime.label("now")])
        ).scalar()
        result = connection.execute(t.select().order_by(t.c.col1))
        today = datetime.date.today()
        eq_(
            result.fetchall(),
            [
                (
                    x,
                    "imthedefault",
                    self.f,
                    self.ts,
                    self.ts,
                    ctexec,
                    True,
                    False,
                    12,
                    today,
                    "py",
                    "hi",
                    "BINDfoo",
                )
                for x in range(51, 54)
            ],
        )
        # Explicit None overrides the python-side default for col9.
        connection.execute(t.insert(), dict(col9=None))
        # TODO: why are we looking at 'r' when we just executed something
        # else ?
        assert r.lastrow_has_defaults()
        eq_(
            set(r.context.postfetch_cols),
            set([t.c.col3, t.c.col5, t.c.col4, t.c.col6]),
        )
        eq_(
            list(connection.execute(t.select().where(t.c.col1 == 54))),
            [
                (
                    54,
                    "imthedefault",
                    self.f,
                    self.ts,
                    self.ts,
                    ctexec,
                    True,
                    False,
                    12,
                    today,
                    None,
                    "hi",
                    "BINDfoo",
                )
            ],
        )
    def test_insertmany(self, connection):
        # executemany-style insert fires defaults for every row.
        t = self.tables.default_test
        connection.execute(t.insert(), [{}, {}, {}])
        ctexec = connection.scalar(self.currenttime)
        result = connection.execute(t.select().order_by(t.c.col1))
        today = datetime.date.today()
        eq_(
            list(result),
            [
                (
                    51,
                    "imthedefault",
                    self.f,
                    self.ts,
                    self.ts,
                    ctexec,
                    True,
                    False,
                    12,
                    today,
                    "py",
                    "hi",
                    "BINDfoo",
                ),
                (
                    52,
                    "imthedefault",
                    self.f,
                    self.ts,
                    self.ts,
                    ctexec,
                    True,
                    False,
                    12,
                    today,
                    "py",
                    "hi",
                    "BINDfoo",
                ),
                (
                    53,
                    "imthedefault",
                    self.f,
                    self.ts,
                    self.ts,
                    ctexec,
                    True,
                    False,
                    12,
                    today,
                    "py",
                    "hi",
                    "BINDfoo",
                ),
            ],
        )
    @testing.requires.multivalues_inserts
    def test_insert_multivalues(self, connection):
        # Multi-VALUES insert (single statement, several rows) also
        # fires defaults for every row.
        t = self.tables.default_test
        connection.execute(t.insert().values([{}, {}, {}]))
        ctexec = connection.execute(self.currenttime).scalar()
        result = connection.execute(t.select().order_by(t.c.col1))
        today = datetime.date.today()
        eq_(
            list(result),
            [
                (
                    51,
                    "imthedefault",
                    self.f,
                    self.ts,
                    self.ts,
                    ctexec,
                    True,
                    False,
                    12,
                    today,
                    "py",
                    "hi",
                    "BINDfoo",
                ),
                (
                    52,
                    "imthedefault",
                    self.f,
                    self.ts,
                    self.ts,
                    ctexec,
                    True,
                    False,
                    12,
                    today,
                    "py",
                    "hi",
                    "BINDfoo",
                ),
                (
                    53,
                    "imthedefault",
                    self.f,
                    self.ts,
                    self.ts,
                    ctexec,
                    True,
                    False,
                    12,
                    today,
                    "py",
                    "hi",
                    "BINDfoo",
                ),
            ],
        )
    def test_missing_many_param(self, connection):
        # In executemany, a column present in the first param set must
        # appear in every subsequent set; here group 1 omits col7.
        t = self.tables.default_test
        assert_raises_message(
            exc.StatementError,
            "A value is required for bind parameter 'col7', in parameter "
            "group 1",
            connection.execute,
            t.insert(),
            {"col4": 7, "col7": 12, "col8": 19},
            {"col4": 7, "col8": 19},
            {"col4": 7, "col7": 12, "col8": 19},
        )
    def test_insert_values(self, connection):
        # Explicit values() overrides the column's python-side default.
        t = self.tables.default_test
        connection.execute(t.insert().values(col3=50))
        result = connection.execute(t.select().order_by(t.c.col1))
        eq_(50, result.first()["col3"])
    def test_updatemany(self, connection):
        t = self.tables.default_test
        connection.execute(t.insert(), [{}, {}, {}])
        # First update row 51 explicitly, then executemany-update all
        # three rows so the onupdate defaults fire.
        connection.execute(
            t.update().where(t.c.col1 == sa.bindparam("pkval")),
            {"pkval": 51, "col7": None, "col8": None, "boolcol1": False},
        )
        connection.execute(
            t.update().where(t.c.col1 == sa.bindparam("pkval")),
            [{"pkval": 51}, {"pkval": 52}, {"pkval": 53}],
        )
        ctexec = connection.scalar(self.currenttime)
        today = datetime.date.today()
        result = connection.execute(t.select().order_by(t.c.col1))
        eq_(
            list(result),
            [
                (
                    51,
                    "im the update",
                    self.f2,
                    self.ts,
                    self.ts,
                    ctexec,
                    False,
                    False,
                    13,
                    today,
                    "py",
                    "hi",
                    "BINDfoo",
                ),
                (
                    52,
                    "im the update",
                    self.f2,
                    self.ts,
                    self.ts,
                    ctexec,
                    True,
                    False,
                    13,
                    today,
                    "py",
                    "hi",
                    "BINDfoo",
                ),
                (
                    53,
                    "im the update",
                    self.f2,
                    self.ts,
                    self.ts,
                    ctexec,
                    True,
                    False,
                    13,
                    today,
                    "py",
                    "hi",
                    "BINDfoo",
                ),
            ],
        )
    def test_update(self, connection):
        # Single-row update: onupdate defaults fire; explicit None for
        # col4/col5 overrides their server defaults.
        t = self.tables.default_test
        r = connection.execute(t.insert())
        pk = r.inserted_primary_key[0]
        connection.execute(
            t.update().where(t.c.col1 == pk), dict(col4=None, col5=None)
        )
        ctexec = connection.scalar(self.currenttime)
        result = connection.execute(t.select().where(t.c.col1 == pk))
        result = result.first()
        eq_(
            result,
            (
                pk,
                "im the update",
                self.f2,
                None,
                None,
                ctexec,
                True,
                False,
                13,
                datetime.date.today(),
                "py",
                "hi",
                "BINDfoo",
            ),
        )
    def test_update_values(self, connection):
        # Explicit values() on update overrides the onupdate default.
        t = self.tables.default_test
        r = connection.execute(t.insert())
        pk = r.inserted_primary_key[0]
        connection.execute(t.update().where(t.c.col1 == pk).values(col3=55))
        result = connection.execute(t.select().where(t.c.col1 == pk))
        row = result.first()
        eq_(55, row["col3"])
class CTEDefaultTest(fixtures.TablesTest):
    """Tests that column defaults / onupdates still fire when one DML
    statement is embedded in another as a CTE."""
    __requires__ = ("ctes", "returning", "ctes_on_dml")
    __backend__ = True
    @classmethod
    def define_tables(cls, metadata):
        Table(
            "q",
            metadata,
            Column("x", Integer, default=2),
            Column("y", Integer, onupdate=5),
            Column("z", Integer),
        )
        Table(
            "p",
            metadata,
            Column("s", Integer),
            Column("t", Integer),
            Column("u", Integer, onupdate=1),
        )
    @testing.combinations(
        ("update", "select", testing.requires.ctes_on_dml),
        ("delete", "select", testing.requires.ctes_on_dml),
        ("insert", "select", testing.requires.ctes_on_dml),
        ("select", "update"),
        ("select", "insert"),
        argnames="a, b",
    )
    def test_a_in_b(self, a, b, connection):
        # 'a' selects the statement type used as the CTE against table
        # q; 'b' selects the outer statement type against table p.
        q = self.tables.q
        p = self.tables.p
        conn = connection
        # 'expected' is the final (x, y) of table q after the run.
        if a == "delete":
            conn.execute(q.insert().values(y=10, z=1))
            cte = q.delete().where(q.c.z == 1).returning(q.c.z).cte("c")
            expected = None
        elif a == "insert":
            cte = q.insert().values(z=1, y=10).returning(q.c.z).cte("c")
            expected = (2, 10)
        elif a == "update":
            conn.execute(q.insert().values(x=5, y=10, z=1))
            cte = (
                q.update()
                .where(q.c.z == 1)
                .values(x=7)
                .returning(q.c.z)
                .cte("c")
            )
            expected = (7, 5)
        elif a == "select":
            conn.execute(q.insert().values(x=5, y=10, z=1))
            cte = sa.select([q.c.z]).cte("c")
            expected = (5, 10)
        if b == "select":
            conn.execute(p.insert().values(s=1))
            stmt = select([p.c.s, cte.c.z]).where(p.c.s == cte.c.z)
        elif b == "insert":
            sel = select([1, cte.c.z])
            stmt = (
                p.insert().from_select(["s", "t"], sel).returning(p.c.s, p.c.t)
            )
        elif b == "delete":
            # NOTE(review): no combination above passes b == "delete",
            # so this branch (which builds an INSERT, not a DELETE)
            # appears to be unreachable — confirm intent.
            stmt = p.insert().values(s=1, t=cte.c.z).returning(p.c.s, cte.c.z)
        elif b == "update":
            conn.execute(p.insert().values(s=1))
            stmt = (
                p.update()
                .values(t=5)
                .where(p.c.s == cte.c.z)
                .returning(p.c.u, cte.c.z)
            )
        eq_(list(conn.execute(stmt)), [(1, 1)])
        eq_(conn.execute(select([q.c.x, q.c.y])).first(), expected)
class PKDefaultTest(fixtures.TablesTest):
    """Primary keys whose python-side default is a scalar subquery or a
    SQL text expression still yield a usable inserted_primary_key."""
    __requires__ = ("subqueries",)
    __backend__ = True
    @classmethod
    def define_tables(cls, metadata):
        t2 = Table("t2", metadata, Column("nextid", Integer))
        Table(
            "t1",
            metadata,
            Column(
                "id",
                Integer,
                primary_key=True,
                # PK default is a scalar subquery against t2.
                default=sa.select([func.max(t2.c.nextid)]).as_scalar(),
            ),
            Column("data", String(30)),
        )
        Table(
            "date_table",
            metadata,
            Column(
                "date_id",
                DateTime,
                default=text("current_timestamp"),
                primary_key=True,
            ),
        )
    @testing.requires.returning
    def test_with_implicit_returning(self):
        self._test(True)
    def test_regular(self):
        self._test(False)
    def _test(self, returning):
        # Run the scenario with implicit RETURNING enabled or disabled
        # via a per-test engine (unless the dialect never supports it).
        t2, t1, date_table = (
            self.tables.t2,
            self.tables.t1,
            self.tables.date_table,
        )
        if not returning and not testing.db.dialect.implicit_returning:
            engine = testing.db
        else:
            engine = engines.testing_engine(
                options={"implicit_returning": returning}
            )
        with engine.begin() as conn:
            conn.execute(t2.insert(), nextid=1)
            r = conn.execute(t1.insert(), data="hi")
            eq_([1], r.inserted_primary_key)
            conn.execute(t2.insert(), nextid=2)
            r = conn.execute(t1.insert(), data="there")
            eq_([2], r.inserted_primary_key)
            r = conn.execute(date_table.insert())
            assert isinstance(r.inserted_primary_key[0], datetime.datetime)
class PKIncrementTest(fixtures.TablesTest):
    """Autoincrementing primary keys yield unique, increasing values
    across a variety of insert styles."""
    run_define_tables = "each"
    __backend__ = True
    @classmethod
    def define_tables(cls, metadata):
        Table(
            "aitable",
            metadata,
            Column(
                "id",
                Integer,
                # optional Sequence: only used on backends that need it.
                Sequence("ai_id_seq", optional=True),
                primary_key=True,
            ),
            Column("int1", Integer),
            Column("str1", String(20)),
        )
    # TODO: add coverage for increment on a secondary column in a key
    @testing.fails_on("firebird", "Data type unknown")
    def _test_autoincrement(self, bind):
        # Shared body for the autocommit and transactional variants:
        # four inserts must produce four distinct, sequential ids.
        aitable = self.tables.aitable
        ids = set()
        rs = bind.execute(aitable.insert(), int1=1)
        last = rs.inserted_primary_key[0]
        self.assert_(last)
        self.assert_(last not in ids)
        ids.add(last)
        rs = bind.execute(aitable.insert(), str1="row 2")
        last = rs.inserted_primary_key[0]
        self.assert_(last)
        self.assert_(last not in ids)
        ids.add(last)
        rs = bind.execute(aitable.insert(), int1=3, str1="row 3")
        last = rs.inserted_primary_key[0]
        self.assert_(last)
        self.assert_(last not in ids)
        ids.add(last)
        rs = bind.execute(aitable.insert(values={"int1": func.length("four")}))
        last = rs.inserted_primary_key[0]
        self.assert_(last)
        self.assert_(last not in ids)
        ids.add(last)
        eq_(ids, set([1, 2, 3, 4]))
        eq_(
            list(bind.execute(aitable.select().order_by(aitable.c.id))),
            [(1, 1, None), (2, None, "row 2"), (3, 3, "row 3"), (4, 4, None)],
        )
    def test_autoincrement_autocommit(self):
        self._test_autoincrement(testing.db)
    def test_autoincrement_transaction(self):
        with testing.db.begin() as conn:
            self._test_autoincrement(conn)
class EmptyInsertTest(fixtures.TestBase):
    """Round trip for an INSERT statement that supplies no values."""
    __backend__ = True
    @testing.fails_on("oracle", "FIXME: unknown")
    @testing.provide_metadata
    def test_empty_insert(self, connection):
        """An empty INSERT populates the row via the server default."""
        tbl = Table(
            "t1",
            self.metadata,
            Column("is_true", Boolean, server_default="1"),
        )
        self.metadata.create_all(connection)
        connection.execute(tbl.insert())
        row_count = connection.scalar(
            select([func.count(text("*"))]).select_from(tbl)
        )
        eq_(1, row_count)
        eq_(True, connection.scalar(tbl.select()))
class AutoIncrementTest(fixtures.TestBase):
    """Tests for autoincrement detection on primary key columns and for
    disabling it explicitly or implicitly (FK, non-integer affinity)."""
    __requires__ = ("identity",)
    __backend__ = True
    @testing.provide_metadata
    def test_autoincrement_single_col(self, connection):
        single = Table(
            "single", self.metadata, Column("id", Integer, primary_key=True)
        )
        self.metadata.create_all(connection)
        r = connection.execute(single.insert())
        id_ = r.inserted_primary_key[0]
        eq_(id_, 1)
        eq_(connection.scalar(sa.select([single.c.id])), 1)
    def test_autoinc_detection_no_affinity(self):
        # A type with no integer affinity cannot be autoincrement.
        class MyType(TypeDecorator):
            impl = TypeEngine
        assert MyType()._type_affinity is None
        t = Table("x", MetaData(), Column("id", MyType(), primary_key=True))
        assert t._autoincrement_column is None
    def test_autoincrement_ignore_fk(self):
        # autoincrement="ignore_fk" re-enables detection despite the FK.
        m = MetaData()
        Table("y", m, Column("id", Integer(), primary_key=True))
        x = Table(
            "x",
            m,
            Column(
                "id",
                Integer(),
                ForeignKey("y.id"),
                autoincrement="ignore_fk",
                primary_key=True,
            ),
        )
        assert x._autoincrement_column is x.c.id
    def test_autoincrement_fk_disqualifies(self):
        # By default a foreign key disables autoincrement detection.
        m = MetaData()
        Table("y", m, Column("id", Integer(), primary_key=True))
        x = Table(
            "x",
            m,
            Column("id", Integer(), ForeignKey("y.id"), primary_key=True),
        )
        assert x._autoincrement_column is None
    @testing.only_on("sqlite")
    @testing.provide_metadata
    def test_non_autoincrement(self, connection):
        # sqlite INT primary keys can be non-unique! (only for ints)
        nonai = Table(
            "nonaitest",
            self.metadata,
            Column("id", Integer, autoincrement=False, primary_key=True),
            Column("data", String(20)),
        )
        nonai.create(connection)
        # just testing SQLite for now, it passes
        with expect_warnings(".*has no Python-side or server-side default.*"):
            # postgresql + mysql strict will fail on first row,
            # mysql in legacy mode fails on second row
            connection.execute(nonai.insert(), dict(data="row 1"))
            connection.execute(nonai.insert(), dict(data="row 2"))
    @testing.metadata_fixture(ddl="function")
    def dataset_no_autoinc(self, metadata):
        # plain autoincrement/PK table in the actual schema
        Table("x", metadata, Column("set_id", Integer, primary_key=True))
        # for the INSERT use a table with a Sequence
        # and autoincrement=False. Using a ForeignKey
        # would have the same effect
        some_seq = Sequence("some_seq")
        dataset_no_autoinc = Table(
            "x",
            MetaData(),
            Column(
                "set_id",
                Integer,
                some_seq,
                primary_key=True,
                autoincrement=False,
            ),
        )
        return dataset_no_autoinc
    def test_col_w_optional_sequence_non_autoinc_no_firing(
        self, dataset_no_autoinc, connection
    ):
        """this is testing that a Table which includes a Sequence, when
        run against a DB that does not support sequences, the Sequence
        does not get in the way.
        """
        dataset_no_autoinc.c.set_id.default.optional = True
        connection.execute(dataset_no_autoinc.insert())
        eq_(
            connection.scalar(
                select([func.count("*")]).select_from(dataset_no_autoinc)
            ),
            1,
        )
    @testing.fails_if(testing.requires.sequences)
    def test_col_w_nonoptional_sequence_non_autoinc_no_firing(
        self, dataset_no_autoinc, connection
    ):
        """When the sequence is not optional and sequences are supported,
        the test fails because we didn't create the sequence.
        """
        dataset_no_autoinc.c.set_id.default.optional = False
        connection.execute(dataset_no_autoinc.insert())
        eq_(
            connection.scalar(
                select([func.count("*")]).select_from(dataset_no_autoinc)
            ),
            1,
        )
class SpecialTypePKTest(fixtures.TestBase):
    """test process_result_value in conjunction with primary key columns.
    Also tests that "autoincrement" checks are against
    column.type._type_affinity, rather than the class of "type" itself.
    """
    __backend__ = True
    @classmethod
    def setup_class(cls):
        # Integer-affinity type whose Python-side values look like
        # "INT_<n>"; bind strips the prefix, result re-adds it.
        class MyInteger(TypeDecorator):
            impl = Integer
            def process_bind_param(self, value, dialect):
                if value is None:
                    return None
                return int(value[4:])
            def process_result_value(self, value, dialect):
                if value is None:
                    return None
                return "INT_%d" % value
        cls.MyInteger = MyInteger
    @testing.provide_metadata
    def _run_test(self, *arg, **kw):
        # Shared body: build a table whose PK uses MyInteger with the
        # given default-related args, insert a row, and verify the
        # decorated value round-trips through inserted_primary_key.
        metadata = self.metadata
        implicit_returning = kw.pop("implicit_returning", True)
        kw["primary_key"] = True
        if kw.get("autoincrement", True):
            kw["test_needs_autoincrement"] = True
        t = Table(
            "x",
            metadata,
            Column("y", self.MyInteger, *arg, **kw),
            Column("data", Integer),
            implicit_returning=implicit_returning,
        )
        with testing.db.connect() as conn:
            t.create(conn)
            r = conn.execute(t.insert().values(data=5))
            # we don't pre-fetch 'server_default'.
            if "server_default" in kw and (
                not testing.db.dialect.implicit_returning
                or not implicit_returning
            ):
                eq_(r.inserted_primary_key, [None])
            else:
                eq_(r.inserted_primary_key, ["INT_1"])
            eq_(conn.execute(t.select()).first(), ("INT_1", 5))
    def test_plain(self):
        # among other things, tests that autoincrement
        # is enabled.
        self._run_test()
    def test_literal_default_label(self):
        self._run_test(
            default=literal("INT_1", type_=self.MyInteger).label("foo")
        )
    def test_literal_default_no_label(self):
        self._run_test(default=literal("INT_1", type_=self.MyInteger))
    def test_literal_column_default_no_label(self):
        self._run_test(default=literal_column("1", type_=self.MyInteger))
    def test_sequence(self):
        self._run_test(Sequence("foo_seq"))
    def test_text_clause_default_no_type(self):
        self._run_test(default=text("1"))
    def test_server_default(self):
        self._run_test(server_default="1")
    def test_server_default_no_autoincrement(self):
        self._run_test(server_default="1", autoincrement=False)
    def test_clause(self):
        stmt = select([cast("INT_1", type_=self.MyInteger)]).as_scalar()
        self._run_test(default=stmt)
    @testing.requires.returning
    def test_no_implicit_returning(self):
        self._run_test(implicit_returning=False)
    @testing.requires.returning
    def test_server_default_no_implicit_returning(self):
        # NOTE(review): despite the name, implicit_returning is not
        # disabled here (compare test_no_implicit_returning above),
        # making this identical to test_server_default_no_autoincrement
        # — confirm whether implicit_returning=False was intended.
        self._run_test(server_default="1", autoincrement=False)
class ServerDefaultsOnPKTest(fixtures.TestBase):
    """Behavior of server_default on primary key columns, with and
    without implicit RETURNING and through reflection."""
    __backend__ = True
    @testing.provide_metadata
    def test_string_default_none_on_insert(self, connection):
        """Test that without implicit returning, we return None for
        a string server default.
        That is, we don't want to attempt to pre-execute "server_default"
        generically - the user should use a Python side-default for a case
        like this. Testing that all backends do the same thing here.
        """
        metadata = self.metadata
        t = Table(
            "x",
            metadata,
            Column(
                "y", String(10), server_default="key_one", primary_key=True
            ),
            Column("data", String(10)),
            implicit_returning=False,
        )
        metadata.create_all(connection)
        r = connection.execute(t.insert(), dict(data="data"))
        eq_(r.inserted_primary_key, [None])
        eq_(list(connection.execute(t.select())), [("key_one", "data")])
    @testing.requires.returning
    @testing.provide_metadata
    def test_string_default_on_insert_with_returning(self, connection):
        """With implicit_returning, we get a string PK default back no
        problem."""
        metadata = self.metadata
        t = Table(
            "x",
            metadata,
            Column(
                "y", String(10), server_default="key_one", primary_key=True
            ),
            Column("data", String(10)),
        )
        metadata.create_all(connection)
        r = connection.execute(t.insert(), dict(data="data"))
        eq_(r.inserted_primary_key, ["key_one"])
        eq_(list(connection.execute(t.select())), [("key_one", "data")])
    @testing.provide_metadata
    def test_int_default_none_on_insert(self, connection):
        # An integer PK with only a server_default is not considered
        # autoincrement; without RETURNING the PK comes back as None.
        metadata = self.metadata
        t = Table(
            "x",
            metadata,
            Column("y", Integer, server_default="5", primary_key=True),
            Column("data", String(10)),
            implicit_returning=False,
        )
        assert t._autoincrement_column is None
        metadata.create_all(connection)
        r = connection.execute(t.insert(), dict(data="data"))
        eq_(r.inserted_primary_key, [None])
        if testing.against("sqlite"):
            # SQLite ignores the DEFAULT for its rowid-style PK.
            eq_(list(connection.execute(t.select())), [(1, "data")])
        else:
            eq_(list(connection.execute(t.select())), [(5, "data")])
    @testing.provide_metadata
    def test_autoincrement_reflected_from_server_default(self, connection):
        # Reflection preserves the "not autoincrement" determination.
        metadata = self.metadata
        t = Table(
            "x",
            metadata,
            Column("y", Integer, server_default="5", primary_key=True),
            Column("data", String(10)),
            implicit_returning=False,
        )
        assert t._autoincrement_column is None
        metadata.create_all(connection)
        m2 = MetaData()
        t2 = Table("x", m2, autoload_with=connection, implicit_returning=False)
        assert t2._autoincrement_column is None
    @testing.provide_metadata
    def test_int_default_none_on_insert_reflected(self, connection):
        # Same as test_int_default_none_on_insert, but inserting through
        # a reflected Table.
        metadata = self.metadata
        Table(
            "x",
            metadata,
            Column("y", Integer, server_default="5", primary_key=True),
            Column("data", String(10)),
            implicit_returning=False,
        )
        metadata.create_all(connection)
        m2 = MetaData()
        t2 = Table("x", m2, autoload_with=connection, implicit_returning=False)
        r = connection.execute(t2.insert(), dict(data="data"))
        eq_(r.inserted_primary_key, [None])
        if testing.against("sqlite"):
            eq_(list(connection.execute(t2.select())), [(1, "data")])
        else:
            eq_(list(connection.execute(t2.select())), [(5, "data")])
    @testing.requires.returning
    @testing.provide_metadata
    def test_int_default_on_insert_with_returning(self, connection):
        # With RETURNING available, the server-defaulted int PK is
        # fetched back directly.
        metadata = self.metadata
        t = Table(
            "x",
            metadata,
            Column("y", Integer, server_default="5", primary_key=True),
            Column("data", String(10)),
        )
        metadata.create_all(connection)
        r = connection.execute(t.insert(), dict(data="data"))
        eq_(r.inserted_primary_key, [5])
        eq_(list(connection.execute(t.select())), [(5, "data")])
class UnicodeDefaultsTest(fixtures.TestBase):
    """Python-side defaults on Unicode columns: unicode values are
    accepted silently, bytestrings emit a warning."""

    __backend__ = True

    def test_no_default(self):
        # a Unicode column with no default constructs cleanly
        Column(Unicode(32))

    def test_unicode_default(self):
        # a unicode default on a Unicode column raises no warning
        Column(Unicode(32), default=u("foo"))

    def test_nonunicode_default(self):
        # a bytestring default on a Unicode column emits SAWarning
        assert_raises_message(
            sa.exc.SAWarning,
            "Unicode column 'foobar' has non-unicode "
            "default value b?'foo' specified.",
            Column,
            "foobar",
            Unicode(32),
            default=b("foo"),
        )
class InsertFromSelectTest(fixtures.TablesTest):
    """Python-side column defaults in INSERT ... FROM SELECT."""

    __backend__ = True

    @classmethod
    def define_tables(cls, metadata):
        Table("data", metadata, Column("x", Integer), Column("y", Integer))

    @classmethod
    def insert_data(cls, connection):
        data = cls.tables.data
        connection.execute(
            data.insert(), [{"x": 2, "y": 5}, {"x": 7, "y": 12}]
        )

    @testing.provide_metadata
    def test_insert_from_select_override_defaults(self, connection):
        # the scalar default for "foo" is applied to every selected row
        data = self.tables.data
        table = Table(
            "sometable",
            self.metadata,
            Column("x", Integer),
            Column("foo", Integer, default=12),
            Column("y", Integer),
        )
        table.create(connection)
        sel = select([data.c.x, data.c.y])
        ins = table.insert().from_select(["x", "y"], sel)
        connection.execute(ins)
        eq_(
            list(connection.execute(table.select().order_by(table.c.x))),
            [(2, 12, 5), (7, 12, 12)],
        )

    @testing.provide_metadata
    def test_insert_from_select_fn_defaults(self, connection):
        # a callable default is evaluated once per statement, not once
        # per selected row
        data = self.tables.data
        counter = itertools.count(1)

        def foo(ctx):
            return next(counter)

        table = Table(
            "sometable",
            self.metadata,
            Column("x", Integer),
            Column("foo", Integer, default=foo),
            Column("y", Integer),
        )
        table.create(connection)
        sel = select([data.c.x, data.c.y])
        ins = table.insert().from_select(["x", "y"], sel)
        connection.execute(ins)
        # counter is only called once!
        eq_(
            list(connection.execute(table.select().order_by(table.c.x))),
            [(2, 1, 5), (7, 1, 12)],
        )
class CurrentParametersTest(fixtures.TablesTest):
    """get_current_parameters() as seen from inside a context-sensitive
    Python-side default function, for single, executemany and
    multi-VALUES INSERT styles."""

    __backend__ = True

    @classmethod
    def define_tables(cls, metadata):
        def gen_default(context):
            # placeholder; swapped out per-test via _fixture()
            pass

        Table(
            "some_table",
            metadata,
            Column("x", String(50), default=gen_default),
            Column("y", String(50)),
        )

    def _fixture(self, fn):
        # replace the table's default callable with one delegating to fn
        def gen_default(context):
            fn(context)

        some_table = self.tables.some_table
        some_table.c.x.default.arg = gen_default
        return fn

    @testing.combinations(
        ("single", "attribute"),
        ("single", "method"),
        ("executemany", "attribute"),
        ("executemany", "method"),
        ("multivalues", "method", testing.requires.multivalues_inserts),
        argnames="exec_type, usemethod",
    )
    def test_parameters(self, exec_type, usemethod, connection):
        collect = mock.Mock()

        @self._fixture
        def fn(context):
            collect(context.get_current_parameters())

        table = self.tables.some_table
        if exec_type in ("multivalues", "executemany"):
            parameters = [{"y": "h1"}, {"y": "h2"}]
        else:
            parameters = [{"y": "hello"}]
        if exec_type == "multivalues":
            # single INSERT ... VALUES (...), (...) statement
            stmt, params = table.insert().values(parameters), {}
        else:
            stmt, params = table.insert(), parameters
        connection.execute(stmt, params)
        # the default fn sees each parameter set, with "x" still unset
        eq_(
            collect.mock_calls,
            [mock.call({"y": param["y"], "x": None}) for param in parameters],
        )
import datetime
import itertools
import sqlalchemy as sa
from sqlalchemy import Boolean
from sqlalchemy import cast
from sqlalchemy import DateTime
from sqlalchemy import exc
from sqlalchemy import ForeignKey
from sqlalchemy import func
from sqlalchemy import Integer
from sqlalchemy import literal
from sqlalchemy import MetaData
from sqlalchemy import Sequence
from sqlalchemy import String
from sqlalchemy import testing
from sqlalchemy import Unicode
from sqlalchemy.schema import CreateTable
from sqlalchemy.sql import literal_column
from sqlalchemy.sql import select
from sqlalchemy.sql import text
from sqlalchemy.testing import assert_raises_message
from sqlalchemy.testing import AssertsCompiledSQL
from sqlalchemy.testing import engines
from sqlalchemy.testing import eq_
from sqlalchemy.testing import expect_warnings
from sqlalchemy.testing import fixtures
from sqlalchemy.testing import mock
from sqlalchemy.testing.schema import Column
from sqlalchemy.testing.schema import Table
from sqlalchemy.types import TypeDecorator
from sqlalchemy.types import TypeEngine
from sqlalchemy.util import b
from sqlalchemy.util import u
class DDLTest(fixtures.TestBase, AssertsCompiledSQL):
    """DDL rendering of server_default values inside CREATE TABLE."""

    __dialect__ = "default"

    def test_string(self):
        # the server_default is interpreted independently of the
        # column's datatype.
        m = MetaData()
        t = Table("t", m, Column("x", Integer, server_default="5"))
        self.assert_compile(
            CreateTable(t), "CREATE TABLE t (x INTEGER DEFAULT '5')"
        )

    def test_string_w_quotes(self):
        # embedded single quotes are doubled in the rendered literal
        m = MetaData()
        t = Table("t", m, Column("x", Integer, server_default="5'6"))
        self.assert_compile(
            CreateTable(t), "CREATE TABLE t (x INTEGER DEFAULT '5''6')"
        )

    def test_text(self):
        # a text() construct renders verbatim, unquoted
        m = MetaData()
        t = Table("t", m, Column("x", Integer, server_default=text("5 + 8")))
        self.assert_compile(
            CreateTable(t), "CREATE TABLE t (x INTEGER DEFAULT 5 + 8)"
        )

    def test_text_w_quotes(self):
        # quoting inside text() is the caller's responsibility; no escaping
        m = MetaData()
        t = Table("t", m, Column("x", Integer, server_default=text("5 ' 8")))
        self.assert_compile(
            CreateTable(t), "CREATE TABLE t (x INTEGER DEFAULT 5 ' 8)"
        )

    def test_literal_binds_w_quotes(self):
        # literal() values, by contrast, are escaped when inlined
        m = MetaData()
        t = Table(
            "t", m, Column("x", Integer, server_default=literal("5 ' 8"))
        )
        self.assert_compile(
            CreateTable(t), """CREATE TABLE t (x INTEGER DEFAULT '5 '' 8')"""
        )

    def test_text_literal_binds(self):
        # bound parameters on a text() default render inline in the DDL
        m = MetaData()
        t = Table(
            "t",
            m,
            Column(
                "x", Integer, server_default=text("q + :x1").bindparams(x1=7)
            ),
        )
        self.assert_compile(
            CreateTable(t), "CREATE TABLE t (x INTEGER DEFAULT q + 7)"
        )

    def test_sqlexpr(self):
        # arbitrary SQL expressions render as-is
        m = MetaData()
        t = Table(
            "t",
            m,
            Column(
                "x",
                Integer,
                server_default=literal_column("a") + literal_column("b"),
            ),
        )
        self.assert_compile(
            CreateTable(t), "CREATE TABLE t (x INTEGER DEFAULT a + b)"
        )

    def test_literal_binds_plain(self):
        # literal() operands render inline with quoting
        m = MetaData()
        t = Table(
            "t",
            m,
            Column("x", Integer, server_default=literal("a") + literal("b")),
        )
        self.assert_compile(
            CreateTable(t), "CREATE TABLE t (x INTEGER DEFAULT 'a' || 'b')"
        )

    def test_literal_binds_pgarray(self):
        # dialect-specific literal rendering (PostgreSQL ARRAY)
        from sqlalchemy.dialects.postgresql import ARRAY, array

        m = MetaData()
        t = Table(
            "t",
            m,
            Column("x", ARRAY(Integer), server_default=array([1, 2, 3])),
        )
        self.assert_compile(
            CreateTable(t),
            "CREATE TABLE t (x INTEGER[] DEFAULT ARRAY[1, 2, 3])",
            dialect="postgresql",
        )
class DefaultObjectTest(fixtures.TestBase):
    """Tests for the default-clause objects themselves: ColumnDefault
    argument validation, and how Column sorts constructor arguments into
    the default / onupdate / server_default / server_onupdate slots."""

    def test_bad_arg_signature(self):
        # callables requiring two or more positional args are rejected
        ex_msg = (
            "ColumnDefault Python function takes zero "
            "or one positional arguments"
        )

        def fn1(x, y):
            pass

        def fn2(x, y, z=3):
            pass

        class fn3(object):
            def __init__(self, x, y):
                pass

        class FN4(object):
            def __call__(self, x, y):
                pass

        fn4 = FN4()

        for fn in fn1, fn2, fn3, fn4:
            assert_raises_message(
                sa.exc.ArgumentError, ex_msg, sa.ColumnDefault, fn
            )

    def test_arg_signature(self):
        # zero- or one-positional-argument callables of every flavor
        # (functions, builtins, classes, callable instances) are accepted
        def fn1():
            pass

        def fn2():
            pass

        def fn3(x=1):
            eq_(x, 1)

        def fn4(x=1, y=2, z=3):
            eq_(x, 1)

        fn5 = list

        class fn6a(object):
            def __init__(self, x):
                eq_(x, "context")

        class fn6b(object):
            def __init__(self, x, y=3):
                eq_(x, "context")

        class FN7(object):
            def __call__(self, x):
                eq_(x, "context")

        fn7 = FN7()

        class FN8(object):
            def __call__(self, x, y=3):
                eq_(x, "context")

        fn8 = FN8()

        for fn in fn1, fn2, fn3, fn4, fn5, fn6a, fn6b, fn7, fn8:
            c = sa.ColumnDefault(fn)
            c.arg("context")

    def _check_default_slots(self, tbl, name, *wanted):
        # assert exactly the *wanted* default-related attributes are set
        # on column *name* and all remaining slots are None
        slots = [
            "default",
            "onupdate",
            "server_default",
            "server_onupdate",
        ]
        col = tbl.c[name]
        for slot in wanted:
            slots.remove(slot)
            assert getattr(col, slot) is not None, getattr(col, slot)
        for slot in slots:
            assert getattr(col, slot) is None, getattr(col, slot)

    def test_py_vs_server_default_detection_one(self):
        # keyword-style arguments land in the expected slots
        has_ = self._check_default_slots
        metadata = MetaData()
        tbl = Table(
            "default_test",
            metadata,
            Column("col1", Integer, primary_key=True, default="1"),
            Column(
                "col2",
                String(20),
                default="imthedefault",
                onupdate="im the update",
            ),
            Column(
                "col3",
                Integer,
                default=func.length("abcdef"),
                onupdate=func.length("abcdefghijk"),
            ),
            Column("col4", Integer, server_default="1"),
            Column("col5", Integer, server_default="1"),
            Column(
                "col6",
                sa.Date,
                default=datetime.datetime.today,
                onupdate=datetime.datetime.today,
            ),
            Column("boolcol1", sa.Boolean, default=True),
            Column("boolcol2", sa.Boolean, default=False),
            Column("col7", Integer, default=lambda: 5, onupdate=lambda: 10,),
            Column(
                "col8",
                sa.Date,
                default=datetime.date.today,
                onupdate=datetime.date.today,
            ),
            Column("col9", String(20), default="py", server_default="ddl"),
        )
        has_(tbl, "col1", "default")
        has_(tbl, "col2", "default", "onupdate")
        has_(tbl, "col3", "default", "onupdate")
        has_(tbl, "col4", "server_default")
        has_(tbl, "col5", "server_default")
        has_(tbl, "col6", "default", "onupdate")
        has_(tbl, "boolcol1", "default")
        has_(tbl, "boolcol2", "default")
        has_(tbl, "col7", "default", "onupdate")
        has_(tbl, "col8", "default", "onupdate")
        has_(tbl, "col9", "default", "server_default")

    def test_py_vs_server_default_detection_two(self):
        # positional ColumnDefault/DefaultClause/Sequence arguments are
        # also routed into the correct slots
        has_ = self._check_default_slots
        metadata = MetaData()
        ColumnDefault, DefaultClause = sa.ColumnDefault, sa.DefaultClause
        tbl = Table(
            "t2",
            metadata,
            Column("col1", Integer, Sequence("foo")),
            Column(
                "col2", Integer, default=Sequence("foo"), server_default="y"
            ),
            Column("col3", Integer, Sequence("foo"), server_default="x"),
            # NOTE(review): "col4" is declared twice; the second declaration
            # (adding the for_update clause) replaces the first in tbl.c --
            # presumably intentional for this test, but worth confirming
            Column("col4", Integer, ColumnDefault("x"), DefaultClause("y")),
            Column(
                "col4",
                Integer,
                ColumnDefault("x"),
                DefaultClause("y"),
                DefaultClause("y", for_update=True),
            ),
            Column(
                "col5",
                Integer,
                ColumnDefault("x"),
                DefaultClause("y"),
                onupdate="z",
            ),
            Column(
                "col6",
                Integer,
                ColumnDefault("x"),
                server_default="y",
                onupdate="z",
            ),
            Column(
                "col7", Integer, default="x", server_default="y", onupdate="z"
            ),
            Column(
                "col8",
                Integer,
                server_onupdate="u",
                default="x",
                server_default="y",
                onupdate="z",
            ),
        )
        has_(tbl, "col1", "default")
        has_(tbl, "col2", "default", "server_default")
        has_(tbl, "col3", "default", "server_default")
        has_(tbl, "col4", "default", "server_default", "server_onupdate")
        has_(tbl, "col5", "default", "server_default", "onupdate")
        has_(tbl, "col6", "default", "server_default", "onupdate")
        has_(tbl, "col7", "default", "server_default", "onupdate")
        has_(
            tbl,
            "col8",
            "default",
            "server_default",
            "onupdate",
            "server_onupdate",
        )

    def test_no_embed_in_sql(self):
        # default objects used directly inside select()/values() raise
        # informative errors rather than rendering silently
        t = Table(
            "some_table",
            MetaData(),
            Column("id", Integer),
            Column("col4", String()),
        )
        for const in (
            sa.Sequence("y"),
            sa.ColumnDefault("y"),
            sa.DefaultClause("y"),
        ):
            assert_raises_message(
                sa.exc.ArgumentError,
                "SQL expression object expected, got object of type "
                "<.* 'list'> instead",
                t.select,
                [const],
            )
            assert_raises_message(
                sa.exc.InvalidRequestError,
                "cannot be used directly as a column expression.",
                str,
                t.insert().values(col4=const),
            )
            assert_raises_message(
                sa.exc.InvalidRequestError,
                "cannot be used directly as a column expression.",
                str,
                t.update().values(col4=const),
            )
class DefaultRoundTripTest(fixtures.TablesTest):
    """Round-trip tests of Python-side and server-side column defaults
    and onupdates against a live database, covering plain INSERT,
    executemany, multi-VALUES, and UPDATE paths."""

    __backend__ = True

    @classmethod
    def define_tables(cls, metadata):
        # mutable counter shared with the tests; reset in teardown()
        default_generator = cls.default_generator = {"x": 50}

        def mydefault():
            default_generator["x"] += 1
            return default_generator["x"]

        def myupdate_with_ctx(ctx):
            # context-sensitive onupdate using the in-flight connection
            conn = ctx.connection
            return conn.execute(sa.select([sa.text("13")])).scalar()

        def mydefault_using_connection(ctx):
            # context-sensitive default using the in-flight connection
            conn = ctx.connection
            return conn.execute(sa.select([sa.text("12")])).scalar()

        use_function_defaults = testing.against("postgresql", "mssql")
        is_oracle = testing.against("oracle")

        class MyClass(object):
            @classmethod
            def gen_default(cls, ctx):
                return "hi"

        class MyType(TypeDecorator):
            # bind-param processing must apply to Python-side defaults too
            impl = String(50)

            def process_bind_param(self, value, dialect):
                if value is not None:
                    value = "BIND" + value
                return value

        cls.f = 6
        cls.f2 = 11

        # pick dialect-appropriate server-default expressions and the
        # value (cls.ts) those defaults are expected to produce
        with testing.db.connect() as conn:
            currenttime = cls.currenttime = func.current_date(type_=sa.Date)
            if is_oracle:
                ts = conn.scalar(
                    sa.select(
                        [
                            func.trunc(
                                func.current_timestamp(),
                                sa.literal_column("'DAY'"),
                                type_=sa.Date,
                            )
                        ]
                    )
                )
                currenttime = cls.currenttime = func.trunc(
                    currenttime, sa.literal_column("'DAY'"), type_=sa.Date
                )
                def1 = currenttime
                def2 = func.trunc(
                    sa.text("current_timestamp"),
                    sa.literal_column("'DAY'"),
                    type_=sa.Date,
                )
                deftype = sa.Date
            elif use_function_defaults:
                def1 = currenttime
                deftype = sa.Date
                if testing.against("mssql"):
                    def2 = sa.text("getdate()")
                else:
                    def2 = sa.text("current_date")
                ts = conn.scalar(func.current_date())
            else:
                # generic backends: plain constant defaults
                def1 = def2 = "3"
                ts = 3
                deftype = Integer

            cls.ts = ts

        Table(
            "default_test",
            metadata,
            Column("col1", Integer, primary_key=True, default=mydefault),
            Column(
                "col2",
                String(20),
                default="imthedefault",
                onupdate="im the update",
            ),
            Column(
                "col3",
                Integer,
                default=func.length("abcdef"),
                onupdate=func.length("abcdefghijk"),
            ),
            Column("col4", deftype, server_default=def1),
            Column("col5", deftype, server_default=def2),
            Column("col6", sa.Date, default=currenttime, onupdate=currenttime),
            Column("boolcol1", sa.Boolean, default=True),
            Column("boolcol2", sa.Boolean, default=False),
            Column(
                "col7",
                Integer,
                default=mydefault_using_connection,
                onupdate=myupdate_with_ctx,
            ),
            Column(
                "col8",
                sa.Date,
                default=datetime.date.today,
                onupdate=datetime.date.today,
            ),
            Column("col9", String(20), default="py", server_default="ddl"),
            Column("col10", String(20), default=MyClass.gen_default),
            Column("col11", MyType(), default="foo"),
        )

    def teardown(self):
        # reset the shared counter so tests are order-independent
        self.default_generator["x"] = 50
        super(DefaultRoundTripTest, self).teardown()

    def test_standalone(self, connection):
        # default objects can also be executed directly, outside INSERT
        t = self.tables.default_test
        x = connection.execute(t.c.col1.default)
        y = connection.execute(t.c.col2.default)
        z = connection.execute(t.c.col3.default)
        assert 50 <= x <= 57
        eq_(y, "imthedefault")
        eq_(z, self.f)

    def test_insert(self, connection):
        t = self.tables.default_test

        r = connection.execute(t.insert())
        assert r.lastrow_has_defaults()
        # server-side columns must be marked for post-fetch
        eq_(
            set(r.context.postfetch_cols),
            set([t.c.col3, t.c.col5, t.c.col4, t.c.col6]),
        )

        r = connection.execute(t.insert(inline=True))
        assert r.lastrow_has_defaults()
        eq_(
            set(r.context.postfetch_cols),
            set([t.c.col3, t.c.col5, t.c.col4, t.c.col6]),
        )

        connection.execute(t.insert())

        ctexec = connection.execute(
            sa.select([self.currenttime.label("now")])
        ).scalar()
        result = connection.execute(t.select().order_by(t.c.col1))
        today = datetime.date.today()
        eq_(
            result.fetchall(),
            [
                (
                    x,
                    "imthedefault",
                    self.f,
                    self.ts,
                    self.ts,
                    ctexec,
                    True,
                    False,
                    12,
                    today,
                    "py",
                    "hi",
                    "BINDfoo",
                )
                for x in range(51, 54)
            ],
        )

        # an explicit None overrides the Python-side default for col9
        connection.execute(t.insert(), dict(col9=None))

        assert r.lastrow_has_defaults()
        eq_(
            set(r.context.postfetch_cols),
            set([t.c.col3, t.c.col5, t.c.col4, t.c.col6]),
        )

        eq_(
            list(connection.execute(t.select().where(t.c.col1 == 54))),
            [
                (
                    54,
                    "imthedefault",
                    self.f,
                    self.ts,
                    self.ts,
                    ctexec,
                    True,
                    False,
                    12,
                    today,
                    None,
                    "hi",
                    "BINDfoo",
                )
            ],
        )

    def test_insertmany(self, connection):
        # executemany path: defaults fire for each empty parameter set
        t = self.tables.default_test
        connection.execute(t.insert(), [{}, {}, {}])

        ctexec = connection.scalar(self.currenttime)
        result = connection.execute(t.select().order_by(t.c.col1))
        today = datetime.date.today()
        eq_(
            list(result),
            [
                (
                    51,
                    "imthedefault",
                    self.f,
                    self.ts,
                    self.ts,
                    ctexec,
                    True,
                    False,
                    12,
                    today,
                    "py",
                    "hi",
                    "BINDfoo",
                ),
                (
                    52,
                    "imthedefault",
                    self.f,
                    self.ts,
                    self.ts,
                    ctexec,
                    True,
                    False,
                    12,
                    today,
                    "py",
                    "hi",
                    "BINDfoo",
                ),
                (
                    53,
                    "imthedefault",
                    self.f,
                    self.ts,
                    self.ts,
                    ctexec,
                    True,
                    False,
                    12,
                    today,
                    "py",
                    "hi",
                    "BINDfoo",
                ),
            ],
        )

    @testing.requires.multivalues_inserts
    def test_insert_multivalues(self, connection):
        # single INSERT ... VALUES (...), (...), (...) statement
        t = self.tables.default_test
        connection.execute(t.insert().values([{}, {}, {}]))

        ctexec = connection.execute(self.currenttime).scalar()
        result = connection.execute(t.select().order_by(t.c.col1))
        today = datetime.date.today()
        eq_(
            list(result),
            [
                (
                    51,
                    "imthedefault",
                    self.f,
                    self.ts,
                    self.ts,
                    ctexec,
                    True,
                    False,
                    12,
                    today,
                    "py",
                    "hi",
                    "BINDfoo",
                ),
                (
                    52,
                    "imthedefault",
                    self.f,
                    self.ts,
                    self.ts,
                    ctexec,
                    True,
                    False,
                    12,
                    today,
                    "py",
                    "hi",
                    "BINDfoo",
                ),
                (
                    53,
                    "imthedefault",
                    self.f,
                    self.ts,
                    self.ts,
                    ctexec,
                    True,
                    False,
                    12,
                    today,
                    "py",
                    "hi",
                    "BINDfoo",
                ),
            ],
        )

    def test_missing_many_param(self, connection):
        # executemany requires consistent keys across parameter sets
        t = self.tables.default_test
        assert_raises_message(
            exc.StatementError,
            "A value is required for bind parameter 'col7', in parameter "
            "group 1",
            connection.execute,
            t.insert(),
            {"col4": 7, "col7": 12, "col8": 19},
            {"col4": 7, "col8": 19},
            {"col4": 7, "col7": 12, "col8": 19},
        )

    def test_insert_values(self, connection):
        # an explicit values() entry overrides the column default
        t = self.tables.default_test
        connection.execute(t.insert().values(col3=50))
        result = connection.execute(t.select().order_by(t.c.col1))
        eq_(50, result.first()["col3"])

    def test_updatemany(self, connection):
        t = self.tables.default_test
        connection.execute(t.insert(), [{}, {}, {}])

        connection.execute(
            t.update().where(t.c.col1 == sa.bindparam("pkval")),
            {"pkval": 51, "col7": None, "col8": None, "boolcol1": False},
        )

        # executemany UPDATE: onupdate defaults fire for every row
        connection.execute(
            t.update().where(t.c.col1 == sa.bindparam("pkval")),
            [{"pkval": 51}, {"pkval": 52}, {"pkval": 53}],
        )

        ctexec = connection.scalar(self.currenttime)
        today = datetime.date.today()
        result = connection.execute(t.select().order_by(t.c.col1))
        eq_(
            list(result),
            [
                (
                    51,
                    "im the update",
                    self.f2,
                    self.ts,
                    self.ts,
                    ctexec,
                    False,
                    False,
                    13,
                    today,
                    "py",
                    "hi",
                    "BINDfoo",
                ),
                (
                    52,
                    "im the update",
                    self.f2,
                    self.ts,
                    self.ts,
                    ctexec,
                    True,
                    False,
                    13,
                    today,
                    "py",
                    "hi",
                    "BINDfoo",
                ),
                (
                    53,
                    "im the update",
                    self.f2,
                    self.ts,
                    self.ts,
                    ctexec,
                    True,
                    False,
                    13,
                    today,
                    "py",
                    "hi",
                    "BINDfoo",
                ),
            ],
        )

    def test_update(self, connection):
        t = self.tables.default_test
        r = connection.execute(t.insert())
        pk = r.inserted_primary_key[0]
        # explicit None overrides the server defaults on update
        connection.execute(
            t.update().where(t.c.col1 == pk), dict(col4=None, col5=None)
        )
        ctexec = connection.scalar(self.currenttime)
        result = connection.execute(t.select().where(t.c.col1 == pk))
        result = result.first()
        eq_(
            result,
            (
                pk,
                "im the update",
                self.f2,
                None,
                None,
                ctexec,
                True,
                False,
                13,
                datetime.date.today(),
                "py",
                "hi",
                "BINDfoo",
            ),
        )

    def test_update_values(self, connection):
        # an explicit values() entry overrides the onupdate default
        t = self.tables.default_test
        r = connection.execute(t.insert())
        pk = r.inserted_primary_key[0]
        connection.execute(t.update().where(t.c.col1 == pk).values(col3=55))
        result = connection.execute(t.select().where(t.c.col1 == pk))
        row = result.first()
        eq_(55, row["col3"])
class CTEDefaultTest(fixtures.TablesTest):
    """Python-side defaults / onupdates when a DML or SELECT statement is
    embedded as a CTE inside another statement."""

    __requires__ = ("ctes", "returning", "ctes_on_dml")
    __backend__ = True

    @classmethod
    def define_tables(cls, metadata):
        Table(
            "q",
            metadata,
            Column("x", Integer, default=2),
            Column("y", Integer, onupdate=5),
            Column("z", Integer),
        )
        Table(
            "p",
            metadata,
            Column("s", Integer),
            Column("t", Integer),
            Column("u", Integer, onupdate=1),
        )

    @testing.combinations(
        ("update", "select", testing.requires.ctes_on_dml),
        ("delete", "select", testing.requires.ctes_on_dml),
        ("insert", "select", testing.requires.ctes_on_dml),
        ("select", "update"),
        ("select", "insert"),
        argnames="a, b",
    )
    def test_a_in_b(self, a, b, connection):
        # statement kind "a" becomes the CTE inside statement kind "b"
        q = self.tables.q
        p = self.tables.p

        conn = connection
        # build the CTE; "expected" is the final (q.x, q.y) row, or None
        # when the row was deleted
        if a == "delete":
            conn.execute(q.insert().values(y=10, z=1))
            cte = q.delete().where(q.c.z == 1).returning(q.c.z).cte("c")
            expected = None
        elif a == "insert":
            cte = q.insert().values(z=1, y=10).returning(q.c.z).cte("c")
            expected = (2, 10)
        elif a == "update":
            conn.execute(q.insert().values(x=5, y=10, z=1))
            cte = (
                q.update()
                .where(q.c.z == 1)
                .values(x=7)
                .returning(q.c.z)
                .cte("c")
            )
            expected = (7, 5)
        elif a == "select":
            conn.execute(q.insert().values(x=5, y=10, z=1))
            cte = sa.select([q.c.z]).cte("c")
            expected = (5, 10)

        # build the enclosing statement around the CTE
        if b == "select":
            conn.execute(p.insert().values(s=1))
            stmt = select([p.c.s, cte.c.z]).where(p.c.s == cte.c.z)
        elif b == "insert":
            sel = select([1, cte.c.z])
            stmt = (
                p.insert().from_select(["s", "t"], sel).returning(p.c.s, p.c.t)
            )
        elif b == "delete":
            stmt = p.insert().values(s=1, t=cte.c.z).returning(p.c.s, cte.c.z)
        elif b == "update":
            conn.execute(p.insert().values(s=1))
            stmt = (
                p.update()
                .values(t=5)
                .where(p.c.s == cte.c.z)
                .returning(p.c.u, cte.c.z)
            )
        eq_(list(conn.execute(stmt)), [(1, 1)])

        eq_(conn.execute(select([q.c.x, q.c.y])).first(), expected)
class PKDefaultTest(fixtures.TablesTest):
    """Primary key values produced by SQL-expression defaults (scalar
    subquery, current_timestamp) on INSERT."""

    __requires__ = ("subqueries",)
    __backend__ = True

    @classmethod
    def define_tables(cls, metadata):
        t2 = Table("t2", metadata, Column("nextid", Integer))
        Table(
            "t1",
            metadata,
            Column(
                "id",
                Integer,
                primary_key=True,
                # PK default is a scalar subquery against t2
                default=sa.select([func.max(t2.c.nextid)]).as_scalar(),
            ),
            Column("data", String(30)),
        )
        Table(
            "date_table",
            metadata,
            Column(
                "date_id",
                DateTime,
                default=text("current_timestamp"),
                primary_key=True,
            ),
        )

    @testing.requires.returning
    def test_with_implicit_returning(self):
        self._test(True)

    def test_regular(self):
        self._test(False)

    def _test(self, returning):
        # run with implicit_returning toggled per the *returning* flag
        t2, t1, date_table = (
            self.tables.t2,
            self.tables.t1,
            self.tables.date_table,
        )
        if not returning and not testing.db.dialect.implicit_returning:
            engine = testing.db
        else:
            engine = engines.testing_engine(
                options={"implicit_returning": returning}
            )
        with engine.begin() as conn:
            conn.execute(t2.insert(), nextid=1)
            r = conn.execute(t1.insert(), data="hi")
            eq_([1], r.inserted_primary_key)

            conn.execute(t2.insert(), nextid=2)
            r = conn.execute(t1.insert(), data="there")
            eq_([2], r.inserted_primary_key)

            r = conn.execute(date_table.insert())
            assert isinstance(r.inserted_primary_key[0], datetime.datetime)
class PKIncrementTest(fixtures.TablesTest):
    """Autoincrementing primary keys backed by an optional Sequence."""

    run_define_tables = "each"
    __backend__ = True

    @classmethod
    def define_tables(cls, metadata):
        Table(
            "aitable",
            metadata,
            Column(
                "id",
                Integer,
                Sequence("ai_id_seq", optional=True),
                primary_key=True,
            ),
            Column("int1", Integer),
            Column("str1", String(20)),
        )

    @testing.fails_on("firebird", "Data type unknown")
    def _test_autoincrement(self, bind):
        aitable = self.tables.aitable

        # each INSERT must yield a fresh, truthy, unique PK value
        ids = set()
        rs = bind.execute(aitable.insert(), int1=1)
        last = rs.inserted_primary_key[0]
        self.assert_(last)
        self.assert_(last not in ids)
        ids.add(last)

        rs = bind.execute(aitable.insert(), str1="row 2")
        last = rs.inserted_primary_key[0]
        self.assert_(last)
        self.assert_(last not in ids)
        ids.add(last)

        rs = bind.execute(aitable.insert(), int1=3, str1="row 3")
        last = rs.inserted_primary_key[0]
        self.assert_(last)
        self.assert_(last not in ids)
        ids.add(last)

        # a SQL-expression value for a non-PK column also round-trips
        rs = bind.execute(aitable.insert(values={"int1": func.length("four")}))
        last = rs.inserted_primary_key[0]
        self.assert_(last)
        self.assert_(last not in ids)
        ids.add(last)

        eq_(ids, set([1, 2, 3, 4]))

        eq_(
            list(bind.execute(aitable.select().order_by(aitable.c.id))),
            [(1, 1, None), (2, None, "row 2"), (3, 3, "row 3"), (4, 4, None)],
        )

    def test_autoincrement_autocommit(self):
        self._test_autoincrement(testing.db)

    def test_autoincrement_transaction(self):
        with testing.db.begin() as conn:
            self._test_autoincrement(conn)
class EmptyInsertTest(fixtures.TestBase):
    """An INSERT with no values still produces a row whose columns take
    their server defaults."""

    __backend__ = True

    @testing.fails_on("oracle", "FIXME: unknown")
    @testing.provide_metadata
    def test_empty_insert(self, connection):
        tbl = Table(
            "t1",
            self.metadata,
            Column("is_true", Boolean, server_default=("1")),
        )
        self.metadata.create_all(connection)

        # empty INSERT: the boolean column falls back to its DDL default
        connection.execute(tbl.insert())

        count_stmt = select([func.count(text("*"))]).select_from(tbl)
        eq_(1, connection.scalar(count_stmt))
        eq_(True, connection.scalar(tbl.select()))
class AutoIncrementTest(fixtures.TestBase):
    """Rules governing which column is selected as the autoincrement
    column, and behavior when autoincrement is disabled."""

    __requires__ = ("identity",)
    __backend__ = True

    @testing.provide_metadata
    def test_autoincrement_single_col(self, connection):
        single = Table(
            "single", self.metadata, Column("id", Integer, primary_key=True)
        )
        self.metadata.create_all(connection)

        r = connection.execute(single.insert())
        id_ = r.inserted_primary_key[0]
        eq_(id_, 1)
        eq_(connection.scalar(sa.select([single.c.id])), 1)

    def test_autoinc_detection_no_affinity(self):
        # a type with no affinity cannot be the autoincrement column
        class MyType(TypeDecorator):
            impl = TypeEngine

        assert MyType()._type_affinity is None
        t = Table("x", MetaData(), Column("id", MyType(), primary_key=True))
        assert t._autoincrement_column is None

    def test_autoincrement_ignore_fk(self):
        # autoincrement="ignore_fk" retains autoincrement despite the FK
        m = MetaData()
        Table("y", m, Column("id", Integer(), primary_key=True))
        x = Table(
            "x",
            m,
            Column(
                "id",
                Integer(),
                ForeignKey("y.id"),
                autoincrement="ignore_fk",
                primary_key=True,
            ),
        )
        assert x._autoincrement_column is x.c.id

    def test_autoincrement_fk_disqualifies(self):
        # by default a foreign key disqualifies autoincrement
        m = MetaData()
        Table("y", m, Column("id", Integer(), primary_key=True))
        x = Table(
            "x",
            m,
            Column("id", Integer(), ForeignKey("y.id"), primary_key=True),
        )
        assert x._autoincrement_column is None

    @testing.only_on("sqlite")
    @testing.provide_metadata
    def test_non_autoincrement(self, connection):
        # with autoincrement=False and no default, inserting without a PK
        # value emits a warning
        nonai = Table(
            "nonaitest",
            self.metadata,
            Column("id", Integer, autoincrement=False, primary_key=True),
            Column("data", String(20)),
        )
        nonai.create(connection)

        with expect_warnings(".*has no Python-side or server-side default.*"):
            connection.execute(nonai.insert(), dict(data="row 1"))
            connection.execute(nonai.insert(), dict(data="row 2"))

    @testing.metadata_fixture(ddl="function")
    def dataset_no_autoinc(self, metadata):
        # DDL uses a plain table; the Table object handed to the tests
        # attaches a Sequence but disables autoincrement
        Table("x", metadata, Column("set_id", Integer, primary_key=True))
        some_seq = Sequence("some_seq")

        dataset_no_autoinc = Table(
            "x",
            MetaData(),
            Column(
                "set_id",
                Integer,
                some_seq,
                primary_key=True,
                autoincrement=False,
            ),
        )
        return dataset_no_autoinc

    def test_col_w_optional_sequence_non_autoinc_no_firing(
        self, dataset_no_autoinc, connection
    ):
        """With optional=True on the sequence, an empty INSERT still
        produces exactly one row."""
        dataset_no_autoinc.c.set_id.default.optional = True
        connection.execute(dataset_no_autoinc.insert())
        eq_(
            connection.scalar(
                select([func.count("*")]).select_from(dataset_no_autoinc)
            ),
            1,
        )

    @testing.fails_if(testing.requires.sequences)
    def test_col_w_nonoptional_sequence_non_autoinc_no_firing(
        self, dataset_no_autoinc, connection
    ):
        """Same with optional=False; expected to fail on backends that
        actually support sequences (per the fails_if decorator)."""
        dataset_no_autoinc.c.set_id.default.optional = False
        connection.execute(dataset_no_autoinc.insert())
        eq_(
            connection.scalar(
                select([func.count("*")]).select_from(dataset_no_autoinc)
            ),
            1,
        )
class SpecialTypePKTest(fixtures.TestBase):
    """Primary keys whose type is a TypeDecorator: inserted_primary_key
    must pass through process_result_value ("INT_<n>" string form)."""

    __backend__ = True

    @classmethod
    def setup_class(cls):
        class MyInteger(TypeDecorator):
            impl = Integer

            def process_bind_param(self, value, dialect):
                if value is None:
                    return None
                # strip the "INT_" prefix on the way in
                return int(value[4:])

            def process_result_value(self, value, dialect):
                if value is None:
                    return None
                return "INT_%d" % value

        cls.MyInteger = MyInteger

    @testing.provide_metadata
    def _run_test(self, *arg, **kw):
        # build a table whose PK column "y" uses MyInteger plus the
        # given default/server_default arguments, insert one row, and
        # check the reported primary key
        metadata = self.metadata
        implicit_returning = kw.pop("implicit_returning", True)
        kw["primary_key"] = True
        if kw.get("autoincrement", True):
            kw["test_needs_autoincrement"] = True
        t = Table(
            "x",
            metadata,
            Column("y", self.MyInteger, *arg, **kw),
            Column("data", Integer),
            implicit_returning=implicit_returning,
        )

        with testing.db.connect() as conn:
            t.create(conn)
            r = conn.execute(t.insert().values(data=5))

            # a server default with no RETURNING path yields None
            if "server_default" in kw and (
                not testing.db.dialect.implicit_returning
                or not implicit_returning
            ):
                eq_(r.inserted_primary_key, [None])
            else:
                eq_(r.inserted_primary_key, ["INT_1"])

            eq_(conn.execute(t.select()).first(), ("INT_1", 5))

    def test_plain(self):
        # among other things, tests that autoincrement
        # is enabled.
        self._run_test()

    def test_literal_default_label(self):
        self._run_test(
            default=literal("INT_1", type_=self.MyInteger).label("foo")
        )

    def test_literal_default_no_label(self):
        self._run_test(default=literal("INT_1", type_=self.MyInteger))

    def test_literal_column_default_no_label(self):
        self._run_test(default=literal_column("1", type_=self.MyInteger))

    def test_sequence(self):
        self._run_test(Sequence("foo_seq"))

    def test_text_clause_default_no_type(self):
        self._run_test(default=text("1"))

    def test_server_default(self):
        self._run_test(server_default="1")

    def test_server_default_no_autoincrement(self):
        self._run_test(server_default="1", autoincrement=False)

    def test_clause(self):
        stmt = select([cast("INT_1", type_=self.MyInteger)]).as_scalar()
        self._run_test(default=stmt)

    @testing.requires.returning
    def test_no_implicit_returning(self):
        self._run_test(implicit_returning=False)

    @testing.requires.returning
    def test_server_default_no_implicit_returning(self):
        self._run_test(server_default="1", autoincrement=False)
class ServerDefaultsOnPKTest(fixtures.TestBase):
    """Round trips for server-side defaults on primary key columns, with
    and without RETURNING / implicit_returning."""

    __backend__ = True

    @testing.provide_metadata
    def test_string_default_none_on_insert(self, connection):
        """Without implicit returning, the string server default is not
        available as the inserted primary key (None is returned); the
        row itself still receives the default value."""
        metadata = self.metadata
        t = Table(
            "x",
            metadata,
            Column(
                "y", String(10), server_default="key_one", primary_key=True
            ),
            Column("data", String(10)),
            implicit_returning=False,
        )
        metadata.create_all(connection)
        r = connection.execute(t.insert(), dict(data="data"))
        eq_(r.inserted_primary_key, [None])
        eq_(list(connection.execute(t.select())), [("key_one", "data")])

    @testing.requires.returning
    @testing.provide_metadata
    def test_string_default_on_insert_with_returning(self, connection):
        """With implicit_returning, the string PK default comes back."""
        metadata = self.metadata
        t = Table(
            "x",
            metadata,
            Column(
                "y", String(10), server_default="key_one", primary_key=True
            ),
            Column("data", String(10)),
        )
        metadata.create_all(connection)
        r = connection.execute(t.insert(), dict(data="data"))
        eq_(r.inserted_primary_key, ["key_one"])
        eq_(list(connection.execute(t.select())), [("key_one", "data")])

    @testing.provide_metadata
    def test_int_default_none_on_insert(self, connection):
        # integer server default w/o RETURNING: PK reported as None
        metadata = self.metadata
        t = Table(
            "x",
            metadata,
            Column("y", Integer, server_default="5", primary_key=True),
            Column("data", String(10)),
            implicit_returning=False,
        )
        # a server default disqualifies the column from autoincrement
        assert t._autoincrement_column is None
        metadata.create_all(connection)
        r = connection.execute(t.insert(), dict(data="data"))
        eq_(r.inserted_primary_key, [None])
        if testing.against("sqlite"):
            # SQLite INTEGER PRIMARY KEY acts as rowid; default not used
            eq_(list(connection.execute(t.select())), [(1, "data")])
        else:
            eq_(list(connection.execute(t.select())), [(5, "data")])

    @testing.provide_metadata
    def test_autoincrement_reflected_from_server_default(self, connection):
        metadata = self.metadata
        t = Table(
            "x",
            metadata,
            Column("y", Integer, server_default="5", primary_key=True),
            Column("data", String(10)),
            implicit_returning=False,
        )
        assert t._autoincrement_column is None
        metadata.create_all(connection)
        # the reflected table must also not treat "y" as autoincrement
        m2 = MetaData()
        t2 = Table("x", m2, autoload_with=connection, implicit_returning=False)
        assert t2._autoincrement_column is None

    @testing.provide_metadata
    def test_int_default_none_on_insert_reflected(self, connection):
        # same as test_int_default_none_on_insert, but via reflection
        metadata = self.metadata
        Table(
            "x",
            metadata,
            Column("y", Integer, server_default="5", primary_key=True),
            Column("data", String(10)),
            implicit_returning=False,
        )
        metadata.create_all(connection)
        m2 = MetaData()
        t2 = Table("x", m2, autoload_with=connection, implicit_returning=False)
        r = connection.execute(t2.insert(), dict(data="data"))
        eq_(r.inserted_primary_key, [None])
        if testing.against("sqlite"):
            eq_(list(connection.execute(t2.select())), [(1, "data")])
        else:
            eq_(list(connection.execute(t2.select())), [(5, "data")])

    @testing.requires.returning
    @testing.provide_metadata
    def test_int_default_on_insert_with_returning(self, connection):
        # with RETURNING the integer server default is returned directly
        metadata = self.metadata
        t = Table(
            "x",
            metadata,
            Column("y", Integer, server_default="5", primary_key=True),
            Column("data", String(10)),
        )
        metadata.create_all(connection)
        r = connection.execute(t.insert(), dict(data="data"))
        eq_(r.inserted_primary_key, [5])
        eq_(list(connection.execute(t.select())), [(5, "data")])
class UnicodeDefaultsTest(fixtures.TestBase):
    """Defaults on Unicode columns: unicode values are accepted silently,
    non-unicode (bytes) values emit a SAWarning."""

    __backend__ = True

    def test_no_default(self):
        # A Unicode column without any default must not warn.
        Column(Unicode(32))

    def test_unicode_default(self):
        # A unicode default is accepted without warning.
        default = u("foo")
        Column(Unicode(32), default=default)

    def test_nonunicode_default(self):
        # A bytes default triggers the warning; "b?" in the expected message
        # tolerates the Py2/Py3 bytes-repr difference.
        default = b("foo")
        assert_raises_message(
            sa.exc.SAWarning,
            "Unicode column 'foobar' has non-unicode "
            "default value b?'foo' specified.",
            Column,
            "foobar",
            Unicode(32),
            default=default,
        )
class InsertFromSelectTest(fixtures.TablesTest):
    """INSERT ... FROM SELECT interaction with Python-side column defaults."""

    __backend__ = True

    @classmethod
    def define_tables(cls, metadata):
        Table("data", metadata, Column("x", Integer), Column("y", Integer))

    @classmethod
    def insert_data(cls, connection):
        # Two source rows used by both tests below.
        data = cls.tables.data
        connection.execute(
            data.insert(), [{"x": 2, "y": 5}, {"x": 7, "y": 12}]
        )

    @testing.provide_metadata
    def test_insert_from_select_override_defaults(self, connection):
        """A scalar default fills the column omitted from the SELECT for
        every inserted row."""
        data = self.tables.data
        table = Table(
            "sometable",
            self.metadata,
            Column("x", Integer),
            Column("foo", Integer, default=12),
            Column("y", Integer),
        )
        table.create(connection)
        sel = select([data.c.x, data.c.y])
        # Only x and y come from the SELECT; foo falls back to its default.
        ins = table.insert().from_select(["x", "y"], sel)
        connection.execute(ins)
        eq_(
            list(connection.execute(table.select().order_by(table.c.x))),
            [(2, 12, 5), (7, 12, 12)],
        )

    @testing.provide_metadata
    def test_insert_from_select_fn_defaults(self, connection):
        """A callable default is evaluated once per statement, not per row."""
        data = self.tables.data
        counter = itertools.count(1)

        def foo(ctx):
            return next(counter)

        table = Table(
            "sometable",
            self.metadata,
            Column("x", Integer),
            Column("foo", Integer, default=foo),
            Column("y", Integer),
        )
        table.create(connection)
        sel = select([data.c.x, data.c.y])
        ins = table.insert().from_select(["x", "y"], sel)
        connection.execute(ins)
        # counter is only called once!
        eq_(
            list(connection.execute(table.select().order_by(table.c.x))),
            [(2, 1, 5), (7, 1, 12)],
        )
class CurrentParametersTest(fixtures.TablesTest):
    """``context.get_current_parameters()`` inside a column default sees the
    row being inserted, across single, executemany and multi-VALUES runs."""

    __backend__ = True

    @classmethod
    def define_tables(cls, metadata):
        def gen_default(context):
            # Placeholder default; swapped out per-test via _fixture().
            pass

        Table(
            "some_table",
            metadata,
            Column("x", String(50), default=gen_default),
            Column("y", String(50)),
        )

    def _fixture(self, fn):
        # Patch the column default in place so it funnels through `fn`.
        def gen_default(context):
            fn(context)

        some_table = self.tables.some_table
        some_table.c.x.default.arg = gen_default
        return fn

    @testing.combinations(
        ("single", "attribute"),
        ("single", "method"),
        ("executemany", "attribute"),
        ("executemany", "method"),
        ("multivalues", "method", testing.requires.multivalues_inserts),
        argnames="exec_type, usemethod",
    )
    def test_parameters(self, exec_type, usemethod, connection):
        # NOTE(review): `usemethod` is part of the combination matrix but is
        # never branched on in this body — confirm intent upstream.
        collect = mock.Mock()

        @self._fixture
        def fn(context):
            collect(context.get_current_parameters())

        table = self.tables.some_table
        if exec_type in ("multivalues", "executemany"):
            parameters = [{"y": "h1"}, {"y": "h2"}]
        else:
            parameters = [{"y": "hello"}]
        if exec_type == "multivalues":
            # Single INSERT ... VALUES (...), (...) statement.
            stmt, params = table.insert().values(parameters), {}
        else:
            stmt, params = table.insert(), parameters
        connection.execute(stmt, params)
        eq_(
            collect.mock_calls,
            # The default hook sees each row, with the not-yet-defaulted
            # "x" present as None.
            [mock.call({"y": param["y"], "x": None}) for param in parameters],
        )
| true | true |
f72f453170f50aa25b6ab839bd6aa24c9b63bd4a | 2,169 | py | Python | pkgInfo.py | smanschi/Rcs | a54980a44c7fb0e925b091cd3c29297e940af39c | [
"BSD-4-Clause"
] | null | null | null | pkgInfo.py | smanschi/Rcs | a54980a44c7fb0e925b091cd3c29297e940af39c | [
"BSD-4-Clause"
] | null | null | null | pkgInfo.py | smanschi/Rcs | a54980a44c7fb0e925b091cd3c29297e940af39c | [
"BSD-4-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
#
# Copyright (c) 2020, Honda Research Institute Europe GmbH.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER "AS IS" AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Package identification consumed by the build / software-quality tooling.
name = "Rcs"
version = "1.0"
category="Libraries"
# Software-quality level this package targets.
sqLevel = "basic"
# Quality-checker rules opted out of; rationale given in sqComments below.
sqOptOutRules = [ 'GEN04', 'C02', 'C06' ]
sqComments = { 'GEN04': 'Copyright header with BSD license not recognized by quality checker',
               'C02': 'Link guards not to be used in Cpp headers ',
               'C06': 'inline in template declarations should be ok '}
# Commands run for the SQ_12 check (valgrind-instrumented smoke tests).
# NOTE(review): exact SQ_12 semantics come from the surrounding tooling — confirm.
SQ_12 = [ 'build/bionic64/bin/TestMath -m -1 -numTests 10 -dl 1',
          'build/bionic64/bin/Rcs -m 3 -iter 10 -dl 1 -valgrind',
          'build/bionic64/bin/Rcs -m 6 -valgrind -nTests 10 -dl 1']
# EOF
| 40.924528 | 100 | 0.717842 |
# Package identification consumed by the build / software-quality tooling.
name = "Rcs"
version = "1.0"
category="Libraries"
# Software-quality level this package targets.
sqLevel = "basic"
# Rules opted out of; rationale in sqComments.
sqOptOutRules = [ 'GEN04', 'C02', 'C06' ]
sqComments = { 'GEN04': 'Copyright header with BSD license not recognized by quality checker',
               'C02': 'Link guards not to be used in Cpp headers ',
               'C06': 'inline in template declarations should be ok '}
# Commands run for the SQ_12 check (valgrind-instrumented smoke tests).
SQ_12 = [ 'build/bionic64/bin/TestMath -m -1 -numTests 10 -dl 1',
          'build/bionic64/bin/Rcs -m 3 -iter 10 -dl 1 -valgrind',
          'build/bionic64/bin/Rcs -m 6 -valgrind -nTests 10 -dl 1']
| true | true |
f72f454dc859e851e69564b7be10690a0e9e6c87 | 154 | py | Python | opencyb/filestorage/apps.py | Open-Cybernetics/opencyb-site | c79d61ed2d508edf304dee92d93dfbc101188c0f | [
"BSD-3-Clause"
] | 3 | 2021-07-05T15:29:42.000Z | 2021-07-22T13:11:06.000Z | opencyb/filestorage/apps.py | open-cyb/website | c79d61ed2d508edf304dee92d93dfbc101188c0f | [
"BSD-3-Clause"
] | null | null | null | opencyb/filestorage/apps.py | open-cyb/website | c79d61ed2d508edf304dee92d93dfbc101188c0f | [
"BSD-3-Clause"
] | null | null | null | from django.apps import AppConfig
class FilestorageConfig(AppConfig):
    """Django application configuration for the ``filestorage`` app."""

    # Use 64-bit integer primary keys for models that do not declare one.
    default_auto_field = 'django.db.models.BigAutoField'
    name = 'filestorage'
| 22 | 56 | 0.772727 | from django.apps import AppConfig
class FilestorageConfig(AppConfig):
    """Django application configuration for the ``filestorage`` app."""

    # Use 64-bit integer primary keys for models that do not declare one.
    default_auto_field = 'django.db.models.BigAutoField'
    name = 'filestorage'
| true | true |
f72f45f0aeacc8d7741dbd968eb4854f4f2508c2 | 1,606 | py | Python | tests/task/test_claim.py | asyncee/pycamunda | f4834d224ff99fcf80874efeaedf68a8a2efa926 | [
"MIT"
] | null | null | null | tests/task/test_claim.py | asyncee/pycamunda | f4834d224ff99fcf80874efeaedf68a8a2efa926 | [
"MIT"
] | null | null | null | tests/task/test_claim.py | asyncee/pycamunda | f4834d224ff99fcf80874efeaedf68a8a2efa926 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import unittest.mock
import pytest
import pycamunda.task
from tests.mock import raise_requests_exception_mock, not_ok_response_mock
def test_claim_params(engine_url):
    """Claim targets /task/<id>/claim and sends only the user id in the body."""
    claim = pycamunda.task.Claim(url=engine_url, id_='anId', user_id='anUserId')
    expected_url = engine_url + '/task/anId/claim'
    assert claim.url == expected_url
    assert claim.query_parameters() == {}
    assert claim.body_parameters() == {'userId': 'anUserId'}
@unittest.mock.patch('requests.Session.request')
def test_claim_calls_requests(mock, engine_url):
    """Executing a Claim issues exactly one POST through requests."""
    claim = pycamunda.task.Claim(url=engine_url, id_='anId', user_id='anUserId')
    claim()
    assert mock.called
    used_method = mock.call_args[1]['method']
    assert used_method.upper() == 'POST'
@unittest.mock.patch('requests.Session.request', raise_requests_exception_mock)
def test_claim_raises_pycamunda_exception(engine_url):
    """A requests-level failure surfaces as PyCamundaException."""
    claim = pycamunda.task.Claim(url=engine_url, id_='anId', user_id='anUserId')
    with pytest.raises(pycamunda.PyCamundaException):
        claim()
@unittest.mock.patch('requests.Session.request', not_ok_response_mock)
@unittest.mock.patch('pycamunda.base._raise_for_status')
def test_claim_raises_for_status(mock, engine_url):
    """Non-OK responses are routed through pycamunda's status check."""
    claim = pycamunda.task.Claim(url=engine_url, id_='anId', user_id='anUserId')
    claim()
    assert mock.called
@unittest.mock.patch('requests.Session.request', unittest.mock.MagicMock())
def test_claim_returns_none(engine_url):
    """Claim carries no response payload; the call yields None."""
    claim = pycamunda.task.Claim(url=engine_url, id_='anId', user_id='anUserId')
    outcome = claim()
    assert outcome is None
| 32.12 | 85 | 0.754047 |
import unittest.mock
import pytest
import pycamunda.task
from tests.mock import raise_requests_exception_mock, not_ok_response_mock
def test_claim_params(engine_url):
    # URL is /task/<id>/claim; only the user id travels in the request body.
    claim_task = pycamunda.task.Claim(url=engine_url, id_='anId', user_id='anUserId')
    assert claim_task.url == engine_url + '/task/anId/claim'
    assert claim_task.query_parameters() == {}
    assert claim_task.body_parameters() == {'userId': 'anUserId'}
@unittest.mock.patch('requests.Session.request')
def test_claim_calls_requests(mock, engine_url):
    # Executing the Claim object must issue a POST via requests.
    claim_task = pycamunda.task.Claim(url=engine_url, id_='anId', user_id='anUserId')
    claim_task()
    assert mock.called
    assert mock.call_args[1]['method'].upper() == 'POST'
@unittest.mock.patch('requests.Session.request', raise_requests_exception_mock)
def test_claim_raises_pycamunda_exception(engine_url):
    # requests-level errors are wrapped in PyCamundaException.
    claim_task = pycamunda.task.Claim(url=engine_url, id_='anId', user_id='anUserId')
    with pytest.raises(pycamunda.PyCamundaException):
        claim_task()
@unittest.mock.patch('requests.Session.request', not_ok_response_mock)
@unittest.mock.patch('pycamunda.base._raise_for_status')
def test_claim_raises_for_status(mock, engine_url):
    # Non-OK responses must be routed through _raise_for_status.
    claim_task = pycamunda.task.Claim(url=engine_url, id_='anId', user_id='anUserId')
    claim_task()
    assert mock.called
@unittest.mock.patch('requests.Session.request', unittest.mock.MagicMock())
def test_claim_returns_none(engine_url):
    # Claim has no payload to return.
    claim_task = pycamunda.task.Claim(url=engine_url, id_='anId', user_id='anUserId')
    result = claim_task()
    assert result is None
| true | true |
f72f47b979d68d0a61e3e6a98a9950c6cfd94e50 | 9,604 | py | Python | qcelemental/util/misc.py | bgpeyton/QCElemental | 34c259f0c759e53c2bad9aa0da1126500fe6cf75 | [
"BSD-3-Clause"
] | null | null | null | qcelemental/util/misc.py | bgpeyton/QCElemental | 34c259f0c759e53c2bad9aa0da1126500fe6cf75 | [
"BSD-3-Clause"
] | 1 | 2021-03-05T19:06:09.000Z | 2021-03-05T19:06:09.000Z | qcelemental/util/misc.py | Andrew-AbiMansour/QCElemental | 2e84cd686d5fff0fc79accb28ffa985de4684704 | [
"BSD-3-Clause"
] | null | null | null | import math
import re
from typing import Dict, List
import numpy as np
from ..physical_constants import constants
def distance_matrix(a: np.ndarray, b: np.ndarray) -> np.ndarray:
    """Compute pairwise Euclidean distances between the rows of ``a`` and ``b``.

    Matches ``scipy.spatial.distance.cdist(a, b, "euclidean")``; the result
    has shape ``(a.shape[0], b.shape[0])``.
    """
    assert a.shape[1] == b.shape[1], """Inner dimensions do not match"""
    out = np.zeros((a.shape[0], b.shape[0]))
    # One output row per row of `a`: distances to every row of `b`.
    for idx, row in enumerate(a):
        out[idx] = np.linalg.norm(row - b, axis=1)
    return out
def update_with_error(a: Dict, b: Dict, path=None) -> Dict:
    """Recursively merge ``b`` into ``a`` (like ``dict.update``), raising
    ``KeyError`` whenever the two sides hold conflicting values for a key.

    Adapted from: https://stackoverflow.com/a/7205107
    """
    if path is None:
        path = []
    for key, incoming in b.items():
        if key not in a:
            a[key] = incoming
            continue
        current = a[key]
        if isinstance(current, dict) and isinstance(incoming, dict):
            # Both sides hold mappings for this key: merge one level deeper.
            update_with_error(current, incoming, path + [str(key)])
        elif current == incoming:
            pass  # identical leaves never conflict
        elif current is None:
            # None on the existing side acts as a wildcard.
            a[key] = incoming
        elif (
            isinstance(current, (list, tuple))
            and not isinstance(current, str)
            and isinstance(incoming, (list, tuple))
            and not isinstance(incoming, str)
            and len(current) == len(incoming)
            and all((cv is None or cv == iv) for cv, iv in zip(current, incoming))
        ):
            # Equal-length sequences that agree element-wise, counting None
            # on the existing side as a wildcard: take the incoming one.
            a[key] = incoming
        else:
            location = ".".join(path + [str(key)])
            raise KeyError("Conflict at {}: {} vs. {}".format(location, current, incoming))
    return a
def standardize_efp_angles_units(units: str, geom_hints: List[List[float]]) -> List[List[float]]:
    """Normalize pre-validated xyzabc / points EFP hints to libefp
    conventions: lengths in Bohr and Euler angles wrapped into (-pi, pi]
    (the range libefp itself returns hints in).
    """

    def wrap(angle):
        """Shift `angle` by one period into (-pi, pi]."""
        if angle > math.pi:
            return angle - 2 * math.pi
        if angle <= -math.pi:
            return angle + 2 * math.pi
        return angle

    # Length conversion factor into atomic units.
    scale = (1.0 / constants.bohr2angstroms) if units == "Angstrom" else 1.0

    standardized = []
    for hint in geom_hints:
        if len(hint) == 6:
            # xyzabc hint: three coordinates followed by three Euler angles.
            standardized.append([c * scale for c in hint[:3]] + [wrap(t) for t in hint[3:]])
        if len(hint) == 9:
            # points hint: three xyz triples, lengths only.
            standardized.append([c * scale for c in hint])
    return standardized
def filter_comments(string: str) -> str:
    """Remove from `string` any Python-style comments ('#' to end of line).

    A ``#`` escaped as ``\\#`` is preserved.
    """
    # The group keeps the single character captured before '#'; the previous
    # empty replacement deleted that character along with the comment
    # (e.g. "a#b" became "" instead of "a").
    return re.sub(r"(^|[^\\])#.*", r"\1", string)
def unnp(dicary: Dict, _path=None, *, flat: bool = False) -> Dict:
    """Return `dicary` with any ndarray values replaced by lists.

    Parameters
    ----------
    dicary: dict
        Dictionary where any internal iterables are dict or list.
    flat : bool, optional
        Whether the returned lists are flat or nested.

    Returns
    -------
    dict
        Input with any ndarray values replaced by lists.
    """
    if _path is None:
        _path = []
    converted: Dict = {}
    for key, value in dicary.items():
        if isinstance(value, dict):
            converted[key] = unnp(value, _path + [str(key)], flat=flat)
        elif isinstance(value, list):
            # Walk lists by treating their indices as dictionary keys,
            # then unwrap back to a list (insertion order is preserved).
            indexed = dict(enumerate(value))
            converted[key] = list(unnp(indexed, _path + [str(key)], flat=flat).values())
        elif hasattr(value, "shape"):
            # Array-like (has .shape): convert to a (possibly flattened) list.
            converted[key] = value.ravel().tolist() if flat else value.tolist()
        else:
            converted[key] = value
    return converted
def _norm(points) -> float:
"""
Return the Frobenius norm across axis=-1, NumPy's internal norm is crazy slow (~4x)
"""
tmp = np.atleast_2d(points)
return np.sqrt(np.einsum("ij,ij->i", tmp, tmp))
def measure_coordinates(coordinates, measurements, degrees=False):
    """Measure a geometry array from 0-based index tuples: 2 indices give a
    distance, 3 an angle, 4 a dihedral. A single measurement (a bare index
    list) returns a scalar; a list of measurements returns a list.
    """
    coordinates = np.atleast_2d(coordinates)
    n_points = coordinates.shape[0]

    # A bare index list means one measurement: wrap it, unwrap the result.
    unwrap = isinstance(measurements[0], int)
    if unwrap:
        measurements = [measurements]

    results = []
    for num, indices in enumerate(measurements):
        if any(idx >= n_points for idx in indices):
            raise ValueError(f"An index of measurement {num} is out of bounds.")
        count = len(indices)
        if count == 2:
            func, kwargs = compute_distance, {}
        elif count == 3:
            func, kwargs = compute_angle, {"degrees": degrees}
        elif count == 4:
            func, kwargs = compute_dihedral, {"degrees": degrees}
        else:
            raise KeyError(f"Unrecognized number of arguments for measurement {num}, found {count}, expected 2-4.")
        value = func(*(coordinates[idx] for idx in indices), **kwargs)
        results.append(float(value))

    return results[0] if unwrap else results
def compute_distance(points1, points2) -> np.ndarray:
    """Row-wise Euclidean distance between ``points1`` and ``points2``.

    Inputs may be 1D (a single point) or 2D (one point per row); units are
    the caller's responsibility. Equals the diagonal of distance_matrix.
    """
    p1 = np.atleast_2d(points1)
    p2 = np.atleast_2d(points2)
    delta = p1 - p2
    # Row-wise Euclidean norm of the difference (helper inlined).
    return np.sqrt(np.einsum("ij,ij->i", delta, delta))
def compute_angle(points1, points2, points3, *, degrees: bool = False) -> np.ndarray:
    """Row-wise angle (p1, p2 [vertex], p3), in radians unless ``degrees``.

    Inputs may be 1D or 2D; units are the caller's responsibility.
    """
    a = np.atleast_2d(points1)
    b = np.atleast_2d(points2)
    c = np.atleast_2d(points3)
    u = a - b
    v = b - c
    norm_u = np.sqrt(np.einsum("ij,ij->i", u, u))
    norm_v = np.sqrt(np.einsum("ij,ij->i", v, v))
    # u and v run "through" the vertex, so this is the supplement's cosine;
    # pi - arccos(...) recovers the interior angle.
    cos_supplement = np.einsum("ij,ij->i", u, v) / (norm_u * norm_v)
    theta = np.pi - np.arccos(cos_supplement)
    return np.degrees(theta) if degrees else theta
def compute_dihedral(points1, points2, points3, points4, *, degrees: bool = False) -> np.ndarray:
    """
    Computes the dihedral angle (p1, p2, p3, p4) between the provided points on
    a per-row basis using the Praxeolitic formula.

    Parameters
    ----------
    points1 : np.ndarray
        The first list of points, can be 1D or 2D
    points2 : np.ndarray
        The second list of points, can be 1D or 2D
    points3 : np.ndarray
        The third list of points, can be 1D or 2D
    points4 : np.ndarray
        The fourth list of points, can be 1D or 2D
    degrees : bool, options
        Returns the dihedral angle in degrees rather than radians if True

    Returns
    -------
    dihedrals : np.ndarray
        The dihedral angle between the four points in radians

    Notes
    -----
    Units are not considered inside these expressions, please preconvert to
    the same units before using.
    """
    # FROM: https://stackoverflow.com/questions/20305272/
    p1 = np.atleast_2d(points1)
    p2 = np.atleast_2d(points2)
    p3 = np.atleast_2d(points3)
    p4 = np.atleast_2d(points4)

    # The three bond vectors; b0 is reversed so that it points away from p2.
    b0 = -1.0 * (p2 - p1)
    b1 = p3 - p2
    b2 = p4 - p3

    # Normalize the central vector row-wise. The explicit [:, None] keeps
    # broadcasting correct for any number of rows; the previous (n,)-shaped
    # factors only broadcast correctly for single-row input (and were
    # silently wrong for 3-row batches).
    b1 = b1 / np.sqrt(np.einsum("ij,ij->i", b1, b1))[:, None]

    # v = projection of b0 onto plane perpendicular to b1
    #   = b0 minus component that aligns with b1
    # w = projection of b2 onto plane perpendicular to b1
    #   = b2 minus component that aligns with b1
    # (Previously v removed the b0.b0-scaled component instead of b0.b1;
    # the output happened to be unchanged only because the spurious b1-term
    # is annihilated by w being perpendicular to b1. The correct projection
    # is used here.)
    v = b0 - np.einsum("ij,ij->i", b0, b1)[:, None] * b1
    w = b2 - np.einsum("ij,ij->i", b2, b1)[:, None] * b1

    # angle between v and w in a plane is the torsion angle;
    # v and w may not be normalized but that's fine since tan is y/x.
    x = np.einsum("ij,ij->i", v, w)
    y = np.einsum("ij,ij->i", np.cross(b1, v), w)
    angle = np.arctan2(y, x)

    if degrees:
        return np.degrees(angle)
    return angle
| 29.919003 | 126 | 0.585173 | import math
import re
from typing import Dict, List
import numpy as np
from ..physical_constants import constants
def distance_matrix(a: np.ndarray, b: np.ndarray) -> np.ndarray:
    """Pairwise Euclidean distances between rows of `a` and rows of `b`
    (equivalent to scipy cdist with the 'euclidean' metric); the result has
    shape (a.shape[0], b.shape[0])."""
    assert a.shape[1] == b.shape[1], """Inner dimensions do not match"""
    distm = np.zeros([a.shape[0], b.shape[0]])
    for i in range(a.shape[0]):
        # Row i: distances from point a[i] to every row of b.
        distm[i] = np.linalg.norm(a[i] - b, axis=1)
    return distm
def update_with_error(a: Dict, b: Dict, path=None) -> Dict:
    """Merge `b` into `a` like dict.update, raising KeyError when the two
    sides hold conflicting values for a key
    (adapted from https://stackoverflow.com/a/7205107)."""
    if path is None:
        path = []
    for key in b:
        if key in a:
            if isinstance(a[key], dict) and isinstance(b[key], dict):
                # Both sides are mappings: merge recursively.
                update_with_error(a[key], b[key], path + [str(key)])
            elif a[key] == b[key]:
                pass  # identical leaf values: nothing to do
            elif a[key] is None:
                # None on the existing side acts as a wildcard.
                a[key] = b[key]
            elif (
                isinstance(a[key], (list, tuple))
                and not isinstance(a[key], str)
                and isinstance(b[key], (list, tuple))
                and not isinstance(b[key], str)
                and len(a[key]) == len(b[key])
                and all((av is None or av == bv) for av, bv in zip(a[key], b[key]))
            ):
                # Equal-length sequences agreeing element-wise (None on the
                # existing side matches anything): take the incoming one.
                a[key] = b[key]
            else:
                raise KeyError("Conflict at {}: {} vs. {}".format(".".join(path + [str(key)]), a[key], b[key]))
        else:
            a[key] = b[key]
    return a
def standardize_efp_angles_units(units: str, geom_hints: List[List[float]]) -> List[List[float]]:
    """Convert pre-validated EFP hints to libefp conventions: lengths in
    Bohr, Euler angles wrapped into (-pi, pi]."""
    def radrge(radang):
        # Wrap an angle by one period into (-pi, pi].
        if radang > math.pi:
            return radang - 2 * math.pi
        elif radang <= -math.pi:
            return radang + 2 * math.pi
        else:
            return radang
    if units == "Angstrom":
        iutau = 1.0 / constants.bohr2angstroms
    else:
        iutau = 1.0
    hints = []
    for hint in geom_hints:
        if len(hint) == 6:
            # xyzabc hint: scale coordinates, wrap the three angles.
            x, y, z = [i * iutau for i in hint[:3]]
            a, b, c = [radrge(i) for i in hint[3:]]
            hints.append([x, y, z, a, b, c])
        if len(hint) == 9:
            # points hint: nine coordinates, lengths only.
            points = [i * iutau for i in hint]
            hints.append(points)
    return hints
def filter_comments(string: str) -> str:
    """Strip '#'-to-end-of-line comments; a '#' escaped as '\\#' survives.

    NOTE(review): the empty replacement also deletes the single character
    captured before '#' (e.g. "a#b" -> "") — confirm whether intended.
    """
    return re.sub(r"(^|[^\\])#.*", "", string)
def unnp(dicary: Dict, _path=None, *, flat: bool = False) -> Dict:
    """Return `dicary` with every ndarray value converted to a (nested or,
    with flat=True, flattened) list; dicts and lists are walked recursively."""
    if _path is None:
        _path = []
    ndicary: Dict = {}
    for k, v in dicary.items():
        if isinstance(v, dict):
            ndicary[k] = unnp(v, _path + [str(k)], flat=flat)
        elif isinstance(v, list):
            # Walk lists by treating indices as dict keys (relies on
            # insertion-ordered dicts, Py3.6+), then unwrap to a list.
            fakedict = {kk: vv for kk, vv in enumerate(v)}
            tolisted = unnp(fakedict, _path + [str(k)], flat=flat)
            ndicary[k] = list(tolisted.values())
        else:
            try:
                v.shape
            except AttributeError:
                # Not array-like: keep the value as-is.
                ndicary[k] = v
            else:
                if flat:
                    ndicary[k] = v.ravel().tolist()
                else:
                    ndicary[k] = v.tolist()
    return ndicary
def _norm(points) -> float:
    """Row-wise Euclidean (Frobenius along axis=-1) norm; einsum is used
    because NumPy's own norm is much slower here."""
    tmp = np.atleast_2d(points)
    return np.sqrt(np.einsum("ij,ij->i", tmp, tmp))
def measure_coordinates(coordinates, measurements, degrees=False):
    """Measure distances (2 indices), angles (3) or dihedrals (4) over rows
    of `coordinates`; a single bare measurement returns a scalar."""
    coordinates = np.atleast_2d(coordinates)
    num_coords = coordinates.shape[0]
    single = False
    if isinstance(measurements[0], int):
        # A bare index list: wrap it now, unwrap the result at the end.
        measurements = [measurements]
        single = True
    ret = []
    for num, m in enumerate(measurements):
        if any(x >= num_coords for x in m):
            raise ValueError(f"An index of measurement {num} is out of bounds.")
        kwargs = {}
        if len(m) == 2:
            func = compute_distance
        elif len(m) == 3:
            func = compute_angle
            kwargs = {"degrees": degrees}
        elif len(m) == 4:
            func = compute_dihedral
            kwargs = {"degrees": degrees}
        else:
            raise KeyError(f"Unrecognized number of arguments for measurement {num}, found {len(m)}, expected 2-4.")
        val = func(*[coordinates[x] for x in m], **kwargs)
        ret.append(float(val))
    if single:
        return ret[0]
    else:
        return ret
def compute_distance(points1, points2) -> np.ndarray:
    """Row-wise Euclidean distance between `points1` and `points2`
    (1D or 2D inputs; units are the caller's responsibility)."""
    points1 = np.atleast_2d(points1)
    points2 = np.atleast_2d(points2)
    return _norm(points1 - points2)
def compute_angle(points1, points2, points3, *, degrees: bool = False) -> np.ndarray:
    """Row-wise angle at vertex `points2` formed by (p1, p2, p3); radians
    unless degrees=True."""
    points1 = np.atleast_2d(points1)
    points2 = np.atleast_2d(points2)
    points3 = np.atleast_2d(points3)
    v12 = points1 - points2
    v23 = points2 - points3
    denom = _norm(v12) * _norm(v23)
    cosine_angle = np.einsum("ij,ij->i", v12, v23) / denom
    # v12/v23 run "through" the vertex, so arccos gives the supplement.
    angle = np.pi - np.arccos(cosine_angle)
    if degrees:
        return np.degrees(angle)
    else:
        return angle
def compute_dihedral(points1, points2, points3, points4, *, degrees: bool = False) -> np.ndarray:
    """Row-wise dihedral angle (p1, p2, p3, p4) via the Praxeolitic formula
    (https://stackoverflow.com/questions/20305272/).

    NOTE(review): the `v` projection uses einsum(v1, v1) where the formula
    calls for v1.v2; the output appears unaffected only because the spurious
    v2-component cancels against w (which is perpendicular to v2). Also,
    the (n,)-shaped einsum factors broadcast correctly only for single-row
    input — confirm before relying on batched use.
    """
    points1 = np.atleast_2d(points1)
    points2 = np.atleast_2d(points2)
    points3 = np.atleast_2d(points3)
    points4 = np.atleast_2d(points4)
    # Bond vectors; v1 is reversed so it points away from p2.
    v1 = -1.0 * (points2 - points1)
    v2 = points3 - points2
    v3 = points4 - points3
    # Normalize the central vector.
    v2 = v2 / _norm(v2)
    v = v1 - np.einsum("ij,ij->i", v1, v1) * v2
    w = v3 - np.einsum("ij,ij->i", v3, v2) * v2
    # The torsion is the signed planar angle between v and w.
    x = np.einsum("ij,ij->i", v, w)
    y = np.einsum("ij,ij->i", np.cross(v2, v), w)
    angle = np.arctan2(y, x)
    if degrees:
        return np.degrees(angle)
    else:
        return angle
| true | true |
f72f47d34b25fb540a99e2e2b836f2ad27836c97 | 3,066 | py | Python | tools/model_converters/regnet2mmdet.py | hyperlist/mmdetection | ba4918de7fb21a96edc373584fa21a17d098a843 | [
"Apache-2.0"
] | null | null | null | tools/model_converters/regnet2mmdet.py | hyperlist/mmdetection | ba4918de7fb21a96edc373584fa21a17d098a843 | [
"Apache-2.0"
] | null | null | null | tools/model_converters/regnet2mmdet.py | hyperlist/mmdetection | ba4918de7fb21a96edc373584fa21a17d098a843 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) OpenMMLab. All rights reserved.
import argparse
from collections import OrderedDict

import paddle
import torch
def convert_stem(model_key, model_weight, state_dict, converted_names):
    """Rename a pycls stem parameter (stem.conv / stem.bn) to mmdet's
    conv1 / bn1 and record the source key as converted."""
    renamed = model_key.replace('stem.conv', 'conv1').replace('stem.bn', 'bn1')
    state_dict[renamed] = model_weight
    converted_names.add(model_key)
    print(f'Convert {model_key} to {renamed}')
def convert_head(model_key, model_weight, state_dict, converted_names):
    """Rename the classifier head parameter (head.fc -> fc) and record the
    source key as converted."""
    renamed = model_key.replace('head.fc', 'fc')
    state_dict[renamed] = model_weight
    converted_names.add(model_key)
    print(f'Convert {model_key} to {renamed}')
def convert_reslayer(model_key, model_weight, state_dict, converted_names):
    """Rename a pycls residual-stage parameter (``s<stage>.b<block>...``) to
    mmdet's ``layer<stage>.<block-1>`` naming.

    :param model_key: pycls parameter name, e.g. ``s1.b1.proj.w``.
    :param model_weight: weight stored under ``model_key``.
    :param state_dict: output state dict receiving the renamed entry.
    :param converted_names: set collecting every source key handled so far.
    :raises ValueError: for keys that do not follow a known pycls pattern.
    """
    split_keys = model_key.split('.')
    layer, block, module = split_keys[:3]
    block_id = int(block[1:])
    layer_name = f'layer{int(layer[1:])}'
    block_name = f'{block_id - 1}'
    # Bottleneck "f" branches a/b/c and their BNs map onto conv1-3 / bn1-3.
    f_module_map = {
        'a_bn': 'bn1',
        'b_bn': 'bn2',
        'c_bn': 'bn3',
        'a': 'conv1',
        'b': 'conv2',
        'c': 'conv3',
    }
    if block_id == 1 and module == 'bn':
        # Stage-opening block carries the downsample path; bn is its index 1.
        new_key = f'{layer_name}.{block_name}.downsample.1.{split_keys[-1]}'
    elif block_id == 1 and module == 'proj':
        # Projection conv of the downsample path is index 0.
        new_key = f'{layer_name}.{block_name}.downsample.0.{split_keys[-1]}'
    elif module == 'f':
        try:
            module_name = f_module_map[split_keys[3]]
        except KeyError:
            # Previously an unrecognized sub-module fell through with
            # `module_name` unbound and crashed with NameError.
            raise ValueError(f'Unsupported conversion of key {model_key}') from None
        new_key = f'{layer_name}.{block_name}.{module_name}.{split_keys[-1]}'
    else:
        raise ValueError(f'Unsupported conversion of key {model_key}')
    print(f'Convert {model_key} to {new_key}')
    state_dict[new_key] = model_weight
    converted_names.add(model_key)
def convert(src, dst):
    """Convert keys in pycls pretrained RegNet models to mmdet style.

    :param src: path to the pycls checkpoint (a dict holding 'model_state').
    :param dst: path the converted checkpoint is written to.
    """
    # Load the pycls checkpoint. mmdet checkpoints are torch files, so the
    # conversion uses torch (the file previously went through paddle, which
    # cannot read/write torch checkpoints); map_location avoids needing a GPU.
    regnet_model = torch.load(src, map_location='cpu')
    blobs = regnet_model['model_state']
    # convert to pytorch style
    state_dict = OrderedDict()
    converted_names = set()
    for key, weight in blobs.items():
        if 'stem' in key:
            convert_stem(key, weight, state_dict, converted_names)
        elif 'head' in key:
            convert_head(key, weight, state_dict, converted_names)
        elif key.startswith('s'):
            convert_reslayer(key, weight, state_dict, converted_names)
    # check if all layers are converted
    for key in blobs:
        if key not in converted_names:
            print(f'not converted: {key}')
    # save checkpoint
    checkpoint = dict()
    checkpoint['state_dict'] = state_dict
    torch.save(checkpoint, dst)
def main():
    """Command-line entry point: convert the `src` checkpoint into `dst`."""
    parser = argparse.ArgumentParser(description='Convert model keys')
    parser.add_argument('src', help='src detectron model path')
    parser.add_argument('dst', help='save path')
    parsed = parser.parse_args()
    convert(parsed.src, parsed.dst)


if __name__ == '__main__':
    main()
import argparse
from collections import OrderedDict
import paddle
def convert_stem(model_key, model_weight, state_dict, converted_names):
    """Rename a pycls stem parameter (stem.conv/stem.bn -> conv1/bn1)."""
    new_key = model_key.replace('stem.conv', 'conv1')
    new_key = new_key.replace('stem.bn', 'bn1')
    state_dict[new_key] = model_weight
    converted_names.add(model_key)
    print(f'Convert {model_key} to {new_key}')
def convert_head(model_key, model_weight, state_dict, converted_names):
    """Rename the classifier head parameter (head.fc -> fc)."""
    new_key = model_key.replace('head.fc', 'fc')
    state_dict[new_key] = model_weight
    converted_names.add(model_key)
    print(f'Convert {model_key} to {new_key}')
def convert_reslayer(model_key, model_weight, state_dict, converted_names):
    """Rename a pycls stage parameter (s<stage>.b<block>...) into mmdet's
    layer<stage>.<block-1> naming.

    NOTE(review): an unrecognized sub-module under '.f.' leaves
    `module_name` unbound and fails with NameError rather than the
    ValueError raised for other unknown keys.
    """
    split_keys = model_key.split('.')
    layer, block, module = split_keys[:3]
    block_id = int(block[1:])
    layer_name = f'layer{int(layer[1:])}'
    block_name = f'{block_id - 1}'
    if block_id == 1 and module == 'bn':
        # Stage-opening block carries the downsample path; bn is its index 1.
        new_key = f'{layer_name}.{block_name}.downsample.1.{split_keys[-1]}'
    elif block_id == 1 and module == 'proj':
        # Projection conv of the downsample path is index 0.
        new_key = f'{layer_name}.{block_name}.downsample.0.{split_keys[-1]}'
    elif module == 'f':
        # Bottleneck branches a/b/c and their BNs map onto conv1-3 / bn1-3.
        if split_keys[3] == 'a_bn':
            module_name = 'bn1'
        elif split_keys[3] == 'b_bn':
            module_name = 'bn2'
        elif split_keys[3] == 'c_bn':
            module_name = 'bn3'
        elif split_keys[3] == 'a':
            module_name = 'conv1'
        elif split_keys[3] == 'b':
            module_name = 'conv2'
        elif split_keys[3] == 'c':
            module_name = 'conv3'
        new_key = f'{layer_name}.{block_name}.{module_name}.{split_keys[-1]}'
    else:
        raise ValueError(f'Unsupported conversion of key {model_key}')
    print(f'Convert {model_key} to {new_key}')
    state_dict[new_key] = model_weight
    converted_names.add(model_key)
def convert(src, dst):
    """Convert a pycls RegNet checkpoint into mmdet naming and save it.

    NOTE(review): this variant loads/saves through paddle although the
    conversion targets mmdet (torch) checkpoints — confirm the intended
    framework before use.
    """
    regnet_model = paddle.load(src)
    blobs = regnet_model['model_state']
    state_dict = OrderedDict()
    converted_names = set()
    for key, weight in blobs.items():
        if 'stem' in key:
            convert_stem(key, weight, state_dict, converted_names)
        elif 'head' in key:
            convert_head(key, weight, state_dict, converted_names)
        elif key.startswith('s'):
            convert_reslayer(key, weight, state_dict, converted_names)
    # Report any source keys that no branch handled.
    for key in blobs:
        if key not in converted_names:
            print(f'not converted: {key}')
    checkpoint = dict()
    checkpoint['state_dict'] = state_dict
    paddle.save(checkpoint, dst)
def main():
    """CLI entry point: convert the `src` checkpoint and write `dst`."""
    parser = argparse.ArgumentParser(description='Convert model keys')
    parser.add_argument('src', help='src detectron model path')
    parser.add_argument('dst', help='save path')
    args = parser.parse_args()
    convert(args.src, args.dst)


if __name__ == '__main__':
    main()
| true | true |
f72f47e775fcad2df9a2fb96e48014dd4b9267f7 | 1,543 | py | Python | src/py_mplus/objects/manga/title/__init__.py | SThomasP/py_mplus | 6d9c3075ae10b30a2482f3367f83f58037a04116 | [
"MIT"
] | null | null | null | src/py_mplus/objects/manga/title/__init__.py | SThomasP/py_mplus | 6d9c3075ae10b30a2482f3367f83f58037a04116 | [
"MIT"
] | null | null | null | src/py_mplus/objects/manga/title/__init__.py | SThomasP/py_mplus | 6d9c3075ae10b30a2482f3367f83f58037a04116 | [
"MIT"
] | null | null | null | from py_mplus.objects import MPObject
# Language names; indices presumably line up with the proto `language` enum
# decoded below — TODO confirm against the proto definition.
LANGUAGES = ['ENGLISH', 'SPANISH']


class Title(MPObject):
    """Decoded ``Proto.Title`` message: one manga title entry.

    Field numbers follow the reference JS decoder quoted later in this
    module (titleId, name, author, portraitImageUrl, landscapeImageUrl,
    viewCount, language).
    """

    def _decode(self, buffer, category, skip):
        # `category` is the protobuf field number; `skip` carries the wire
        # type bits used when the field is unknown.
        if category == 1:
            self.title_id = buffer.uint32()
        elif category == 2:
            self.name = buffer.string()
        elif category == 3:
            self.author = buffer.string()
        elif category == 4:
            self.portrait_image = buffer.string()  # portraitImageUrl in the reference decoder
        elif category == 5:
            self.landscape_image = buffer.string()  # landscapeImageUrl in the reference decoder
        elif category == 6:
            self.view_count = buffer.uint32()
        elif category == 7:
            self.language = buffer.int32()  # language enum; see LANGUAGES
        else:
            buffer.skip_type(skip)
'''
e.decode = function (e, t) {
e instanceof x || (e = x.create(e));
var n = void 0 === t ? e.len : e.pos + t,
r = new R.Proto.Title;
while (e.pos < n) {
var a = e.uint32();
switch (a >>> 3) {
case 1:
r.titleId = e.uint32();
break;
case 2:
r.name = e.string();
break;
case 3:
r.author = e.string();
break;
case 4:
r.portraitImageUrl = e.string();
break;
case 5:
r.landscapeImageUrl = e.string();
break;
case 6:
r.viewCount = e.uint32();
break;
case 7:
r.language = e.int32();
break;
default:
e.skipType(7 & a);
break
}
}
return r
''' | 25.716667 | 63 | 0.480233 | from py_mplus.objects import MPObject
LANGUAGES = ['ENGLISH', 'SPANISH']
class Title(MPObject):
def _decode(self, buffer, category, skip):
if category == 1:
self.title_id = buffer.uint32()
elif category == 2:
self.name = buffer.string()
elif category == 3:
self.author = buffer.string()
elif category == 4:
self.portrait_image = buffer.string()
elif category == 5:
self.landscape_image = buffer.string()
elif category == 6:
self.view_count = buffer.uint32()
elif category == 7:
self.language = buffer.int32()
else:
buffer.skip_type(skip)
| true | true |
f72f487d46b2bfc1cb13b07d80c29f8c53395833 | 66 | py | Python | agents/agent_mct/__init__.py | elena-rae/connect4_bccn_pcp | ffa1c1fbd61a28901d67bdeb33250ae52c35c296 | [
"MIT"
] | null | null | null | agents/agent_mct/__init__.py | elena-rae/connect4_bccn_pcp | ffa1c1fbd61a28901d67bdeb33250ae52c35c296 | [
"MIT"
] | null | null | null | agents/agent_mct/__init__.py | elena-rae/connect4_bccn_pcp | ffa1c1fbd61a28901d67bdeb33250ae52c35c296 | [
"MIT"
] | null | null | null | from .montecarlo import generate_move_montecarlo as generate_move
| 33 | 65 | 0.893939 | from .montecarlo import generate_move_montecarlo as generate_move
| true | true |
f72f48c024608bb44e33fab35985d824af006978 | 6,490 | py | Python | conduit/fair/models/gpd.py | DavidHurst/palbolts | 72f9ca3f82499b532f14d0e797426e1b425d3efe | [
"MIT"
] | null | null | null | conduit/fair/models/gpd.py | DavidHurst/palbolts | 72f9ca3f82499b532f14d0e797426e1b425d3efe | [
"MIT"
] | null | null | null | conduit/fair/models/gpd.py | DavidHurst/palbolts | 72f9ca3f82499b532f14d0e797426e1b425d3efe | [
"MIT"
] | 1 | 2021-09-07T14:55:16.000Z | 2021-09-07T14:55:16.000Z | """Zhang Gradient Projection Debiasing Baseline Model."""
from __future__ import annotations
from typing import NamedTuple, cast
import ethicml as em
from kit import implements
from kit.torch import CrossEntropyLoss, TrainingMode
import pandas as pd
import pytorch_lightning as pl
from pytorch_lightning.utilities.types import EPOCH_OUTPUT
import torch
from torch import Tensor, nn
from torch.optim.optimizer import Optimizer
from conduit.data.structures import TernarySample
from conduit.models.base import CdtModel
from conduit.models.utils import aggregate_over_epoch, prediction, prefix_keys
from conduit.types import LRScheduler, Stage
__all__ = ["GPD"]
def compute_proj_grads(*, model: nn.Module, loss_p: Tensor, loss_a: Tensor, alpha: float) -> None:
    """Assign debiased gradients to ``model``'s parameters.

    For every parameter, ``param.grad`` is set to the prediction gradient with
    its component along the adversarial gradient projected out, minus ``alpha``
    times the adversarial gradient (Zhang et al., adversarial debiasing).

    :param model: Model whose parameters receive the projected gradients.
    :param loss_p: Prediction loss.
    :param loss_a: Adversarial loss.
    :param alpha: Pre-factor for the adversarial-gradient term.
    """
    params = tuple(model.parameters())
    pred_grads = torch.autograd.grad(loss_p, params, retain_graph=True)
    adv_grads = torch.autograd.grad(loss_a, params, retain_graph=True)

    def _project(vec: Tensor, onto: Tensor) -> Tensor:
        # Orthogonal projection of `vec` onto `onto`; the denominator is
        # clamped to eps so an all-zero adversarial gradient cannot divide by 0.
        denom = torch.sum(onto * onto).clamp(min=torch.finfo(onto.dtype).eps)
        return onto * torch.sum(vec * onto) / denom

    for param, g_pred, g_adv in zip(params, pred_grads, adv_grads):
        param.grad = g_pred - _project(g_pred, onto=g_adv) - alpha * g_adv
def compute_grad(*, model: nn.Module, loss: Tensor) -> None:
    """Populate ``param.grad`` for every parameter of ``model`` with d(loss)/d(param).

    :param model: Model whose parameters the gradients are computed w.r.t.
    :param loss: Scalar loss to differentiate (graph is retained for reuse).
    """
    params = tuple(model.parameters())
    for param, grad in zip(params, torch.autograd.grad(loss, params, retain_graph=True)):
        param.grad = grad
class ModelOut(NamedTuple):
    """Raw (unnormalised) logits produced by one forward pass of :class:`GPD`."""

    # Sensitive-attribute logits from the adversary head.
    s: Tensor
    # Target-label logits from the classifier head.
    y: Tensor
class GPD(CdtModel):
    """Zhang Mitigating Unwanted Biases.

    Gradient-projection debiasing baseline: a shared encoder feeds both a
    target classifier and an adversary that tries to recover the sensitive
    attribute.  The encoder is updated with the adversarial component
    projected out of its gradients (see :func:`compute_proj_grads`), while
    each head receives the plain gradient of its own loss.
    """

    def __init__(
        self,
        *,
        adv: nn.Module,
        enc: nn.Module,
        clf: nn.Module,
        lr: float = 3.0e-4,
        weight_decay: float = 0.0,
        lr_initial_restart: int = 10,
        lr_restart_mult: int = 2,
        lr_sched_interval: TrainingMode = TrainingMode.epoch,
        lr_sched_freq: int = 1,
    ) -> None:
        """
        :param adv: Adversary head predicting the sensitive attribute from the encoding.
        :param enc: Shared encoder network.
        :param clf: Classifier head predicting the target label from the encoding.
        :param lr: Base learning rate.
        :param weight_decay: Optimizer weight decay.
        :param lr_initial_restart: Initial restart period of the LR schedule.
        :param lr_restart_mult: Multiplier applied to the restart period.
        :param lr_sched_interval: Whether the scheduler steps per epoch or per step.
        :param lr_sched_freq: Stepping frequency in units of ``lr_sched_interval``.
        """
        super().__init__(
            lr=lr,
            weight_decay=weight_decay,
            lr_initial_restart=lr_initial_restart,
            lr_restart_mult=lr_restart_mult,
            lr_sched_interval=lr_sched_interval,
            lr_sched_freq=lr_sched_freq,
        )
        self.adv = adv
        self.enc = enc
        self.clf = clf
        self._loss_adv_fn = CrossEntropyLoss()
        self._loss_clf_fn = CrossEntropyLoss()

        # Gradients are assigned by hand in training_step, so Lightning's
        # automatic optimization must be disabled.
        self.automatic_optimization = False  # Mark for manual optimization

    @implements(CdtModel)
    @torch.no_grad()
    def inference_step(self, batch: TernarySample, *, stage: Stage) -> dict[str, Tensor]:
        """Run a no-grad forward pass, log the losses, and return raw outputs
        for aggregation in :meth:`inference_epoch_end`."""
        assert isinstance(batch.x, Tensor)
        model_out = self.forward(batch.x)
        loss_adv, loss_clf, loss = self._get_losses(model_out=model_out, batch=batch)

        logging_dict = {
            "loss": loss.item(),
            "loss_adv": loss_adv.item(),
            "loss_clf": loss_clf.item(),
        }
        logging_dict = prefix_keys(dict_=logging_dict, prefix=str(stage), sep="/")
        self.log_dict(logging_dict)

        return {
            "targets": batch.y.view(-1),
            "subgroup_inf": batch.s.view(-1),
            "logits_y": model_out.y,
        }

    @implements(CdtModel)
    def inference_epoch_end(self, outputs: EPOCH_OUTPUT, stage: Stage) -> dict[str, float]:
        """Aggregate per-batch outputs and compute accuracy/fairness metrics via ethicml."""
        targets_all = aggregate_over_epoch(outputs=outputs, metric="targets")
        subgroup_inf_all = aggregate_over_epoch(outputs=outputs, metric="subgroup_inf")
        logits_y_all = aggregate_over_epoch(outputs=outputs, metric="logits_y")

        preds_y_all = prediction(logits_y_all)

        # The metrics below only look at s, y and the predictions, so the
        # feature column is filled with random values purely to satisfy the
        # DataTuple schema.
        dt = em.DataTuple(
            x=pd.DataFrame(
                torch.rand_like(subgroup_inf_all).detach().cpu().numpy(),
                columns=["x0"],
            ),
            s=pd.DataFrame(subgroup_inf_all.detach().cpu().numpy(), columns=["s"]),
            y=pd.DataFrame(targets_all.detach().cpu().numpy(), columns=["y"]),
        )

        return em.run_metrics(
            predictions=em.Prediction(hard=pd.Series(preds_y_all.detach().cpu().numpy())),
            actual=dt,
            metrics=[em.Accuracy(), em.RenyiCorrelation(), em.Yanovich()],
            per_sens_metrics=[em.Accuracy(), em.ProbPos(), em.TPR()],
        )

    def _get_losses(
        self, model_out: ModelOut, *, batch: TernarySample
    ) -> tuple[Tensor, Tensor, Tensor]:
        """Return ``(adversarial loss, classification loss, their sum)``."""
        loss_adv = self._loss_adv_fn(model_out.s, target=batch.s)
        loss_clf = self._loss_clf_fn(model_out.y, target=batch.y)
        return loss_adv, loss_clf, loss_adv + loss_clf

    @implements(pl.LightningModule)
    def training_step(self, batch: TernarySample, batch_idx: int) -> None:
        """Manual-optimization training step with adversarial gradient projection."""
        assert isinstance(batch.x, Tensor)
        opt = cast(Optimizer, self.optimizers())

        opt.zero_grad()

        model_out: ModelOut = self.forward(batch.x)
        loss_adv, loss_clf, loss = self._get_losses(model_out=model_out, batch=batch)

        # NOTE(review): these keys ("adv_loss"/"clf_loss") differ from the
        # "loss_adv"/"loss_clf" keys logged in inference_step — confirm
        # whether the mismatch is intentional.
        logging_dict = {
            "adv_loss": loss_adv.item(),
            "clf_loss": loss_clf.item(),
            "loss": loss.item(),
        }
        logging_dict = prefix_keys(dict_=logging_dict, prefix="train", sep="/")
        self.log_dict(logging_dict)

        # Encoder: prediction gradients with the adversarial component projected out.
        compute_proj_grads(model=self.enc, loss_p=loss_clf, loss_a=loss_adv, alpha=1.0)
        # Heads: plain gradients of their respective losses.
        compute_grad(model=self.adv, loss=loss_adv)
        compute_grad(model=self.clf, loss=loss_clf)

        opt.step()

        # Step the LR scheduler at the configured granularity.
        if (self.lr_sched_interval is TrainingMode.step) and (
            self.global_step % self.lr_sched_freq == 0
        ):
            sch = cast(LRScheduler, self.lr_schedulers())
            sch.step()
        if (self.lr_sched_interval is TrainingMode.epoch) and self.trainer.is_last_batch:
            sch = cast(LRScheduler, self.lr_schedulers())
            sch.step()

    @implements(nn.Module)
    def forward(self, x: Tensor) -> ModelOut:
        embedding = self.enc(x)
        y_pred = self.clf(embedding)
        s_pred = self.adv(embedding)
        return ModelOut(y=y_pred, s=s_pred)
| 35.271739 | 98 | 0.643297 | from __future__ import annotations
from typing import NamedTuple, cast
import ethicml as em
from kit import implements
from kit.torch import CrossEntropyLoss, TrainingMode
import pandas as pd
import pytorch_lightning as pl
from pytorch_lightning.utilities.types import EPOCH_OUTPUT
import torch
from torch import Tensor, nn
from torch.optim.optimizer import Optimizer
from conduit.data.structures import TernarySample
from conduit.models.base import CdtModel
from conduit.models.utils import aggregate_over_epoch, prediction, prefix_keys
from conduit.types import LRScheduler, Stage
__all__ = ["GPD"]
def compute_proj_grads(*, model: nn.Module, loss_p: Tensor, loss_a: Tensor, alpha: float) -> None:
grad_p = torch.autograd.grad(loss_p, tuple(model.parameters()), retain_graph=True)
grad_a = torch.autograd.grad(loss_a, tuple(model.parameters()), retain_graph=True)
def _proj(a: Tensor, b: Tensor) -> Tensor:
return b * torch.sum(a * b) / torch.sum(b * b).clamp(min=torch.finfo(b.dtype).eps)
grad_p = [p - _proj(p, a) - alpha * a for p, a in zip(grad_p, grad_a)]
for param, grad in zip(model.parameters(), grad_p):
param.grad = grad
def compute_grad(*, model: nn.Module, loss: Tensor) -> None:
grad_list = torch.autograd.grad(loss, tuple(model.parameters()), retain_graph=True)
for param, grad in zip(model.parameters(), grad_list):
param.grad = grad
class ModelOut(NamedTuple):
s: Tensor
y: Tensor
class GPD(CdtModel):
def __init__(
self,
*,
adv: nn.Module,
enc: nn.Module,
clf: nn.Module,
lr: float = 3.0e-4,
weight_decay: float = 0.0,
lr_initial_restart: int = 10,
lr_restart_mult: int = 2,
lr_sched_interval: TrainingMode = TrainingMode.epoch,
lr_sched_freq: int = 1,
) -> None:
super().__init__(
lr=lr,
weight_decay=weight_decay,
lr_initial_restart=lr_initial_restart,
lr_restart_mult=lr_restart_mult,
lr_sched_interval=lr_sched_interval,
lr_sched_freq=lr_sched_freq,
)
self.adv = adv
self.enc = enc
self.clf = clf
self._loss_adv_fn = CrossEntropyLoss()
self._loss_clf_fn = CrossEntropyLoss()
self.automatic_optimization = False
@implements(CdtModel)
@torch.no_grad()
def inference_step(self, batch: TernarySample, *, stage: Stage) -> dict[str, Tensor]:
assert isinstance(batch.x, Tensor)
model_out = self.forward(batch.x)
loss_adv, loss_clf, loss = self._get_losses(model_out=model_out, batch=batch)
logging_dict = {
"loss": loss.item(),
"loss_adv": loss_adv.item(),
"loss_clf": loss_clf.item(),
}
logging_dict = prefix_keys(dict_=logging_dict, prefix=str(stage), sep="/")
self.log_dict(logging_dict)
return {
"targets": batch.y.view(-1),
"subgroup_inf": batch.s.view(-1),
"logits_y": model_out.y,
}
@implements(CdtModel)
def inference_epoch_end(self, outputs: EPOCH_OUTPUT, stage: Stage) -> dict[str, float]:
targets_all = aggregate_over_epoch(outputs=outputs, metric="targets")
subgroup_inf_all = aggregate_over_epoch(outputs=outputs, metric="subgroup_inf")
logits_y_all = aggregate_over_epoch(outputs=outputs, metric="logits_y")
preds_y_all = prediction(logits_y_all)
dt = em.DataTuple(
x=pd.DataFrame(
torch.rand_like(subgroup_inf_all).detach().cpu().numpy(),
columns=["x0"],
),
s=pd.DataFrame(subgroup_inf_all.detach().cpu().numpy(), columns=["s"]),
y=pd.DataFrame(targets_all.detach().cpu().numpy(), columns=["y"]),
)
return em.run_metrics(
predictions=em.Prediction(hard=pd.Series(preds_y_all.detach().cpu().numpy())),
actual=dt,
metrics=[em.Accuracy(), em.RenyiCorrelation(), em.Yanovich()],
per_sens_metrics=[em.Accuracy(), em.ProbPos(), em.TPR()],
)
def _get_losses(
self, model_out: ModelOut, *, batch: TernarySample
) -> tuple[Tensor, Tensor, Tensor]:
loss_adv = self._loss_adv_fn(model_out.s, target=batch.s)
loss_clf = self._loss_clf_fn(model_out.y, target=batch.y)
return loss_adv, loss_clf, loss_adv + loss_clf
@implements(pl.LightningModule)
def training_step(self, batch: TernarySample, batch_idx: int) -> None:
assert isinstance(batch.x, Tensor)
opt = cast(Optimizer, self.optimizers())
opt.zero_grad()
model_out: ModelOut = self.forward(batch.x)
loss_adv, loss_clf, loss = self._get_losses(model_out=model_out, batch=batch)
logging_dict = {
"adv_loss": loss_adv.item(),
"clf_loss": loss_clf.item(),
"loss": loss.item(),
}
logging_dict = prefix_keys(dict_=logging_dict, prefix="train", sep="/")
self.log_dict(logging_dict)
compute_proj_grads(model=self.enc, loss_p=loss_clf, loss_a=loss_adv, alpha=1.0)
compute_grad(model=self.adv, loss=loss_adv)
compute_grad(model=self.clf, loss=loss_clf)
opt.step()
if (self.lr_sched_interval is TrainingMode.step) and (
self.global_step % self.lr_sched_freq == 0
):
sch = cast(LRScheduler, self.lr_schedulers())
sch.step()
if (self.lr_sched_interval is TrainingMode.epoch) and self.trainer.is_last_batch:
sch = cast(LRScheduler, self.lr_schedulers())
sch.step()
@implements(nn.Module)
def forward(self, x: Tensor) -> ModelOut:
embedding = self.enc(x)
y_pred = self.clf(embedding)
s_pred = self.adv(embedding)
return ModelOut(y=y_pred, s=s_pred)
| true | true |
f72f4a6eaf811e134acc43e15c1fe1b2c788076f | 1,416 | py | Python | src/GUI_Elements/Entries.py | cedric-romain/lins | 1ccae895a8393b90422ff859a756a2c11ee585b0 | [
"MIT"
] | null | null | null | src/GUI_Elements/Entries.py | cedric-romain/lins | 1ccae895a8393b90422ff859a756a2c11ee585b0 | [
"MIT"
] | null | null | null | src/GUI_Elements/Entries.py | cedric-romain/lins | 1ccae895a8393b90422ff859a756a2c11ee585b0 | [
"MIT"
] | null | null | null | import tkinter as tk
class MyEntry:
    """A labelled single-line Tkinter entry that echoes submitted text into an
    adjacent label.

    Keyword options:
        title: text for the leading label (placeholder shown when missing).
        focus: truthy to give the entry keyboard focus on creation.
    """

    def __init__(self, root, **kwargs):
        self.root = root
        self.value = None  # last submitted text; None until the user presses Return
        self.frame = tk.Frame(self.root)
        self.frame.pack(anchor="nw")
        self.kwargs = kwargs
        self.title_label = tk.Label(self.frame, text=self.parse_title(), anchor='w', width=18)
        self.title_label.pack(side=tk.LEFT)
        self.entry_payload = self.create_and_get_entry_field()
        # Empty label that will receive the submitted value.  Previously this
        # was packed twice (side=LEFT, then fill=X); a single call with both
        # options is equivalent and clearer.
        self.output_label = tk.Label(self.frame, text="")
        self.output_label.pack(side=tk.LEFT, padx=5, fill=tk.X)

    def create_and_get_entry_field(self):
        """Create the entry widget, optionally focus it, and bind Return to submission."""
        entry = tk.Entry(self.frame, width=4)
        entry.pack(side=tk.LEFT, padx=5)
        if self.kwargs.get("focus"):
            entry.focus()
        entry.bind("<Return>", self.return_entry)  # submit on Enter
        return entry

    def parse_title(self):
        """Return the configured title, or a placeholder when none was given."""
        return self.kwargs.get("title", "Title missing!")

    def return_entry(self, arg=None):
        """Read the entry, mirror it in the output label, store it in
        ``self.value``, and clear the entry (``arg`` is the Tk event)."""
        result = self.entry_payload.get()
        self.output_label.config(text=result)
        self.value = result
        self.entry_payload.delete(0, tk.END)
| 30.782609 | 94 | 0.610876 | import tkinter as tk
class MyEntry:
def __init__(self, root, **kwargs):
self.root = root
self.value = None
self.frame = tk.Frame(self.root)
self.frame.pack(anchor="nw")
self.kwargs = kwargs
self.title_label = tk.Label(self.frame, text=self.parse_title(), anchor='w', width=18)
self.title_label.pack(side=tk.LEFT)
self.entry_payload = self.create_and_get_entry_field()
self.output_label = tk.Label(self.frame, text="")
self.output_label.pack(side=tk.LEFT, padx=5)
self.output_label.pack(fill=tk.X)
def create_and_get_entry_field(self):
entry = tk.Entry(self.frame, width=4)
entry.pack(side=tk.LEFT, padx=5)
if "focus" in self.kwargs:
if self.kwargs["focus"]:
entry.focus()
entry.bind("<Return>", self.return_entry)
return entry
def parse_title(self):
if "title" in self.kwargs:
return self.kwargs["title"]
else:
return "Title missing!"
def return_entry(self, arg=None):
result = self.entry_payload.get()
self.output_label.config(text=result)
self.value = result
self.entry_payload.delete(0, tk.END)
| true | true |
f72f4a775d0236aea7d4fb2265ce24f07fab6865 | 22,040 | py | Python | primary_info.py | Tulioas/dfp_analyser | d66ff94ba0b88a5d421a992ad27661011db36091 | [
"MIT"
] | 2 | 2022-02-09T03:27:17.000Z | 2022-02-09T03:27:23.000Z | primary_info.py | Tulioas/dfp_analyser | d66ff94ba0b88a5d421a992ad27661011db36091 | [
"MIT"
] | null | null | null | primary_info.py | Tulioas/dfp_analyser | d66ff94ba0b88a5d421a992ad27661011db36091 | [
"MIT"
] | null | null | null | import pandas as pd
from zipfile import ZipFile
import numpy as np
import re
import os
def year_identifier(file_name):
    """Extract the first four-digit year of the form ``20xx`` from *file_name*.

    :param file_name: any object; its ``str()`` representation is searched
        (callers pass both strings and lists of file names).
    :return: the matched year as a string, e.g. ``"2019"``.
    :raises ValueError: when no ``20xx`` substring is present.  The original
        implementation crashed with an unhelpful ``AttributeError`` on ``None``
        in that case.
    """
    match = re.search(r'20\d\d', str(file_name))
    if match is None:
        raise ValueError(f"no year (20xx) found in {file_name!r}")
    return match.group()
def debt_correction(dataframe):
    """Disambiguate repeated account labels in-place and return the frame.

    Scans every cell in row-major order; each occurrence of
    'Empréstimos e Financiamentos' or 'ON' is suffixed with its own running
    counter (e.g. 'Empréstimos e Financiamentos 1', '... 2', ...) so that
    otherwise identical account rows can be told apart later.
    """
    debt_label = 'Empréstimos e Financiamentos'
    lpa_label = 'ON'
    next_debt = 1
    next_lpa = 1
    n_cols = len(dataframe.columns)
    for r in range(len(dataframe)):
        for c in range(n_cols):
            cell = dataframe.iat[r, c]
            if cell == debt_label:
                dataframe.iat[r, c] = f'{cell} {next_debt}'
                next_debt += 1
            elif cell == lpa_label:
                dataframe.iat[r, c] = f'{cell} {next_lpa}'
                next_lpa += 1
    return dataframe
def dataframe_filtering(folder, file_name_list, company_list, prev=False):
    '''
    Read the CVM statement CSVs of one year folder and extract the accounts of
    interest for each requested company.

    Input: folder name under ``raw_dfp``, list with the important files in that
    folder (DRE, BPA, BPP, DFC_MI) and list with companies of interest.
    prev: when True, keep only the previous-exercise rows (ORDEM_EXERC
    starting with 'P') and label the value column with year - 1; otherwise
    keep the current-exercise rows labelled with the folder's year.
    Output: list with one DataFrame per company (same order as
    ``company_list``), columns DENOM_CIA / CD_CONTA / DS_CONTA plus one value
    column named after the year.
    '''
    dataframe_general = []
    for company in company_list:
        dataframe_company = []
        dataframe_list = []
        for file in file_name_list:
            # Read the raw statement CSV (CVM files are latin-1, ';'-separated)
            file_raw = pd.read_csv(f'raw_dfp\\{folder}\\{file}', encoding='iso-8859-1', delimiter=';', skiprows=0, low_memory=False)
            # Keep either the current-year or the previous-year exercise rows
            if prev is False:
                file_1 = file_raw[~file_raw['ORDEM_EXERC'].str.startswith('P')]
                folder_year = year_identifier(file_name_list)
            else:
                file_1 = file_raw[file_raw['ORDEM_EXERC'].str.startswith('P')]
                folder_year = int(year_identifier(file_name_list)) - 1
            # Keep only the relevant columns
            file_2 = file_1[['DENOM_CIA', 'CD_CONTA','DS_CONTA', 'VL_CONTA']]
            # Keep only the requested company
            file_3 = file_2[file_2['DENOM_CIA'].isin([company])]
            # Account names of interest depend on the statement type.
            # NOTE(review): if a file name matches none of the four patterns,
            # `interest_data` from the previous iteration is reused (or the
            # code raises NameError on the first file) — confirm inputs.
            if file.find('DRE') != -1:
                interest_data = ['Receita de Venda de Bens e/ou Serviços', 'Resultado Bruto', 'Despesas com Vendas', 'Despesas com Pesquisa e Desenvolvimento',
                'Custos com Pesquisa e Desenvolvimento', 'Despesas com pesquisas e desenvolvimento', 'Pesquisa e Desenvolvimento', 'Pesquisa', 'Despesas com Pesquisas e Desenvolvimento',
                'Custo com Pesquisa e Desenvolvimento Tecnológico', 'Despesas com gastos com desenvolvimento', 'Despesas com desenvolvimento de tecnologia e produtos', 'Com estudos em desenvolvimento',
                'Despesas Gerais e Administrativas', 'Despesas de Depreciação', 'Despesas/Receitas Operacionais',
                'Resultado Antes do Resultado Financeiro e dos Tributos', 'Resultado Financeiro', 'Resultado Antes dos Tributos sobre o Lucro',
                'Resultado Líquido das Operações Continuadas', 'Lucro Básico por Ação', 'ON']
            elif file.find('BPA') != -1:
                interest_data = ['Ativo Total', 'Ativo Circulante', 'Imobilizado']
            elif file.find('BPP') != -1:
                interest_data = ['Passivo Circulante', 'Empréstimos e Financiamentos', 'Passivo Não Circulante', 'Patrimônio Líquido Consolidado',
                'Reservas de Lucros', 'Lucros/Prejuízos Acumulados']
            elif file.find('DFC_MI') != -1:
                interest_data = ['Lucro Líquido do exercício', 'Depreciação, Amortização e Impairment', 'Depreciação e amortização', 'Depreciação de arrendamento', 'Depreciação e Amortização', 'Depreciações e Amortizações', 'Amortização e Depreciação', 'Depreciação/amortização', 'Depreciações', 'Depreciação e Amortizações', 'Depreciação do imobilizado', 'Depreciação e depleção do imobilizado', 'Depreciação, exaustão e amortização', 'Depreciação, Amortização e Exaustão', 'Depreciação, Exaustão e Amortização',
                'Aquisição de Imobilizado e Intangíveis', 'Adições de imobilizado', 'Compras de ativo imobilizado', 'Aquisições de imobilizado', 'Aquisições de Imobilizado',
                'Aquisições de Imobilizado e Intangível', 'Aquisições de imobilizado e intangível', 'Aquisições de Imobilizados e Intangíveis (Exceto pelo Excedente de Cessão Onerosa)',
                'Aquisições de imobilizados e intangíveis', 'Aquisições de imobilizado veículos frota', 'Aquisições de imobilizado de uso', 'Aquisições de Imobilizado de Uso', 'Aquisição de ativos imobilizados, intangível e propriedade para investimento', 'Aquisição de imobilizado e intangível']
            file_4 = file_3[file_3['DS_CONTA'].isin(interest_data)]
            dataframe_list.append(file_4)
        # Concatenate the per-statement frames and label the value column with the year
        dataframe_company = pd.concat(dataframe_list)
        dataframe_company = dataframe_company.rename(columns={"VL_CONTA": f"{folder_year}"})
        # Append this company's frame to the general list
        dataframe_general.append(dataframe_company)
    return dataframe_general
def primary_info(companies, clear_prev_folder=False):
    """Unzip the raw CVM DFP archives and build one multi-year account frame
    per company.

    :param companies: list of company names (DENOM_CIA) of interest.
    :param clear_prev_folder: when True, delete previously extracted files of
        a year folder before re-extracting the zip.
    :return: list with one DataFrame per company (same order as ``companies``)
        whose columns are DS_CONTA plus one value column per year found.
    """
    company_frames = []
    for company in companies:
        company_frames.append(pd.DataFrame())
    # Identify the year of each zip in raw_dfp/raw_zip
    for file in os.listdir('raw_dfp\\raw_zip'):
        zip_year = year_identifier(f'raw_dfp\\raw_zip\\{file}')
        # Create (or optionally clear) the extraction folder for that year
        output_folder = zip_year
        directory_elements = os.listdir('raw_dfp')
        if output_folder not in directory_elements:
            os.mkdir(f'raw_dfp\\{output_folder}')
        elif os.listdir(f'raw_dfp\\{output_folder}') != [] and clear_prev_folder is True:
            output_folder_elements = os.listdir(f'raw_dfp\\{output_folder}')
            for element in output_folder_elements:
                os.remove(f'raw_dfp\\{output_folder}\\{element}')
        # Extract the zip only into an empty folder
        # NOTE(review): `zip` shadows the builtin of the same name here.
        if os.listdir(f'raw_dfp\\{output_folder}') == []:
            with ZipFile(f'raw_dfp\\raw_zip\\{file}', 'r') as zip:
                zip.extractall(path=f'raw_dfp\\{output_folder}')
        else:
            print(f"A pasta \"raw_dfp/{zip_year}\" ja tem arquivos internos. Confira a necessidade de descompactar o .zip.")
            print('Prosseguindo ...')
    # List year folders in 'raw_dfp', excluding 'raw_zip'
    raw_folders = os.listdir('raw_dfp')
    raw_folders.remove('raw_zip')
    # Walk each year folder
    for folder in raw_folders:
        # Remove all individual ("ind") reports, keeping only consolidated ones
        file_list = os.listdir(f'raw_dfp\\{folder}')
        for file in file_list:
            file_regex = re.compile(r'ind_20\d\d')
            mo = file_regex.search(str(file))
            if mo is not None:
                os.remove(f'raw_dfp\\{folder}\\{file}')
        # Locate the four consolidated statement files.
        # NOTE(review): dre/bpa/bpp/dfc stay unbound (NameError below) if a
        # statement file is missing from the folder — confirm inputs.
        for file in file_list:
            # Income statement (DRE)
            dre_regex = re.compile(r'DRE_con_20\d\d')
            mo_dre = dre_regex.search(str(file))
            if mo_dre is not None:
                dre = file
            # Balance sheet — assets (BPA)
            bpa_regex = re.compile(r'BPA_con_20\d\d')
            mo_bpa = bpa_regex.search(str(file))
            if mo_bpa is not None:
                bpa = file
            # Balance sheet — liabilities (BPP)
            bpp_regex = re.compile(r'BPP_con_20\d\d')
            mo_bpp = bpp_regex.search(str(file))
            if mo_bpp is not None:
                bpp = file
            # Cash-flow statement (DFC_MI)
            dfc_regex = re.compile(r'DFC_MI_con_20\d\d')
            mo_dfc = dfc_regex.search(str(file))
            if mo_dfc is not None:
                dfc = file
        folder_list = dataframe_filtering(folder, [dre, bpa, bpp, dfc], companies)
        # The 2017 files also carry the previous exercise, used to build 2016 data
        if int(folder) == 2017:
            folder_list_2 = dataframe_filtering(folder, [dre, bpa, bpp, dfc], companies, prev=True)
            for company_index in range(len(companies)):
                if len(folder_list_2[company_index]) == 0:  # Do not add empty dataframe
                    pass
                else:
                    company_frames[company_index] = debt_correction(folder_list_2[company_index])
        # Merge this year's column into each company's accumulated frame
        for company_index in range(len(companies)):
            if len(folder_list[company_index]) == 0:
                pass
            elif len(company_frames[company_index]) == 0:
                company_frames[company_index] = debt_correction(folder_list[company_index])
            else:
                main = company_frames[company_index]
                serie_corrected = debt_correction(folder_list[company_index][['DS_CONTA', str(folder)]])
                # NOTE(review): DS_CONTA becomes the index here but the merge
                # below joins on the DS_CONTA *column* — confirm this works
                # with the pandas version in use.
                serie = serie_corrected.set_index('DS_CONTA')
                company_frames[company_index] = pd.merge(main, serie, on=['DS_CONTA'])
    return company_frames
def worked_info(companies=['AMBEV S.A.'], clear_prev_folder=False):
    """Compute per-company financial indicators from the extracted DFP data.

    Fix: ``clear_prev_folder`` was previously accepted but silently ignored
    (a literal ``False`` was passed to ``primary_info``); it is now forwarded.

    :param companies: company names (DENOM_CIA) to analyse.
        NOTE(review): mutable default argument — harmless here because it is
        never mutated, but a tuple default would be safer.
    :param clear_prev_folder: forwarded to :func:`primary_info`.
    :return: list with one dict per company mapping indicator names to
        per-year numpy arrays / lists (margins, ROE/ROA, debt ratios, ...).
    """
    return_dict_list = []
    # Extract the primary (raw account) information
    prim_info = primary_info(companies, clear_prev_folder=clear_prev_folder)
    print('-+-' * 20)
    print('CARREGANDO DATAFRAME ...')
    # Walk through the companies
    for comp_index in range(len(companies)):
        # Collect the year columns present in this company's frame
        year_columns = []
        for column in prim_info[comp_index].columns:
            if '20' in column:
                year_columns.append(column)
        # Extract the company frame
        primary_frame = prim_info[comp_index]
        # Flags guarding against accounts that appear more than once:
        # only the first occurrence of each is collected.
        imobilizado_duplicate = 0
        desp_ga_duplicate = 0
        lucro_acumul_duplicate = 0
        dai_duplicate = 0
        ped_duplicate = 0
        vendas_duplicate = 0
        divida_curto_duplicate = 0
        divida_longo_duplicate = 0
        receita_duplicate = 0
        # Primary variable lists (one value per year)
        receita_list = []
        lucro_brut_list = []
        desp_vendas_list = []
        desp_ga_list = []
        dai_list = []
        desp_oper_list = []
        financeiro_list = []
        lucropreimp_list = []
        lucro_liq_list = []
        lucro_oper_list = []
        lucroporacao_list = []
        ativo_total_list = []
        ativo_circ_list = []
        imobilizado_list = []
        passivo_circ_list = []
        divida_curto_list = []
        divida_longo_list = []
        passivo_ncirc_list = []
        patr_liq_list = []
        lucro_acumul_list = []
        lucro_liq_exerc_list = []
        desp_ativo_fixo_list = []
        # Intermediate variables
        desp_vga_list = []
        desp_ped_list = []
        # Walk through the account rows
        for row in range(len(primary_frame)):
            col = 'DS_CONTA'
            # Fill primary variable lists (DRE)
            if primary_frame.iloc[row][col] == 'Receita de Venda de Bens e/ou Serviços':
                if receita_duplicate == 0:
                    receita_duplicate += 1
                    for year in year_columns:
                        receita_list.append(primary_frame.iloc[row][year])
                else:
                    pass
            elif primary_frame.iloc[row][col] == 'Resultado Bruto':
                for year in year_columns:
                    lucro_brut_list.append(primary_frame.iloc[row][year])
            elif primary_frame.iloc[row][col] == 'Despesas com Vendas':
                if vendas_duplicate == 0:
                    vendas_duplicate += 1
                    for year in year_columns:
                        desp_vendas_list.append(primary_frame.iloc[row][year])
                else:
                    pass
            elif primary_frame.iloc[row][col] == 'Despesas Gerais e Administrativas':
                if desp_ga_duplicate == 0:
                    desp_ga_duplicate += 1
                    for year in year_columns:
                        desp_ga_list.append(primary_frame.iloc[row][year])
                else:
                    pass
            elif primary_frame.iloc[row][col] in ['Despesas de Depreciação', 'Depreciação, Amortização e Impairment', 'Depreciação e amortização', 'Depreciação de arrendamento',
            'Depreciação e Amortização', 'Depreciações e Amortizações', 'Amortização e Depreciação', 'Depreciação/amortização',
            'Depreciações', 'Depreciação e Amortizações', 'Depreciação do imobilizado', 'Depreciação e depleção do imobilizado', 'Depreciação, exaustão e amortização',
            'Depreciação, Amortização e Exaustão', 'Depreciação, Exaustão e Amortização']:
                if dai_duplicate == 0:
                    dai_duplicate += 1
                    for year in year_columns:
                        dai_list.append(primary_frame.iloc[row][year])
                else:
                    pass
            elif primary_frame.iloc[row][col] in ['Despesas com Pesquisa e Desenvolvimento',
            'Custos com Pesquisa e Desenvolvimento', 'Despesas com pesquisas e desenvolvimento', 'Pesquisa e Desenvolvimento', 'Pesquisa', 'Despesas com Pesquisas e Desenvolvimento',
            'Custo com Pesquisa e Desenvolvimento Tecnológico', 'Despesas com gastos com desenvolvimento', 'Despesas com desenvolvimento de tecnologia e produtos', 'Com estudos em desenvolvimento']:
                if ped_duplicate == 0:
                    ped_duplicate += 1
                    for year in year_columns:
                        desp_ped_list.append(primary_frame.iloc[row][year])
                else:
                    pass
            elif primary_frame.iloc[row][col] == 'Despesas/Receitas Operacionais':
                for year in year_columns:
                    desp_oper_list.append(primary_frame.iloc[row][year])
            elif primary_frame.iloc[row][col] == 'Resultado Antes do Resultado Financeiro e dos Tributos':
                for year in year_columns:
                    lucro_oper_list.append(primary_frame.iloc[row][year])
            elif primary_frame.iloc[row][col] == 'Resultado Financeiro':
                for year in year_columns:
                    financeiro_list.append(primary_frame.iloc[row][year])
            elif primary_frame.iloc[row][col] == 'Resultado Antes dos Tributos sobre o Lucro':
                for year in year_columns:
                    lucropreimp_list.append(primary_frame.iloc[row][year])
            elif primary_frame.iloc[row][col] == 'Resultado Líquido das Operações Continuadas':
                for year in year_columns:
                    lucro_liq_list.append(primary_frame.iloc[row][year])
            elif primary_frame.iloc[row][col] == 'ON 1':
                for year in year_columns:
                    lucroporacao_list.append(primary_frame.iloc[row][year])
            # Fill primary variable lists (BPA and BPP)
            if primary_frame.iloc[row][col] == 'Ativo Total':
                for year in year_columns:
                    ativo_total_list.append(primary_frame.iloc[row][year])
            elif primary_frame.iloc[row][col] == 'Ativo Circulante':
                for year in year_columns:
                    ativo_circ_list.append(primary_frame.iloc[row][year])
            elif primary_frame.iloc[row][col] == 'Imobilizado':
                if imobilizado_duplicate == 0:
                    imobilizado_duplicate += 1
                    for year in year_columns:
                        imobilizado_list.append(primary_frame.iloc[row][year])
                else:
                    pass
            elif primary_frame.iloc[row][col] == 'Passivo Circulante':
                for year in year_columns:
                    passivo_circ_list.append(primary_frame.iloc[row][year])
            elif primary_frame.iloc[row][col] == 'Empréstimos e Financiamentos 1':
                if divida_curto_duplicate == 0:
                    divida_curto_duplicate += 1
                    for year in year_columns:
                        divida_curto_list.append(primary_frame.iloc[row][year])
                else:
                    pass
            elif primary_frame.iloc[row][col] == 'Empréstimos e Financiamentos 3':
                if divida_longo_duplicate == 0:
                    divida_longo_duplicate += 1
                    for year in year_columns:
                        divida_longo_list.append(primary_frame.iloc[row][year])
                else:
                    pass
            elif primary_frame.iloc[row][col] == 'Passivo Não Circulante':
                for year in year_columns:
                    passivo_ncirc_list.append(primary_frame.iloc[row][year])
            elif primary_frame.iloc[row][col] == 'Patrimônio Líquido Consolidado':
                for year in year_columns:
                    patr_liq_list.append(primary_frame.iloc[row][year])
            elif primary_frame.iloc[row][col] == 'Reservas de Lucros' or primary_frame.iloc[row][col] == 'Lucros/Prejuízos Acumulados':
                if lucro_acumul_duplicate == 0:
                    lucro_acumul_duplicate += 1
                    for year in year_columns:
                        lucro_acumul_list.append(primary_frame.iloc[row][year])
                else:
                    pass
            # Fill primary variable lists (DFC)
            elif primary_frame.iloc[row][col] == 'Lucro Líquido do exercício':
                for year in year_columns:
                    lucro_liq_exerc_list.append(primary_frame.iloc[row][year])
            elif primary_frame.iloc[row][col] in ['Aquisição de Imobilizado e Intangíveis',
            'Adições de imobilizado', 'Compras de ativo imobilizado', 'Aquisições de imobilizado', 'Aquisições de Imobilizado',
            'Aquisições de Imobilizado e Intangível', 'Aquisições de imobilizado e intangível', 'Aquisições de Imobilizados e Intangíveis (Exceto pelo Excedente de Cessão Onerosa)',
            'Aquisições de imobilizados e intangíveis', 'Aquisições de imobilizado veículos frota', 'Aquisições de imobilizado de uso', 'Aquisições de Imobilizado de Uso',
            'Aquisição de ativos imobilizados, intangível e propriedade para investimento', 'Aquisição de imobilizado e intangível']:
                for year in year_columns:
                    desp_ativo_fixo_list.append(primary_frame.iloc[row][year])
        # Build intermediate variables; missing accounts fall back to zeros
        desp_vga_list = np.array(desp_vendas_list) + np.array(desp_ga_list)
        divida_tot_list = np.array(divida_curto_list) + np.array(divida_longo_list)
        if lucro_brut_list == []:
            lucro_brut_list = np.zeros(len(year_columns))
        if desp_ped_list == []:
            desp_ped_list = np.zeros(len(year_columns))
        if dai_list == []:
            dai_list = np.zeros(len(year_columns))
        if desp_ativo_fixo_list == []:
            desp_ativo_fixo_list = np.zeros(len(year_columns))
        if lucro_liq_exerc_list == []:
            lucro_liq_exerc_list = lucro_liq_list
        # Build the derived indicators (percentages and ratios per year)
        marg_brut_list = 100 * np.divide(np.array(lucro_brut_list), np.array(receita_list))
        marg_liq_list = 100 * np.divide(np.array(lucro_liq_list), np.array(receita_list))
        vga_lucro_brut_list = 100 * np.divide(np.array(desp_vga_list), np.array(lucro_brut_list))
        ped_lucro_brut_list = 100 * np.divide(np.array(desp_ped_list), np.array(lucro_brut_list))
        deprec_lucro_brut_list = 100 * np.divide(np.array(dai_list), np.array(lucro_brut_list))
        juros_lucro_oper_list = 100 * np.divide(np.array(financeiro_list), np.array(lucro_oper_list))
        coef_liquidez_list = np.divide(np.array(ativo_circ_list), np.array(passivo_circ_list))
        passivo_tot_patrliq_list = np.divide((np.array(passivo_circ_list) + np.array(passivo_ncirc_list)), np.array(patr_liq_list))
        roe_list = 100 * np.divide(np.array(lucro_liq_list), np.array(patr_liq_list))
        roa_list = 100 * np.divide(np.array(lucro_liq_list), np.array(ativo_total_list))
        desp_ativo_fixo_lucro_liq_exerc_list = 100 * np.divide(np.array(desp_ativo_fixo_list), np.array(lucro_liq_exerc_list))
        divida_curto_tot_list = 100 * np.divide(np.array(divida_curto_list), np.array(divida_tot_list))
        divida_tot_lucro_oper_list = np.divide(np.array(divida_tot_list), np.array(lucro_oper_list))
        company_dict = {
            'year_columns': year_columns,
            'marg_brut_list': marg_brut_list,
            'marg_liq_list': marg_liq_list,
            'vga_lucro_brut_list': vga_lucro_brut_list,
            'ped_lucro_brut_list': ped_lucro_brut_list,
            'deprec_lucro_brut_list': deprec_lucro_brut_list,
            'juros_lucro_oper_list': juros_lucro_oper_list,
            'lucro_brut_list': lucro_brut_list,
            'lucro_liq_list': lucro_liq_list,
            'lucroporacao_list':lucroporacao_list,
            'coef_liquidez_list': coef_liquidez_list,
            'imobilizado_list': imobilizado_list,
            'passivo_tot_patrliq_list': passivo_tot_patrliq_list,
            'roe_list': roe_list,
            'roa_list': roa_list,
            'lucro_acumul_list': lucro_acumul_list,
            'desp_ativo_fixo_lucro_liq_exerc_list': desp_ativo_fixo_lucro_liq_exerc_list,
            'divida_curto_tot_list': divida_curto_tot_list,
            'divida_tot_lucro_oper_list': divida_tot_lucro_oper_list
        }
        return_dict_list.append(company_dict)
    return return_dict_list
| 46.108787 | 513 | 0.614338 | import pandas as pd
from zipfile import ZipFile
import numpy as np
import re
import os
def year_identifier(file_name):
folder_regex = re.compile(r'20\d\d')
match = folder_regex.search(str(file_name))
year = match.group()
return year
def debt_correction(dataframe):
debt_ident_list = ['Empréstimos e Financiamentos']
lpa_ident_list = ['ON']
count_debt = 1
count_lpa = 1
for row in range(len(dataframe)):
for col in range(len(dataframe.columns)):
if dataframe.iloc[row,col] in debt_ident_list:
prev_name = dataframe.iloc[row,col]
dataframe.iat[row, col] = f'{prev_name} {count_debt}'
count_debt += 1
if dataframe.iloc[row,col] in lpa_ident_list:
prev_name = dataframe.iloc[row,col]
dataframe.iat[row, col] = f'{prev_name} {count_lpa}'
count_lpa += 1
return dataframe
def dataframe_filtering(folder, file_name_list, company_list, prev=False):
dataframe_general = []
for company in company_list:
dataframe_company = []
dataframe_list = []
for file in file_name_list:
file_raw = pd.read_csv(f'raw_dfp\\{folder}\\{file}', encoding='iso-8859-1', delimiter=';', skiprows=0, low_memory=False)
if prev is False:
file_1 = file_raw[~file_raw['ORDEM_EXERC'].str.startswith('P')]
folder_year = year_identifier(file_name_list)
else:
file_1 = file_raw[file_raw['ORDEM_EXERC'].str.startswith('P')]
folder_year = int(year_identifier(file_name_list)) - 1
file_2 = file_1[['DENOM_CIA', 'CD_CONTA','DS_CONTA', 'VL_CONTA']]
file_3 = file_2[file_2['DENOM_CIA'].isin([company])]
if file.find('DRE') != -1:
interest_data = ['Receita de Venda de Bens e/ou Serviços', 'Resultado Bruto', 'Despesas com Vendas', 'Despesas com Pesquisa e Desenvolvimento',
'Custos com Pesquisa e Desenvolvimento', 'Despesas com pesquisas e desenvolvimento', 'Pesquisa e Desenvolvimento', 'Pesquisa', 'Despesas com Pesquisas e Desenvolvimento',
'Custo com Pesquisa e Desenvolvimento Tecnológico', 'Despesas com gastos com desenvolvimento', 'Despesas com desenvolvimento de tecnologia e produtos', 'Com estudos em desenvolvimento',
'Despesas Gerais e Administrativas', 'Despesas de Depreciação', 'Despesas/Receitas Operacionais',
'Resultado Antes do Resultado Financeiro e dos Tributos', 'Resultado Financeiro', 'Resultado Antes dos Tributos sobre o Lucro',
'Resultado Líquido das Operações Continuadas', 'Lucro Básico por Ação', 'ON']
elif file.find('BPA') != -1:
interest_data = ['Ativo Total', 'Ativo Circulante', 'Imobilizado']
elif file.find('BPP') != -1:
interest_data = ['Passivo Circulante', 'Empréstimos e Financiamentos', 'Passivo Não Circulante', 'Patrimônio Líquido Consolidado',
'Reservas de Lucros', 'Lucros/Prejuízos Acumulados']
elif file.find('DFC_MI') != -1:
interest_data = ['Lucro Líquido do exercício', 'Depreciação, Amortização e Impairment', 'Depreciação e amortização', 'Depreciação de arrendamento', 'Depreciação e Amortização', 'Depreciações e Amortizações', 'Amortização e Depreciação', 'Depreciação/amortização', 'Depreciações', 'Depreciação e Amortizações', 'Depreciação do imobilizado', 'Depreciação e depleção do imobilizado', 'Depreciação, exaustão e amortização', 'Depreciação, Amortização e Exaustão', 'Depreciação, Exaustão e Amortização',
'Aquisição de Imobilizado e Intangíveis', 'Adições de imobilizado', 'Compras de ativo imobilizado', 'Aquisições de imobilizado', 'Aquisições de Imobilizado',
'Aquisições de Imobilizado e Intangível', 'Aquisições de imobilizado e intangível', 'Aquisições de Imobilizados e Intangíveis (Exceto pelo Excedente de Cessão Onerosa)',
'Aquisições de imobilizados e intangíveis', 'Aquisições de imobilizado veículos frota', 'Aquisições de imobilizado de uso', 'Aquisições de Imobilizado de Uso', 'Aquisição de ativos imobilizados, intangível e propriedade para investimento', 'Aquisição de imobilizado e intangível']
file_4 = file_3[file_3['DS_CONTA'].isin(interest_data)]
dataframe_list.append(file_4)
dataframe_company = pd.concat(dataframe_list)
dataframe_company = dataframe_company.rename(columns={"VL_CONTA": f"{folder_year}"})
dataframe_general.append(dataframe_company)
return dataframe_general
def primary_info(companies, clear_prev_folder=False):
company_frames = []
for company in companies:
company_frames.append(pd.DataFrame())
for file in os.listdir('raw_dfp\\raw_zip'):
zip_year = year_identifier(f'raw_dfp\\raw_zip\\{file}')
output_folder = zip_year
directory_elements = os.listdir('raw_dfp')
if output_folder not in directory_elements:
os.mkdir(f'raw_dfp\\{output_folder}')
elif os.listdir(f'raw_dfp\\{output_folder}') != [] and clear_prev_folder is True:
output_folder_elements = os.listdir(f'raw_dfp\\{output_folder}')
for element in output_folder_elements:
os.remove(f'raw_dfp\\{output_folder}\\{element}')
if os.listdir(f'raw_dfp\\{output_folder}') == []:
with ZipFile(f'raw_dfp\\raw_zip\\{file}', 'r') as zip:
zip.extractall(path=f'raw_dfp\\{output_folder}')
else:
print(f"A pasta \"raw_dfp/{zip_year}\" ja tem arquivos internos. Confira a necessidade de descompactar o .zip.")
print('Prosseguindo ...')
raw_folders = os.listdir('raw_dfp')
raw_folders.remove('raw_zip')
for folder in raw_folders:
file_list = os.listdir(f'raw_dfp\\{folder}')
for file in file_list:
file_regex = re.compile(r'ind_20\d\d')
mo = file_regex.search(str(file))
if mo is not None:
os.remove(f'raw_dfp\\{folder}\\{file}')
for file in file_list:
dre_regex = re.compile(r'DRE_con_20\d\d')
mo_dre = dre_regex.search(str(file))
if mo_dre is not None:
dre = file
bpa_regex = re.compile(r'BPA_con_20\d\d')
mo_bpa = bpa_regex.search(str(file))
if mo_bpa is not None:
bpa = file
bpp_regex = re.compile(r'BPP_con_20\d\d')
mo_bpp = bpp_regex.search(str(file))
if mo_bpp is not None:
bpp = file
dfc_regex = re.compile(r'DFC_MI_con_20\d\d')
mo_dfc = dfc_regex.search(str(file))
if mo_dfc is not None:
dfc = file
folder_list = dataframe_filtering(folder, [dre, bpa, bpp, dfc], companies)
if int(folder) == 2017:
folder_list_2 = dataframe_filtering(folder, [dre, bpa, bpp, dfc], companies, prev=True)
for company_index in range(len(companies)):
if len(folder_list_2[company_index]) == 0:
pass
else:
company_frames[company_index] = debt_correction(folder_list_2[company_index])
for company_index in range(len(companies)):
if len(folder_list[company_index]) == 0:
pass
elif len(company_frames[company_index]) == 0:
company_frames[company_index] = debt_correction(folder_list[company_index])
else:
main = company_frames[company_index]
serie_corrected = debt_correction(folder_list[company_index][['DS_CONTA', str(folder)]])
serie = serie_corrected.set_index('DS_CONTA')
company_frames[company_index] = pd.merge(main, serie, on=['DS_CONTA'])
return company_frames
def worked_info(companies=['AMBEV S.A.'], clear_prev_folder=False):
return_dict_list = []
prim_info = primary_info(companies, clear_prev_folder=False)
print('-+-' * 20)
print('CARREGANDO DATAFFRAME ...')
for comp_index in range(len(companies)):
year_columns = []
for column in prim_info[comp_index].columns:
if '20' in column:
year_columns.append(column)
primary_frame = prim_info[comp_index]
imobilizado_duplicate = 0
desp_ga_duplicate = 0
lucro_acumul_duplicate = 0
dai_duplicate = 0
ped_duplicate = 0
vendas_duplicate = 0
divida_curto_duplicate = 0
divida_longo_duplicate = 0
receita_duplicate = 0
receita_list = []
lucro_brut_list = []
desp_vendas_list = []
desp_ga_list = []
dai_list = []
desp_oper_list = []
financeiro_list = []
lucropreimp_list = []
lucro_liq_list = []
lucro_oper_list = []
lucroporacao_list = []
ativo_total_list = []
ativo_circ_list = []
imobilizado_list = []
passivo_circ_list = []
divida_curto_list = []
divida_longo_list = []
passivo_ncirc_list = []
patr_liq_list = []
lucro_acumul_list = []
lucro_liq_exerc_list = []
desp_ativo_fixo_list = []
desp_vga_list = []
desp_ped_list = []
for row in range(len(primary_frame)):
col = 'DS_CONTA'
if primary_frame.iloc[row][col] == 'Receita de Venda de Bens e/ou Serviços':
if receita_duplicate == 0:
receita_duplicate += 1
for year in year_columns:
receita_list.append(primary_frame.iloc[row][year])
else:
pass
elif primary_frame.iloc[row][col] == 'Resultado Bruto':
for year in year_columns:
lucro_brut_list.append(primary_frame.iloc[row][year])
elif primary_frame.iloc[row][col] == 'Despesas com Vendas':
if vendas_duplicate == 0:
vendas_duplicate += 1
for year in year_columns:
desp_vendas_list.append(primary_frame.iloc[row][year])
else:
pass
elif primary_frame.iloc[row][col] == 'Despesas Gerais e Administrativas':
if desp_ga_duplicate == 0:
desp_ga_duplicate += 1
for year in year_columns:
desp_ga_list.append(primary_frame.iloc[row][year])
else:
pass
elif primary_frame.iloc[row][col] in ['Despesas de Depreciação', 'Depreciação, Amortização e Impairment', 'Depreciação e amortização', 'Depreciação de arrendamento',
'Depreciação e Amortização', 'Depreciações e Amortizações', 'Amortização e Depreciação', 'Depreciação/amortização',
'Depreciações', 'Depreciação e Amortizações', 'Depreciação do imobilizado', 'Depreciação e depleção do imobilizado', 'Depreciação, exaustão e amortização',
'Depreciação, Amortização e Exaustão', 'Depreciação, Exaustão e Amortização']:
if dai_duplicate == 0:
dai_duplicate += 1
for year in year_columns:
dai_list.append(primary_frame.iloc[row][year])
else:
pass
elif primary_frame.iloc[row][col] in ['Despesas com Pesquisa e Desenvolvimento',
'Custos com Pesquisa e Desenvolvimento', 'Despesas com pesquisas e desenvolvimento', 'Pesquisa e Desenvolvimento', 'Pesquisa', 'Despesas com Pesquisas e Desenvolvimento',
'Custo com Pesquisa e Desenvolvimento Tecnológico', 'Despesas com gastos com desenvolvimento', 'Despesas com desenvolvimento de tecnologia e produtos', 'Com estudos em desenvolvimento']:
if ped_duplicate == 0:
ped_duplicate += 1
for year in year_columns:
desp_ped_list.append(primary_frame.iloc[row][year])
else:
pass
elif primary_frame.iloc[row][col] == 'Despesas/Receitas Operacionais':
for year in year_columns:
desp_oper_list.append(primary_frame.iloc[row][year])
elif primary_frame.iloc[row][col] == 'Resultado Antes do Resultado Financeiro e dos Tributos':
for year in year_columns:
lucro_oper_list.append(primary_frame.iloc[row][year])
elif primary_frame.iloc[row][col] == 'Resultado Financeiro':
for year in year_columns:
financeiro_list.append(primary_frame.iloc[row][year])
elif primary_frame.iloc[row][col] == 'Resultado Antes dos Tributos sobre o Lucro':
for year in year_columns:
lucropreimp_list.append(primary_frame.iloc[row][year])
elif primary_frame.iloc[row][col] == 'Resultado Líquido das Operações Continuadas':
for year in year_columns:
lucro_liq_list.append(primary_frame.iloc[row][year])
elif primary_frame.iloc[row][col] == 'ON 1':
for year in year_columns:
lucroporacao_list.append(primary_frame.iloc[row][year])
if primary_frame.iloc[row][col] == 'Ativo Total':
for year in year_columns:
ativo_total_list.append(primary_frame.iloc[row][year])
elif primary_frame.iloc[row][col] == 'Ativo Circulante':
for year in year_columns:
ativo_circ_list.append(primary_frame.iloc[row][year])
elif primary_frame.iloc[row][col] == 'Imobilizado':
if imobilizado_duplicate == 0:
imobilizado_duplicate += 1
for year in year_columns:
imobilizado_list.append(primary_frame.iloc[row][year])
else:
pass
elif primary_frame.iloc[row][col] == 'Passivo Circulante':
for year in year_columns:
passivo_circ_list.append(primary_frame.iloc[row][year])
elif primary_frame.iloc[row][col] == 'Empréstimos e Financiamentos 1':
if divida_curto_duplicate == 0:
divida_curto_duplicate += 1
for year in year_columns:
divida_curto_list.append(primary_frame.iloc[row][year])
else:
pass
elif primary_frame.iloc[row][col] == 'Empréstimos e Financiamentos 3':
if divida_longo_duplicate == 0:
divida_longo_duplicate += 1
for year in year_columns:
divida_longo_list.append(primary_frame.iloc[row][year])
else:
pass
elif primary_frame.iloc[row][col] == 'Passivo Não Circulante':
for year in year_columns:
passivo_ncirc_list.append(primary_frame.iloc[row][year])
elif primary_frame.iloc[row][col] == 'Patrimônio Líquido Consolidado':
for year in year_columns:
patr_liq_list.append(primary_frame.iloc[row][year])
elif primary_frame.iloc[row][col] == 'Reservas de Lucros' or primary_frame.iloc[row][col] == 'Lucros/Prejuízos Acumulados':
if lucro_acumul_duplicate == 0:
lucro_acumul_duplicate += 1
for year in year_columns:
lucro_acumul_list.append(primary_frame.iloc[row][year])
else:
pass
elif primary_frame.iloc[row][col] == 'Lucro Líquido do exercício':
for year in year_columns:
lucro_liq_exerc_list.append(primary_frame.iloc[row][year])
elif primary_frame.iloc[row][col] in ['Aquisição de Imobilizado e Intangíveis',
'Adições de imobilizado', 'Compras de ativo imobilizado', 'Aquisições de imobilizado', 'Aquisições de Imobilizado',
'Aquisições de Imobilizado e Intangível', 'Aquisições de imobilizado e intangível', 'Aquisições de Imobilizados e Intangíveis (Exceto pelo Excedente de Cessão Onerosa)',
'Aquisições de imobilizados e intangíveis', 'Aquisições de imobilizado veículos frota', 'Aquisições de imobilizado de uso', 'Aquisições de Imobilizado de Uso',
'Aquisição de ativos imobilizados, intangível e propriedade para investimento', 'Aquisição de imobilizado e intangível']:
for year in year_columns:
desp_ativo_fixo_list.append(primary_frame.iloc[row][year])
desp_vga_list = np.array(desp_vendas_list) + np.array(desp_ga_list)
divida_tot_list = np.array(divida_curto_list) + np.array(divida_longo_list)
if lucro_brut_list == []:
lucro_brut_list = np.zeros(len(year_columns))
if desp_ped_list == []:
desp_ped_list = np.zeros(len(year_columns))
if dai_list == []:
dai_list = np.zeros(len(year_columns))
if desp_ativo_fixo_list == []:
desp_ativo_fixo_list = np.zeros(len(year_columns))
if lucro_liq_exerc_list == []:
lucro_liq_exerc_list = lucro_liq_list
marg_brut_list = 100 * np.divide(np.array(lucro_brut_list), np.array(receita_list))
marg_liq_list = 100 * np.divide(np.array(lucro_liq_list), np.array(receita_list))
vga_lucro_brut_list = 100 * np.divide(np.array(desp_vga_list), np.array(lucro_brut_list))
ped_lucro_brut_list = 100 * np.divide(np.array(desp_ped_list), np.array(lucro_brut_list))
deprec_lucro_brut_list = 100 * np.divide(np.array(dai_list), np.array(lucro_brut_list))
juros_lucro_oper_list = 100 * np.divide(np.array(financeiro_list), np.array(lucro_oper_list))
coef_liquidez_list = np.divide(np.array(ativo_circ_list), np.array(passivo_circ_list))
passivo_tot_patrliq_list = np.divide((np.array(passivo_circ_list) + np.array(passivo_ncirc_list)), np.array(patr_liq_list))
roe_list = 100 * np.divide(np.array(lucro_liq_list), np.array(patr_liq_list))
roa_list = 100 * np.divide(np.array(lucro_liq_list), np.array(ativo_total_list))
desp_ativo_fixo_lucro_liq_exerc_list = 100 * np.divide(np.array(desp_ativo_fixo_list), np.array(lucro_liq_exerc_list))
divida_curto_tot_list = 100 * np.divide(np.array(divida_curto_list), np.array(divida_tot_list))
divida_tot_lucro_oper_list = np.divide(np.array(divida_tot_list), np.array(lucro_oper_list))
company_dict = {
'year_columns': year_columns,
'marg_brut_list': marg_brut_list,
'marg_liq_list': marg_liq_list,
'vga_lucro_brut_list': vga_lucro_brut_list,
'ped_lucro_brut_list': ped_lucro_brut_list,
'deprec_lucro_brut_list': deprec_lucro_brut_list,
'juros_lucro_oper_list': juros_lucro_oper_list,
'lucro_brut_list': lucro_brut_list,
'lucro_liq_list': lucro_liq_list,
'lucroporacao_list':lucroporacao_list,
'coef_liquidez_list': coef_liquidez_list,
'imobilizado_list': imobilizado_list,
'passivo_tot_patrliq_list': passivo_tot_patrliq_list,
'roe_list': roe_list,
'roa_list': roa_list,
'lucro_acumul_list': lucro_acumul_list,
'desp_ativo_fixo_lucro_liq_exerc_list': desp_ativo_fixo_lucro_liq_exerc_list,
'divida_curto_tot_list': divida_curto_tot_list,
'divida_tot_lucro_oper_list': divida_tot_lucro_oper_list
}
return_dict_list.append(company_dict)
return return_dict_list
| true | true |
f72f4aa4e427e9d3ee2d0c986960f64ae8d9cdb5 | 1,029 | py | Python | pyfatfs/__init__.py | abrasive/pyfatfs | a35586bfa2b1d3f8d4638142407db68f9318b86d | [
"MIT"
] | null | null | null | pyfatfs/__init__.py | abrasive/pyfatfs | a35586bfa2b1d3f8d4638142407db68f9318b86d | [
"MIT"
] | null | null | null | pyfatfs/__init__.py | abrasive/pyfatfs | a35586bfa2b1d3f8d4638142407db68f9318b86d | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Python FAT filesystem module with :doc:`PyFilesystem2 <pyfilesystem2:index>` \
compatibility.
pyfatfs allows interaction with FAT12/16/32 filesystems, either via
:doc:`PyFilesystem2 <pyfilesystem2:index>` for file-level abstraction
or direct interaction with the filesystem for low-level access.
"""
__name__ = 'pyfatfs'
__author__ = 'Nathan-J. Hirschauer'
__author_email__ = 'nathanhi@deepserve.info'
__license__ = 'MIT License'
#: Specifies default ("OEM") encoding
from pyfatfs._exceptions import PyFATException
FAT_OEM_ENCODING = 'ibm437'
#: Specifies the long file name encoding, which is always UTF-16 (LE)
FAT_LFN_ENCODING = 'utf-16-le'
def _init_check(func):
def _wrapper(*args, **kwargs):
initialized = args[0].initialized
if initialized is True:
return func(*args, **kwargs)
else:
raise PyFATException("Class has not yet been fully initialized, "
"please instantiate first.")
return _wrapper
| 27.810811 | 78 | 0.696793 |
__name__ = 'pyfatfs'
__author__ = 'Nathan-J. Hirschauer'
__author_email__ = 'nathanhi@deepserve.info'
__license__ = 'MIT License'
from pyfatfs._exceptions import PyFATException
FAT_OEM_ENCODING = 'ibm437'
FAT_LFN_ENCODING = 'utf-16-le'
def _init_check(func):
def _wrapper(*args, **kwargs):
initialized = args[0].initialized
if initialized is True:
return func(*args, **kwargs)
else:
raise PyFATException("Class has not yet been fully initialized, "
"please instantiate first.")
return _wrapper
| true | true |
f72f4ab59584f2bc1d9c3a2887905b90bc8c7022 | 314 | py | Python | fwrap/__init__.py | wilsonify/fwrap | f2e20eb55eaa3de72905e2ef28198da00eebe262 | [
"BSD-3-Clause"
] | 23 | 2015-02-25T00:24:15.000Z | 2021-09-08T01:35:45.000Z | fwrap/__init__.py | fwrap/fwrap | 61a56f2d0050096b4973d88e5f11cfac2ef01a4b | [
"BSD-3-Clause"
] | 1 | 2021-09-08T01:45:02.000Z | 2021-09-08T01:45:02.000Z | fwrap/__init__.py | fwrap/fwrap | 61a56f2d0050096b4973d88e5f11cfac2ef01a4b | [
"BSD-3-Clause"
] | 4 | 2015-03-22T01:33:39.000Z | 2021-09-09T15:25:44.000Z | #------------------------------------------------------------------------------
# Copyright (c) 2010, Kurt W. Smith
# All rights reserved. See LICENSE.txt.
#------------------------------------------------------------------------------
#!/usr/bin/env python
# encoding: utf-8
import os
import sys
__all__ = []
| 22.428571 | 79 | 0.334395 |
import os
import sys
__all__ = []
| true | true |
f72f4aeb1f0ee56032043a5cf920adea699c69c9 | 120 | py | Python | src/django_website/contact_us/urls.py | jdheinz/project-ordo_ab_chao | 4063f93b297bab43cff6ca64fa5ba103f0c75158 | [
"MIT"
] | 2 | 2019-09-23T18:42:32.000Z | 2019-09-27T00:33:38.000Z | src/django_website/contact_us/urls.py | jdheinz/project-ordo_ab_chao | 4063f93b297bab43cff6ca64fa5ba103f0c75158 | [
"MIT"
] | 6 | 2021-03-19T03:25:33.000Z | 2022-02-10T08:48:14.000Z | src/django_website/contact_us/urls.py | jdheinz/project-ordo_ab_chao | 4063f93b297bab43cff6ca64fa5ba103f0c75158 | [
"MIT"
] | 6 | 2019-09-23T18:53:41.000Z | 2020-02-06T00:20:06.000Z | from django.urls import path
from . import views
urlpatterns = [
path('', views.contact_page, name='contact'),
] | 17.142857 | 51 | 0.683333 | from django.urls import path
from . import views
urlpatterns = [
path('', views.contact_page, name='contact'),
] | true | true |
f72f4b412f1dfb0ca6e24370f06424fb77c77d93 | 353 | py | Python | pylinkage/output_arch.py | Drumato/pylinkage | 2033112c95a15722efcd9271c08fd919df635eae | [
"MIT"
] | null | null | null | pylinkage/output_arch.py | Drumato/pylinkage | 2033112c95a15722efcd9271c08fd919df635eae | [
"MIT"
] | null | null | null | pylinkage/output_arch.py | Drumato/pylinkage | 2033112c95a15722efcd9271c08fd919df635eae | [
"MIT"
] | null | null | null | import enum
class OutputArch(enum.Enum):
"""Machine architecture"""
X86_64 = enum.auto()
I386 = enum.auto()
NONE = enum.auto()
def to_string(self) -> str:
if self == OutputArch.X86_64:
return "i386:x86-64"
elif self == OutputArch.I386:
return "i386"
else:
return "NONE" | 20.764706 | 37 | 0.543909 | import enum
class OutputArch(enum.Enum):
X86_64 = enum.auto()
I386 = enum.auto()
NONE = enum.auto()
def to_string(self) -> str:
if self == OutputArch.X86_64:
return "i386:x86-64"
elif self == OutputArch.I386:
return "i386"
else:
return "NONE" | true | true |
f72f4b5ce8a8b8deb81b605df4f7eb7f50221c03 | 3,678 | py | Python | mezzanine/utils/html.py | Cajoline/mezzanine | 54260343eec90f8c93e963465c4fdac55f66ec4c | [
"BSD-2-Clause"
] | null | null | null | mezzanine/utils/html.py | Cajoline/mezzanine | 54260343eec90f8c93e963465c4fdac55f66ec4c | [
"BSD-2-Clause"
] | null | null | null | mezzanine/utils/html.py | Cajoline/mezzanine | 54260343eec90f8c93e963465c4fdac55f66ec4c | [
"BSD-2-Clause"
] | null | null | null | from __future__ import absolute_import, unicode_literals
from future.builtins import chr, int, str
try:
from html.parser import HTMLParser, HTMLParseError
from html.entities import name2codepoint
except ImportError: # Python 2
from HTMLParser import HTMLParser, HTMLParseError
from htmlentitydefs import name2codepoint
import re
SELF_CLOSING_TAGS = ['br', 'img']
NON_SELF_CLOSING_TAGS = ['script', 'iframe']
ABSOLUTE_URL_TAGS = {"img": "src", "a": "href", "iframe": "src"}
def absolute_urls(html):
"""
Converts relative URLs into absolute URLs. Used for RSS feeds to
provide more complete HTML for item descriptions, but could also
be used as a general richtext filter.
"""
from bs4 import BeautifulSoup
from mezzanine.core.request import current_request
request = current_request()
if request is not None:
dom = BeautifulSoup(html, "html.parser")
for tag, attr in ABSOLUTE_URL_TAGS.items():
for node in dom.findAll(tag):
url = node.get(attr, "")
if url:
node[attr] = request.build_absolute_uri(url)
html = str(dom)
return html
def decode_entities(html):
"""
Remove HTML entities from a string.
Adapted from http://effbot.org/zone/re-sub.htm#unescape-html
"""
def decode(m):
html = m.group(0)
if html[:2] == "&#":
try:
if html[:3] == "&#x":
return chr(int(html[3:-1], 16))
else:
return chr(int(html[2:-1]))
except ValueError:
pass
else:
try:
html = chr(name2codepoint[html[1:-1]])
except KeyError:
pass
return html
return re.sub("&#?\w+;", decode, html.replace("&", "&"))
def thumbnails(html):
"""
Given a HTML string, converts paths in img tags to thumbnail
paths, using Mezzanine's ``thumbnail`` template tag. Used as
one of the default values in the ``RICHTEXT_FILTERS`` setting.
"""
from django.conf import settings
from bs4 import BeautifulSoup
from mezzanine.core.templatetags.mezzanine_tags import thumbnail
# If MEDIA_URL isn't in the HTML string, there aren't any
# images to replace, so bail early.
if settings.MEDIA_URL.lower() not in html.lower():
return html
dom = BeautifulSoup(html, "html.parser")
for img in dom.findAll("img"):
src = img.get("src", "")
src_in_media = src.lower().startswith(settings.MEDIA_URL.lower())
width = img.get("width")
height = img.get("height")
if src_in_media and width and height:
img["src"] = settings.MEDIA_URL + thumbnail(src, width, height)
# BS adds closing br tags, which the browser interprets as br tags.
return str(dom).replace("</br>", "")
class TagCloser(HTMLParser):
"""
HTMLParser that closes open tags. Takes a HTML string as its first
arg, and populate a ``html`` attribute on the parser with the
original HTML arg and any required closing tags.
"""
def __init__(self, html):
HTMLParser.__init__(self)
self.html = html
self.tags = []
try:
self.feed(self.html)
except HTMLParseError:
pass
else:
self.html += "".join(["</%s>" % tag for tag in self.tags])
def handle_starttag(self, tag, attrs):
if tag not in SELF_CLOSING_TAGS:
self.tags.insert(0, tag)
def handle_endtag(self, tag):
try:
self.tags.remove(tag)
except ValueError:
pass
| 30.907563 | 75 | 0.605492 | from __future__ import absolute_import, unicode_literals
from future.builtins import chr, int, str
try:
from html.parser import HTMLParser, HTMLParseError
from html.entities import name2codepoint
except ImportError:
from HTMLParser import HTMLParser, HTMLParseError
from htmlentitydefs import name2codepoint
import re
SELF_CLOSING_TAGS = ['br', 'img']
NON_SELF_CLOSING_TAGS = ['script', 'iframe']
ABSOLUTE_URL_TAGS = {"img": "src", "a": "href", "iframe": "src"}
def absolute_urls(html):
from bs4 import BeautifulSoup
from mezzanine.core.request import current_request
request = current_request()
if request is not None:
dom = BeautifulSoup(html, "html.parser")
for tag, attr in ABSOLUTE_URL_TAGS.items():
for node in dom.findAll(tag):
url = node.get(attr, "")
if url:
node[attr] = request.build_absolute_uri(url)
html = str(dom)
return html
def decode_entities(html):
def decode(m):
html = m.group(0)
if html[:2] == "&#":
try:
if html[:3] == "&#x":
return chr(int(html[3:-1], 16))
else:
return chr(int(html[2:-1]))
except ValueError:
pass
else:
try:
html = chr(name2codepoint[html[1:-1]])
except KeyError:
pass
return html
return re.sub("&#?\w+;", decode, html.replace("&", "&"))
def thumbnails(html):
from django.conf import settings
from bs4 import BeautifulSoup
from mezzanine.core.templatetags.mezzanine_tags import thumbnail
if settings.MEDIA_URL.lower() not in html.lower():
return html
dom = BeautifulSoup(html, "html.parser")
for img in dom.findAll("img"):
src = img.get("src", "")
src_in_media = src.lower().startswith(settings.MEDIA_URL.lower())
width = img.get("width")
height = img.get("height")
if src_in_media and width and height:
img["src"] = settings.MEDIA_URL + thumbnail(src, width, height)
return str(dom).replace("</br>", "")
class TagCloser(HTMLParser):
def __init__(self, html):
HTMLParser.__init__(self)
self.html = html
self.tags = []
try:
self.feed(self.html)
except HTMLParseError:
pass
else:
self.html += "".join(["</%s>" % tag for tag in self.tags])
def handle_starttag(self, tag, attrs):
if tag not in SELF_CLOSING_TAGS:
self.tags.insert(0, tag)
def handle_endtag(self, tag):
try:
self.tags.remove(tag)
except ValueError:
pass
| true | true |
f72f4b7f8e1c3322d2100f5a273305db28e010d9 | 12,114 | py | Python | Tweet2Story/text2story/core/annotator.py | LIAAD/Tweet2Story | 1340c01bbc68915d51d67973f87325265b71f2dd | [
"MIT"
] | 5 | 2021-07-29T15:53:32.000Z | 2022-03-07T02:46:52.000Z | Tweet2Story/text2story/core/annotator.py | LIAAD/Tweet2Story | 1340c01bbc68915d51d67973f87325265b71f2dd | [
"MIT"
] | null | null | null | Tweet2Story/text2story/core/annotator.py | LIAAD/Tweet2Story | 1340c01bbc68915d51d67973f87325265b71f2dd | [
"MIT"
] | 1 | 2022-01-30T17:46:51.000Z | 2022-01-30T17:46:51.000Z | """
package.text2story.core.annotator
META-annotator
"""
from text2story.annotators import ACTOR_EXTRACTION_TOOLS, TIME_EXTRACTION_TOOLS, OBJECTAL_LINKS_RESOLUTION_TOOLS
from text2story.annotators import EVENT_EXTRACTION_TOOLS, SEMANTIC_ROLE_LABELLING_TOOLS
from text2story.annotators import extract_actors, extract_times, extract_objectal_links, extract_events
from text2story.annotators import extract_semantic_role_links
class Annotator:
"""
Representation of a META-Annotator (a combination of one or more annotators).
Useful to give a uniform interface to the rest of the package.
Attributes
----------
tools : list[str]
the list of annotators to be used
Methods
-------
extract_actors(lang, text)
Returns a list with the actors identified in the text.
Each actor is represented by a tuple, consisting of the start character offset, end character offset, the POS tag and the NE IOB tag, resp.
Example: (0, 4, 'Noun', 'Per')
Possible POS tags: 'Noun', 'Pronoun'.
Possible NE IOB tags: 'Per', 'Org', 'Loc', 'Obj', 'Nat' and 'Other'.
extract_times(lang, text)
Returns a list with the times identified in the text.
Each time is represented by a tuple, consisting of the start character offset, end character offset, it's type and it's value, resp.
Example: (6, 17, 'DATE', '2021-08-31')
extract_corefs(lang, text)
Returns a list with the clusters of entities identified in the text
Each cluster is a list with tuples, where every tuple is a 2D tuple with the start and end character offset of the span corresponding to the same entity.
Example: [(0, 6), (20, 22)]
"""
def __init__(self, tools):
"""
Parameters
----------
tools : list[str]
the list of the annotators to be used; can be used any combination of them
possible annotators are: 'spacy', 'nltk' and 'sparknlp'
"""
self.tools = tools
    def extract_actors(self, lang, text):
        """
        Extract the actors from the text, merging the output of every
        configured annotator into one deduplicated list.

        Annotations from different tools whose character spans overlap are
        assumed to denote the same actor and are merged into a single span
        covering the union of the overlapping spans.

        Parameters
        ----------
        lang : str
            the language of the text
            current supported languages are: portuguese ('pt'); english ('en')
        text : str
            the text to be made the extraction

        Returns
        -------
        list[tuple[tuple[int, int], str, str]]
            one tuple per actor: ((start_offset, end_offset), lexical_head,
            actor_type); lexical_head is 'Noun' or 'Pronoun' and actor_type
            is one of 'Per', 'Org', 'Loc', 'Obj', 'Nat', 'Other'
        """

        # The extraction can be done with more than one tool.
        # Different tools may tokenize the text differently, so the same entity can be
        # reported with different character spans. For instance, one tool may identify
        # the entity at span (2, 7) and another at (5, 10); we assume the whole entity
        # spans (2, 10).
        # To do that, we take the earliest (by start offset) identification made and keep
        # extending the end offset as far as possible with every annotation whose span
        # intersects the current one.

        # While merging we accumulate several POS tags and NE IOB tags but only want one of each.
        # For the POS tag, we take the most common label.
        # For the NE IOB tag, we do the same, but favor specific labels over the generic
        # 'Other' label: even if 'Other' is the most common, a more specific label wins.

        nr_tools = len(self.tools)
        # If no tool specified, use all
        if nr_tools == 0:
            self.tools = ACTOR_EXTRACTION_TOOLS
            nr_tools = len(self.tools)
        # Gather the annotations made by each specified tool; annotations[i] is the
        # (start-offset ordered) list produced by self.tools[i].
        annotations = []
        for tool in self.tools:
            annotations.append(extract_actors(tool, lang, text))
        final_annotation = []
        idxs = [0] * nr_tools # Cursor into each tool's annotation list (next unconsumed actor)
        while not(all([len(annotations[i]) == idxs[i] for i in range(nr_tools)])): # Loop until every actor from every tool has been consumed
            tool_id = -1
            tool_id_start_char_offset = len(text) # len(text) acts as +infinity (no span can start past the text end)
            # Pick the next entity chunk to be gathered: the pending annotation with the
            # lowest start offset across all tools.
            for i in range(nr_tools):
                if idxs[i] < len(annotations[i]):
                    current_actor = annotations[i][idxs[i]]
                    current_actor_start_character_span = current_actor[0][0]
                    if current_actor_start_character_span < tool_id_start_char_offset:
                        tool_id = i
                        tool_id_start_char_offset = current_actor_start_character_span
            # For now, the actor consists of a single annotation made by one tool.
            actor_start_character_offset = annotations[tool_id][idxs[tool_id]][0][0]
            actor_end_character_offset = annotations[tool_id][idxs[tool_id]][0][1]
            # For the lexical head and type we accumulate candidates and later choose the
            # best one following the criteria described above.
            actor_lexical_heads = [annotations[tool_id][idxs[tool_id]][1]]
            actor_types = [annotations[tool_id][idxs[tool_id]][2]]
            idxs[tool_id] += 1 # Consume the annotation
            # Other tools may have identified the same actor.
            # We search for any intersection between the span of the current actor and the
            # pending annotations of the other tools — i.e. they found the same actor but
            # maybe missed part of it.
            # An intersection exists when a pending annotation starts at or before the
            # current actor's end offset; in that case we can extend the span and absorb
            # that annotation's tags.
            # Extending the end offset may create new intersections, so the process repeats
            # until the offsets stabilize.
            # On the first iteration, the tool that originated the actor is skipped so its
            # own annotation is not counted twice; from the second iteration on (i.e. after
            # the span grew) that tool may legitimately contribute further annotations.
            first_iteration = True
            while True:
                flag = False
                for i in range(nr_tools):
                    if first_iteration and i == tool_id:
                        continue
                    if idxs[i] < len(annotations[i]):
                        if annotations[i][idxs[i]][0][0] <= actor_end_character_offset:
                            if actor_end_character_offset < annotations[i][idxs[i]][0][1]:
                                actor_end_character_offset = annotations[i][idxs[i]][0][1]
                                flag = True # Span grew: another pass is needed
                            # Absorb the overlapping annotation's tags even when it is fully
                            # contained in the current span (no extension).
                            actor_lexical_heads.append(annotations[i][idxs[i]][1])
                            actor_types.append(annotations[i][idxs[i]][2])
                            idxs[i] = idxs[i] + 1
                first_iteration = False
                if not(flag):
                    break
            # Now that we have the largest possible span for the actor, fix the lexical
            # head and the type.
            # For the POS tag we only defined 'Noun' and 'Pronoun'; anything else appears
            # as 'UNDEF'. After dropping 'UNDEF' we take the most common label.
            rmv_undef_pos_tags = [ne for ne in actor_lexical_heads if ne != 'UNDEF']
            if rmv_undef_pos_tags:
                actor_lexical_head = max(rmv_undef_pos_tags, key=rmv_undef_pos_tags.count)
            else:
                continue # Discard the actor if its lexical head isn't a 'Noun' or 'Pronoun'
            # For the NE we also favor specific labels: drop 'Other' and take the most
            # common of what remains, falling back to 'Other' when nothing else exists.
            rmv_other_ne = [ne for ne in actor_types if ne != 'Other']
            if rmv_other_ne:
                actor_type = max(rmv_other_ne, key=rmv_other_ne.count)
            else:
                actor_type = 'Other'
            # Discard entities with types other than 'Per', 'Org', 'Loc', 'Obj', 'Nat' & 'Other'.
            # Used, typically, to eliminate dates and durations incorrectly identified as an actor.
            if actor_type in ['Per', 'Org', 'Loc', 'Obj', 'Nat', 'Other']:
                final_annotation.append(((actor_start_character_offset, actor_end_character_offset), actor_lexical_head, actor_type))
        return final_annotation
def extract_times(self, lang, text, publication_time):
"""
Parameters
----------
lang : str
the language of the text
current supported languages are: portuguese ('pt'); english ('en')
text : str
the text to be made the extraction
publication_time: str
the publication time
Returns
-------
list[tuple[tuple[int, int], str, str]]
a list consisting of the times identified, where each time is represented by a tuple
with the start and end character offset, it's value and type, respectively
"""
nr_tools = len(self.tools)
# If no tool specified, use all
if nr_tools == 0:
self.tools = TIME_EXTRACTION_TOOLS
nr_tools = len(self.tools)
# NOTE: The extraction is done with only one tool, so the result in just the extraction done by the tool
times = extract_times(self.tools[0], lang, text, publication_time) # :: [(time_start_offset, time_end_offset, time_type, time_value)]
return times
def extract_events(self, lang, text):
"""
Event extraction. Only has one tool so it only returns what the annotator finds.
@param lang: The language of the text
@param text: The text to be annotated
@return: Pandas DataFrame with each event found in the text and their character spans
"""
nr_tools = len(self.tools)
if nr_tools == 0:
self.tools = EVENT_EXTRACTION_TOOLS
nr_tools = len(self.tools)
events = extract_events(self.tools[0], lang, text)
return events
def extract_objectal_links(self, lang, text):
"""
Parameters
----------
lang : str
The language of the text.
Current supported languages are: english ('en')
text : str
The text to be made the extraction.
Returns
-------
list[list[tuple[int, int]]
A list with the clusters identified.
Each cluster is a list with tuples, where every tuple is a 2D tuple with the start and end character offset of the span corresponding to the same entity.
"""
nr_tools = len(self.tools)
# If no tool specified, use all
if nr_tools == 0:
self.tools = OBJECTAL_LINKS_RESOLUTION_TOOLS
nr_tools = len(self.tools)
# NOTE: The extraction is done with only one tool, so the result in just the extraction done by the tool
return extract_objectal_links(self.tools[0], lang, text)
def extract_semantic_role_links(self, lang, text):
"""
Semantic Role Link extraction. Only has one tool, so no tool merging is needed.
@param lang: The language of the text
@param text: The text to be annotated
@return: List of pandas DataFrames with the actors, events and their semantic roles to be linked
"""
nr_tools = len(self.tools)
# If no tool specified, use all
if nr_tools == 0:
self.tools = SEMANTIC_ROLE_LABELLING_TOOLS
nr_tools = len(self.tools)
srl_by_sentence = extract_semantic_role_links(self.tools[0], lang, text)
return srl_by_sentence
| 45.541353 | 237 | 0.623411 |
from text2story.annotators import ACTOR_EXTRACTION_TOOLS, TIME_EXTRACTION_TOOLS, OBJECTAL_LINKS_RESOLUTION_TOOLS
from text2story.annotators import EVENT_EXTRACTION_TOOLS, SEMANTIC_ROLE_LABELLING_TOOLS
from text2story.annotators import extract_actors, extract_times, extract_objectal_links, extract_events
from text2story.annotators import extract_semantic_role_links
class Annotator:
def __init__(self, tools):
self.tools = tools
def extract_actors(self, lang, text):
# For instance, some tool identified the entity with character span (2, 7) and other (5, 10). We are assuming that the entirely entity has the char. span of (2, 10).
# To do that, we are taking the first (in the sense of the char. span) identification made, and keep extending the character end offset as much as possible, with every entity that has a span that intersects with our current span.
# Also, note that we are obtaning a bunch of POS tags and NE IOB tags and we just want one.
# For the POS tag, we are taking the most common label.
# For the NE IOB tag, we do the same, but we favor all labels versus the generic 'Other' label. That is, even if the label 'Other' is the most common, if we have a more specific one, we use that, instead.
nr_tools = len(self.tools)
# If no tool specified, use all
if nr_tools == 0:
self.tools = ACTOR_EXTRACTION_TOOLS
nr_tools = len(self.tools)
# Gather the annotations made by the tools specified and combine the results
annotations = []
for tool in self.tools:
annotations.append(extract_actors(tool, lang, text))
final_annotation = []
idxs = [0] * nr_tools # Current actor position from each tool
while not(all([len(annotations[i]) == idxs[i] for i in range(nr_tools)])): # We finish when we consumed every actor identified by every tool
tool_id = -1
tool_id_start_char_offset = len(text) # len(self.text) acting as infinite
# Get the next entity chunk to be gather (the one with the lowest start character span)
for i in range(nr_tools):
if idxs[i] < len(annotations[i]):
current_actor = annotations[i][idxs[i]]
current_actor_start_character_span = current_actor[0][0]
if current_actor_start_character_span < tool_id_start_char_offset:
tool_id = i
tool_id_start_char_offset = current_actor_start_character_span
# For now, our actor consists of a unique annotation made by some tool
actor_start_character_offset = annotations[tool_id][idxs[tool_id]][0][0]
actor_end_character_offset = annotations[tool_id][idxs[tool_id]][0][1]
# For the lexical head and type we will accumulate the results and latter choose the best following a criterion
actor_lexical_heads = [annotations[tool_id][idxs[tool_id]][1]]
actor_types = [annotations[tool_id][idxs[tool_id]][2]]
idxs[tool_id] += 1 # Consume the annotation
# Other tools may have identified the same actor.
# We need to search if theres some intersection in the span of the identified actor, with the other tools.
# That is, they identified the same actor, but maybe missed some initial part of it.
# We identify this situation by looking to the character start char offset of the current actor identified by each tool,
# and if it happens to be less than our end char offset of our current identified actor, then we can extend the current information we have.
# Note that we may extend the end char offset, each force us to repete this process, till the offsets stabilize.
# In the first interation, the tool that first identified the actor, will be matched and add double information and we don't want that
first_iteration = True
while True:
flag = False
for i in range(nr_tools):
if first_iteration and i == tool_id:
continue
if idxs[i] < len(annotations[i]):
if annotations[i][idxs[i]][0][0] <= actor_end_character_offset:
if actor_end_character_offset < annotations[i][idxs[i]][0][1]:
actor_end_character_offset = annotations[i][idxs[i]][0][1]
flag = True
actor_lexical_heads.append(annotations[i][idxs[i]][1])
actor_types.append(annotations[i][idxs[i]][2])
idxs[i] = idxs[i] + 1
first_iteration = False
if not(flag):
break
rmv_undef_pos_tags = [ne for ne in actor_lexical_heads if ne != 'UNDEF']
if rmv_undef_pos_tags:
actor_lexical_head = max(rmv_undef_pos_tags, key=rmv_undef_pos_tags.count)
else:
continue
rmv_other_ne = [ne for ne in actor_types if ne != 'Other']
if rmv_other_ne:
actor_type = max(rmv_other_ne, key=rmv_other_ne.count)
else:
actor_type = 'Other'
if actor_type in ['Per', 'Org', 'Loc', 'Obj', 'Nat', 'Other']:
final_annotation.append(((actor_start_character_offset, actor_end_character_offset), actor_lexical_head, actor_type))
return final_annotation
def extract_times(self, lang, text, publication_time):
nr_tools = len(self.tools)
if nr_tools == 0:
self.tools = TIME_EXTRACTION_TOOLS
nr_tools = len(self.tools)
times = extract_times(self.tools[0], lang, text, publication_time)
return times
def extract_events(self, lang, text):
nr_tools = len(self.tools)
if nr_tools == 0:
self.tools = EVENT_EXTRACTION_TOOLS
nr_tools = len(self.tools)
events = extract_events(self.tools[0], lang, text)
return events
def extract_objectal_links(self, lang, text):
nr_tools = len(self.tools)
if nr_tools == 0:
self.tools = OBJECTAL_LINKS_RESOLUTION_TOOLS
nr_tools = len(self.tools)
return extract_objectal_links(self.tools[0], lang, text)
def extract_semantic_role_links(self, lang, text):
nr_tools = len(self.tools)
if nr_tools == 0:
self.tools = SEMANTIC_ROLE_LABELLING_TOOLS
nr_tools = len(self.tools)
srl_by_sentence = extract_semantic_role_links(self.tools[0], lang, text)
return srl_by_sentence
| true | true |
f72f4b9dc809d48633312a591811a639fb492796 | 8,934 | py | Python | sdk/network/azure-mgmt-network/azure/mgmt/network/v2017_11_01/operations/_load_balancer_backend_address_pools_operations.py | vbarbaresi/azure-sdk-for-python | 397ba46c51d001ff89c66b170f5576cf8f49c05f | [
"MIT"
] | 8 | 2021-01-13T23:44:08.000Z | 2021-03-17T10:13:36.000Z | sdk/network/azure-mgmt-network/azure/mgmt/network/v2017_11_01/operations/_load_balancer_backend_address_pools_operations.py | vbarbaresi/azure-sdk-for-python | 397ba46c51d001ff89c66b170f5576cf8f49c05f | [
"MIT"
] | null | null | null | sdk/network/azure-mgmt-network/azure/mgmt/network/v2017_11_01/operations/_load_balancer_backend_address_pools_operations.py | vbarbaresi/azure-sdk-for-python | 397ba46c51d001ff89c66b170f5576cf8f49c05f | [
"MIT"
] | null | null | null | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class LoadBalancerBackendAddressPoolsOperations(object):
"""LoadBalancerBackendAddressPoolsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2017_11_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list(
self,
resource_group_name, # type: str
load_balancer_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["models.LoadBalancerBackendAddressPoolListResult"]
"""Gets all the load balancer backed address pools.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param load_balancer_name: The name of the load balancer.
:type load_balancer_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either LoadBalancerBackendAddressPoolListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2017_11_01.models.LoadBalancerBackendAddressPoolListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.LoadBalancerBackendAddressPoolListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-11-01"
accept = "application/json, text/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('LoadBalancerBackendAddressPoolListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}/backendAddressPools'} # type: ignore
def get(
self,
resource_group_name, # type: str
load_balancer_name, # type: str
backend_address_pool_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "models.BackendAddressPool"
"""Gets load balancer backend address pool.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param load_balancer_name: The name of the load balancer.
:type load_balancer_name: str
:param backend_address_pool_name: The name of the backend address pool.
:type backend_address_pool_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: BackendAddressPool, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2017_11_01.models.BackendAddressPool
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.BackendAddressPool"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-11-01"
accept = "application/json, text/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'),
'backendAddressPoolName': self._serialize.url("backend_address_pool_name", backend_address_pool_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('BackendAddressPool', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}/backendAddressPools/{backendAddressPoolName}'} # type: ignore
| 48.291892 | 218 | 0.671032 |
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models
if TYPE_CHECKING:
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class LoadBalancerBackendAddressPoolsOperations(object):
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list(
self,
resource_group_name,
load_balancer_name,
**kwargs
):
cls = kwargs.pop('cls', None)
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-11-01"
accept = "application/json, text/json"
def prepare_request(next_link=None):
header_parameters = {}
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
url = self.list.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {}
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('LoadBalancerBackendAddressPoolListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}/backendAddressPools'}
def get(
self,
resource_group_name,
load_balancer_name,
backend_address_pool_name,
**kwargs
):
cls = kwargs.pop('cls', None)
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-11-01"
accept = "application/json, text/json"
url = self.get.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'),
'backendAddressPoolName': self._serialize.url("backend_address_pool_name", backend_address_pool_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
header_parameters = {}
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('BackendAddressPool', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}/backendAddressPools/{backendAddressPoolName}'}
| true | true |
f72f4c43e27a41f58a040e1b5bf6c7a3a262c3ec | 745 | py | Python | trypython/stdlib/re_/re03.py | devlights/try-python | 67d1d26476794da81c8c76662486108ce03f8fb4 | [
"MIT"
] | 4 | 2019-10-21T11:42:11.000Z | 2020-03-12T16:35:51.000Z | trypython/stdlib/re_/re03.py | devlights/try-python | 67d1d26476794da81c8c76662486108ce03f8fb4 | [
"MIT"
] | 80 | 2017-02-08T07:55:37.000Z | 2021-10-06T06:30:30.000Z | trypython/stdlib/re_/re03.py | devlights/try-python | 67d1d26476794da81c8c76662486108ce03f8fb4 | [
"MIT"
] | 1 | 2020-03-12T04:37:17.000Z | 2020-03-12T04:37:17.000Z | """
正規表現のサンプルです。
最長一致と最短一致について
"""
import re
from trypython.common.commoncls import SampleBase
from trypython.stdlib.re_ import util
class Sample(SampleBase):
def exec(self):
# ---------------------------------------------
# 正規表現 (最長一致と最短一致)
#
# 正規表現はデフォルトで閉包を表すメタキャラクタ「*」は
# 「最長一致」を行う。「最短一致」を行うには、「*?」を
# 使う。
# ---------------------------------------------
message = 'hello world hello world'
pattern = r'h.*d'
# 最長一致
m = re.match(pattern, message)
util.print_match_object(m)
# 最短一致
pattern = r'h.*?d'
m = re.match(pattern, message)
util.print_match_object(m)
def go():
obj = Sample()
obj.exec()
| 20.135135 | 55 | 0.485906 | import re
from trypython.common.commoncls import SampleBase
from trypython.stdlib.re_ import util
class Sample(SampleBase):
def exec(self):
message = 'hello world hello world'
pattern = r'h.*d'
m = re.match(pattern, message)
util.print_match_object(m)
pattern = r'h.*?d'
m = re.match(pattern, message)
util.print_match_object(m)
def go():
obj = Sample()
obj.exec()
| true | true |
f72f4dfc280dbab31721c4c7456f8993a0b4131e | 142 | py | Python | code/abc110_c_03.py | KoyanagiHitoshi/AtCoder | 731892543769b5df15254e1f32b756190378d292 | [
"MIT"
] | 3 | 2019-08-16T16:55:48.000Z | 2021-04-11T10:21:40.000Z | code/abc110_c_03.py | KoyanagiHitoshi/AtCoder | 731892543769b5df15254e1f32b756190378d292 | [
"MIT"
] | null | null | null | code/abc110_c_03.py | KoyanagiHitoshi/AtCoder | 731892543769b5df15254e1f32b756190378d292 | [
"MIT"
] | null | null | null | from collections import Counter
S=Counter(input())
T=Counter(input())
s=S.values()
t=T.values()
print("Yes" if sorted(s)==sorted(t) else "No") | 23.666667 | 46 | 0.704225 | from collections import Counter
S=Counter(input())
T=Counter(input())
s=S.values()
t=T.values()
print("Yes" if sorted(s)==sorted(t) else "No") | true | true |
f72f4f61c468d6cc2a2a7fa806aa36b41325acaa | 4,969 | py | Python | readwrite.py | ciakkig/book-snake | a5aa783b31029981cb4e302d6ff213c0ace828ba | [
"MIT"
] | null | null | null | readwrite.py | ciakkig/book-snake | a5aa783b31029981cb4e302d6ff213c0ace828ba | [
"MIT"
] | null | null | null | readwrite.py | ciakkig/book-snake | a5aa783b31029981cb4e302d6ff213c0ace828ba | [
"MIT"
] | null | null | null | ### functions for reading from and writing to input files
def read_books(books_file='data/input/books.txt'):
# reads the file containing the books
# ('books.txt' by default)
# and returns the list of tuples:
# [(author, title), ...]
books = []
try:
with open(books_file) as file:
for line in file:
line = line.strip()
if len(line) != 0:
line = line
book = line.split(',')
books.append(tuple(book))
except FileNotFoundError:
raise FileNotFoundError(f'Books file at {books_file} not found. FIX: make sure the file exists, is in the correct directory and has the correct name')
return books
def read_ratings(ratings_file='data/input/ratings.txt'):
# reads the file containing the rating vectors
# ('ratings.txt' by default)
# computes the rating index for every user
# and returns the dictionary of tuples:
# {user: (rating, ...), ...}
ratings = {}
try:
with open(ratings_file) as file:
user_flag = True
for line in file:
line = line.strip()
if len(line) != 0:
if user_flag:
user = line
else:
rating = line.split()
rating = tuple(int(r) for r in rating)
ratings.update({user: rating})
user_flag = not user_flag
except FileNotFoundError:
raise FileNotFoundError(f'Ratings file at {ratings_file} not found. FIX: make sure the file exists, is in the correct directory and has the correct name')
return ratings
def write_ratings(user, ratings, ratings_file='data/input/ratings.txt'):
# takes the new user and their new ratings:
# (rating, ...)
# and writes them to the file containing the rating vectors
# ('ratings.txt' by default)
with open(ratings_file, 'a') as file:
print(user, file=file)
for elt in ratings:
print(str(elt) + ' ', end='', file=file)
print('', file=file)
def printer(recommendations, user, books, ratings, rating_thr, score_thr):
# takes the recommendations:
# {(user, score): [(book, rating), ...], ...}
# and outputs them both to standard output
# and to the main output file
# 'output-{name}.txt'
with open(f'data/output/output-{user}.txt', 'w') as file:
s = f'* Recommendations for: {user} *'
stars = '*' * len(s)
s = stars + '\n' + s + '\n' + stars
print(f'Here are your recommendations based on the similarity algorithm, {user}:')
print(s, end='\n\n\n', file=file)
j = 0
for key, val in recommendations.items():
r_user, score = key
s = f'Recommended from: {r_user}'.ljust(55) + f'({score} similarity)'.rjust(15)
s += '\n' + '-' * len(s)
print(s, end='\n')
print(s, end='\n\n', file=file)
for i, elt in enumerate(val):
n, rating = elt
author, book = books[n]
j += 1
s = f'\t{j:2d}.\t{book}'.ljust(51) + f'(rated {rating})'.rjust(10) + f'\n\t\t\tby {author}'
print(s, end='\n')
print(s, end='\n\n', file=file)
print('')
print('', file=file)
s = f'''{j}\tRecommendations based on the similarity algorithm:
for:\t\t\t\t{user}
\twith ratings:\t{ratings[user]}
with rating threshold of:\t{rating_thr}
and similarity threshold of:\t{score_thr}'''
print(s, file=file)
print(f'Check the output file at /data/output/output-{user}.txt and the algorithm log at logs/recommend-{user}_log.txt for more details.')
print(f'Check the recommendation algorithm log at logs/recommend-{user}_log.txt for more details about the recommendation process.', file=file)
def random_printer(r_recommendations, user, books):
# takes the random recommendations:
# [book, ...]
# and outputs them both to standard output
# and to the main output file
# 'output-{user}.txt'
with open(f'data/output/output-{user}.txt', 'w') as file:
s = f'* Random recommendations for: {user} *'
stars = '*' * len(s)
s = stars + '\n' + s + '\n' + stars
print(f'Here are your random recommendations, {user}:')
print(s, end='\n\n\n', file=file)
for i, n in enumerate(r_recommendations):
author, book = books[n]
s = f'\t{j}.\t{book}'.ljust(50) + f'\n\t\t\tby {author}'
print(s, end='\n')
print(s, end='\n\n', file=file)
print('')
print('', file=file)
s = f'''{len(r_recommendations)}\tRandom recommendations:
for:\t\t\t\t{user}
\tsince your ratings are all 0:\t{ratings[user]}'''
print(s, file=file)
| 35.241135 | 162 | 0.555041 | open(books_file) as file:
for line in file:
line = line.strip()
if len(line) != 0:
line = line
book = line.split(',')
books.append(tuple(book))
except FileNotFoundError:
raise FileNotFoundError(f'Books file at {books_file} not found. FIX: make sure the file exists, is in the correct directory and has the correct name')
return books
def read_ratings(ratings_file='data/input/ratings.txt'):
ratings = {}
try:
with open(ratings_file) as file:
user_flag = True
for line in file:
line = line.strip()
if len(line) != 0:
if user_flag:
user = line
else:
rating = line.split()
rating = tuple(int(r) for r in rating)
ratings.update({user: rating})
user_flag = not user_flag
except FileNotFoundError:
raise FileNotFoundError(f'Ratings file at {ratings_file} not found. FIX: make sure the file exists, is in the correct directory and has the correct name')
return ratings
def write_ratings(user, ratings, ratings_file='data/input/ratings.txt'):
with open(ratings_file, 'a') as file:
print(user, file=file)
for elt in ratings:
print(str(elt) + ' ', end='', file=file)
print('', file=file)
def printer(recommendations, user, books, ratings, rating_thr, score_thr):
with open(f'data/output/output-{user}.txt', 'w') as file:
s = f'* Recommendations for: {user} *'
stars = '*' * len(s)
s = stars + '\n' + s + '\n' + stars
print(f'Here are your recommendations based on the similarity algorithm, {user}:')
print(s, end='\n\n\n', file=file)
j = 0
for key, val in recommendations.items():
r_user, score = key
s = f'Recommended from: {r_user}'.ljust(55) + f'({score} similarity)'.rjust(15)
s += '\n' + '-' * len(s)
print(s, end='\n')
print(s, end='\n\n', file=file)
for i, elt in enumerate(val):
n, rating = elt
author, book = books[n]
j += 1
s = f'\t{j:2d}.\t{book}'.ljust(51) + f'(rated {rating})'.rjust(10) + f'\n\t\t\tby {author}'
print(s, end='\n')
print(s, end='\n\n', file=file)
print('')
print('', file=file)
s = f'''{j}\tRecommendations based on the similarity algorithm:
for:\t\t\t\t{user}
\twith ratings:\t{ratings[user]}
with rating threshold of:\t{rating_thr}
and similarity threshold of:\t{score_thr}'''
print(s, file=file)
print(f'Check the output file at /data/output/output-{user}.txt and the algorithm log at logs/recommend-{user}_log.txt for more details.')
print(f'Check the recommendation algorithm log at logs/recommend-{user}_log.txt for more details about the recommendation process.', file=file)
def random_printer(r_recommendations, user, books):
with open(f'data/output/output-{user}.txt', 'w') as file:
s = f'* Random recommendations for: {user} *'
stars = '*' * len(s)
s = stars + '\n' + s + '\n' + stars
print(f'Here are your random recommendations, {user}:')
print(s, end='\n\n\n', file=file)
for i, n in enumerate(r_recommendations):
author, book = books[n]
s = f'\t{j}.\t{book}'.ljust(50) + f'\n\t\t\tby {author}'
print(s, end='\n')
print(s, end='\n\n', file=file)
print('')
print('', file=file)
s = f'''{len(r_recommendations)}\tRandom recommendations:
for:\t\t\t\t{user}
\tsince your ratings are all 0:\t{ratings[user]}'''
print(s, file=file)
| true | true |
f72f50b7cc2ce56f0c04120d8d35bdd4ff992462 | 26,731 | py | Python | ixnetwork_restpy/testplatform/sessions/ixnetwork/traffic/trafficitem/configelement/stack/fCoEGIET_template.py | OpenIxia/ixnetwork_restpy | f628db450573a104f327cf3c737ca25586e067ae | [
"MIT"
] | 20 | 2019-05-07T01:59:14.000Z | 2022-02-11T05:24:47.000Z | ixnetwork_restpy/testplatform/sessions/ixnetwork/traffic/trafficitem/configelement/stack/fCoEGIET_template.py | OpenIxia/ixnetwork_restpy | f628db450573a104f327cf3c737ca25586e067ae | [
"MIT"
] | 60 | 2019-04-03T18:59:35.000Z | 2022-02-22T12:05:05.000Z | ixnetwork_restpy/testplatform/sessions/ixnetwork/traffic/trafficitem/configelement/stack/fCoEGIET_template.py | OpenIxia/ixnetwork_restpy | f628db450573a104f327cf3c737ca25586e067ae | [
"MIT"
] | 13 | 2019-05-20T10:48:31.000Z | 2021-10-06T07:45:44.000Z | from ixnetwork_restpy.base import Base
from ixnetwork_restpy.files import Files
class FCoEGIET(Base):
    """Traffic template for an FCoE GIET (Get Interconnect Element Type) frame.

    Each property below returns a ``Multivalue`` bound to one field of the
    ``fCoEGIET`` header stack, looked up through ``_SDM_ATT_MAP``.  The
    ``Multivalue`` import is deferred to property-access time — presumably
    to avoid a circular import; confirm before hoisting it to module level.
    """
    __slots__ = ()
    _SDM_NAME = 'fCoEGIET'
    # Maps each Python property name to its server-side SDM attribute path.
    # The trailing "-N" suffix is part of the attribute identifier; do not
    # change entries or their order without regenerating from the SDM model.
    _SDM_ATT_MAP = {
        'FcoeHeaderVersion': 'fCoEGIET.header.fcoeHeader.version-1',
        'FcoeHeaderReserved': 'fCoEGIET.header.fcoeHeader.reserved-2',
        'FcoeHeaderESOF': 'fCoEGIET.header.fcoeHeader.eSOF-3',
        'DeviceDataFramesDeviceDataInfo': 'fCoEGIET.header.fcHeader.rCTL.deviceDataFrames.deviceDataInfo-4',
        'RCTLReserved': 'fCoEGIET.header.fcHeader.rCTL.reserved-5',
        'ExtendedLinkServicesInfo': 'fCoEGIET.header.fcHeader.rCTL.extendedLinkServices.info-6',
        'Fc4LinkDataInfo': 'fCoEGIET.header.fcHeader.rCTL.fc4LinkData.info-7',
        'VideoDataInfo': 'fCoEGIET.header.fcHeader.rCTL.videoData.info-8',
        'ExtendedHeaderInfo': 'fCoEGIET.header.fcHeader.rCTL.extendedHeader.info-9',
        'BasicLinkServicesInfo': 'fCoEGIET.header.fcHeader.rCTL.basicLinkServices.info-10',
        'LinkControlFramesInfo': 'fCoEGIET.header.fcHeader.rCTL.linkControlFrames.info-11',
        'ExtendedRoutingInfo': 'fCoEGIET.header.fcHeader.rCTL.extendedRouting.info-12',
        'FcHeaderDstId': 'fCoEGIET.header.fcHeader.dstId-13',
        'FcHeaderCsCTLPriority': 'fCoEGIET.header.fcHeader.csCTLPriority-14',
        'FcHeaderSrcId': 'fCoEGIET.header.fcHeader.srcId-15',
        'FcHeaderType': 'fCoEGIET.header.fcHeader.type-16',
        'FCTLCustom': 'fCoEGIET.header.fcHeader.fCTL.custom-17',
        'BuildFCTLExchangeContext': 'fCoEGIET.header.fcHeader.fCTL.buildFCTL.exchangeContext-18',
        'BuildFCTLSequenceContext': 'fCoEGIET.header.fcHeader.fCTL.buildFCTL.sequenceContext-19',
        'BuildFCTLFirstSequence': 'fCoEGIET.header.fcHeader.fCTL.buildFCTL.firstSequence-20',
        'BuildFCTLLastSequence': 'fCoEGIET.header.fcHeader.fCTL.buildFCTL.lastSequence-21',
        'BuildFCTLEndSequence': 'fCoEGIET.header.fcHeader.fCTL.buildFCTL.endSequence-22',
        'BuildFCTLEndConnection': 'fCoEGIET.header.fcHeader.fCTL.buildFCTL.endConnection-23',
        'BuildFCTLCsCTLPriority': 'fCoEGIET.header.fcHeader.fCTL.buildFCTL.csCTLPriority-24',
        'BuildFCTLSequenceInitiative': 'fCoEGIET.header.fcHeader.fCTL.buildFCTL.sequenceInitiative-25',
        'BuildFCTLFcXIDReassigned': 'fCoEGIET.header.fcHeader.fCTL.buildFCTL.fcXIDReassigned-26',
        'BuildFCTLFcInvalidateXID': 'fCoEGIET.header.fcHeader.fCTL.buildFCTL.fcInvalidateXID-27',
        'BuildFCTLAckForm': 'fCoEGIET.header.fcHeader.fCTL.buildFCTL.ackForm-28',
        'BuildFCTLFcDataCompression': 'fCoEGIET.header.fcHeader.fCTL.buildFCTL.fcDataCompression-29',
        'BuildFCTLFcDataEncryption': 'fCoEGIET.header.fcHeader.fCTL.buildFCTL.fcDataEncryption-30',
        'BuildFCTLRetransmittedSequence': 'fCoEGIET.header.fcHeader.fCTL.buildFCTL.retransmittedSequence-31',
        'BuildFCTLUnidirectionalTransmit': 'fCoEGIET.header.fcHeader.fCTL.buildFCTL.unidirectionalTransmit-32',
        'BuildFCTLContinueSeqCondition': 'fCoEGIET.header.fcHeader.fCTL.buildFCTL.continueSeqCondition-33',
        'BuildFCTLAbortSeqCondition': 'fCoEGIET.header.fcHeader.fCTL.buildFCTL.abortSeqCondition-34',
        'BuildFCTLRelativeOffsetPresent': 'fCoEGIET.header.fcHeader.fCTL.buildFCTL.relativeOffsetPresent-35',
        'BuildFCTLExchangeReassembly': 'fCoEGIET.header.fcHeader.fCTL.buildFCTL.exchangeReassembly-36',
        'BuildFCTLFillBytes': 'fCoEGIET.header.fcHeader.fCTL.buildFCTL.fillBytes-37',
        'FcHeaderSeqID': 'fCoEGIET.header.fcHeader.seqID-38',
        'FcHeaderDfCTL': 'fCoEGIET.header.fcHeader.dfCTL-39',
        'FcHeaderSeqCNT': 'fCoEGIET.header.fcHeader.seqCNT-40',
        'FcHeaderOxID': 'fCoEGIET.header.fcHeader.oxID-41',
        'FcHeaderRxID': 'fCoEGIET.header.fcHeader.rxID-42',
        'FcHeaderParameter': 'fCoEGIET.header.fcHeader.parameter-43',
        'FcCTRevision': 'fCoEGIET.header.fcCT.revision-44',
        'FcCTInId': 'fCoEGIET.header.fcCT.inId-45',
        'FcCTGsType': 'fCoEGIET.header.fcCT.gsType-46',
        'FcCTGsSubtype': 'fCoEGIET.header.fcCT.gsSubtype-47',
        'FcCTOptions': 'fCoEGIET.header.fcCT.options-48',
        'FcCTReserved': 'fCoEGIET.header.fcCT.reserved-49',
        'FCSOpcode': 'fCoEGIET.header.FCS.opcode-50',
        'FCSMaxsize': 'fCoEGIET.header.FCS.maxsize-51',
        'FCSReserved': 'fCoEGIET.header.FCS.reserved-52',
        'FCSInterconnectElementName': 'fCoEGIET.header.FCS.interconnectElementName-53',
        'FcCRCAutoCRC': 'fCoEGIET.header.fcCRC.autoCRC-54',
        'FcCRCGenerateBadCRC': 'fCoEGIET.header.fcCRC.generateBadCRC-55',
        'FcTrailerEEOF': 'fCoEGIET.header.fcTrailer.eEOF-56',
        'FcTrailerReserved': 'fCoEGIET.header.fcTrailer.reserved-57',
    }
    def __init__(self, parent, list_op=False):
        """Initialize the template node under *parent*; *list_op* is
        forwarded unchanged to ``Base.__init__``."""
        super(FCoEGIET, self).__init__(parent, list_op)
    @property
    def FcoeHeaderVersion(self):
        """
        Display Name: Version
        Default Value: 0
        Value Format: decimal
        """
        from ixnetwork_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FcoeHeaderVersion']))
    @property
    def FcoeHeaderReserved(self):
        """
        Display Name: Reserved
        Default Value: 0x00
        Value Format: hex
        """
        from ixnetwork_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FcoeHeaderReserved']))
    @property
    def FcoeHeaderESOF(self):
        """
        Display Name: E-SOF
        Default Value: 54
        Value Format: decimal
        Available enum values: SOFf - Fabric, 40, SOFi4 - Initiate Class 4, 41, SOFi2 - Initiate Class 2, 45, SOFi3 - Initiate Class 3, 46, SOFn4 - Normal Class 4, 49, SOFn2 - Normal Class 2, 53, SOFn3 - Normal Class 3, 54, SOFc4 - Connect Class 4, 57, SOFn1 - Normal Class 1 or 6, 250
        """
        from ixnetwork_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FcoeHeaderESOF']))
    @property
    def DeviceDataFramesDeviceDataInfo(self):
        """
        Display Name: Information
        Default Value: 0
        Value Format: decimal
        Available enum values: Uncategorized Information, 0, Solicited Data, 1, Unsolicited Control, 2, Solicited Control, 3, Unsolicited Data, 4, Data Descriptor, 5, Unsolicited Command, 6, Command Status, 7
        """
        from ixnetwork_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['DeviceDataFramesDeviceDataInfo']))
    @property
    def RCTLReserved(self):
        """
        Display Name: Reserved
        Default Value: 0
        Value Format: hex
        """
        from ixnetwork_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['RCTLReserved']))
    @property
    def ExtendedLinkServicesInfo(self):
        """
        Display Name: Information
        Default Value: 33
        Value Format: decimal
        Available enum values: Solicited Data, 32, Request, 33, Reply, 34
        """
        from ixnetwork_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['ExtendedLinkServicesInfo']))
    @property
    def Fc4LinkDataInfo(self):
        """
        Display Name: Information
        Default Value: 48
        Value Format: decimal
        Available enum values: Uncategorized Information, 48, Solicited Data, 49, Unsolicited Control, 50, Solicited Control, 51, Unsolicited Data, 52, Data Descriptor, 53, Unsolicited Command, 54, Command Status, 55
        """
        from ixnetwork_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Fc4LinkDataInfo']))
    @property
    def VideoDataInfo(self):
        """
        Display Name: Information
        Default Value: 68
        Value Format: decimal
        Available enum values: Unsolicited Data, 68
        """
        from ixnetwork_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['VideoDataInfo']))
    @property
    def ExtendedHeaderInfo(self):
        """
        Display Name: Information
        Default Value: 80
        Value Format: decimal
        Available enum values: Virtual Fabric Tagging Header, 80, Inter Fabric Routing Header, 81, Encapsulation Header, 82
        """
        from ixnetwork_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['ExtendedHeaderInfo']))
    @property
    def BasicLinkServicesInfo(self):
        """
        Display Name: Information
        Default Value: 128
        Value Format: decimal
        Available enum values: No Operation, 128, Abort Sequence, 129, Remove Connection, 130, Basic Accept, 132, Basic Reject, 133, Dedicated Connection Preempted, 134
        """
        from ixnetwork_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['BasicLinkServicesInfo']))
    @property
    def LinkControlFramesInfo(self):
        """
        Display Name: Information
        Default Value: 192
        Value Format: decimal
        Available enum values: Acknowledge_1, 128, Acknowledge_0, 129, Nx Port Reject, 130, Fabric Reject, 131, Nx Port Busy, 132, Fabric Busy to Data Frame, 133, Fabric Busy to Link Control Frame, 134, Link Credit Reset, 135, Notify, 136, End, 137
        """
        from ixnetwork_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['LinkControlFramesInfo']))
    @property
    def ExtendedRoutingInfo(self):
        """
        Display Name: Information
        Default Value: 240
        Value Format: decimal
        Available enum values: Vendor Unique, 240
        """
        from ixnetwork_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['ExtendedRoutingInfo']))
    @property
    def FcHeaderDstId(self):
        """
        Display Name: Destination ID
        Default Value: 0
        Value Format: fCID
        """
        from ixnetwork_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FcHeaderDstId']))
    @property
    def FcHeaderCsCTLPriority(self):
        """
        Display Name: CS_CTL/Priority
        Default Value: 0
        Value Format: hex
        """
        from ixnetwork_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FcHeaderCsCTLPriority']))
    @property
    def FcHeaderSrcId(self):
        """
        Display Name: Source ID
        Default Value: 0
        Value Format: fCID
        """
        from ixnetwork_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FcHeaderSrcId']))
    @property
    def FcHeaderType(self):
        """
        Display Name: Type
        Default Value: 0
        Value Format: hex
        """
        from ixnetwork_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FcHeaderType']))
    @property
    def FCTLCustom(self):
        """
        Display Name: Custom
        Default Value: 0
        Value Format: hex
        """
        from ixnetwork_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FCTLCustom']))
    @property
    def BuildFCTLExchangeContext(self):
        """
        Display Name: Exchange Context
        Default Value: 0
        Value Format: decimal
        Available enum values: Originator, 0, Receipient, 1
        """
        from ixnetwork_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['BuildFCTLExchangeContext']))
    @property
    def BuildFCTLSequenceContext(self):
        """
        Display Name: Sequence Context
        Default Value: 0
        Value Format: decimal
        Available enum values: Initiator, 0, Receipient, 1
        """
        from ixnetwork_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['BuildFCTLSequenceContext']))
    @property
    def BuildFCTLFirstSequence(self):
        """
        Display Name: First Sequence
        Default Value: 0
        Value Format: decimal
        Available enum values: Other, 0, First, 1
        """
        from ixnetwork_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['BuildFCTLFirstSequence']))
    @property
    def BuildFCTLLastSequence(self):
        """
        Display Name: Last Sequence
        Default Value: 0
        Value Format: decimal
        Available enum values: Other, 0, Last, 1
        """
        from ixnetwork_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['BuildFCTLLastSequence']))
    @property
    def BuildFCTLEndSequence(self):
        """
        Display Name: End Sequence
        Default Value: 0
        Value Format: decimal
        Available enum values: Other, 0, Last, 1
        """
        from ixnetwork_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['BuildFCTLEndSequence']))
    @property
    def BuildFCTLEndConnection(self):
        """
        Display Name: End Connection
        Default Value: 0
        Value Format: decimal
        Available enum values: Alive, 0, Pending, 1
        """
        from ixnetwork_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['BuildFCTLEndConnection']))
    @property
    def BuildFCTLCsCTLPriority(self):
        """
        Display Name: CS_CTL/Priority
        Default Value: 0
        Value Format: decimal
        Available enum values: CS_CTL, 0, Priority, 1
        """
        from ixnetwork_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['BuildFCTLCsCTLPriority']))
    @property
    def BuildFCTLSequenceInitiative(self):
        """
        Display Name: Sequence Initiative
        Default Value: 0
        Value Format: decimal
        Available enum values: Hold, 0, Transfer, 1
        """
        from ixnetwork_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['BuildFCTLSequenceInitiative']))
    @property
    def BuildFCTLFcXIDReassigned(self):
        """
        Display Name: FC XID Reassigned
        Default Value: 0
        Value Format: decimal
        Available enum values: No, 0, Yes, 1
        """
        from ixnetwork_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['BuildFCTLFcXIDReassigned']))
    @property
    def BuildFCTLFcInvalidateXID(self):
        """
        Display Name: FC Invalidate XID
        Default Value: 0
        Value Format: decimal
        Available enum values: No, 0, Yes, 1
        """
        from ixnetwork_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['BuildFCTLFcInvalidateXID']))
    @property
    def BuildFCTLAckForm(self):
        """
        Display Name: ACK_Form
        Default Value: 0
        Value Format: decimal
        Available enum values: No assistance provided, 0, ACK_1 Required, 1, reserved, 2, Ack_0 Required, 3
        """
        from ixnetwork_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['BuildFCTLAckForm']))
    @property
    def BuildFCTLFcDataCompression(self):
        """
        Display Name: FC Data Compression
        Default Value: 0
        Value Format: decimal
        Available enum values: No, 0, Yes, 1
        """
        from ixnetwork_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['BuildFCTLFcDataCompression']))
    @property
    def BuildFCTLFcDataEncryption(self):
        """
        Display Name: FC Data Encryption
        Default Value: 0
        Value Format: decimal
        Available enum values: No, 0, Yes, 1
        """
        from ixnetwork_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['BuildFCTLFcDataEncryption']))
    @property
    def BuildFCTLRetransmittedSequence(self):
        """
        Display Name: Retransmitted Sequence
        Default Value: 0
        Value Format: decimal
        Available enum values: Original, 0, Retransmission, 1
        """
        from ixnetwork_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['BuildFCTLRetransmittedSequence']))
    @property
    def BuildFCTLUnidirectionalTransmit(self):
        """
        Display Name: Unidirectional Transmit
        Default Value: 0
        Value Format: decimal
        Available enum values: Bi-directional, 0, Unidirectional, 1
        """
        from ixnetwork_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['BuildFCTLUnidirectionalTransmit']))
    @property
    def BuildFCTLContinueSeqCondition(self):
        """
        Display Name: Continue Sequence Condition
        Default Value: 0
        Value Format: decimal
        Available enum values: No information, 0, Sequence to follow-immediately, 1, Squence to follow-soon, 2, Sequence to follow-delayed, 3
        """
        from ixnetwork_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['BuildFCTLContinueSeqCondition']))
    @property
    def BuildFCTLAbortSeqCondition(self):
        """
        Display Name: Abort Sequence Condition
        Default Value: 0
        Value Format: decimal
        Available enum values: 0x00, 0, 0x01, 1, 0x10, 2, 0x11, 3
        """
        from ixnetwork_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['BuildFCTLAbortSeqCondition']))
    @property
    def BuildFCTLRelativeOffsetPresent(self):
        """
        Display Name: Relative Offset Present
        Default Value: 0
        Value Format: decimal
        Available enum values: Parameter field defined, 0, Relative offset, 1
        """
        from ixnetwork_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['BuildFCTLRelativeOffsetPresent']))
    @property
    def BuildFCTLExchangeReassembly(self):
        """
        Display Name: Exchange Reassembly
        Default Value: 0
        Value Format: decimal
        Available enum values: off, 0, on, 1
        """
        from ixnetwork_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['BuildFCTLExchangeReassembly']))
    @property
    def BuildFCTLFillBytes(self):
        """
        Display Name: Fill Bytes
        Default Value: 0
        Value Format: decimal
        Available enum values: 0 bytes of fill, 0, 1 bytes of fill, 1, 2 bytes of fill, 2, 3 bytes of fill, 3
        """
        from ixnetwork_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['BuildFCTLFillBytes']))
    @property
    def FcHeaderSeqID(self):
        """
        Display Name: SEQ_ID
        Default Value: 0
        Value Format: hex
        """
        from ixnetwork_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FcHeaderSeqID']))
    @property
    def FcHeaderDfCTL(self):
        """
        Display Name: DF_CTL
        Default Value: 0
        Value Format: hex
        """
        from ixnetwork_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FcHeaderDfCTL']))
    @property
    def FcHeaderSeqCNT(self):
        """
        Display Name: SEQ_CNT
        Default Value: 0
        Value Format: hex
        """
        from ixnetwork_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FcHeaderSeqCNT']))
    @property
    def FcHeaderOxID(self):
        """
        Display Name: OX_ID
        Default Value: 0
        Value Format: hex
        """
        from ixnetwork_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FcHeaderOxID']))
    @property
    def FcHeaderRxID(self):
        """
        Display Name: RX_ID
        Default Value: 0
        Value Format: hex
        """
        from ixnetwork_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FcHeaderRxID']))
    @property
    def FcHeaderParameter(self):
        """
        Display Name: Parameter
        Default Value: 0
        Value Format: hex
        """
        from ixnetwork_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FcHeaderParameter']))
    @property
    def FcCTRevision(self):
        """
        Display Name: Revision
        Default Value: 0x01
        Value Format: hex
        """
        from ixnetwork_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FcCTRevision']))
    @property
    def FcCTInId(self):
        """
        Display Name: IN_ID
        Default Value: 0x000000
        Value Format: fCID
        """
        from ixnetwork_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FcCTInId']))
    @property
    def FcCTGsType(self):
        """
        Display Name: GS_Type
        Default Value: 250
        Value Format: decimal
        Available enum values: Event Service, 244, Key Distribution Service, 247, Alias Service, 248, Management Service, 250, Time Service, 251, Directory Service, 252
        """
        from ixnetwork_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FcCTGsType']))
    @property
    def FcCTGsSubtype(self):
        """
        Display Name: GS_Subtype
        Default Value: 0x01
        Value Format: hex
        Available enum values: Fabric Configuration Server, 1, Unzoned Name Server, 2, Fabric Zone Server, 3, Lock Server, 4, Performance Server, 5, Security Policy Server, 6, Security Information Server, 7
        """
        from ixnetwork_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FcCTGsSubtype']))
    @property
    def FcCTOptions(self):
        """
        Display Name: Options
        Default Value: 0x00
        Value Format: hex
        """
        from ixnetwork_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FcCTOptions']))
    @property
    def FcCTReserved(self):
        """
        Display Name: Reserved
        Default Value: 0x00
        Value Format: hex
        """
        from ixnetwork_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FcCTReserved']))
    @property
    def FCSOpcode(self):
        """
        Display Name: Command/Response Code
        Default Value: 273
        Value Format: decimal
        Available enum values: GTIN, 256, GIEL, 257, GIET, 273, GDID, 274, GMID, 275, GFN, 276, GIELN, 277, GMAL, 278, GIEIL, 279, GPL, 280, GPT, 289, GPPN, 290, GAPNL, 292, GPS, 294, GPSC, 295, GSES, 304, GIEAG, 320, GPAG, 321, GPLNL, 401, GPLT, 402, GPLML, 403, GPAB, 407, GNPL, 417, GPNL, 418, GPFCP, 420, GPLI, 421, GNID, 433, RIELN, 533, RPL, 640, RPLN, 657, RPLT, 658, RPLM, 659, RPAB, 664, RPFCP, 666, RPLI, 667, DPL, 896, DPLN, 913, DPLM, 914, DPLML, 915, DPLI, 916, DPAB, 917, DPALL, 927, FTR, 1024, FPNG, 1025
        """
        from ixnetwork_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FCSOpcode']))
    @property
    def FCSMaxsize(self):
        """
        Display Name: Maximum/Residual Size
        Default Value: 0x0000
        Value Format: hex
        """
        from ixnetwork_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FCSMaxsize']))
    @property
    def FCSReserved(self):
        """
        Display Name: Reserved
        Default Value: 0x00
        Value Format: hex
        """
        from ixnetwork_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FCSReserved']))
    @property
    def FCSInterconnectElementName(self):
        """
        Display Name: Interconnect Element Name
        Default Value: 0x00
        Value Format: hex
        """
        from ixnetwork_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FCSInterconnectElementName']))
    @property
    def FcCRCAutoCRC(self):
        """
        Display Name: Auto
        Default Value: 0
        Value Format: hex
        """
        from ixnetwork_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FcCRCAutoCRC']))
    @property
    def FcCRCGenerateBadCRC(self):
        """
        Display Name: Bad CRC
        Default Value: 0x01
        Value Format: hex
        """
        from ixnetwork_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FcCRCGenerateBadCRC']))
    @property
    def FcTrailerEEOF(self):
        """
        Display Name: E-EOF
        Default Value: 65
        Value Format: decimal
        Available enum values: EOFn - Normal, 65, EOFt - Terminate, 66, EOFrt - Remove Terminate, 68, EOFni - Normal Invalid, 73, EOFrti - Remove Terminate Invalid, 79, EOFa - Abort, 80
        """
        from ixnetwork_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FcTrailerEEOF']))
    @property
    def FcTrailerReserved(self):
        """
        Display Name: Reserved
        Default Value: 0x000000
        Value Format: hex
        """
        from ixnetwork_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FcTrailerReserved']))
    def add(self):
        """Create this template on the server from the current local
        attribute values (maps locals through ``_SDM_ATT_MAP`` and calls
        ``Base._create``); returns whatever ``_create`` returns."""
        return self._create(self._map_locals(self._SDM_ATT_MAP, locals()))
| 39.542899 | 519 | 0.669036 | from ixnetwork_restpy.base import Base
from ixnetwork_restpy.files import Files
class FCoEGIET(Base):
__slots__ = ()
_SDM_NAME = 'fCoEGIET'
_SDM_ATT_MAP = {
'FcoeHeaderVersion': 'fCoEGIET.header.fcoeHeader.version-1',
'FcoeHeaderReserved': 'fCoEGIET.header.fcoeHeader.reserved-2',
'FcoeHeaderESOF': 'fCoEGIET.header.fcoeHeader.eSOF-3',
'DeviceDataFramesDeviceDataInfo': 'fCoEGIET.header.fcHeader.rCTL.deviceDataFrames.deviceDataInfo-4',
'RCTLReserved': 'fCoEGIET.header.fcHeader.rCTL.reserved-5',
'ExtendedLinkServicesInfo': 'fCoEGIET.header.fcHeader.rCTL.extendedLinkServices.info-6',
'Fc4LinkDataInfo': 'fCoEGIET.header.fcHeader.rCTL.fc4LinkData.info-7',
'VideoDataInfo': 'fCoEGIET.header.fcHeader.rCTL.videoData.info-8',
'ExtendedHeaderInfo': 'fCoEGIET.header.fcHeader.rCTL.extendedHeader.info-9',
'BasicLinkServicesInfo': 'fCoEGIET.header.fcHeader.rCTL.basicLinkServices.info-10',
'LinkControlFramesInfo': 'fCoEGIET.header.fcHeader.rCTL.linkControlFrames.info-11',
'ExtendedRoutingInfo': 'fCoEGIET.header.fcHeader.rCTL.extendedRouting.info-12',
'FcHeaderDstId': 'fCoEGIET.header.fcHeader.dstId-13',
'FcHeaderCsCTLPriority': 'fCoEGIET.header.fcHeader.csCTLPriority-14',
'FcHeaderSrcId': 'fCoEGIET.header.fcHeader.srcId-15',
'FcHeaderType': 'fCoEGIET.header.fcHeader.type-16',
'FCTLCustom': 'fCoEGIET.header.fcHeader.fCTL.custom-17',
'BuildFCTLExchangeContext': 'fCoEGIET.header.fcHeader.fCTL.buildFCTL.exchangeContext-18',
'BuildFCTLSequenceContext': 'fCoEGIET.header.fcHeader.fCTL.buildFCTL.sequenceContext-19',
'BuildFCTLFirstSequence': 'fCoEGIET.header.fcHeader.fCTL.buildFCTL.firstSequence-20',
'BuildFCTLLastSequence': 'fCoEGIET.header.fcHeader.fCTL.buildFCTL.lastSequence-21',
'BuildFCTLEndSequence': 'fCoEGIET.header.fcHeader.fCTL.buildFCTL.endSequence-22',
'BuildFCTLEndConnection': 'fCoEGIET.header.fcHeader.fCTL.buildFCTL.endConnection-23',
'BuildFCTLCsCTLPriority': 'fCoEGIET.header.fcHeader.fCTL.buildFCTL.csCTLPriority-24',
'BuildFCTLSequenceInitiative': 'fCoEGIET.header.fcHeader.fCTL.buildFCTL.sequenceInitiative-25',
'BuildFCTLFcXIDReassigned': 'fCoEGIET.header.fcHeader.fCTL.buildFCTL.fcXIDReassigned-26',
'BuildFCTLFcInvalidateXID': 'fCoEGIET.header.fcHeader.fCTL.buildFCTL.fcInvalidateXID-27',
'BuildFCTLAckForm': 'fCoEGIET.header.fcHeader.fCTL.buildFCTL.ackForm-28',
'BuildFCTLFcDataCompression': 'fCoEGIET.header.fcHeader.fCTL.buildFCTL.fcDataCompression-29',
'BuildFCTLFcDataEncryption': 'fCoEGIET.header.fcHeader.fCTL.buildFCTL.fcDataEncryption-30',
'BuildFCTLRetransmittedSequence': 'fCoEGIET.header.fcHeader.fCTL.buildFCTL.retransmittedSequence-31',
'BuildFCTLUnidirectionalTransmit': 'fCoEGIET.header.fcHeader.fCTL.buildFCTL.unidirectionalTransmit-32',
'BuildFCTLContinueSeqCondition': 'fCoEGIET.header.fcHeader.fCTL.buildFCTL.continueSeqCondition-33',
'BuildFCTLAbortSeqCondition': 'fCoEGIET.header.fcHeader.fCTL.buildFCTL.abortSeqCondition-34',
'BuildFCTLRelativeOffsetPresent': 'fCoEGIET.header.fcHeader.fCTL.buildFCTL.relativeOffsetPresent-35',
'BuildFCTLExchangeReassembly': 'fCoEGIET.header.fcHeader.fCTL.buildFCTL.exchangeReassembly-36',
'BuildFCTLFillBytes': 'fCoEGIET.header.fcHeader.fCTL.buildFCTL.fillBytes-37',
'FcHeaderSeqID': 'fCoEGIET.header.fcHeader.seqID-38',
'FcHeaderDfCTL': 'fCoEGIET.header.fcHeader.dfCTL-39',
'FcHeaderSeqCNT': 'fCoEGIET.header.fcHeader.seqCNT-40',
'FcHeaderOxID': 'fCoEGIET.header.fcHeader.oxID-41',
'FcHeaderRxID': 'fCoEGIET.header.fcHeader.rxID-42',
'FcHeaderParameter': 'fCoEGIET.header.fcHeader.parameter-43',
'FcCTRevision': 'fCoEGIET.header.fcCT.revision-44',
'FcCTInId': 'fCoEGIET.header.fcCT.inId-45',
'FcCTGsType': 'fCoEGIET.header.fcCT.gsType-46',
'FcCTGsSubtype': 'fCoEGIET.header.fcCT.gsSubtype-47',
'FcCTOptions': 'fCoEGIET.header.fcCT.options-48',
'FcCTReserved': 'fCoEGIET.header.fcCT.reserved-49',
'FCSOpcode': 'fCoEGIET.header.FCS.opcode-50',
'FCSMaxsize': 'fCoEGIET.header.FCS.maxsize-51',
'FCSReserved': 'fCoEGIET.header.FCS.reserved-52',
'FCSInterconnectElementName': 'fCoEGIET.header.FCS.interconnectElementName-53',
'FcCRCAutoCRC': 'fCoEGIET.header.fcCRC.autoCRC-54',
'FcCRCGenerateBadCRC': 'fCoEGIET.header.fcCRC.generateBadCRC-55',
'FcTrailerEEOF': 'fCoEGIET.header.fcTrailer.eEOF-56',
'FcTrailerReserved': 'fCoEGIET.header.fcTrailer.reserved-57',
}
def __init__(self, parent, list_op=False):
super(FCoEGIET, self).__init__(parent, list_op)
@property
def FcoeHeaderVersion(self):
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FcoeHeaderVersion']))
@property
def FcoeHeaderReserved(self):
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FcoeHeaderReserved']))
@property
def FcoeHeaderESOF(self):
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FcoeHeaderESOF']))
@property
def DeviceDataFramesDeviceDataInfo(self):
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['DeviceDataFramesDeviceDataInfo']))
@property
def RCTLReserved(self):
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['RCTLReserved']))
@property
def ExtendedLinkServicesInfo(self):
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['ExtendedLinkServicesInfo']))
@property
def Fc4LinkDataInfo(self):
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Fc4LinkDataInfo']))
@property
def VideoDataInfo(self):
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['VideoDataInfo']))
@property
def ExtendedHeaderInfo(self):
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['ExtendedHeaderInfo']))
@property
def BasicLinkServicesInfo(self):
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['BasicLinkServicesInfo']))
@property
def LinkControlFramesInfo(self):
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['LinkControlFramesInfo']))
@property
def ExtendedRoutingInfo(self):
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['ExtendedRoutingInfo']))
@property
def FcHeaderDstId(self):
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FcHeaderDstId']))
@property
def FcHeaderCsCTLPriority(self):
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FcHeaderCsCTLPriority']))
@property
def FcHeaderSrcId(self):
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FcHeaderSrcId']))
@property
def FcHeaderType(self):
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FcHeaderType']))
@property
def FCTLCustom(self):
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FCTLCustom']))
@property
def BuildFCTLExchangeContext(self):
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['BuildFCTLExchangeContext']))
@property
def BuildFCTLSequenceContext(self):
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['BuildFCTLSequenceContext']))
@property
def BuildFCTLFirstSequence(self):
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['BuildFCTLFirstSequence']))
    # -- Generated Multivalue accessors ---------------------------------------
    # Every property below follows one identical pattern: resolve the REST
    # attribute name through ``_SDM_ATT_MAP`` and wrap the raw value in a
    # ``Multivalue``. The ``Multivalue`` import is repeated locally in each
    # property (presumably to avoid a circular import at module load time --
    # TODO confirm).
    @property
    def BuildFCTLLastSequence(self):
        """Multivalue of the 'BuildFCTLLastSequence' attribute, via ``_SDM_ATT_MAP``."""
        from ixnetwork_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['BuildFCTLLastSequence']))
    @property
    def BuildFCTLEndSequence(self):
        """Multivalue of the 'BuildFCTLEndSequence' attribute, via ``_SDM_ATT_MAP``."""
        from ixnetwork_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['BuildFCTLEndSequence']))
    @property
    def BuildFCTLEndConnection(self):
        """Multivalue of the 'BuildFCTLEndConnection' attribute, via ``_SDM_ATT_MAP``."""
        from ixnetwork_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['BuildFCTLEndConnection']))
    @property
    def BuildFCTLCsCTLPriority(self):
        """Multivalue of the 'BuildFCTLCsCTLPriority' attribute, via ``_SDM_ATT_MAP``."""
        from ixnetwork_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['BuildFCTLCsCTLPriority']))
    @property
    def BuildFCTLSequenceInitiative(self):
        """Multivalue of the 'BuildFCTLSequenceInitiative' attribute, via ``_SDM_ATT_MAP``."""
        from ixnetwork_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['BuildFCTLSequenceInitiative']))
    @property
    def BuildFCTLFcXIDReassigned(self):
        """Multivalue of the 'BuildFCTLFcXIDReassigned' attribute, via ``_SDM_ATT_MAP``."""
        from ixnetwork_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['BuildFCTLFcXIDReassigned']))
    @property
    def BuildFCTLFcInvalidateXID(self):
        """Multivalue of the 'BuildFCTLFcInvalidateXID' attribute, via ``_SDM_ATT_MAP``."""
        from ixnetwork_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['BuildFCTLFcInvalidateXID']))
    @property
    def BuildFCTLAckForm(self):
        """Multivalue of the 'BuildFCTLAckForm' attribute, via ``_SDM_ATT_MAP``."""
        from ixnetwork_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['BuildFCTLAckForm']))
    @property
    def BuildFCTLFcDataCompression(self):
        """Multivalue of the 'BuildFCTLFcDataCompression' attribute, via ``_SDM_ATT_MAP``."""
        from ixnetwork_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['BuildFCTLFcDataCompression']))
    @property
    def BuildFCTLFcDataEncryption(self):
        """Multivalue of the 'BuildFCTLFcDataEncryption' attribute, via ``_SDM_ATT_MAP``."""
        from ixnetwork_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['BuildFCTLFcDataEncryption']))
    @property
    def BuildFCTLRetransmittedSequence(self):
        """Multivalue of the 'BuildFCTLRetransmittedSequence' attribute, via ``_SDM_ATT_MAP``."""
        from ixnetwork_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['BuildFCTLRetransmittedSequence']))
    @property
    def BuildFCTLUnidirectionalTransmit(self):
        """Multivalue of the 'BuildFCTLUnidirectionalTransmit' attribute, via ``_SDM_ATT_MAP``."""
        from ixnetwork_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['BuildFCTLUnidirectionalTransmit']))
    @property
    def BuildFCTLContinueSeqCondition(self):
        """Multivalue of the 'BuildFCTLContinueSeqCondition' attribute, via ``_SDM_ATT_MAP``."""
        from ixnetwork_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['BuildFCTLContinueSeqCondition']))
    @property
    def BuildFCTLAbortSeqCondition(self):
        """Multivalue of the 'BuildFCTLAbortSeqCondition' attribute, via ``_SDM_ATT_MAP``."""
        from ixnetwork_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['BuildFCTLAbortSeqCondition']))
    @property
    def BuildFCTLRelativeOffsetPresent(self):
        """Multivalue of the 'BuildFCTLRelativeOffsetPresent' attribute, via ``_SDM_ATT_MAP``."""
        from ixnetwork_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['BuildFCTLRelativeOffsetPresent']))
    @property
    def BuildFCTLExchangeReassembly(self):
        """Multivalue of the 'BuildFCTLExchangeReassembly' attribute, via ``_SDM_ATT_MAP``."""
        from ixnetwork_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['BuildFCTLExchangeReassembly']))
    @property
    def BuildFCTLFillBytes(self):
        """Multivalue of the 'BuildFCTLFillBytes' attribute, via ``_SDM_ATT_MAP``."""
        from ixnetwork_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['BuildFCTLFillBytes']))
    @property
    def FcHeaderSeqID(self):
        """Multivalue of the 'FcHeaderSeqID' attribute, via ``_SDM_ATT_MAP``."""
        from ixnetwork_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FcHeaderSeqID']))
    @property
    def FcHeaderDfCTL(self):
        """Multivalue of the 'FcHeaderDfCTL' attribute, via ``_SDM_ATT_MAP``."""
        from ixnetwork_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FcHeaderDfCTL']))
    @property
    def FcHeaderSeqCNT(self):
        """Multivalue of the 'FcHeaderSeqCNT' attribute, via ``_SDM_ATT_MAP``."""
        from ixnetwork_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FcHeaderSeqCNT']))
    @property
    def FcHeaderOxID(self):
        """Multivalue of the 'FcHeaderOxID' attribute, via ``_SDM_ATT_MAP``."""
        from ixnetwork_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FcHeaderOxID']))
    @property
    def FcHeaderRxID(self):
        """Multivalue of the 'FcHeaderRxID' attribute, via ``_SDM_ATT_MAP``."""
        from ixnetwork_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FcHeaderRxID']))
    @property
    def FcHeaderParameter(self):
        """Multivalue of the 'FcHeaderParameter' attribute, via ``_SDM_ATT_MAP``."""
        from ixnetwork_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FcHeaderParameter']))
    @property
    def FcCTRevision(self):
        """Multivalue of the 'FcCTRevision' attribute, via ``_SDM_ATT_MAP``."""
        from ixnetwork_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FcCTRevision']))
    @property
    def FcCTInId(self):
        """Multivalue of the 'FcCTInId' attribute, via ``_SDM_ATT_MAP``."""
        from ixnetwork_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FcCTInId']))
    @property
    def FcCTGsType(self):
        """Multivalue of the 'FcCTGsType' attribute, via ``_SDM_ATT_MAP``."""
        from ixnetwork_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FcCTGsType']))
    @property
    def FcCTGsSubtype(self):
        """Multivalue of the 'FcCTGsSubtype' attribute, via ``_SDM_ATT_MAP``."""
        from ixnetwork_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FcCTGsSubtype']))
    @property
    def FcCTOptions(self):
        """Multivalue of the 'FcCTOptions' attribute, via ``_SDM_ATT_MAP``."""
        from ixnetwork_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FcCTOptions']))
    @property
    def FcCTReserved(self):
        """Multivalue of the 'FcCTReserved' attribute, via ``_SDM_ATT_MAP``."""
        from ixnetwork_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FcCTReserved']))
    @property
    def FCSOpcode(self):
        """Multivalue of the 'FCSOpcode' attribute, via ``_SDM_ATT_MAP``."""
        from ixnetwork_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FCSOpcode']))
    @property
    def FCSMaxsize(self):
        """Multivalue of the 'FCSMaxsize' attribute, via ``_SDM_ATT_MAP``."""
        from ixnetwork_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FCSMaxsize']))
    @property
    def FCSReserved(self):
        """Multivalue of the 'FCSReserved' attribute, via ``_SDM_ATT_MAP``."""
        from ixnetwork_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FCSReserved']))
    @property
    def FCSInterconnectElementName(self):
        """Multivalue of the 'FCSInterconnectElementName' attribute, via ``_SDM_ATT_MAP``."""
        from ixnetwork_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FCSInterconnectElementName']))
    @property
    def FcCRCAutoCRC(self):
        """Multivalue of the 'FcCRCAutoCRC' attribute, via ``_SDM_ATT_MAP``."""
        from ixnetwork_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FcCRCAutoCRC']))
    @property
    def FcCRCGenerateBadCRC(self):
        """Multivalue of the 'FcCRCGenerateBadCRC' attribute, via ``_SDM_ATT_MAP``."""
        from ixnetwork_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FcCRCGenerateBadCRC']))
    @property
    def FcTrailerEEOF(self):
        """Multivalue of the 'FcTrailerEEOF' attribute, via ``_SDM_ATT_MAP``."""
        from ixnetwork_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FcTrailerEEOF']))
    @property
    def FcTrailerReserved(self):
        """Multivalue of the 'FcTrailerReserved' attribute, via ``_SDM_ATT_MAP``."""
        from ixnetwork_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FcTrailerReserved']))
    def add(self):
        """Create this resource with the attribute values currently in scope.

        ``locals()`` (here just ``self``) is handed to ``_map_locals`` so the
        base class can translate argument names through ``_SDM_ATT_MAP``; the
        mapped attributes are then passed to ``_create``. NOTE(review):
        ``_create`` presumably issues the REST create call -- defined on the
        base class, confirm there.
        """
        return self._create(self._map_locals(self._SDM_ATT_MAP, locals()))
| true | true |
f72f50c81d90829c24e33e5d2edfbdaff1582b9e | 1,241 | py | Python | test/core_variables_in_actions.py | sdarwin/build | 2c4217ebb6bdeb5001b33a5d0d6718420aef988c | [
"BSL-1.0"
] | 215 | 2015-01-10T17:16:34.000Z | 2022-02-23T15:22:08.000Z | test/core_variables_in_actions.py | sdarwin/build | 2c4217ebb6bdeb5001b33a5d0d6718420aef988c | [
"BSL-1.0"
] | 594 | 2015-01-22T16:17:55.000Z | 2022-02-26T22:11:01.000Z | test/core_variables_in_actions.py | sdarwin/build | 2c4217ebb6bdeb5001b33a5d0d6718420aef988c | [
"BSL-1.0"
] | 302 | 2015-02-03T01:20:29.000Z | 2022-02-12T07:01:28.000Z | #!/usr/bin/python
# Copyright 2012. Jurko Gospodnetic
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE.txt or copy at
# https://www.bfgroup.xyz/b2/LICENSE.txt)
# Tests that variables in actions get expanded but double quote characters
# get treated as regular characters and not string literal delimiters when
# determining string tokens concatenated to the variable being expanded.
#
# We also take care to make this test work correctly when run using both
# Windows and Unix echo command variant. That is why we add the extra single
# quotes around the text being echoed - they will make the double quotes be
# displayed as regular characters in both cases but will be displayed
# themselves only when using the Windows cmd shell's echo command.
import BoostBuild
# pass_toolset=0: no compiler toolset is needed, we only exercise the jam
# interpreter itself.
t = BoostBuild.Tester(pass_toolset=0)
# The jamfile sets a rule-local variable ``a`` and also stores it on the
# target so the action body can expand $(a) next to double-quote characters.
t.write("file.jam", """\
rule dummy ( i )
{
local a = 1 2 3 ;
ECHO From "rule:" $(a)" seconds" ;
a on $(i) = $(a) ;
}
actions dummy
{
echo 'From action: $(a)" seconds"'
}
dummy all ;
""")
t.run_build_system(["-ffile.jam", "-d1"])
# In rule context the double quotes delimit string literals, so each element
# of $(a) gets a plain " seconds" suffix...
t.expect_output_lines("From rule: 1 seconds 2 seconds 3 seconds")
# ...while in the action body the double quotes are ordinary characters and
# survive into the echoed output (see the header comment of this test).
t.expect_output_lines('*From action: 1" 2" 3" seconds"*')
t.cleanup()
| 31.025 | 76 | 0.724416 |
import BoostBuild
t = BoostBuild.Tester(pass_toolset=0)
t.write("file.jam", """\
rule dummy ( i )
{
local a = 1 2 3 ;
ECHO From "rule:" $(a)" seconds" ;
a on $(i) = $(a) ;
}
actions dummy
{
echo 'From action: $(a)" seconds"'
}
dummy all ;
""")
t.run_build_system(["-ffile.jam", "-d1"])
t.expect_output_lines("From rule: 1 seconds 2 seconds 3 seconds")
t.expect_output_lines('*From action: 1" 2" 3" seconds"*')
t.cleanup()
| true | true |
f72f50dbed30f0bd6003af7d51ecda8a8bc11fb9 | 2,355 | py | Python | setup.py | Anthchirp/pika | 55557c9172edb04256a5e49b7faf13078d5a0f66 | [
"BSD-3-Clause"
] | 2,479 | 2015-01-01T20:06:23.000Z | 2022-03-31T13:29:19.000Z | setup.py | Anthchirp/pika | 55557c9172edb04256a5e49b7faf13078d5a0f66 | [
"BSD-3-Clause"
] | 813 | 2015-01-07T07:13:49.000Z | 2022-03-28T05:05:06.000Z | setup.py | Anthchirp/pika | 55557c9172edb04256a5e49b7faf13078d5a0f66 | [
"BSD-3-Clause"
] | 763 | 2015-01-10T04:38:33.000Z | 2022-03-31T07:24:57.000Z | import setuptools
import os
# Conditionally include additional modules for docs
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
requirements = list()
if on_rtd:
    requirements.append('gevent')
    requirements.append('tornado')
    requirements.append('twisted')

# ``io.open`` accepts an explicit encoding on both Python 2.7 and 3.x, which
# this package still claims to support (see the classifiers below).
import io

# Read the long description from the README once, as UTF-8.  The previous
# code built a hard-coded summary string that was never used, while setup()
# re-read the README inline via a bare ``open`` (no context manager and a
# locale-dependent default encoding); this fixes both issues without changing
# the metadata that setup() receives.
with io.open('README.rst', encoding='utf-8') as readme:
    long_description = readme.read()

setuptools.setup(
    name='pika',
    version='1.2.0',
    description='Pika Python AMQP Client Library',
    long_description=long_description,
    maintainer='Gavin M. Roy',
    maintainer_email='gavinmroy@gmail.com',
    url='https://pika.readthedocs.io',
    packages=setuptools.find_packages(include=['pika', 'pika.*']),
    license='BSD',
    install_requires=requirements,
    package_data={'': ['LICENSE', 'README.rst']},
    extras_require={
        'gevent': ['gevent'],
        'tornado': ['tornado'],
        'twisted': ['twisted'],
    },
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Natural Language :: English',
        'Operating System :: OS Independent',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
        'Programming Language :: Python :: 3.9',
        'Programming Language :: Python :: Implementation :: CPython',
        'Programming Language :: Python :: Implementation :: Jython',
        'Programming Language :: Python :: Implementation :: PyPy',
        'Topic :: Communications', 'Topic :: Internet',
        'Topic :: Software Development :: Libraries',
        'Topic :: Software Development :: Libraries :: Python Modules',
        'Topic :: System :: Networking'
    ],
    zip_safe=True)
| 39.915254 | 78 | 0.609766 | import setuptools
import os
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
requirements = list()
if on_rtd:
requirements.append('gevent')
requirements.append('tornado')
requirements.append('twisted')
long_description = ('Pika is a pure-Python implementation of the AMQP 0-9-1 '
'protocol that tries to stay fairly independent of the '
'underlying network support library. Pika was developed '
'primarily for use with RabbitMQ, but should also work '
'with other AMQP 0-9-1 brokers.')
setuptools.setup(
name='pika',
version='1.2.0',
description='Pika Python AMQP Client Library',
long_description=open('README.rst').read(),
maintainer='Gavin M. Roy',
maintainer_email='gavinmroy@gmail.com',
url='https://pika.readthedocs.io',
packages=setuptools.find_packages(include=['pika', 'pika.*']),
license='BSD',
install_requires=requirements,
package_data={'': ['LICENSE', 'README.rst']},
extras_require={
'gevent': ['gevent'],
'tornado': ['tornado'],
'twisted': ['twisted'],
},
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: Jython',
'Programming Language :: Python :: Implementation :: PyPy',
'Topic :: Communications', 'Topic :: Internet',
'Topic :: Software Development :: Libraries',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: System :: Networking'
],
zip_safe=True)
| true | true |
f72f5124f0b9c2a565e817ba853281a8aa8ce924 | 3,278 | py | Python | src/dev/amdgpu/AMDGPU.py | zinob15/gem5 | fb2946e314ea9e63c7696ee8023150ed13956582 | [
"BSD-3-Clause"
] | 19 | 2018-07-20T15:08:50.000Z | 2022-03-26T16:15:59.000Z | src/dev/amdgpu/AMDGPU.py | zinob15/gem5 | fb2946e314ea9e63c7696ee8023150ed13956582 | [
"BSD-3-Clause"
] | 148 | 2018-07-20T00:58:36.000Z | 2021-11-16T01:52:33.000Z | src/dev/amdgpu/AMDGPU.py | zinob15/gem5 | fb2946e314ea9e63c7696ee8023150ed13956582 | [
"BSD-3-Clause"
] | 10 | 2019-01-10T03:01:30.000Z | 2022-01-21T18:36:18.000Z | # Copyright (c) 2021 Advanced Micro Devices, Inc.
# All rights reserved.
#
# For use for simulation and test purposes only
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from m5.params import *
from m5.objects.PciDevice import PciDevice
from m5.objects.PciDevice import PciMemBar, PciMemUpperBar, PciLegacyIoBar
# PCI device model for an AMD Vega 10 based GPU. The PCI codes and BARs
# correspond to a Vega Frontier Edition hardware device. None of the PCI
# related values in this class should be changed.
#
# This class requires a ROM binary and an MMIO trace to initialize the
# device registers and memory. It is intended only to be used in full-system
# simulation under Linux where the amdgpu driver is modprobed.
class AMDGPUDevice(PciDevice):
    """PCI device model of an AMD Vega 10 based GPU (Vega Frontier Edition).

    Per the file header, none of the PCI-related values below should be
    changed.  The device is initialized from a ROM binary dump and an MMIO
    trace (``rom_binary`` / ``trace_file`` params) and is intended for
    full-system simulation with the amdgpu driver modprobed.
    """
    # gem5 SimObject bookkeeping: the C++ class backing this Python object.
    type = 'AMDGPUDevice'
    cxx_header = "dev/amdgpu/amdgpu_device.hh"
    cxx_class = 'gem5::AMDGPUDevice'
    # IDs for AMD Vega 10
    VendorID = 0x1002
    DeviceID = 0x6863
    # Command 0x3 never gets sent indicating IO and Mem bars are enabled. Hard
    # code the command here and deal unassigned BARs on C++ side.
    Command = 0x3
    Status = 0x0280
    Revision = 0x0
    ClassCode = 0x03
    SubClassCode = 0x00
    ProgIF = 0x00
    # Use max possible BAR size for Vega 10. We can override with driver param
    BAR0 = PciMemBar(size='16GiB')
    BAR1 = PciMemUpperBar()
    BAR2 = PciMemBar(size='2MiB')
    BAR3 = PciMemUpperBar()
    BAR4 = PciLegacyIoBar(addr=0xf000, size='256B')
    BAR5 = PciMemBar(size='512KiB')
    InterruptLine = 14
    InterruptPin = 2
    ExpansionROM = 0
    # Inputs captured from real hardware that seed the simulated registers.
    rom_binary = Param.String("ROM binary dumped from hardware")
    trace_file = Param.String("MMIO trace collected on hardware")
    checkpoint_before_mmios = Param.Bool(False, "Take a checkpoint before the"
                                                " device begins sending MMIOs")
| 43.131579 | 79 | 0.744356 |
from m5.params import *
from m5.objects.PciDevice import PciDevice
from m5.objects.PciDevice import PciMemBar, PciMemUpperBar, PciLegacyIoBar
class AMDGPUDevice(PciDevice):
type = 'AMDGPUDevice'
cxx_header = "dev/amdgpu/amdgpu_device.hh"
cxx_class = 'gem5::AMDGPUDevice'
VendorID = 0x1002
DeviceID = 0x6863
Command = 0x3
Status = 0x0280
Revision = 0x0
ClassCode = 0x03
SubClassCode = 0x00
ProgIF = 0x00
BAR0 = PciMemBar(size='16GiB')
BAR1 = PciMemUpperBar()
BAR2 = PciMemBar(size='2MiB')
BAR3 = PciMemUpperBar()
BAR4 = PciLegacyIoBar(addr=0xf000, size='256B')
BAR5 = PciMemBar(size='512KiB')
InterruptLine = 14
InterruptPin = 2
ExpansionROM = 0
rom_binary = Param.String("ROM binary dumped from hardware")
trace_file = Param.String("MMIO trace collected on hardware")
checkpoint_before_mmios = Param.Bool(False, "Take a checkpoint before the"
" device begins sending MMIOs")
| true | true |
f72f51bdd43655bb4de8a16aeea91f20e47a985f | 5,576 | py | Python | qiskit/circuit/library/blueprintcircuit.py | apcarrik/qiskit-terra | 456d036ade0e66f391f8932a944a1dd73d6354dd | [
"Apache-2.0"
] | 1 | 2021-07-06T09:07:47.000Z | 2021-07-06T09:07:47.000Z | qiskit/circuit/library/blueprintcircuit.py | evercodes/qiskit-terra | 649fec2cd1644c43eabc39b0a588c0a9347a2b50 | [
"Apache-2.0"
] | null | null | null | qiskit/circuit/library/blueprintcircuit.py | evercodes/qiskit-terra | 649fec2cd1644c43eabc39b0a588c0a9347a2b50 | [
"Apache-2.0"
] | null | null | null | # This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Blueprint circuit object."""
from typing import Optional
from abc import ABC, abstractmethod
from qiskit.circuit import QuantumCircuit
from qiskit.circuit.parametertable import ParameterTable, ParameterView
class BlueprintCircuit(QuantumCircuit, ABC):
    """Blueprint circuit object.

    In many applications it is necessary to pass around the structure a circuit will have without
    explicitly knowing e.g. its number of qubits, or other missing information. This can be solved
    by having a circuit that knows how to construct itself, once all information is available.

    This class provides an interface for such circuits. Before internal data of the circuit is
    accessed, the ``_build`` method is called. There the configuration of the circuit is checked.
    """

    def __init__(self, *regs, name: Optional[str] = None) -> None:
        """Create a new blueprint circuit.

        The ``_data`` argument storing the internal circuit data is set to ``None`` to indicate
        that the circuit has not been built yet.
        """
        super().__init__(*regs, name=name)
        # ``None`` means "not built yet"; an empty list means "built, but empty".
        self._data = None
        self._qregs = []
        self._cregs = []
        self._qubits = []
        self._qubit_set = set()

    @abstractmethod
    def _check_configuration(self, raise_on_failure: bool = True) -> bool:
        """Check if the current configuration allows the circuit to be built.

        Args:
            raise_on_failure: If True, raise if the configuration is invalid. If False, return
                False if the configuration is invalid.

        Returns:
            True, if the configuration is valid. Otherwise, depending on the value of
            ``raise_on_failure`` an error is raised or False is returned.
        """
        raise NotImplementedError

    @abstractmethod
    def _build(self) -> None:
        """Build the circuit (subclasses extend this and should call ``super()._build()``)."""
        # do not build the circuit if _data is already populated
        if self._data is not None:
            return

        self._data = []

        # check whether the configuration is valid
        self._check_configuration()

    def _build_if_needed(self) -> None:
        """Trigger ``_build`` exactly when the circuit has not been built yet.

        All lazily-building accessors below funnel through this helper so the
        guard lives in one place instead of being repeated per method.
        """
        if self._data is None:
            self._build()

    def _invalidate(self) -> None:
        """Invalidate the current circuit build."""
        self._data = None
        self._parameter_table = ParameterTable()
        self.global_phase = 0

    @property
    def qregs(self):
        """A list of the quantum registers associated with the circuit."""
        return self._qregs

    @qregs.setter
    def qregs(self, qregs):
        """Set the quantum registers associated with the circuit."""
        self._qregs = qregs
        self._qubits = [qbit for qreg in qregs for qbit in qreg]
        self._qubit_set = set(self._qubits)
        # The register layout changed, so any previously built data is stale.
        self._invalidate()

    @property
    def data(self):
        self._build_if_needed()
        return super().data

    @property
    def num_parameters(self) -> int:
        self._build_if_needed()
        return super().num_parameters

    @property
    def parameters(self) -> ParameterView:
        self._build_if_needed()
        return super().parameters

    def qasm(self, formatted=False, filename=None, encoding=None):
        self._build_if_needed()
        return super().qasm(formatted, filename, encoding)

    def append(self, instruction, qargs=None, cargs=None):
        self._build_if_needed()
        return super().append(instruction, qargs, cargs)

    def compose(self, other, qubits=None, clbits=None, front=False, inplace=False):
        self._build_if_needed()
        return super().compose(other, qubits, clbits, front, inplace)

    def inverse(self):
        self._build_if_needed()
        return super().inverse()

    def __len__(self):
        # ``self.data`` triggers a build if needed.
        return len(self.data)

    def __getitem__(self, item):
        # ``self.data`` triggers a build if needed.
        return self.data[item]

    def size(self):
        self._build_if_needed()
        return super().size()

    def to_instruction(self, parameter_map=None, label=None):
        self._build_if_needed()
        return super().to_instruction(parameter_map, label=label)

    def to_gate(self, parameter_map=None, label=None):
        self._build_if_needed()
        return super().to_gate(parameter_map, label=label)

    def depth(self):
        self._build_if_needed()
        return super().depth()

    def count_ops(self):
        self._build_if_needed()
        return super().count_ops()

    def num_nonlocal_gates(self):
        self._build_if_needed()
        return super().num_nonlocal_gates()

    def num_connected_components(self, unitary_only=False):
        self._build_if_needed()
        return super().num_connected_components(unitary_only=unitary_only)

    def copy(self, name=None):
        self._build_if_needed()
        return super().copy(name=name)
| 32.231214 | 98 | 0.638989 |
from typing import Optional
from abc import ABC, abstractmethod
from qiskit.circuit import QuantumCircuit
from qiskit.circuit.parametertable import ParameterTable, ParameterView
class BlueprintCircuit(QuantumCircuit, ABC):
def __init__(self, *regs, name: Optional[str] = None) -> None:
super().__init__(*regs, name=name)
self._data = None
self._qregs = []
self._cregs = []
self._qubits = []
self._qubit_set = set()
@abstractmethod
def _check_configuration(self, raise_on_failure: bool = True) -> bool:
raise NotImplementedError
@abstractmethod
def _build(self) -> None:
if self._data is not None:
return
self._data = []
self._check_configuration()
def _invalidate(self) -> None:
self._data = None
self._parameter_table = ParameterTable()
self.global_phase = 0
@property
def qregs(self):
return self._qregs
@qregs.setter
def qregs(self, qregs):
self._qregs = qregs
self._qubits = [qbit for qreg in qregs for qbit in qreg]
self._qubit_set = set(self._qubits)
self._invalidate()
@property
def data(self):
if self._data is None:
self._build()
return super().data
@property
def num_parameters(self) -> int:
if self._data is None:
self._build()
return super().num_parameters
@property
def parameters(self) -> ParameterView:
if self._data is None:
self._build()
return super().parameters
def qasm(self, formatted=False, filename=None, encoding=None):
if self._data is None:
self._build()
return super().qasm(formatted, filename, encoding)
def append(self, instruction, qargs=None, cargs=None):
if self._data is None:
self._build()
return super().append(instruction, qargs, cargs)
def compose(self, other, qubits=None, clbits=None, front=False, inplace=False):
if self._data is None:
self._build()
return super().compose(other, qubits, clbits, front, inplace)
def inverse(self):
if self._data is None:
self._build()
return super().inverse()
def __len__(self):
return len(self.data)
def __getitem__(self, item):
return self.data[item]
def size(self):
if self._data is None:
self._build()
return super().size()
def to_instruction(self, parameter_map=None, label=None):
if self._data is None:
self._build()
return super().to_instruction(parameter_map, label=label)
def to_gate(self, parameter_map=None, label=None):
if self._data is None:
self._build()
return super().to_gate(parameter_map, label=label)
def depth(self):
if self._data is None:
self._build()
return super().depth()
def count_ops(self):
if self._data is None:
self._build()
return super().count_ops()
def num_nonlocal_gates(self):
if self._data is None:
self._build()
return super().num_nonlocal_gates()
def num_connected_components(self, unitary_only=False):
if self._data is None:
self._build()
return super().num_connected_components(unitary_only=unitary_only)
def copy(self, name=None):
if self._data is None:
self._build()
return super().copy(name=name)
| true | true |
f72f536f5857554bc8e9d0890cb64eaae718c843 | 163 | py | Python | practice/3_basic_tensorflow/Keras/Example_simpleLinearRegression.py | rabbitsun2/toy_python | 32f84b4d15b13c4daa4fa212a40e685abc0d2a5d | [
"Apache-2.0"
] | null | null | null | practice/3_basic_tensorflow/Keras/Example_simpleLinearRegression.py | rabbitsun2/toy_python | 32f84b4d15b13c4daa4fa212a40e685abc0d2a5d | [
"Apache-2.0"
] | null | null | null | practice/3_basic_tensorflow/Keras/Example_simpleLinearRegression.py | rabbitsun2/toy_python | 32f84b4d15b13c4daa4fa212a40e685abc0d2a5d | [
"Apache-2.0"
] | null | null | null | import numpy as np
import matplotlib.pyplot as plt
# Sample points x = 1..5 on the line y = 3x + 2.
x = np.arange(1, 6)
y = 3 * x + 2
print(x)
print(y)
# Visualization  (translated from Korean: 시각화)
plt.plot(x, y)
plt.title('y = 3x + 2')
plt.show()
| 10.866667 | 31 | 0.613497 | import numpy as np
import matplotlib.pyplot as plt
x = np.arange(1, 6)
y = 3 * x + 2
print(x)
print(y)
plt.plot(x, y)
plt.title('y = 3x + 2')
plt.show()
| true | true |
f72f5401fcba40741973080a187ab1412d11d4bb | 5,623 | py | Python | util/config.py | chunbolang/HPA | 645971c21519937ecbacf15e674e83e452eb1afc | [
"MIT"
] | 3 | 2021-09-01T13:04:09.000Z | 2021-12-19T16:25:01.000Z | util/config.py | chunbolang/HPA | 645971c21519937ecbacf15e674e83e452eb1afc | [
"MIT"
] | null | null | null | util/config.py | chunbolang/HPA | 645971c21519937ecbacf15e674e83e452eb1afc | [
"MIT"
] | null | null | null | # -----------------------------------------------------------------------------
# Functions for parsing args
# -----------------------------------------------------------------------------
import yaml
import os
from ast import literal_eval
import copy
class CfgNode(dict):
    """One node of the configuration tree.

    A plain ``dict`` subclass whose keys are also reachable as attributes
    (``node.key``).  Nested plain dicts are converted to ``CfgNode`` children
    at construction time.
    """
    def __init__(self, init_dict=None, key_list=None, new_allowed=False):
        contents = {} if init_dict is None else init_dict
        path = [] if key_list is None else key_list
        # Recursively wrap nested plain dicts (in place) as CfgNode children.
        for key, value in contents.items():
            if type(value) is dict:
                contents[key] = CfgNode(value, key_list=path + [key])
        super(CfgNode, self).__init__(contents)

    def __getattr__(self, name):
        # Attribute access falls back to item access; unknown keys surface
        # as AttributeError, as expected for attribute lookup.
        if name not in self:
            raise AttributeError(name)
        return self[name]

    def __setattr__(self, name, value):
        # Every attribute assignment is stored as a dict entry.
        self[name] = value

    def __str__(self):
        def indent_tail(text, width):
            # Indent every line but the first by ``width`` spaces.
            lines = text.split("\n")
            if len(lines) == 1:
                return text
            padded = [(width * " ") + line for line in lines[1:]]
            return "\n".join([lines[0]] + padded)

        rendered = []
        for key, value in sorted(self.items()):
            sep = "\n" if isinstance(value, CfgNode) else " "
            rendered.append(indent_tail("{}:{}{}".format(str(key), sep, str(value)), 2))
        return "\n".join(rendered)

    def __repr__(self):
        return "{}({})".format(type(self).__name__, super(CfgNode, self).__repr__())
def load_cfg_from_cfg_file(file):
    """Load a two-level ``.yaml`` file and flatten its sections into one CfgNode.

    Every entry of every top-level section is merged into a single flat
    namespace; entries from later sections overwrite duplicate keys.
    """
    assert os.path.isfile(file) and file.endswith('.yaml'), \
        '{} is not a yaml file'.format(file)

    with open(file, 'r') as stream:
        sections = yaml.safe_load(stream)

    flat = {}
    for section in sections:
        for option, value in sections[section].items():
            flat[option] = value

    return CfgNode(flat)
def merge_cfg_from_args(cfg, args):
    """Copy every parsed command-line attribute into ``cfg`` (in place).

    The bookkeeping entries ``config`` (path of the yaml file) and ``opts``
    (raw override list) are skipped; every other attribute overwrites any
    existing key.  Returns ``cfg`` for convenience.

    Bug fix: the original condition ``not k == 'config' or k == 'opts'`` had
    an operator-precedence error -- it skipped only ``config`` and still
    copied ``opts`` into the config.
    """
    for k, v in args.__dict__.items():
        if k not in ('config', 'opts'):
            cfg[k] = v
    return cfg
def merge_cfg_from_list(cfg, cfg_list):
    """Return a deep copy of ``cfg`` updated from a flat [key, value, ...] list.

    Keys may be dotted (``a.b.c``); only the last component is used and must
    already exist in ``cfg``.  Values are decoded and type-coerced against the
    existing entry before being assigned on the copy.
    """
    assert len(cfg_list) % 2 == 0
    merged = copy.deepcopy(cfg)
    for full_key, raw in zip(cfg_list[0::2], cfg_list[1::2]):
        subkey = full_key.split('.')[-1]
        assert subkey in cfg, 'Non-existent key: {}'.format(full_key)
        coerced = _check_and_coerce_cfg_value_type(
            _decode_cfg_value(raw), cfg[subkey], subkey, full_key
        )
        setattr(merged, subkey, coerced)
    return merged
def _decode_cfg_value(v):
"""Decodes a raw config value (e.g., from a yaml config files or command
line argument) into a Python object.
"""
# All remaining processing is only applied to strings
if not isinstance(v, str):
return v
# Try to interpret `v` as a:
# string, number, tuple, list, dict, boolean, or None
try:
v = literal_eval(v)
# The following two excepts allow v to pass through when it represents a
# string.
#
# Longer explanation:
# The type of v is always a string (before calling literal_eval), but
# sometimes it *represents* a string and other times a data structure, like
# a list. In the case that v represents a string, what we got back from the
# yaml parser is 'foo' *without quotes* (so, not '"foo"'). literal_eval is
# ok with '"foo"', but will raise a ValueError if given 'foo'. In other
# cases, like paths (v = 'foo/bar' and not v = '"foo/bar"'), literal_eval
# will raise a SyntaxError.
except ValueError:
pass
except SyntaxError:
pass
return v
def _check_and_coerce_cfg_value_type(replacement, original, key, full_key):
"""Checks that `replacement`, which is intended to replace `original` is of
the right type. The type is correct if it matches exactly or is one of a few
cases in which the type can be easily coerced.
"""
original_type = type(original)
replacement_type = type(replacement)
# The types must match (with some exceptions)
if replacement_type == original_type:
return replacement
# Cast replacement from from_type to to_type if the replacement and original
# types match from_type and to_type
def conditional_cast(from_type, to_type):
if replacement_type == from_type and original_type == to_type:
return True, to_type(replacement)
else:
return False, None
# Conditionally casts
# list <-> tuple
casts = [(tuple, list), (list, tuple)]
# For py2: allow converting from str (bytes) to a unicode string
try:
casts.append((str, unicode)) # noqa: F821
except Exception:
pass
for (from_type, to_type) in casts:
converted, converted_value = conditional_cast(from_type, to_type)
if converted:
return converted_value
raise ValueError(
"Type mismatch ({} vs. {}) with values ({} vs. {}) for config "
"key: {}".format(
original_type, replacement_type, original, replacement, full_key
)
)
def _assert_with_logging(cond, msg):
if not cond:
logger.debug(msg)
assert cond, msg
| 32.316092 | 88 | 0.591677 |
import yaml
import os
from ast import literal_eval
import copy
class CfgNode(dict):
def __init__(self, init_dict=None, key_list=None, new_allowed=False):
init_dict = {} if init_dict is None else init_dict
key_list = [] if key_list is None else key_list
for k, v in init_dict.items():
if type(v) is dict:
init_dict[k] = CfgNode(v, key_list=key_list + [k])
super(CfgNode, self).__init__(init_dict)
def __getattr__(self, name):
if name in self:
return self[name]
else:
raise AttributeError(name)
def __setattr__(self, name, value):
self[name] = value
def __str__(self):
def _indent(s_, num_spaces):
s = s_.split("\n")
if len(s) == 1:
return s_
first = s.pop(0)
s = [(num_spaces * " ") + line for line in s]
s = "\n".join(s)
s = first + "\n" + s
return s
r = ""
s = []
for k, v in sorted(self.items()):
seperator = "\n" if isinstance(v, CfgNode) else " "
attr_str = "{}:{}{}".format(str(k), seperator, str(v))
attr_str = _indent(attr_str, 2)
s.append(attr_str)
r += "\n".join(s)
return r
def __repr__(self):
return "{}({})".format(self.__class__.__name__, super(CfgNode, self).__repr__())
def load_cfg_from_cfg_file(file):
cfg = {}
assert os.path.isfile(file) and file.endswith('.yaml'), \
'{} is not a yaml file'.format(file)
with open(file, 'r') as f:
cfg_from_file = yaml.safe_load(f)
for key in cfg_from_file:
for k, v in cfg_from_file[key].items():
cfg[k] = v
cfg = CfgNode(cfg)
return cfg
def merge_cfg_from_args(cfg, args):
args_dict = args.__dict__
for k ,v in args_dict.items():
if not k == 'config' or k == 'opts':
cfg[k] = v
return cfg
def merge_cfg_from_list(cfg, cfg_list):
    """Return a deep copy of ``cfg`` with command-line overrides applied.

    ``cfg_list`` is a flat ``[key, value, key, value, ...]`` sequence.
    Only the last dotted component of each key is used; each value is
    decoded from its string form and coerced to the type already stored
    in ``cfg`` (raising ``ValueError`` on an unsafe type change).
    """
    merged = copy.deepcopy(cfg)
    assert len(cfg_list) % 2 == 0
    keys = cfg_list[0::2]
    raw_values = cfg_list[1::2]
    for full_key, raw in zip(keys, raw_values):
        subkey = full_key.split('.')[-1]
        assert subkey in cfg, 'Non-existent key: {}'.format(full_key)
        decoded = _decode_cfg_value(raw)
        coerced = _check_and_coerce_cfg_value_type(
            decoded, cfg[subkey], subkey, full_key
        )
        setattr(merged, subkey, coerced)
    return merged
def _decode_cfg_value(v):
if not isinstance(v, str):
return v
try:
v = literal_eval(v)
except ValueError:
pass
except SyntaxError:
pass
return v
def _check_and_coerce_cfg_value_type(replacement, original, key, full_key):
original_type = type(original)
replacement_type = type(replacement)
if replacement_type == original_type:
return replacement
def conditional_cast(from_type, to_type):
if replacement_type == from_type and original_type == to_type:
return True, to_type(replacement)
else:
return False, None
casts = [(tuple, list), (list, tuple)]
try:
casts.append((str, unicode))
except Exception:
pass
for (from_type, to_type) in casts:
converted, converted_value = conditional_cast(from_type, to_type)
if converted:
return converted_value
raise ValueError(
"Type mismatch ({} vs. {}) with values ({} vs. {}) for config "
"key: {}".format(
original_type, replacement_type, original, replacement, full_key
)
)
def _assert_with_logging(cond, msg):
if not cond:
logger.debug(msg)
assert cond, msg
| true | true |
f72f5537eacc861c1d47288d0342e45f60ac7392 | 1,053 | py | Python | crypto_balancer/order.py | GRTTX/crypto_balancer | a1c862cd6d01065a059664da605f03d972585418 | [
"MIT"
] | 28 | 2019-05-13T12:25:43.000Z | 2022-03-11T06:57:21.000Z | crypto_balancer/order.py | timff/crypto_balancer | cc5ed85b4f1f7222d6532f8f0ba46dac329bac7b | [
"MIT"
] | 5 | 2019-03-10T15:39:34.000Z | 2021-06-13T05:39:14.000Z | crypto_balancer/order.py | timff/crypto_balancer | cc5ed85b4f1f7222d6532f8f0ba46dac329bac7b | [
"MIT"
] | 17 | 2019-03-10T15:14:20.000Z | 2022-03-11T06:57:06.000Z | class Order():
def __init__(self, pair, direction, amount, price):
if direction.upper() not in ['BUY', 'SELL']:
raise ValueError("{} is not a valid direction".format(direction))
self.pair = pair
self.direction = direction
self.amount = float(amount)
self.price = float(price)
self.type_ = None
def __str__(self):
return f"{self.direction} {self.amount} {self.pair} @ {self.price}"
def __repr__(self):
return f"Order('{self.pair}', '{self.direction}', {self.amount}, {self.price})"
def __eq__(self, other):
return self.pair == other.pair and \
self.direction == other.direction and \
self.amount == other.amount and \
self.price == other.price
def __lt__(self, other):
return (self.pair, self.direction, self.amount, self.price) < \
(other.pair, other.direction, other.amount, other.price)
def __hash__(self):
return hash((self.pair, self.direction, self.amount, self.price))
| 35.1 | 87 | 0.60019 | class Order():
def __init__(self, pair, direction, amount, price):
if direction.upper() not in ['BUY', 'SELL']:
raise ValueError("{} is not a valid direction".format(direction))
self.pair = pair
self.direction = direction
self.amount = float(amount)
self.price = float(price)
self.type_ = None
def __str__(self):
return f"{self.direction} {self.amount} {self.pair} @ {self.price}"
def __repr__(self):
return f"Order('{self.pair}', '{self.direction}', {self.amount}, {self.price})"
def __eq__(self, other):
return self.pair == other.pair and \
self.direction == other.direction and \
self.amount == other.amount and \
self.price == other.price
def __lt__(self, other):
return (self.pair, self.direction, self.amount, self.price) < \
(other.pair, other.direction, other.amount, other.price)
def __hash__(self):
return hash((self.pair, self.direction, self.amount, self.price))
| true | true |
f72f55f3741f5c542c00c1b9f62273c35e1f6038 | 1,864 | py | Python | get_swa_model.py | Mr-GJ/PaddleDetection | 5fc2ce62d9b8f0bcc3d1c436b502d0371dec2a35 | [
"Apache-2.0"
] | null | null | null | get_swa_model.py | Mr-GJ/PaddleDetection | 5fc2ce62d9b8f0bcc3d1c436b502d0371dec2a35 | [
"Apache-2.0"
] | null | null | null | get_swa_model.py | Mr-GJ/PaddleDetection | 5fc2ce62d9b8f0bcc3d1c436b502d0371dec2a35 | [
"Apache-2.0"
] | null | null | null | import os
from argparse import ArgumentParser
import paddle
def main():
    """Average a consecutive range of Paddle checkpoints (SWA) and save it.

    Loads checkpoints ``<id>.pdparams`` for ``starting_model_id`` through
    ``ending_model_id`` (inclusive) from ``model_dir``, averages every
    entry of the state dicts, and writes the result as
    ``swa_<start>-<end>.pdparams`` into ``save_dir`` (or ``model_dir``
    when no save dir is given).

    Cleanup: removed the original's dead assignments (``ref_model`` was
    assigned and immediately overwritten; ``state_dict`` was an unused
    alias) and the redundant ``int()`` casts on ``type=int`` arguments.
    """
    parser = ArgumentParser()
    parser.add_argument(
        '--model_dir', help='the directory where checkpoints are saved',
        default="output/swa_test")
    parser.add_argument(
        '--starting_model_id',
        default=0,
        type=int,
        help='the id of the starting checkpoint for averaging, e.g. 1')
    parser.add_argument(
        '--ending_model_id',
        default=11,
        type=int,
        help='the id of the ending checkpoint for averaging, e.g. 12')
    parser.add_argument(
        '--save_dir',
        default=None,
        help='the directory for saving the SWA model')
    args = parser.parse_args()

    model_dir = args.model_dir
    # Inclusive range of checkpoint ids to average.
    checkpoint_paths = [
        os.path.join(model_dir, '{}.pdparams'.format(i))
        for i in range(args.starting_model_id, args.ending_model_id + 1)
    ]
    models = [paddle.load(path) for path in checkpoint_paths]

    # Average every parameter tensor across the loaded checkpoints.
    avg_state = models[-1].copy()
    for key in avg_state:
        total = 0.0
        for state in models:
            total = total + state[key]
        avg_state[key] = total / len(models)

    save_model_name = 'swa_' + str(args.starting_model_id) + '-' + \
        str(args.ending_model_id) + '.pdparams'
    if args.save_dir is not None:
        save_dir = os.path.join(args.save_dir, save_model_name)
    else:
        save_dir = os.path.join(model_dir, save_model_name)
    paddle.save(avg_state, save_dir)
    print('Model is saved at', save_dir)
if __name__ == '__main__':
main() | 31.066667 | 98 | 0.636266 | import os
from argparse import ArgumentParser
import paddle
def main():
parser = ArgumentParser()
parser.add_argument(
'--model_dir', help='the directory where checkpoints are saved',default="output/swa_test")
parser.add_argument(
'--starting_model_id',
default=0,
type=int,
help='the id of the starting checkpoint for averaging, e.g. 1')
parser.add_argument(
'--ending_model_id',
default=11,
type=int,
help='the id of the ending checkpoint for averaging, e.g. 12')
parser.add_argument(
'--save_dir',
default=None,
help='the directory for saving the SWA model')
args = parser.parse_args()
model_dir = args.model_dir
starting_id = int(args.starting_model_id)
ending_id = int(args.ending_model_id)
model_names = list(range(starting_id, ending_id + 1))
model_dirs = [
os.path.join(model_dir, str(i) + '.pdparams')
for i in model_names
]
models = [paddle.load(model_dir) for model_dir in model_dirs]
model_num = len(models)
model_keys = models[-1].keys()
state_dict = models[-1]
new_state_dict = state_dict.copy()
ref_model = models[-1]
for key in model_keys:
sum_weight = 0.0
for m in models:
sum_weight += m[key]
avg_weight = sum_weight / model_num
new_state_dict[key] = avg_weight
ref_model = new_state_dict
save_model_name = 'swa_' + str(args.starting_model_id) + '-' +\
str(args.ending_model_id) + '.pdparams'
if args.save_dir is not None:
save_dir = os.path.join(args.save_dir, save_model_name)
else:
save_dir = os.path.join(model_dir, save_model_name)
paddle.save(ref_model, save_dir)
print('Model is saved at', save_dir)
if __name__ == '__main__':
main() | true | true |
f72f56c346bc5ea495160905b6c7b1cd1bad0d4b | 4,664 | py | Python | pysnmp-with-texts/COSINE-GLOBAL-REG.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 8 | 2019-05-09T17:04:00.000Z | 2021-06-09T06:50:51.000Z | pysnmp-with-texts/COSINE-GLOBAL-REG.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 4 | 2019-05-31T16:42:59.000Z | 2020-01-31T21:57:17.000Z | pysnmp-with-texts/COSINE-GLOBAL-REG.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 10 | 2019-04-30T05:51:36.000Z | 2022-02-16T03:33:41.000Z | #
# PySNMP MIB module COSINE-GLOBAL-REG (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/COSINE-GLOBAL-REG
# Produced by pysmi-0.3.4 at Wed May 1 12:27:00 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
OctetString, Integer, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "OctetString", "Integer", "ObjectIdentifier")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
SingleValueConstraint, ConstraintsIntersection, ConstraintsUnion, ValueSizeConstraint, ValueRangeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "SingleValueConstraint", "ConstraintsIntersection", "ConstraintsUnion", "ValueSizeConstraint", "ValueRangeConstraint")
ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup")
NotificationType, IpAddress, ObjectIdentity, Gauge32, Counter32, TimeTicks, Counter64, Unsigned32, enterprises, Integer32, MibScalar, MibTable, MibTableRow, MibTableColumn, ModuleIdentity, iso, MibIdentifier, Bits = mibBuilder.importSymbols("SNMPv2-SMI", "NotificationType", "IpAddress", "ObjectIdentity", "Gauge32", "Counter32", "TimeTicks", "Counter64", "Unsigned32", "enterprises", "Integer32", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "ModuleIdentity", "iso", "MibIdentifier", "Bits")
DisplayString, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TextualConvention")
csRoot = ObjectIdentity((1, 3, 6, 1, 4, 1, 3085))
if mibBuilder.loadTexts: csRoot.setStatus('current')
if mibBuilder.loadTexts: csRoot.setDescription('The root of the OID sub-tree assigned to CoSine Commmunication by the Internet Assigned Numbers Authority (IANA).')
csReg = ObjectIdentity((1, 3, 6, 1, 4, 1, 3085, 1))
if mibBuilder.loadTexts: csReg.setStatus('current')
if mibBuilder.loadTexts: csReg.setDescription('Sub-tree for registration.')
csModules = ObjectIdentity((1, 3, 6, 1, 4, 1, 3085, 1, 1))
if mibBuilder.loadTexts: csModules.setStatus('current')
if mibBuilder.loadTexts: csModules.setDescription('Sub-tree for module registration.')
csGeneric = ObjectIdentity((1, 3, 6, 1, 4, 1, 3085, 2))
if mibBuilder.loadTexts: csGeneric.setStatus('current')
if mibBuilder.loadTexts: csGeneric.setDescription('Sub-tree for common object and event definitions.')
csProduct = ObjectIdentity((1, 3, 6, 1, 4, 1, 3085, 3))
if mibBuilder.loadTexts: csProduct.setStatus('current')
if mibBuilder.loadTexts: csProduct.setDescription('Sub-tree for specific object and event definitions.')
csOrionMIB = ObjectIdentity((1, 3, 6, 1, 4, 1, 3085, 3, 1))
if mibBuilder.loadTexts: csOrionMIB.setStatus('current')
if mibBuilder.loadTexts: csOrionMIB.setDescription('Sub-tree for Orion object and event definitions.')
csInVisionMIB = ObjectIdentity((1, 3, 6, 1, 4, 1, 3085, 3, 2))
if mibBuilder.loadTexts: csInVisionMIB.setStatus('current')
if mibBuilder.loadTexts: csInVisionMIB.setDescription('Sub-tree for SMS object and event definitions.')
csCaps = ObjectIdentity((1, 3, 6, 1, 4, 1, 3085, 4))
if mibBuilder.loadTexts: csCaps.setStatus('current')
if mibBuilder.loadTexts: csCaps.setDescription('Sub-tree for agent profiles.')
csReqs = ObjectIdentity((1, 3, 6, 1, 4, 1, 3085, 5))
if mibBuilder.loadTexts: csReqs.setStatus('current')
if mibBuilder.loadTexts: csReqs.setDescription('Sub-tree for management application requirements.')
csExpr = ObjectIdentity((1, 3, 6, 1, 4, 1, 3085, 6))
if mibBuilder.loadTexts: csExpr.setStatus('current')
if mibBuilder.loadTexts: csExpr.setDescription('Sub-tree for experimental definitions.')
cosineGlobalRegMod = ModuleIdentity((1, 3, 6, 1, 4, 1, 3085, 1, 1, 1))
cosineGlobalRegMod.setRevisions(('1998-03-24 13:55',))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
if mibBuilder.loadTexts: cosineGlobalRegMod.setRevisionsDescriptions(('Initial revision.',))
if mibBuilder.loadTexts: cosineGlobalRegMod.setLastUpdated('9803241355Z')
if mibBuilder.loadTexts: cosineGlobalRegMod.setOrganization('Cosine Communication Co.')
if mibBuilder.loadTexts: cosineGlobalRegMod.setContactInfo(' Lianghwa Jou Cosine Communications Co. 1070 Sixth Avenue Suite 200 Belmont, CA 94002 US 650-637-4777 ljou@cosinecom.com')
if mibBuilder.loadTexts: cosineGlobalRegMod.setDescription('. ')
mibBuilder.exportSymbols("COSINE-GLOBAL-REG", csInVisionMIB=csInVisionMIB, csModules=csModules, csReg=csReg, csCaps=csCaps, csRoot=csRoot, csExpr=csExpr, csGeneric=csGeneric, csProduct=csProduct, csOrionMIB=csOrionMIB, cosineGlobalRegMod=cosineGlobalRegMod, PYSNMP_MODULE_ID=cosineGlobalRegMod, csReqs=csReqs)
| 86.37037 | 505 | 0.78066 |
OctetString, Integer, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "OctetString", "Integer", "ObjectIdentifier")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
SingleValueConstraint, ConstraintsIntersection, ConstraintsUnion, ValueSizeConstraint, ValueRangeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "SingleValueConstraint", "ConstraintsIntersection", "ConstraintsUnion", "ValueSizeConstraint", "ValueRangeConstraint")
ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup")
NotificationType, IpAddress, ObjectIdentity, Gauge32, Counter32, TimeTicks, Counter64, Unsigned32, enterprises, Integer32, MibScalar, MibTable, MibTableRow, MibTableColumn, ModuleIdentity, iso, MibIdentifier, Bits = mibBuilder.importSymbols("SNMPv2-SMI", "NotificationType", "IpAddress", "ObjectIdentity", "Gauge32", "Counter32", "TimeTicks", "Counter64", "Unsigned32", "enterprises", "Integer32", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "ModuleIdentity", "iso", "MibIdentifier", "Bits")
DisplayString, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TextualConvention")
csRoot = ObjectIdentity((1, 3, 6, 1, 4, 1, 3085))
if mibBuilder.loadTexts: csRoot.setStatus('current')
if mibBuilder.loadTexts: csRoot.setDescription('The root of the OID sub-tree assigned to CoSine Commmunication by the Internet Assigned Numbers Authority (IANA).')
csReg = ObjectIdentity((1, 3, 6, 1, 4, 1, 3085, 1))
if mibBuilder.loadTexts: csReg.setStatus('current')
if mibBuilder.loadTexts: csReg.setDescription('Sub-tree for registration.')
csModules = ObjectIdentity((1, 3, 6, 1, 4, 1, 3085, 1, 1))
if mibBuilder.loadTexts: csModules.setStatus('current')
if mibBuilder.loadTexts: csModules.setDescription('Sub-tree for module registration.')
csGeneric = ObjectIdentity((1, 3, 6, 1, 4, 1, 3085, 2))
if mibBuilder.loadTexts: csGeneric.setStatus('current')
if mibBuilder.loadTexts: csGeneric.setDescription('Sub-tree for common object and event definitions.')
csProduct = ObjectIdentity((1, 3, 6, 1, 4, 1, 3085, 3))
if mibBuilder.loadTexts: csProduct.setStatus('current')
if mibBuilder.loadTexts: csProduct.setDescription('Sub-tree for specific object and event definitions.')
csOrionMIB = ObjectIdentity((1, 3, 6, 1, 4, 1, 3085, 3, 1))
if mibBuilder.loadTexts: csOrionMIB.setStatus('current')
if mibBuilder.loadTexts: csOrionMIB.setDescription('Sub-tree for Orion object and event definitions.')
csInVisionMIB = ObjectIdentity((1, 3, 6, 1, 4, 1, 3085, 3, 2))
if mibBuilder.loadTexts: csInVisionMIB.setStatus('current')
if mibBuilder.loadTexts: csInVisionMIB.setDescription('Sub-tree for SMS object and event definitions.')
csCaps = ObjectIdentity((1, 3, 6, 1, 4, 1, 3085, 4))
if mibBuilder.loadTexts: csCaps.setStatus('current')
if mibBuilder.loadTexts: csCaps.setDescription('Sub-tree for agent profiles.')
csReqs = ObjectIdentity((1, 3, 6, 1, 4, 1, 3085, 5))
if mibBuilder.loadTexts: csReqs.setStatus('current')
if mibBuilder.loadTexts: csReqs.setDescription('Sub-tree for management application requirements.')
csExpr = ObjectIdentity((1, 3, 6, 1, 4, 1, 3085, 6))
if mibBuilder.loadTexts: csExpr.setStatus('current')
if mibBuilder.loadTexts: csExpr.setDescription('Sub-tree for experimental definitions.')
cosineGlobalRegMod = ModuleIdentity((1, 3, 6, 1, 4, 1, 3085, 1, 1, 1))
cosineGlobalRegMod.setRevisions(('1998-03-24 13:55',))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
if mibBuilder.loadTexts: cosineGlobalRegMod.setRevisionsDescriptions(('Initial revision.',))
if mibBuilder.loadTexts: cosineGlobalRegMod.setLastUpdated('9803241355Z')
if mibBuilder.loadTexts: cosineGlobalRegMod.setOrganization('Cosine Communication Co.')
if mibBuilder.loadTexts: cosineGlobalRegMod.setContactInfo(' Lianghwa Jou Cosine Communications Co. 1070 Sixth Avenue Suite 200 Belmont, CA 94002 US 650-637-4777 ljou@cosinecom.com')
if mibBuilder.loadTexts: cosineGlobalRegMod.setDescription('. ')
mibBuilder.exportSymbols("COSINE-GLOBAL-REG", csInVisionMIB=csInVisionMIB, csModules=csModules, csReg=csReg, csCaps=csCaps, csRoot=csRoot, csExpr=csExpr, csGeneric=csGeneric, csProduct=csProduct, csOrionMIB=csOrionMIB, cosineGlobalRegMod=cosineGlobalRegMod, PYSNMP_MODULE_ID=cosineGlobalRegMod, csReqs=csReqs)
| true | true |
f72f580eceb7de94295ce579304c454df2d18703 | 640 | py | Python | printto/management/commands/cleanprinteddocs.py | starofrainnight/vswprinter | b88591c44da545913a5a238bf4b8cd30bb1eeac8 | [
"Apache-2.0"
] | null | null | null | printto/management/commands/cleanprinteddocs.py | starofrainnight/vswprinter | b88591c44da545913a5a238bf4b8cd30bb1eeac8 | [
"Apache-2.0"
] | null | null | null | printto/management/commands/cleanprinteddocs.py | starofrainnight/vswprinter | b88591c44da545913a5a238bf4b8cd30bb1eeac8 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
import os
import arrow
from django.core.management.base import BaseCommand
from printto.models import UploadedFileModel
class Command(BaseCommand):
    """Management command that deletes uploaded files older than 3 minutes.

    Removes each matching record's backing file from disk (best effort),
    then deletes the corresponding database rows.
    """

    help = 'Clean all printed docs after 3 minutes'

    def handle(self, *args, **options):
        # Cutoff: anything uploaded more than 3 minutes ago is stale.
        cutoff = arrow.now().shift(minutes=-3).datetime
        records = UploadedFileModel.objects.filter(datetime__lt=cutoff)
        for record in records:
            # Best-effort file removal.  Bug fix: the original bare
            # ``except:`` also swallowed SystemExit/KeyboardInterrupt;
            # narrowed to ordinary exceptions while keeping the
            # deliberate best-effort behavior.
            try:
                os.remove(record.file.path)
            except Exception:
                pass
        if records:
            records.delete()
| 24.615385 | 73 | 0.621875 |
import os
import arrow
from django.core.management.base import BaseCommand
from printto.models import UploadedFileModel
class Command(BaseCommand):
help = 'Clean all printed docs after 3 minutes'
def handle(self, *args, **options):
now_time = arrow.now()
now_time = now_time.shift(minutes=-3)
now_time = now_time.datetime
records = UploadedFileModel.objects.filter(datetime__lt=now_time)
for record in records:
try:
os.remove(record.file.path)
except:
pass
if records:
records.delete()
| true | true |
f72f594f8368ca86790fb4ec321791290b22fa4b | 7,735 | py | Python | deepspeed/runtime/zero/linear.py | manuelciosici/DeepSpeed | 3da841853ca07abf3a09e7bd325a576c4e642c11 | [
"MIT"
] | null | null | null | deepspeed/runtime/zero/linear.py | manuelciosici/DeepSpeed | 3da841853ca07abf3a09e7bd325a576c4e642c11 | [
"MIT"
] | null | null | null | deepspeed/runtime/zero/linear.py | manuelciosici/DeepSpeed | 3da841853ca07abf3a09e7bd325a576c4e642c11 | [
"MIT"
] | null | null | null | #Linear Module to use with ZeRO Stage 3 to allow for parameter memory release
#after the module execution during forward
#Instead of saving variables using save_for_backward, we save variable ids
#Allowing us to retrieve the variable without creating pointer to it
#Which allows for underlying tensor to be garbage collected
#When partitioned as needed by the Zero Stage 3 optimizer
#TODO instead of patching Linear module, we could patch the ctx.save_for_backward
#ctx.saved_tensors so that this approach works for all nn modules that are built upon
#torch.nn.function. However the issue is that many modules uses C++ implementations
#which does not have pytorch implementation. Eg torch.addmm which acts as a functional
#when implemented outside of torch.autograd.Function
import math
import torch
from torch import Tensor
from torch.nn.parameter import Parameter
from torch.nn import init
from torch.nn.modules.module import Module
from deepspeed.runtime.utils import noop_decorator
# Global registry mapping id(tensor) -> tensor.  forward() stores weight and
# bias here (keyed by object id) instead of in ctx.save_for_backward, so
# autograd holds no reference to the parameters and ZeRO stage 3 can release
# the underlying storage (see this file's header comments).
tensor_map = {}
def print_rank_0(message, debug=False, force=False):
    """Print ``message`` on the global rank-0 process only, and only when
    ``debug`` or ``force`` is set.  Requires torch.distributed to be
    initialized (``get_rank`` is called unconditionally)."""
    on_rank_zero = torch.distributed.get_rank() == 0
    if on_rank_zero and (debug or force):
        print(message)
# torch.cuda.amp's custom_fwd/custom_bwd decorators exist only on
# sufficiently new PyTorch builds; fall back to no-op decorators when they
# are unavailable so the module still imports (just without autocast).
try:
    autocast_custom_fwd = torch.cuda.amp.custom_fwd
    autocast_custom_bwd = torch.cuda.amp.custom_bwd
except (ImportError, AttributeError) as exp:
    autocast_custom_fwd = noop_decorator
    autocast_custom_bwd = noop_decorator
class LinearFunctionForZeroStage3(torch.autograd.Function):
    """Autograd Function computing ``y = input @ weight.t() (+ bias)`` for
    ZeRO stage 3.

    Instead of saving ``weight``/``bias`` with ``ctx.save_for_backward``
    (which would keep the tensors referenced by the autograd graph), only
    their ``id()`` values are saved and the tensors themselves are parked
    in the module-level ``tensor_map``.  backward() retrieves them by id,
    so ZeRO stage 3 can partition/release the parameter storage between
    forward and backward (see this file's header comments).
    NOTE(review): entries written to ``tensor_map`` are never removed
    here; presumably ids are reused across steps so the map stays bounded
    -- confirm against the stage-3 optimizer.
    """
    # Note that both forward and backward are @staticmethods
    @staticmethod
    @autocast_custom_fwd
    # bias is an optional argument
    def forward(ctx, input, weight, bias=None):
        """Return the linear output; stash weight/bias by object id."""
        #print("In ZeRO Linear Function")
        weight_id = id(weight)
        bias_id = id(bias)
        #ctx.save_for_backward(input, weight, bias)
        # Save only the ids (as tensors) -- not the parameters themselves.
        ctx.save_for_backward(input, torch.tensor(weight_id), torch.tensor(bias_id))
        tensor_map[weight_id] = weight
        tensor_map[bias_id] = bias
        if input.dim() == 2 and bias is not None:
            # fused op is marginally faster
            ret = torch.addmm(bias, input, weight.t())
        else:
            output = input.matmul(weight.t())
            if bias is not None:
                output += bias
            ret = output
        return ret
    # This function has only a single output, so it gets only one gradient
    @staticmethod
    @autocast_custom_bwd
    def backward(ctx, grad_output):
        """Recover weight/bias from ``tensor_map`` and compute the three
        gradients (input, weight, bias) of the linear op."""
        # This is a pattern that is very convenient - at the top of backward
        # unpack saved_tensors and initialize all gradients w.r.t. inputs to
        # None. Thanks to the fact that additional trailing Nones are
        # ignored, the return statement is simple even when the function has
        # optional inputs.
        #input, weight, bias = ctx.saved_tensors
        input, weight_id, bias_id = ctx.saved_tensors
        weight = tensor_map[weight_id.item()]
        bias = tensor_map[bias_id.item()]
        grad_input = grad_weight = grad_bias = None
        #print(f"backward shaped grad_output {grad_output.shape}, input {input.shape}, weight {weight.shape} and bias {bias.shape if bias is not None else None}")
        # These needs_input_grad checks are optional and there only to
        # improve efficiency. If you want to make your code simpler, you can
        # skip them. Returning gradients for inputs that don't require it is
        # not an error.
        if ctx.needs_input_grad[0]:
            #print(f"Computing grad input weight {weight.shape} grad_output {grad_output.shape}")
            grad_input = grad_output.matmul(weight)
            #print(f"Computed grad input {grad_input.shape}")
        if ctx.needs_input_grad[1]:
            #print("Computing grad weight")
            dim = grad_output.dim()
            if dim > 2:
                # Collapse leading (e.g. batch/sequence) dims before matmul.
                grad_weight = grad_output.reshape(-1,
                                                  grad_output.shape[-1]).t().matmul(
                                                      input.reshape(-1,
                                                                    input.shape[-1]))
            else:
                grad_weight = grad_output.t().matmul(input)
            #print(f"Computed grad weight grad_weight {grad_weight.shape}")
        if bias is not None and ctx.needs_input_grad[2]:
            #print("Computing grad bias")
            grad_bias = grad_output.sum(0)
            #print("Done computing grad bias")
            #print("needs bias")
        #print(f"backward shaped grad_input {grad_input.shape}, grad_weight {grad_weight.shape}, grad_bias {grad_bias.shape if grad_bias is not None else None}")
        return grad_input, grad_weight, grad_bias
def zero3_linear_wrap(input, weight, bias=None):
    """Functional wrapper around ``LinearFunctionForZeroStage3.apply``,
    omitting ``bias`` from the call when it is ``None``."""
    args = (input, weight) if bias is None else (input, weight, bias)
    return LinearFunctionForZeroStage3.apply(*args)
class LinearModuleForZeroStage3(Module):
    r"""Linear layer (:math:`y = xA^T + b`) for use with ZeRO stage 3.

    Parameter layout and initialization match ``torch.nn.Linear``; the
    forward pass goes through ``LinearFunctionForZeroStage3`` so parameter
    memory can be released after the module executes during forward.

    Args:
        in_features: size of each input sample.
        out_features: size of each output sample.
        bias: if ``False``, the layer learns no additive bias.

    Shape:
        - Input: :math:`(N, *, H_{in})` with :math:`H_{in} = \text{in\_features}`
        - Output: :math:`(N, *, H_{out})` with :math:`H_{out} = \text{out\_features}`
    """
    __constants__ = ['in_features', 'out_features']
    in_features: int
    out_features: int
    weight: Tensor

    def __init__(self, in_features: int, out_features: int, bias: bool = True) -> None:
        super(LinearModuleForZeroStage3, self).__init__()
        print("Building ZeRO module")
        self.in_features = in_features
        self.out_features = out_features
        # Weight stored as (out_features, in_features), like nn.Linear.
        self.weight = Parameter(torch.Tensor(out_features, in_features))
        if not bias:
            self.register_parameter('bias', None)
        else:
            self.bias = Parameter(torch.Tensor(out_features))
        self.reset_parameters()

    def reset_parameters(self) -> None:
        # Same init scheme as torch.nn.Linear.
        init.kaiming_uniform_(self.weight, a=math.sqrt(5))
        if self.bias is None:
            return
        fan_in, _ = init._calculate_fan_in_and_fan_out(self.weight)
        limit = 1 / math.sqrt(fan_in)
        init.uniform_(self.bias, -limit, limit)

    def forward(self, input: Tensor) -> Tensor:
        return LinearFunctionForZeroStage3.apply(input, self.weight, self.bias)

    def extra_repr(self) -> str:
        return 'in_features={}, out_features={}, bias={}'.format(
            self.in_features, self.out_features, self.bias is not None)
| 41.363636 | 162 | 0.644473 |
import math
import torch
from torch import Tensor
from torch.nn.parameter import Parameter
from torch.nn import init
from torch.nn.modules.module import Module
from deepspeed.runtime.utils import noop_decorator
tensor_map = {}
def print_rank_0(message, debug=False, force=False):
if torch.distributed.get_rank() == 0 and (debug or force):
print(message)
try:
autocast_custom_fwd = torch.cuda.amp.custom_fwd
autocast_custom_bwd = torch.cuda.amp.custom_bwd
except (ImportError, AttributeError) as exp:
autocast_custom_fwd = noop_decorator
autocast_custom_bwd = noop_decorator
class LinearFunctionForZeroStage3(torch.autograd.Function):
@staticmethod
@autocast_custom_fwd
def forward(ctx, input, weight, bias=None):
weight_id = id(weight)
bias_id = id(bias)
ctx.save_for_backward(input, torch.tensor(weight_id), torch.tensor(bias_id))
tensor_map[weight_id] = weight
tensor_map[bias_id] = bias
if input.dim() == 2 and bias is not None:
ret = torch.addmm(bias, input, weight.t())
else:
output = input.matmul(weight.t())
if bias is not None:
output += bias
ret = output
return ret
@staticmethod
@autocast_custom_bwd
def backward(ctx, grad_output):
input, weight_id, bias_id = ctx.saved_tensors
weight = tensor_map[weight_id.item()]
bias = tensor_map[bias_id.item()]
grad_input = grad_weight = grad_bias = None
# not an error.
if ctx.needs_input_grad[0]:
#print(f"Computing grad input weight {weight.shape} grad_output {grad_output.shape}")
grad_input = grad_output.matmul(weight)
#print(f"Computed grad input {grad_input.shape}")
if ctx.needs_input_grad[1]:
#print("Computing grad weight")
dim = grad_output.dim()
if dim > 2:
grad_weight = grad_output.reshape(-1,
grad_output.shape[-1]).t().matmul(
input.reshape(-1,
input.shape[-1]))
else:
grad_weight = grad_output.t().matmul(input)
#print(f"Computed grad weight grad_weight {grad_weight.shape}")
if bias is not None and ctx.needs_input_grad[2]:
#print("Computing grad bias")
grad_bias = grad_output.sum(0)
#print("Done computing grad bias")
#print("needs bias")
#print(f"backward shaped grad_input {grad_input.shape}, grad_weight {grad_weight.shape}, grad_bias {grad_bias.shape if grad_bias is not None else None}")
return grad_input, grad_weight, grad_bias
def zero3_linear_wrap(input, weight, bias=None):
if bias is None:
return LinearFunctionForZeroStage3.apply(input, weight)
else:
return LinearFunctionForZeroStage3.apply(input, weight, bias)
class LinearModuleForZeroStage3(Module):
__constants__ = ['in_features', 'out_features']
in_features: int
out_features: int
weight: Tensor
def __init__(self, in_features: int, out_features: int, bias: bool = True) -> None:
super(LinearModuleForZeroStage3, self).__init__()
print("Building ZeRO module")
self.in_features = in_features
self.out_features = out_features
self.weight = Parameter(torch.Tensor(out_features, in_features))
if bias:
self.bias = Parameter(torch.Tensor(out_features))
else:
self.register_parameter('bias', None)
self.reset_parameters()
def reset_parameters(self) -> None:
init.kaiming_uniform_(self.weight, a=math.sqrt(5))
if self.bias is not None:
fan_in, _ = init._calculate_fan_in_and_fan_out(self.weight)
bound = 1 / math.sqrt(fan_in)
init.uniform_(self.bias, -bound, bound)
def forward(self, input: Tensor) -> Tensor:
return LinearFunctionForZeroStage3.apply(input, self.weight, self.bias)
def extra_repr(self) -> str:
return 'in_features={}, out_features={}, bias={}'.format(
self.in_features,
self.out_features,
self.bias is not None)
| true | true |
f72f59c3cfb5739da698e3f5202cc2b3d8fead0a | 3,006 | py | Python | utilities/modify_torsions.py | slochower/host-guest-benchmarks | c398b499fe6dbae39523278946c0e25eb78d6d66 | [
"MIT"
] | null | null | null | utilities/modify_torsions.py | slochower/host-guest-benchmarks | c398b499fe6dbae39523278946c0e25eb78d6d66 | [
"MIT"
] | 8 | 2019-07-05T17:55:27.000Z | 2022-03-21T18:59:50.000Z | utilities/modify_torsions.py | slochower/host-guest-benchmarks | c398b499fe6dbae39523278946c0e25eb78d6d66 | [
"MIT"
] | 1 | 2020-05-05T22:51:21.000Z | 2020-05-05T22:51:21.000Z | from openforcefield.typing.engines import smirnoff
from simtk import unit
force_field = smirnoff.ForceField('smirnoff99Frosst_experimental.offxml')

# Replace three SMIRNOFF99Frosst proper-torsion terms (t9, t87, t5) with the
# corresponding GAFF v2.1 values.  Each entry holds: the SMIRKS pattern, the
# smirnoff99Frosst (k [kcal/mol], periodicity) pairs expected to be present
# (sanity-checked before overwriting), and the GAFF v2.1 pairs to install.
# All phases are 0 degrees and all idivf values are 1 in both sets.
# Cleanup: the original repeated the same fetch/assert/assign sequence three
# times; the data now lives in one table driven through a helper.
_TORSION_REPLACEMENTS = [
    # t9: H1-C1-C2-O2
    ("[#1:1]-[#6X4:2]-[#6X4:3]-[#8X2:4]",
     [(0.000, 3), (0.250, 1)],
     [(0.16, 3)]),
    # t87: C1-O1-C4-C3
    ("[#6X4:1]-[#6X4:2]-[#8X2H0:3]-[#6X4:4]",
     [(0.383, 3), (0.100, 2)],
     [(0.16, 2), (0.24, 3)]),
    # t5: O1-C4-C3-O3
    ("[#8X2:1]-[#6X4:2]-[#6X4:3]-[#8X2:4]",
     [(0.144, 3), (1.175, 2)],
     [(0.02, 1), (1.01, 3)]),
]


def _replace_torsion(ff, smirks, expected, replacement):
    """Swap one proper-torsion term after verifying its current values."""
    parameter = ff.get_parameter_handler('ProperTorsions').parameters[smirks]
    for i, (k, periodicity) in enumerate(expected):
        assert parameter.k[i] == k * unit.kilocalorie_per_mole
        assert parameter.periodicity[i] == periodicity
    parameter.k = [k * unit.kilocalorie_per_mole for k, _ in replacement]
    parameter.periodicity = [p for _, p in replacement]
    parameter.phase = [0 * unit.degrees for _ in replacement]
    parameter.idivf = [1 for _ in replacement]


for _smirks, _expected, _new in _TORSION_REPLACEMENTS:
    _replace_torsion(force_field, _smirks, _expected, _new)

force_field.to_file("smirnoff99Frosst-experimental-t5-t9-t87-modified.offxml")
# Round-trip the written file to make sure it parses.
force_field = smirnoff.ForceField("smirnoff99Frosst-experimental-t5-t9-t87-modified.offxml")
| 27.081081 | 115 | 0.627412 | from openforcefield.typing.engines import smirnoff
from simtk import unit
force_field = smirnoff.ForceField('smirnoff99Frosst_experimental.offxml')
ns').parameters["[#1:1]-[#6X4:2]-[#6X4:3]-[#8X2:4]"]
assert(parameter.k[0] == 0.0 * unit.kilocalorie_per_mole)
assert(parameter.k[1] == 0.250 * unit.kilocalorie_per_mole)
assert(parameter.periodicity[0] == 3)
assert(parameter.periodicity[1] == 1)
parameter.k = [0.16 * unit.kilocalorie_per_mole]
parameter.periodicity = [3]
parameter.phase = [0 * unit.degrees]
parameter.idivf = [1]
rameters["[#6X4:1]-[#6X4:2]-[#8X2H0:3]-[#6X4:4]"]
assert(parameter.k[0] == 0.383 * unit.kilocalorie_per_mole)
assert(parameter.k[1] == 0.100 * unit.kilocalorie_per_mole)
assert(parameter.periodicity[0] == 3)
assert(parameter.periodicity[1] == 2)
parameter.k = [0.16 * unit.kilocalorie_per_mole, 0.24 * unit.kilocalorie_per_mole]
parameter.periodicity = [2, 3]
parameter.phase = [0 * unit.degrees, 0 * unit.degrees]
parameter.idivf = [1, 1]
').parameters["[#8X2:1]-[#6X4:2]-[#6X4:3]-[#8X2:4]"]
assert(parameter.k[0] == 0.144 * unit.kilocalorie_per_mole)
assert(parameter.k[1] == 1.175 * unit.kilocalorie_per_mole)
assert(parameter.periodicity[0] == 3)
assert(parameter.periodicity[1] == 2)
parameter.k = [0.02 * unit.kilocalorie_per_mole, 1.01 * unit.kilocalorie_per_mole]
parameter.periodicity = [1, 3]
parameter.phase = [0 * unit.degrees, 0 * unit.degrees]
parameter.idivf = [1, 1]
force_field.to_file("smirnoff99Frosst-experimental-t5-t9-t87-modified.offxml")
force_field = smirnoff.ForceField("smirnoff99Frosst-experimental-t5-t9-t87-modified.offxml")
| true | true |
f72f59f6d7bf39d04bcaac65bd6d4197dac899ac | 2,250 | py | Python | sourcecode/VaspTestKPOINT.py | yizhiwoniu/VaspCZ | 44b890cf18d649c428c21e3f8fadc3222453d84d | [
"MIT"
] | 1 | 2021-01-10T04:01:57.000Z | 2021-01-10T04:01:57.000Z | sourcecode/VaspTestKPOINT.py | yizhiwoniu/VaspCZ | 44b890cf18d649c428c21e3f8fadc3222453d84d | [
"MIT"
] | null | null | null | sourcecode/VaspTestKPOINT.py | yizhiwoniu/VaspCZ | 44b890cf18d649c428c21e3f8fadc3222453d84d | [
"MIT"
] | null | null | null | #!/home/zhangzhengde/bin/bin/python3
#coding=utf-8
import os
import argparse
import VaspCZ.zzdlib as zzd
def modify_vasp_sh(jobname, nodes, ppn):
with open('./Vasp.sh', 'r') as f:
data = f.readlines()
new_data = []
for line in data:
if ' #PBS -N' in line:
new_data.append(f' #PBS -N {jobname}\n')
elif ' #PBS -l nodes' in line:
new_data.append(f' #PBS -l nodes={nodes}:ppn={ppn}\n')
else:
new_data.append(line)
with open('./Vasp.sh', 'w') as f:
f.writelines(new_data)
def run(jobname, nodes, ppn, K):
input_files = 'INCAR,POSCAR,POTCAR,KPOINTS'.split(',')
for i in input_files:
if i not in os.listdir():
raise NameError(f'ENCUT Test: input file "{i}" missing in current dir.')
if os.path.isdir(K): # 有目录什么也不做
print(f'k_mesh:{K} already exists, do nothing.')
pass
else:
os.system('mkdir '+K) # 创建目录
for file in input_files:
if os.path.isfile(file):
os.system(f'cp {file} {K}')# 拷贝输入文件
os.chdir(K) # 进入创建的目录
vasp_sh_path = zzd.File.Vaspsh_path()
os.system(f'cp {vasp_sh_path}/Vasp.sh .')
# 无需修改INCAT
# 无需修改POTCAR
# 无需修改POSCAR
# 修改KPOINTS
with open('./KPOINTS', 'r') as f:
data = f.readlines()
data[3] = f'{K[0]} {K[1]} {K[2]}\n'
with open('./KPOINTS', 'w') as f:
f.writelines(data)
# 修改Vasp.sh,指定任务和任务名,修改,提交任务
modify_vasp_sh(f'{jobname}_{K}', nodes, ppn)
# 测试代码,打印
## os.system('cat KPOINTS')
## os.system('cat Vasp.sh')
# os.system('qsub Vasp.sh') # 提交任务
zzd.Vasp.check_and_qsub()
os.chdir('..')
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-jb', '--jobname_prefix', default='k_test', type=str)
parser.add_argument('-nd', '--nodes', default='1', type=str)
parser.add_argument('-np', '--ppn', default='8', type=str)
parser.add_argument('-k', '--k_mesh', default='111,333,555,777,999', type=str)
args = parser.parse_args()
jobname = args.jobname_prefix
nodes = args.nodes
k_mesh = args.k_mesh.split(',')
ppn = args.ppn
print(f'running k_point test \n parameter: \njobname_prefix:{jobname} nodes:{nodes} ppn:{ppn} \nk_mesh:{k_mesh}')
inp = input('confirm run ([y]es/no): ')
if inp in ['', 'y', 'yes', 'Y', 'Yes', 'YES']:
for K in k_mesh:
run(jobname, nodes, ppn, K)
else:
print('Did not run.') | 29.605263 | 114 | 0.643556 |
import os
import argparse
import VaspCZ.zzdlib as zzd
def modify_vasp_sh(jobname, nodes, ppn):
with open('./Vasp.sh', 'r') as f:
data = f.readlines()
new_data = []
for line in data:
if ' #PBS -N' in line:
new_data.append(f' #PBS -N {jobname}\n')
elif ' #PBS -l nodes' in line:
new_data.append(f' #PBS -l nodes={nodes}:ppn={ppn}\n')
else:
new_data.append(line)
with open('./Vasp.sh', 'w') as f:
f.writelines(new_data)
def run(jobname, nodes, ppn, K):
input_files = 'INCAR,POSCAR,POTCAR,KPOINTS'.split(',')
for i in input_files:
if i not in os.listdir():
raise NameError(f'ENCUT Test: input file "{i}" missing in current dir.')
if os.path.isdir(K):
print(f'k_mesh:{K} already exists, do nothing.')
pass
else:
os.system('mkdir '+K)
for file in input_files:
if os.path.isfile(file):
os.system(f'cp {file} {K}')
os.chdir(K)
vasp_sh_path = zzd.File.Vaspsh_path()
os.system(f'cp {vasp_sh_path}/Vasp.sh .')
with open('./KPOINTS', 'r') as f:
data = f.readlines()
data[3] = f'{K[0]} {K[1]} {K[2]}\n'
with open('./KPOINTS', 'w') as f:
f.writelines(data)
modify_vasp_sh(f'{jobname}_{K}', nodes, ppn)
_name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-jb', '--jobname_prefix', default='k_test', type=str)
parser.add_argument('-nd', '--nodes', default='1', type=str)
parser.add_argument('-np', '--ppn', default='8', type=str)
parser.add_argument('-k', '--k_mesh', default='111,333,555,777,999', type=str)
args = parser.parse_args()
jobname = args.jobname_prefix
nodes = args.nodes
k_mesh = args.k_mesh.split(',')
ppn = args.ppn
print(f'running k_point test \n parameter: \njobname_prefix:{jobname} nodes:{nodes} ppn:{ppn} \nk_mesh:{k_mesh}')
inp = input('confirm run ([y]es/no): ')
if inp in ['', 'y', 'yes', 'Y', 'Yes', 'YES']:
for K in k_mesh:
run(jobname, nodes, ppn, K)
else:
print('Did not run.') | true | true |
f72f5a8790c8b9fe823eb02d854fed6e481d1477 | 4,519 | py | Python | sciencebeam_gym/convert/grobid/grobid_xml_enhancer.py | elifesciences/sciencebeam-gym | 3ad654e08775e0c0cdd256753e14093bb5a42d44 | [
"MIT"
] | 25 | 2017-07-25T12:44:55.000Z | 2020-09-30T22:16:50.000Z | sciencebeam_gym/convert/grobid/grobid_xml_enhancer.py | elifesciences/sciencebeam-gym | 3ad654e08775e0c0cdd256753e14093bb5a42d44 | [
"MIT"
] | 192 | 2017-11-29T08:57:03.000Z | 2022-03-29T18:44:41.000Z | sciencebeam_gym/convert/grobid/grobid_xml_enhancer.py | elifesciences/sciencebeam-gym | 3ad654e08775e0c0cdd256753e14093bb5a42d44 | [
"MIT"
] | 6 | 2019-02-01T18:49:33.000Z | 2020-07-26T08:18:46.000Z | import logging
from io import BytesIO
from lxml import etree
from lxml.builder import E
from sciencebeam_gym.inference_model.extract_to_xml import (
XmlPaths,
create_node_recursive,
rsplit_xml_path
)
from .grobid_service import (
grobid_service,
GrobidApiPaths
)
TEI_NS = 'http://www.tei-c.org/ns/1.0'
TEI_NS_PREFIX = '{%s}' % TEI_NS
TEI_PERS_NAME = TEI_NS_PREFIX + 'persName'
TEI_FORNAME = TEI_NS_PREFIX + 'forename'
TEI_SURNAME = TEI_NS_PREFIX + 'surname'
JATS_SURNAME = 'surname'
JATS_GIVEN_NAMES = 'given-names'
JATS_ADDR_LINE = 'addr-line'
JATS_NAMED_CONTENT = 'named-content'
JATS_INSTITUTION = 'institution'
def get_logger():
return logging.getLogger(__name__)
def create_or_append(xml_root, path):
parent_path, tag_name = rsplit_xml_path(path)
parent_node = create_node_recursive(xml_root, parent_path, exists_ok=True)
node = E(tag_name) # pylint: disable=not-callable
parent_node.append(node)
return node
class GrobidXmlEnhancer(object):
def __init__(self, grobid_url, start_service):
self.process_header_names = grobid_service(
grobid_url,
GrobidApiPaths.PROCESS_HEADER_NAMES,
start_service=start_service,
field_name='names'
)
self.process_affiliations = grobid_service(
grobid_url,
GrobidApiPaths.PROCESS_AFFILIATIONS,
start_service=start_service,
field_name='affiliations'
)
def process_and_replace_authors(self, xml_root):
author_nodes = list(xml_root.findall(XmlPaths.AUTHOR))
if author_nodes:
authors = '\n'.join(x.text for x in author_nodes)
get_logger().debug('authors: %s', authors)
grobid_response = self.process_header_names(authors)
get_logger().debug('grobid_response: %s', grobid_response)
response_xml_root = etree.fromstring('<dummy>%s</dummy>' % grobid_response)
for author in author_nodes:
author.getparent().remove(author)
for pers_name in response_xml_root.findall(TEI_PERS_NAME):
get_logger().debug('pers_name: %s', pers_name)
node = create_or_append(xml_root, XmlPaths.AUTHOR)
for surname in pers_name.findall(TEI_SURNAME):
node.append(E(JATS_SURNAME, surname.text)) # pylint: disable=not-callable
forenames = [x.text for x in pers_name.findall(TEI_FORNAME)]
if forenames:
node.append(
E(JATS_GIVEN_NAMES, ' '.join(forenames)) # pylint: disable=not-callable
)
return xml_root
def process_and_replace_affiliations(self, xml_root):
aff_nodes = list(xml_root.findall(XmlPaths.AUTHOR_AFF))
if aff_nodes:
affiliations = '\n'.join(x.text for x in aff_nodes)
get_logger().debug('affiliations: %s', affiliations)
grobid_response = self.process_affiliations(affiliations)
get_logger().debug('grobid_response: %s', grobid_response)
response_xml_root = etree.fromstring('<dummy>%s</dummy>' % grobid_response)
for aff in aff_nodes:
aff.getparent().remove(aff)
for affiliation in response_xml_root.findall('affiliation'):
get_logger().debug('affiliation: %s', affiliation)
node = create_or_append(xml_root, XmlPaths.AUTHOR_AFF)
for department in affiliation.xpath('./orgName[@type="department"]'):
node.append(E( # pylint: disable=not-callable
JATS_ADDR_LINE,
E( # pylint: disable=not-callable
JATS_NAMED_CONTENT,
department.text,
{
'content-type': 'department'
}
)
))
for institution in affiliation.xpath('./orgName[@type="institution"]'):
node.append(E( # pylint: disable=not-callable
JATS_INSTITUTION,
institution.text
))
def __call__(self, extracted_xml):
xml_root = etree.parse(BytesIO(extracted_xml))
self.process_and_replace_authors(xml_root)
self.process_and_replace_affiliations(xml_root)
return etree.tostring(xml_root, pretty_print=True)
| 39.295652 | 96 | 0.614516 | import logging
from io import BytesIO
from lxml import etree
from lxml.builder import E
from sciencebeam_gym.inference_model.extract_to_xml import (
XmlPaths,
create_node_recursive,
rsplit_xml_path
)
from .grobid_service import (
grobid_service,
GrobidApiPaths
)
TEI_NS = 'http://www.tei-c.org/ns/1.0'
TEI_NS_PREFIX = '{%s}' % TEI_NS
TEI_PERS_NAME = TEI_NS_PREFIX + 'persName'
TEI_FORNAME = TEI_NS_PREFIX + 'forename'
TEI_SURNAME = TEI_NS_PREFIX + 'surname'
JATS_SURNAME = 'surname'
JATS_GIVEN_NAMES = 'given-names'
JATS_ADDR_LINE = 'addr-line'
JATS_NAMED_CONTENT = 'named-content'
JATS_INSTITUTION = 'institution'
def get_logger():
return logging.getLogger(__name__)
def create_or_append(xml_root, path):
parent_path, tag_name = rsplit_xml_path(path)
parent_node = create_node_recursive(xml_root, parent_path, exists_ok=True)
node = E(tag_name)
parent_node.append(node)
return node
class GrobidXmlEnhancer(object):
def __init__(self, grobid_url, start_service):
self.process_header_names = grobid_service(
grobid_url,
GrobidApiPaths.PROCESS_HEADER_NAMES,
start_service=start_service,
field_name='names'
)
self.process_affiliations = grobid_service(
grobid_url,
GrobidApiPaths.PROCESS_AFFILIATIONS,
start_service=start_service,
field_name='affiliations'
)
def process_and_replace_authors(self, xml_root):
author_nodes = list(xml_root.findall(XmlPaths.AUTHOR))
if author_nodes:
authors = '\n'.join(x.text for x in author_nodes)
get_logger().debug('authors: %s', authors)
grobid_response = self.process_header_names(authors)
get_logger().debug('grobid_response: %s', grobid_response)
response_xml_root = etree.fromstring('<dummy>%s</dummy>' % grobid_response)
for author in author_nodes:
author.getparent().remove(author)
for pers_name in response_xml_root.findall(TEI_PERS_NAME):
get_logger().debug('pers_name: %s', pers_name)
node = create_or_append(xml_root, XmlPaths.AUTHOR)
for surname in pers_name.findall(TEI_SURNAME):
node.append(E(JATS_SURNAME, surname.text))
forenames = [x.text for x in pers_name.findall(TEI_FORNAME)]
if forenames:
node.append(
E(JATS_GIVEN_NAMES, ' '.join(forenames))
)
return xml_root
def process_and_replace_affiliations(self, xml_root):
aff_nodes = list(xml_root.findall(XmlPaths.AUTHOR_AFF))
if aff_nodes:
affiliations = '\n'.join(x.text for x in aff_nodes)
get_logger().debug('affiliations: %s', affiliations)
grobid_response = self.process_affiliations(affiliations)
get_logger().debug('grobid_response: %s', grobid_response)
response_xml_root = etree.fromstring('<dummy>%s</dummy>' % grobid_response)
for aff in aff_nodes:
aff.getparent().remove(aff)
for affiliation in response_xml_root.findall('affiliation'):
get_logger().debug('affiliation: %s', affiliation)
node = create_or_append(xml_root, XmlPaths.AUTHOR_AFF)
for department in affiliation.xpath('./orgName[@type="department"]'):
node.append(E(
JATS_ADDR_LINE,
E(
JATS_NAMED_CONTENT,
department.text,
{
'content-type': 'department'
}
)
))
for institution in affiliation.xpath('./orgName[@type="institution"]'):
node.append(E(
JATS_INSTITUTION,
institution.text
))
def __call__(self, extracted_xml):
xml_root = etree.parse(BytesIO(extracted_xml))
self.process_and_replace_authors(xml_root)
self.process_and_replace_affiliations(xml_root)
return etree.tostring(xml_root, pretty_print=True)
| true | true |
f72f5ada789609c9da8c75cf0cc91f4f7569b058 | 19,098 | py | Python | python/test/function/refs.py | daniel-falk/nnabla | 3fe132ea52dc10521cc029a5d6ba8f565cf65ccf | [
"Apache-2.0"
] | 2,792 | 2017-06-26T13:05:44.000Z | 2022-03-28T07:55:26.000Z | python/test/function/refs.py | daniel-falk/nnabla | 3fe132ea52dc10521cc029a5d6ba8f565cf65ccf | [
"Apache-2.0"
] | 138 | 2017-06-27T07:04:44.000Z | 2022-02-28T01:37:15.000Z | python/test/function/refs.py | daniel-falk/nnabla | 3fe132ea52dc10521cc029a5d6ba8f565cf65ccf | [
"Apache-2.0"
] | 380 | 2017-06-26T13:23:52.000Z | 2022-03-25T16:51:30.000Z | # Copyright 2017,2018,2019,2020,2021 Sony Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import division
from six.moves import range
import itertools
import numpy as np
def get_conv_out_size(w, k, p, s, d=1):
return (w + 2 * p - (d * (k - 1) + 1)) // s + 1
def get_deconv_out_size(w, k, p, s, d):
return s * (w - 1) - 2 * p + (d * (k - 1) + 1)
def get_pool_out_size(w, k, p, s, ignore_border):
return (w + p - ((k - p) if ignore_border else 1)) // s + 1
class ChannelLastToFirstTranspose(object):
def __init__(self, dim, kdim):
base_axis = dim - kdim - 1
up_to_base = tuple(range(0, base_axis))
self.axes = up_to_base + (dim - 1,) + tuple(range(base_axis, dim - 1))
self.inv_axes = up_to_base + \
tuple(range(base_axis + 1, dim)) + (base_axis,)
def __call__(self, x):
return x.transpose(self.axes).copy()
def inv(self, x):
return x.transpose(self.inv_axes).copy()
def convolution_1d(x, w, b, pad, stride, dilation, group, dtype=np.float32):
"""
"""
C, H = x.shape
K, Cg, M = w.shape
Ho = get_conv_out_size(H, M, pad[0], stride[0], dilation[0])
x_pad = np.zeros((C, H + pad[0] * 2), dtype=dtype)
x_pad[:, pad[0]:pad[0] + H] = x
y = np.zeros((K, Ho), dtype=dtype)
for k in range(K):
g = int(k // (K // group))
for ho in range(Ho):
hi = ho * stride[0] + np.arange(0, M) * dilation[0]
ci = np.arange(g * Cg, (g + 1) * Cg)
y[k, ho] = (w[k] * x_pad[np.ix_(ci, hi)]).sum()
if b is not None:
y += b[..., np.newaxis]
return y
def convolution_2d(x, w, b, pad, stride, dilation, group, dtype=np.float32):
"""
"""
C, H, W = x.shape
K, Cg, M, N = w.shape
Ho = get_conv_out_size(H, M, pad[0], stride[0], dilation[0])
Wo = get_conv_out_size(W, N, pad[1], stride[1], dilation[1])
x_pad = np.zeros((C, H + pad[0] * 2, W + pad[1] * 2), dtype=dtype)
x_pad[:, pad[0]:pad[0] + H, pad[1]:pad[1] + W] = x
y = np.zeros((K, Ho, Wo), dtype=dtype)
for k in range(K):
g = int(k // (K // group))
for ho in range(Ho):
for wo in range(Wo):
hi = ho * stride[0] + np.arange(0, M) * dilation[0]
wi = wo * stride[1] + np.arange(0, N) * dilation[1]
ci = np.arange(g * Cg, (g + 1) * Cg)
y[k, ho, wo] = (w[k] * x_pad[np.ix_(ci, hi, wi)]).sum()
if b is not None:
y += b[..., np.newaxis, np.newaxis]
return y
def convolution_nd(x, w, b, pad, stride, dilation, group, dtype=np.float32):
"""
"""
C = x.shape[0]
inshape = x.shape[1:]
ndim = len(inshape)
assert w.ndim == ndim + 2
K, Cg = w.shape[:2]
kshape = w.shape[2:]
def get_conv_out_size_recursive(d, ndim):
if d == ndim:
return []
s = get_conv_out_size(
inshape[d], kshape[d], pad[d], stride[d], dilation[d])
return [s] + get_conv_out_size_recursive(d + 1, ndim)
outshape = get_conv_out_size_recursive(0, ndim)
inshape_pad = [C] + [inshape[d] + 2 * pad[d] for d in range(ndim)]
x_pad = np.zeros(inshape_pad, dtype=dtype)
x_pad[[slice(None,)] + [slice(pad[d], pad[d] + inshape[d])
for d in range(ndim)]] = x
y = np.zeros([K] + outshape, dtype=dtype)
for k in range(K):
g = int(k // (K // group))
for outindex in itertools.product(*map(range, outshape)):
inindex = [outindex[d] * stride[d] +
np.arange(0, kshape[d]) * dilation[d] for d in range(ndim)]
ci = np.arange(g * Cg, (g + 1) * Cg)
y[(k,) + tuple(outindex)] = (w[k] *
x_pad[np.ix_(ci, *inindex)]).sum()
if b is not None:
y += b[[Ellipsis] + [np.newaxis for d in range(ndim)]]
return y
def deconvolution_1d(x, w, b, pad, stride, dilation, group, dtype=np.float32,
output_padding=(0,)):
y = x
K, Ho = y.shape
K, Cg, M = w.shape
C = Cg * group
H = (get_deconv_out_size(Ho, M, pad[0], stride[0], dilation[0])
+ output_padding[0])
x_pad = np.zeros((C, H + pad[0] * 2), dtype=dtype)
for k in range(K):
g = int(k // (K // group))
for ho in range(Ho):
hi = ho * stride[0] + np.arange(0, M) * dilation[0]
ci = np.arange(g * Cg, (g + 1) * Cg)
x_pad[np.ix_(ci, hi)] += w[k] * y[k, ho]
x = x_pad[:, pad[0]:pad[0] + H]
if b is not None:
x += b[..., np.newaxis]
return x
def deconvolution_2d(x, w, b, pad, stride, dilation, group, dtype=np.float32,
output_padding=(0, 0)):
y = x
K, Ho, Wo = y.shape
K, Cg, M, N = w.shape
C = Cg * group
H = (get_deconv_out_size(Ho, M, pad[0], stride[0], dilation[0])
+ output_padding[0])
W = (get_deconv_out_size(Wo, N, pad[1], stride[1], dilation[1])
+ output_padding[1])
x_pad = np.zeros((C, H + pad[0] * 2, W + pad[1] * 2), dtype=dtype)
for k in range(K):
g = int(k // (K // group))
for ho in range(Ho):
for wo in range(Wo):
hi = ho * stride[0] + np.arange(0, M) * dilation[0]
wi = wo * stride[1] + np.arange(0, N) * dilation[1]
ci = np.arange(g * Cg, (g + 1) * Cg)
x_pad[np.ix_(ci, hi, wi)] += w[k] * y[k, ho, wo]
x = x_pad[:, pad[0]:pad[0] + H, pad[1]:pad[1] + W]
if b is not None:
x += b[..., np.newaxis, np.newaxis]
return x
def deformable_convolution_2d(x, w, offset, mask, b, pad, stride,
dilation, group, deformable_group,
channel_last, dtype=np.float32):
"""
Deformable convlution 2D for a single batch data
"""
C, H, W = x.shape # without batch dimension
K, Cg, M, N = w.shape
assert C == Cg * \
group, "Wrong shape, x: {}, w: {}".format(x.shape, w.shape)
assert offset.shape[0] == 2 * deformable_group * M * N, \
"Wrong shape offset: {}, 2 * deformable_group * Kw * Kh: {}".format(
offset.shape, 2 * deformable_group * M * N)
assert offset.shape[1:] == (
H, W), "Wrong shape, offset: {}, w: {}".format(offset.shape, w.shape)
assert mask.shape[0] == deformable_group * M * N, \
"Wrong shape mask: {}, deformable_group * Kw * Kh: {}".format(
mask.shape, deformable_group * M * N)
assert mask.shape[1:] == (
H, W), "Wrong shape, mask: {}, w: {}".format(mask.shape, w.shape)
assert pad[0] < (w.shape[2] + 1)//2 and pad[1] < (w.shape[3] +
1)//2, "Wrong shape, kernel: {}, pad: {}".format(w.shape[2:], pad)
# Zero padding
x_pad = np.zeros((C, H + pad[0] * 2, W + pad[1] * 2), dtype=dtype)
x_pad[:, pad[0]:pad[0] + H, pad[1]:pad[1] + W] = x
# Create and initialize output variable
Ho = get_conv_out_size(H, M, pad[0], stride[0], dilation[0])
Wo = get_conv_out_size(W, N, pad[1], stride[1], dilation[1])
y = np.zeros((K, Ho, Wo), dtype=dtype)
_, Hp, Wp = x_pad.shape
# Deformable Convolution
for k in range(K):
for c in range(C//group):
g = k // (K//group)
ci = Cg * g + c
dg = ci // (C // deformable_group)
for ho in range(Ho):
for wo in range(Wo):
# Get the input coordinates {(hi, wi)} which are
# mapped to the output coordinate (ho, wo) by the kernel.
hi = ho * stride[0] + np.arange(0, M) * dilation[0]
wi = wo * stride[1] + np.arange(0, N) * dilation[1]
# Apply the kernel
modulated_x = np.zeros((M, N), dtype=dtype)
for m in range(M):
for n in range(N):
# Shift (hi, wi) to (ph, pw) by using offset
ph = hi[m] + offset[2*((dg*M*N) + (m * N) + n),
ho * stride[0], wo * stride[1]]
pw = wi[n] + offset[2*((dg*M*N) + (m * N) + n) + 1,
ho * stride[0], wo * stride[1]]
# Bilinear interpolation
h_low = int(np.floor(ph))
w_low = int(np.floor(pw))
h_high = h_low + 1
w_high = w_low + 1
if h_low >= Hp or w_low >= Wp or \
h_high < 0 or w_high < 0:
# Out of bounds.
# Interpolation cannot be perform.
val = 0
else:
v1 = 0 # (h_low, w_low)
v2 = 0 # (h_low, w_high)
v3 = 0 # (h_high, w_low)
v4 = 0 # (h_high, w_high)
if h_low >= 0 and w_low >= 0:
v1 = x_pad[ci, h_low, w_low]
if h_low >= 0 and w_high < Wp:
v2 = x_pad[ci, h_low, w_high]
if h_high < Hp and w_low >= 0:
v3 = x_pad[ci, h_high, w_low]
if h_high < Hp and w_high < Wp:
v4 = x_pad[ci, h_high, w_high]
lh = ph - h_low
lw = pw - w_low
hh = 1 - lh
hw = 1 - lw
w1 = hh * hw
w2 = hh * lw
w3 = lh * hw
w4 = lh * lw
val = w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4
# Apply mask
val *= mask[(dg*M*N) + (m * N) + n,
ho * stride[0], wo * stride[1]]
modulated_x[m, n] = val
y[k, ho, wo] += (w[k, c] * modulated_x).sum()
if b is not None:
y += b[..., np.newaxis, np.newaxis]
return y
def pooling_2d(x, mode, kernel, stride, pad, ignore_border=True,
including_pad=True, dtype=np.float32):
"""
"""
assert mode in ['average', 'sum', 'max']
C, H, W = x.shape
Ho = get_pool_out_size(H, kernel[0], pad[0], stride[0], ignore_border)
Wo = get_pool_out_size(W, kernel[1], pad[1], stride[1], ignore_border)
Hi = H + pad[0] + (pad[0] if ignore_border else kernel[0] - 1)
Wi = W + pad[1] + (pad[1] if ignore_border else kernel[1] - 1)
x_pad = np.ones((C, Hi, Wi), dtype=dtype)
x_pad *= x.min() if mode == 'max' else 0
x_pad[:, pad[0]:pad[0] + H, pad[1]:pad[1] + W] = x
if mode == 'average':
b_pad = np.zeros((C, Hi, Wi), dtype=np.uint)
h_beg = int(not including_pad) * pad[0]
w_beg = int(not including_pad) * pad[1]
h_end = H + (1 + int(including_pad)) * pad[0]
w_end = W + (1 + int(including_pad)) * pad[1]
b_pad[:, h_beg:h_end, w_beg:w_end] = 1
y = np.zeros((C, Ho, Wo), dtype=dtype)
for c in range(C):
for ho in range(Ho):
for wo in range(Wo):
hi = ho * stride[0] + np.arange(0, kernel[0])
wi = wo * stride[1] + np.arange(0, kernel[1])
yy = y[c]
xx = x_pad[c]
if mode == "max":
yy[ho, wo] = xx[np.ix_(hi, wi)].max()
elif mode == "sum":
yy[ho, wo] = xx[np.ix_(hi, wi)].sum()
elif mode == "average":
pad_sum = xx[np.ix_(hi, wi)].sum()
pad_cnt = b_pad[c][np.ix_(hi, wi)].sum()
yy[ho, wo] = pad_sum / pad_cnt
return y
def pooling_3d(x, mode, kernel, stride, pad, ignore_border=True,
including_pad=True, dtype=np.float32):
"""
"""
assert mode in ['average', 'sum', 'max']
C, Z, H, W = x.shape
Zo = get_pool_out_size(Z, kernel[0], pad[0], stride[0], ignore_border)
Ho = get_pool_out_size(H, kernel[1], pad[1], stride[1], ignore_border)
Wo = get_pool_out_size(W, kernel[2], pad[2], stride[2], ignore_border)
Zi = Z + pad[0] + (pad[0] if ignore_border else kernel[0] - 1)
Hi = H + pad[1] + (pad[1] if ignore_border else kernel[1] - 1)
Wi = W + pad[2] + (pad[2] if ignore_border else kernel[2] - 1)
x_pad = np.ones((C, Zi, Hi, Wi), dtype=dtype)
x_pad *= x.min() if mode == 'max' else 0
x_pad[:, pad[0]:pad[0] + Z, pad[1]:pad[1] + H, pad[2]:pad[2] + W] = x
if mode == 'average':
b_pad = np.zeros((C, Zi, Hi, Wi), dtype=np.uint)
z_beg = int(not including_pad) * pad[0]
h_beg = int(not including_pad) * pad[1]
w_beg = int(not including_pad) * pad[2]
z_end = Z + (1 + int(including_pad)) * pad[0]
h_end = H + (1 + int(including_pad)) * pad[1]
w_end = W + (1 + int(including_pad)) * pad[2]
b_pad[:, z_beg:z_end, h_beg:h_end, w_beg:w_end] = 1
#b_pad[:, pad[0]:pad[0] + Z, pad[1]:pad[1] + H, pad[2]:pad[2] + W] = 1
y = np.zeros((C, Zo, Ho, Wo), dtype=dtype)
for c in range(C):
for zo in range(Zo):
for ho in range(Ho):
for wo in range(Wo):
zi = zo * stride[0] + np.arange(0, kernel[0])
hi = ho * stride[1] + np.arange(0, kernel[1])
wi = wo * stride[2] + np.arange(0, kernel[2])
yy = y[c]
xx = x_pad[c]
if mode == "max":
yy[zo, ho, wo] = xx[np.ix_(zi, hi, wi)].max()
elif mode == "sum":
yy[zo, ho, wo] = xx[np.ix_(zi, hi, wi)].sum()
elif mode == "average":
pool_sum = xx[np.ix_(zi, hi, wi)].sum()
pool_cnt = b_pad[c][np.ix_(zi, hi, wi)].sum()
yy[zo, ho, wo] = pool_sum / pool_cnt
return y
def generate_rotation_2d(rng, B):
rotates = []
for i in range(B):
degree = 2 * np.pi * (2.0 * rng.rand() - 1.0)
c, s = np.cos(degree), np.sin(degree)
rotate = np.asarray([[c, -s],
[s, c]])
rotates.append(rotate)
return np.asarray(rotates)
def generate_rotation_3d(rng, B):
rotates = []
for i in range(B):
alpha = np.pi * (2.0 * rng.rand() - 1.0)
beta = np.pi / 2.0 * (2.0 * rng.rand() - 1.0)
gamma = np.pi * (2.0 * rng.rand() - 1.0)
c, s = np.cos(alpha), np.sin(alpha)
Ra = np.asarray([[c, -s, 0],
[s, c, 0],
[0, 0, 1]])
c, s = np.cos(beta), np.sin(beta)
Rb = np.asarray([[c, 0, s],
[0, 1, 0],
[-s, 0, c]])
c, s = np.cos(gamma), np.sin(gamma)
Rg = np.asarray([[1, 0, 0],
[0, c, -s],
[0, s, c]])
rotate = Ra.dot(Rb).dot(Rg)
rotates.append(rotate)
return np.asarray(rotates)
def generate_transformation_2d(rng, batch_size):
rotate = generate_rotation_2d(rng, batch_size)
translate = (2.0 * rng.rand(batch_size, 2, 1) - 1.0) * 0.001
theta = np.concatenate([rotate, translate], axis=2)
return theta.astype(np.float32)
def generate_transformation_3d(rng, batch_size):
rotate = generate_rotation_3d(rng, batch_size)
translate = (2.0 * rng.rand(batch_size, 3, 1) - 1.0) * 0.001
theta = np.concatenate([rotate, translate], axis=2)
return theta.astype(np.float32)
def generate_normalized_grid_2d(B, size, align_corners):
H, W = size
hgrid = np.linspace(-1.0, 1.0, H)
wgrid = np.linspace(-1.0, 1.0, W)
hgrid = hgrid if align_corners else hgrid * (H - 1) / H
wgrid = wgrid if align_corners else wgrid * (W - 1) / W
w, h = np.meshgrid(wgrid, hgrid)
x = w.reshape(-1)
y = h.reshape(-1)
t = np.ones(len(x))
normalized_grid = np.stack((x, y, t), axis=1)
normalized_grid = normalized_grid.reshape(H, W, 3)
normalized_grid = np.repeat(
normalized_grid[np.newaxis, :, :, :], B, axis=0)
return normalized_grid.astype(np.float32)
def generate_normalized_grid_3d(B, size, align_corners):
D, H, W = size
dgrid = np.linspace(-1.0, 1.0, D)
hgrid = np.linspace(-1.0, 1.0, H)
wgrid = np.linspace(-1.0, 1.0, W)
dgrid = dgrid if align_corners else dgrid * (D - 1) / D
hgrid = hgrid if align_corners else hgrid * (H - 1) / H
wgrid = wgrid if align_corners else wgrid * (W - 1) / W
h, d, w = np.meshgrid(hgrid, dgrid, wgrid)
x = w.reshape(-1)
y = h.reshape(-1)
z = d.reshape(-1)
t = np.ones(len(x))
normalized_grid = np.stack((x, y, z, t), axis=1)
normalized_grid = normalized_grid.reshape(D, H, W, 4)
normalized_grid = np.repeat(
normalized_grid[np.newaxis, :, :, :, :], B, axis=0)
return normalized_grid.astype(np.float32)
def affine_grid_2d(affine, size, align_corners):
B = affine.shape[0]
H, W = size
grid_t = generate_normalized_grid_2d(B, size, align_corners)
grid_s = np.matmul(grid_t.reshape(B, H * W, 3),
affine.transpose((0, 2, 1)))
grid_s = grid_s.reshape(B, H, W, 2)
return grid_s.astype(np.float32)
def affine_grid_3d(affine, size, align_corners):
B = affine.shape[0]
D, H, W = size
grid_t = generate_normalized_grid_3d(B, size, align_corners)
grid_s = np.matmul(grid_t.reshape(B, D * H * W, 4),
affine.transpose((0, 2, 1)))
grid_s = grid_s.reshape(B, D, H, W, 3)
return grid_s.astype(np.float32)
def pad_sequence(sequences, batch_first):
# sequences: list of nparray
# sequences[i]: (T_i, D_1, ..., D_M)
Ds = () if len(sequences[0].shape) == 1 else sequences[0].shape[1:]
B = len(sequences)
T = max([seq.shape[0] for seq in sequences])
data = np.zeros((B, T) + Ds) if batch_first else np.zeros((T, B) + Ds)
for b, seq in enumerate(sequences):
l = seq.shape[0]
if batch_first:
data[b, :l] = seq
else:
data[:l, b] = seq
return data
| 37.817822 | 120 | 0.48869 |
from __future__ import division
from six.moves import range
import itertools
import numpy as np
def get_conv_out_size(w, k, p, s, d=1):
return (w + 2 * p - (d * (k - 1) + 1)) // s + 1
def get_deconv_out_size(w, k, p, s, d):
return s * (w - 1) - 2 * p + (d * (k - 1) + 1)
def get_pool_out_size(w, k, p, s, ignore_border):
return (w + p - ((k - p) if ignore_border else 1)) // s + 1
class ChannelLastToFirstTranspose(object):
def __init__(self, dim, kdim):
base_axis = dim - kdim - 1
up_to_base = tuple(range(0, base_axis))
self.axes = up_to_base + (dim - 1,) + tuple(range(base_axis, dim - 1))
self.inv_axes = up_to_base + \
tuple(range(base_axis + 1, dim)) + (base_axis,)
def __call__(self, x):
return x.transpose(self.axes).copy()
def inv(self, x):
return x.transpose(self.inv_axes).copy()
def convolution_1d(x, w, b, pad, stride, dilation, group, dtype=np.float32):
C, H = x.shape
K, Cg, M = w.shape
Ho = get_conv_out_size(H, M, pad[0], stride[0], dilation[0])
x_pad = np.zeros((C, H + pad[0] * 2), dtype=dtype)
x_pad[:, pad[0]:pad[0] + H] = x
y = np.zeros((K, Ho), dtype=dtype)
for k in range(K):
g = int(k // (K // group))
for ho in range(Ho):
hi = ho * stride[0] + np.arange(0, M) * dilation[0]
ci = np.arange(g * Cg, (g + 1) * Cg)
y[k, ho] = (w[k] * x_pad[np.ix_(ci, hi)]).sum()
if b is not None:
y += b[..., np.newaxis]
return y
def convolution_2d(x, w, b, pad, stride, dilation, group, dtype=np.float32):
C, H, W = x.shape
K, Cg, M, N = w.shape
Ho = get_conv_out_size(H, M, pad[0], stride[0], dilation[0])
Wo = get_conv_out_size(W, N, pad[1], stride[1], dilation[1])
x_pad = np.zeros((C, H + pad[0] * 2, W + pad[1] * 2), dtype=dtype)
x_pad[:, pad[0]:pad[0] + H, pad[1]:pad[1] + W] = x
y = np.zeros((K, Ho, Wo), dtype=dtype)
for k in range(K):
g = int(k // (K // group))
for ho in range(Ho):
for wo in range(Wo):
hi = ho * stride[0] + np.arange(0, M) * dilation[0]
wi = wo * stride[1] + np.arange(0, N) * dilation[1]
ci = np.arange(g * Cg, (g + 1) * Cg)
y[k, ho, wo] = (w[k] * x_pad[np.ix_(ci, hi, wi)]).sum()
if b is not None:
y += b[..., np.newaxis, np.newaxis]
return y
def convolution_nd(x, w, b, pad, stride, dilation, group, dtype=np.float32):
C = x.shape[0]
inshape = x.shape[1:]
ndim = len(inshape)
assert w.ndim == ndim + 2
K, Cg = w.shape[:2]
kshape = w.shape[2:]
def get_conv_out_size_recursive(d, ndim):
if d == ndim:
return []
s = get_conv_out_size(
inshape[d], kshape[d], pad[d], stride[d], dilation[d])
return [s] + get_conv_out_size_recursive(d + 1, ndim)
outshape = get_conv_out_size_recursive(0, ndim)
inshape_pad = [C] + [inshape[d] + 2 * pad[d] for d in range(ndim)]
x_pad = np.zeros(inshape_pad, dtype=dtype)
x_pad[[slice(None,)] + [slice(pad[d], pad[d] + inshape[d])
for d in range(ndim)]] = x
y = np.zeros([K] + outshape, dtype=dtype)
for k in range(K):
g = int(k // (K // group))
for outindex in itertools.product(*map(range, outshape)):
inindex = [outindex[d] * stride[d] +
np.arange(0, kshape[d]) * dilation[d] for d in range(ndim)]
ci = np.arange(g * Cg, (g + 1) * Cg)
y[(k,) + tuple(outindex)] = (w[k] *
x_pad[np.ix_(ci, *inindex)]).sum()
if b is not None:
y += b[[Ellipsis] + [np.newaxis for d in range(ndim)]]
return y
def deconvolution_1d(x, w, b, pad, stride, dilation, group, dtype=np.float32,
                     output_padding=(0,)):
    """Reference 1-D grouped transposed convolution (scatter-add loops).

    Args:
        x: input of shape (K, Ho).
        w: filters of shape (K, Cg, M).
        b: optional bias broadcastable to (C, 1), or None.
        pad, stride, dilation, output_padding: length-1 sequences.
        group: number of filter groups.
        dtype: dtype of the accumulation buffer.

    Returns:
        Array of shape (C, H) with C = Cg * group.
    """
    grad = x
    K, Ho = grad.shape
    K, Cg, M = w.shape
    C = Cg * group
    H = (get_deconv_out_size(Ho, M, pad[0], stride[0], dilation[0])
         + output_padding[0])
    acc = np.zeros((C, H + 2 * pad[0]), dtype=dtype)
    taps = np.arange(0, M) * dilation[0]
    for k in range(K):
        g = int(k // (K // group))
        chans = np.arange(g * Cg, (g + 1) * Cg)
        for ho in range(Ho):
            # Scatter-add the weighted filter into the padded canvas.
            hi = ho * stride[0] + taps
            acc[np.ix_(chans, hi)] += w[k] * grad[k, ho]
    out = acc[:, pad[0]:pad[0] + H]
    if b is not None:
        out += b[..., np.newaxis]
    return out
def deconvolution_2d(x, w, b, pad, stride, dilation, group, dtype=np.float32,
                     output_padding=(0, 0)):
    """Reference 2-D grouped transposed convolution (scatter-add loops).

    Args:
        x: input of shape (K, Ho, Wo).
        w: filters of shape (K, Cg, M, N).
        b: optional bias broadcastable to (C, 1, 1), or None.
        pad, stride, dilation, output_padding: length-2 sequences for the
            (H, W) axes.
        group: number of filter groups.
        dtype: dtype of the accumulation buffer.

    Returns:
        Array of shape (C, H, W) with C = Cg * group.
    """
    grad = x
    K, Ho, Wo = grad.shape
    K, Cg, M, N = w.shape
    C = Cg * group
    H = (get_deconv_out_size(Ho, M, pad[0], stride[0], dilation[0])
         + output_padding[0])
    W = (get_deconv_out_size(Wo, N, pad[1], stride[1], dilation[1])
         + output_padding[1])
    acc = np.zeros((C, H + 2 * pad[0], W + 2 * pad[1]), dtype=dtype)
    row_taps = np.arange(0, M) * dilation[0]
    col_taps = np.arange(0, N) * dilation[1]
    for k in range(K):
        g = int(k // (K // group))
        chans = np.arange(g * Cg, (g + 1) * Cg)
        for ho, wo in itertools.product(range(Ho), range(Wo)):
            # Scatter-add the weighted filter into the padded canvas.
            hi = ho * stride[0] + row_taps
            wi = wo * stride[1] + col_taps
            acc[np.ix_(chans, hi, wi)] += w[k] * grad[k, ho, wo]
    out = acc[:, pad[0]:pad[0] + H, pad[1]:pad[1] + W]
    if b is not None:
        out += b[..., np.newaxis, np.newaxis]
    return out
def deformable_convolution_2d(x, w, offset, mask, b, pad, stride,
                              dilation, group, deformable_group,
                              channel_last, dtype=np.float32):
    """Reference modulated deformable 2-D convolution with explicit loops.

    Each kernel tap is sampled at its regular grid position plus a learned
    per-position (dh, dw) offset, using bilinear interpolation on the padded
    input, then scaled by the corresponding modulation mask value.

    Args:
        x: input array of shape (C, H, W).
        w: filters of shape (K, Cg, M, N); C == Cg * group is asserted.
        offset: per-tap offsets of shape (2 * deformable_group * M * N, H, W);
            channel 2*i holds the vertical and 2*i+1 the horizontal offset.
        mask: modulation scalars of shape (deformable_group * M * N, H, W).
        b: optional per-filter bias broadcastable to (K, 1, 1), or None.
        pad, stride, dilation: length-2 sequences for the (H, W) axes.
        group: number of filter groups.
        deformable_group: number of offset/mask groups along channels.
        channel_last: unused in this reference implementation.
        dtype: dtype of the padded buffer and the output.

    Returns:
        Output array of shape (K, Ho, Wo).
    """
    C, H, W = x.shape
    K, Cg, M, N = w.shape
    assert C == Cg * \
        group, "Wrong shape, x: {}, w: {}".format(x.shape, w.shape)
    assert offset.shape[0] == 2 * deformable_group * M * N, \
        "Wrong shape offset: {}, 2 * deformable_group * Kw * Kh: {}".format(
            offset.shape, 2 * deformable_group * M * N)
    assert offset.shape[1:] == (
        H, W), "Wrong shape, offset: {}, w: {}".format(offset.shape, w.shape)
    assert mask.shape[0] == deformable_group * M * N, \
        "Wrong shape mask: {}, deformable_group * Kw * Kh: {}".format(
            mask.shape, deformable_group * M * N)
    assert mask.shape[1:] == (
        H, W), "Wrong shape, mask: {}, w: {}".format(mask.shape, w.shape)
    assert pad[0] < (w.shape[2] + 1)//2 and pad[1] < (w.shape[3] +
                                                      1)//2, "Wrong shape, kernel: {}, pad: {}".format(w.shape[2:], pad)
    # Zero-pad the spatial axes; all sampling happens in padded coordinates.
    x_pad = np.zeros((C, H + pad[0] * 2, W + pad[1] * 2), dtype=dtype)
    x_pad[:, pad[0]:pad[0] + H, pad[1]:pad[1] + W] = x
    Ho = get_conv_out_size(H, M, pad[0], stride[0], dilation[0])
    Wo = get_conv_out_size(W, N, pad[1], stride[1], dilation[1])
    y = np.zeros((K, Ho, Wo), dtype=dtype)
    _, Hp, Wp = x_pad.shape
    for k in range(K):
        for c in range(C//group):
            # g: filter group of output map k; ci: absolute input channel;
            # dg: which deformable group channel ci falls into.
            g = k // (K//group)
            ci = Cg * g + c
            dg = ci // (C // deformable_group)
            for ho in range(Ho):
                for wo in range(Wo):
                    # Regular (undeformed) dilated grid positions.
                    hi = ho * stride[0] + np.arange(0, M) * dilation[0]
                    wi = wo * stride[1] + np.arange(0, N) * dilation[1]
                    modulated_x = np.zeros((M, N), dtype=dtype)
                    for m in range(M):
                        for n in range(N):
                            # Fractional sampling position: grid point plus
                            # the learned offset read at the output location.
                            ph = hi[m] + offset[2*((dg*M*N) + (m * N) + n),
                                                ho * stride[0], wo * stride[1]]
                            pw = wi[n] + offset[2*((dg*M*N) + (m * N) + n) + 1,
                                                ho * stride[0], wo * stride[1]]
                            h_low = int(np.floor(ph))
                            w_low = int(np.floor(pw))
                            h_high = h_low + 1
                            w_high = w_low + 1
                            if h_low >= Hp or w_low >= Wp or \
                                    h_high < 0 or w_high < 0:
                                # Entirely outside the padded input.
                                val = 0
                            else:
                                # Bilinear interpolation; out-of-range
                                # corners contribute zero.
                                v1 = 0
                                v2 = 0
                                v3 = 0
                                v4 = 0
                                if h_low >= 0 and w_low >= 0:
                                    v1 = x_pad[ci, h_low, w_low]
                                if h_low >= 0 and w_high < Wp:
                                    v2 = x_pad[ci, h_low, w_high]
                                if h_high < Hp and w_low >= 0:
                                    v3 = x_pad[ci, h_high, w_low]
                                if h_high < Hp and w_high < Wp:
                                    v4 = x_pad[ci, h_high, w_high]
                                lh = ph - h_low
                                lw = pw - w_low
                                hh = 1 - lh
                                hw = 1 - lw
                                w1 = hh * hw
                                w2 = hh * lw
                                w3 = lh * hw
                                w4 = lh * lw
                                val = w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4
                            # Scale the sampled value by its modulation mask.
                            val *= mask[(dg*M*N) + (m * N) + n,
                                        ho * stride[0], wo * stride[1]]
                            modulated_x[m, n] = val
                    y[k, ho, wo] += (w[k, c] * modulated_x).sum()
    if b is not None:
        y += b[..., np.newaxis, np.newaxis]
    return y
def pooling_2d(x, mode, kernel, stride, pad, ignore_border=True,
               including_pad=True, dtype=np.float32):
    """Reference 2-D pooling ('average' | 'sum' | 'max') over a (C, H, W) input.

    For 'average', `including_pad` controls whether padded positions count
    toward the divisor. `ignore_border=False` enlarges the padded canvas so
    partial windows at the border are still produced.
    """
    assert mode in ['average', 'sum', 'max']
    C, H, W = x.shape
    out_h = get_pool_out_size(H, kernel[0], pad[0], stride[0], ignore_border)
    out_w = get_pool_out_size(W, kernel[1], pad[1], stride[1], ignore_border)
    pad_h = H + pad[0] + (pad[0] if ignore_border else kernel[0] - 1)
    pad_w = W + pad[1] + (pad[1] if ignore_border else kernel[1] - 1)
    # Fill with the minimum for 'max' so padding never wins; zero otherwise.
    fill = x.min() if mode == 'max' else 0
    x_pad = np.full((C, pad_h, pad_w), fill, dtype=dtype)
    x_pad[:, pad[0]:pad[0] + H, pad[1]:pad[1] + W] = x
    if mode == 'average':
        # Ones mark positions that participate in the average's denominator.
        counted = np.zeros((C, pad_h, pad_w), dtype=np.uint)
        h0 = 0 if including_pad else pad[0]
        w0 = 0 if including_pad else pad[1]
        h1 = H + (2 if including_pad else 1) * pad[0]
        w1 = W + (2 if including_pad else 1) * pad[1]
        counted[:, h0:h1, w0:w1] = 1
    y = np.zeros((C, out_h, out_w), dtype=dtype)
    for c, ho, wo in itertools.product(range(C), range(out_h), range(out_w)):
        hi = ho * stride[0] + np.arange(0, kernel[0])
        wi = wo * stride[1] + np.arange(0, kernel[1])
        window = x_pad[c][np.ix_(hi, wi)]
        if mode == "max":
            y[c, ho, wo] = window.max()
        elif mode == "sum":
            y[c, ho, wo] = window.sum()
        elif mode == "average":
            y[c, ho, wo] = window.sum() / counted[c][np.ix_(hi, wi)].sum()
    return y
def pooling_3d(x, mode, kernel, stride, pad, ignore_border=True,
               including_pad=True, dtype=np.float32):
    """Reference 3-D pooling ('average' | 'sum' | 'max') over a (C, Z, H, W) input.

    For 'average', `including_pad` controls whether padded positions count
    toward the divisor. `ignore_border=False` enlarges the padded canvas so
    partial windows at the border are still produced.
    """
    assert mode in ['average', 'sum', 'max']
    C, Z, H, W = x.shape
    spatial = (Z, H, W)
    out_z, out_h, out_w = [
        get_pool_out_size(s, kernel[d], pad[d], stride[d], ignore_border)
        for d, s in enumerate(spatial)]
    padded_dims = [s + pad[d] + (pad[d] if ignore_border else kernel[d] - 1)
                   for d, s in enumerate(spatial)]
    # Fill with the minimum for 'max' so padding never wins; zero otherwise.
    fill = x.min() if mode == 'max' else 0
    x_pad = np.full([C] + padded_dims, fill, dtype=dtype)
    x_pad[:, pad[0]:pad[0] + Z, pad[1]:pad[1] + H, pad[2]:pad[2] + W] = x
    if mode == 'average':
        # Ones mark positions that participate in the average's denominator.
        counted = np.zeros([C] + padded_dims, dtype=np.uint)
        begins = [0 if including_pad else pad[d] for d in range(3)]
        ends = [s + (2 if including_pad else 1) * pad[d]
                for d, s in enumerate(spatial)]
        counted[:, begins[0]:ends[0], begins[1]:ends[1], begins[2]:ends[2]] = 1
    y = np.zeros((C, out_z, out_h, out_w), dtype=dtype)
    for c, zo, ho, wo in itertools.product(
            range(C), range(out_z), range(out_h), range(out_w)):
        zi = zo * stride[0] + np.arange(0, kernel[0])
        hi = ho * stride[1] + np.arange(0, kernel[1])
        wi = wo * stride[2] + np.arange(0, kernel[2])
        window = x_pad[c][np.ix_(zi, hi, wi)]
        if mode == "max":
            y[c, zo, ho, wo] = window.max()
        elif mode == "sum":
            y[c, zo, ho, wo] = window.sum()
        elif mode == "average":
            y[c, zo, ho, wo] = window.sum() / counted[c][np.ix_(zi, hi, wi)].sum()
    return y
def generate_rotation_2d(rng, B):
    """Draw B random 2-D rotation matrices.

    One angle in [-2*pi, 2*pi) is drawn per matrix from ``rng.rand()``.

    Args:
        rng: RandomState-like generator exposing ``rand()``.
        B: number of matrices to generate.

    Returns:
        Array of shape (B, 2, 2).
    """
    def _one_rotation():
        angle = 2 * np.pi * (2.0 * rng.rand() - 1.0)
        cos_a, sin_a = np.cos(angle), np.sin(angle)
        return np.asarray([[cos_a, -sin_a],
                           [sin_a, cos_a]])
    return np.asarray([_one_rotation() for _ in range(B)])
def generate_rotation_3d(rng, B):
    """Draw B random 3-D rotation matrices composed as Rz(alpha) @ Ry(beta) @ Rx(gamma).

    Per matrix, three angles are drawn in order: alpha, gamma in [-pi, pi)
    and beta in [-pi/2, pi/2).

    Args:
        rng: RandomState-like generator exposing ``rand()``.
        B: number of matrices to generate.

    Returns:
        Array of shape (B, 3, 3).
    """
    mats = []
    for _ in range(B):
        # Draw order matters for RNG reproducibility: alpha, beta, gamma.
        alpha = np.pi * (2.0 * rng.rand() - 1.0)
        beta = np.pi / 2.0 * (2.0 * rng.rand() - 1.0)
        gamma = np.pi * (2.0 * rng.rand() - 1.0)
        ca, sa = np.cos(alpha), np.sin(alpha)
        cb, sb = np.cos(beta), np.sin(beta)
        cg, sg = np.cos(gamma), np.sin(gamma)
        rot_z = np.asarray([[ca, -sa, 0],
                            [sa, ca, 0],
                            [0, 0, 1]])
        rot_y = np.asarray([[cb, 0, sb],
                            [0, 1, 0],
                            [-sb, 0, cb]])
        rot_x = np.asarray([[1, 0, 0],
                            [0, cg, -sg],
                            [0, sg, cg]])
        mats.append(rot_z.dot(rot_y).dot(rot_x))
    return np.asarray(mats)
def generate_transformation_2d(rng, batch_size):
    """Random 2-D affine transforms: a rotation plus a tiny translation.

    Returns:
        float32 array of shape (batch_size, 2, 3) — [R | t] per sample.
    """
    rotation = generate_rotation_2d(rng, batch_size)
    shift = (2.0 * rng.rand(batch_size, 2, 1) - 1.0) * 0.001
    return np.concatenate((rotation, shift), axis=2).astype(np.float32)
def generate_transformation_3d(rng, batch_size):
    """Random 3-D affine transforms: a rotation plus a tiny translation.

    Returns:
        float32 array of shape (batch_size, 3, 4) — [R | t] per sample.
    """
    rotation = generate_rotation_3d(rng, batch_size)
    shift = (2.0 * rng.rand(batch_size, 3, 1) - 1.0) * 0.001
    return np.concatenate((rotation, shift), axis=2).astype(np.float32)
def generate_normalized_grid_2d(B, size, align_corners):
    """Build a batch of normalized homogeneous 2-D sampling grids.

    Args:
        B: batch size.
        size: (H, W) grid dimensions.
        align_corners: when False, each coordinate is shrunk by
            (dim - 1) / dim so samples sit at pixel centers.

    Returns:
        float32 array of shape (B, H, W, 3); last axis is (x, y, 1)
        with coordinates spanning [-1, 1].
    """
    H, W = size
    ys = np.linspace(-1.0, 1.0, H)
    xs = np.linspace(-1.0, 1.0, W)
    if not align_corners:
        ys = ys * (H - 1) / H
        xs = xs * (W - 1) / W
    xv, yv = np.meshgrid(xs, ys)
    flat = np.stack((xv.reshape(-1), yv.reshape(-1), np.ones(H * W)), axis=1)
    grid = flat.reshape(H, W, 3)
    grid = np.repeat(grid[np.newaxis], B, axis=0)
    return grid.astype(np.float32)
def generate_normalized_grid_3d(B, size, align_corners):
    """Build a batch of normalized homogeneous 3-D sampling grids.

    Args:
        B: batch size.
        size: (D, H, W) grid dimensions.
        align_corners: when False, each coordinate is shrunk by
            (dim - 1) / dim so samples sit at voxel centers.

    Returns:
        float32 array of shape (B, D, H, W, 4); last axis is (x, y, z, 1)
        with coordinates spanning [-1, 1].
    """
    D, H, W = size
    zs = np.linspace(-1.0, 1.0, D)
    ys = np.linspace(-1.0, 1.0, H)
    xs = np.linspace(-1.0, 1.0, W)
    if not align_corners:
        zs = zs * (D - 1) / D
        ys = ys * (H - 1) / H
        xs = xs * (W - 1) / W
    # Default 'xy' meshgrid ordering yields arrays of shape (D, H, W).
    yv, zv, xv = np.meshgrid(ys, zs, xs)
    flat = np.stack((xv.reshape(-1), yv.reshape(-1), zv.reshape(-1),
                     np.ones(D * H * W)), axis=1)
    grid = flat.reshape(D, H, W, 4)
    grid = np.repeat(grid[np.newaxis], B, axis=0)
    return grid.astype(np.float32)
def affine_grid_2d(affine, size, align_corners):
    """Map the canonical 2-D target grid through per-batch 2x3 affines.

    Args:
        affine: array of shape (B, 2, 3).
        size: (H, W) of the target grid.
        align_corners: forwarded to generate_normalized_grid_2d.

    Returns:
        float32 source coordinates of shape (B, H, W, 2).
    """
    B = affine.shape[0]
    H, W = size
    target = generate_normalized_grid_2d(B, size, align_corners)
    source = np.matmul(target.reshape(B, H * W, 3),
                       affine.transpose((0, 2, 1)))
    return source.reshape(B, H, W, 2).astype(np.float32)
def affine_grid_3d(affine, size, align_corners):
    """Map the canonical 3-D target grid through per-batch 3x4 affines.

    Args:
        affine: array of shape (B, 3, 4).
        size: (D, H, W) of the target grid.
        align_corners: forwarded to generate_normalized_grid_3d.

    Returns:
        float32 source coordinates of shape (B, D, H, W, 3).
    """
    B = affine.shape[0]
    D, H, W = size
    target = generate_normalized_grid_3d(B, size, align_corners)
    source = np.matmul(target.reshape(B, D * H * W, 4),
                       affine.transpose((0, 2, 1)))
    return source.reshape(B, D, H, W, 3).astype(np.float32)
def pad_sequence(sequences, batch_first):
    """Zero-pad variable-length sequences into one dense batch array.

    Args:
        sequences: list of arrays, each of shape (T_i, ...) sharing
            identical trailing dimensions.
        batch_first: if True the result is (B, T_max, ...), otherwise
            (T_max, B, ...).

    Returns:
        Zero-padded array (np.zeros default dtype).
    """
    trailing = sequences[0].shape[1:]
    batch = len(sequences)
    longest = max(seq.shape[0] for seq in sequences)
    lead = (batch, longest) if batch_first else (longest, batch)
    out = np.zeros(lead + trailing)
    for idx, seq in enumerate(sequences):
        n = seq.shape[0]
        if batch_first:
            out[idx, :n] = seq
        else:
            out[:n, idx] = seq
    return out
| true | true |
f72f5b0f93cd1cae45d411cdbf0b9862569cbd4a | 14,421 | py | Python | models/wc3.py | prannayk/MSRASI17 | f7277d90ffdd062c1ba94391b7f82c621e619743 | [
"MIT"
] | null | null | null | models/wc3.py | prannayk/MSRASI17 | f7277d90ffdd062c1ba94391b7f82c621e619743 | [
"MIT"
] | null | null | null | models/wc3.py | prannayk/MSRASI17 | f7277d90ffdd062c1ba94391b7f82c621e619743 | [
"MIT"
] | null | null | null | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import operator
import collections
import math
import time
import os
import random
import zipfile
import time
import numpy as np
import sys
from six.moves import urllib
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
sys.path.append( '../util/')
from generators import *
from loader import *
from print_tweets import *
from similar_tokens import *
from training import *
from similar_tokens import *
from expand_query import *
from argument_loader import *
from setup import *
from LSTM import *
dataset, query_type, filename, num_steps, num_steps_roll, num_steps_train, expand_flag,lr_, matchname = import_arguments(sys.argv)
char_batch_dict, word_batch_dict,data, count, dictionary, reverse_dictionary, word_max_len, char_max_len, vocabulary_size, char_dictionary, reverse_char_dictionary, data_index, char_data_index, buffer_index, batch_list, char_batch_list, word_batch_list, char_data = build_everything(dataset)
data_index, batch, labels = generate_batch(data, data_index, batch_size=8, num_skips=2, skip_window=1,)
for i in range(8):
print(batch[i], reverse_dictionary[batch[i]],
'->', labels[i, 0], reverse_dictionary[labels[i, 0]])
char_data_index, batch, labels = generate_batch_char(char_data, char_data_index, batch_size=8, num_skips=2, skip_window=1)
for i in range(8):
print(batch[i], reverse_char_dictionary[batch[i]],
'->', labels[i, 0], reverse_char_dictionary[labels[i, 0]])
lambda_1, tweet_batch_size, expand_start_count, query_name, query_tokens, query_tokens_alternate, char_batch_size, num_sampled, valid_examples, valid_window, valid_size, skip_window, num_skips, embedding_size, char_vocabulary_size, batch_size, num_char_skips, skip_char_window = setup(char_dictionary, dictionary, query_type)
learning_rate = lr_
graph = tf.Graph()
with graph.as_default():
# Input data.
need_constant = tf.constant(query_tokens,dtype=tf.int32)
avail_constant = tf.constant(query_tokens_alternate, dtype=tf.int32)
train_inputs = tf.placeholder(tf.int32, shape=[batch_size])
train_input_chars = tf.placeholder(tf.int32, shape=[char_batch_size])
train_labels = tf.placeholder(tf.int32, shape=[batch_size, 1])
train_char_labels = tf.placeholder(tf.int32, shape=[char_batch_size, 1])
word_char_embeddings = tf.placeholder(tf.int32, shape=[batch_size, char_max_len])
valid_dataset = tf.constant(valid_examples[0], dtype=tf.int32)
valid_char_dataset = tf.constant(valid_examples[1], dtype=tf.int32)
query_ints = tf.placeholder(tf.int32, shape=len(query_tokens))
expanded_query_ints = tf.placeholder(tf.int32, shape=(len(query_tokens)+3))
tquery_word_holder = tf.placeholder(tf.int32, shape=[word_max_len],name="tweet_query_word_holder")
tquery_char_holder = tf.placeholder(tf.int32, shape=[word_max_len, char_max_len],name="tweet_query_char_holder")
# Ops and variables pinned to the CPU because of missing GPU implementation
tweet_char_holder = tf.placeholder(tf.int32, shape=[tweet_batch_size,word_max_len,char_max_len],name="tweet_char_holder")
tweet_word_holder = tf.placeholder(tf.int32, shape=[tweet_batch_size, word_max_len],name="tweet_word_holder")
with tf.device('/gpu:0'):
# Look up embeddings for inputs.
embeddings = tf.Variable(tf.random_uniform([vocabulary_size, embedding_size], -1.0, 1.0))
char_embeddings = tf.Variable(tf.random_uniform([char_vocabulary_size, embedding_size // 2],-1.0,1.0))
embed = tf.nn.embedding_lookup(embeddings, train_inputs)
char_embed = tf.nn.embedding_lookup(char_embeddings,train_input_chars)
lambda_2 = tf.Variable(tf.random_normal([1],stddev=1.0))
# weight variables
w1 = tf.Variable(tf.random_normal([embedding_size,embedding_size // 4],stddev=1.0/math.sqrt(embedding_size)))
w2 = tf.Variable(tf.random_normal([embedding_size // 4,1],stddev=1.0/math.sqrt(embedding_size)))
weights = tf.stack([w1]*batch_size)
vvector = tf.stack([w2]*batch_size)
weights_tweet = tf.stack([w1]*tweet_batch_size*word_max_len)
vvector_tweet = tf.stack([w2]*tweet_batch_size*word_max_len)
# Construct the variables for the NCE loss
nce_weights = tf.Variable(
tf.truncated_normal([vocabulary_size, embedding_size],
stddev=1.0 / math.sqrt(embedding_size)))
nce_biases = tf.Variable(tf.zeros([vocabulary_size]))
# character weights
nce_char_weights = tf.Variable(
tf.truncated_normal([vocabulary_size, embedding_size // 2],
stddev=1.0 / math.sqrt(embedding_size // 2)))
nce_char_biases = tf.Variable(tf.zeros([vocabulary_size]))
nce_train_weights = tf.Variable(
tf.truncated_normal([vocabulary_size, embedding_size],
stddev=1.0 / math.sqrt(embedding_size)))
nce_train_biases = tf.Variable(tf.zeros([vocabulary_size]))
loss = tf.reduce_mean(
tf.nn.nce_loss(weights=nce_weights,
biases=nce_biases,
labels=train_labels,
inputs=embed,
num_sampled=num_sampled,
num_classes=vocabulary_size))
loss_char = tf.reduce_mean(
tf.nn.nce_loss(weights=nce_char_weights,
biases=nce_char_biases,
labels=train_char_labels,
inputs=char_embed,
num_sampled=10,
num_classes=char_vocabulary_size))
# Construct the SGD optimizer using a learning rate of 1.0.
optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss)
optimizer_char = tf.train.AdamOptimizer(learning_rate /5).minimize(loss_char)
# Compute the cosine similarity between minibatch examples and all embeddings.
norm = tf.sqrt(tf.reduce_sum(tf.square(embeddings), 1, keep_dims=True))
normalized_embeddings = embeddings / norm
valid_embeddings = tf.nn.embedding_lookup(
normalized_embeddings, valid_dataset)
similarity = tf.matmul(
valid_embeddings, normalized_embeddings, transpose_b=True)
query_embedding_token = tf.reshape(tf.reduce_mean(tf.nn.embedding_lookup(normalized_embeddings,query_ints),axis=0),shape=[1,embedding_size])
expanded_query_embedding_token = tf.reshape(tf.reduce_mean(tf.nn.embedding_lookup(normalized_embeddings,expanded_query_ints),axis=0),shape=[1,embedding_size])
similarity_query = tf.reshape(tf.matmul(
query_embedding_token, normalized_embeddings, transpose_b=True),shape=[int(normalized_embeddings.shape[0])])
similarity_expanded_query = tf.reshape(tf.matmul(
expanded_query_embedding_token, normalized_embeddings, transpose_b=True),shape=[int(normalized_embeddings.shape[0])])
norm_char = tf.sqrt(tf.reduce_sum(tf.square(char_embeddings), 1, keep_dims=True))
normalized_char_embeddings = char_embeddings / norm_char
valid_embeddings_char = tf.nn.embedding_lookup(
normalized_char_embeddings, valid_char_dataset)
similarity_char = tf.matmul(
valid_embeddings_char, normalized_char_embeddings, transpose_b=True)
bilstm = biLSTM_setup(embedding_size)
character_word_embeddings = tf.nn.embedding_lookup(normalized_char_embeddings, word_char_embeddings)
intermediate = biLSTM_implementation(character_word_embeddings, bilstm, False)
output = attention(w1, w2, intermediate)
word_embeddings = tf.nn.embedding_lookup(normalized_embeddings, train_inputs)
final_embedding = lambda_2*word_embeddings + (1-lambda_2)*output
with tf.variable_scope(tf.get_variable_scope(), reuse=None):
loss_char_train = tf.reduce_mean(
tf.nn.nce_loss(weights=nce_train_weights,
biases=nce_train_biases,
labels=train_labels,
inputs=final_embedding,
num_sampled=64,
num_classes=vocabulary_size))
optimizer_train = tf.train.AdamOptimizer(learning_rate/5).minimize(loss_char_train)
tweet_word_embed = tf.nn.embedding_lookup(normalized_embeddings, tweet_word_holder)
tweet_char_embeddings = tf.reshape(tf.nn.embedding_lookup(normalized_char_embeddings, tweet_char_holder),shape=[tweet_batch_size*word_max_len, char_max_len, embedding_size//2])
intermediate = biLSTM_implementation(tweet_char_embeddings, bilstm)
tweet_char_embed = tf.reshape(attention(w1,w2,intermediate),shape=[tweet_batch_size, word_max_len, embedding_size])
tweet_embedding = tf.reduce_mean(lambda_1*tweet_word_embed + (1-lambda_1)*tweet_char_embed,axis=1)
# query embeddings
query_embedding = tf.reshape(tf.reduce_mean(tf.nn.embedding_lookup(normalized_embeddings,query_ints),axis=0),shape=[1,embedding_size])
expanded_query_embedding = tf.reshape(tf.reduce_mean(tf.nn.embedding_lookup(normalized_embeddings,expanded_query_ints),axis=0),shape=[1,embedding_size],name="similarity_normal")
query_similarity = tf.reshape(tf.matmul(tweet_embedding, query_embedding, transpose_b=True),shape=[tweet_batch_size])
expanded_query_similarity = tf.reshape(tf.matmul(tweet_embedding, expanded_query_embedding, transpose_b=True),shape=[tweet_batch_size],name="similarity_expanded")
# tweet level query : for matching / extraction
tquery_word_embed = tf.nn.embedding_lookup(normalized_embeddings, tquery_word_holder)
tquery_char_embeddings = tf.reshape(tf.nn.embedding_lookup(normalized_char_embeddings, tquery_char_holder),shape=[word_max_len, char_max_len, embedding_size//2])
intermediate = biLSTM_implementation(tquery_char_embeddings, bilstm)
tquery_char_embed = attention(w1, w2, intermediate)
tquery_embedding = tf.reshape(tf.reduce_mean(lambda_1*tquery_word_embed + (1-lambda_1)*tquery_char_embed,axis=0),shape=[1,embedding_size])
norm_query = tf.sqrt(tf.reduce_sum(tf.square(tquery_embedding), 1, keep_dims=True))
tquery_embedding_norm = tquery_embedding / norm_query
cosine = tf.matmul(tweet_embedding, tquery_embedding_norm, transpose_b=True)
tweet_query_similarity = tf.reshape(cosine, shape=[tweet_batch_size], name="tweet_query_similarity")
tquery_embedding_norm_dim = tf.reshape(tquery_embedding_norm, shape=[1,embedding_size])
query_need_embedding = tf.reshape(tf.reduce_mean(tf.nn.embedding_lookup(normalized_embeddings, need_constant),axis=0),shape=[1,embedding_size])
cosine_need = tf.matmul(tquery_embedding_norm_dim, query_need_embedding, transpose_b=True)
tquery_embedding_reqd = tf.reshape(tquery_embedding_norm_dim - (cosine_need*tquery_embedding_norm_dim),shape=[1,embedding_size])
# we have the need vector without the need vector
query_avail_embedding = tf.reshape(tf.reduce_mean(tf.nn.embedding_lookup(normalized_embeddings,avail_constant),axis=0),shape=[1,embedding_size])
query_norm = tf.sqrt(tf.reduce_sum(tf.square(query_avail_embedding),1,keep_dims=True))
query_avail_embedding_norm = query_embedding / query_norm
cosine_avail = tf.matmul(tweet_embedding, query_avail_embedding_norm, transpose_b=True)
reduced_tweet_embedding = tweet_embedding - (tweet_embedding*cosine_avail)
match_similarity = tf.reshape(tf.matmul(reduced_tweet_embedding, tquery_embedding_reqd, transpose_b=True),shape=[tweet_batch_size],name="match_similarity")
# Add variable initializer.
init = tf.global_variables_initializer()
saver = tf.train.Saver()
# Step 5: Begin training.
# loading tweet list in integer marking form
# load more data
expand_count = 3
with tf.Session(graph=graph) as session:
# We must initialize all variables before we use them.
init.run()
count = 0
print("Initialized")
generators = [generate_batch, generate_batch_char]
similarities = [similarity, similarity_char]
placeholders = [[train_inputs,train_labels],[train_input_chars,train_char_labels]]
losses = [loss, loss_char]
optimizers = [optimizer, optimizer_char]
interval1 = 800
interval2 = 8000
datas = [data,char_data]
data_index = [data_index, char_data_index, buffer_index]
reverse_dictionaries = [reverse_dictionary, reverse_char_dictionary]
if query_type == 0:
query_name = 'Need'
else :
query_name = 'Avail'
print(query_tokens)
print(query_name)
count_ = train_model(session, dataset,query_similarity, query_tokens, query_ints, query_name, word_batch_list, char_batch_list, tweet_word_holder, tweet_char_holder, generators, similarities, num_steps, placeholders,losses, optimizers, interval1, interval2, valid_size, valid_examples, reverse_dictionaries, batch_size, num_skips, skip_window, filename, datas, data_index, tweet_batch_size)
placeholders += [[train_inputs, word_char_embeddings, train_labels]]
losses += [loss_char_train]
optimizers += [optimizer_train]
datas += [[word_batch_list, char_batch_list]]
count_ = train_model(session, dataset,query_similarity, query_tokens ,query_ints, query_name, word_batch_list, char_batch_list, tweet_word_holder, tweet_char_holder, generators, similarities, num_steps_roll, placeholders,losses, optimizers, interval1, interval2, valid_size, valid_examples, reverse_dictionaries, batch_size, num_skips, skip_window, filename, datas, data_index, tweet_batch_size, count_)
expanded_query_tokens, expanded_query_holder, final_query_similarity= expand_query(expand_flag, session,query_ints, np.array(query_tokens),dataset ,similarity_query, word_batch_dict, 100, query_ints, expanded_query_ints, query_similarity, expanded_query_similarity, expand_start_count, expand_count)
expanded_query_tokens = query_tokens + expanded_query_tokens
print(expanded_query_tokens)
count_ = train_model(session, dataset, final_query_similarity, expanded_query_tokens, expanded_query_holder, query_name, word_batch_list, char_batch_list, tweet_word_holder, tweet_char_holder, generators, similarities, num_steps_train , placeholders,losses, optimizers, interval1, interval2, valid_size, valid_examples, reverse_dictionaries, batch_size, num_skips, skip_window, filename, datas, data_index, tweet_batch_size, count_)
folder_name = './%s/%s/'%(dataset, query_type)
final_embeddings = normalized_embeddings.eval()
final_char_embedding = normalized_char_embeddings.eval()
np.save('../results/%s/%s/%s_word_embeddings.npy'%(dataset, query_name, filename), final_embeddings)
np.save('../results/%s/%s/%s_char_embeddings.npy'%(dataset, query_name, filename), final_char_embedding)
saver.save(session, '../results/%s/%s/%s_model.ckpt'%(dataset, query_name, filename))
| 60.592437 | 434 | 0.776437 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import operator
import collections
import math
import time
import os
import random
import zipfile
import time
import numpy as np
import sys
from six.moves import urllib
from six.moves import xrange
import tensorflow as tf
sys.path.append( '../util/')
from generators import *
from loader import *
from print_tweets import *
from similar_tokens import *
from training import *
from similar_tokens import *
from expand_query import *
from argument_loader import *
from setup import *
from LSTM import *
dataset, query_type, filename, num_steps, num_steps_roll, num_steps_train, expand_flag,lr_, matchname = import_arguments(sys.argv)
char_batch_dict, word_batch_dict,data, count, dictionary, reverse_dictionary, word_max_len, char_max_len, vocabulary_size, char_dictionary, reverse_char_dictionary, data_index, char_data_index, buffer_index, batch_list, char_batch_list, word_batch_list, char_data = build_everything(dataset)
data_index, batch, labels = generate_batch(data, data_index, batch_size=8, num_skips=2, skip_window=1,)
for i in range(8):
print(batch[i], reverse_dictionary[batch[i]],
'->', labels[i, 0], reverse_dictionary[labels[i, 0]])
char_data_index, batch, labels = generate_batch_char(char_data, char_data_index, batch_size=8, num_skips=2, skip_window=1)
for i in range(8):
print(batch[i], reverse_char_dictionary[batch[i]],
'->', labels[i, 0], reverse_char_dictionary[labels[i, 0]])
lambda_1, tweet_batch_size, expand_start_count, query_name, query_tokens, query_tokens_alternate, char_batch_size, num_sampled, valid_examples, valid_window, valid_size, skip_window, num_skips, embedding_size, char_vocabulary_size, batch_size, num_char_skips, skip_char_window = setup(char_dictionary, dictionary, query_type)
learning_rate = lr_
graph = tf.Graph()
with graph.as_default():
need_constant = tf.constant(query_tokens,dtype=tf.int32)
avail_constant = tf.constant(query_tokens_alternate, dtype=tf.int32)
train_inputs = tf.placeholder(tf.int32, shape=[batch_size])
train_input_chars = tf.placeholder(tf.int32, shape=[char_batch_size])
train_labels = tf.placeholder(tf.int32, shape=[batch_size, 1])
train_char_labels = tf.placeholder(tf.int32, shape=[char_batch_size, 1])
word_char_embeddings = tf.placeholder(tf.int32, shape=[batch_size, char_max_len])
valid_dataset = tf.constant(valid_examples[0], dtype=tf.int32)
valid_char_dataset = tf.constant(valid_examples[1], dtype=tf.int32)
query_ints = tf.placeholder(tf.int32, shape=len(query_tokens))
expanded_query_ints = tf.placeholder(tf.int32, shape=(len(query_tokens)+3))
tquery_word_holder = tf.placeholder(tf.int32, shape=[word_max_len],name="tweet_query_word_holder")
tquery_char_holder = tf.placeholder(tf.int32, shape=[word_max_len, char_max_len],name="tweet_query_char_holder")
tweet_char_holder = tf.placeholder(tf.int32, shape=[tweet_batch_size,word_max_len,char_max_len],name="tweet_char_holder")
tweet_word_holder = tf.placeholder(tf.int32, shape=[tweet_batch_size, word_max_len],name="tweet_word_holder")
with tf.device('/gpu:0'):
embeddings = tf.Variable(tf.random_uniform([vocabulary_size, embedding_size], -1.0, 1.0))
char_embeddings = tf.Variable(tf.random_uniform([char_vocabulary_size, embedding_size // 2],-1.0,1.0))
embed = tf.nn.embedding_lookup(embeddings, train_inputs)
char_embed = tf.nn.embedding_lookup(char_embeddings,train_input_chars)
lambda_2 = tf.Variable(tf.random_normal([1],stddev=1.0))
w1 = tf.Variable(tf.random_normal([embedding_size,embedding_size // 4],stddev=1.0/math.sqrt(embedding_size)))
w2 = tf.Variable(tf.random_normal([embedding_size // 4,1],stddev=1.0/math.sqrt(embedding_size)))
weights = tf.stack([w1]*batch_size)
vvector = tf.stack([w2]*batch_size)
weights_tweet = tf.stack([w1]*tweet_batch_size*word_max_len)
vvector_tweet = tf.stack([w2]*tweet_batch_size*word_max_len)
nce_weights = tf.Variable(
tf.truncated_normal([vocabulary_size, embedding_size],
stddev=1.0 / math.sqrt(embedding_size)))
nce_biases = tf.Variable(tf.zeros([vocabulary_size]))
nce_char_weights = tf.Variable(
tf.truncated_normal([vocabulary_size, embedding_size // 2],
stddev=1.0 / math.sqrt(embedding_size // 2)))
nce_char_biases = tf.Variable(tf.zeros([vocabulary_size]))
nce_train_weights = tf.Variable(
tf.truncated_normal([vocabulary_size, embedding_size],
stddev=1.0 / math.sqrt(embedding_size)))
nce_train_biases = tf.Variable(tf.zeros([vocabulary_size]))
loss = tf.reduce_mean(
tf.nn.nce_loss(weights=nce_weights,
biases=nce_biases,
labels=train_labels,
inputs=embed,
num_sampled=num_sampled,
num_classes=vocabulary_size))
loss_char = tf.reduce_mean(
tf.nn.nce_loss(weights=nce_char_weights,
biases=nce_char_biases,
labels=train_char_labels,
inputs=char_embed,
num_sampled=10,
num_classes=char_vocabulary_size))
optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss)
optimizer_char = tf.train.AdamOptimizer(learning_rate /5).minimize(loss_char)
norm = tf.sqrt(tf.reduce_sum(tf.square(embeddings), 1, keep_dims=True))
normalized_embeddings = embeddings / norm
valid_embeddings = tf.nn.embedding_lookup(
normalized_embeddings, valid_dataset)
similarity = tf.matmul(
valid_embeddings, normalized_embeddings, transpose_b=True)
query_embedding_token = tf.reshape(tf.reduce_mean(tf.nn.embedding_lookup(normalized_embeddings,query_ints),axis=0),shape=[1,embedding_size])
expanded_query_embedding_token = tf.reshape(tf.reduce_mean(tf.nn.embedding_lookup(normalized_embeddings,expanded_query_ints),axis=0),shape=[1,embedding_size])
similarity_query = tf.reshape(tf.matmul(
query_embedding_token, normalized_embeddings, transpose_b=True),shape=[int(normalized_embeddings.shape[0])])
similarity_expanded_query = tf.reshape(tf.matmul(
expanded_query_embedding_token, normalized_embeddings, transpose_b=True),shape=[int(normalized_embeddings.shape[0])])
norm_char = tf.sqrt(tf.reduce_sum(tf.square(char_embeddings), 1, keep_dims=True))
normalized_char_embeddings = char_embeddings / norm_char
valid_embeddings_char = tf.nn.embedding_lookup(
normalized_char_embeddings, valid_char_dataset)
similarity_char = tf.matmul(
valid_embeddings_char, normalized_char_embeddings, transpose_b=True)
bilstm = biLSTM_setup(embedding_size)
character_word_embeddings = tf.nn.embedding_lookup(normalized_char_embeddings, word_char_embeddings)
intermediate = biLSTM_implementation(character_word_embeddings, bilstm, False)
output = attention(w1, w2, intermediate)
word_embeddings = tf.nn.embedding_lookup(normalized_embeddings, train_inputs)
final_embedding = lambda_2*word_embeddings + (1-lambda_2)*output
with tf.variable_scope(tf.get_variable_scope(), reuse=None):
loss_char_train = tf.reduce_mean(
tf.nn.nce_loss(weights=nce_train_weights,
biases=nce_train_biases,
labels=train_labels,
inputs=final_embedding,
num_sampled=64,
num_classes=vocabulary_size))
optimizer_train = tf.train.AdamOptimizer(learning_rate/5).minimize(loss_char_train)
tweet_word_embed = tf.nn.embedding_lookup(normalized_embeddings, tweet_word_holder)
tweet_char_embeddings = tf.reshape(tf.nn.embedding_lookup(normalized_char_embeddings, tweet_char_holder),shape=[tweet_batch_size*word_max_len, char_max_len, embedding_size//2])
intermediate = biLSTM_implementation(tweet_char_embeddings, bilstm)
tweet_char_embed = tf.reshape(attention(w1,w2,intermediate),shape=[tweet_batch_size, word_max_len, embedding_size])
tweet_embedding = tf.reduce_mean(lambda_1*tweet_word_embed + (1-lambda_1)*tweet_char_embed,axis=1)
query_embedding = tf.reshape(tf.reduce_mean(tf.nn.embedding_lookup(normalized_embeddings,query_ints),axis=0),shape=[1,embedding_size])
expanded_query_embedding = tf.reshape(tf.reduce_mean(tf.nn.embedding_lookup(normalized_embeddings,expanded_query_ints),axis=0),shape=[1,embedding_size],name="similarity_normal")
query_similarity = tf.reshape(tf.matmul(tweet_embedding, query_embedding, transpose_b=True),shape=[tweet_batch_size])
expanded_query_similarity = tf.reshape(tf.matmul(tweet_embedding, expanded_query_embedding, transpose_b=True),shape=[tweet_batch_size],name="similarity_expanded")
tquery_word_embed = tf.nn.embedding_lookup(normalized_embeddings, tquery_word_holder)
tquery_char_embeddings = tf.reshape(tf.nn.embedding_lookup(normalized_char_embeddings, tquery_char_holder),shape=[word_max_len, char_max_len, embedding_size//2])
intermediate = biLSTM_implementation(tquery_char_embeddings, bilstm)
tquery_char_embed = attention(w1, w2, intermediate)
tquery_embedding = tf.reshape(tf.reduce_mean(lambda_1*tquery_word_embed + (1-lambda_1)*tquery_char_embed,axis=0),shape=[1,embedding_size])
norm_query = tf.sqrt(tf.reduce_sum(tf.square(tquery_embedding), 1, keep_dims=True))
tquery_embedding_norm = tquery_embedding / norm_query
cosine = tf.matmul(tweet_embedding, tquery_embedding_norm, transpose_b=True)
tweet_query_similarity = tf.reshape(cosine, shape=[tweet_batch_size], name="tweet_query_similarity")
tquery_embedding_norm_dim = tf.reshape(tquery_embedding_norm, shape=[1,embedding_size])
query_need_embedding = tf.reshape(tf.reduce_mean(tf.nn.embedding_lookup(normalized_embeddings, need_constant),axis=0),shape=[1,embedding_size])
cosine_need = tf.matmul(tquery_embedding_norm_dim, query_need_embedding, transpose_b=True)
tquery_embedding_reqd = tf.reshape(tquery_embedding_norm_dim - (cosine_need*tquery_embedding_norm_dim),shape=[1,embedding_size])
query_avail_embedding = tf.reshape(tf.reduce_mean(tf.nn.embedding_lookup(normalized_embeddings,avail_constant),axis=0),shape=[1,embedding_size])
query_norm = tf.sqrt(tf.reduce_sum(tf.square(query_avail_embedding),1,keep_dims=True))
query_avail_embedding_norm = query_embedding / query_norm
cosine_avail = tf.matmul(tweet_embedding, query_avail_embedding_norm, transpose_b=True)
reduced_tweet_embedding = tweet_embedding - (tweet_embedding*cosine_avail)
match_similarity = tf.reshape(tf.matmul(reduced_tweet_embedding, tquery_embedding_reqd, transpose_b=True),shape=[tweet_batch_size],name="match_similarity")
init = tf.global_variables_initializer()
saver = tf.train.Saver()
expand_count = 3
with tf.Session(graph=graph) as session:
    # Training driver (TF1.x graph mode): word/char skip-gram pre-training,
    # joint fine-tuning with the combined NCE loss, query expansion, and a
    # final training pass; finishes by persisting embeddings and a checkpoint.
    init.run()
    count = 0
    print("Initialized")
    # Per-modality training plumbing, paired as [word, character]; the
    # train_model helper presumably indexes these pairs — TODO confirm.
    generators = [generate_batch, generate_batch_char]
    similarities = [similarity, similarity_char]
    placeholders = [[train_inputs,train_labels],[train_input_chars,train_char_labels]]
    losses = [loss, loss_char]
    optimizers = [optimizer, optimizer_char]
    interval1 = 800
    interval2 = 8000
    datas = [data,char_data]
    data_index = [data_index, char_data_index, buffer_index]
    reverse_dictionaries = [reverse_dictionary, reverse_char_dictionary]
    # query_type selects the query family; query_name is used for the
    # results/checkpoint paths below.
    if query_type == 0:
        query_name = 'Need'
    else :
        query_name = 'Avail'
    print(query_tokens)
    print(query_name)
    # Phase 1: pre-train word and character embeddings separately.
    count_ = train_model(session, dataset,query_similarity, query_tokens, query_ints, query_name, word_batch_list, char_batch_list, tweet_word_holder, tweet_char_holder, generators, similarities, num_steps, placeholders,losses, optimizers, interval1, interval2, valid_size, valid_examples, reverse_dictionaries, batch_size, num_skips, skip_window, filename, datas, data_index, tweet_batch_size)
    # Phase 2: add the joint word+char NCE objective and keep training.
    placeholders += [[train_inputs, word_char_embeddings, train_labels]]
    losses += [loss_char_train]
    optimizers += [optimizer_train]
    datas += [[word_batch_list, char_batch_list]]
    count_ = train_model(session, dataset,query_similarity, query_tokens ,query_ints, query_name, word_batch_list, char_batch_list, tweet_word_holder, tweet_char_holder, generators, similarities, num_steps_roll, placeholders,losses, optimizers, interval1, interval2, valid_size, valid_examples, reverse_dictionaries, batch_size, num_skips, skip_window, filename, datas, data_index, tweet_batch_size, count_)
    # Phase 3: expand the query with nearby tokens, then train on the
    # expanded query's similarity target.
    expanded_query_tokens, expanded_query_holder, final_query_similarity= expand_query(expand_flag, session,query_ints, np.array(query_tokens),dataset ,similarity_query, word_batch_dict, 100, query_ints, expanded_query_ints, query_similarity, expanded_query_similarity, expand_start_count, expand_count)
    expanded_query_tokens = query_tokens + expanded_query_tokens
    print(expanded_query_tokens)
    count_ = train_model(session, dataset, final_query_similarity, expanded_query_tokens, expanded_query_holder, query_name, word_batch_list, char_batch_list, tweet_word_holder, tweet_char_holder, generators, similarities, num_steps_train , placeholders,losses, optimizers, interval1, interval2, valid_size, valid_examples, reverse_dictionaries, batch_size, num_skips, skip_window, filename, datas, data_index, tweet_batch_size, count_)
    # NOTE(review): folder_name is computed but never used in this block —
    # confirm against the rest of the script.
    folder_name = './%s/%s/'%(dataset, query_type)
    # Materialize the learned (normalized) embeddings and persist everything.
    final_embeddings = normalized_embeddings.eval()
    final_char_embedding = normalized_char_embeddings.eval()
    np.save('../results/%s/%s/%s_word_embeddings.npy'%(dataset, query_name, filename), final_embeddings)
    np.save('../results/%s/%s/%s_char_embeddings.npy'%(dataset, query_name, filename), final_char_embedding)
    saver.save(session, '../results/%s/%s/%s_model.ckpt'%(dataset, query_name, filename))
| true | true |
f72f5be70b1693d04dec3b426552809513564ddf | 1,188 | py | Python | Python-Basic/open-data.py | dicksonlam1011/python-training | ea43cae118081e607f7767c165eeda64b2a64d77 | [
"MIT"
] | null | null | null | Python-Basic/open-data.py | dicksonlam1011/python-training | ea43cae118081e607f7767c165eeda64b2a64d77 | [
"MIT"
] | null | null | null | Python-Basic/open-data.py | dicksonlam1011/python-training | ea43cae118081e607f7767c165eeda64b2a64d77 | [
"MIT"
] | null | null | null | # Connect to the internet (webpage)
# Fetches location results from the Hong Kong geodata open-data API through
# an HTTPS proxy and writes the Chinese address of each result to data.txt.
import urllib.request as request
import json
##########################################################################################
# Set Proxy for Urllib
# SECURITY(review): real-looking proxy credentials are hard-coded in this URL
# and published with the source — rotate them and load the proxy string from
# an environment variable or config file instead.
proxy="https://llm234:85167787887Ss!@proxy.ha.org.hk:8080"
proxy_support=request.ProxyHandler({'https':proxy}) # Build ProxyHandler object by given proxy
opener = request.build_opener(proxy_support) # Build opener with ProxyHandler object
request.install_opener(opener) # Install opener to request
# r = urllib.request.urlopen('http://icanhazip.com',timeout = 1000) # Open url
##########################################################################################
# src="https://www.ntu.edu.tw/" # Blocked by HA Proxy
# src="https://ha.home/visitor/"
src="https://geodata.gov.hk/gs/api/v1.0.0/locationSearch?q=cultural%20centre"
with request.urlopen(src) as response: # Get raw response (here: a JSON array)
    # data=response.read()
    #data=response.read().decode("utf-8")
    data=json.load(response)
    # print(data)
    with open("data.txt","w",encoding="utf-8") as file: # Write the info into a file
        for clist in data:
            file.write(clist["addressZH"]+"\n")
            #print(clist["addressZH"])
| 39.6 | 94 | 0.608586 |
import urllib.request as request
import json
| true | true |
f72f5c5da9321919ee315fed0547bbb36ffd0ef4 | 1,569 | py | Python | devind_helpers/utils.py | devind-team/devind-django-helpers | 5c64d46a12802bbe0b70e44aa9d19bf975511b6e | [
"MIT"
] | null | null | null | devind_helpers/utils.py | devind-team/devind-django-helpers | 5c64d46a12802bbe0b70e44aa9d19bf975511b6e | [
"MIT"
] | 4 | 2022-02-18T09:24:05.000Z | 2022-03-31T16:46:29.000Z | devind_helpers/utils.py | devind-team/devind-django-helpers | 5c64d46a12802bbe0b70e44aa9d19bf975511b6e | [
"MIT"
] | null | null | null | """Модуль со вспомогательными функциями."""
from argparse import ArgumentTypeError
from random import choice
from string import ascii_letters
from typing import Optional, Tuple, Union
from graphql_relay import from_global_id
def gid2int(gid: Union[str, int]) -> Optional[int]:
    """Convert an int, numeric string, or relay global id to ``int``.

    Tries ``int(gid)`` first; on failure decodes ``gid`` as a graphql-relay
    global id and converts its id part. Returns ``None`` when neither works.
    """
    try:
        return int(gid)
    except ValueError:
        try:
            return int(from_global_id(gid)[1])
        except (TypeError, ValueError):
            # ValueError added: a decodable global id whose id part is not
            # numeric (or malformed base64, binascii.Error) previously
            # escaped as an unhandled exception, breaking the
            # "None on failure" contract the callers rely on.
            return None
def from_gid_or_none(global_id: Optional[str]) -> Tuple[Optional[str], Optional[int]]:
    """Decode a relay global id; return ``(None, None)`` on falsy input or parse error."""
    if not global_id:
        return None, None
    try:
        parsed = from_global_id(global_id)
    except TypeError:
        return None, None
    return parsed
def random_string(count: int) -> str:
    """Build a random string made of ``count`` ASCII letters."""
    letters = [choice(ascii_letters) for _ in range(count)]
    return ''.join(letters)
def convert_str_to_bool(value: str) -> bool:
    """Interpret a yes/no style string as a boolean flag.

    Booleans pass through untouched; anything else must match one of the
    recognized spellings or ``argparse.ArgumentTypeError`` is raised.
    """
    if isinstance(value, bool):
        return value
    lowered = value.lower()
    if lowered in ('yes', 'true', 't', 'y', '1'):
        return True
    if lowered in ('no', 'false', 'f', 'n', '0'):
        return False
    raise ArgumentTypeError('Ожидался флаг (true/false)')
def convert_str_to_int(value: Optional[Union[str, bytes]]) -> Optional[int]:
    """Parse a string (or UTF-8 bytes) as an int; ``None`` on missing or bad input."""
    if value is None:
        return None
    text = value.decode('utf-8') if isinstance(value, bytes) else value
    try:
        return int(text)
    except ValueError:
        return None
| 27.051724 | 86 | 0.643722 |
from argparse import ArgumentTypeError
from random import choice
from string import ascii_letters
from typing import Optional, Tuple, Union
from graphql_relay import from_global_id
def gid2int(gid: Union[str, int]) -> Optional[int]:
    """Convert an int, numeric string, or relay global id to ``int``; ``None`` on failure."""
    try:
        return int(gid)
    except ValueError:
        try:
            return int(from_global_id(gid)[1])
        except TypeError:
            return None
def from_gid_or_none(global_id: Optional[str]) -> Tuple[Optional[str], Optional[int]]:
    """Decode a relay global id into ``(type, id)``; ``(None, None)`` on falsy input or parse error."""
    if not global_id:
        return None, None
    try:
        return from_global_id(global_id)
    except TypeError:
        return None, None
def random_string(count: int) -> str:
    """Generate a random string of ``count`` ASCII letters."""
    return ''.join(choice(ascii_letters) for _ in range(count))
def convert_str_to_bool(value: str) -> bool:
    """Convert a yes/no style string to ``bool``; raises ``ArgumentTypeError`` otherwise."""
    if isinstance(value, bool):
        return value
    if value.lower() in ('yes', 'true', 't', 'y', '1'):
        return True
    elif value.lower() in ('no', 'false', 'f', 'n', '0'):
        return False
    else:
        raise ArgumentTypeError('Ожидался флаг (true/false)')
def convert_str_to_int(value: Optional[Union[str, bytes]]) -> Optional[int]:
    """Convert a string or UTF-8 bytes to ``int``; ``None`` for ``None`` or unparsable input."""
    if value is None:
        return None
    if isinstance(value, bytes):
        value = value.decode('utf-8')
    try:
        return int(value)
    except ValueError:
        return None
| true | true |
f72f5d5830c20fa73cabbf8a7fe2538bbbde66c2 | 1,208 | py | Python | src/tests/base/__init__.py | abrock/pretix | cd9c048458afce1198276e5936bf583578855a4f | [
"ECL-2.0",
"Apache-2.0"
] | 1 | 2021-06-23T07:44:54.000Z | 2021-06-23T07:44:54.000Z | src/tests/base/__init__.py | awg24/pretix | b1d67a48601838bac0d4e498cbe8bdcd16013d60 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | src/tests/base/__init__.py | awg24/pretix | b1d67a48601838bac0d4e498cbe8bdcd16013d60 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | import os
import sys
import time
from django.conf import settings
from django.contrib.staticfiles.testing import StaticLiveServerTestCase
from selenium import webdriver
# could use Chrome, Firefox, etc... here
BROWSER = os.environ.get('TEST_BROWSER', 'PhantomJS')
class BrowserTest(StaticLiveServerTestCase):
    """Base class for Selenium browser tests against Django's live test server.

    The driver is chosen via the module-level ``BROWSER`` value (from the
    ``TEST_BROWSER`` environment variable): either the name of a
    ``selenium.webdriver`` attribute (e.g. ``PhantomJS``) or, failing that,
    a remote WebDriver command-executor URL.
    """
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Mirror the test runner's --debug flag into Django's DEBUG setting.
        settings.DEBUG = ('--debug' in sys.argv)
    def setUp(self):
        # Local driver when BROWSER names a webdriver attribute,
        # remote WebDriver otherwise.
        if hasattr(webdriver, BROWSER):
            self.driver = getattr(webdriver, BROWSER)()
        else:
            self.driver = webdriver.Remote(
                desired_capabilities=webdriver.DesiredCapabilities.CHROME,
                command_executor=BROWSER
            )
        self.driver.set_window_size(1920, 1080)
        self.driver.implicitly_wait(10)
    def tearDown(self):
        # Always release the browser session after each test.
        self.driver.quit()
    def scroll_into_view(self, element):
        """Scroll element into view"""
        y = element.location['y']
        self.driver.execute_script('window.scrollTo(0, {0})'.format(y))
    def scroll_and_click(self, element):
        """Scroll to *element*, give the page time to settle, then click it."""
        self.scroll_into_view(element)
        time.sleep(0.5)
        element.click()
| 28.761905 | 74 | 0.653974 | import os
import sys
import time
from django.conf import settings
from django.contrib.staticfiles.testing import StaticLiveServerTestCase
from selenium import webdriver
BROWSER = os.environ.get('TEST_BROWSER', 'PhantomJS')
class BrowserTest(StaticLiveServerTestCase):
    """Base class for Selenium browser tests against Django's live test server.

    The driver is chosen via the module-level ``BROWSER`` value: either the
    name of a ``selenium.webdriver`` attribute or a remote WebDriver URL.
    """
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Mirror the test runner's --debug flag into Django's DEBUG setting.
        settings.DEBUG = ('--debug' in sys.argv)
    def setUp(self):
        # Local driver when BROWSER names a webdriver attribute,
        # remote WebDriver otherwise.
        if hasattr(webdriver, BROWSER):
            self.driver = getattr(webdriver, BROWSER)()
        else:
            self.driver = webdriver.Remote(
                desired_capabilities=webdriver.DesiredCapabilities.CHROME,
                command_executor=BROWSER
            )
        self.driver.set_window_size(1920, 1080)
        self.driver.implicitly_wait(10)
    def tearDown(self):
        # Always release the browser session after each test.
        self.driver.quit()
    def scroll_into_view(self, element):
        """Scroll *element* into view."""
        y = element.location['y']
        self.driver.execute_script('window.scrollTo(0, {0})'.format(y))
    def scroll_and_click(self, element):
        """Scroll to *element*, give the page time to settle, then click it."""
        self.scroll_into_view(element)
        time.sleep(0.5)
        element.click()
| true | true |
f72f5da82bf29e795a9aadab84dc4c93b8d191f1 | 3,776 | py | Python | pyro/distributions/relaxed_straight_through.py | ruohoruotsi/pyro | b54a4b42b9474eb3ecee11505e45fde85b1cdc54 | [
"MIT"
] | null | null | null | pyro/distributions/relaxed_straight_through.py | ruohoruotsi/pyro | b54a4b42b9474eb3ecee11505e45fde85b1cdc54 | [
"MIT"
] | null | null | null | pyro/distributions/relaxed_straight_through.py | ruohoruotsi/pyro | b54a4b42b9474eb3ecee11505e45fde85b1cdc54 | [
"MIT"
] | null | null | null | from __future__ import absolute_import, division, print_function
import torch
from pyro.distributions.torch import RelaxedOneHotCategorical, RelaxedBernoulli
from pyro.distributions.util import copy_docs_from
from torch.distributions.utils import clamp_probs
@copy_docs_from(RelaxedOneHotCategorical)
class RelaxedOneHotCategoricalStraightThrough(RelaxedOneHotCategorical):
    """
    A RelaxedOneHotCategorical (Gumbel-Softmax) variant with a
    straight-through gradient estimator.

    Samples returned by :meth:`rsample` are quantized to exact one-hot
    vectors, :meth:`log_prob` scores the underlying relaxed sample, and the
    backward pass propagates gradients through that relaxed sample.

    References: Maddison, Mnih, Teh, "The Concrete Distribution";
    Jang, Gu, Poole, "Categorical Reparameterization with Gumbel-Softmax".
    """
    def rsample(self, sample_shape=torch.Size()):
        """Draw a relaxed sample, clamp it away from 0/1, and quantize it."""
        relaxed = super(RelaxedOneHotCategoricalStraightThrough, self).rsample(sample_shape)
        relaxed = clamp_probs(relaxed)
        return QuantizeCategorical.apply(relaxed)
    def log_prob(self, value):
        """Score the relaxed sample stashed on a quantized *value*, if present."""
        target = getattr(value, '_unquantize', value)
        return super(RelaxedOneHotCategoricalStraightThrough, self).log_prob(target)
class QuantizeCategorical(torch.autograd.Function):
    """One-hot argmax quantization with a straight-through (identity) gradient."""
    @staticmethod
    def forward(ctx, soft_value):
        """Return a one-hot tensor marking the argmax of the last dimension.

        The relaxed input is stashed on the output as ``_unquantize`` so the
        distribution's ``log_prob`` can recover and score it later.
        """
        winners = soft_value.max(-1)[1]
        if winners.dim() < soft_value.dim():
            winners = winners.unsqueeze(-1)
        quantized = torch.zeros_like(soft_value)
        quantized._unquantize = soft_value
        return quantized.scatter_(-1, winners, 1)
    @staticmethod
    def backward(ctx, grad):
        """Straight-through estimator: pass the upstream gradient unchanged."""
        return grad
@copy_docs_from(RelaxedBernoulli)
class RelaxedBernoulliStraightThrough(RelaxedBernoulli):
    """
    A RelaxedBernoulli variant with a straight-through gradient estimator.

    Samples returned by :meth:`rsample` are quantized to exact 0/1 values,
    :meth:`log_prob` scores the underlying relaxed sample, and the backward
    pass propagates gradients through that relaxed sample.

    References: Maddison, Mnih, Teh, "The Concrete Distribution";
    Jang, Gu, Poole, "Categorical Reparameterization with Gumbel-Softmax".
    """
    def rsample(self, sample_shape=torch.Size()):
        """Draw a relaxed sample, clamp it away from 0/1, and quantize it."""
        relaxed = super(RelaxedBernoulliStraightThrough, self).rsample(sample_shape)
        relaxed = clamp_probs(relaxed)
        return QuantizeBernoulli.apply(relaxed)
    def log_prob(self, value):
        """Score the relaxed sample stashed on a quantized *value*, if present."""
        target = getattr(value, '_unquantize', value)
        return super(RelaxedBernoulliStraightThrough, self).log_prob(target)
class QuantizeBernoulli(torch.autograd.Function):
    """Round-to-{0,1} quantization with a straight-through (identity) gradient."""
    @staticmethod
    def forward(ctx, soft_value):
        """Round the relaxed sample; keep it on the output as ``_unquantize``."""
        quantized = torch.round(soft_value)
        quantized._unquantize = soft_value
        return quantized
    @staticmethod
    def backward(ctx, grad):
        """Straight-through estimator: pass the upstream gradient unchanged."""
        return grad
| 37.386139 | 96 | 0.736229 | from __future__ import absolute_import, division, print_function
import torch
from pyro.distributions.torch import RelaxedOneHotCategorical, RelaxedBernoulli
from pyro.distributions.util import copy_docs_from
from torch.distributions.utils import clamp_probs
@copy_docs_from(RelaxedOneHotCategorical)
class RelaxedOneHotCategoricalStraightThrough(RelaxedOneHotCategorical):
    """Relaxed one-hot categorical with a straight-through gradient estimator.

    ``rsample`` returns quantized (one-hot) samples while gradients flow
    through the underlying relaxed Gumbel-Softmax sample; ``log_prob``
    scores the relaxed sample stashed on the quantized value.
    """
    def rsample(self, sample_shape=torch.Size()):
        """Draw a relaxed sample, clamp it away from 0/1, and quantize it."""
        soft_sample = super(RelaxedOneHotCategoricalStraightThrough, self).rsample(sample_shape)
        soft_sample = clamp_probs(soft_sample)
        hard_sample = QuantizeCategorical.apply(soft_sample)
        return hard_sample
    def log_prob(self, value):
        """Score the relaxed sample attached to *value*, when available."""
        value = getattr(value, '_unquantize', value)
        return super(RelaxedOneHotCategoricalStraightThrough, self).log_prob(value)
class QuantizeCategorical(torch.autograd.Function):
    """Autograd function: one-hot argmax forward, identity-gradient backward."""
    @staticmethod
    def forward(ctx, soft_value):
        # Quantize along the last dimension; keep the relaxed sample on the
        # output so log_prob can recover it.
        argmax = soft_value.max(-1)[1]
        hard_value = torch.zeros_like(soft_value)
        hard_value._unquantize = soft_value
        if argmax.dim() < hard_value.dim():
            argmax = argmax.unsqueeze(-1)
        return hard_value.scatter_(-1, argmax, 1)
    @staticmethod
    def backward(ctx, grad):
        # Straight-through estimator: pass the gradient unchanged.
        return grad
@copy_docs_from(RelaxedBernoulli)
class RelaxedBernoulliStraightThrough(RelaxedBernoulli):
    """Relaxed Bernoulli with a straight-through gradient estimator."""
    def rsample(self, sample_shape=torch.Size()):
        """Draw a relaxed sample, clamp it away from 0/1, and quantize it."""
        soft_sample = super(RelaxedBernoulliStraightThrough, self).rsample(sample_shape)
        soft_sample = clamp_probs(soft_sample)
        hard_sample = QuantizeBernoulli.apply(soft_sample)
        return hard_sample
    def log_prob(self, value):
        """Score the relaxed sample attached to *value*, when available."""
        value = getattr(value, '_unquantize', value)
        return super(RelaxedBernoulliStraightThrough, self).log_prob(value)
class QuantizeBernoulli(torch.autograd.Function):
    """Autograd function: rounding forward, identity-gradient backward."""
    @staticmethod
    def forward(ctx, soft_value):
        # Keep the relaxed sample on the output for log_prob.
        hard_value = soft_value.round()
        hard_value._unquantize = soft_value
        return hard_value
    @staticmethod
    def backward(ctx, grad):
        # Straight-through estimator: pass the gradient unchanged.
        return grad
| true | true |
f72f5f8cda52fd057ea9bad187ca38a2d6bf5f2a | 1,783 | py | Python | compsocsite/appauth/urls.py | ReedyChen/opra | 86ce88c7219d92e321cd9aa3d0bc2bf631e4b90f | [
"MIT"
] | 8 | 2017-03-07T19:46:51.000Z | 2021-06-01T01:41:37.000Z | compsocsite/appauth/urls.py | ReedyChen/opra | 86ce88c7219d92e321cd9aa3d0bc2bf631e4b90f | [
"MIT"
] | null | null | null | compsocsite/appauth/urls.py | ReedyChen/opra | 86ce88c7219d92e321cd9aa3d0bc2bf631e4b90f | [
"MIT"
] | 9 | 2016-06-09T03:36:20.000Z | 2019-09-11T20:56:23.000Z | from django.conf.urls import url
# URL routes for the appauth application.
# Fix: the closing "]" of urlpatterns was fused with dataset-dump artifacts,
# breaking the module's syntax; it is restored below, along with the
# `url` import that was trapped in the corrupted line above.
from django.conf.urls import url

from . import views

app_name = 'appauth'

urlpatterns = [
    url(r'^register/$', views.register, name='register'),
    url(r'^login/$', views.user_login, name='login'),
    url(r'^login/forgetpassword/$', views.forgetPassword, name='forgetpassword'),
    url(r'^login/forgetpasswordview/$', views.forgetPasswordView, name='forgetpasswordview'),
    url(r'^resetpassword/(?P<key>\w+)/$', views.resetPage, name='resetpasswordview'),
    url(r'^resetpassword/change/(?P<key>\w+)/$', views.resetPassword, name='resetpassword'),
    url(r'^logout/$', views.user_logout, name='logout'),
    url(r'^settings/$', views.displaySettings, name='settings'),
    url(r'^passwordpage/$', views.changePasswordView, name='passwordpage'),
    url(r'^passwordpage/changepassword/$', views.changepassword, name='changepassword'),
    url(r'^settings/global/$', views.globalSettings, name='globalSettings'),
    url(r'^settings/update/$', views.updateSettings, name='updateSettings'),
    url(r'^settings/diablehint/$', views.disableHint, name='disableHint'),
    url(r'^settings/update/global$', views.updateGlobalSettings, name='updateGlobalSettings'),
    url(r'^register/confirm/(?P<key>\w+)/$',views.confirm, name='confirm'),
    url(r'^messages$', views.MessageView.as_view(), name='messages'),
    url(r'^(?P<question_id>[0-9]+)/quickregister/$', views.quickRegister, name='quickregister'),
    url(r'^(?P<question_id>[0-9]+)/quickconfirm/(?P<key>\w+)/$', views.quickConfirm, name='quickconfirm'),
    url(r'^(?P<key>\w+)/(?P<question_id>[0-9]+)/quicklogin/$', views.quickLogin, name='quickLogin'),
    url(r'^createmturk/$', views.createMturkUser, name='createmturk'),
    url(r'^resetfinish/$', views.resetAllFinish, name='resetfinish'),
]
# URL routes for the appauth application (comment-stripped copy).
# Fix: the closing "]" of urlpatterns was fused with dataset-dump artifacts
# ("] | true | true |"), breaking the module's syntax; restored below along
# with the `url` import that was fused onto the previous corrupted line.
from django.conf.urls import url
from . import views

app_name = 'appauth'

urlpatterns = [
    url(r'^register/$', views.register, name='register'),
    url(r'^login/$', views.user_login, name='login'),
    url(r'^login/forgetpassword/$', views.forgetPassword, name='forgetpassword'),
    url(r'^login/forgetpasswordview/$', views.forgetPasswordView, name='forgetpasswordview'),
    url(r'^resetpassword/(?P<key>\w+)/$', views.resetPage, name='resetpasswordview'),
    url(r'^resetpassword/change/(?P<key>\w+)/$', views.resetPassword, name='resetpassword'),
    url(r'^logout/$', views.user_logout, name='logout'),
    url(r'^settings/$', views.displaySettings, name='settings'),
    url(r'^passwordpage/$', views.changePasswordView, name='passwordpage'),
    url(r'^passwordpage/changepassword/$', views.changepassword, name='changepassword'),
    url(r'^settings/global/$', views.globalSettings, name='globalSettings'),
    url(r'^settings/update/$', views.updateSettings, name='updateSettings'),
    url(r'^settings/diablehint/$', views.disableHint, name='disableHint'),
    url(r'^settings/update/global$', views.updateGlobalSettings, name='updateGlobalSettings'),
    url(r'^register/confirm/(?P<key>\w+)/$',views.confirm, name='confirm'),
    url(r'^messages$', views.MessageView.as_view(), name='messages'),
    url(r'^(?P<question_id>[0-9]+)/quickregister/$', views.quickRegister, name='quickregister'),
    url(r'^(?P<question_id>[0-9]+)/quickconfirm/(?P<key>\w+)/$', views.quickConfirm, name='quickconfirm'),
    url(r'^(?P<key>\w+)/(?P<question_id>[0-9]+)/quicklogin/$', views.quickLogin, name='quickLogin'),
    url(r'^createmturk/$', views.createMturkUser, name='createmturk'),
    url(r'^resetfinish/$', views.resetAllFinish, name='resetfinish'),
]
f72f5ffdbac0e1ccb48e304d2117456cfe4a1190 | 1,709 | py | Python | app/core/migrations/0001_initial.py | rahulnegi20/recipie-api | 9e99d3c6803fe87cb64eae6980e9e0817f643f5f | [
"MIT"
] | null | null | null | app/core/migrations/0001_initial.py | rahulnegi20/recipie-api | 9e99d3c6803fe87cb64eae6980e9e0817f643f5f | [
"MIT"
] | null | null | null | app/core/migrations/0001_initial.py | rahulnegi20/recipie-api | 9e99d3c6803fe87cb64eae6980e9e0817f643f5f | [
"MIT"
] | null | null | null | # Generated by Django 2.1.15 on 2021-04-08 16:02
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
('auth', '0009_alter_user_last_name_max_length'),
]
operations = [
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
('email', models.EmailField(max_length=255, unique=True)),
('name', models.CharField(max_length=255)),
('is_active', models.BooleanField(default=True)),
('is_staff', models.BooleanField(default=False)),
('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
],
options={
'abstract': False,
},
),
]
| 50.264706 | 266 | 0.63897 |
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
('auth', '0009_alter_user_last_name_max_length'),
]
operations = [
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
('email', models.EmailField(max_length=255, unique=True)),
('name', models.CharField(max_length=255)),
('is_active', models.BooleanField(default=True)),
('is_staff', models.BooleanField(default=False)),
('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
],
options={
'abstract': False,
},
),
]
| true | true |
f72f6072b1970e61514fd8c68fbcfd4ee3ad6383 | 479 | py | Python | data/scripts/templates/object/draft_schematic/space/weapon/missile/shared_countermeasure_decoy_launcher.py | obi-two/GameServer | 7d37024e2291a97d49522610cd8f1dbe5666afc2 | [
"MIT"
] | 20 | 2015-02-23T15:11:56.000Z | 2022-03-18T20:56:48.000Z | data/scripts/templates/object/draft_schematic/space/weapon/missile/shared_countermeasure_decoy_launcher.py | apathyboy/swganh | 665128efe9154611dec4cb5efc61d246dd095984 | [
"MIT"
] | null | null | null | data/scripts/templates/object/draft_schematic/space/weapon/missile/shared_countermeasure_decoy_launcher.py | apathyboy/swganh | 665128efe9154611dec4cb5efc61d246dd095984 | [
"MIT"
] | 20 | 2015-04-04T16:35:59.000Z | 2022-03-24T14:54:37.000Z | #### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Intangible()
result.template = "object/draft_schematic/space/weapon/missile/shared_countermeasure_decoy_launcher.iff"
result.attribute_template_id = -1
result.stfName("string_id_table","")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result | 28.176471 | 105 | 0.741127 | true | true | |
f72f6097ff6b8bf50e05a8eb8b55115b810bdf61 | 2,663 | py | Python | flash/__init__.py | ibraheemmmoosa/lightning-flash | c60fef81b27174543d7ad3a4d841faf71ad8536c | [
"Apache-2.0"
] | 2 | 2021-06-25T08:42:36.000Z | 2021-06-25T08:49:29.000Z | flash/__init__.py | edenlightning/lightning-flash | 841986aa0081bdeaf785d1ed4c48dd108fa69a78 | [
"Apache-2.0"
] | null | null | null | flash/__init__.py | edenlightning/lightning-flash | 841986aa0081bdeaf785d1ed4c48dd108fa69a78 | [
"Apache-2.0"
] | null | null | null | # Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Root package info."""
import os
__version__ = "0.1.1"
__author__ = "PyTorchLightning et al."
__author_email__ = "name@pytorchlightning.ai"
__license__ = 'Apache-2.0'
__copyright__ = f"Copyright (c) 2020-2021, f{__author__}."
__homepage__ = "https://github.com/PyTorchLightning/lightning-flash"
__docs__ = "Flash is a framework for fast prototyping, finetuning, and solving most standard deep learning challenges"
__long_doc__ = """
Flash is a task-based deep learning framework for flexible deep learning built on PyTorch Lightning.
Tasks can be anything from text classification to object segmentation.
Although PyTorch Lightning provides ultimate flexibility, for common tasks it does not remove 100% of the boilerplate.
Flash is built for applied researchers, beginners, data scientists, Kagglers or anyone starting out with Deep Learning.
But unlike other entry-level frameworks (keras, etc...), Flash users can switch to Lightning trivially when they need
the added flexibility.
"""
_PACKAGE_ROOT = os.path.dirname(__file__)
_PROJECT_ROOT = os.path.dirname(_PACKAGE_ROOT)
try:
# This variable is injected in the __builtins__ by the build process.
# It used to enable importing subpackages when the binaries are not built.
__LIGHTNING_FLASH_SETUP__
except NameError:
__LIGHTNING_FLASH_SETUP__: bool = False
if __LIGHTNING_FLASH_SETUP__:
import sys # pragma: no-cover
sys.stdout.write(f"Partial import of `{__name__}` during the build process.\n") # pragma: no-cover
# We are not importing the rest of the lightning during the build process, as it may not be compiled yet
else:
from flash import tabular, text, vision
from flash.core import data, utils
from flash.core.classification import ClassificationTask
from flash.core.data import DataModule
from flash.core.data.utils import download_data
from flash.core.model import Task
from flash.core.trainer import Trainer
__all__ = [
"Task", "ClassificationTask", "DataModule", "vision", "text", "tabular", "data", "utils", "download_data"
]
| 43.655738 | 119 | 0.763049 |
import os
__version__ = "0.1.1"
__author__ = "PyTorchLightning et al."
__author_email__ = "name@pytorchlightning.ai"
__license__ = 'Apache-2.0'
__copyright__ = f"Copyright (c) 2020-2021, f{__author__}."
__homepage__ = "https://github.com/PyTorchLightning/lightning-flash"
__docs__ = "Flash is a framework for fast prototyping, finetuning, and solving most standard deep learning challenges"
__long_doc__ = """
Flash is a task-based deep learning framework for flexible deep learning built on PyTorch Lightning.
Tasks can be anything from text classification to object segmentation.
Although PyTorch Lightning provides ultimate flexibility, for common tasks it does not remove 100% of the boilerplate.
Flash is built for applied researchers, beginners, data scientists, Kagglers or anyone starting out with Deep Learning.
But unlike other entry-level frameworks (keras, etc...), Flash users can switch to Lightning trivially when they need
the added flexibility.
"""
_PACKAGE_ROOT = os.path.dirname(__file__)
_PROJECT_ROOT = os.path.dirname(_PACKAGE_ROOT)
try:
__LIGHTNING_FLASH_SETUP__
except NameError:
__LIGHTNING_FLASH_SETUP__: bool = False
if __LIGHTNING_FLASH_SETUP__:
import sys
sys.stdout.write(f"Partial import of `{__name__}` during the build process.\n")
else:
from flash import tabular, text, vision
from flash.core import data, utils
from flash.core.classification import ClassificationTask
from flash.core.data import DataModule
from flash.core.data.utils import download_data
from flash.core.model import Task
from flash.core.trainer import Trainer
__all__ = [
"Task", "ClassificationTask", "DataModule", "vision", "text", "tabular", "data", "utils", "download_data"
]
| true | true |
f72f61d6f01b861b890955c4432bbf116ba0dd95 | 85 | py | Python | torchio/transforms/augmentation/__init__.py | siahuat0727/torchio | fc33ea9bd21dc60ba6c5fed46ca96ef63cae2179 | [
"Apache-2.0"
] | 1,340 | 2019-12-03T20:53:17.000Z | 2022-03-30T08:47:54.000Z | torchio/transforms/augmentation/__init__.py | siahuat0727/torchio | fc33ea9bd21dc60ba6c5fed46ca96ef63cae2179 | [
"Apache-2.0"
] | 532 | 2019-12-06T10:37:55.000Z | 2022-03-29T10:14:34.000Z | torchio/transforms/augmentation/__init__.py | siahuat0727/torchio | fc33ea9bd21dc60ba6c5fed46ca96ef63cae2179 | [
"Apache-2.0"
] | 193 | 2019-12-05T10:21:13.000Z | 2022-03-15T22:44:26.000Z | from .random_transform import RandomTransform
__all__ = [
'RandomTransform',
]
| 12.142857 | 45 | 0.741176 | from .random_transform import RandomTransform
__all__ = [
'RandomTransform',
]
| true | true |
f72f61f695ce68f2683e7c2c27eca4c02c50be0a | 1,920 | py | Python | make-test.py | todorokit/tensorflow_cnn_image_sample | 5f8dee00eebcbada9e03de7742026b2a37963860 | [
"Apache-1.1"
] | null | null | null | make-test.py | todorokit/tensorflow_cnn_image_sample | 5f8dee00eebcbada9e03de7742026b2a37963860 | [
"Apache-1.1"
] | null | null | null | make-test.py | todorokit/tensorflow_cnn_image_sample | 5f8dee00eebcbada9e03de7742026b2a37963860 | [
"Apache-1.1"
] | null | null | null | # -*- coding: utf-8 -*-
import os
import sys
import random
# Input directory: one subdirectory per class, each holding that class's images.
sourceDir = '/data/deresute-face'
# Output file listing training samples as "path,label" lines.
trFile = 'train.txt'
# Output file listing test samples as "path,label" lines.
teFile = 'test.txt'
# Generated Python module mapping numeric class ids to class names.
mapFile = 'config/classes.py'
# CLI: <trainNum> <testNum> -- per-class sample quotas for the two splits.
if len(sys.argv) != 3:
    print ("usage %s trainNum testNum" % (sys.argv[0]))
    exit()
datanum = int(sys.argv[1])  # max training samples kept per class
testnum = int(sys.argv[2])  # max test samples kept per class
def listClass(dir):
    """Return the class names found directly under *dir*.

    Every entry of *dir* is treated as one class (in this dataset each
    class is a subdirectory of per-class images).  Note that `dir` shadows
    the builtin of the same name; the parameter name is kept so the public
    signature is unchanged.

    Note: os.listdir() never yields "." or "..", so the membership filter
    is dead in practice; it is kept only to preserve the original
    behaviour exactly.
    """
    return [entry for entry in os.listdir(dir) if entry not in (".", "..")]
def find(dir, dirs):
    """Recursively collect every regular file under *dir*.

    Args:
        dir: directory to walk.
        dirs: accumulator list; file paths are appended to it.

    Returns:
        The accumulator, extended with the full path of every file found
        below *dir* (directories themselves are not listed).
    """
    ret = dirs
    for entry in os.listdir(dir):
        # Join paths directly; the original built the same path via
        # os.path.join("%s", "%s") % (dir, entry), an obfuscated and
        # separator-dependent detour through %-formatting.
        path = os.path.join(dir, entry)
        if os.path.isdir(path):
            ret = find(path, ret)
        else:
            ret.append(path)
    return ret
def ref(dict, key, default):
    """Return dict[key], or *default* when the key is missing.

    Equivalent to dict.get(key, default) for mappings.  The original used
    a bare ``except`` that could mask unrelated errors; only the
    missing-key case is caught now.  (`dict` shadows the builtin; the
    parameter name is kept to preserve the signature.)
    """
    try:
        return dict[key]
    except KeyError:
        return default
def addDict(dict, key):
    """Increment the counter stored at *key*, creating it at 1 if absent.

    Replaces the original try/bare-``except`` (which silently swallowed
    any error) with an explicit default via dict.get().  (`dict` shadows
    the builtin; the parameter name is kept to preserve the signature.)
    """
    dict[key] = dict.get(key, 0) + 1
dirs = listClass(sourceDir)
def getId(className):
    """Map a class name to its numeric label: its index in the global `dirs`."""
    return dirs.index(className)
# Collect every image path, then shuffle so the per-class train/test
# assignment below sees the files in random order.
images = find(sourceDir, [])
random.shuffle(images);
# Emit config/classes.py: a generated module mapping label id -> class name.
fp = open(mapFile, "w")
fp.write("classList = {}\n")
i = 0
for className in dirs:
    fp.write("classList[%d] = \"%s\"\n"% (i, className))
    i += 1
fp.close()
teFp = open(teFile, "w")
trFp = open(trFile, "w")
# Per-class counters: `limits` counts train samples taken so far,
# `limits2` counts test samples taken so far.
limits = {};
limits2 = {};
for image in images:
    # The class name is the image's parent directory name.
    className = os.path.basename(os.path.dirname(image))
    isTest = False
    if ref(limits2, className, 0) >= testnum:
        # Both quotas for this class are already full -- drop the image.
        continue
    elif ref(limits, className, 0) >= datanum:
        # Train quota full: route this image to the test split instead.
        addDict(limits2, className)
        isTest = True
    else:
        addDict(limits, className)
    # NOTE: `id` shadows the builtin of the same name (pre-existing).
    id = getId(className);
    if isTest:
        teFp.write("%s,%d\n" % (image, id));
    else:
        trFp.write("%s,%d\n" % (image, id));
trFp.close()
teFp.close()
| 20.869565 | 57 | 0.563021 |
import os
import sys
import random
sourceDir = '/data/deresute-face'
trFile = 'train.txt'
teFile = 'test.txt'
mapFile = 'config/classes.py'
if len(sys.argv) != 3:
print ("usage %s trainNum testNum" % (sys.argv[0]))
exit()
datanum = int(sys.argv[1])
testnum = int(sys.argv[2])
def listClass(dir):
ret = []
for file in os.listdir(dir):
if(file == "." or file == ".."):
continue;
ret.append(file)
return ret
def find(dir, dirs):
ret = dirs
for file in os.listdir(dir):
realfile = os.path.join("%s","%s")%(dir,file)
if (os.path.isdir(realfile)):
ret = find(realfile, ret)
else:
ret.append(realfile)
return ret
def ref(dict, key, default):
try:
return dict[key]
except:
return default
def addDict(dict, key):
try:
dict[key] += 1
except:
dict[key] = 1
dirs = listClass(sourceDir)
def getId(className):
return dirs.index(className)
images = find(sourceDir, [])
random.shuffle(images);
fp = open(mapFile, "w")
fp.write("classList = {}\n")
i = 0
for className in dirs:
fp.write("classList[%d] = \"%s\"\n"% (i, className))
i += 1
fp.close()
teFp = open(teFile, "w")
trFp = open(trFile, "w")
limits = {};
limits2 = {};
for image in images:
className = os.path.basename(os.path.dirname(image))
isTest = False
if ref(limits2, className, 0) >= testnum:
continue
elif ref(limits, className, 0) >= datanum:
addDict(limits2, className)
isTest = True
else:
addDict(limits, className)
id = getId(className);
if isTest:
teFp.write("%s,%d\n" % (image, id));
else:
trFp.write("%s,%d\n" % (image, id));
trFp.close()
teFp.close()
| true | true |
f72f62ec9ae934fb7e5b8a0c424979616160c47e | 3,343 | py | Python | src/oci/devops/models/compute_instance_group_failure_policy_by_count.py | LaudateCorpus1/oci-python-sdk | b0d3ce629d5113df4d8b83b7a6502b2c5bfa3015 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | src/oci/devops/models/compute_instance_group_failure_policy_by_count.py | LaudateCorpus1/oci-python-sdk | b0d3ce629d5113df4d8b83b7a6502b2c5bfa3015 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | src/oci/devops/models/compute_instance_group_failure_policy_by_count.py | LaudateCorpus1/oci-python-sdk | b0d3ce629d5113df4d8b83b7a6502b2c5bfa3015 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | # coding: utf-8
# Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from .compute_instance_group_failure_policy import ComputeInstanceGroupFailurePolicy
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class ComputeInstanceGroupFailurePolicyByCount(ComputeInstanceGroupFailurePolicy):
    """
    A failure policy, expressed as an absolute instance count, for a compute
    instance group rolling deployment stage.
    """

    def __init__(self, **kwargs):
        """
        Initializes a new ComputeInstanceGroupFailurePolicyByCount object with values from keyword arguments.
        The default value of the :py:attr:`~oci.devops.models.ComputeInstanceGroupFailurePolicyByCount.policy_type`
        attribute of this class is ``COMPUTE_INSTANCE_GROUP_FAILURE_POLICY_BY_COUNT`` and it should not be changed.
        The following keyword arguments are supported (corresponding to the getters/setters of this class):

        :param policy_type:
            The value to assign to the policy_type property of this ComputeInstanceGroupFailurePolicyByCount.
            Allowed values for this property are: "COMPUTE_INSTANCE_GROUP_FAILURE_POLICY_BY_COUNT", "COMPUTE_INSTANCE_GROUP_FAILURE_POLICY_BY_PERCENTAGE"
        :type policy_type: str

        :param failure_count:
            The value to assign to the failure_count property of this ComputeInstanceGroupFailurePolicyByCount.
        :type failure_count: int
        """
        # Swagger type and wire-name metadata consumed by the SDK machinery.
        self.swagger_types = dict(policy_type='str', failure_count='int')
        self.attribute_map = dict(policy_type='policyType', failure_count='failureCount')

        self._policy_type = None
        self._failure_count = None
        # The discriminator value is fixed for this subtype.
        self._policy_type = 'COMPUTE_INSTANCE_GROUP_FAILURE_POLICY_BY_COUNT'

    @property
    def failure_count(self):
        """
        **[Required]** Gets the failure_count of this ComputeInstanceGroupFailurePolicyByCount.
        The threshold count of failed instances in the group, which when reached or exceeded sets the stage as FAILED.

        :return: The failure_count of this ComputeInstanceGroupFailurePolicyByCount.
        :rtype: int
        """
        return self._failure_count

    @failure_count.setter
    def failure_count(self, failure_count):
        """
        Sets the failure_count of this ComputeInstanceGroupFailurePolicyByCount.
        The threshold count of failed instances in the group, which when reached or exceeded sets the stage as FAILED.

        :param failure_count: The failure_count of this ComputeInstanceGroupFailurePolicyByCount.
        :type: int
        """
        self._failure_count = failure_count

    def __repr__(self):
        return formatted_flat_dict(self)

    def __eq__(self, other):
        # None never compares equal; otherwise compare full attribute state.
        return other is not None and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
| 41.271605 | 245 | 0.718516 |
from .compute_instance_group_failure_policy import ComputeInstanceGroupFailurePolicy
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class ComputeInstanceGroupFailurePolicyByCount(ComputeInstanceGroupFailurePolicy):
def __init__(self, **kwargs):
self.swagger_types = {
'policy_type': 'str',
'failure_count': 'int'
}
self.attribute_map = {
'policy_type': 'policyType',
'failure_count': 'failureCount'
}
self._policy_type = None
self._failure_count = None
self._policy_type = 'COMPUTE_INSTANCE_GROUP_FAILURE_POLICY_BY_COUNT'
@property
def failure_count(self):
return self._failure_count
@failure_count.setter
def failure_count(self, failure_count):
self._failure_count = failure_count
def __repr__(self):
return formatted_flat_dict(self)
def __eq__(self, other):
if other is None:
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
| true | true |
f72f6465f3130c96ec06ef148837cd9b01ea7937 | 9,957 | py | Python | app/recipe/tests/test_recipe_api.py | catzzz/recipe-app-api | 4da3bcc40da44cf2afa36a7ccf149f2a5f713292 | [
"MIT"
] | null | null | null | app/recipe/tests/test_recipe_api.py | catzzz/recipe-app-api | 4da3bcc40da44cf2afa36a7ccf149f2a5f713292 | [
"MIT"
] | null | null | null | app/recipe/tests/test_recipe_api.py | catzzz/recipe-app-api | 4da3bcc40da44cf2afa36a7ccf149f2a5f713292 | [
"MIT"
] | null | null | null | import tempfile
import os
from PIL import Image
from django.contrib.auth import get_user_model
from django.test import TestCase
from django.urls import reverse
from rest_framework import status
from rest_framework.test import APIClient
from core.models import Recipe, Tag, Ingredient
from recipe.serializers import RecipeSerializer, RecipeDetailSerializer
RECIPES_URL = reverse('recipe:recipe-list')
def image_upload_url(recipe_id):
    """Return the URL for uploading an image to the given recipe."""
    return reverse('recipe:recipe-upload-image', args=[recipe_id])
def detail_url(recipe_id):
    """Return the recipe detail URL, i.e. /api/recipe/recipes/<id>/."""
    return reverse('recipe:recipe-detail', args=[recipe_id])
def sample_tag(user, name='Main course'):
    """Create and return a sample tag."""
    return Tag.objects.create(user=user, name=name)
def sample_ingredient(user, name='Cinnamon'):
    """Create and return a sample ingredient."""
    return Ingredient.objects.create(user=user, name=name)
def sample_recipe(user, **params):
    """Create and return a sample recipe; *params* override the defaults."""
    fields = dict(title='Sample recipe', time_minutes=10, price=5.00)
    fields.update(params)
    return Recipe.objects.create(user=user, **fields)
class PublicRecipeApiTests(TestCase):
    """Test unauthenticated recipe API access"""
    def setUp(self):
        self.client = APIClient()
    def test_required_auth(self):
        """Test that authentication is required"""
        res = self.client.get(RECIPES_URL)
        self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)
class PrivateRecipeApiTests(TestCase):
    """Test authenticated recipe API access"""
    def setUp(self):
        # Authenticate every request in this class as a fresh user.
        self.client = APIClient()
        self.user = get_user_model().objects.create_user(
            'test@londonappdev.com',
            'testpass'
        )
        self.client.force_authenticate(self.user)
    def test_retrieve_recipes(self):
        """Test retrieving list of recipes"""
        sample_recipe(user=self.user)
        sample_recipe(user=self.user)
        res = self.client.get(RECIPES_URL)
        recipes = Recipe.objects.all().order_by('-id')
        serializer = RecipeSerializer(recipes, many=True)
        self.assertEqual(res.status_code, status.HTTP_200_OK)
        self.assertEqual(res.data, serializer.data)
    def test_recipes_limited_to_user(self):
        """Test retrieving recipes for user"""
        # A second user's recipe must not appear in the first user's list.
        user2 = get_user_model().objects.create_user(
            'other@londonappdev.com',
            'pass'
        )
        sample_recipe(user=user2)
        sample_recipe(user=self.user)
        res = self.client.get(RECIPES_URL)
        recipes = Recipe.objects.filter(user=self.user)
        serializer = RecipeSerializer(recipes, many=True)
        self.assertEqual(res.status_code, status.HTTP_200_OK)
        self.assertEqual(len(res.data), 1)
        self.assertEqual(res.data, serializer.data)
    def test_view_recipe_detail(self):
        """Test viewing a recipe detail"""
        recipe = sample_recipe(user=self.user)
        recipe.tags.add(sample_tag(user=self.user))
        recipe.ingredients.add(sample_ingredient(user=self.user))
        url = detail_url(recipe.id)
        res = self.client.get(url)
        serializer = RecipeDetailSerializer(recipe)
        self.assertEqual(res.data, serializer.data)
    def test_create_basic_recipe(self):
        """Test creating recipe"""
        payload = {
            'title': 'Test recipe',
            'time_minutes': 30,
            'price': 10.00,
        }
        res = self.client.post(RECIPES_URL, payload)
        self.assertEqual(res.status_code, status.HTTP_201_CREATED)
        recipe = Recipe.objects.get(id=res.data['id'])
        # Every payload field must be stored on the created object.
        for key in payload.keys():
            self.assertEqual(payload[key], getattr(recipe, key))
    def test_create_recipe_with_tags(self):
        """Test creating a recipe with tags"""
        tag1 = sample_tag(user=self.user, name='Tag 1')
        tag2 = sample_tag(user=self.user, name='Tag 2')
        payload = {
            'title': 'Test recipe with two tags',
            'tags': [tag1.id, tag2.id],
            'time_minutes': 30,
            'price': 10.00
        }
        res = self.client.post(RECIPES_URL, payload)
        self.assertEqual(res.status_code, status.HTTP_201_CREATED)
        recipe = Recipe.objects.get(id=res.data['id'])
        tags = recipe.tags.all()
        self.assertEqual(tags.count(), 2)
        self.assertIn(tag1, tags)
        self.assertIn(tag2, tags)
    def test_create_recipe_with_ingredients(self):
        """Test creating recipe with ingredients"""
        ingredient1 = sample_ingredient(user=self.user, name='Ingredient 1')
        ingredient2 = sample_ingredient(user=self.user, name='Ingredient 2')
        payload = {
            'title': 'Test recipe with ingredients',
            'ingredients': [ingredient1.id, ingredient2.id],
            'time_minutes': 45,
            'price': 15.00
        }
        res = self.client.post(RECIPES_URL, payload)
        self.assertEqual(res.status_code, status.HTTP_201_CREATED)
        recipe = Recipe.objects.get(id=res.data['id'])
        ingredients = recipe.ingredients.all()
        self.assertEqual(ingredients.count(), 2)
        self.assertIn(ingredient1, ingredients)
        self.assertIn(ingredient2, ingredients)
    def test_partial_update_recipe(self):
        """Test updating a recipe with patch"""
        recipe = sample_recipe(user=self.user)
        recipe.tags.add(sample_tag(user=self.user))
        new_tag = sample_tag(user=self.user, name='Curry')
        payload = {'title': 'Chicken tikka', 'tags': [new_tag.id]}
        url = detail_url(recipe.id)
        self.client.patch(url, payload)
        recipe.refresh_from_db()
        self.assertEqual(recipe.title, payload['title'])
        # PATCH replaces only the provided fields; tags are replaced by the
        # supplied list, so the original tag is gone.
        tags = recipe.tags.all()
        self.assertEqual(len(tags), 1)
        self.assertIn(new_tag, tags)
    def test_full_update_recipe(self):
        """Test updating a recipe with put"""
        recipe = sample_recipe(user=self.user)
        recipe.tags.add(sample_tag(user=self.user))
        payload = {
            'title': 'Spahetti carbonara',
            'time_minutes': 25,
            'price': 5.00
        }
        url = detail_url(recipe.id)
        self.client.put(url, payload)
        recipe.refresh_from_db()
        self.assertEqual(recipe.title, payload['title'])
        self.assertEqual(recipe.time_minutes, payload['time_minutes'])
        self.assertEqual(recipe.price, payload['price'])
        # PUT replaces the whole object, so tags omitted from the payload
        # are cleared.
        tags = recipe.tags.all()
        self.assertEqual(len(tags), 0)
class RecipeImageUploadTests(TestCase):
    """Test image upload (and list filtering) on the recipe API."""
    def setUp(self):
        self.client = APIClient()
        self.user = get_user_model().objects.create_user('user', 'testpass')
        self.client.force_authenticate(self.user)
        self.recipe = sample_recipe(user=self.user)
    # Remove the uploaded test image from the filesystem after each test.
    def tearDown(self):
        self.recipe.image.delete()
    def test_upload_image_to_recipe(self):
        """Test uploading an image to a recipe"""
        url = image_upload_url(self.recipe.id)
        with tempfile.NamedTemporaryFile(suffix='.jpg') as ntf:
            img = Image.new('RGB', (10, 10))
            img.save(ntf, format='JPEG')
            # Rewind so the upload reads the file from the beginning.
            ntf.seek(0)
            # Files must be posted as multipart form data.
            res = self.client.post(url, {'image': ntf}, format='multipart')
        self.recipe.refresh_from_db()
        self.assertEqual(res.status_code, status.HTTP_200_OK)
        self.assertIn('image', res.data)
        self.assertTrue(os.path.exists(self.recipe.image.path))
    def test_upload_image_bad_request(self):
        """Test uploading an invalid (non-image) payload"""
        url = image_upload_url(self.recipe.id)
        res = self.client.post(url,{'image': 'notimage'}, format='multipart')
        self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
    # NOTE(review): the two filter tests below are unrelated to image
    # upload; consider moving them to PrivateRecipeApiTests.
    def test_filter_recipes_by_tags(self):
        """Test returning recipes with specific tags"""
        recipe1 = sample_recipe(user=self.user, title='Thai vegetable curry')
        recipe2 = sample_recipe(user=self.user, title='Aubergine with tahini')
        tag1 = sample_tag(user=self.user, name='Vegan')
        tag2 = sample_tag(user=self.user, name='Vegetarian')
        recipe1.tags.add(tag1)
        recipe2.tags.add(tag2)
        recipe3 = sample_recipe(user=self.user, title='Fish and chips')
        res = self.client.get(
            RECIPES_URL,
            {'tags': '{},{}'.format(tag1.id, tag2.id)}
        )
        serializer1 = RecipeSerializer(recipe1)
        serializer2 = RecipeSerializer(recipe2)
        serializer3 = RecipeSerializer(recipe3)
        self.assertIn(serializer1.data, res.data)
        self.assertIn(serializer2.data, res.data)
        self.assertNotIn(serializer3.data, res.data)
    def test_filter_recipes_by_ingredients(self):
        """Test returning recipes with specific ingredients"""
        recipe1 = sample_recipe(user=self.user, title='Posh beans on toast')
        recipe2 = sample_recipe(user=self.user, title='Chicken cacciatore')
        ingredient1 = sample_ingredient(user=self.user, name='Feta cheese')
        ingredient2 = sample_ingredient(user=self.user, name='Chicken')
        recipe1.ingredients.add(ingredient1)
        recipe2.ingredients.add(ingredient2)
        recipe3 = sample_recipe(user=self.user, title='Steak and mushrooms')
        res = self.client.get(
            RECIPES_URL,
            {'ingredients': '{},{}'.format(ingredient1.id, ingredient2.id)}
        )
        serializer1 = RecipeSerializer(recipe1)
        serializer2 = RecipeSerializer(recipe2)
        serializer3 = RecipeSerializer(recipe3)
        self.assertIn(serializer1.data, res.data)
        self.assertIn(serializer2.data, res.data)
        self.assertNotIn(serializer3.data, res.data)
| 34.814685 | 78 | 0.645676 | import tempfile
import os
from PIL import Image
from django.contrib.auth import get_user_model
from django.test import TestCase
from django.urls import reverse
from rest_framework import status
from rest_framework.test import APIClient
from core.models import Recipe, Tag, Ingredient
from recipe.serializers import RecipeSerializer, RecipeDetailSerializer
RECIPES_URL = reverse('recipe:recipe-list')
def image_upload_url(recipe_id):
return reverse('recipe:recipe-upload-image', args=[recipe_id])
def detail_url(recipe_id):
return reverse('recipe:recipe-detail', args=[recipe_id])
def sample_tag(user, name='Main course'):
return Tag.objects.create(user=user, name=name)
def sample_ingredient(user, name='Cinnamon'):
return Ingredient.objects.create(user=user, name=name)
def sample_recipe(user, **params):
defaults = {
'title': 'Sample recipe',
'time_minutes': 10,
'price': 5.00,
}
defaults.update(params)
return Recipe.objects.create(user=user, **defaults)
class PublicRecipeApiTests(TestCase):
def setUp(self):
self.client = APIClient()
def test_required_auth(self):
res = self.client.get(RECIPES_URL)
self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)
class PrivateRecipeApiTests(TestCase):
def setUp(self):
self.client = APIClient()
self.user = get_user_model().objects.create_user(
'test@londonappdev.com',
'testpass'
)
self.client.force_authenticate(self.user)
def test_retrieve_recipes(self):
sample_recipe(user=self.user)
sample_recipe(user=self.user)
res = self.client.get(RECIPES_URL)
recipes = Recipe.objects.all().order_by('-id')
serializer = RecipeSerializer(recipes, many=True)
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertEqual(res.data, serializer.data)
def test_recipes_limited_to_user(self):
user2 = get_user_model().objects.create_user(
'other@londonappdev.com',
'pass'
)
sample_recipe(user=user2)
sample_recipe(user=self.user)
res = self.client.get(RECIPES_URL)
recipes = Recipe.objects.filter(user=self.user)
serializer = RecipeSerializer(recipes, many=True)
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertEqual(len(res.data), 1)
self.assertEqual(res.data, serializer.data)
def test_view_recipe_detail(self):
recipe = sample_recipe(user=self.user)
recipe.tags.add(sample_tag(user=self.user))
recipe.ingredients.add(sample_ingredient(user=self.user))
url = detail_url(recipe.id)
res = self.client.get(url)
serializer = RecipeDetailSerializer(recipe)
self.assertEqual(res.data, serializer.data)
def test_create_basic_recipe(self):
payload = {
'title': 'Test recipe',
'time_minutes': 30,
'price': 10.00,
}
res = self.client.post(RECIPES_URL, payload)
self.assertEqual(res.status_code, status.HTTP_201_CREATED)
recipe = Recipe.objects.get(id=res.data['id'])
for key in payload.keys():
self.assertEqual(payload[key], getattr(recipe, key))
def test_create_recipe_with_tags(self):
tag1 = sample_tag(user=self.user, name='Tag 1')
tag2 = sample_tag(user=self.user, name='Tag 2')
payload = {
'title': 'Test recipe with two tags',
'tags': [tag1.id, tag2.id],
'time_minutes': 30,
'price': 10.00
}
res = self.client.post(RECIPES_URL, payload)
self.assertEqual(res.status_code, status.HTTP_201_CREATED)
recipe = Recipe.objects.get(id=res.data['id'])
tags = recipe.tags.all()
self.assertEqual(tags.count(), 2)
self.assertIn(tag1, tags)
self.assertIn(tag2, tags)
def test_create_recipe_with_ingredients(self):
ingredient1 = sample_ingredient(user=self.user, name='Ingredient 1')
ingredient2 = sample_ingredient(user=self.user, name='Ingredient 2')
payload = {
'title': 'Test recipe with ingredients',
'ingredients': [ingredient1.id, ingredient2.id],
'time_minutes': 45,
'price': 15.00
}
res = self.client.post(RECIPES_URL, payload)
self.assertEqual(res.status_code, status.HTTP_201_CREATED)
recipe = Recipe.objects.get(id=res.data['id'])
ingredients = recipe.ingredients.all()
self.assertEqual(ingredients.count(), 2)
self.assertIn(ingredient1, ingredients)
self.assertIn(ingredient2, ingredients)
def test_partial_update_recipe(self):
recipe = sample_recipe(user=self.user)
recipe.tags.add(sample_tag(user=self.user))
new_tag = sample_tag(user=self.user, name='Curry')
payload = {'title': 'Chicken tikka', 'tags': [new_tag.id]}
url = detail_url(recipe.id)
self.client.patch(url, payload)
recipe.refresh_from_db()
self.assertEqual(recipe.title, payload['title'])
tags = recipe.tags.all()
self.assertEqual(len(tags), 1)
self.assertIn(new_tag, tags)
def test_full_update_recipe(self):
recipe = sample_recipe(user=self.user)
recipe.tags.add(sample_tag(user=self.user))
payload = {
'title': 'Spahetti carbonara',
'time_minutes': 25,
'price': 5.00
}
url = detail_url(recipe.id)
self.client.put(url, payload)
recipe.refresh_from_db()
self.assertEqual(recipe.title, payload['title'])
self.assertEqual(recipe.time_minutes, payload['time_minutes'])
self.assertEqual(recipe.price, payload['price'])
tags = recipe.tags.all()
self.assertEqual(len(tags), 0)
class RecipeImageUploadTests(TestCase):
def setUp(self):
self.client = APIClient()
self.user = get_user_model().objects.create_user('user', 'testpass')
self.client.force_authenticate(self.user)
self.recipe = sample_recipe(user=self.user)
def tearDown(self):
self.recipe.image.delete()
def test_upload_image_to_recipe(self):
url = image_upload_url(self.recipe.id)
with tempfile.NamedTemporaryFile(suffix='.jpg') as ntf:
img = Image.new('RGB', (10, 10))
img.save(ntf, format='JPEG')
ntf.seek(0)
res = self.client.post(url, {'image': ntf}, format='multipart')
self.recipe.refresh_from_db()
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertIn('image', res.data)
self.assertTrue(os.path.exists(self.recipe.image.path))
def test_upload_image_bad_request(self):
url = image_upload_url(self.recipe.id)
res = self.client.post(url,{'image': 'notimage'}, format='multipart')
self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
def test_filter_recipes_by_tags(self):
recipe1 = sample_recipe(user=self.user, title='Thai vegetable curry')
recipe2 = sample_recipe(user=self.user, title='Aubergine with tahini')
tag1 = sample_tag(user=self.user, name='Vegan')
tag2 = sample_tag(user=self.user, name='Vegetarian')
recipe1.tags.add(tag1)
recipe2.tags.add(tag2)
recipe3 = sample_recipe(user=self.user, title='Fish and chips')
res = self.client.get(
RECIPES_URL,
{'tags': '{},{}'.format(tag1.id, tag2.id)}
)
serializer1 = RecipeSerializer(recipe1)
serializer2 = RecipeSerializer(recipe2)
serializer3 = RecipeSerializer(recipe3)
self.assertIn(serializer1.data, res.data)
self.assertIn(serializer2.data, res.data)
self.assertNotIn(serializer3.data, res.data)
def test_filter_recipes_by_ingredients(self):
recipe1 = sample_recipe(user=self.user, title='Posh beans on toast')
recipe2 = sample_recipe(user=self.user, title='Chicken cacciatore')
ingredient1 = sample_ingredient(user=self.user, name='Feta cheese')
ingredient2 = sample_ingredient(user=self.user, name='Chicken')
recipe1.ingredients.add(ingredient1)
recipe2.ingredients.add(ingredient2)
recipe3 = sample_recipe(user=self.user, title='Steak and mushrooms')
res = self.client.get(
RECIPES_URL,
{'ingredients': '{},{}'.format(ingredient1.id, ingredient2.id)}
)
serializer1 = RecipeSerializer(recipe1)
serializer2 = RecipeSerializer(recipe2)
serializer3 = RecipeSerializer(recipe3)
self.assertIn(serializer1.data, res.data)
self.assertIn(serializer2.data, res.data)
self.assertNotIn(serializer3.data, res.data)
| true | true |
f72f65029479ff22dec38237529146bb2d013655 | 6,561 | py | Python | ltr/dataset/lasot.py | tsingqguo/ABA | c32edbbe5705b0332a08951b5ee436b5f58c2e70 | [
"MIT"
] | 12 | 2021-07-27T07:18:24.000Z | 2022-03-09T13:52:20.000Z | ltr/dataset/lasot.py | tsingqguo/ABA | c32edbbe5705b0332a08951b5ee436b5f58c2e70 | [
"MIT"
] | 2 | 2021-08-03T09:21:33.000Z | 2021-12-29T14:25:30.000Z | ltr/dataset/lasot.py | tsingqguo/ABA | c32edbbe5705b0332a08951b5ee436b5f58c2e70 | [
"MIT"
] | 3 | 2021-11-18T14:46:40.000Z | 2022-01-03T15:47:23.000Z | import os
import os.path
import torch
import numpy as np
import pandas
import csv
import random
from collections import OrderedDict
from .base_video_dataset import BaseVideoDataset
from ltr.data.image_loader import jpeg4py_loader
from ltr.admin.environment import env_settings
class Lasot(BaseVideoDataset):
    """ LaSOT dataset.
    Publication:
        LaSOT: A High-quality Benchmark for Large-scale Single Object Tracking
        Heng Fan, Liting Lin, Fan Yang, Peng Chu, Ge Deng, Sijia Yu, Hexin Bai, Yong Xu, Chunyuan Liao and Haibin Ling
        CVPR, 2019
        https://arxiv.org/pdf/1809.07845.pdf
    Download the dataset from https://cis.temple.edu/lasot/download.html
    """
    def __init__(self, root=None, image_loader=jpeg4py_loader, vid_ids=None, split=None, data_fraction=None):
        """
        args:
            root - path to the lasot dataset.
            image_loader (jpeg4py_loader) -  The function to read the images. jpeg4py (https://github.com/ajkxyz/jpeg4py)
                                            is used by default.
            vid_ids - List containing the ids of the videos (1 - 20) used for training. If vid_ids = [1, 3, 5], then the
                    videos with subscripts -1, -3, and -5 from each class will be used for training.
            split - If split='train', the official train split (protocol-II) is used for training. Note: Only one of
                    vid_ids or split option can be used at a time.
            data_fraction - Fraction of dataset to be used. The complete dataset is used by default
        """
        root = env_settings().lasot_dir if root is None else root
        super().__init__('LaSOT', root, image_loader)
        # Keep a list of all classes (one subdirectory per class under root).
        self.class_list = [f for f in os.listdir(self.root)]
        self.class_to_id = {cls_name: cls_id for cls_id, cls_name in enumerate(self.class_list)}
        self.sequence_list = self._build_sequence_list(vid_ids, split)
        if data_fraction is not None:
            # Randomly subsample the sequence list to the requested fraction.
            self.sequence_list = random.sample(self.sequence_list, int(len(self.sequence_list)*data_fraction))
        self.seq_per_class = self._build_class_list()
    def _build_sequence_list(self, vid_ids=None, split=None):
        """Build the list of sequence names ('class-vid') from either the
        official split file or explicit per-class video ids (mutually
        exclusive)."""
        if split is not None:
            if vid_ids is not None:
                raise ValueError('Cannot set both split_name and vid_ids.')
            ltr_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..')
            if split == 'train':
                file_path = os.path.join(ltr_path, 'data_specs', 'lasot_train_split.txt')
            else:
                raise ValueError('Unknown split name.')
            # NOTE(review): squeeze=True was deprecated in pandas 1.4 and
            # removed in 2.0; this call requires an older pandas.
            sequence_list = pandas.read_csv(file_path, header=None, squeeze=True).values.tolist()
        elif vid_ids is not None:
            sequence_list = [c+'-'+str(v) for c in self.class_list for v in vid_ids]
        else:
            raise ValueError('Set either split_name or vid_ids.')
        return sequence_list
    def _build_class_list(self):
        """Group sequence indices by class name (the part before '-')."""
        seq_per_class = {}
        for seq_id, seq_name in enumerate(self.sequence_list):
            class_name = seq_name.split('-')[0]
            if class_name in seq_per_class:
                seq_per_class[class_name].append(seq_id)
            else:
                seq_per_class[class_name] = [seq_id]
        return seq_per_class
    def get_name(self):
        """Return the dataset identifier."""
        return 'lasot'
    def has_class_info(self):
        """LaSOT provides per-sequence class labels."""
        return True
    def has_occlusion_info(self):
        """LaSOT provides per-frame occlusion/out-of-view flags."""
        return True
    def get_num_sequences(self):
        """Return the number of sequences in this split."""
        return len(self.sequence_list)
    def get_num_classes(self):
        """Return the number of object classes."""
        return len(self.class_list)
    def get_sequences_in_class(self, class_name):
        """Return the sequence indices belonging to *class_name*."""
        return self.seq_per_class[class_name]
    def _read_bb_anno(self, seq_path):
        """Read groundtruth.txt as a float32 tensor, one box per frame.
        (Presumably each row is [x, y, w, h] -- see the w/h validity check
        in get_sequence_info.)"""
        bb_anno_file = os.path.join(seq_path, "groundtruth.txt")
        gt = pandas.read_csv(bb_anno_file, delimiter=',', header=None, dtype=np.float32, na_filter=False, low_memory=False).values
        return torch.tensor(gt)
    def _read_target_visible(self, seq_path):
        """Return a per-frame byte mask: target neither fully occluded nor
        out of view."""
        # Read full occlusion and out_of_view
        occlusion_file = os.path.join(seq_path, "full_occlusion.txt")
        out_of_view_file = os.path.join(seq_path, "out_of_view.txt")
        with open(occlusion_file, 'r', newline='') as f:
            occlusion = torch.ByteTensor([int(v) for v in list(csv.reader(f))[0]])
        with open(out_of_view_file, 'r') as f:
            out_of_view = torch.ByteTensor([int(v) for v in list(csv.reader(f))[0]])
        target_visible = ~occlusion & ~out_of_view
        return target_visible
    def _get_sequence_path(self, seq_id):
        """Return the on-disk directory root/<class>/<class>-<vid> for a sequence."""
        seq_name = self.sequence_list[seq_id]
        class_name = seq_name.split('-')[0]
        vid_id = seq_name.split('-')[1]
        return os.path.join(self.root, class_name, class_name + '-' + vid_id)
    def get_sequence_info(self, seq_id):
        """Return per-frame 'bbox', 'valid' (positive w and h) and
        'visible' (valid and not occluded/out-of-view) annotations."""
        seq_path = self._get_sequence_path(seq_id)
        bbox = self._read_bb_anno(seq_path)
        valid = (bbox[:, 2] > 0) & (bbox[:, 3] > 0)
        visible = self._read_target_visible(seq_path) & valid.byte()
        return {'bbox': bbox, 'valid': valid, 'visible': visible}
    def _get_frame_path(self, seq_path, frame_id):
        """Return the image path for a 0-based frame id."""
        return os.path.join(seq_path, 'img', '{:08}.jpg'.format(frame_id+1))    # frames start from 1
    def _get_frame(self, seq_path, frame_id):
        """Load one frame image via the configured image loader."""
        return self.image_loader(self._get_frame_path(seq_path, frame_id))
    def _get_class(self, seq_path):
        """Extract the class name from a sequence path (its parent directory)."""
        raw_class = seq_path.split('/')[-2]
        return raw_class
    def get_class_name(self, seq_id):
        """Return the class name of the given sequence."""
        seq_path = self._get_sequence_path(seq_id)
        obj_class = self._get_class(seq_path)
        return obj_class
    def get_frames(self, seq_id, frame_ids, anno=None):
        """Return (frames, per-frame annotation lists, object meta) for the
        requested frame ids of a sequence; annotations are loaded when not
        supplied."""
        seq_path = self._get_sequence_path(seq_id)
        obj_class = self._get_class(seq_path)
        frame_list = [self._get_frame(seq_path, f_id) for f_id in frame_ids]
        if anno is None:
            anno = self.get_sequence_info(seq_id)
        # Clone each selected frame's annotation so callers can mutate freely.
        anno_frames = {}
        for key, value in anno.items():
            anno_frames[key] = [value[f_id, ...].clone() for f_id in frame_ids]
        object_meta = OrderedDict({'object_class_name': obj_class,
                                   'motion_class': None,
                                   'major_class': None,
                                   'root_class': None,
                                   'motion_adverb': None})
        return frame_list, anno_frames, object_meta
| 38.822485 | 130 | 0.63405 | import os
import os.path
import torch
import numpy as np
import pandas
import csv
import random
from collections import OrderedDict
from .base_video_dataset import BaseVideoDataset
from ltr.data.image_loader import jpeg4py_loader
from ltr.admin.environment import env_settings
class Lasot(BaseVideoDataset):
def __init__(self, root=None, image_loader=jpeg4py_loader, vid_ids=None, split=None, data_fraction=None):
root = env_settings().lasot_dir if root is None else root
super().__init__('LaSOT', root, image_loader)
self.class_list = [f for f in os.listdir(self.root)]
self.class_to_id = {cls_name: cls_id for cls_id, cls_name in enumerate(self.class_list)}
self.sequence_list = self._build_sequence_list(vid_ids, split)
if data_fraction is not None:
self.sequence_list = random.sample(self.sequence_list, int(len(self.sequence_list)*data_fraction))
self.seq_per_class = self._build_class_list()
def _build_sequence_list(self, vid_ids=None, split=None):
if split is not None:
if vid_ids is not None:
raise ValueError('Cannot set both split_name and vid_ids.')
ltr_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..')
if split == 'train':
file_path = os.path.join(ltr_path, 'data_specs', 'lasot_train_split.txt')
else:
raise ValueError('Unknown split name.')
sequence_list = pandas.read_csv(file_path, header=None, squeeze=True).values.tolist()
elif vid_ids is not None:
sequence_list = [c+'-'+str(v) for c in self.class_list for v in vid_ids]
else:
raise ValueError('Set either split_name or vid_ids.')
return sequence_list
def _build_class_list(self):
seq_per_class = {}
for seq_id, seq_name in enumerate(self.sequence_list):
class_name = seq_name.split('-')[0]
if class_name in seq_per_class:
seq_per_class[class_name].append(seq_id)
else:
seq_per_class[class_name] = [seq_id]
return seq_per_class
def get_name(self):
return 'lasot'
def has_class_info(self):
return True
def has_occlusion_info(self):
return True
def get_num_sequences(self):
return len(self.sequence_list)
def get_num_classes(self):
return len(self.class_list)
def get_sequences_in_class(self, class_name):
return self.seq_per_class[class_name]
def _read_bb_anno(self, seq_path):
bb_anno_file = os.path.join(seq_path, "groundtruth.txt")
gt = pandas.read_csv(bb_anno_file, delimiter=',', header=None, dtype=np.float32, na_filter=False, low_memory=False).values
return torch.tensor(gt)
def _read_target_visible(self, seq_path):
occlusion_file = os.path.join(seq_path, "full_occlusion.txt")
out_of_view_file = os.path.join(seq_path, "out_of_view.txt")
with open(occlusion_file, 'r', newline='') as f:
occlusion = torch.ByteTensor([int(v) for v in list(csv.reader(f))[0]])
with open(out_of_view_file, 'r') as f:
out_of_view = torch.ByteTensor([int(v) for v in list(csv.reader(f))[0]])
target_visible = ~occlusion & ~out_of_view
return target_visible
def _get_sequence_path(self, seq_id):
seq_name = self.sequence_list[seq_id]
class_name = seq_name.split('-')[0]
vid_id = seq_name.split('-')[1]
return os.path.join(self.root, class_name, class_name + '-' + vid_id)
def get_sequence_info(self, seq_id):
seq_path = self._get_sequence_path(seq_id)
bbox = self._read_bb_anno(seq_path)
valid = (bbox[:, 2] > 0) & (bbox[:, 3] > 0)
visible = self._read_target_visible(seq_path) & valid.byte()
return {'bbox': bbox, 'valid': valid, 'visible': visible}
def _get_frame_path(self, seq_path, frame_id):
return os.path.join(seq_path, 'img', '{:08}.jpg'.format(frame_id+1))
def _get_frame(self, seq_path, frame_id):
return self.image_loader(self._get_frame_path(seq_path, frame_id))
def _get_class(self, seq_path):
raw_class = seq_path.split('/')[-2]
return raw_class
def get_class_name(self, seq_id):
seq_path = self._get_sequence_path(seq_id)
obj_class = self._get_class(seq_path)
return obj_class
def get_frames(self, seq_id, frame_ids, anno=None):
seq_path = self._get_sequence_path(seq_id)
obj_class = self._get_class(seq_path)
frame_list = [self._get_frame(seq_path, f_id) for f_id in frame_ids]
if anno is None:
anno = self.get_sequence_info(seq_id)
anno_frames = {}
for key, value in anno.items():
anno_frames[key] = [value[f_id, ...].clone() for f_id in frame_ids]
object_meta = OrderedDict({'object_class_name': obj_class,
'motion_class': None,
'major_class': None,
'root_class': None,
'motion_adverb': None})
return frame_list, anno_frames, object_meta
| true | true |
f72f653e32796b549710c1e842206983ea462fda | 30,603 | py | Python | model-optimizer/mo/middle/passes/fusing/decomposition_test.py | anton-potapov/openvino | 84119afe9a8c965e0a0cd920fff53aee67b05108 | [
"Apache-2.0"
] | 1 | 2020-06-21T09:51:42.000Z | 2020-06-21T09:51:42.000Z | model-optimizer/mo/middle/passes/fusing/decomposition_test.py | anton-potapov/openvino | 84119afe9a8c965e0a0cd920fff53aee67b05108 | [
"Apache-2.0"
] | 4 | 2021-04-01T08:29:48.000Z | 2021-08-30T16:12:52.000Z | model-optimizer/mo/middle/passes/fusing/decomposition_test.py | anton-potapov/openvino | 84119afe9a8c965e0a0cd920fff53aee67b05108 | [
"Apache-2.0"
] | 3 | 2021-03-09T08:27:29.000Z | 2021-04-07T04:58:54.000Z | """
Copyright (C) 2018-2020 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
import numpy as np
from mo.middle.passes.fusing.decomposition import convert_scale_shift_to_mul_add, convert_batch_norm
from mo.utils.ir_engine.compare_graphs import compare_graphs
from mo.utils.unittest.graph import build_graph
nodes_attributes = {
'placeholder_1': {'shape': None, 'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'},
'placeholder_1_data': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None},
'placeholder_2': {'shape': None, 'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'},
'placeholder_2_data': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None},
# ScaleShift layer
'scaleshift_1': {'type': 'ScaleShift', 'kind': 'op', 'op': 'ScaleShift', 'axis': 0},
'const_scaleshift_1_w': {'value': None, 'shape': None, 'kind': 'op'},
'scaleshift_1_w': {'value': None, 'shape': None, 'kind': 'data'},
'const_scaleshift_1_b': {'value': None, 'shape': None, 'kind': 'op'},
'scaleshift_1_b': {'value': None, 'shape': None, 'kind': 'data'},
'scaleshift_1_data': {'value': None, 'shape': None, 'kind': 'data'},
# Mul and Add operations
'mul_1': {'type': None, 'value': None, 'kind': 'op', 'op': 'Mul'},
'const_mul_1_w': {'value': None, 'shape': None, 'kind': 'op'},
'mul_1_w': {'value': None, 'shape': None, 'kind': 'data'},
'mul_1_data': {'value': None, 'shape': None, 'kind': 'data'},
'add_1': {'type': None, 'kind': 'op', 'op': 'Add'},
'const_add_1_w': {'value': None, 'shape': None, 'kind': 'op'},
'add_1_w': {'value': None, 'shape': None, 'kind': 'data'},
'add_1_data': {'value': None, 'shape': None, 'kind': 'data'},
# Mul and Add operations
'mul_2': {'type': None, 'kind': 'op', 'op': 'Mul'},
'const_mul_2_w': {'value': None, 'shape': None, 'kind': 'op'},
'mul_2_w': {'value': None, 'shape': None, 'kind': 'data'},
'mul_2_data': {'value': None, 'shape': None, 'kind': 'data'},
'add_2': {'type': None, 'kind': 'op', 'op': 'Add'},
'const_add_2_w': {'value': None, 'shape': None, 'kind': 'op'},
'add_2_w': {'value': None, 'shape': None, 'kind': 'data'},
'add_2_data': {'value': None, 'shape': None, 'kind': 'data'},
# Reshape
'placeholder_2/Reshape_': {'type': 'Reshape', 'kind': 'op', 'op': 'Reshape'},
'placeholder_2/Reshape_data': {'value': None, 'shape': None, 'kind': 'data'},
'placeholder_2/Reshape_const': {'type': 'Const', 'kind': 'op', 'op': 'Const', 'value': None},
'placeholder_2/Reshape_const_data': {'kind': 'data', 'value': None, 'shape': None},
# BatchNorm operation
'bn_op': {'type': None, 'kind': 'op', 'op': 'BatchNorm', 'can_be_fused': True},
'const_bn_const': {'value': None, 'shape': None, 'kind': 'op'},
'bn_const': {'value': None, 'shape': None, 'kind': 'data'},
'const_bn_beta': {'value': None, 'shape': None, 'kind': 'op'},
'bn_beta': {'value': None, 'shape': None, 'kind': 'data'},
'const_bn_mean': {'value': None, 'shape': None, 'kind': 'op'},
'bn_mean': {'value': None, 'shape': None, 'kind': 'data'},
'const_bn_var': {'value': None, 'shape': None, 'kind': 'op'},
'bn_var': {'value': None, 'shape': None, 'kind': 'data'},
'bn_data': {'value': None, 'shape': None, 'kind': 'data'},
# Concat1 operation
'concat': {'type': 'Concat', 'kind': 'op', 'op': 'Concat'},
'concat_data': {'value': None, 'shape': None, 'kind': 'data'},
'op_output': {'kind': 'op', 'op': 'Result'}
}
class ScaleShiftToMulAdd(unittest.TestCase):
# ScaleShift -> Mul
def test_scaleshift_to_mul_1(self):
graph = build_graph(nodes_attributes,
[('placeholder_1', 'placeholder_1_data'),
('placeholder_1_data', 'scaleshift_1'),
('const_scaleshift_1_w', 'scaleshift_1_w'),
('scaleshift_1_w', 'scaleshift_1'),
('scaleshift_1', 'scaleshift_1_data'),
('scaleshift_1_data', 'op_output')
],
{'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])},
'scaleshift_1_w': {'shape': np.array([3]), 'value': np.array([1, 2, 3])},
'scaleshift_1_data': {}
})
graph_ref = build_graph(nodes_attributes,
[('placeholder_1', 'placeholder_1_data'),
('placeholder_1_data', 'mul_1'),
('const_mul_1_w', 'mul_1_w'),
('mul_1_w', 'mul_1'),
('mul_1', 'scaleshift_1_data'),
('scaleshift_1_data', 'op_output')
],
{'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])},
'const_mul_1_w': {'shape': np.array([3]), 'value': np.array([1, 2, 3])},
'mul_1_w': {'shape': np.array([3]), 'value': np.array([1, 2, 3])},
'mul_1': {'can_be_fused': True},
'scaleshift_1_data': {}
})
graph.graph['layout'] = 'NHWC'
convert_scale_shift_to_mul_add(graph)
graph.clean_up()
(flag, resp) = compare_graphs(graph, graph_ref, 'placeholder_1')
self.assertTrue(flag, resp)
# ScaleShift 2 inputs-> Mul
def test_scaleshift2_to_mul(self):
graph = build_graph(nodes_attributes,
[('placeholder_1', 'placeholder_1_data'),
('placeholder_2', 'placeholder_2_data'),
('placeholder_1_data', 'scaleshift_1'),
('placeholder_2_data', 'scaleshift_1'),
('scaleshift_1', 'scaleshift_1_data'),
('scaleshift_1_data', 'op_output')
],
{'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])},
'placeholder_2_data': {'shape': np.array([1, 227])},
'scaleshift_1_data': {}
})
graph_ref = build_graph(nodes_attributes,
[('placeholder_1', 'placeholder_1_data'),
('placeholder_2', 'placeholder_2_data'),
('placeholder_2_data', 'placeholder_2/Reshape_'),
('placeholder_2/Reshape_const', 'placeholder_2/Reshape_const_data'),
('placeholder_2/Reshape_const_data', 'placeholder_2/Reshape_'),
('placeholder_2/Reshape_', 'placeholder_2/Reshape_data'),
('placeholder_1_data', 'mul_1'),
('placeholder_2/Reshape_data', 'mul_1'),
('mul_1', 'scaleshift_1_data'),
('scaleshift_1_data', 'op_output')
],
{'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])},
'placeholder_2_data': {'shape': np.array([1, 227])},
'placeholder_2/Reshape_const': {'value': np.array([1, 227, 1, 1]), 'shape': [4]},
'placeholder_2/Reshape_const_data': {'value': np.array([1, 227, 1, 1]), 'shape': [4]},
'placeholder_2/Reshape_data': {'shape': np.array([1, 227, 1, 1])},
'mul_1': {'can_be_fused': True},
'scaleshift_1_data': {}
})
graph.graph['layout'] = 'NHWC'
convert_scale_shift_to_mul_add(graph)
graph.clean_up()
(flag, resp) = compare_graphs(graph, graph_ref, 'placeholder_1')
self.assertTrue(flag, resp)
# ScaleShift 2 inputs-> Mul (axis = 1)
def test_scaleshift2_axis1_to_mul(self):
graph = build_graph(nodes_attributes,
[('placeholder_1', 'placeholder_1_data'),
('placeholder_2', 'placeholder_2_data'),
('placeholder_1_data', 'scaleshift_1'),
('placeholder_2_data', 'scaleshift_1'),
('scaleshift_1', 'scaleshift_1_data'),
('scaleshift_1_data', 'op_output')
],
{'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])},
'placeholder_2_data': {'shape': np.array([227])},
'scaleshift_1': {'axis': 1},
'scaleshift_1_data': {}
})
graph_ref = build_graph(nodes_attributes,
[('placeholder_1', 'placeholder_1_data'),
('placeholder_2', 'placeholder_2_data'),
('placeholder_2_data', 'placeholder_2/Reshape_'),
('placeholder_2/Reshape_const', 'placeholder_2/Reshape_const_data'),
('placeholder_2/Reshape_const_data', 'placeholder_2/Reshape_'),
('placeholder_2/Reshape_', 'placeholder_2/Reshape_data'),
('placeholder_1_data', 'mul_1'),
('placeholder_2/Reshape_data', 'mul_1'),
('mul_1', 'scaleshift_1_data'),
('scaleshift_1_data', 'op_output')
],
{'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])},
'placeholder_2_data': {'shape': np.array([227])},
'placeholder_2/Reshape_const': {'value': np.array([1, 227, 1, 1]), 'shape': [4]},
'placeholder_2/Reshape_const_data': {'value': np.array([1, 227, 1, 1]), 'shape': [4]},
'placeholder_2/Reshape_data': {'shape': np.array([1, 227, 1, 1])},
'mul_1': {'can_be_fused': True},
'scaleshift_1_data': {}
})
graph.graph['layout'] = 'NHWC'
convert_scale_shift_to_mul_add(graph)
graph.clean_up()
(flag, resp) = compare_graphs(graph, graph_ref, 'placeholder_1')
self.assertTrue(flag, resp)
# ScaleShift -> Mul (Zero biases)
def test_scaleshift_to_mul_2(self):
graph = build_graph(nodes_attributes,
[('placeholder_1', 'placeholder_1_data'),
('placeholder_1_data', 'scaleshift_1'),
('const_scaleshift_1_w', 'scaleshift_1_w'),
('const_scaleshift_1_b', 'scaleshift_1_b'),
('scaleshift_1_w', 'scaleshift_1'),
('scaleshift_1_b', 'scaleshift_1'),
('scaleshift_1', 'scaleshift_1_data'),
('scaleshift_1_data', 'op_output')
],
{'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])},
'scaleshift_1_w': {'shape': np.array([3]), 'value': np.array([1, 2, 3])},
'scaleshift_1_b': {'shape': np.array([3]), 'value': np.array([0, 0, 0])},
'scaleshift_1_data': {}
})
graph_ref = build_graph(nodes_attributes,
[('placeholder_1', 'placeholder_1_data'),
('placeholder_1_data', 'mul_1'),
('const_mul_1_w', 'mul_1_w'),
('mul_1_w', 'mul_1'),
('mul_1', 'scaleshift_1_data'),
('scaleshift_1_data', 'op_output')
],
{'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])},
'const_mul_1_w': {'shape': np.array([3]), 'value': np.array([1, 2, 3])},
'mul_1_w': {'shape': np.array([3]), 'value': np.array([1, 2, 3])},
'mul_1': {'can_be_fused': True},
'scaleshift_1_data': {}
})
graph.graph['layout'] = 'NHWC'
convert_scale_shift_to_mul_add(graph)
graph.clean_up()
(flag, resp) = compare_graphs(graph, graph_ref, 'placeholder_1')
self.assertTrue(flag, resp)
# ScaleShift -> Mul->Add
def test_scaleshift_to_mul_add(self):
graph = build_graph(nodes_attributes,
[('placeholder_1', 'placeholder_1_data'),
('placeholder_1_data', 'scaleshift_1'),
('const_scaleshift_1_w', 'scaleshift_1_w'),
('const_scaleshift_1_b', 'scaleshift_1_b'),
('scaleshift_1_w', 'scaleshift_1'),
('scaleshift_1_b', 'scaleshift_1'),
('scaleshift_1', 'scaleshift_1_data'),
('scaleshift_1_data', 'op_output')
],
{'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])},
'scaleshift_1_w': {'shape': np.array([3]), 'value': np.array([1, 2, 3])},
'scaleshift_1_b': {'shape': np.array([3]), 'value': np.array([3, 2, 1])},
'scaleshift_1_data': {}
})
graph_ref = build_graph(nodes_attributes,
[('placeholder_1', 'placeholder_1_data'),
('placeholder_1_data', 'mul_1'),
('const_mul_1_w', 'mul_1_w'),
('mul_1_w', 'mul_1'),
('mul_1', 'mul_1_data'),
('mul_1_data', 'add_1'),
('const_add_1_w', 'add_1_w'),
('add_1_w', 'add_1'),
('add_1', 'scaleshift_1_data'),
('scaleshift_1_data', 'op_output')
],
{'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])},
'const_mul_1_w': {'shape': np.array([3]), 'value': np.array([1, 2, 3])},
'mul_1_w': {'shape': np.array([3]), 'value': np.array([1, 2, 3])},
'const_add_1_w': {'shape': np.array([3]), 'value': np.array([3, 2, 1])},
'add_1_w': {'shape': np.array([3]), 'value': np.array([3, 2, 1])},
'mul_1_data': {'shape': np.array([1, 227, 227, 3])},
'add_1': {'can_be_fused': True},
'mul_1': {'can_be_fused': True},
'scaleshift_1_data': {}
})
graph.graph['layout'] = 'NHWC'
convert_scale_shift_to_mul_add(graph)
graph.clean_up()
(flag, resp) = compare_graphs(graph, graph_ref, 'placeholder_1')
self.assertTrue(flag, resp)
# ScaleShift -> None (Zero weights and biases)
def test_scaleshift_to_nothing(self):
graph = build_graph(nodes_attributes,
[('placeholder_1', 'placeholder_1_data'),
('placeholder_1_data', 'scaleshift_1'),
('const_scaleshift_1_w', 'scaleshift_1_w'),
('const_scaleshift_1_b', 'scaleshift_1_b'),
('scaleshift_1_w', 'scaleshift_1'),
('scaleshift_1_b', 'scaleshift_1'),
('scaleshift_1', 'scaleshift_1_data'),
('scaleshift_1_data', 'op_output')
],
{'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])},
'scaleshift_1_w': {'shape': np.array([3]), 'value': np.array([1, 1, 1])},
'scaleshift_1_b': {'shape': np.array([3]), 'value': np.array([0, 0, 0])},
'scaleshift_1_data': {'shape': np.array([1, 227, 227, 3])}
}, nodes_with_edges_only=True)
graph_ref = build_graph(nodes_attributes,
[('placeholder_1', 'placeholder_1_data'),
('placeholder_1_data', 'op_output')
],
{'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])}}
,nodes_with_edges_only=True)
graph.graph['layout'] = 'NHWC'
convert_scale_shift_to_mul_add(graph)
graph.clean_up()
(flag, resp) = compare_graphs(graph, graph_ref, 'placeholder_1')
self.assertTrue(flag, resp)
# ScaleShift -> ScaleShift (can_be_fused=False)
def test_scaleshift_can_be_fused(self):
graph = build_graph(nodes_attributes,
[('placeholder_1', 'placeholder_1_data'),
('placeholder_1_data', 'scaleshift_1'),
('const_scaleshift_1_w', 'scaleshift_1_w'),
('const_scaleshift_1_b', 'scaleshift_1_b'),
('scaleshift_1_w', 'scaleshift_1'),
('scaleshift_1_b', 'scaleshift_1'),
('scaleshift_1', 'scaleshift_1_data'),
('scaleshift_1_data', 'op_output')
],
{'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])},
'scaleshift_1_w': {'shape': np.array([3]), 'value': np.array([1, 1, 1])},
'scaleshift_1_b': {'shape': np.array([3]), 'value': np.array([0, 0, 0])},
'scaleshift_1': {'can_be_fused': False},
'scaleshift_1_data': {'shape': np.array([1, 227, 227, 3])}
})
graph_ref = build_graph(nodes_attributes,
[('placeholder_1', 'placeholder_1_data'),
('placeholder_1_data', 'scaleshift_1'),
('const_scaleshift_1_w', 'scaleshift_1_w'),
('const_scaleshift_1_b', 'scaleshift_1_b'),
('scaleshift_1_w', 'scaleshift_1'),
('scaleshift_1_b', 'scaleshift_1'),
('scaleshift_1', 'scaleshift_1_data'),
('scaleshift_1_data', 'op_output')
],
{'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])},
'const_scaleshift_1_w': {'shape': np.array([3]), 'value': np.array([1, 1, 1])},
'scaleshift_1_w': {'shape': np.array([3]), 'value': np.array([1, 1, 1])},
'const_scaleshift_1_b': {'shape': np.array([3]), 'value': np.array([0, 0, 0])},
'scaleshift_1_b': {'shape': np.array([3]), 'value': np.array([0, 0, 0])},
'scaleshift_1': {'can_be_fused': False},
'scaleshift_1_data': {'shape': np.array([1, 227, 227, 3])}
})
convert_scale_shift_to_mul_add(graph)
graph.clean_up()
(flag, resp) = compare_graphs(graph, graph_ref, 'scaleshift_1_data')
self.assertTrue(flag, resp)
class BatchNormDecomposition(unittest.TestCase):
def test_bn_decomposition_1(self):
graph = build_graph(nodes_attributes,
[('placeholder_1', 'placeholder_1_data'),
('placeholder_1_data', 'bn_op'),
('const_bn_const', 'bn_const'),
('const_bn_beta', 'bn_beta'),
('const_bn_mean', 'bn_mean'),
('const_bn_var', 'bn_var'),
('bn_const', 'bn_op'),
('bn_beta', 'bn_op'),
('bn_mean', 'bn_op'),
('bn_var', 'bn_op'),
('bn_op', 'bn_data'),
('concat', 'concat_data'),
('bn_data', 'concat'),
('concat_data', 'op_output')
],
{'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])},
'bn_op': {'eps': 1.2},
'bn_const': {'shape': np.array([3]), 'value': np.array([1, 2, 3])},
'bn_beta': {'shape': np.array([3]), 'value': np.array([1, 2, 3])},
'bn_mean': {'shape': np.array([3]), 'value': np.array([1, 2, 3])},
'bn_var': {'shape': np.array([3]), 'value': np.array([1, 2, 3])},
'bn_data': {'shape': np.array([1, 227, 227, 3])},
'concat_data': {}
}, nodes_with_edges_only=True)
graph_ref = build_graph(nodes_attributes,
[('placeholder_1', 'placeholder_1_data'),
('placeholder_1_data', 'mul_1'),
('const_mul_1_w', 'mul_1_w'),
('mul_1_w', 'mul_1'),
('mul_1', 'mul_1_data'),
('mul_1_data', 'add_1'),
('const_add_1_w', 'add_1_w'),
('add_1_w', 'add_1'),
('add_1', 'add_1_data'),
('add_1_data', 'mul_2'),
('const_mul_2_w', 'mul_2_w'),
('mul_2_w', 'mul_2'),
('mul_2', 'mul_2_data'),
('mul_2_data', 'add_2'),
('const_add_2_w', 'add_2_w'),
('add_2_w', 'add_2'),
('add_2', 'add_2_data'),
('concat', 'concat_data'),
('add_2_data', 'concat'),
('concat_data', 'op_output')
],
{'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])},
'const_mul_1_w': {'shape': np.array([3]),
'value': np.array([0.67419986, 0.55901699, 0.48795004])},
'mul_1_w': {'shape': np.array([3]),
'value': np.array([0.67419986, 0.55901699, 0.48795004])},
'const_mul_2_w': {'shape': np.array([3]), 'value': np.array([1, 2, 3])},
'mul_2_w': {'shape': np.array([3]), 'value': np.array([1, 2, 3])},
'const_add_1_w': {'shape': np.array([3]),
'value': np.array([-0.67419986, -1.11803399, -1.46385011])},
'add_1_w': {'shape': np.array([3]),
'value': np.array([-0.67419986, -1.11803399, -1.46385011])},
'const_add_2_w': {'shape': np.array([3]), 'value': np.array([1, 2, 3])},
'add_2_w': {'shape': np.array([3]), 'value': np.array([1, 2, 3])},
'add_2_data': {'shape': np.array([1, 227, 227, 3])},
'mul_1': {'can_be_fused': True},
'mul_2': {'can_be_fused': True},
'add_1': {'can_be_fused': True},
'add_2': {'can_be_fused': True},
'concat_data': {}
}, nodes_with_edges_only=True)
graph.graph['layout'] = 'NHWC'
convert_batch_norm(graph)
graph.clean_up()
(flag, resp) = compare_graphs(graph, graph_ref, 'concat_data')
self.assertTrue(flag, resp)
# 'can_be_fused': False for BatchNorm
def test_bn_decomposition_2(self):
graph = build_graph(nodes_attributes,
[('placeholder_1', 'placeholder_1_data'),
('placeholder_1_data', 'bn_op'),
('const_bn_const', 'bn_const'),
('const_bn_beta', 'bn_beta'),
('const_bn_mean', 'bn_mean'),
('const_bn_var', 'bn_var'),
('bn_const', 'bn_op'),
('bn_beta', 'bn_op'),
('bn_mean', 'bn_op'),
('bn_var', 'bn_op'),
('bn_op', 'bn_data'),
('concat', 'concat_data'),
('bn_data', 'concat'),
('concat_data', 'op_output')
],
{'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])},
'bn_op': {'eps': 1.2, 'can_be_fused': False},
'bn_const': {'shape': np.array([3]), 'value': np.array([1, 2, 3])},
'bn_beta': {'shape': np.array([3]), 'value': np.array([1, 2, 3])},
'bn_mean': {'shape': np.array([3]), 'value': np.array([1, 2, 3])},
'bn_var': {'shape': np.array([3]), 'value': np.array([1, 2, 3])},
'bn_data': {'shape': np.array([1, 227, 227, 3])},
'concat_data': {}
})
graph_ref = build_graph(nodes_attributes,
[('placeholder_1', 'placeholder_1_data'),
('placeholder_1_data', 'mul_1'),
('const_mul_1_w', 'mul_1_w'),
('mul_1_w', 'mul_1'),
('mul_1', 'mul_1_data'),
('mul_1_data', 'add_1'),
('const_add_1_w', 'add_1_w'),
('add_1_w', 'add_1'),
('add_1', 'add_1_data'),
('add_1_data', 'mul_2'),
('const_mul_2_w', 'mul_2_w'),
('mul_2_w', 'mul_2'),
('mul_2', 'mul_2_data'),
('mul_2_data', 'add_2'),
('const_add_2_w', 'add_2_w'),
('add_2_w', 'add_2'),
('add_2', 'add_2_data'),
('concat', 'concat_data'),
('add_2_data', 'concat'),
('concat_data', 'op_output')
],
{'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])},
'const_mul_1_w': {'shape': np.array([3]),
'value': np.array([0.67419986, 0.55901699, 0.48795004])},
'mul_1_w': {'shape': np.array([3]),
'value': np.array([0.67419986, 0.55901699, 0.48795004])},
'const_mul_2_w': {'shape': np.array([3]), 'value': np.array([1, 2, 3])},
'mul_2_w': {'shape': np.array([3]), 'value': np.array([1, 2, 3])},
'const_add_1_w': {'shape': np.array([3]),
'value': np.array([-0.67419986, -1.11803399, -1.46385011])},
'add_1_w': {'shape': np.array([3]),
'value': np.array([-0.67419986, -1.11803399, -1.46385011])},
'const_add_2_w': {'shape': np.array([3]), 'value': np.array([1, 2, 3])},
'add_2_w': {'shape': np.array([3]), 'value': np.array([1, 2, 3])},
'add_2_data': {'shape': np.array([1, 227, 227, 3])},
'mul_1': {'can_be_fused': False},
'mul_2': {'can_be_fused': False},
'add_1': {'can_be_fused': False},
'add_2': {'can_be_fused': False},
'concat_data': {}
})
graph.graph['layout'] = 'NHWC'
convert_batch_norm(graph)
graph.clean_up()
(flag, resp) = compare_graphs(graph, graph_ref, 'concat_data')
self.assertTrue(flag, resp) | 59.30814 | 119 | 0.424109 |
import unittest
import numpy as np
from mo.middle.passes.fusing.decomposition import convert_scale_shift_to_mul_add, convert_batch_norm
from mo.utils.ir_engine.compare_graphs import compare_graphs
from mo.utils.unittest.graph import build_graph
nodes_attributes = {
'placeholder_1': {'shape': None, 'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'},
'placeholder_1_data': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None},
'placeholder_2': {'shape': None, 'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'},
'placeholder_2_data': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None},
'scaleshift_1': {'type': 'ScaleShift', 'kind': 'op', 'op': 'ScaleShift', 'axis': 0},
'const_scaleshift_1_w': {'value': None, 'shape': None, 'kind': 'op'},
'scaleshift_1_w': {'value': None, 'shape': None, 'kind': 'data'},
'const_scaleshift_1_b': {'value': None, 'shape': None, 'kind': 'op'},
'scaleshift_1_b': {'value': None, 'shape': None, 'kind': 'data'},
'scaleshift_1_data': {'value': None, 'shape': None, 'kind': 'data'},
'mul_1': {'type': None, 'value': None, 'kind': 'op', 'op': 'Mul'},
'const_mul_1_w': {'value': None, 'shape': None, 'kind': 'op'},
'mul_1_w': {'value': None, 'shape': None, 'kind': 'data'},
'mul_1_data': {'value': None, 'shape': None, 'kind': 'data'},
'add_1': {'type': None, 'kind': 'op', 'op': 'Add'},
'const_add_1_w': {'value': None, 'shape': None, 'kind': 'op'},
'add_1_w': {'value': None, 'shape': None, 'kind': 'data'},
'add_1_data': {'value': None, 'shape': None, 'kind': 'data'},
'mul_2': {'type': None, 'kind': 'op', 'op': 'Mul'},
'const_mul_2_w': {'value': None, 'shape': None, 'kind': 'op'},
'mul_2_w': {'value': None, 'shape': None, 'kind': 'data'},
'mul_2_data': {'value': None, 'shape': None, 'kind': 'data'},
'add_2': {'type': None, 'kind': 'op', 'op': 'Add'},
'const_add_2_w': {'value': None, 'shape': None, 'kind': 'op'},
'add_2_w': {'value': None, 'shape': None, 'kind': 'data'},
'add_2_data': {'value': None, 'shape': None, 'kind': 'data'},
'placeholder_2/Reshape_': {'type': 'Reshape', 'kind': 'op', 'op': 'Reshape'},
'placeholder_2/Reshape_data': {'value': None, 'shape': None, 'kind': 'data'},
'placeholder_2/Reshape_const': {'type': 'Const', 'kind': 'op', 'op': 'Const', 'value': None},
'placeholder_2/Reshape_const_data': {'kind': 'data', 'value': None, 'shape': None},
'bn_op': {'type': None, 'kind': 'op', 'op': 'BatchNorm', 'can_be_fused': True},
'const_bn_const': {'value': None, 'shape': None, 'kind': 'op'},
'bn_const': {'value': None, 'shape': None, 'kind': 'data'},
'const_bn_beta': {'value': None, 'shape': None, 'kind': 'op'},
'bn_beta': {'value': None, 'shape': None, 'kind': 'data'},
'const_bn_mean': {'value': None, 'shape': None, 'kind': 'op'},
'bn_mean': {'value': None, 'shape': None, 'kind': 'data'},
'const_bn_var': {'value': None, 'shape': None, 'kind': 'op'},
'bn_var': {'value': None, 'shape': None, 'kind': 'data'},
'bn_data': {'value': None, 'shape': None, 'kind': 'data'},
'concat': {'type': 'Concat', 'kind': 'op', 'op': 'Concat'},
'concat_data': {'value': None, 'shape': None, 'kind': 'data'},
'op_output': {'kind': 'op', 'op': 'Result'}
}
class ScaleShiftToMulAdd(unittest.TestCase):
def test_scaleshift_to_mul_1(self):
graph = build_graph(nodes_attributes,
[('placeholder_1', 'placeholder_1_data'),
('placeholder_1_data', 'scaleshift_1'),
('const_scaleshift_1_w', 'scaleshift_1_w'),
('scaleshift_1_w', 'scaleshift_1'),
('scaleshift_1', 'scaleshift_1_data'),
('scaleshift_1_data', 'op_output')
],
{'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])},
'scaleshift_1_w': {'shape': np.array([3]), 'value': np.array([1, 2, 3])},
'scaleshift_1_data': {}
})
graph_ref = build_graph(nodes_attributes,
[('placeholder_1', 'placeholder_1_data'),
('placeholder_1_data', 'mul_1'),
('const_mul_1_w', 'mul_1_w'),
('mul_1_w', 'mul_1'),
('mul_1', 'scaleshift_1_data'),
('scaleshift_1_data', 'op_output')
],
{'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])},
'const_mul_1_w': {'shape': np.array([3]), 'value': np.array([1, 2, 3])},
'mul_1_w': {'shape': np.array([3]), 'value': np.array([1, 2, 3])},
'mul_1': {'can_be_fused': True},
'scaleshift_1_data': {}
})
graph.graph['layout'] = 'NHWC'
convert_scale_shift_to_mul_add(graph)
graph.clean_up()
(flag, resp) = compare_graphs(graph, graph_ref, 'placeholder_1')
self.assertTrue(flag, resp)
def test_scaleshift2_to_mul(self):
graph = build_graph(nodes_attributes,
[('placeholder_1', 'placeholder_1_data'),
('placeholder_2', 'placeholder_2_data'),
('placeholder_1_data', 'scaleshift_1'),
('placeholder_2_data', 'scaleshift_1'),
('scaleshift_1', 'scaleshift_1_data'),
('scaleshift_1_data', 'op_output')
],
{'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])},
'placeholder_2_data': {'shape': np.array([1, 227])},
'scaleshift_1_data': {}
})
graph_ref = build_graph(nodes_attributes,
[('placeholder_1', 'placeholder_1_data'),
('placeholder_2', 'placeholder_2_data'),
('placeholder_2_data', 'placeholder_2/Reshape_'),
('placeholder_2/Reshape_const', 'placeholder_2/Reshape_const_data'),
('placeholder_2/Reshape_const_data', 'placeholder_2/Reshape_'),
('placeholder_2/Reshape_', 'placeholder_2/Reshape_data'),
('placeholder_1_data', 'mul_1'),
('placeholder_2/Reshape_data', 'mul_1'),
('mul_1', 'scaleshift_1_data'),
('scaleshift_1_data', 'op_output')
],
{'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])},
'placeholder_2_data': {'shape': np.array([1, 227])},
'placeholder_2/Reshape_const': {'value': np.array([1, 227, 1, 1]), 'shape': [4]},
'placeholder_2/Reshape_const_data': {'value': np.array([1, 227, 1, 1]), 'shape': [4]},
'placeholder_2/Reshape_data': {'shape': np.array([1, 227, 1, 1])},
'mul_1': {'can_be_fused': True},
'scaleshift_1_data': {}
})
graph.graph['layout'] = 'NHWC'
convert_scale_shift_to_mul_add(graph)
graph.clean_up()
(flag, resp) = compare_graphs(graph, graph_ref, 'placeholder_1')
self.assertTrue(flag, resp)
def test_scaleshift2_axis1_to_mul(self):
graph = build_graph(nodes_attributes,
[('placeholder_1', 'placeholder_1_data'),
('placeholder_2', 'placeholder_2_data'),
('placeholder_1_data', 'scaleshift_1'),
('placeholder_2_data', 'scaleshift_1'),
('scaleshift_1', 'scaleshift_1_data'),
('scaleshift_1_data', 'op_output')
],
{'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])},
'placeholder_2_data': {'shape': np.array([227])},
'scaleshift_1': {'axis': 1},
'scaleshift_1_data': {}
})
graph_ref = build_graph(nodes_attributes,
[('placeholder_1', 'placeholder_1_data'),
('placeholder_2', 'placeholder_2_data'),
('placeholder_2_data', 'placeholder_2/Reshape_'),
('placeholder_2/Reshape_const', 'placeholder_2/Reshape_const_data'),
('placeholder_2/Reshape_const_data', 'placeholder_2/Reshape_'),
('placeholder_2/Reshape_', 'placeholder_2/Reshape_data'),
('placeholder_1_data', 'mul_1'),
('placeholder_2/Reshape_data', 'mul_1'),
('mul_1', 'scaleshift_1_data'),
('scaleshift_1_data', 'op_output')
],
{'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])},
'placeholder_2_data': {'shape': np.array([227])},
'placeholder_2/Reshape_const': {'value': np.array([1, 227, 1, 1]), 'shape': [4]},
'placeholder_2/Reshape_const_data': {'value': np.array([1, 227, 1, 1]), 'shape': [4]},
'placeholder_2/Reshape_data': {'shape': np.array([1, 227, 1, 1])},
'mul_1': {'can_be_fused': True},
'scaleshift_1_data': {}
})
graph.graph['layout'] = 'NHWC'
convert_scale_shift_to_mul_add(graph)
graph.clean_up()
(flag, resp) = compare_graphs(graph, graph_ref, 'placeholder_1')
self.assertTrue(flag, resp)
def test_scaleshift_to_mul_2(self):
    """ScaleShift with weights [1, 2, 3] and zero biases becomes a single Mul.

    The all-zero bias makes the Add half of the decomposition redundant, so
    only a Mul carrying the original weights must remain.
    """
    graph = build_graph(nodes_attributes,
                        [('placeholder_1', 'placeholder_1_data'),
                         ('placeholder_1_data', 'scaleshift_1'),
                         ('const_scaleshift_1_w', 'scaleshift_1_w'),
                         ('const_scaleshift_1_b', 'scaleshift_1_b'),
                         ('scaleshift_1_w', 'scaleshift_1'),
                         ('scaleshift_1_b', 'scaleshift_1'),
                         ('scaleshift_1', 'scaleshift_1_data'),
                         ('scaleshift_1_data', 'op_output')
                         ],
                        {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])},
                         'scaleshift_1_w': {'shape': np.array([3]), 'value': np.array([1, 2, 3])},
                         'scaleshift_1_b': {'shape': np.array([3]), 'value': np.array([0, 0, 0])},
                         'scaleshift_1_data': {}
                         })
    # Reference: placeholder -> Mul(w) -> output, no Add node.
    graph_ref = build_graph(nodes_attributes,
                            [('placeholder_1', 'placeholder_1_data'),
                             ('placeholder_1_data', 'mul_1'),
                             ('const_mul_1_w', 'mul_1_w'),
                             ('mul_1_w', 'mul_1'),
                             ('mul_1', 'scaleshift_1_data'),
                             ('scaleshift_1_data', 'op_output')
                             ],
                            {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])},
                             'const_mul_1_w': {'shape': np.array([3]), 'value': np.array([1, 2, 3])},
                             'mul_1_w': {'shape': np.array([3]), 'value': np.array([1, 2, 3])},
                             'mul_1': {'can_be_fused': True},
                             'scaleshift_1_data': {}
                             })
    graph.graph['layout'] = 'NHWC'
    convert_scale_shift_to_mul_add(graph)
    graph.clean_up()
    (flag, resp) = compare_graphs(graph, graph_ref, 'placeholder_1')
    self.assertTrue(flag, resp)
def test_scaleshift_to_mul_add(self):
    """ScaleShift with non-trivial weights and biases becomes Mul followed by Add."""
    graph = build_graph(nodes_attributes,
                        [('placeholder_1', 'placeholder_1_data'),
                         ('placeholder_1_data', 'scaleshift_1'),
                         ('const_scaleshift_1_w', 'scaleshift_1_w'),
                         ('const_scaleshift_1_b', 'scaleshift_1_b'),
                         ('scaleshift_1_w', 'scaleshift_1'),
                         ('scaleshift_1_b', 'scaleshift_1'),
                         ('scaleshift_1', 'scaleshift_1_data'),
                         ('scaleshift_1_data', 'op_output')
                         ],
                        {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])},
                         'scaleshift_1_w': {'shape': np.array([3]), 'value': np.array([1, 2, 3])},
                         'scaleshift_1_b': {'shape': np.array([3]), 'value': np.array([3, 2, 1])},
                         'scaleshift_1_data': {}
                         })
    # Reference: x -> Mul(w=[1,2,3]) -> Add(b=[3,2,1]) -> output, both fusable.
    graph_ref = build_graph(nodes_attributes,
                            [('placeholder_1', 'placeholder_1_data'),
                             ('placeholder_1_data', 'mul_1'),
                             ('const_mul_1_w', 'mul_1_w'),
                             ('mul_1_w', 'mul_1'),
                             ('mul_1', 'mul_1_data'),
                             ('mul_1_data', 'add_1'),
                             ('const_add_1_w', 'add_1_w'),
                             ('add_1_w', 'add_1'),
                             ('add_1', 'scaleshift_1_data'),
                             ('scaleshift_1_data', 'op_output')
                             ],
                            {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])},
                             'const_mul_1_w': {'shape': np.array([3]), 'value': np.array([1, 2, 3])},
                             'mul_1_w': {'shape': np.array([3]), 'value': np.array([1, 2, 3])},
                             'const_add_1_w': {'shape': np.array([3]), 'value': np.array([3, 2, 1])},
                             'add_1_w': {'shape': np.array([3]), 'value': np.array([3, 2, 1])},
                             'mul_1_data': {'shape': np.array([1, 227, 227, 3])},
                             'add_1': {'can_be_fused': True},
                             'mul_1': {'can_be_fused': True},
                             'scaleshift_1_data': {}
                             })
    graph.graph['layout'] = 'NHWC'
    convert_scale_shift_to_mul_add(graph)
    graph.clean_up()
    (flag, resp) = compare_graphs(graph, graph_ref, 'placeholder_1')
    self.assertTrue(flag, resp)
def test_scaleshift_to_nothing(self):
    """Identity ScaleShift (weights all 1, biases all 0) must be removed entirely."""
    graph = build_graph(nodes_attributes,
                        [('placeholder_1', 'placeholder_1_data'),
                         ('placeholder_1_data', 'scaleshift_1'),
                         ('const_scaleshift_1_w', 'scaleshift_1_w'),
                         ('const_scaleshift_1_b', 'scaleshift_1_b'),
                         ('scaleshift_1_w', 'scaleshift_1'),
                         ('scaleshift_1_b', 'scaleshift_1'),
                         ('scaleshift_1', 'scaleshift_1_data'),
                         ('scaleshift_1_data', 'op_output')
                         ],
                        {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])},
                         'scaleshift_1_w': {'shape': np.array([3]), 'value': np.array([1, 1, 1])},
                         'scaleshift_1_b': {'shape': np.array([3]), 'value': np.array([0, 0, 0])},
                         'scaleshift_1_data': {'shape': np.array([1, 227, 227, 3])}
                         }, nodes_with_edges_only=True)
    # Reference: the no-op node is gone; placeholder feeds the output directly.
    graph_ref = build_graph(nodes_attributes,
                            [('placeholder_1', 'placeholder_1_data'),
                             ('placeholder_1_data', 'op_output')
                             ],
                            {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])}},
                            nodes_with_edges_only=True)
    graph.graph['layout'] = 'NHWC'
    convert_scale_shift_to_mul_add(graph)
    graph.clean_up()
    (flag, resp) = compare_graphs(graph, graph_ref, 'placeholder_1')
    self.assertTrue(flag, resp)
def test_scaleshift_can_be_fused(self):
    """ScaleShift marked can_be_fused=False must be left untouched by the pass."""
    graph = build_graph(nodes_attributes,
                        [('placeholder_1', 'placeholder_1_data'),
                         ('placeholder_1_data', 'scaleshift_1'),
                         ('const_scaleshift_1_w', 'scaleshift_1_w'),
                         ('const_scaleshift_1_b', 'scaleshift_1_b'),
                         ('scaleshift_1_w', 'scaleshift_1'),
                         ('scaleshift_1_b', 'scaleshift_1'),
                         ('scaleshift_1', 'scaleshift_1_data'),
                         ('scaleshift_1_data', 'op_output')
                         ],
                        {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])},
                         'scaleshift_1_w': {'shape': np.array([3]), 'value': np.array([1, 1, 1])},
                         'scaleshift_1_b': {'shape': np.array([3]), 'value': np.array([0, 0, 0])},
                         'scaleshift_1': {'can_be_fused': False},
                         'scaleshift_1_data': {'shape': np.array([1, 227, 227, 3])}
                         })
    # Reference graph is structurally identical: nothing may be rewritten,
    # even though the weights/biases are an identity transform.
    graph_ref = build_graph(nodes_attributes,
                            [('placeholder_1', 'placeholder_1_data'),
                             ('placeholder_1_data', 'scaleshift_1'),
                             ('const_scaleshift_1_w', 'scaleshift_1_w'),
                             ('const_scaleshift_1_b', 'scaleshift_1_b'),
                             ('scaleshift_1_w', 'scaleshift_1'),
                             ('scaleshift_1_b', 'scaleshift_1'),
                             ('scaleshift_1', 'scaleshift_1_data'),
                             ('scaleshift_1_data', 'op_output')
                             ],
                            {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])},
                             'const_scaleshift_1_w': {'shape': np.array([3]), 'value': np.array([1, 1, 1])},
                             'scaleshift_1_w': {'shape': np.array([3]), 'value': np.array([1, 1, 1])},
                             'const_scaleshift_1_b': {'shape': np.array([3]), 'value': np.array([0, 0, 0])},
                             'scaleshift_1_b': {'shape': np.array([3]), 'value': np.array([0, 0, 0])},
                             'scaleshift_1': {'can_be_fused': False},
                             'scaleshift_1_data': {'shape': np.array([1, 227, 227, 3])}
                             })
    convert_scale_shift_to_mul_add(graph)
    graph.clean_up()
    (flag, resp) = compare_graphs(graph, graph_ref, 'scaleshift_1_data')
    self.assertTrue(flag, resp)
class BatchNormDecomposition(unittest.TestCase):
    """Tests for convert_batch_norm: BatchNorm is decomposed into a
    Mul -> Add -> Mul -> Add chain (normalize, then scale/shift)."""

    def test_bn_decomposition_1(self):
        """BN with eps=1.2 decomposes into a fusable Mul/Add/Mul/Add chain.

        The first Mul/Add pair carries the precomputed 1/sqrt(var+eps) and
        -mean/sqrt(var+eps) values; the second pair carries gamma/beta.
        """
        graph = build_graph(nodes_attributes,
                            [('placeholder_1', 'placeholder_1_data'),
                             ('placeholder_1_data', 'bn_op'),
                             ('const_bn_const', 'bn_const'),
                             ('const_bn_beta', 'bn_beta'),
                             ('const_bn_mean', 'bn_mean'),
                             ('const_bn_var', 'bn_var'),
                             ('bn_const', 'bn_op'),
                             ('bn_beta', 'bn_op'),
                             ('bn_mean', 'bn_op'),
                             ('bn_var', 'bn_op'),
                             ('bn_op', 'bn_data'),
                             ('concat', 'concat_data'),
                             ('bn_data', 'concat'),
                             ('concat_data', 'op_output')
                             ],
                            {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])},
                             'bn_op': {'eps': 1.2},
                             'bn_const': {'shape': np.array([3]), 'value': np.array([1, 2, 3])},
                             'bn_beta': {'shape': np.array([3]), 'value': np.array([1, 2, 3])},
                             'bn_mean': {'shape': np.array([3]), 'value': np.array([1, 2, 3])},
                             'bn_var': {'shape': np.array([3]), 'value': np.array([1, 2, 3])},
                             'bn_data': {'shape': np.array([1, 227, 227, 3])},
                             'concat_data': {}
                             }, nodes_with_edges_only=True)
        graph_ref = build_graph(nodes_attributes,
                                [('placeholder_1', 'placeholder_1_data'),
                                 ('placeholder_1_data', 'mul_1'),
                                 ('const_mul_1_w', 'mul_1_w'),
                                 ('mul_1_w', 'mul_1'),
                                 ('mul_1', 'mul_1_data'),
                                 ('mul_1_data', 'add_1'),
                                 ('const_add_1_w', 'add_1_w'),
                                 ('add_1_w', 'add_1'),
                                 ('add_1', 'add_1_data'),
                                 ('add_1_data', 'mul_2'),
                                 ('const_mul_2_w', 'mul_2_w'),
                                 ('mul_2_w', 'mul_2'),
                                 ('mul_2', 'mul_2_data'),
                                 ('mul_2_data', 'add_2'),
                                 ('const_add_2_w', 'add_2_w'),
                                 ('add_2_w', 'add_2'),
                                 ('add_2', 'add_2_data'),
                                 ('concat', 'concat_data'),
                                 ('add_2_data', 'concat'),
                                 ('concat_data', 'op_output')
                                 ],
                                {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])},
                                 'const_mul_1_w': {'shape': np.array([3]),
                                                   'value': np.array([0.67419986, 0.55901699, 0.48795004])},
                                 'mul_1_w': {'shape': np.array([3]),
                                             'value': np.array([0.67419986, 0.55901699, 0.48795004])},
                                 'const_mul_2_w': {'shape': np.array([3]), 'value': np.array([1, 2, 3])},
                                 'mul_2_w': {'shape': np.array([3]), 'value': np.array([1, 2, 3])},
                                 'const_add_1_w': {'shape': np.array([3]),
                                                   'value': np.array([-0.67419986, -1.11803399, -1.46385011])},
                                 'add_1_w': {'shape': np.array([3]),
                                             'value': np.array([-0.67419986, -1.11803399, -1.46385011])},
                                 'const_add_2_w': {'shape': np.array([3]), 'value': np.array([1, 2, 3])},
                                 'add_2_w': {'shape': np.array([3]), 'value': np.array([1, 2, 3])},
                                 'add_2_data': {'shape': np.array([1, 227, 227, 3])},
                                 'mul_1': {'can_be_fused': True},
                                 'mul_2': {'can_be_fused': True},
                                 'add_1': {'can_be_fused': True},
                                 'add_2': {'can_be_fused': True},
                                 'concat_data': {}
                                 }, nodes_with_edges_only=True)
        graph.graph['layout'] = 'NHWC'
        convert_batch_norm(graph)
        graph.clean_up()
        (flag, resp) = compare_graphs(graph, graph_ref, 'concat_data')
        self.assertTrue(flag, resp)

    def test_bn_decomposition_2(self):
        """Same decomposition, but can_be_fused=False on the BN op must be
        propagated to every generated Mul/Add node."""
        graph = build_graph(nodes_attributes,
                            [('placeholder_1', 'placeholder_1_data'),
                             ('placeholder_1_data', 'bn_op'),
                             ('const_bn_const', 'bn_const'),
                             ('const_bn_beta', 'bn_beta'),
                             ('const_bn_mean', 'bn_mean'),
                             ('const_bn_var', 'bn_var'),
                             ('bn_const', 'bn_op'),
                             ('bn_beta', 'bn_op'),
                             ('bn_mean', 'bn_op'),
                             ('bn_var', 'bn_op'),
                             ('bn_op', 'bn_data'),
                             ('concat', 'concat_data'),
                             ('bn_data', 'concat'),
                             ('concat_data', 'op_output')
                             ],
                            {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])},
                             'bn_op': {'eps': 1.2, 'can_be_fused': False},
                             'bn_const': {'shape': np.array([3]), 'value': np.array([1, 2, 3])},
                             'bn_beta': {'shape': np.array([3]), 'value': np.array([1, 2, 3])},
                             'bn_mean': {'shape': np.array([3]), 'value': np.array([1, 2, 3])},
                             'bn_var': {'shape': np.array([3]), 'value': np.array([1, 2, 3])},
                             'bn_data': {'shape': np.array([1, 227, 227, 3])},
                             'concat_data': {}
                             })
        graph_ref = build_graph(nodes_attributes,
                                [('placeholder_1', 'placeholder_1_data'),
                                 ('placeholder_1_data', 'mul_1'),
                                 ('const_mul_1_w', 'mul_1_w'),
                                 ('mul_1_w', 'mul_1'),
                                 ('mul_1', 'mul_1_data'),
                                 ('mul_1_data', 'add_1'),
                                 ('const_add_1_w', 'add_1_w'),
                                 ('add_1_w', 'add_1'),
                                 ('add_1', 'add_1_data'),
                                 ('add_1_data', 'mul_2'),
                                 ('const_mul_2_w', 'mul_2_w'),
                                 ('mul_2_w', 'mul_2'),
                                 ('mul_2', 'mul_2_data'),
                                 ('mul_2_data', 'add_2'),
                                 ('const_add_2_w', 'add_2_w'),
                                 ('add_2_w', 'add_2'),
                                 ('add_2', 'add_2_data'),
                                 ('concat', 'concat_data'),
                                 ('add_2_data', 'concat'),
                                 ('concat_data', 'op_output')
                                 ],
                                {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])},
                                 'const_mul_1_w': {'shape': np.array([3]),
                                                   'value': np.array([0.67419986, 0.55901699, 0.48795004])},
                                 'mul_1_w': {'shape': np.array([3]),
                                             'value': np.array([0.67419986, 0.55901699, 0.48795004])},
                                 'const_mul_2_w': {'shape': np.array([3]), 'value': np.array([1, 2, 3])},
                                 'mul_2_w': {'shape': np.array([3]), 'value': np.array([1, 2, 3])},
                                 'const_add_1_w': {'shape': np.array([3]),
                                                   'value': np.array([-0.67419986, -1.11803399, -1.46385011])},
                                 'add_1_w': {'shape': np.array([3]),
                                             'value': np.array([-0.67419986, -1.11803399, -1.46385011])},
                                 'const_add_2_w': {'shape': np.array([3]), 'value': np.array([1, 2, 3])},
                                 'add_2_w': {'shape': np.array([3]), 'value': np.array([1, 2, 3])},
                                 'add_2_data': {'shape': np.array([1, 227, 227, 3])},
                                 'mul_1': {'can_be_fused': False},
                                 'mul_2': {'can_be_fused': False},
                                 'add_1': {'can_be_fused': False},
                                 'add_2': {'can_be_fused': False},
                                 'concat_data': {}
                                 })
        graph.graph['layout'] = 'NHWC'
        convert_batch_norm(graph)
        graph.clean_up()
        (flag, resp) = compare_graphs(graph, graph_ref, 'concat_data')
        self.assertTrue(flag, resp)
f72f6568dca744bd5665beedd6e8518fdef41379 | 417 | py | Python | shopping_mall_server/wsgi.py | ninemilli-song/shopping-mall-server | 140fbcba2afbab59eda6ca95da59a5ba6945beb4 | [
"MIT"
] | null | null | null | shopping_mall_server/wsgi.py | ninemilli-song/shopping-mall-server | 140fbcba2afbab59eda6ca95da59a5ba6945beb4 | [
"MIT"
] | 2 | 2019-02-12T02:06:53.000Z | 2019-02-12T04:10:10.000Z | shopping_mall_server/wsgi.py | ninemilli-song/shopping-mall-server | 140fbcba2afbab59eda6ca95da59a5ba6945beb4 | [
"MIT"
] | null | null | null | """
WSGI config for shopping_mall_server project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/howto/deployment/wsgi/
"""
import os

from django.core.wsgi import get_wsgi_application

# Point Django at the project settings unless the environment already
# provides a DJANGO_SETTINGS_MODULE (setdefault never overrides).
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'shopping_mall_server.settings')

# Module-level WSGI callable that servers (gunicorn, mod_wsgi, ...) import.
application = get_wsgi_application()
| 24.529412 | 80 | 0.798561 |
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'shopping_mall_server.settings')
application = get_wsgi_application()
| true | true |
f72f66a98c1753c49fed08e7baf30b625045fe66 | 6,505 | py | Python | tests/conftest.py | omstrumpf/redis-py | a890532cbb93a85f6ae0bc23e014e6749d39b43b | [
"MIT"
] | null | null | null | tests/conftest.py | omstrumpf/redis-py | a890532cbb93a85f6ae0bc23e014e6749d39b43b | [
"MIT"
] | null | null | null | tests/conftest.py | omstrumpf/redis-py | a890532cbb93a85f6ae0bc23e014e6749d39b43b | [
"MIT"
] | null | null | null | import pytest
import redis
from mock import Mock
from distutils.version import StrictVersion
REDIS_INFO = {}
default_redis_host = "localhost"
default_redis_port = "6379"
default_cluster_master_host = "127.0.0.1"
default_cluster_master_port = "6379"
def pytest_addoption(parser):
    """pytest hook: register command-line options for the test session.

    Every option falls back to the module-level defaults above so the suite
    runs against a local Redis without any flags.
    """
    parser.addoption('--redis-host', default=default_redis_host,
                     action="store",
                     help="Redis hostname,"
                          " defaults to `%(default)s`")
    parser.addoption('--redis-port', default=default_redis_port,
                     action="store",
                     help="Redis port,"
                          " defaults to `%(default)s`")
    parser.addoption('--cluster-master-host',
                     default=default_cluster_master_host,
                     action="store",
                     help="Hostname of cluster master,"
                          " defaults to `%(default)s`")
    parser.addoption('--cluster-master-port',
                     default=default_cluster_master_port,
                     action="store",
                     help="Port of cluster master,"
                          " defaults to `%(default)s`")
def _build_redis_url(redis_host, redis_port):
return "redis://{}:{}/9".format(redis_host, redis_port)
def _get_info(redis_url):
    """Return the server INFO dict for the Redis instance at *redis_url*.

    A short-lived client is created just for the query; its connection pool
    is disconnected even when INFO raises, so a misconfigured server does
    not leak sockets out of session startup.
    """
    client = redis.Redis.from_url(redis_url)
    try:
        return client.info()
    finally:
        # Previously the disconnect was skipped when info() raised,
        # leaving the probe connection open.
        client.connection_pool.disconnect()
def pytest_sessionstart(session):
    """pytest hook: probe the target server once and cache its metadata.

    Populates REDIS_INFO with the server version and architecture so the
    skip_* helpers below can build their markers at collection time.
    """
    redis_host = session.config.getoption("--redis-host")
    redis_port = session.config.getoption("--redis-port")
    redis_url = _build_redis_url(redis_host, redis_port)
    info = _get_info(redis_url)
    version = info["redis_version"]
    arch_bits = info["arch_bits"]
    REDIS_INFO["version"] = version
    REDIS_INFO["arch_bits"] = arch_bits
def skip_if_server_version_lt(min_version):
    """Return a skipif marker that fires when the server is older than
    *min_version* (as recorded in REDIS_INFO at session start)."""
    current = StrictVersion(REDIS_INFO["version"])
    too_old = current < StrictVersion(min_version)
    return pytest.mark.skipif(
        too_old,
        reason="Redis version required >= {}".format(min_version))
def skip_if_server_version_gte(min_version):
    """Return a skipif marker that fires when the server is at or above
    *min_version* (as recorded in REDIS_INFO at session start)."""
    current = StrictVersion(REDIS_INFO["version"])
    too_new = current >= StrictVersion(min_version)
    return pytest.mark.skipif(
        too_new,
        reason="Redis version required < {}".format(min_version))
def skip_unless_arch_bits(arch_bits):
    """Return a skipif marker that fires unless the server build matches
    *arch_bits* (e.g. 32 or 64, per the cached INFO data)."""
    wrong_arch = REDIS_INFO["arch_bits"] != arch_bits
    return pytest.mark.skipif(
        wrong_arch, reason="server is not {}-bit".format(arch_bits))
def _get_client(cls, request, single_connection_client=True, **kwargs):
    """Create a client of type *cls* pointed at the configured test server.

    When *single_connection_client* is true the returned client is pinned
    to a single connection via client().  If *request* is provided, a
    finalizer is registered that flushes test db 9 and closes the client
    at fixture teardown.
    """
    redis_host = request.config.getoption("--redis-host")
    redis_port = request.config.getoption("--redis-port")
    redis_url = _build_redis_url(redis_host, redis_port)
    client = cls.from_url(redis_url, **kwargs)
    if single_connection_client:
        client = client.client()
    if request:
        def teardown():
            try:
                client.flushdb()
            except redis.ConnectionError:
                # handle cases where a test disconnected a client
                # just manually retry the flushdb
                client.flushdb()
            client.close()
            client.connection_pool.disconnect()
        request.addfinalizer(teardown)
    return client
@pytest.fixture()
def r(request):
    """Default Redis client fixture; db 9 is flushed at teardown."""
    with _get_client(redis.Redis, request) as client:
        yield client
@pytest.fixture()
def r2(request):
    """A second, independent client for tests that need multiple clients."""
    with _get_client(redis.Redis, request) as client:
        yield client
@pytest.fixture()
def redis_host(request):
    """The --redis-host command-line option value."""
    return request.config.getoption("--redis-host")
@pytest.fixture()
def redis_port(request):
    """The --redis-port command-line option value."""
    return request.config.getoption("--redis-port")
def _gen_cluster_mock_resp(r, response):
connection = Mock()
connection.read_response.return_value = response
r.connection = connection
return r
@pytest.fixture()
def mock_cluster_resp_ok(request, **kwargs):
    """Client whose mocked connection always replies 'OK'."""
    r = _get_client(redis.Redis, request, **kwargs)
    return _gen_cluster_mock_resp(r, 'OK')
@pytest.fixture()
def mock_cluster_resp_int(request, **kwargs):
    """Client whose mocked connection always replies '2'."""
    r = _get_client(redis.Redis, request, **kwargs)
    return _gen_cluster_mock_resp(r, '2')
@pytest.fixture()
def mock_cluster_resp_info(request, **kwargs):
    """Client whose mocked connection replies with a canned CLUSTER INFO
    payload (3-master cluster, all 16384 slots assigned/ok)."""
    r = _get_client(redis.Redis, request, **kwargs)
    response = ('cluster_state:ok\r\ncluster_slots_assigned:16384\r\n'
                'cluster_slots_ok:16384\r\ncluster_slots_pfail:0\r\n'
                'cluster_slots_fail:0\r\ncluster_known_nodes:7\r\n'
                'cluster_size:3\r\ncluster_current_epoch:7\r\n'
                'cluster_my_epoch:2\r\ncluster_stats_messages_sent:170262\r\n'
                'cluster_stats_messages_received:105653\r\n')
    return _gen_cluster_mock_resp(r, response)
@pytest.fixture()
def mock_cluster_resp_nodes(request, **kwargs):
    """Client whose mocked connection replies with a canned CLUSTER NODES
    payload: masters, slaves, a 'myself' entry, and one failed node."""
    r = _get_client(redis.Redis, request, **kwargs)
    response = ('c8253bae761cb1ecb2b61857d85dfe455a0fec8b 172.17.0.7:7006 '
                'slave aa90da731f673a99617dfe930306549a09f83a6b 0 '
                '1447836263059 5 connected\n'
                '9bd595fe4821a0e8d6b99d70faa660638a7612b3 172.17.0.7:7008 '
                'master - 0 1447836264065 0 connected\n'
                'aa90da731f673a99617dfe930306549a09f83a6b 172.17.0.7:7003 '
                'myself,master - 0 0 2 connected 5461-10922\n'
                '1df047e5a594f945d82fc140be97a1452bcbf93e 172.17.0.7:7007 '
                'slave 19efe5a631f3296fdf21a5441680f893e8cc96ec 0 '
                '1447836262556 3 connected\n'
                '4ad9a12e63e8f0207025eeba2354bcf4c85e5b22 172.17.0.7:7005 '
                'master - 0 1447836262555 7 connected 0-5460\n'
                '19efe5a631f3296fdf21a5441680f893e8cc96ec 172.17.0.7:7004 '
                'master - 0 1447836263562 3 connected 10923-16383\n'
                'fbb23ed8cfa23f17eaf27ff7d0c410492a1093d6 172.17.0.7:7002 '
                'master,fail - 1447829446956 1447829444948 1 disconnected\n'
                )
    return _gen_cluster_mock_resp(r, response)
@pytest.fixture()
def mock_cluster_resp_slaves(request, **kwargs):
    """Client whose mocked connection replies with a canned CLUSTER SLAVES
    payload (single slave entry)."""
    r = _get_client(redis.Redis, request, **kwargs)
    response = ("['1df047e5a594f945d82fc140be97a1452bcbf93e 172.17.0.7:7007 "
                "slave 19efe5a631f3296fdf21a5441680f893e8cc96ec 0 "
                "1447836789290 3 connected']")
    return _gen_cluster_mock_resp(r, response)
| 35.353261 | 78 | 0.654112 | import pytest
import redis
from mock import Mock
from distutils.version import StrictVersion
REDIS_INFO = {}
default_redis_host = "localhost"
default_redis_port = "6379"
default_cluster_master_host = "127.0.0.1"
default_cluster_master_port = "6379"
def pytest_addoption(parser):
parser.addoption('--redis-host', default=default_redis_host,
action="store",
help="Redis hostname,"
" defaults to `%(default)s`")
parser.addoption('--redis-port', default=default_redis_port,
action="store",
help="Redis port,"
" defaults to `%(default)s`")
parser.addoption('--cluster-master-host',
default=default_cluster_master_host,
action="store",
help="Hostname of cluster master,"
" defaults to `%(default)s`")
parser.addoption('--cluster-master-port',
default=default_cluster_master_port,
action="store",
help="Port of cluster master,"
" defaults to `%(default)s`")
def _build_redis_url(redis_host, redis_port):
return "redis://{}:{}/9".format(redis_host, redis_port)
def _get_info(redis_url):
client = redis.Redis.from_url(redis_url)
info = client.info()
client.connection_pool.disconnect()
return info
def pytest_sessionstart(session):
redis_host = session.config.getoption("--redis-host")
redis_port = session.config.getoption("--redis-port")
redis_url = _build_redis_url(redis_host, redis_port)
info = _get_info(redis_url)
version = info["redis_version"]
arch_bits = info["arch_bits"]
REDIS_INFO["version"] = version
REDIS_INFO["arch_bits"] = arch_bits
def skip_if_server_version_lt(min_version):
redis_version = REDIS_INFO["version"]
check = StrictVersion(redis_version) < StrictVersion(min_version)
return pytest.mark.skipif(
check,
reason="Redis version required >= {}".format(min_version))
def skip_if_server_version_gte(min_version):
redis_version = REDIS_INFO["version"]
check = StrictVersion(redis_version) >= StrictVersion(min_version)
return pytest.mark.skipif(
check,
reason="Redis version required < {}".format(min_version))
def skip_unless_arch_bits(arch_bits):
return pytest.mark.skipif(REDIS_INFO["arch_bits"] != arch_bits,
reason="server is not {}-bit".format(arch_bits))
def _get_client(cls, request, single_connection_client=True, **kwargs):
redis_host = request.config.getoption("--redis-host")
redis_port = request.config.getoption("--redis-port")
redis_url = _build_redis_url(redis_host, redis_port)
client = cls.from_url(redis_url, **kwargs)
if single_connection_client:
client = client.client()
if request:
def teardown():
try:
client.flushdb()
except redis.ConnectionError:
client.flushdb()
client.close()
client.connection_pool.disconnect()
request.addfinalizer(teardown)
return client
@pytest.fixture()
def r(request):
with _get_client(redis.Redis, request) as client:
yield client
@pytest.fixture()
def r2(request):
with _get_client(redis.Redis, request) as client:
yield client
@pytest.fixture()
def redis_host(request):
return request.config.getoption("--redis-host")
@pytest.fixture()
def redis_port(request):
return request.config.getoption("--redis-port")
def _gen_cluster_mock_resp(r, response):
connection = Mock()
connection.read_response.return_value = response
r.connection = connection
return r
@pytest.fixture()
def mock_cluster_resp_ok(request, **kwargs):
r = _get_client(redis.Redis, request, **kwargs)
return _gen_cluster_mock_resp(r, 'OK')
@pytest.fixture()
def mock_cluster_resp_int(request, **kwargs):
r = _get_client(redis.Redis, request, **kwargs)
return _gen_cluster_mock_resp(r, '2')
@pytest.fixture()
def mock_cluster_resp_info(request, **kwargs):
r = _get_client(redis.Redis, request, **kwargs)
response = ('cluster_state:ok\r\ncluster_slots_assigned:16384\r\n'
'cluster_slots_ok:16384\r\ncluster_slots_pfail:0\r\n'
'cluster_slots_fail:0\r\ncluster_known_nodes:7\r\n'
'cluster_size:3\r\ncluster_current_epoch:7\r\n'
'cluster_my_epoch:2\r\ncluster_stats_messages_sent:170262\r\n'
'cluster_stats_messages_received:105653\r\n')
return _gen_cluster_mock_resp(r, response)
@pytest.fixture()
def mock_cluster_resp_nodes(request, **kwargs):
r = _get_client(redis.Redis, request, **kwargs)
response = ('c8253bae761cb1ecb2b61857d85dfe455a0fec8b 172.17.0.7:7006 '
'slave aa90da731f673a99617dfe930306549a09f83a6b 0 '
'1447836263059 5 connected\n'
'9bd595fe4821a0e8d6b99d70faa660638a7612b3 172.17.0.7:7008 '
'master - 0 1447836264065 0 connected\n'
'aa90da731f673a99617dfe930306549a09f83a6b 172.17.0.7:7003 '
'myself,master - 0 0 2 connected 5461-10922\n'
'1df047e5a594f945d82fc140be97a1452bcbf93e 172.17.0.7:7007 '
'slave 19efe5a631f3296fdf21a5441680f893e8cc96ec 0 '
'1447836262556 3 connected\n'
'4ad9a12e63e8f0207025eeba2354bcf4c85e5b22 172.17.0.7:7005 '
'master - 0 1447836262555 7 connected 0-5460\n'
'19efe5a631f3296fdf21a5441680f893e8cc96ec 172.17.0.7:7004 '
'master - 0 1447836263562 3 connected 10923-16383\n'
'fbb23ed8cfa23f17eaf27ff7d0c410492a1093d6 172.17.0.7:7002 '
'master,fail - 1447829446956 1447829444948 1 disconnected\n'
)
return _gen_cluster_mock_resp(r, response)
@pytest.fixture()
def mock_cluster_resp_slaves(request, **kwargs):
r = _get_client(redis.Redis, request, **kwargs)
response = ("['1df047e5a594f945d82fc140be97a1452bcbf93e 172.17.0.7:7007 "
"slave 19efe5a631f3296fdf21a5441680f893e8cc96ec 0 "
"1447836789290 3 connected']")
return _gen_cluster_mock_resp(r, response)
| true | true |
f72f66ea355573925b4529312b9cec90130e327d | 9,984 | py | Python | recipes/recipe_modules/bot_update/examples/full.py | xing133/webrtc_depot_tools | ddcba7ca3c7f0432aee6bd9740c21f785c32a982 | [
"BSD-3-Clause"
] | 1 | 2021-11-22T05:43:42.000Z | 2021-11-22T05:43:42.000Z | recipes/recipe_modules/bot_update/examples/full.py | xing133/webrtc_depot_tools | ddcba7ca3c7f0432aee6bd9740c21f785c32a982 | [
"BSD-3-Clause"
] | null | null | null | recipes/recipe_modules/bot_update/examples/full.py | xing133/webrtc_depot_tools | ddcba7ca3c7f0432aee6bd9740c21f785c32a982 | [
"BSD-3-Clause"
] | null | null | null | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
DEPS = [
'bot_update',
'gclient',
'gerrit',
'tryserver',
'recipe_engine/buildbucket',
'recipe_engine/json',
'recipe_engine/path',
'recipe_engine/platform',
'recipe_engine/properties',
'recipe_engine/runtime',
]
from recipe_engine import types
from PB.go.chromium.org.luci.buildbucket.proto.build import Build
def RunSteps(api):
  """Exercise bot_update.ensure_checkout with a chromium/src gclient config.

  Builds a gclient solution for src (revision taken from the input gitiles
  commit), wires up got_revision mappings and repo path maps, reads a set of
  knobs from input properties, and runs the checkout.  Optionally deapplies
  the patch and resolves the fixed chromium revision afterwards.
  """
  api.gclient.use_mirror = True

  commit = api.buildbucket.build.input.gitiles_commit

  src_cfg = api.gclient.make_config(CACHE_DIR='[GIT_CACHE]')
  soln = src_cfg.solutions.add()
  soln.name = 'src'
  soln.url = 'https://chromium.googlesource.com/chromium/src.git'
  # Prefer the concrete commit id; fall back to the ref, else unpinned.
  soln.revision = commit.id or commit.ref or None
  api.gclient.c = src_cfg
  api.gclient.c.revisions.update(api.properties.get('revisions', {}))
  if api.properties.get('deprecated_got_revision_mapping'):
    # Legacy single-direction mapping kept for coverage of the old API.
    api.gclient.c.got_revision_mapping['src'] = 'got_cr_revision'
  else:
    api.gclient.c.got_revision_reverse_mapping['got_cr_revision'] = 'src'
    api.gclient.c.got_revision_reverse_mapping['got_revision'] = 'src'
    api.gclient.c.got_revision_reverse_mapping['got_v8_revision'] = 'src/v8'
    api.gclient.c.got_revision_reverse_mapping['got_angle_revision'] = (
        'src/third_party/angle')
  api.gclient.c.repo_path_map.update({
      'https://chromium.googlesource.com/angle/angle': (
          'src/third_party/angle', 'HEAD'),
      'https://chromium.googlesource.com/v8/v8': ('src/v8', 'HEAD'),
      'https://webrtc.googlesource.com/src': ('src/third_party/webrtc', 'HEAD'),
  })

  # Checkout knobs, all driven by input properties so GenTests can cover
  # each combination.
  patch = api.properties.get('patch', True)
  clobber = True if api.properties.get('clobber') else False
  with_branch_heads = api.properties.get('with_branch_heads', False)
  with_tags = api.properties.get('with_tags', False)
  refs = api.properties.get('refs', [])
  root_solution_revision = api.properties.get('root_solution_revision')
  suffix = api.properties.get('suffix')
  gerrit_no_reset = True if api.properties.get('gerrit_no_reset') else False
  gerrit_no_rebase_patch_ref = bool(
      api.properties.get('gerrit_no_rebase_patch_ref'))
  manifest_name = api.properties.get('manifest_name')
  patch_refs = api.properties.get('patch_refs')
  set_output_commit = api.properties.get('set_output_commit', True)

  # Optional simulated bot_update JSON output for tests.
  step_test_data = None
  bot_update_output = types.thaw(api.properties.get('bot_update_output'))
  if bot_update_output:
    step_test_data = lambda: api.json.test_api.output(bot_update_output)
  bot_update_step = api.bot_update.ensure_checkout(
      patch=patch,
      with_branch_heads=with_branch_heads,
      with_tags=with_tags,
      refs=refs,
      clobber=clobber,
      root_solution_revision=root_solution_revision,
      suffix=suffix,
      gerrit_no_reset=gerrit_no_reset,
      gerrit_no_rebase_patch_ref=gerrit_no_rebase_patch_ref,
      disable_syntax_validation=True,
      manifest_name=manifest_name,
      patch_refs=patch_refs,
      set_output_commit=set_output_commit,
      step_test_data=step_test_data,
  )
  if patch:
    api.bot_update.deapply_patch(bot_update_step)

  if api.properties.get('resolve_chromium_fixed_version'):
    api.bot_update.resolve_fixed_revision(bot_update_step.json.output, 'src')
def GenTests(api):
  """Simulation tests covering the knobs RunSteps reads from properties."""

  # Helper: a tryjob build, defaulting to the chromium/src repo.
  def try_build(**kwargs):
    kwargs.setdefault(
        'git_repo', 'https://chromium.googlesource.com/chromium/src')
    return api.buildbucket.try_build('chromium/src', 'try', 'linux', **kwargs)

  # Helper: a CI build, defaulting to chromium/src, with patching disabled.
  def ci_build(**kwargs):
    kwargs.setdefault(
        'git_repo', 'https://chromium.googlesource.com/chromium/src')
    return (
        api.buildbucket.ci_build('chromium/src', 'ci', 'linux', **kwargs) +
        api.properties(patch=False)
    )

  yield (
      api.test('basic') +
      ci_build()
  )

  yield (
      api.test('input_commit_with_id_without_repo') +
      api.buildbucket.build(Build(
          input={
              'gitiles_commit': {
                  'id': 'a' * 40,
              },
          },
      ))
  )

  yield (
      api.test('unrecognized_commit_repo') +
      ci_build(git_repo='https://unrecognized/repo')
  )

  yield (
      api.test('basic_luci') +
      ci_build() +
      api.runtime(is_experimental=False, is_luci=True)
  )

  yield (
      api.test('with_manifest_name') +
      ci_build() +
      api.properties(
          manifest_name='checkout',
          set_output_commit=False,
      ) +
      api.step_data('bot_update (without patch)', api.json.output({
          'source_manifest': {
              'directories': {
                  'src': {
                      'git_checkout': {
                          'repo_url': (
                              'https://chromium.googlesource.com/chromium/src.git'),
                          'revision': 'ea17a292ecfb3dcdaa8dd226e67d6504fc13c15a'
                      },
                  },
              },
          },
      }))
  )

  yield (
      api.test('resolve_chromium_fixed_version') +
      ci_build() +
      api.properties(resolve_chromium_fixed_version=True)
  )

  yield (
      api.test('basic_with_branch_heads') +
      ci_build() +
      api.properties(
          with_branch_heads=True,
          suffix='with branch heads'
      )
  )

  yield (
      api.test('with_tags') +
      api.properties(with_tags=True)
  )

  yield (
      api.test('deprecated_got_revision_mapping') +
      try_build() +
      api.properties(
          deprecated_got_revision_mapping=True,
          set_output_commit=False,
      )
  )

  yield (
      api.test('refs') +
      api.properties(refs=['+refs/change/1/2/333'])
  )

  # Failure modes: distinct retcodes distinguish generic failure (1),
  # patch-apply failure (88), and patch-download failure (87).
  yield (
      api.test('tryjob_fail') +
      try_build() +
      api.step_data('bot_update', api.json.invalid(None), retcode=1)
  )

  yield (
      api.test('tryjob_fail_patch') +
      try_build() +
      api.properties(fail_patch='apply') +
      api.step_data('bot_update', retcode=88)
  )

  yield (
      api.test('tryjob_fail_patch_download') +
      try_build() +
      api.properties(fail_patch='download') +
      api.step_data('bot_update', retcode=87)
  )

  yield (
      api.test('clobber') +
      api.properties(clobber=1)
  )

  yield (
      api.test('reset_root_solution_revision') +
      api.properties(root_solution_revision='revision')
  )

  yield (
      api.test('gerrit_no_reset') +
      api.properties(gerrit_no_reset=1)
  )

  yield (
      api.test('gerrit_no_rebase_patch_ref') +
      api.properties(gerrit_no_rebase_patch_ref=True)
  )

  yield (
      api.test('tryjob_v8') +
      try_build(git_repo='https://chromium.googlesource.com/v8/v8') +
      api.properties(revisions={'src/v8': 'abc'})
  )

  yield (
      api.test('tryjob_v8_head_by_default') +
      try_build(git_repo='https://chromium.googlesource.com/v8/v8')
  )

  yield (
      api.test('tryjob_gerrit_angle') +
      try_build(git_repo='https://chromium.googlesource.com/angle/angle')
  )

  yield (
      api.test('no_apply_patch_on_gclient') +
      try_build(git_repo='https://chromium.googlesource.com/angle/angle')
  )

  yield (
      api.test('tryjob_gerrit_v8_feature_branch') +
      try_build(git_repo='https://chromium.googlesource.com/v8/v8') +
      api.tryserver.gerrit_change_target_ref('refs/heads/experimental/feature')
  )

  yield (
      api.test('tryjob_gerrit_feature_branch') +
      try_build() +
      api.tryserver.gerrit_change_target_ref('refs/heads/experimental/feature')
  )

  yield (
      api.test('tryjob_gerrit_branch_heads') +
      try_build() +
      api.tryserver.gerrit_change_target_ref('refs/branch-heads/67')
  )

  yield (
      api.test('tryjob_gerrit_webrtc') +
      try_build(git_repo='https://webrtc.googlesource.com/src')
  )

  yield (
      api.test('multiple_patch_refs') +
      api.properties(
          patch_refs=[
              ('https://chromium.googlesource.com/chromium/src@'
               'refs/changes/12/34/5'),
              'https://chromium.googlesource.com/v8/v8@refs/changes/124/45/6',
          ],
      )
  )

  # The no_cp_* cases feed simulated bot_update output back through the
  # 'bot_update_output' property for builds without a commit position.
  yield (
      api.test('no_cp_checkout_a_specific_commit') +
      ci_build(revision='a' * 40) +
      api.properties(
          revisions={'got_revision': 'src'},
          bot_update_output={
              'properties': {
                  'got_revision': 'a' * 40,
              },
              'manifest': {
                  'src': {
                      'revision': 'a' * 40,
                      'repository': 'https://chromium.googlesource.com/chromium/src',
                  }
              }
          }
      )
  )

  yield (
      api.test('no_cp_checkout_master') +
      ci_build(revision='') +
      api.properties(
          revisions={'got_revision': 'src'},
          bot_update_output={
              'properties': {
                  'got_revision': 'a' * 40,
              },
              'manifest': {
                  'src': {
                      'revision': 'a' * 40,
                      'repository': 'https://chromium.googlesource.com/chromium/src',
                  }
              }
          }
      )
  )

  yield (
      api.test('no_cp_checkout_a_branch_head') +
      ci_build(revision='', git_ref='refs/branch-heads/x') +
      api.properties(
          revisions={'got_revision': 'src'},
          bot_update_output={
              'properties': {
                  'got_revision': 'a' * 40,
              },
              'manifest': {
                  'src': {
                      'revision': 'a' * 40,
                      'repository': 'https://chromium.googlesource.com/chromium/src',
                  }
              }
          }
      )
  )

  yield (
      api.test('no_cp_checkout_HEAD') +
      ci_build(revision='HEAD') +
      api.properties(
          revisions={'got_revision': 'src'},
          bot_update_output={
              'properties': {
                  'got_revision': 'a' * 40,
              },
              'manifest': {
                  'src': {
                      'revision': 'a' * 40,
                      'repository': 'https://chromium.googlesource.com/chromium/src',
                  }
              }
          }
      )
  )
| 30.072289 | 80 | 0.610176 |
DEPS = [
'bot_update',
'gclient',
'gerrit',
'tryserver',
'recipe_engine/buildbucket',
'recipe_engine/json',
'recipe_engine/path',
'recipe_engine/platform',
'recipe_engine/properties',
'recipe_engine/runtime',
]
from recipe_engine import types
from PB.go.chromium.org.luci.buildbucket.proto.build import Build
def RunSteps(api):
api.gclient.use_mirror = True
commit = api.buildbucket.build.input.gitiles_commit
src_cfg = api.gclient.make_config(CACHE_DIR='[GIT_CACHE]')
soln = src_cfg.solutions.add()
soln.name = 'src'
soln.url = 'https://chromium.googlesource.com/chromium/src.git'
soln.revision = commit.id or commit.ref or None
api.gclient.c = src_cfg
api.gclient.c.revisions.update(api.properties.get('revisions', {}))
if api.properties.get('deprecated_got_revision_mapping'):
api.gclient.c.got_revision_mapping['src'] = 'got_cr_revision'
else:
api.gclient.c.got_revision_reverse_mapping['got_cr_revision'] = 'src'
api.gclient.c.got_revision_reverse_mapping['got_revision'] = 'src'
api.gclient.c.got_revision_reverse_mapping['got_v8_revision'] = 'src/v8'
api.gclient.c.got_revision_reverse_mapping['got_angle_revision'] = (
'src/third_party/angle')
api.gclient.c.repo_path_map.update({
'https://chromium.googlesource.com/angle/angle': (
'src/third_party/angle', 'HEAD'),
'https://chromium.googlesource.com/v8/v8': ('src/v8', 'HEAD'),
'https://webrtc.googlesource.com/src': ('src/third_party/webrtc', 'HEAD'),
})
patch = api.properties.get('patch', True)
clobber = True if api.properties.get('clobber') else False
with_branch_heads = api.properties.get('with_branch_heads', False)
with_tags = api.properties.get('with_tags', False)
refs = api.properties.get('refs', [])
root_solution_revision = api.properties.get('root_solution_revision')
suffix = api.properties.get('suffix')
gerrit_no_reset = True if api.properties.get('gerrit_no_reset') else False
gerrit_no_rebase_patch_ref = bool(
api.properties.get('gerrit_no_rebase_patch_ref'))
manifest_name = api.properties.get('manifest_name')
patch_refs = api.properties.get('patch_refs')
set_output_commit = api.properties.get('set_output_commit', True)
step_test_data = None
bot_update_output = types.thaw(api.properties.get('bot_update_output'))
if bot_update_output:
step_test_data = lambda: api.json.test_api.output(bot_update_output)
bot_update_step = api.bot_update.ensure_checkout(
patch=patch,
with_branch_heads=with_branch_heads,
with_tags=with_tags,
refs=refs,
clobber=clobber,
root_solution_revision=root_solution_revision,
suffix=suffix,
gerrit_no_reset=gerrit_no_reset,
gerrit_no_rebase_patch_ref=gerrit_no_rebase_patch_ref,
disable_syntax_validation=True,
manifest_name=manifest_name,
patch_refs=patch_refs,
set_output_commit=set_output_commit,
step_test_data=step_test_data,
)
if patch:
api.bot_update.deapply_patch(bot_update_step)
if api.properties.get('resolve_chromium_fixed_version'):
api.bot_update.resolve_fixed_revision(bot_update_step.json.output, 'src')
def GenTests(api):
def try_build(**kwargs):
kwargs.setdefault(
'git_repo', 'https://chromium.googlesource.com/chromium/src')
return api.buildbucket.try_build('chromium/src', 'try', 'linux', **kwargs)
def ci_build(**kwargs):
kwargs.setdefault(
'git_repo', 'https://chromium.googlesource.com/chromium/src')
return (
api.buildbucket.ci_build('chromium/src', 'ci', 'linux', **kwargs) +
api.properties(patch=False)
)
yield (
api.test('basic') +
ci_build()
)
yield (
api.test('input_commit_with_id_without_repo') +
api.buildbucket.build(Build(
input={
'gitiles_commit': {
'id': 'a' * 40,
},
},
))
)
yield (
api.test('unrecognized_commit_repo') +
ci_build(git_repo='https://unrecognized/repo')
)
yield (
api.test('basic_luci') +
ci_build() +
api.runtime(is_experimental=False, is_luci=True)
)
yield (
api.test('with_manifest_name') +
ci_build() +
api.properties(
manifest_name='checkout',
set_output_commit=False,
) +
api.step_data('bot_update (without patch)', api.json.output({
'source_manifest': {
'directories': {
'src': {
'git_checkout': {
'repo_url': (
'https://chromium.googlesource.com/chromium/src.git'),
'revision': 'ea17a292ecfb3dcdaa8dd226e67d6504fc13c15a'
},
},
},
},
}))
)
yield (
api.test('resolve_chromium_fixed_version') +
ci_build() +
api.properties(resolve_chromium_fixed_version=True)
)
yield (
api.test('basic_with_branch_heads') +
ci_build() +
api.properties(
with_branch_heads=True,
suffix='with branch heads'
)
)
yield (
api.test('with_tags') +
api.properties(with_tags=True)
)
yield (
api.test('deprecated_got_revision_mapping') +
try_build() +
api.properties(
deprecated_got_revision_mapping=True,
set_output_commit=False,
)
)
yield (
api.test('refs') +
api.properties(refs=['+refs/change/1/2/333'])
)
yield (
api.test('tryjob_fail') +
try_build() +
api.step_data('bot_update', api.json.invalid(None), retcode=1)
)
yield (
api.test('tryjob_fail_patch') +
try_build() +
api.properties(fail_patch='apply') +
api.step_data('bot_update', retcode=88)
)
yield (
api.test('tryjob_fail_patch_download') +
try_build() +
api.properties(fail_patch='download') +
api.step_data('bot_update', retcode=87)
)
yield (
api.test('clobber') +
api.properties(clobber=1)
)
yield (
api.test('reset_root_solution_revision') +
api.properties(root_solution_revision='revision')
)
yield (
api.test('gerrit_no_reset') +
api.properties(gerrit_no_reset=1)
)
yield (
api.test('gerrit_no_rebase_patch_ref') +
api.properties(gerrit_no_rebase_patch_ref=True)
)
yield (
api.test('tryjob_v8') +
try_build(git_repo='https://chromium.googlesource.com/v8/v8') +
api.properties(revisions={'src/v8': 'abc'})
)
yield (
api.test('tryjob_v8_head_by_default') +
try_build(git_repo='https://chromium.googlesource.com/v8/v8')
)
yield (
api.test('tryjob_gerrit_angle') +
try_build(git_repo='https://chromium.googlesource.com/angle/angle')
)
yield (
api.test('no_apply_patch_on_gclient') +
try_build(git_repo='https://chromium.googlesource.com/angle/angle')
)
yield (
api.test('tryjob_gerrit_v8_feature_branch') +
try_build(git_repo='https://chromium.googlesource.com/v8/v8') +
api.tryserver.gerrit_change_target_ref('refs/heads/experimental/feature')
)
yield (
api.test('tryjob_gerrit_feature_branch') +
try_build() +
api.tryserver.gerrit_change_target_ref('refs/heads/experimental/feature')
)
yield (
api.test('tryjob_gerrit_branch_heads') +
try_build() +
api.tryserver.gerrit_change_target_ref('refs/branch-heads/67')
)
yield (
api.test('tryjob_gerrit_webrtc') +
try_build(git_repo='https://webrtc.googlesource.com/src')
)
yield (
api.test('multiple_patch_refs') +
api.properties(
patch_refs=[
('https://chromium.googlesource.com/chromium/src@'
'refs/changes/12/34/5'),
'https://chromium.googlesource.com/v8/v8@refs/changes/124/45/6',
],
)
)
yield (
api.test('no_cp_checkout_a_specific_commit') +
ci_build(revision='a' * 40) +
api.properties(
revisions={'got_revision': 'src'},
bot_update_output={
'properties': {
'got_revision': 'a' * 40,
},
'manifest': {
'src': {
'revision': 'a' * 40,
'repository': 'https://chromium.googlesource.com/chromium/src',
}
}
}
)
)
yield (
api.test('no_cp_checkout_master') +
ci_build(revision='') +
api.properties(
revisions={'got_revision': 'src'},
bot_update_output={
'properties': {
'got_revision': 'a' * 40,
},
'manifest': {
'src': {
'revision': 'a' * 40,
'repository': 'https://chromium.googlesource.com/chromium/src',
}
}
}
)
)
yield (
api.test('no_cp_checkout_a_branch_head') +
ci_build(revision='', git_ref='refs/branch-heads/x') +
api.properties(
revisions={'got_revision': 'src'},
bot_update_output={
'properties': {
'got_revision': 'a' * 40,
},
'manifest': {
'src': {
'revision': 'a' * 40,
'repository': 'https://chromium.googlesource.com/chromium/src',
}
}
}
)
)
yield (
api.test('no_cp_checkout_HEAD') +
ci_build(revision='HEAD') +
api.properties(
revisions={'got_revision': 'src'},
bot_update_output={
'properties': {
'got_revision': 'a' * 40,
},
'manifest': {
'src': {
'revision': 'a' * 40,
'repository': 'https://chromium.googlesource.com/chromium/src',
}
}
}
)
)
| true | true |
f72f675f782fe8125b53198e49770a42ec655291 | 2,434 | py | Python | examples/test_chromedriver.py | tomejorge/SeleniumBase | e3e50bbd80594c52131b0d88ca3e2c2f7692e340 | [
"MIT"
] | 2,745 | 2016-07-20T09:13:15.000Z | 2022-03-29T15:07:31.000Z | examples/test_chromedriver.py | tomejorge/SeleniumBase | e3e50bbd80594c52131b0d88ca3e2c2f7692e340 | [
"MIT"
] | 384 | 2016-07-17T20:45:26.000Z | 2022-03-31T22:35:35.000Z | examples/test_chromedriver.py | tomejorge/SeleniumBase | e3e50bbd80594c52131b0d88ca3e2c2f7692e340 | [
"MIT"
] | 704 | 2016-07-17T20:47:04.000Z | 2022-03-31T04:32:35.000Z | """
This test is only for Chrome!
(Verify that your chromedriver is compatible with your version of Chrome.)
"""
import colorama
from seleniumbase import BaseCase
class ChromedriverTests(BaseCase):
    """Checks that the local chromedriver is compatible with the local Chrome.

    Compares the major version numbers of chromedriver and Chrome and prints
    color-coded upgrade advice when they differ. The test skips itself when
    run with any browser other than Chrome.
    """

    def test_chromedriver_matches_chrome(self):
        if self.browser != "chrome":
            print("\n This test is only for Chrome!")
            print(' (Run with: "--browser=chrome")')
            self.skip("This test is only for Chrome!")
        # Full version strings (e.g. "96.0.4664.45") and their major parts.
        chrome_version = self.get_chrome_version()
        major_chrome_version = chrome_version.split(".")[0]
        chromedriver_version = self.get_chromedriver_version()
        major_chromedriver_version = chromedriver_version.split(".")[0]
        # Color presets for terminal output.
        colorama.init(autoreset=True)
        c1 = colorama.Fore.BLUE + colorama.Back.LIGHTCYAN_EX
        c2 = colorama.Fore.BLUE + colorama.Back.LIGHTGREEN_EX
        c3 = colorama.Fore.BLUE + colorama.Back.LIGHTYELLOW_EX
        c4 = colorama.Fore.RED + colorama.Back.LIGHTYELLOW_EX
        c5 = colorama.Fore.RED + colorama.Back.LIGHTGREEN_EX
        cr = colorama.Style.RESET_ALL
        pr_chromedriver_version = c3 + chromedriver_version + cr
        pr_chrome_version = c2 + chrome_version + cr
        message = (
            "\n"
            "* Your version of chromedriver is: %s\n"
            "*\n* And your version of Chrome is: %s"
            % (pr_chromedriver_version, pr_chrome_version)
        )
        print(message)
        # BUG FIX: compare the major versions numerically. The previous code
        # compared the raw strings, so e.g. "99" < "100" evaluated to False
        # (lexicographic ordering), giving wrong upgrade advice.
        if int(major_chromedriver_version) < int(major_chrome_version):
            install_sb = (
                "seleniumbase install chromedriver %s" % major_chrome_version
            )
            pr_install_sb = c1 + install_sb + cr
            up_msg = "You may want to upgrade your version of chromedriver:"
            up_msg = c4 + up_msg + cr
            message = "*\n* %s\n*\n* >>> %s" % (up_msg, pr_install_sb)
            print(message)
        elif int(major_chromedriver_version) > int(major_chrome_version):
            up_msg = "You may want to upgrade your version of Chrome:"
            up_msg = c5 + up_msg + cr
            up_url = c1 + "chrome://settings/help" + cr
            message = "*\n* %s\n*\n* See: %s" % (up_msg, up_url)
            print(message)
        else:
            up_msg = (
                "Success! Your chromedriver is compatible with your Chrome!"
            )
            up_msg = c1 + up_msg + cr
            message = "*\n* %s\n" % up_msg
            print(message)
| 42.701754 | 77 | 0.601068 | import colorama
from seleniumbase import BaseCase
class ChromedriverTests(BaseCase):
    """Checks that the local chromedriver is compatible with the local Chrome.

    Compares the major version numbers of chromedriver and Chrome and prints
    color-coded upgrade advice when they differ. The test skips itself when
    run with any browser other than Chrome.
    """

    def test_chromedriver_matches_chrome(self):
        if self.browser != "chrome":
            print("\n This test is only for Chrome!")
            print(' (Run with: "--browser=chrome")')
            self.skip("This test is only for Chrome!")
        # Full version strings (e.g. "96.0.4664.45") and their major parts.
        chrome_version = self.get_chrome_version()
        major_chrome_version = chrome_version.split(".")[0]
        chromedriver_version = self.get_chromedriver_version()
        major_chromedriver_version = chromedriver_version.split(".")[0]
        # Color presets for terminal output.
        colorama.init(autoreset=True)
        c1 = colorama.Fore.BLUE + colorama.Back.LIGHTCYAN_EX
        c2 = colorama.Fore.BLUE + colorama.Back.LIGHTGREEN_EX
        c3 = colorama.Fore.BLUE + colorama.Back.LIGHTYELLOW_EX
        c4 = colorama.Fore.RED + colorama.Back.LIGHTYELLOW_EX
        c5 = colorama.Fore.RED + colorama.Back.LIGHTGREEN_EX
        cr = colorama.Style.RESET_ALL
        pr_chromedriver_version = c3 + chromedriver_version + cr
        pr_chrome_version = c2 + chrome_version + cr
        message = (
            "\n"
            "* Your version of chromedriver is: %s\n"
            "*\n* And your version of Chrome is: %s"
            % (pr_chromedriver_version, pr_chrome_version)
        )
        print(message)
        # BUG FIX: compare the major versions numerically. The previous code
        # compared the raw strings, so e.g. "99" < "100" evaluated to False
        # (lexicographic ordering), giving wrong upgrade advice.
        if int(major_chromedriver_version) < int(major_chrome_version):
            install_sb = (
                "seleniumbase install chromedriver %s" % major_chrome_version
            )
            pr_install_sb = c1 + install_sb + cr
            up_msg = "You may want to upgrade your version of chromedriver:"
            up_msg = c4 + up_msg + cr
            message = "*\n* %s\n*\n* >>> %s" % (up_msg, pr_install_sb)
            print(message)
        elif int(major_chromedriver_version) > int(major_chrome_version):
            up_msg = "You may want to upgrade your version of Chrome:"
            up_msg = c5 + up_msg + cr
            up_url = c1 + "chrome://settings/help" + cr
            message = "*\n* %s\n*\n* See: %s" % (up_msg, up_url)
            print(message)
        else:
            up_msg = (
                "Success! Your chromedriver is compatible with your Chrome!"
            )
            up_msg = c1 + up_msg + cr
            message = "*\n* %s\n" % up_msg
            print(message)
| true | true |
f72f6791764c1d39d3340e5dce5ad5a3a904b8e3 | 1,230 | py | Python | tflite-onnx/onnx_tflite/tflite/ArgMinOptions.py | jwj04ok/ONNX_Convertor | 067a17e16dfc8aa80e36f44c4523959daf7359f5 | [
"MIT"
] | 193 | 2017-12-20T16:46:20.000Z | 2022-03-29T07:40:54.000Z | tinyengine/tflite/ArgMinOptions.py | liuyy3364/mcunet | f53f9e20e8e912bdb111b4c32da75e71e9a59597 | [
"Apache-2.0"
] | 141 | 2017-12-21T08:00:20.000Z | 2021-06-15T14:53:03.000Z | tinyengine/tflite/ArgMinOptions.py | liuyy3364/mcunet | f53f9e20e8e912bdb111b4c32da75e71e9a59597 | [
"Apache-2.0"
] | 55 | 2017-12-22T18:40:13.000Z | 2022-01-17T05:43:51.000Z | # automatically generated by the FlatBuffers compiler, do not modify
# namespace: tflite
import flatbuffers
from flatbuffers.compat import import_numpy
np = import_numpy()
class ArgMinOptions(object):
    """Read-only accessor for the TFLite `ArgMinOptions` FlatBuffers table.

    Auto-generated by the FlatBuffers compiler; do not modify the logic.
    """
    __slots__ = ['_tab']
    @classmethod
    def GetRootAsArgMinOptions(cls, buf, offset):
        """Returns an ArgMinOptions wrapping the root table of `buf` at `offset`."""
        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
        x = ArgMinOptions()
        x.Init(buf, n + offset)
        return x
    @classmethod
    def ArgMinOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
        """Checks whether `buf` carries the TFLite file identifier ("TFL3")."""
        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
    # ArgMinOptions
    def Init(self, buf, pos):
        """Binds this accessor to the table located at `pos` in `buf`."""
        self._tab = flatbuffers.table.Table(buf, pos)
    # ArgMinOptions
    def OutputType(self):
        """Returns the `output_type` field stored at vtable slot 4 (0 when absent)."""
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
        if o != 0:
            return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos)
        return 0
# Builder helpers (FlatBuffers-generated) for serializing an ArgMinOptions table.
def ArgMinOptionsStart(builder): builder.StartObject(1)  # begin a table with one field slot
def ArgMinOptionsAddOutputType(builder, outputType): builder.PrependInt8Slot(0, outputType, 0)  # slot 0, default 0
def ArgMinOptionsEnd(builder): return builder.EndObject()  # finish and return the table offset
| 33.243243 | 114 | 0.717073 |
import flatbuffers
from flatbuffers.compat import import_numpy
np = import_numpy()
class ArgMinOptions(object):
__slots__ = ['_tab']
@classmethod
def GetRootAsArgMinOptions(cls, buf, offset):
n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
x = ArgMinOptions()
x.Init(buf, n + offset)
return x
@classmethod
def ArgMinOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
def Init(self, buf, pos):
self._tab = flatbuffers.table.Table(buf, pos)
def OutputType(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos)
return 0
def ArgMinOptionsStart(builder): builder.StartObject(1)
def ArgMinOptionsAddOutputType(builder, outputType): builder.PrependInt8Slot(0, outputType, 0)
def ArgMinOptionsEnd(builder): return builder.EndObject()
| true | true |
f72f68b943a4b5db8edb14eb15f9a5472cbdde4a | 1,052 | py | Python | watchdog_kj_kultura/users/models.py | watchdogpolska/watchdog-kj-kultura | ea1a5c52ef2a174c012cc08eff5fdd7aa3b911b0 | [
"MIT"
] | null | null | null | watchdog_kj_kultura/users/models.py | watchdogpolska/watchdog-kj-kultura | ea1a5c52ef2a174c012cc08eff5fdd7aa3b911b0 | [
"MIT"
] | 138 | 2016-12-10T19:18:18.000Z | 2019-06-10T19:32:40.000Z | watchdog_kj_kultura/users/models.py | watchdogpolska/watchdog-kj-kultura | ea1a5c52ef2a174c012cc08eff5fdd7aa3b911b0 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals, absolute_import
from django.contrib.auth.models import AbstractUser
from django.core.urlresolvers import reverse
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
from django.utils.translation import ugettext_lazy as _
@python_2_unicode_compatible
class User(AbstractUser):
    """Project user model extending Django's AbstractUser with a free-form
    name and a notification opt-in flag."""
    # First Name and Last Name do not cover name patterns
    # around the globe, so a single free-form field is used instead.
    name = models.CharField(_('Name of User'), blank=True, max_length=255)
    # Opt-in flag: when True the user receives notifications about fix
    # suggestions to organization descriptions.
    notify_about_fix = models.BooleanField(_("Notify about fix suggestion to organization"),
                                           help_text=_("Check to receive notifications about fix " +
                                                       "suggestion to organization description"),
                                           default=False)
    def __str__(self):
        """Returns the username as the human-readable representation."""
        return self.username
    def get_absolute_url(self):
        """Returns the canonical detail-page URL for this user."""
        return reverse('users:detail', kwargs={'username': self.username})
| 38.962963 | 100 | 0.664449 |
from __future__ import unicode_literals, absolute_import
from django.contrib.auth.models import AbstractUser
from django.core.urlresolvers import reverse
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
from django.utils.translation import ugettext_lazy as _
@python_2_unicode_compatible
class User(AbstractUser):
name = models.CharField(_('Name of User'), blank=True, max_length=255)
notify_about_fix = models.BooleanField(_("Notify about fix suggestion to organization"),
help_text=_("Check to receive notifications about fix " +
"suggestion to organization description"),
default=False)
def __str__(self):
return self.username
def get_absolute_url(self):
return reverse('users:detail', kwargs={'username': self.username})
| true | true |
f72f69278791826313821144226481332bfd9cf5 | 14,156 | py | Python | pynger/fingerprint/FVC_utilities.py | DottD/pynger | 9a24b43a2170234e5059a54ed20329e036260b0a | [
"MIT"
] | 1 | 2021-11-10T16:37:28.000Z | 2021-11-10T16:37:28.000Z | pynger/fingerprint/FVC_utilities.py | DottD/pynger | 9a24b43a2170234e5059a54ed20329e036260b0a | [
"MIT"
] | null | null | null | pynger/fingerprint/FVC_utilities.py | DottD/pynger | 9a24b43a2170234e5059a54ed20329e036260b0a | [
"MIT"
] | null | null | null | import os
import re
import io
import numpy as np
import PIL.Image
import typing
from pynger.types import Image, Mask, Field
from pynger.fingerprint.tuning_lro import LROEstimator
from pynger.fingerprint.sampling import convert_to_full, subsample
from pynger.field.manipulation import polar2cart
from pynger.misc import recursively_scan_dir_gen, recursively_scan_dir, random_combination
from itertools import combinations, starmap
class Proxy:
    """Abstract base for objects that can be serialized to / deserialized from
    FVC-OnGoing file formats. Subclasses must override both methods."""
    def write(self, path: str):
        """Serializes the object to `path`. Must be reimplemented by subclasses."""
        raise NotImplementedError("Derived classes must reimplement this method")
    def read(self, path: str):
        """Loads the object from `path`. Must be reimplemented by subclasses."""
        raise NotImplementedError("Derived classes must reimplement this method")
class MaskProxy(Proxy):
    """ (De)serializes a boolean foreground mask (FVC-OnGoing .fg format). """
    def __init__(self, *args):
        """ Initializes the proxy.

        Args:
            *args: either a numpy array holding the mask, or a path to read it from;
                with no arguments the mask is left unset (None).
        """
        if len(args) == 1:
            if isinstance(args[0], np.ndarray):
                self.mask = args[0]
            elif isinstance(args[0], str):
                self.read(args[0])
            else:
                raise TypeError("Arguments not recognized")
        else:
            self.mask = None

    def read(self, path: str, full: bool = True):
        """ Reads the mask, according to FVC-OnGoing specs.

        Args:
            path: The input file path (generally with .fg extension)
            full: Whether the full output should be returned (currently unused)

        Return:
            The boolean mask represented in the given file.
        """
        if not os.path.exists(path):
            raise RuntimeError("The input file does not exist")
        with open(path, 'r') as f:
            # First line: "rows cols"; following lines: space-separated 0/1 values.
            shape = tuple([int(n) for n in f.readline().split()])
            mask = np.empty(shape, dtype=bool)
            for row_n, line in enumerate(f):
                mask[row_n, :] = [bool(int(n)) for n in line.split()]
        self.mask = mask
        return mask

    def write(self, path: str):
        """ Writes the mask, according to FVC-OnGoing specs.

        Args:
            path: The output file path (generally with .fg extension)
        """
        # BUG FIX: the previous implementation printed the repr of the shape
        # tuple and of each numpy row (e.g. "(2, 3)" and "[1 0 1]"), which
        # read() cannot parse back. Emit space-separated integers instead so
        # that write()/read() round-trip correctly.
        with open(path, 'w') as f:
            print(*self.mask.shape, file=f)
            for row in self.mask.astype(int):
                print(*row, file=f)
class FieldProxy(Proxy):
    """ (De)serializes an orientation field and its validity mask
    (FVC-OnGoing .gt binary format, "DIRIMG00" header). """
    def __init__(self, *args):
        """ Initializes the proxy.

        Args:
            *args: either two numpy arrays (angle, mask) that are copied in,
                or a path to read from; otherwise both attributes are None.
        """
        if len(args) == 2 and isinstance(args[0], np.ndarray) and isinstance(args[1], np.ndarray):
            self.angle, self.mask = args[0].copy(), args[1].copy()
        elif len(args) == 1 and isinstance(args[0], str):
            self.read(args[0])
        else:
            self.angle, self.mask = None, None

    def read(self, path: str, full: bool = True):
        """ Reads the field, according to FVC-OnGoing specs.

        Args:
            path: The input file path (generally with .gt extension)
            full: Whether the sampled field should be expanded to full resolution

        Return:
            A pair (angle, mask); angles are in radians in [0, pi).
        """
        if not os.path.exists(path):
            raise RuntimeError("The input file does not exist")
        with open(path, 'rb') as f:
            # Skip the 8-byte ASCII header ("DIRIMG00").
            f.read(8)
            # Sampling specifications: borders, steps, then grid size.
            get_next_int = lambda: int.from_bytes(f.read(4), byteorder='little', signed=True)
            self.border_x = get_next_int()
            self.border_y = get_next_int()
            self.step_x = get_next_int()
            self.step_y = get_next_int()
            cols = get_next_int()
            rows = get_next_int()
            # Values are interleaved (angle, mask) byte pairs, row-major.
            get_next_uint8 = lambda: int.from_bytes(f.read(1), byteorder='little', signed=False)
            content = [(get_next_uint8(), get_next_uint8()) for _ in range(cols*rows)]
            angle, mask = zip(*content)
            angle = np.array(angle, dtype=float).reshape((rows, cols))
            angle *= np.pi / 255.0  # bytes 0..255 map to radians 0..pi
            mask = np.array(mask, dtype=bool).reshape((rows, cols))
            # Optionally expand the sampled grid back to full image resolution.
            if full:
                self.angle = convert_to_full(angle, border_x=self.border_x, border_y=self.border_y, step_x=self.step_x, step_y=self.step_y, mode='constant')
                self.mask = convert_to_full(mask, border_x=self.border_x, border_y=self.border_y, step_x=self.step_x, step_y=self.step_y, mode='constant')
            else:
                self.angle = angle
                self.mask = mask
        return self.angle, self.mask

    def write(self, path: str, **kwargs):
        """ Writes the field, according to FVC-OnGoing specs.

        Args:
            path: The output file path (generally with .gt extension)

        Keyword Args:
            border_x (int): Horizontal border used to sample the field (defaults to 14)
            border_y (int): Vertical border used to sample the field (defaults to 14)
            step_x (int): Horizontal distance between two consecutive sample points (defaults to 8)
            step_y (int): Vertical distance between two consecutive sample points (defaults to 8)
            subsample (bool): Whether the input shall be sub-sampled before saving it (defaults to True)

        Note:
            Set subsample=False to write the stored arrays as-is.
        """
        bx = kwargs.get('border_x', 14)
        by = kwargs.get('border_y', 14)
        sx = kwargs.get('step_x', 8)
        sy = kwargs.get('step_y', 8)
        needSubsample = kwargs.pop('subsample', True)
        if self.angle.shape != self.mask.shape:
            raise RuntimeError('angle and mask sizes mismatch')
        if needSubsample:
            angle = subsample(self.angle, is_field=False, smooth=False, **kwargs)
            mask = subsample(self.mask, is_field=False, smooth=False, **kwargs)
        else:
            angle = self.angle
            mask = self.mask
        # BUG FIX: operate on new arrays — the previous in-place "angle *= ..."
        # silently corrupted self.angle when subsampling was disabled.
        angle = (angle * (255.0 / np.pi)).astype(int)  # radians 0..pi -> bytes 0..255
        mask = mask.astype(int)
        # BUG FIX: guard against an all-background mask, which previously
        # raised ZeroDivisionError on mask.max() == 0.
        peak = mask.max()
        if peak > 0:
            mask = mask * int(255 / peak)
        with open(path, 'wb') as f:
            f.write("DIRIMG00".encode('ascii'))
            # Sampling specifications: borders, steps, then grid size.
            put_int = lambda n: f.write(int(n).to_bytes(4, byteorder='little', signed=True))
            put_int(bx)
            put_int(by)
            put_int(sx)
            put_int(sy)
            rows, cols = angle.shape
            put_int(cols)
            put_int(rows)
            # Interleaved (angle, mask) byte pairs, row-major.
            put_uint8 = lambda n: f.write(int(n).to_bytes(1, byteorder='little', signed=False))
            for a, m in zip(angle.ravel(), mask.ravel()):
                put_uint8(a)
                put_uint8(m)
def loadDataset(path: str, loadGT: bool = True):
    """ Loads the FVC-TEST dataset.

    Args:
        path: Path to the dataset index file; entries are resolved relative to its directory.
        loadGT: whether to load the ground truth information or not.

    Return:
        A generator of pairs (X, y): X serializes the image, its mask and its
        border specifications; y is the serialized orientation field ground
        truth when loadGT is True, otherwise the image path.
    """
    base_dir = os.path.dirname(path)
    with open(path, 'r') as index_file:
        # The first line declares the number of entries; read and discard it.
        _ = int(index_file.readline())
        for entry in index_file:
            name, step_txt, border_txt = entry.split()
            step = int(step_txt)
            border = int(border_txt)
            # Grayscale image as a float matrix.
            image_path = os.path.join(base_dir, name)
            image = np.array(PIL.Image.open(image_path).convert('L')).astype(float)
            # Foreground mask stored next to the image.
            mask = MaskProxy().read(os.path.splitext(image_path)[0] + '.fg')
            specs = [border, border, step, step]
            # Crop the image to the extent implied by the full-resolution mask.
            full_mask = convert_to_full(mask, border_x=border, border_y=border, step_x=step, step_y=step, mode='constant')
            image = image[:full_mask.shape[0], :full_mask.shape[1]]
            if loadGT:
                # Read the sampled ground-truth orientations and convert to a field.
                lro, _ = FieldProxy().read(os.path.splitext(image_path)[0] + '.gt', full=False)
                field = polar2cart(lro, 1, retField=True)
                yield (LROEstimator.serialize_Xrow(image, mask, specs), LROEstimator.serialize_yrow(field))
            else:
                yield (LROEstimator.serialize_Xrow(image, mask, specs), image_path)
def countDatasetElements(path):
    """ Returns the number of entries declared on the first line of a dataset index file.

    Args:
        path: Path to the dataset index file.

    Return:
        The integer written on the file's first line.
    """
    with open(path, 'r') as index_file:
        first_line = index_file.readline()
    return int(first_line)
def loadSegmentationDataset(sdir: str, odir: str):
    """ Loads the dataset for segmentation evaluation.

    Args:
        sdir: Path to the segmented images; all the images shall be direct children of this directory.
        odir: Path to the original images; this folder shall contain as direct children the
            FVC2000/FVC2002/FVC2004 database folders (e.g. the main root of the DVD shipped
            with Handbook of Fingerprint Recognition).

    Note:
        Segmented files whose names do not match the expected FVC naming pattern are skipped.

    Return:
        A generator of pairs (original_path, segmented_path).
    """
    name_re = re.compile('(FVC\\d+)_(\\w+)_\\w+_(\\d+)_(\\d+)')
    for seg_path in recursively_scan_dir_gen(sdir, '.png'):
        parsed = name_re.match(os.path.basename(seg_path))
        if parsed is None:
            continue
        competition = parsed[1]  # FVCxxxx
        database = parsed[2]     # e.g. DB1
        subject = parsed[3]
        impression = parsed[4]
        # Subjects 1-100 belong to set "a", the remaining ones to set "b".
        subset = 'a' if int(subject) <= 100 else 'b'
        orig_path = os.path.join(
            odir,
            competition,
            'Dbs',
            # converts DB1 to Db1, then appends the subset letter
            database.title() + '_' + subset,
            '{}_{}.tif'.format(subject, impression)
        )
        yield (orig_path, seg_path)
def loadMatchingDatasetFVC(path: str):
    """ Loads the FVC-TEST dataset for matching evaluation.

    Args:
        path: Directory with the FVC-TEST dataset.

    Return:
        A dictionary whose keys are triples (competition, db_number, db_type)
        and whose values are pairs of:

        - a list of pairs (X, y), where X is a pair of image filenames and y is
          the ground truth label (0 from the .MFA index, 1 from the .MFR index);
        - the list of all images found for that database.
    """
    _, all_image_files = recursively_scan_dir(path, '.tif')
    _, index_files = recursively_scan_dir(path, '.MFA')
    comp_pattern = re.compile('(FVC\\d+)')
    competitions = {}
    # Loop over the four possible databases
    for db_n in range(1, 5):
        for MFA in index_files:
            # Get index for false matches (same name, trailing 'A' -> 'R')
            MFR = MFA[:-1]+'R'
            # Retrieve competition from the index file path (e.g. "FVC2000")
            match = comp_pattern.search(MFA)
            if match:
                competition = match[1]
            else:
                competition = 'NULL'
            # Retrieve database type (a or b) from the index filename
            db_type = MFA[-5].lower()
            # Create a new key for this competition
            comp_key = (competition, db_n, db_type)
            # NOTE(review): this empty list is always overwritten by the tuple
            # assigned at the end of this loop iteration; it is dead code.
            competitions[comp_key] = []
            # Generate database name (e.g. "Db1_a")
            db_name = 'Db{}_{}'.format(db_n, db_type)
            # Take the subset of images related to this dataset
            image_files = [name for name in all_image_files if os.path.basename(os.path.dirname(name)) == db_name]
            # Load all the pairs that will be matched; pairs listed in the .MFA
            # index are labelled 0, those in the .MFR index are labelled 1.
            challenge_pairs = []
            for ifile, gt in zip([MFA, MFR], [0, 1]):
                dir_ = os.path.dirname(ifile)
                with open(ifile, 'r') as file_:
                    for line in file_:
                        file1, file2 = line.split()
                        path1 = os.path.join(dir_, db_name, file1)
                        path2 = os.path.join(dir_, db_name, file2)
                        challenge_pairs.append( ((path1, path2), gt) )
            # Update the competition dictionary
            competitions[comp_key] = (challenge_pairs, image_files)
    return competitions
def loadMatchingDatasetNIST(path: str, ratio: float = 2.0, verbose: bool = True):
    """ Load NIST SD04 for matching.

    Args:
        path: Path to the folder containing the images.
        ratio: Ratio between the number of impostor and genuine matches.
        verbose: whether to print some basic information about the dataset.

    Return:
        A dictionary with the single key ('NIST', 'SD04', '_') mapped to a pair
        (challenge_pairs, image_files), where challenge_pairs is a list of
        ((path1, path2), label) with label 1 for genuine and 0 for impostor.

    Note:
        NOTE(review): impostor pairs are drawn at random with replacement, so
        duplicates are possible; and if the image pool is too small relative to
        `ratio`, the sampling loop below may take long or never terminate —
        confirm the expected pool size with callers.
    """
    # Load all images
    _, image_files = recursively_scan_dir(path, ['.jpg', '.jpeg', '.png', '.bmp', '.tif', '.tiff'])
    # Split between first and second impression ('f' prefix marks the first)
    f_image_files = list(filter(lambda s: os.path.basename(s)[0]=='f', image_files))
    # Collect the genuine matches: each f-file paired with its s-counterpart
    challenge_pairs = []
    for ffile in f_image_files:
        basename = os.path.basename(ffile)
        basename = 's'+basename[1:]
        sfile = os.path.join( os.path.dirname(ffile), basename )
        challenge_pairs.append( ((ffile, sfile), 1) )
    # Get the total number of impostor and genuine matches
    genuine_matches = len(challenge_pairs)
    impostor_matches = int(genuine_matches * ratio)
    total_matches = genuine_matches + impostor_matches
    if verbose:
        print('{} genuine matches and {} impostor matches will be selected'.format(genuine_matches, impostor_matches))
    # Collect the impostor matches by rejection sampling:
    while True:
        pair = random_combination(image_files, 2)
        left_bname = os.path.basename(pair[0])
        right_bname = os.path.basename(pair[1])
        if left_bname[1:] == right_bname[1:]:
            continue # genuine or the same image
        else:
            challenge_pairs.append( (pair, 0) )
        if len(challenge_pairs) >= total_matches:
            break
    competitions = {
        ('NIST', 'SD04', '_'): (challenge_pairs, image_files)
    }
    return competitions
| 42.383234 | 258 | 0.596355 | import os
import re
import io
import numpy as np
import PIL.Image
import typing
from pynger.types import Image, Mask, Field
from pynger.fingerprint.tuning_lro import LROEstimator
from pynger.fingerprint.sampling import convert_to_full, subsample
from pynger.field.manipulation import polar2cart
from pynger.misc import recursively_scan_dir_gen, recursively_scan_dir, random_combination
from itertools import combinations, starmap
class Proxy:
def write(self, path: str):
raise NotImplementedError("Derived classes must reimplement this method")
def read(self, path: str):
raise NotImplementedError("Derived classes must reimplement this method")
class MaskProxy(Proxy):
    """ (De)serializes a boolean foreground mask (FVC-OnGoing .fg format). """
    def __init__(self, *args):
        """ Initializes from a numpy array, a file path, or nothing (mask=None). """
        if len(args) == 1:
            if isinstance(args[0], np.ndarray):
                self.mask = args[0]
            elif isinstance(args[0], str):
                self.read(args[0])
            else:
                raise TypeError("Arguments not recognized")
        else:
            self.mask = None
    def read(self, path: str, full: bool = True):
        """ Reads the mask from `path` ("rows cols" header, then 0/1 rows). """
        if not os.path.exists(path):
            raise RuntimeError("The input file does not exist")
        with open(path, 'r') as f:
            shape = tuple([int(n) for n in f.readline().split()])
            mask = np.empty(shape, dtype=bool)
            for row_n, line in enumerate(f):
                mask[row_n, :] = [bool(int(n)) for n in line.split()]
        self.mask = mask
        return mask
    def write(self, path: str):
        """ Writes the mask to `path` in a format read() can parse back. """
        # BUG FIX: the previous implementation printed the repr of the shape
        # tuple and of each numpy row (e.g. "(2, 3)" and "[1 0 1]"), which
        # read() cannot parse. Emit space-separated integers instead.
        with open(path, 'w') as f:
            print(*self.mask.shape, file=f)
            for row in self.mask.astype(int):
                print(*row, file=f)
class FieldProxy(Proxy):
    """ (De)serializes an orientation field and its validity mask
    (FVC-OnGoing .gt binary format, "DIRIMG00" header). """
    def __init__(self, *args):
        """ Initializes from (angle, mask) arrays (copied), a path, or nothing. """
        if len(args) == 2 and isinstance(args[0], np.ndarray) and isinstance(args[1], np.ndarray):
            self.angle, self.mask = args[0].copy(), args[1].copy()
        elif len(args) == 1 and isinstance(args[0], str):
            self.read(args[0])
        else:
            self.angle, self.mask = None, None
    def read(self, path: str, full: bool = True):
        """ Reads the field; returns (angle, mask), angles in radians in [0, pi). """
        if not os.path.exists(path):
            raise RuntimeError("The input file does not exist")
        with open(path, 'rb') as f:
            # Skip the 8-byte ASCII header ("DIRIMG00").
            f.read(8)
            # Sampling specifications: borders, steps, then grid size.
            get_next_int = lambda: int.from_bytes(f.read(4), byteorder='little', signed=True)
            self.border_x = get_next_int()
            self.border_y = get_next_int()
            self.step_x = get_next_int()
            self.step_y = get_next_int()
            cols = get_next_int()
            rows = get_next_int()
            # Values are interleaved (angle, mask) byte pairs, row-major.
            get_next_uint8 = lambda: int.from_bytes(f.read(1), byteorder='little', signed=False)
            content = [(get_next_uint8(), get_next_uint8()) for _ in range(cols*rows)]
            angle, mask = zip(*content)
            angle = np.array(angle, dtype=float).reshape((rows, cols))
            angle *= np.pi / 255.0  # bytes 0..255 map to radians 0..pi
            mask = np.array(mask, dtype=bool).reshape((rows, cols))
            # Optionally expand the sampled grid back to full resolution.
            if full:
                self.angle = convert_to_full(angle, border_x=self.border_x, border_y=self.border_y, step_x=self.step_x, step_y=self.step_y, mode='constant')
                self.mask = convert_to_full(mask, border_x=self.border_x, border_y=self.border_y, step_x=self.step_x, step_y=self.step_y, mode='constant')
            else:
                self.angle = angle
                self.mask = mask
        return self.angle, self.mask
    def write(self, path: str, **kwargs):
        """ Writes the field; accepts border_x/border_y/step_x/step_y and a
        `subsample` keyword (set it to False to write the arrays as-is). """
        bx = kwargs.get('border_x', 14)
        by = kwargs.get('border_y', 14)
        sx = kwargs.get('step_x', 8)
        sy = kwargs.get('step_y', 8)
        needSubsample = kwargs.pop('subsample', True)
        if self.angle.shape != self.mask.shape:
            raise RuntimeError('angle and mask sizes mismatch')
        if needSubsample:
            angle = subsample(self.angle, is_field=False, smooth=False, **kwargs)
            mask = subsample(self.mask, is_field=False, smooth=False, **kwargs)
        else:
            angle = self.angle
            mask = self.mask
        # BUG FIX: operate on new arrays — the previous in-place "angle *= ..."
        # silently corrupted self.angle when subsampling was disabled.
        angle = (angle * (255.0 / np.pi)).astype(int)  # radians 0..pi -> bytes 0..255
        mask = mask.astype(int)
        # BUG FIX: guard against an all-background mask, which previously
        # raised ZeroDivisionError on mask.max() == 0.
        peak = mask.max()
        if peak > 0:
            mask = mask * int(255 / peak)
        with open(path, 'wb') as f:
            f.write("DIRIMG00".encode('ascii'))
            put_int = lambda n: f.write(int(n).to_bytes(4, byteorder='little', signed=True))
            put_int(bx)
            put_int(by)
            put_int(sx)
            put_int(sy)
            rows, cols = angle.shape
            put_int(cols)
            put_int(rows)
            # Interleaved (angle, mask) byte pairs, row-major.
            put_uint8 = lambda n: f.write(int(n).to_bytes(1, byteorder='little', signed=False))
            for a, m in zip(angle.ravel(), mask.ravel()):
                put_uint8(a)
                put_uint8(m)
def loadDataset(path: str, loadGT: bool = True):
    """Yield serialized training rows described by a dataset index file.

    The index file's first line is the element count (see
    ``countDatasetElements``); each following line is
    "<image-name> <step> <border>".  For every entry the grayscale image,
    its foreground mask (<name>.fg) and, when *loadGT* is True, the
    ground-truth orientation field (<name>.gt) are loaded.

    Yields (X, y) where X is ``LROEstimator.serialize_Xrow(image, mask,
    specs)`` and y is either the serialized field (loadGT=True) or the
    image path.
    """
    with open(path, 'r') as f:
        _ = int(f.readline())  # declared element count; not needed while streaming
        for line in f:
            name, step, bd = line.split()
            step = int(step)
            bd = int(bd)
            image_path = os.path.join(os.path.dirname(path), name)
            # Grayscale ('L') image as float array
            image = np.array(PIL.Image.open(image_path).convert('L')).astype(float)
            mask_path = os.path.splitext(image_path)[0]+'.fg'
            mask = MaskProxy().read(mask_path)
            specs = [bd, bd, step, step]
            # Crop the image to the size implied by the expanded mask
            _mask = convert_to_full(mask, border_x=bd, border_y=bd, step_x=step, step_y=step, mode='constant')
            image = image[:_mask.shape[0], :_mask.shape[1]]
            if loadGT:
                field_path = os.path.splitext(image_path)[0]+'.gt'
                # Keep the sub-sampled grid (full=False) for the ground truth
                lro, _ = FieldProxy().read(field_path, full=False)
                field = polar2cart(lro, 1, retField=True)
                yield (LROEstimator.serialize_Xrow(image, mask, specs), LROEstimator.serialize_yrow(field))
            else:
                yield (LROEstimator.serialize_Xrow(image, mask, specs), image_path)
def countDatasetElements(path):
    """Return the element count stored on the first line of a dataset index file."""
    with open(path, 'r') as index_file:
        first_line = index_file.readline()
    return int(first_line)
def loadSegmentationDataset(sdir: str, odir: str):
    """Pair each segmentation PNG under *sdir* with its FVC original under *odir*.

    Segmentation names look like ``FVC2000_Db1_xx_3_7.png``; the matching
    original is ``<odir>/FVC2000/Dbs/Db1_<a|b>/3_7.tif`` where finger
    numbers <= 100 go to the '_a' database and the rest to '_b'.

    Yields (original_path, segmentation_path) tuples; non-matching file
    names are skipped.
    """
    name_pattern = re.compile('(FVC\\d+)_(\\w+)_\\w+_(\\d+)_(\\d+)')
    for seg_path in recursively_scan_dir_gen(sdir, '.png'):
        found = name_pattern.match(os.path.basename(seg_path))
        if not found:
            continue
        half = 'a' if int(found[3]) <= 100 else 'b'
        orig_path = os.path.join(
            odir,
            found[1],
            'Dbs',
            found[2].title() + '_' + half,
            '{}_{}.tif'.format(found[3], found[4])
        )
        yield (orig_path, seg_path)
def loadMatchingDatasetFVC(path: str):
    """Build the FVC matching challenges found under *path*.

    Scans for fingerprint images (.tif) and index files (.MFA, with a
    sibling .MFR obtained by swapping the last letter).  Each index line
    names a pair of files to match; .MFA pairs are labelled 0 and .MFR
    pairs 1 (ground truth).

    Returns a dict keyed by (competition, db_number 1..4, db_type) whose
    values are (challenge_pairs, image_files) with challenge_pairs a list
    of ((path1, path2), gt) tuples.
    """
    _, all_image_files = recursively_scan_dir(path, '.tif')
    _, index_files = recursively_scan_dir(path, '.MFA')
    comp_pattern = re.compile('(FVC\\d+)')
    competitions = {}
    for db_n in range(1, 5):
        for MFA in index_files:
            MFR = MFA[:-1]+'R'  # companion index file: same name, trailing 'A' -> 'R'
            match = comp_pattern.search(MFA)
            if match:
                competition = match[1]
            else:
                competition = 'NULL'
            db_type = MFA[-5].lower()  # letter before ".MFA" selects the a/b split
            comp_key = (competition, db_n, db_type)
            # NOTE(review): this placeholder is always overwritten below;
            # presumably kept so the key exists even if reading fails — confirm.
            competitions[comp_key] = []
            db_name = 'Db{}_{}'.format(db_n, db_type)
            image_files = [name for name in all_image_files if os.path.basename(os.path.dirname(name)) == db_name]
            challenge_pairs = []
            for ifile, gt in zip([MFA, MFR], [0, 1]):
                dir_ = os.path.dirname(ifile)
                with open(ifile, 'r') as file_:
                    for line in file_:
                        file1, file2 = line.split()
                        path1 = os.path.join(dir_, db_name, file1)
                        path2 = os.path.join(dir_, db_name, file2)
                        challenge_pairs.append( ((path1, path2), gt) )
            competitions[comp_key] = (challenge_pairs, image_files)
    return competitions
def loadMatchingDatasetNIST(path: str, ratio: float = 2.0, verbose: bool = True):
    """Build matching challenges for a NIST-style dataset under *path*.

    Genuine pairs: every image whose basename starts with 'f' is matched
    with its 's'-prefixed counterpart in the same directory (label 1).
    Impostor pairs (label 0): random image pairs whose basenames differ
    after the first character, sampled until there are
    ``ratio * genuine_matches`` of them.

    Returns a dict with the single key ('NIST', 'SD04', '_') mapping to
    (challenge_pairs, image_files).
    """
    _, image_files = recursively_scan_dir(path, ['.jpg', '.jpeg', '.png', '.bmp', '.tif', '.tiff'])
    f_image_files = list(filter(lambda s: os.path.basename(s)[0]=='f', image_files))
    challenge_pairs = []
    for ffile in f_image_files:
        basename = os.path.basename(ffile)
        basename = 's'+basename[1:]  # the genuine mate shares the name except the prefix
        sfile = os.path.join( os.path.dirname(ffile), basename )
        challenge_pairs.append( ((ffile, sfile), 1) )
    genuine_matches = len(challenge_pairs)
    impostor_matches = int(genuine_matches * ratio)
    total_matches = genuine_matches + impostor_matches
    if verbose:
        print('{} genuine matches and {} impostor matches will be selected'.format(genuine_matches, impostor_matches))
    while True:
        pair = random_combination(image_files, 2)
        left_bname = os.path.basename(pair[0])
        right_bname = os.path.basename(pair[1])
        if left_bname[1:] == right_bname[1:]:
            # same finger (only the f/s prefix differs): not an impostor pair
            continue
        else:
            challenge_pairs.append( (pair, 0) )
        if len(challenge_pairs) >= total_matches:
            break
    competitions = {
        ('NIST', 'SD04', '_'): (challenge_pairs, image_files)
    }
    return competitions
| true | true |
f72f69d7416486496c8233c0c189ee6defb0f939 | 834 | py | Python | sketches/automata_00/components.py | heerdyes/raspi-art | 2e38c1926b6a6f4c745e0629b193d9c3c15acc22 | [
"MIT"
] | 1 | 2021-02-02T12:36:07.000Z | 2021-02-02T12:36:07.000Z | sketches/automata_00/components.py | heerdyes/raspi-art | 2e38c1926b6a6f4c745e0629b193d9c3c15acc22 | [
"MIT"
] | null | null | null | sketches/automata_00/components.py | heerdyes/raspi-art | 2e38c1926b6a6f4c745e0629b193d9c3c15acc22 | [
"MIT"
] | null | null | null | class FSM:
def __init__(self,states,alphabet,transitionmatrix,currstate):
    """Finite state machine.

    S: state collection; A: input alphabet (indexable, searched with
    .index); TM: transition table indexed as TM[state][symbol_index];
    currstate: the current state.
    """
    self.S=states
    self.A=alphabet
    self.TM=transitionmatrix
    self.currstate=currstate
def accept(self,sym):
    """Consume one input symbol and advance the machine.

    Symbols outside the alphabet are silently ignored.  Raises Exception
    when the transition table holds no (truthy) target for the current
    state and symbol.

    Bug fix: the original indexed ``self.TM`` with the bare name
    ``currstate``, which is undefined in this scope (NameError on every
    call that reached the lookup); the current state lives on ``self``.
    """
    if sym not in self.A:
        return
    symi = self.A.index(sym)
    nxt = self.TM[self.currstate][symi]
    if nxt:
        self.currstate = nxt
    else:
        raise Exception('undefined transition delta(%s,%s)'%(self.currstate,sym))
class FSMView:
    """Rectangular sketch view for an FSM.

    ``render`` relies on Processing-style drawing globals (fill, stroke,
    rect) being in scope when it runs.
    """

    def __init__(self, x, y, w, h, bg, fg, fsm):
        self.x = x
        self.y = y
        self.w = w
        self.h = h
        self.bg = bg
        self.fg = fg
        self.fsm = fsm

    def render(self):
        # Bug fix: `self` was missing from the signature, so any
        # instance call raised TypeError and the self.* lookups in the
        # body could never resolve.
        fill(self.bg)
        stroke(self.fg)
        rect(self.x, self.y, self.w, self.h)

    def transition(self, evt):
        # Bug fix: `self` was missing here as well.  Placeholder for
        # event-driven transitions.
        pass
| 23.828571 | 85 | 0.551559 | class FSM:
def __init__(self,states,alphabet,transitionmatrix,currstate):
    """Store the FSM definition: states, alphabet, transition table, start state."""
    self.S=states
    self.A=alphabet
    self.TM=transitionmatrix
    self.currstate=currstate

def accept(self,sym):
    """Consume one symbol; symbols outside the alphabet are ignored.

    Bug fix: the original indexed ``self.TM`` with the undefined bare
    name ``currstate`` (NameError); the current state lives on ``self``.
    """
    if sym not in self.A:
        return
    symi = self.A.index(sym)
    nxt = self.TM[self.currstate][symi]
    if nxt:
        self.currstate = nxt
    else:
        raise Exception('undefined transition delta(%s,%s)'%(self.currstate,sym))
class FSMView:
    """Rectangular sketch view for an FSM.

    ``render`` relies on Processing-style drawing globals (fill, stroke,
    rect) being in scope when it runs.
    """

    def __init__(self, x, y, w, h, bg, fg, fsm):
        self.x = x
        self.y = y
        self.w = w
        self.h = h
        self.bg = bg
        self.fg = fg
        self.fsm = fsm

    def render(self):
        # Bug fix: `self` was missing from the signature, so instance
        # calls raised TypeError.
        fill(self.bg)
        stroke(self.fg)
        rect(self.x, self.y, self.w, self.h)

    def transition(self, evt):
        # Bug fix: `self` was missing here as well.
        pass
| true | true |
f72f6b94236d1a6cd8a9243530cbeedf5ab82dd6 | 763 | py | Python | yaml/anchors-and-aliases/yaml_same_ids.py | progala/ttl255.com | 100ae0e96ab7a9f72d0c18d985de2311a517878f | [
"MIT"
] | 32 | 2018-05-28T13:35:49.000Z | 2022-03-05T23:18:32.000Z | yaml/anchors-and-aliases/yaml_same_ids.py | progala/ttl255.com | 100ae0e96ab7a9f72d0c18d985de2311a517878f | [
"MIT"
] | 1 | 2021-09-23T23:32:50.000Z | 2021-09-23T23:32:50.000Z | yaml/anchors-and-aliases/yaml_same_ids.py | progala/ttl255.com | 100ae0e96ab7a9f72d0c18d985de2311a517878f | [
"MIT"
] | 18 | 2019-02-19T22:36:45.000Z | 2022-02-12T19:13:35.000Z | # yaml_same_ids.py
import yaml

# Two interface definitions that will come to share one mutable list.
interfaces = dict(
    Ethernet1=dict(description="Uplink to core-1", speed=1000, mtu=9000),
    Ethernet2=dict(description="Uplink to core-2", speed=1000, mtu=9000),
)

# Assign the SAME list object to both interfaces (no copy is made).
prop_vals = ["pim", "ptp", "lldp"]
interfaces["Ethernet1"]["properties"] = prop_vals
interfaces["Ethernet2"]["properties"] = prop_vals

# Show IDs referenced by "properties" key — identical ids prove both keys
# point at one shared list object.
print("Ethernet1 properties object id:", id(interfaces["Ethernet1"]["properties"]))
print("Ethernet2 properties object id:", id(interfaces["Ethernet2"]["properties"]))

# Dump YAML to stdout (the shared object should surface as a YAML
# anchor/alias pair in the output — see the anchors-and-aliases demo).
print("\n##### Resulting YAML:\n")
print(yaml.safe_dump(interfaces))

# Dump YAML to file
with open("yaml_files/interfaces_same_ids.yml", "w") as fout:
    yaml.safe_dump(interfaces, fout)
| 28.259259 | 83 | 0.720839 |
import yaml

# Two interface definitions that will come to share one mutable list.
interfaces = dict(
    Ethernet1=dict(description="Uplink to core-1", speed=1000, mtu=9000),
    Ethernet2=dict(description="Uplink to core-2", speed=1000, mtu=9000),
)

# Assign the SAME list object to both interfaces (no copy is made).
prop_vals = ["pim", "ptp", "lldp"]
interfaces["Ethernet1"]["properties"] = prop_vals
interfaces["Ethernet2"]["properties"] = prop_vals

# Identical ids prove both keys point at one shared list object.
print("Ethernet1 properties object id:", id(interfaces["Ethernet1"]["properties"]))
print("Ethernet2 properties object id:", id(interfaces["Ethernet2"]["properties"]))

# Dump to stdout, then to a file; the shared object should surface as a
# YAML anchor/alias pair in the emitted document.
print("\n##### Resulting YAML:\n")
print(yaml.safe_dump(interfaces))

with open("yaml_files/interfaces_same_ids.yml", "w") as fout:
    yaml.safe_dump(interfaces, fout)
| true | true |
f72f6d12c31fc8108305490ba1c038179074925b | 1,624 | py | Python | leet_code_array/mid_questions/3sum_cloest.py | IvanFan/leetcode-python | 72a12a107681cc5f09f1f88537c5b0741f0818a4 | [
"MIT"
] | null | null | null | leet_code_array/mid_questions/3sum_cloest.py | IvanFan/leetcode-python | 72a12a107681cc5f09f1f88537c5b0741f0818a4 | [
"MIT"
] | null | null | null | leet_code_array/mid_questions/3sum_cloest.py | IvanFan/leetcode-python | 72a12a107681cc5f09f1f88537c5b0741f0818a4 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
class Solution:
    """LeetCode 16: 3Sum Closest."""

    def threeSumClosest(self, nums, target):
        """Return the sum of three numbers from *nums* closest to *target*.

        :type nums: List[int]
        :type target: int
        :rtype: int

        Sorts once (in place), then for each anchor runs a two-pointer
        scan.  A per-anchor shortcut skips anchors whose entire sum range
        lies on one side of the target.  O(n^2) time.  The per-iteration
        debug prints from the original were removed.
        """
        best = 0
        best_gap = 9999  # NOTE: assumes |target - sum| < 9999, as in the original
        n = len(nums)
        if n == 3:
            return sum(nums)
        nums.sort()
        for i in range(n - 2):
            lo = i + 1
            hi = n - 1
            largest = nums[i] + nums[hi] + nums[hi - 1]
            smallest = nums[i] + nums[lo] + nums[lo + 1]
            if largest < target:
                # Every triple with this anchor sums below target.
                if best_gap > abs(target - largest):
                    best_gap = abs(target - largest)
                    best = largest
                continue
            if smallest > target:
                # Every triple with this anchor sums above target.
                if best_gap > abs(target - smallest):
                    best_gap = abs(target - smallest)
                    best = smallest
                continue
            while lo < hi:
                total = nums[i] + nums[lo] + nums[hi]
                if abs(target - total) < best_gap:
                    best_gap = abs(target - total)
                    best = total
                if total < target:
                    lo += 1
                elif total > target:
                    hi -= 1
                else:
                    return total  # exact hit cannot be beaten
        return best
| 32.48 | 58 | 0.404557 |
class Solution:
    """LeetCode 16: 3Sum Closest."""

    def threeSumClosest(self, nums, target):
        """Return the sum of three numbers from *nums* closest to *target*.

        Sorts once (in place), then for each anchor runs a two-pointer
        scan.  A per-anchor shortcut skips anchors whose entire sum range
        lies on one side of the target.  O(n^2) time.  The per-iteration
        debug prints from the original were removed.
        """
        best = 0
        best_gap = 9999  # NOTE: assumes |target - sum| < 9999, as in the original
        n = len(nums)
        if n == 3:
            return sum(nums)
        nums.sort()
        for i in range(n - 2):
            lo = i + 1
            hi = n - 1
            largest = nums[i] + nums[hi] + nums[hi - 1]
            smallest = nums[i] + nums[lo] + nums[lo + 1]
            if largest < target:
                # Every triple with this anchor sums below target.
                if best_gap > abs(target - largest):
                    best_gap = abs(target - largest)
                    best = largest
                continue
            if smallest > target:
                # Every triple with this anchor sums above target.
                if best_gap > abs(target - smallest):
                    best_gap = abs(target - smallest)
                    best = smallest
                continue
            while lo < hi:
                total = nums[i] + nums[lo] + nums[hi]
                if abs(target - total) < best_gap:
                    best_gap = abs(target - total)
                    best = total
                if total < target:
                    lo += 1
                elif total > target:
                    hi -= 1
                else:
                    return total  # exact hit cannot be beaten
        return best
| true | true |
f72f6ddb7ac453475965a5a9f8b29db1c31fc185 | 3,126 | py | Python | keyboardRunCar.py | Adhunikr13/FORCAS | 1569a09f146df525b98caa631cb2c806346ba16b | [
"CC0-1.0",
"MIT"
] | null | null | null | keyboardRunCar.py | Adhunikr13/FORCAS | 1569a09f146df525b98caa631cb2c806346ba16b | [
"CC0-1.0",
"MIT"
] | null | null | null | keyboardRunCar.py | Adhunikr13/FORCAS | 1569a09f146df525b98caa631cb2c806346ba16b | [
"CC0-1.0",
"MIT"
] | null | null | null | import time
import RPi.GPIO as GPIO
import sys
from pynput import keyboard
import csv
##from termios import tcflush, TCIOFLUSH, TCIFLUSH
from multiprocessing import Process
from datetime import datetime
GPIO.cleanup()
Forward = 17
Backward = 27
Left = 23
Right = 24
sleeptime = 0.25
speed = 0.5
mode=GPIO.getmode()
GPIO.setmode(GPIO.BCM)
GPIO.setup(Forward, GPIO.OUT)
GPIO.setup(Backward, GPIO.OUT)
GPIO.setup(Left, GPIO.OUT)
GPIO.setup(Right, GPIO.OUT)
def _pulse(pin, label, duration):
    """Drive *pin* HIGH for *duration* seconds, announcing *label*.

    Shared helper for the four motion commands below; the originals were
    four identical copy-pasted bodies (plus dead commented-out flush
    lines, now dropped).
    """
    GPIO.output(pin, GPIO.HIGH)
    print(label)
    time.sleep(duration)
    GPIO.output(pin, GPIO.LOW)

def forward(x):
    """Pulse the Forward pin for x seconds."""
    _pulse(Forward, "Moving Forward", x)

def left(x):
    """Pulse the Left pin for x seconds."""
    _pulse(Left, "Moving Left", x)

def right(x):
    """Pulse the Right pin for x seconds."""
    _pulse(Right, "Moving Right", x)

def reverse(x):
    """Pulse the Backward pin for x seconds."""
    _pulse(Backward, "Moving Backward", x)
'''
def runInParallel(*fns):
proc = []
for fn in fns:
global p
p = Process(target=fn, args=(speed,))
p.start()
proc.append(p)
#for p in proc:
# p.join()
while not p.empty():
p.get()
'''
def on_press(key):
    """pynput key-press callback: drive the car and log the keystroke.

    w/a/s/d trigger forward/left/reverse/right pulses; q and e combine a
    forward pulse with a slightly longer turn.  Each press of a
    character key is appended to controls.csv with a timestamp.  Special
    keys have no ``.char`` attribute, so they raise AttributeError and
    are only printed.

    NOTE(review): the CSV header row is never written, and a row is
    appended even for characters with no motion mapping — confirm both
    are intended.
    """
    try:
        with open("controls.csv","a") as filename:
            fieldnames = ['images','controls']
            writer = csv.DictWriter(filename, fieldnames=fieldnames)
            if (key.char == 's'):
                print("speed")
                reverse(sleeptime)
            elif(key.char == 'w'):
                forward(sleeptime)
            elif(key.char == 'a'):
                left(sleeptime)
            elif(key.char == 'd'):
                right(sleeptime)
            elif(key.char == 'q'):
                # Dead multiprocessing experiment kept verbatim below.
                '''runInParallel(forward,left)
                p.terminate()'''
                forward(sleeptime)
                left(sleeptime+0.10)  # turn runs slightly longer than the forward pulse
                '''p1 = Process(target=forward, args=(speed,))
                p1.start()
                p2 = Process(target=left, args=(speed,))
                p2.start()
                #p1.join()
                #p2.join()
                p1.get()
                p2.get()
                #p1.terminate()'''
            elif(key.char == 'e'):
                forward(sleeptime)
                right(sleeptime+0.10)
            timestamp = datetime.now()
            writer.writerows([{'images': str(timestamp), 'controls': key.char}])
    except AttributeError: \
        print('special key {0} pressed'.format(
            key))
def on_release(key):
    """Key-release callback: returning False stops the pynput listener on Esc."""
    return False if key == keyboard.Key.esc else None
if __name__ =='__main__':
    try:
        # Block on the keyboard listener until on_release returns False (Esc).
        with keyboard.Listener(
                on_press=on_press,
                on_release=on_release) as listener:
            listener.join()
    finally:
        # Always release the GPIO pins, even on Ctrl-C or a listener error.
        print("closed")
        GPIO.cleanup()
| 25.622951 | 80 | 0.546065 | import time
import RPi.GPIO as GPIO
import sys
from pynput import keyboard
import csv
e import datetime
GPIO.cleanup()
Forward = 17
Backward = 27
Left = 23
Right = 24
sleeptime = 0.25
speed = 0.5
mode=GPIO.getmode()
GPIO.setmode(GPIO.BCM)
GPIO.setup(Forward, GPIO.OUT)
GPIO.setup(Backward, GPIO.OUT)
GPIO.setup(Left, GPIO.OUT)
GPIO.setup(Right, GPIO.OUT)
def forward(x):
GPIO.output(Forward, GPIO.HIGH)
print("Moving Forward")
time.sleep(x)
IO.output(Left, GPIO.HIGH)
print("Moving Left")
time.sleep(x)
.output(Right, GPIO.HIGH)
print("Moving Right")
time.sleep(x)
PIO.output(Backward, GPIO.HIGH)
print("Moving Backward")
time.sleep(x)
try:
with open("controls.csv","a") as filename:
fieldnames = ['images','controls']
writer = csv.DictWriter(filename, fieldnames=fieldnames)
if (key.char == 's'):
print("speed")
reverse(sleeptime)
elif(key.char == 'w'):
forward(sleeptime)
elif(key.char == 'a'):
left(sleeptime)
elif(key.char == 'd'):
right(sleeptime)
elif(key.char == 'q'):
'''runInParallel(forward,left)
p.terminate()'''
forward(sleeptime)
left(sleeptime+0.10)
'''p1 = Process(target=forward, args=(speed,))
p1.start()
p2 = Process(target=left, args=(speed,))
p2.start()
#p1.join()
#p2.join()
p1.get()
p2.get()
#p1.terminate()'''
elif(key.char == 'e'):
forward(sleeptime)
right(sleeptime+0.10)
timestamp = datetime.now()
writer.writerows([{'images': str(timestamp), 'controls': key.char}])
except AttributeError: \
print('special key {0} pressed'.format(
key))
def on_release(key):
if (key == keyboard.Key.esc):
return False
if __name__ =='__main__':
try:
with keyboard.Listener(
on_press=on_press,
on_release=on_release) as listener:
listener.join()
finally:
print("closed")
GPIO.cleanup()
| true | true |
f72f6e019ee2fd66663eb569edbe38804ab04fd3 | 575 | py | Python | submissions/tenka1-2012-qualC/i.py | m-star18/atcoder | 08e475810516602fa088f87daf1eba590b4e07cc | [
"Unlicense"
] | 1 | 2021-05-10T01:16:28.000Z | 2021-05-10T01:16:28.000Z | submissions/tenka1-2012-qualC/i.py | m-star18/atcoder | 08e475810516602fa088f87daf1eba590b4e07cc | [
"Unlicense"
] | 3 | 2021-05-11T06:14:15.000Z | 2021-06-19T08:18:36.000Z | submissions/tenka1-2012-qualC/i.py | m-star18/atcoder | 08e475810516602fa088f87daf1eba590b4e07cc | [
"Unlicense"
] | null | null | null | import sys
import math
input = sys.stdin.readline
def eratosthenes(limit):
if limit == 1:
return []
A = [i for i in range(2, limit + 1)]
P = []
for i in range(limit):
prime = min(A)
if prime > math.sqrt(limit):
break
P.append(prime)
for j in range(limit):
if j >= len(A):
break
if A[j] % prime == 0:
A.pop(j)
continue
for a in A:
P.append(a)
return P
n = int(input())
ans = len(eratosthenes(n - 1))
print(ans)
| 16.428571 | 40 | 0.464348 | import sys
import math
input = sys.stdin.readline
def eratosthenes(limit):
if limit == 1:
return []
A = [i for i in range(2, limit + 1)]
P = []
for i in range(limit):
prime = min(A)
if prime > math.sqrt(limit):
break
P.append(prime)
for j in range(limit):
if j >= len(A):
break
if A[j] % prime == 0:
A.pop(j)
continue
for a in A:
P.append(a)
return P
n = int(input())
ans = len(eratosthenes(n - 1))
print(ans)
| true | true |
f72f6ffa418102f76a422d0fa5ffd49e955a5898 | 6,071 | py | Python | SSWs.py | rhwhite/rhwhitepackages3 | 91d5677ea57d7cc9a3643708cd8c82a74fb6188d | [
"MIT"
] | null | null | null | SSWs.py | rhwhite/rhwhitepackages3 | 91d5677ea57d7cc9a3643708cd8c82a74fb6188d | [
"MIT"
] | null | null | null | SSWs.py | rhwhite/rhwhitepackages3 | 91d5677ea57d7cc9a3643708cd8c82a74fb6188d | [
"MIT"
] | null | null | null | # Module to search for and get data on SSWs
# Using the definition of Charlton and Polvani (2007):
# Author rachel.white@cantab.net
# Created July 2017
import numpy as np
import xarray as xr
import math
import sys
def adddays(U, itime, ndays):
    """Scan forward from *itime* for *ndays* consecutive days with U > 0.

    Parameters: U is a daily series indexable by integer with a ``time``
    coordinate giving its length; itime is the starting index; ndays the
    required run length of positive (westerly) days.

    Returns (index, run_length, exhausted): when a run of *ndays*
    consecutive positive days is found, *index* is its last day and
    *exhausted* is False; otherwise the series ran out first, *index*
    is len(U.time), and *exhausted* is True.

    (The original also assigned an unused `torun` flag; removed.)
    """
    numcons = 0
    while itime < len(U.time):
        if U[itime] > 0:
            numcons += 1
        else:
            numcons = 0  # run broken; start counting again
        if numcons >= ndays:
            return (itime, numcons, False)
        itime += 1
    return (itime, numcons, True)
def meanSE(N, in0, in1, in2, in3=0):
    """Mean and standard error of SSWs per winter, a la Charlton and Polvani (2007).

    in0..in3 are the numbers of winters containing 0, 1, 2 and 3 events
    out of N winters.  Returns (mean events per winter, standard error).
    """
    probs = [float(cnt) / float(N) for cnt in (in0, in1, in2, in3)]
    mean_events = sum(k * p for k, p in zip(range(4), probs))
    variance = sum(((k - mean_events) ** 2) * p for k, p in zip(range(4), probs))
    stderr = math.sqrt(variance) / math.sqrt(N)
    return mean_events, stderr
def findyearSSWs(U,times,count,thresh,lastdate,startdate,toprint,SSWdates):
    """Find all SSWs in a single winter's wind series.

    U: daily zonal-wind slice for the winter; times: matching day
    numbers; count: running multi-year event total; thresh: consecutive
    westerly days required by findend; lastdate/startdate: day-of-year
    window outside of which an event is a final warming; SSWdates is
    mutated in place (central dates appended).

    Returns (count, yearcount, finalwarmingstart, SSWdates) with
    finalwarmingstart = -1 when no final warming was seen.
    """
    finalwarmingstart = -1
    yearcount = 0
    itime = 0
    # if U starts below 0, iterate until it isn't!
    while U[itime]<0:
        itime +=1
    while itime < len(U.time):
        if U[itime] < 0:
            # easterlies: delimit the event and classify it
            central,end,itime = findend(U,itime,thresh)
            if end == -1:
                # winds never recovered: final warming of the season
                finalwarmingstart = ((times[central]+1) % 365)
            else:
                SSWdates.append(int(times[central]))
                if toprint: print ('SSW, day of year ' +
                                   str((times[central]) % 365))
                if lastdate < ((times[central] +1) % 365) < startdate :
                    # it counts as a final warming
                    finalwarmingstart = ((times[central]+1) % 365)
                else:
                    count +=1
                    yearcount +=1
        itime +=1
    return count,yearcount, finalwarmingstart, SSWdates
def findend(U,itime,thresh):
    """Given U[itime] < 0, delimit this easterly event.

    Returns (centraltime, endtime, itime): centraltime is the first
    easterly day (the SSW central date), endtime the first westerly day
    after it (-1 when the winds never recover, i.e. a final warming),
    and itime the index from which the caller should resume scanning.
    """
    centraltime,endtime = -1,-1
    if U[itime] < 0:
        centraltime = itime
        # Find end date
        while U[itime] < 0:
            itime = itime + 1
            if itime >= len(U.time): return (centraltime,-1,itime)
        endtime = itime
    # For the event to count, `thresh` consecutive westerly (U > 0) days
    # must follow before the record ends; otherwise it is a final warming.
    # (adddays counts consecutive U > 0 days.)
    newtime,numcons,end = adddays(U,itime,thresh)
    if end:
        return(itime,-1,newtime)
    else:
        # Event counts. Now advance past 20 consecutive westerly days so
        # the same warming is not detected twice.
        itime,ndays,end = adddays(U,itime,20)
        return(centraltime,endtime,itime)
def findSSWs(U,thresh,Obs=False,startyr = 0):
    """Find SSWs over a multi-year daily U series; print mean and s.e.; return dates.

    Created for WACCM daily data (365-day years).  With Obs=True the
    series is sliced by calendar dates starting at *startyr* and day
    numbers are measured from 1980-01-01.  Each "winter" spans November
    (day 303) through the following April (day 119); the first
    January-April stub is handled separately before the loop.

    Returns the list of SSW central dates (day numbers).
    """
    SSWdates = []
    toprint = False
    SSWyears = []
    startdate = 303 # beginning of November
    lastdate = 119 # end of April
    enddate = 119 # 30th April
    count = 0
    yearcount = 0
    singleyear = 0
    doubleyear = 0
    tripleyear = 0
    final = []
    nyears = len(U.time)//365
    times = U.time
    # Select first year (January-April stub before the first full winter)
    if Obs:
        yearU = U.sel(time=slice(str(startyr) + '-01',str(startyr) + '-04'))
        yeartime = times.sel(time=slice(str(startyr) + '-01',
                                        str(startyr) +'-04'))
        # convert timestamps to whole days since 1980-01-01
        yeartime = (yeartime.values - np.datetime64('1980-01-01'))/ np.timedelta64(1, 'D')
    else:
        yearU = U.isel(time=slice(0,120))
        yeartime = times[0:120].values
    count,yearcount,finalW,SSWdates = findyearSSWs(yearU,yeartime,count,thresh,
                                                   lastdate,startdate,
                                                   toprint, SSWdates)
    # Tally how many events this stub winter produced (0/1/2/3)
    if yearcount == 1:
        singleyear +=1
        #if toprint: print('year 0 1 SSW \n')
        SSWyears.append(0)
    elif yearcount ==2:
        doubleyear +=1
        #if toprint: print('year 0 2 SSWs \n')
        SSWyears.append(0)
    elif yearcount ==3:
        tripleyear +=1
        SSWyears.append(0)
    final.append(finalW)
    # Full winters: November of year i through April of year i+1
    for iyear in range(0,nyears):
        if Obs:
            yearU = U.sel(time=slice(str(startyr+iyear) +'-11',
                                     str(startyr+iyear+1) + '-04'))
            yeartime = times.sel(time=slice(str(startyr+iyear) + '-11',
                                            str(startyr+iyear+1) +'-04'))
            yeartime = ((yeartime.values - np.datetime64('1980-01-01'))/
                        np.timedelta64(1, 'D'))
        else:
            yearU = U.isel(time=slice(startdate+(iyear*365),
                                      enddate + ((iyear + 1) * 365)))
            yeartime = (times[startdate+(iyear*365):
                              enddate+((iyear+1)*365)].values)
        count,yearcount,finalW,SSWdates = findyearSSWs(
                                            yearU,yeartime,
                                            count,thresh,lastdate,startdate,
                                            toprint,SSWdates)
        if yearcount == 1:
            singleyear +=1
            SSWyears.append(iyear + 1)
            #if toprint: print('year ' + str(iyear +1) + ' 1 SSW \n')
        elif yearcount ==2:
            doubleyear +=1
            #if toprint: print('year ' + str(iyear +1) + ' 2 SSWs \n')
            SSWyears.append(iyear + 1)
        elif yearcount ==3:
            tripleyear +=1
            SSWyears.append(iyear + 1)
        final.append(finalW)
    # Consistency check: per-winter tallies must reproduce the raw count
    if singleyear + 2 * doubleyear +3 * tripleyear != count:
        print(count)
        print(singleyear + 2 * doubleyear +3 * tripleyear)
        sys.exit("problem with counting, maybe a year with more than 3 SSWs?!")
    mean,SE = meanSE(nyears,nyears - singleyear - doubleyear,singleyear,doubleyear)
    print ('mean: ' + str(mean) + ' ; s.e.: ' + str(SE) )
    return(SSWdates)
| 32.465241 | 90 | 0.530555 |
import numpy as np
import xarray as xr
import math
import sys
def adddays(U, itime, ndays):
    """Scan forward from *itime* for *ndays* consecutive days with U > 0.

    Returns (index, run_length, exhausted): *exhausted* is False with
    *index* at the run's last day when a qualifying run is found, True
    with *index* == len(U.time) otherwise.

    (The original also assigned an unused `torun` flag; removed.)
    """
    numcons = 0
    while itime < len(U.time):
        if U[itime] > 0:
            numcons += 1
        else:
            numcons = 0  # run broken; start counting again
        if numcons >= ndays:
            return (itime, numcons, False)
        itime += 1
    return (itime, numcons, True)
def meanSE(N, in0, in1, in2, in3=0):
    """Mean and standard error of SSWs per winter from 0/1/2/3-event winter counts.

    in0..in3 are the numbers of winters containing 0, 1, 2 and 3 events
    out of N winters.  Returns (mean events per winter, standard error).
    """
    probs = [float(cnt) / float(N) for cnt in (in0, in1, in2, in3)]
    mean_events = sum(k * p for k, p in zip(range(4), probs))
    variance = sum(((k - mean_events) ** 2) * p for k, p in zip(range(4), probs))
    stderr = math.sqrt(variance) / math.sqrt(N)
    return mean_events, stderr
def findyearSSWs(U,times,count,thresh,lastdate,startdate,toprint,SSWdates):
finalwarmingstart = -1
yearcount = 0
itime = 0
while U[itime]<0:
itime +=1
while itime < len(U.time):
if U[itime] < 0:
central,end,itime = findend(U,itime,thresh)
if end == -1:
finalwarmingstart = ((times[central]+1) % 365)
else:
SSWdates.append(int(times[central]))
if toprint: print ('SSW, day of year ' +
str((times[central]) % 365))
if lastdate < ((times[central] +1) % 365) < startdate :
# it counts as a final warming
finalwarmingstart = ((times[central]+1) % 365)
else:
count +=1
yearcount +=1
itime +=1
return count,yearcount, finalwarmingstart, SSWdates
def findend(U,itime,thresh):
# find final SSW
centraltime,endtime = -1,-1
if U[itime] < 0:
centraltime = itime
# Find end date
while U[itime] < 0:
itime = itime + 1
if itime >= len(U.time): return (centraltime,-1,itime)
endtime = itime
# Check for final warming: ends after April 30th but started before July
# Add 10 consective easterly days - must occur before April 30th for event
# to count
newtime,numcons,end = adddays(U,itime,thresh)
if end:
return(itime,-1,newtime)
else:
# Event counts. Now add 20 consecutive days
itime,ndays,end = adddays(U,itime,20)
return(centraltime,endtime,itime)
def findSSWs(U,thresh,Obs=False,startyr = 0):
# Find SSWs, print the mean number, the standard error, and
# return the dates
# Created for WACCM daily data
SSWdates = []
toprint = False
SSWyears = []
startdate = 303 # beginning of November
lastdate = 119 # end of April
enddate = 119 # 30th April
count = 0
yearcount = 0
singleyear = 0
doubleyear = 0
tripleyear = 0
final = []
nyears = len(U.time)//365
times = U.time
# Select first year
if Obs:
yearU = U.sel(time=slice(str(startyr) + '-01',str(startyr) + '-04'))
yeartime = times.sel(time=slice(str(startyr) + '-01',
str(startyr) +'-04'))
yeartime = (yeartime.values - np.datetime64('1980-01-01'))/ np.timedelta64(1, 'D')
else:
yearU = U.isel(time=slice(0,120))
yeartime = times[0:120].values
count,yearcount,finalW,SSWdates = findyearSSWs(yearU,yeartime,count,thresh,
lastdate,startdate,
toprint, SSWdates)
if yearcount == 1:
singleyear +=1
#if toprint: print('year 0 1 SSW \n')
SSWyears.append(0)
elif yearcount ==2:
doubleyear +=1
#if toprint: print('year 0 2 SSWs \n')
SSWyears.append(0)
elif yearcount ==3:
tripleyear +=1
SSWyears.append(0)
final.append(finalW)
for iyear in range(0,nyears):
if Obs:
yearU = U.sel(time=slice(str(startyr+iyear) +'-11',
str(startyr+iyear+1) + '-04'))
yeartime = times.sel(time=slice(str(startyr+iyear) + '-11',
str(startyr+iyear+1) +'-04'))
yeartime = ((yeartime.values - np.datetime64('1980-01-01'))/
np.timedelta64(1, 'D'))
else:
yearU = U.isel(time=slice(startdate+(iyear*365),
enddate + ((iyear + 1) * 365)))
yeartime = (times[startdate+(iyear*365):
enddate+((iyear+1)*365)].values)
count,yearcount,finalW,SSWdates = findyearSSWs(
yearU,yeartime,
count,thresh,lastdate,startdate,
toprint,SSWdates)
if yearcount == 1:
singleyear +=1
SSWyears.append(iyear + 1)
#if toprint: print('year ' + str(iyear +1) + ' 1 SSW \n')
elif yearcount ==2:
doubleyear +=1
#if toprint: print('year ' + str(iyear +1) + ' 2 SSWs \n')
SSWyears.append(iyear + 1)
elif yearcount ==3:
tripleyear +=1
SSWyears.append(iyear + 1)
final.append(finalW)
if singleyear + 2 * doubleyear +3 * tripleyear != count:
print(count)
print(singleyear + 2 * doubleyear +3 * tripleyear)
sys.exit("problem with counting, maybe a year with more than 3 SSWs?!")
mean,SE = meanSE(nyears,nyears - singleyear - doubleyear,singleyear,doubleyear)
print ('mean: ' + str(mean) + ' ; s.e.: ' + str(SE) )
return(SSWdates)
| true | true |
f72f7245256e6106b683dd052df2b3c12248710d | 3,127 | py | Python | billy/web/api/urls.py | backwardn/billy | 07ac788d25a6c79d03dd0e3d55459bbb55e22439 | [
"BSD-3-Clause"
] | 33 | 2016-11-05T07:25:48.000Z | 2022-01-31T03:40:43.000Z | billy/web/api/urls.py | backwardn/billy | 07ac788d25a6c79d03dd0e3d55459bbb55e22439 | [
"BSD-3-Clause"
] | 16 | 2015-02-05T21:25:58.000Z | 2015-09-18T20:27:06.000Z | billy/web/api/urls.py | backwardn/billy | 07ac788d25a6c79d03dd0e3d55459bbb55e22439 | [
"BSD-3-Clause"
] | 22 | 2015-03-23T07:13:20.000Z | 2016-06-10T04:41:06.000Z | import datetime
from django.conf import settings
from django.conf.urls import url
from django.http import HttpResponse
import piston.resource
from piston.emitters import Emitter
from billy.web.api import handlers
from billy.web.api.emitters import BillyJSONEmitter
class CORSResource(piston.resource.Resource):
    """Piston resource that stamps a permissive CORS header on every response."""
    def __call__(self, *args, **kwargs):
        response = super(CORSResource, self).__call__(*args, **kwargs)
        response['Access-Control-Allow-Origin'] = '*'
        return response
authorizer = None
Resource = CORSResource
Emitter.register('json', BillyJSONEmitter, 'application/json; charset=utf-8')
Emitter.unregister('yaml')
Emitter.unregister('xml')
Emitter.unregister('django')
Emitter.unregister('pickle')
all_metadata_handler = Resource(handlers.AllMetadataHandler,
authentication=authorizer)
metadata_handler = Resource(handlers.MetadataHandler,
authentication=authorizer)
bill_handler = Resource(handlers.BillHandler,
authentication=authorizer)
bill_search_handler = Resource(handlers.BillSearchHandler,
authentication=authorizer)
legislator_handler = Resource(handlers.LegislatorHandler,
authentication=authorizer)
legsearch_handler = Resource(handlers.LegislatorSearchHandler,
authentication=authorizer)
committee_handler = Resource(handlers.CommitteeHandler,
authentication=authorizer)
committee_search_handler = Resource(handlers.CommitteeSearchHandler,
authentication=authorizer)
legislator_geo_handler = Resource(handlers.LegislatorGeoHandler,
authentication=authorizer)
district_handler = Resource(handlers.DistrictHandler,
authentication=authorizer)
boundary_handler = Resource(handlers.BoundaryHandler,
authentication=authorizer)
urlpatterns = [
# metadata
url(r'^v1/metadata/$', all_metadata_handler),
url(r'^v1/metadata/(?P<abbr>[a-zA-Z-]+)/$', metadata_handler),
# bills, including three urls for bill handler
url(r'^v1/bills/(?P<abbr>[a-zA-Z-]+)/(?P<session>.+)/'
r'(?P<chamber>upper|lower)/(?P<bill_id>.+)/$', bill_handler),
url(r'^v1/bills/(?P<abbr>[a-zA-Z-]+)/(?P<session>.+)/'
r'(?P<bill_id>.+)/$', bill_handler),
url(r'^v1/bills/(?P<billy_bill_id>[A-Z-]+B\d{8})/', bill_handler),
url(r'^v1/bills/$', bill_search_handler),
url(r'^v1/legislators/(?P<id>[A-Z-]+L\d{6})/$', legislator_handler),
url(r'^v1/legislators/$', legsearch_handler),
url(r'v1/legislators/geo/$', legislator_geo_handler),
url(r'^v1/committees/(?P<id>[A-Z-]+C\d{6})/$', committee_handler),
url(r'^v1/committees/$', committee_search_handler),
# districts & boundaries
url(r'v1/districts/(?P<abbr>[a-zA-Z-]+)/$',
district_handler),
url(r'v1/districts/(?P<abbr>[a-zA-Z-]+)/(?P<chamber>upper|lower)/$',
district_handler),
url(r'v1/districts/boundary/(?P<boundary_id>.+)/$', boundary_handler),
]
| 39.582278 | 77 | 0.657179 | import datetime
from django.conf import settings
from django.conf.urls import url
from django.http import HttpResponse
import piston.resource
from piston.emitters import Emitter
from billy.web.api import handlers
from billy.web.api.emitters import BillyJSONEmitter
class CORSResource(piston.resource.Resource):
    """Piston resource that stamps a permissive CORS header on every response."""
    def __call__(self, *args, **kwargs):
        response = super(CORSResource, self).__call__(*args, **kwargs)
        response['Access-Control-Allow-Origin'] = '*'
        return response
authorizer = None
Resource = CORSResource
Emitter.register('json', BillyJSONEmitter, 'application/json; charset=utf-8')
Emitter.unregister('yaml')
Emitter.unregister('xml')
Emitter.unregister('django')
Emitter.unregister('pickle')
all_metadata_handler = Resource(handlers.AllMetadataHandler,
authentication=authorizer)
metadata_handler = Resource(handlers.MetadataHandler,
authentication=authorizer)
bill_handler = Resource(handlers.BillHandler,
authentication=authorizer)
bill_search_handler = Resource(handlers.BillSearchHandler,
authentication=authorizer)
legislator_handler = Resource(handlers.LegislatorHandler,
authentication=authorizer)
legsearch_handler = Resource(handlers.LegislatorSearchHandler,
authentication=authorizer)
committee_handler = Resource(handlers.CommitteeHandler,
authentication=authorizer)
committee_search_handler = Resource(handlers.CommitteeSearchHandler,
authentication=authorizer)
legislator_geo_handler = Resource(handlers.LegislatorGeoHandler,
authentication=authorizer)
district_handler = Resource(handlers.DistrictHandler,
authentication=authorizer)
boundary_handler = Resource(handlers.BoundaryHandler,
authentication=authorizer)
urlpatterns = [
url(r'^v1/metadata/$', all_metadata_handler),
url(r'^v1/metadata/(?P<abbr>[a-zA-Z-]+)/$', metadata_handler),
url(r'^v1/bills/(?P<abbr>[a-zA-Z-]+)/(?P<session>.+)/'
r'(?P<chamber>upper|lower)/(?P<bill_id>.+)/$', bill_handler),
url(r'^v1/bills/(?P<abbr>[a-zA-Z-]+)/(?P<session>.+)/'
r'(?P<bill_id>.+)/$', bill_handler),
url(r'^v1/bills/(?P<billy_bill_id>[A-Z-]+B\d{8})/', bill_handler),
url(r'^v1/bills/$', bill_search_handler),
url(r'^v1/legislators/(?P<id>[A-Z-]+L\d{6})/$', legislator_handler),
url(r'^v1/legislators/$', legsearch_handler),
url(r'v1/legislators/geo/$', legislator_geo_handler),
url(r'^v1/committees/(?P<id>[A-Z-]+C\d{6})/$', committee_handler),
url(r'^v1/committees/$', committee_search_handler),
url(r'v1/districts/(?P<abbr>[a-zA-Z-]+)/$',
district_handler),
url(r'v1/districts/(?P<abbr>[a-zA-Z-]+)/(?P<chamber>upper|lower)/$',
district_handler),
url(r'v1/districts/boundary/(?P<boundary_id>.+)/$', boundary_handler),
]
| true | true |
f72f74c949c35b437d4684648a5f2909b9268826 | 2,083 | py | Python | generated/nifake/setup.py | kurtp-ni/nimi-python | 4f0bccce67a69ca9f46a8ab9b07dc26ca0049729 | [
"MIT"
] | 88 | 2017-08-03T18:07:27.000Z | 2022-01-28T13:55:06.000Z | generated/nifake/setup.py | kurtp-ni/nimi-python | 4f0bccce67a69ca9f46a8ab9b07dc26ca0049729 | [
"MIT"
] | 1,310 | 2017-07-11T18:42:44.000Z | 2022-03-28T21:03:57.000Z | generated/nifake/setup.py | kurtp-ni/nimi-python | 4f0bccce67a69ca9f46a8ab9b07dc26ca0049729 | [
"MIT"
] | 70 | 2017-07-25T14:52:53.000Z | 2022-03-31T14:14:23.000Z | #!/usr/bin/python
# -*- coding: utf-8 -*-
# This file was generated
from setuptools.command.test import test as test_command
from setuptools import setup
class PyTest(test_command):
    """setuptools ``test`` command that delegates execution to pytest."""

    def finalize_options(self):
        """Complete option parsing and force an in-process pytest run."""
        super().finalize_options()
        self.test_args = []
        self.test_suite = True

    def run_tests(self):
        """Import pytest lazily (it is only a test-time dependency) and run it."""
        import pytest

        pytest.main(self.test_args)
pypi_name = 'nifake'
def read_contents(file_to_read):
    """Return the entire text content of *file_to_read*.

    The file is decoded as UTF-8 explicitly so that building the package
    does not depend on the platform's default locale encoding (e.g.
    cp1252 on Windows, which can fail on non-ASCII README content).
    """
    with open(file_to_read, 'r', encoding='utf-8') as f:
        return f.read()
setup(
name=pypi_name,
zip_safe=True,
version='1.4.2.dev0',
description='NI-FAKE Python API',
long_description=read_contents('README.rst'),
long_description_content_type='text/x-rst',
author='National Instruments',
author_email="opensource@ni.com",
url="https://github.com/ni/nimi-python",
maintainer="National Instruments",
maintainer_email="opensource@ni.com",
keywords=['nifake'],
license='MIT',
include_package_data=True,
packages=['nifake'],
install_requires=[
'enum34;python_version<"3.4"',
'singledispatch;python_version<"3.4"',
'hightime>=0.2.0',
'nitclk',
],
setup_requires=['pytest-runner', ],
tests_require=['pytest'],
test_suite='tests',
classifiers=[
"Development Status :: 3 - Alpha",
"Intended Audience :: Developers",
"Intended Audience :: Manufacturing",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: MIT License",
"Operating System :: Microsoft :: Windows",
"Operating System :: POSIX",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: Implementation :: CPython",
"Topic :: System :: Hardware :: Hardware Drivers"
],
cmdclass={'test': PyTest},
package_data={pypi_name: ['VERSION']},
)
| 28.534247 | 70 | 0.62698 |
from setuptools.command.test import test as test_command
from setuptools import setup
class PyTest(test_command):
def finalize_options(self):
test_command.finalize_options(self)
self.test_args = []
self.test_suite = True
def run_tests(self):
import pytest
pytest.main(self.test_args)
pypi_name = 'nifake'
def read_contents(file_to_read):
with open(file_to_read, 'r') as f:
return f.read()
setup(
name=pypi_name,
zip_safe=True,
version='1.4.2.dev0',
description='NI-FAKE Python API',
long_description=read_contents('README.rst'),
long_description_content_type='text/x-rst',
author='National Instruments',
author_email="opensource@ni.com",
url="https://github.com/ni/nimi-python",
maintainer="National Instruments",
maintainer_email="opensource@ni.com",
keywords=['nifake'],
license='MIT',
include_package_data=True,
packages=['nifake'],
install_requires=[
'enum34;python_version<"3.4"',
'singledispatch;python_version<"3.4"',
'hightime>=0.2.0',
'nitclk',
],
setup_requires=['pytest-runner', ],
tests_require=['pytest'],
test_suite='tests',
classifiers=[
"Development Status :: 3 - Alpha",
"Intended Audience :: Developers",
"Intended Audience :: Manufacturing",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: MIT License",
"Operating System :: Microsoft :: Windows",
"Operating System :: POSIX",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: Implementation :: CPython",
"Topic :: System :: Hardware :: Hardware Drivers"
],
cmdclass={'test': PyTest},
package_data={pypi_name: ['VERSION']},
)
| true | true |
f72f74e9499225c66ece09f508b8f7dac132b713 | 1,812 | py | Python | experiments/analysis/edge_bound/training_nlf/camera_nlf_training.py | haihabi/GenerativeCRB | d53c01bec7214bb087fbe17dba241e12eb60858e | [
"MIT"
] | null | null | null | experiments/analysis/edge_bound/training_nlf/camera_nlf_training.py | haihabi/GenerativeCRB | d53c01bec7214bb087fbe17dba241e12eb60858e | [
"MIT"
] | null | null | null | experiments/analysis/edge_bound/training_nlf/camera_nlf_training.py | haihabi/GenerativeCRB | d53c01bec7214bb087fbe17dba241e12eb60858e | [
"MIT"
] | null | null | null | import torch
import numpy as np
from tqdm import tqdm
from torch.utils.data import DataLoader
from experiments.data_model.image_denoising.noise_dataset import NoiseDataSet
from experiments.models_architecture.camera_nlf_flow import generate_nlf_flow
def train_step(in_noise, in_cond_vector):
    """Run one optimisation step of the flow on a single noise batch.

    NOTE(review): relies on the module-level globals ``opt``, ``flow`` and
    ``loss_list`` created in the ``__main__`` block below.
    """
    opt.zero_grad()
    # Mean negative log-likelihood of the noise under the conditional flow.
    loss = flow.nll_mean(in_noise, in_cond_vector)
    loss.backward()
    loss_list.append(loss.item())  # keep the scalar loss for epoch averaging
    opt.step()
if __name__ == '__main__':
    # Training hyper-parameters.
    lr = 1e-4
    patch_size = 32
    n_epochs = 5
    batch_size = 32
    n_iter_per_epoch = 1000  # NOTE(review): unused below — epochs iterate the full dataloader
    input_shape = [4, patch_size, patch_size]  # 4-channel raw patches (packed Bayer planes — TODO confirm)
    trained_alpha = True  # True -> NLF noise model; False -> plain Gaussian
    flow = generate_nlf_flow(input_shape, trained_alpha)
    opt = torch.optim.Adam(flow.parameters(), lr=lr)
    nds = NoiseDataSet("/data/datasets/SIDD_Medium_Raw/Data", n_pat_per_im=5000)
    nds_dl = DataLoader(nds, batch_size=batch_size, shuffle=True)
    loss_best = np.inf  # best epoch loss seen so far, used for checkpointing
    for n in range(n_epochs):
        loss_list = []  # populated by train_step() for this epoch
        for noise, clean, cam, iso in tqdm(nds_dl):
            noise, clean, cam, iso = noise.cuda(), clean.cuda(), cam.long().cuda(), iso.cuda()
            # Dataset yields NHWC tensors; the flow expects NCHW floats.
            clean = torch.permute(clean, (0, 3, 1, 2)).float()
            noise = torch.permute(noise, (0, 3, 1, 2)).float()
            cond_vector = [clean, iso, cam]
            train_step(noise, cond_vector)
        # Average epoch loss; checkpoint whenever it improves.
        loss_current = sum(loss_list) / len(loss_list)
        print(loss_current)
        if loss_current < loss_best:
            flow_name = "flow_nlf_best.pt" if trained_alpha else "flow_gaussian_best.pt"
            torch.save(flow.state_dict(), f"./{flow_name}")
            loss_best = loss_current
            print(f"Update Best To:{loss_current}")
    # Always save the final (last-epoch) weights alongside the best ones.
    flow_name = "flow_nlf.pt" if trained_alpha else "flow_gaussian.pt"
    torch.save(flow.state_dict(), f"./{flow_name}")
| 34.846154 | 94 | 0.667219 | import torch
import numpy as np
from tqdm import tqdm
from torch.utils.data import DataLoader
from experiments.data_model.image_denoising.noise_dataset import NoiseDataSet
from experiments.models_architecture.camera_nlf_flow import generate_nlf_flow
def train_step(in_noise, in_cond_vector):
opt.zero_grad()
loss = flow.nll_mean(in_noise, in_cond_vector)
loss.backward()
loss_list.append(loss.item())
opt.step()
if __name__ == '__main__':
lr = 1e-4
patch_size = 32
n_epochs = 5
batch_size = 32
n_iter_per_epoch = 1000
input_shape = [4, patch_size, patch_size]
trained_alpha = True
flow = generate_nlf_flow(input_shape, trained_alpha)
opt = torch.optim.Adam(flow.parameters(), lr=lr)
nds = NoiseDataSet("/data/datasets/SIDD_Medium_Raw/Data", n_pat_per_im=5000)
nds_dl = DataLoader(nds, batch_size=batch_size, shuffle=True)
loss_best = np.inf
for n in range(n_epochs):
loss_list = []
for noise, clean, cam, iso in tqdm(nds_dl):
noise, clean, cam, iso = noise.cuda(), clean.cuda(), cam.long().cuda(), iso.cuda()
clean = torch.permute(clean, (0, 3, 1, 2)).float()
noise = torch.permute(noise, (0, 3, 1, 2)).float()
cond_vector = [clean, iso, cam]
train_step(noise, cond_vector)
loss_current = sum(loss_list) / len(loss_list)
print(loss_current)
if loss_current < loss_best:
flow_name = "flow_nlf_best.pt" if trained_alpha else "flow_gaussian_best.pt"
torch.save(flow.state_dict(), f"./{flow_name}")
loss_best = loss_current
print(f"Update Best To:{loss_current}")
flow_name = "flow_nlf.pt" if trained_alpha else "flow_gaussian.pt"
torch.save(flow.state_dict(), f"./{flow_name}")
| true | true |
f72f75554d82d37ec9ea899454d23be4c88365df | 4,843 | py | Python | torch/distributed/elastic/metrics/__init__.py | MagiaSN/pytorch | 7513455c743d3d644b45a804902c1a0d14b69f45 | [
"Intel"
] | 1 | 2021-04-11T08:27:46.000Z | 2021-04-11T08:27:46.000Z | torch/distributed/elastic/metrics/__init__.py | MagiaSN/pytorch | 7513455c743d3d644b45a804902c1a0d14b69f45 | [
"Intel"
] | 1 | 2022-01-18T12:17:29.000Z | 2022-01-18T12:17:29.000Z | torch/distributed/elastic/metrics/__init__.py | MagiaSN/pytorch | 7513455c743d3d644b45a804902c1a0d14b69f45 | [
"Intel"
] | 2 | 2021-07-02T10:18:21.000Z | 2021-08-18T10:10:28.000Z | #!/usr/bin/env/python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
"""Metrics API
**Overview**:
The metrics API in torchelastic is used to publish telemetry metrics.
It is designed to be used by torchelastic's internal modules to
publish metrics for the end user with the goal of increasing visibility
and helping with debugging. However you may use the same API in your
jobs to publish metrics to the same metrics ``sink``.
A ``metric`` can be thought of as timeseries data
and is uniquely identified by the string-valued tuple
``(metric_group, metric_name)``.
torchelastic makes no assumptions about what a ``metric_group`` is
and what relationship it has with ``metric_name``. It is totally up
to the user to use these two fields to uniquely identify a metric.
.. note:: The metric group ``torchelastic`` is reserved by torchelastic for
platform level metrics that it produces.
For instance torchelastic may output the latency (in milliseconds)
of a re-rendezvous operation from the agent as
``(torchelastic, agent.rendezvous.duration.ms)``
A sensible way to use metric groups is to map them to a stage or module
in your job. You may also encode certain high level properties
the job such as the region or stage (dev vs prod).
**Publish Metrics**:
Using torchelastic's metrics API is similar to using python's logging
framework. You first have to configure a metrics handler before
trying to add metric data.
The example below measures the latency for the ``calculate()`` function.
::
import time
import torch.distributed.elastic.metrics as metrics
# makes all metrics other than the one from "my_module" to go /dev/null
metrics.configure(metrics.NullMetricsHandler())
metrics.configure(metrics.ConsoleMetricsHandler(), "my_module")
def my_method():
start = time.time()
calculate()
end = time.time()
metrics.put_metric("calculate_latency", int(end-start), "my_module")
You may also use the `torch.distributed.elastic.metrics.prof` decorator
to conveniently and succinctly profile functions
::
# -- in module examples.foobar --
import torch.distributed.elastic.metrics as metrics
metrics.configure(metrics.ConsoleMetricsHandler(), "foobar")
metrics.configure(metrics.ConsoleMetricsHandler(), "Bar")
@metrics.prof
def foo():
pass
class Bar():
@metrics.prof
def baz():
pass
``@metrics.prof`` will publish the following metrics
::
<leaf_module or classname>.success - 1 if the function finished successfully
<leaf_module or classname>.failure - 1 if the function threw an exception
<leaf_module or classname>.duration.ms - function duration in milliseconds
**Configuring Metrics Handler**:
`torch.distributed.elastic.metrics.MetricHandler` is responsible for emitting
the added metric values to a particular destination. Metric groups can be
configured with different metric handlers.
By default torchelastic emits all metrics to ``/dev/null``.
By adding the following configuration metrics,
``torchelastic`` and ``my_app`` metric groups will be printed out to
console.
::
import torch.distributed.elastic.metrics as metrics
metrics.configure(metrics.ConsoleMetricHandler(), group = "torchelastic")
metrics.configure(metrics.ConsoleMetricHandler(), group = "my_app")
**Writing a Custom Metric Handler**:
If you want your metrics to be emitted to a custom location, implement
the `torch.distributed.elastic.metrics.MetricHandler` interface
and configure your job to use your custom metric handler.
Below is a toy example that prints the metrics to ``stdout``
::
import torch.distributed.elastic.metrics as metrics
class StdoutMetricHandler(metrics.MetricHandler):
def emit(self, metric_data):
ts = metric_data.timestamp
group = metric_data.group_name
name = metric_data.name
value = metric_data.value
print(f"[{ts}][{group}]: {name}={value}")
metrics.configure(StdoutMetricHandler(), group="my_app")
Now all metrics in the group ``my_app`` will be printed to stdout as:
::
[1574213883.4182858][my_app]: my_metric=<value>
[1574213940.5237644][my_app]: my_metric=<value>
"""
from typing import Optional
from .api import ( # noqa F401
ConsoleMetricHandler,
MetricData,
MetricHandler,
MetricsConfig,
NullMetricHandler,
configure,
get_elapsed_time_ms,
getStream,
prof,
profile,
publish_metric,
put_metric,
)
def initialize_metrics(cfg: Optional[MetricsConfig] = None) -> None:
    """Initialization hook for the metrics subsystem.

    Deliberately a no-op in the open-source build; the optional
    ``static_init`` import below presumably supplies platform-specific
    setup when available — confirm against internal builds.
    """
    pass
try:
from torch.distributed.elastic.metrics.static_init import * # type: ignore # noqa: F401 F403
except ModuleNotFoundError:
pass
| 29.530488 | 97 | 0.742308 |
from typing import Optional
from .api import (
ConsoleMetricHandler,
MetricData,
MetricHandler,
MetricsConfig,
NullMetricHandler,
configure,
get_elapsed_time_ms,
getStream,
prof,
profile,
publish_metric,
put_metric,
)
def initialize_metrics(cfg: Optional[MetricsConfig] = None):
pass
try:
from torch.distributed.elastic.metrics.static_init import * FoundError:
pass
| true | true |
f72f76abe1221bf6bd92370f84f0906ef075e999 | 3,133 | py | Python | src/dataset_builder.py | elangovana/large-scale-ptm-ppi | cc835df915d12dd20c35f9cea5e40365200a6d3d | [
"MIT"
] | 1 | 2022-02-25T22:06:39.000Z | 2022-02-25T22:06:39.000Z | src/dataset_builder.py | elangovana/ppi-aimed | cc835df915d12dd20c35f9cea5e40365200a6d3d | [
"MIT"
] | null | null | null | src/dataset_builder.py | elangovana/ppi-aimed | cc835df915d12dd20c35f9cea5e40365200a6d3d | [
"MIT"
] | null | null | null | import logging
import os
from torch.utils.data import DataLoader
from locator import Locator
class DatasetBuilder:
    """Lazy factory for datasets, dataloaders, tokeniser and scorers.

    The dataset and tokeniser factories are looked up by name through
    ``Locator``; every derived object (tokeniser, datasets, dataloaders,
    label mapper, scorers) is created on first access and cached.
    """

    def __init__(self, val_data, dataset_factory_name, tokenisor_factory_name, train_data=None, num_workers=None,
                 batch_size=8, addition_args_dict=None):
        """
        :param val_data: validation data source handed to the dataset factory
        :param dataset_factory_name: locator name of the dataset factory
        :param tokenisor_factory_name: locator name of the tokeniser factory
        :param train_data: optional training data source
        :param num_workers: dataloader worker count; defaults to cpu_count() - 1
        :param batch_size: dataloader batch size
        :param addition_args_dict: extra kwargs forwarded to the factories
        """
        self._addition_args_dict = addition_args_dict
        self.train_data = train_data
        self.val_data = val_data
        self.batch_size = batch_size
        self._dataset_factory = Locator().get(dataset_factory_name)
        self._tokenisor_factory = Locator().get(tokenisor_factory_name)
        # Two fixes over the previous ``num_workers or os.cpu_count() - 1``:
        #  * an explicit ``num_workers=0`` is honoured instead of being
        #    silently replaced (0 is falsy), and
        #  * ``os.cpu_count()`` returning ``None`` no longer raises TypeError.
        if num_workers is None:
            num_workers = (os.cpu_count() or 1) - 1
        self.num_workers = max(num_workers, 0)
        # Lazily-initialised caches.
        self._tokenisor = None
        self._train_dataloader = None
        self._train_dataset = None
        self._val_dataset = None
        self._val_dataloader = None
        self._scorers = None
        self._label_mapper = None

    @property
    def _logger(self):
        """Module logger, resolved lazily so logging config can change."""
        return logging.getLogger(__name__)

    def get_tokenisor(self):
        """Return the cached tokeniser, creating it on first call."""
        self._logger.info("Retrieving Tokeniser")
        if self._tokenisor is None:
            self._tokenisor = self._tokenisor_factory.get_tokenisor(**self._addition_args_dict)
        return self._tokenisor

    def get_train_dataset(self):
        """Return the cached training dataset, creating it on first call."""
        if self._train_dataset is None:
            self._train_dataset = self._dataset_factory.get_dataset(self.train_data,
                                                                    preprocessors=self.get_tokenisor(),
                                                                    **self._addition_args_dict)
        return self._train_dataset

    def get_val_dataset(self):
        """Return the cached validation dataset, creating it on first call."""
        if self._val_dataset is None:
            self._val_dataset = self._dataset_factory.get_dataset(self.val_data, preprocessors=self.get_tokenisor(),
                                                                  **self._addition_args_dict)
        return self._val_dataset

    def get_label_mapper(self):
        """Return the cached label mapper from the dataset factory."""
        if self._label_mapper is None:
            self._label_mapper = self._dataset_factory.get_label_mapper()
        return self._label_mapper

    def num_classes(self):
        """Number of target classes, as reported by the label mapper."""
        return self.get_label_mapper().num_classes

    def positive_label_index(self):
        """Index of the positive class.

        Goes through :meth:`get_label_mapper` (consistent with
        :meth:`num_classes`) so calling this before any other accessor no
        longer fails with ``AttributeError`` on an uninitialised
        ``_label_mapper``.
        """
        return self.get_label_mapper().positive_label_index

    def get_scorers(self):
        """Return the cached scorers provided by the dataset factory."""
        if self._scorers is None:
            self._scorers = self._dataset_factory.get_scorers()
        return self._scorers

    def get_train_dataloader(self):
        """Return the cached shuffling dataloader over the training dataset."""
        if self._train_dataloader is None:
            self._train_dataloader = DataLoader(dataset=self.get_train_dataset(), num_workers=self.num_workers,
                                                batch_size=self.batch_size, shuffle=True)
        return self._train_dataloader

    def get_val_dataloader(self):
        """Return the cached non-shuffling dataloader over the validation dataset."""
        if self._val_dataloader is None:
            self._val_dataloader = DataLoader(dataset=self.get_val_dataset(), num_workers=self.num_workers,
                                              batch_size=self.batch_size, shuffle=False)
        return self._val_dataloader
| 34.811111 | 116 | 0.635812 | import logging
import os
from torch.utils.data import DataLoader
from locator import Locator
class DatasetBuilder:
def __init__(self, val_data, dataset_factory_name, tokenisor_factory_name, train_data=None, num_workers=None,
batch_size=8, addition_args_dict=None):
self._addition_args_dict = addition_args_dict
self.train_data = train_data
self.val_data = val_data
self.batch_size = batch_size
self._dataset_factory = Locator().get(dataset_factory_name)
self._tokenisor_factory = Locator().get(tokenisor_factory_name)
self.num_workers = num_workers or os.cpu_count() - 1
if self.num_workers <= 0:
self.num_workers = 0
self._tokenisor = None
self._train_dataloader = None
self._train_dataset = None
self._val_dataset = None
self._val_dataloader = None
self._scorers = None
self._label_mapper = None
@property
def _logger(self):
return logging.getLogger(__name__)
def get_tokenisor(self):
self._logger.info("Retrieving Tokeniser")
if self._tokenisor is None:
self._tokenisor = self._tokenisor_factory.get_tokenisor(**self._addition_args_dict)
return self._tokenisor
def get_train_dataset(self):
if self._train_dataset is None:
self._train_dataset = self._dataset_factory.get_dataset(self.train_data,
preprocessors=self.get_tokenisor(),
**self._addition_args_dict)
return self._train_dataset
def get_val_dataset(self):
if self._val_dataset is None:
self._val_dataset = self._dataset_factory.get_dataset(self.val_data, preprocessors=self.get_tokenisor(),
**self._addition_args_dict)
return self._val_dataset
def get_label_mapper(self):
if self._label_mapper is None:
self._label_mapper = self._dataset_factory.get_label_mapper()
return self._label_mapper
def num_classes(self):
return self.get_label_mapper().num_classes
def positive_label_index(self):
return self._label_mapper.positive_label_index
def get_scorers(self):
if self._scorers is None:
self._scorers = self._dataset_factory.get_scorers()
return self._scorers
def get_train_dataloader(self):
if self._train_dataloader is None:
self._train_dataloader = DataLoader(dataset=self.get_train_dataset(), num_workers=self.num_workers,
batch_size=self.batch_size, shuffle=True)
return self._train_dataloader
def get_val_dataloader(self):
if self._val_dataloader is None:
self._val_dataloader = DataLoader(dataset=self.get_val_dataset(), num_workers=self.num_workers,
batch_size=self.batch_size, shuffle=False)
return self._val_dataloader
| true | true |
f72f78af5664f6874058767edba4aca0c9a4cc9f | 2,687 | py | Python | app/recipe/tests/test_ingredient_api.py | samderlust/recipe-app-api | 44d63426fe2875bd57900203d9dccc14550f1f9d | [
"MIT"
] | null | null | null | app/recipe/tests/test_ingredient_api.py | samderlust/recipe-app-api | 44d63426fe2875bd57900203d9dccc14550f1f9d | [
"MIT"
] | null | null | null | app/recipe/tests/test_ingredient_api.py | samderlust/recipe-app-api | 44d63426fe2875bd57900203d9dccc14550f1f9d | [
"MIT"
] | null | null | null | from django.contrib.auth import get_user_model
from django.urls import reverse
from django.test import TestCase
from rest_framework import status
from rest_framework.test import APIClient
from core.models import Ingredient
from recipe.serializers import IngredientSerializer
INGREDIENTS_URL = reverse('recipe:ingredient-list')
class PublicIngredientAPITest(TestCase):
    """Test the publicly available (unauthenticated) ingredient API."""
    def setUp(self):
        # Plain client with no force_authenticate(): requests are anonymous.
        self.client = APIClient()
    def test_login_required(self):
        """Test that login is required to access the endpoint."""
        res = self.client.get(INGREDIENTS_URL)
        self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)
class PrivateIngredientAPITest(TestCase):
    """Test the ingredient API for an authenticated user."""
    def setUp(self):
        # Every test in this class runs as this authenticated user.
        self.client = APIClient()
        self.user = get_user_model().objects.create_user(
            'test@test.com',
            'password'
        )
        self.client.force_authenticate(self.user)
    def test_retrieve_ingredient_list(self):
        """Test retrieving the list of ingredients, ordered by name descending."""
        Ingredient.objects.create(user=self.user, name='Kale')
        Ingredient.objects.create(user=self.user, name='Salt')
        res = self.client.get(INGREDIENTS_URL)
        # The endpoint is expected to mirror this queryset's ordering.
        ingredients = Ingredient.objects.all().order_by('-name')
        serializer = IngredientSerializer(ingredients, many=True)
        self.assertEqual(res.status_code, status.HTTP_200_OK)
        self.assertEqual(res.data, serializer.data)
    def test_ingredients_limited_to_user(self):
        """Test that only the authenticated user's ingredients are returned."""
        user2 = get_user_model().objects.create_user(
            'user2@test.com',
            'password'
        )
        # user2's ingredient must not appear in self.user's listing.
        Ingredient.objects.create(user=user2, name='Kale')
        ingredient = Ingredient.objects.create(user=self.user, name='Egg')
        res = self.client.get(INGREDIENTS_URL)
        self.assertEqual(res.status_code, status.HTTP_200_OK)
        self.assertEqual(len(res.data), 1)
        self.assertEqual(res.data[0]['name'], ingredient.name)
    def test_create_ingredient_successful(self):
        """Test creating a new ingredient via POST persists it for the user."""
        payload = {'name': 'Cabbage'}
        self.client.post(INGREDIENTS_URL, payload)
        exists = Ingredient.objects.filter(
            user=self.user,
            name=payload['name']
        ).exists()
        self.assertTrue(exists)
    def test_create_ingredient_invalid(self):
        """Test that creating an ingredient with an empty name fails validation."""
        payload = {'name': ''}
        res = self.client.post(INGREDIENTS_URL, payload)
        self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
| 30.534091 | 74 | 0.676219 | from django.contrib.auth import get_user_model
from django.urls import reverse
from django.test import TestCase
from rest_framework import status
from rest_framework.test import APIClient
from core.models import Ingredient
from recipe.serializers import IngredientSerializer
INGREDIENTS_URL = reverse('recipe:ingredient-list')
class PublicIngredientAPITest(TestCase):
def setUp(self):
self.client = APIClient()
def test_login_required(self):
res = self.client.get(INGREDIENTS_URL)
self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)
class PrivateIngredientAPITest(TestCase):
def setUp(self):
self.client = APIClient()
self.user = get_user_model().objects.create_user(
'test@test.com',
'password'
)
self.client.force_authenticate(self.user)
def test_retrieve_ingredient_list(self):
Ingredient.objects.create(user=self.user, name='Kale')
Ingredient.objects.create(user=self.user, name='Salt')
res = self.client.get(INGREDIENTS_URL)
ingredients = Ingredient.objects.all().order_by('-name')
serializer = IngredientSerializer(ingredients, many=True)
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertEqual(res.data, serializer.data)
def test_ingredients_limited_to_user(self):
user2 = get_user_model().objects.create_user(
'user2@test.com',
'password'
)
Ingredient.objects.create(user=user2, name='Kale')
ingredient = Ingredient.objects.create(user=self.user, name='Egg')
res = self.client.get(INGREDIENTS_URL)
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertEqual(len(res.data), 1)
self.assertEqual(res.data[0]['name'], ingredient.name)
def test_create_ingredient_successful(self):
payload = {'name': 'Cabbage'}
self.client.post(INGREDIENTS_URL, payload)
exists = Ingredient.objects.filter(
user=self.user,
name=payload['name']
).exists()
self.assertTrue(exists)
def test_create_ingredient_invalid(self):
payload = {'name': ''}
res = self.client.post(INGREDIENTS_URL, payload)
self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
| true | true |
f72f7970ff216f32d358651dab960a4f8b67cce2 | 5,358 | py | Python | migrations/versions/a725247ae9b2_initial_migration.py | np1e/whoami_backend | d91540885c81194489b4b9d0dc67acfe81a59688 | [
"MIT"
] | null | null | null | migrations/versions/a725247ae9b2_initial_migration.py | np1e/whoami_backend | d91540885c81194489b4b9d0dc67acfe81a59688 | [
"MIT"
] | null | null | null | migrations/versions/a725247ae9b2_initial_migration.py | np1e/whoami_backend | d91540885c81194489b4b9d0dc67acfe81a59688 | [
"MIT"
] | null | null | null | """initial migration
Revision ID: a725247ae9b2
Revises:
Create Date: 2021-04-08 08:45:24.584283
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'a725247ae9b2'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
    """Create the initial schema: collections, games, images, players, tags,
    users, characters, plus the association tables (tags, used_collections,
    votes) and the vote table.

    NOTE(review): every ``sa.ForeignKeyConstraint`` generated by Alembic is
    commented out below, so referential integrity is NOT enforced at the
    database level — confirm this is intentional (e.g. a SQLite/circular-FK
    workaround) before relying on it.
    """
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('collection',
    sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),
    sa.Column('name', sa.String(length=30), nullable=False),
    sa.Column('default', sa.Boolean(), nullable=True),
    sa.PrimaryKeyConstraint('id'),
    sa.UniqueConstraint('name')
    )
    # 'gamestate' ENUM is created implicitly on backends with native enums.
    op.create_table('game',
    sa.Column('id', sa.String(length=36), nullable=False),
    sa.Column('key', sa.String(length=16), nullable=True),
    sa.Column('created', sa.DateTime(), nullable=False),
    sa.Column('state', sa.Enum('WAITING', 'RUNNING', 'FINISHED', name='gamestate'), nullable=True),
    sa.Column('max_players', sa.Integer(), nullable=True),
    sa.Column('current_player_id', sa.String(length=36), nullable=True),
    sa.Column('awaitingGuessVote', sa.Boolean(), nullable=True),
    #sa.ForeignKeyConstraint(['current_player_id'], ['player._id'], ),
    sa.PrimaryKeyConstraint('id'),
    sa.UniqueConstraint('id'),
    sa.UniqueConstraint('key')
    )
    op.create_table('image',
    sa.Column('id', sa.String(length=36), nullable=False),
    sa.Column('image_url', sa.String(), nullable=True),
    sa.Column('license', sa.String(), nullable=True),
    sa.Column('creator', sa.String(), nullable=True),
    sa.PrimaryKeyConstraint('id'),
    sa.UniqueConstraint('id')
    )
    op.create_table('player',
    sa.Column('_id', sa.String(length=36), nullable=False),
    sa.Column('username', sa.String(length=20), nullable=False),
    sa.Column('game_id', sa.String(length=36), nullable=True),
    sa.Column('connected', sa.Boolean(), nullable=True),
    sa.Column('ready', sa.Boolean(), nullable=True),
    sa.Column('sid', sa.String(), nullable=True),
    sa.Column('is_creator', sa.Boolean(), nullable=True),
    sa.Column('character_id', sa.Integer(), nullable=True),
    sa.Column('guesses', sa.Integer(), nullable=True),
    sa.Column('guessed', sa.Boolean(), nullable=True),
    #sa.ForeignKeyConstraint(['character_id'], ['character.id'], ),
    #sa.ForeignKeyConstraint(['game_id'], ['game.id'], ),
    sa.PrimaryKeyConstraint('_id'),
    sa.UniqueConstraint('_id')
    )
    op.create_table('tag',
    sa.Column('name', sa.String(length=20), nullable=False),
    sa.PrimaryKeyConstraint('name'),
    sa.UniqueConstraint('name')
    )
    op.create_table('user',
    sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),
    sa.Column('username', sa.String(length=18), nullable=False),
    sa.Column('password_hash', sa.String(length=94), nullable=True),
    sa.PrimaryKeyConstraint('id'),
    sa.UniqueConstraint('username')
    )
    op.create_table('character',
    sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),
    sa.Column('name', sa.String(length=80), nullable=False),
    sa.Column('collection_id', sa.Integer(), nullable=True),
    sa.Column('image_id', sa.String(length=36), nullable=True),
    #sa.ForeignKeyConstraint(['collection_id'], ['collection.id'], ),
    #sa.ForeignKeyConstraint(['image_id'], ['image.id'], ),
    sa.PrimaryKeyConstraint('id'),
    sa.UniqueConstraint('name')
    )
    # Association table: tag <-> collection.
    op.create_table('tags',
    sa.Column('tag_name', sa.String(length=20), nullable=False),
    sa.Column('collection_id', sa.Integer(), nullable=False),
    #sa.ForeignKeyConstraint(['collection_id'], ['collection.id'], ),
    #sa.ForeignKeyConstraint(['tag_name'], ['tag.name'], ),
    sa.PrimaryKeyConstraint('tag_name', 'collection_id')
    )
    # Association table: game <-> collection.
    op.create_table('used_collections',
    sa.Column('game_id', sa.String(length=36), nullable=False),
    sa.Column('collection_id', sa.Integer(), nullable=False),
    #sa.ForeignKeyConstraint(['collection_id'], ['collection.id'], ),
    #sa.ForeignKeyConstraint(['game_id'], ['game.id'], ),
    sa.PrimaryKeyConstraint('game_id', 'collection_id')
    )
    op.create_table('vote',
    sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),
    sa.Column('result', sa.Boolean(), nullable=True),
    sa.Column('game_id', sa.String(length=36), nullable=True),
    sa.Column('player_id', sa.String(length=36), nullable=True),
    #sa.ForeignKeyConstraint(['game_id'], ['game.id'], ),
    #sa.ForeignKeyConstraint(['player_id'], ['player._id'], ),
    sa.PrimaryKeyConstraint('id')
    )
    # Association table: game <-> vote.
    op.create_table('votes',
    sa.Column('game_id', sa.String(length=36), nullable=False),
    sa.Column('vote_id', sa.Integer(), nullable=False),
    #sa.ForeignKeyConstraint(['game_id'], ['game.id'], ),
    #sa.ForeignKeyConstraint(['vote_id'], ['vote.id'], ),
    sa.PrimaryKeyConstraint('game_id', 'vote_id')
    )
    # ### end Alembic commands ###
def downgrade():
    """Drop every table created by :func:`upgrade`.

    Tables are dropped in reverse dependency order (association and child
    tables first) so the downgrade also works on backends that enforce
    foreign keys.
    """
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_table('votes')
    op.drop_table('vote')
    op.drop_table('used_collections')
    op.drop_table('tags')
    op.drop_table('character')
    op.drop_table('user')
    op.drop_table('tag')
    op.drop_table('player')
    op.drop_table('image')
    op.drop_table('game')
    op.drop_table('collection')
    # Alembic's autogenerate does not emit a drop for the standalone ENUM
    # type it created for game.state, which would otherwise linger on
    # PostgreSQL after downgrade.  ``checkfirst=True`` makes this a no-op
    # on backends (e.g. SQLite) where no separate type exists.
    sa.Enum(name='gamestate').drop(op.get_bind(), checkfirst=True)
    # ### end Alembic commands ###
| 39.985075 | 99 | 0.66424 | from alembic import op
import sqlalchemy as sa
revision = 'a725247ae9b2'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
('default', sa.Boolean(), nullable=True),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('name')
)
op.create_table('game',
sa.Column('id', sa.String(length=36), nullable=False),
sa.Column('key', sa.String(length=16), nullable=True),
sa.Column('created', sa.DateTime(), nullable=False),
sa.Column('state', sa.Enum('WAITING', 'RUNNING', 'FINISHED', name='gamestate'), nullable=True),
sa.Column('max_players', sa.Integer(), nullable=True),
sa.Column('current_player_id', sa.String(length=36), nullable=True),
sa.Column('awaitingGuessVote', sa.Boolean(), nullable=True),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('id'),
sa.UniqueConstraint('key')
)
op.create_table('image',
sa.Column('id', sa.String(length=36), nullable=False),
sa.Column('image_url', sa.String(), nullable=True),
sa.Column('license', sa.String(), nullable=True),
sa.Column('creator', sa.String(), nullable=True),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('id')
)
op.create_table('player',
sa.Column('_id', sa.String(length=36), nullable=False),
sa.Column('username', sa.String(length=20), nullable=False),
sa.Column('game_id', sa.String(length=36), nullable=True),
sa.Column('connected', sa.Boolean(), nullable=True),
sa.Column('ready', sa.Boolean(), nullable=True),
sa.Column('sid', sa.String(), nullable=True),
sa.Column('is_creator', sa.Boolean(), nullable=True),
sa.Column('character_id', sa.Integer(), nullable=True),
sa.Column('guesses', sa.Integer(), nullable=True),
sa.Column('guessed', sa.Boolean(), nullable=True),
sa.PrimaryKeyConstraint('_id'),
sa.UniqueConstraint('_id')
)
op.create_table('tag',
sa.Column('name', sa.String(length=20), nullable=False),
sa.PrimaryKeyConstraint('name'),
sa.UniqueConstraint('name')
)
op.create_table('user',
sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),
sa.Column('username', sa.String(length=18), nullable=False),
sa.Column('password_hash', sa.String(length=94), nullable=True),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('username')
)
op.create_table('character',
sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),
sa.Column('name', sa.String(length=80), nullable=False),
sa.Column('collection_id', sa.Integer(), nullable=True),
sa.Column('image_id', sa.String(length=36), nullable=True),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('name')
)
op.create_table('tags',
sa.Column('tag_name', sa.String(length=20), nullable=False),
sa.Column('collection_id', sa.Integer(), nullable=False),
sa.PrimaryKeyConstraint('tag_name', 'collection_id')
)
op.create_table('used_collections',
sa.Column('game_id', sa.String(length=36), nullable=False),
sa.Column('collection_id', sa.Integer(), nullable=False),
sa.PrimaryKeyConstraint('game_id', 'collection_id')
)
op.create_table('vote',
sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),
sa.Column('result', sa.Boolean(), nullable=True),
sa.Column('game_id', sa.String(length=36), nullable=True),
sa.Column('player_id', sa.String(length=36), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_table('votes',
sa.Column('game_id', sa.String(length=36), nullable=False),
sa.Column('vote_id', sa.Integer(), nullable=False),
sa.PrimaryKeyConstraint('game_id', 'vote_id')
)
le('image')
op.drop_table('game')
op.drop_table('collection')
| true | true |
f72f7a106d50691196cea4aa0e76813201841350 | 2,499 | py | Python | ndscheduler/default_settings.py | SquisEat/ndscheduler | 14df862cdafca37f46009419b7627989978c5803 | [
"BSD-2-Clause"
] | null | null | null | ndscheduler/default_settings.py | SquisEat/ndscheduler | 14df862cdafca37f46009419b7627989978c5803 | [
"BSD-2-Clause"
] | null | null | null | ndscheduler/default_settings.py | SquisEat/ndscheduler | 14df862cdafca37f46009419b7627989978c5803 | [
"BSD-2-Clause"
] | null | null | null | """Default settings."""
import logging
import os
#
# Development mode or production mode
# If DEBUG is True, then auto-reload is enabled, i.e., when code is modified, server will be
# reloaded immediately
#
DEBUG = True
#
# Static Assets
#
# The web UI is a single page app. All javascripts/css files should be in STATIC_DIR_PATH
#
STATIC_DIR_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'static')
TEMPLATE_DIR_PATH = STATIC_DIR_PATH
APP_INDEX_PAGE = 'index.html'
#
# Server setup
#
HTTP_PORT = 7777
HTTP_ADDRESS = '127.0.0.1'
TORNADO_MAX_WORKERS = 8
#
# ApScheduler settings
#
THREAD_POOL_SIZE = 4
JOB_MAX_INSTANCES = 3
JOB_COALESCE = True
TIMEZONE = 'UTC'
# When a job is misfired -- A job were to run at a specific time, but due to some
# reason (e.g., scheduler restart), we miss that run.
#
# By default, if a job is misfired within 1 hour, the scheduler will rerun it.
# Otherwise, if it's misfired over 1 hour, the scheduler will not rerun it.
JOB_MISFIRE_GRACE_SEC = 3600
#
# Database settings
#
JOBS_TABLENAME = 'scheduler_jobs'
EXECUTIONS_TABLENAME = 'scheduler_execution'
AUDIT_LOGS_TABLENAME = 'scheduler_jobauditlog'
DATABASE_TABLENAMES = {
'jobs_tablename': JOBS_TABLENAME,
'executions_tablename': EXECUTIONS_TABLENAME,
'auditlogs_tablename': AUDIT_LOGS_TABLENAME
}
# See different database providers in ndscheduler/core/datastore/providers/
# SQLite
#
DATABASE_CLASS = 'ndscheduler.corescheduler.datastore.providers.sqlite.DatastoreSqlite'
DATABASE_CONFIG_DICT = {
'file_path': 'datastore.db'
}
# Postgres
#
# DATABASE_CLASS = 'ndscheduler.corescheduler.datastore.providers.postgres.DatastorePostgres'
# DATABASE_CONFIG_DICT = {
# 'user': 'username',
# 'password': '',
# 'hostname': 'localhost',
# 'port': 5432,
# 'database': 'scheduler',
# 'sslmode': 'disable'
# }
# MySQL
#
# DATABASE_CLASS = 'ndscheduler.corescheduler.datastore.providers.mysql.DatastoreMySQL'
# DATABASE_CONFIG_DICT = {
# 'user': 'username',
# 'password': '',
# 'hostname': 'localhost',
# 'port': 3306,
# 'database': 'scheduler'
# }
# ndschedule is based on apscheduler. Here we can customize the apscheduler's main scheduler class
# Please see ndscheduler/core/scheduler/base.py
SCHEDULER_CLASS = 'ndscheduler.corescheduler.core.base.BaseScheduler'
#
# Set logging level
#
logging.getLogger(__name__).setLevel(logging.INFO)
# Packages that contains job classes, e.g., simple_scheduler.jobs
JOB_CLASS_PACKAGES = []
| 24.262136 | 98 | 0.735494 |
import logging
import os
DEBUG = True
STATIC_DIR_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'static')
TEMPLATE_DIR_PATH = STATIC_DIR_PATH
APP_INDEX_PAGE = 'index.html'
HTTP_PORT = 7777
HTTP_ADDRESS = '127.0.0.1'
TORNADO_MAX_WORKERS = 8
THREAD_POOL_SIZE = 4
JOB_MAX_INSTANCES = 3
JOB_COALESCE = True
TIMEZONE = 'UTC'
JOB_MISFIRE_GRACE_SEC = 3600
#
# Database settings
#
JOBS_TABLENAME = 'scheduler_jobs'
EXECUTIONS_TABLENAME = 'scheduler_execution'
AUDIT_LOGS_TABLENAME = 'scheduler_jobauditlog'
DATABASE_TABLENAMES = {
'jobs_tablename': JOBS_TABLENAME,
'executions_tablename': EXECUTIONS_TABLENAME,
'auditlogs_tablename': AUDIT_LOGS_TABLENAME
}
# See different database providers in ndscheduler/core/datastore/providers/
# SQLite
#
DATABASE_CLASS = 'ndscheduler.corescheduler.datastore.providers.sqlite.DatastoreSqlite'
DATABASE_CONFIG_DICT = {
'file_path': 'datastore.db'
}
# Postgres
#
# DATABASE_CLASS = 'ndscheduler.corescheduler.datastore.providers.postgres.DatastorePostgres'
# DATABASE_CONFIG_DICT = {
# 'user': 'username',
# 'password': '',
# 'hostname': 'localhost',
# 'port': 5432,
# 'database': 'scheduler',
# 'sslmode': 'disable'
# }
# MySQL
#
# DATABASE_CLASS = 'ndscheduler.corescheduler.datastore.providers.mysql.DatastoreMySQL'
# DATABASE_CONFIG_DICT = {
# 'user': 'username',
# 'password': '',
# 'hostname': 'localhost',
# 'port': 3306,
# 'database': 'scheduler'
# }
# ndschedule is based on apscheduler. Here we can customize the apscheduler's main scheduler class
SCHEDULER_CLASS = 'ndscheduler.corescheduler.core.base.BaseScheduler'
logging.getLogger(__name__).setLevel(logging.INFO)
JOB_CLASS_PACKAGES = []
| true | true |
f72f7a7cce9f58fe52ac8e93e43957260898eeda | 1,281 | py | Python | tests/test_utils/test_import_namespaces.py | rajshruti18/biolinkml | 451e71c9d3fd11aa3b08c6a713d9ab8b127ece77 | [
"CC0-1.0"
] | 25 | 2019-07-05T01:16:18.000Z | 2021-03-22T20:49:25.000Z | tests/test_utils/test_import_namespaces.py | rajshruti18/biolinkml | 451e71c9d3fd11aa3b08c6a713d9ab8b127ece77 | [
"CC0-1.0"
] | 299 | 2019-03-05T15:15:30.000Z | 2021-04-08T23:25:41.000Z | tests/test_utils/test_import_namespaces.py | rajshruti18/biolinkml | 451e71c9d3fd11aa3b08c6a713d9ab8b127ece77 | [
"CC0-1.0"
] | 19 | 2019-05-23T17:46:47.000Z | 2021-03-25T06:45:55.000Z | import unittest
from biolinkml.generators.shexgen import ShExGenerator
from tests.test_utils.environment import env
class URLImportTestCase(unittest.TestCase):
@unittest.skipIf(False, "Finish implementing this")
def test_import_from_url(self):
""" Validate namespace bindings """
shex = ShExGenerator(env.input_path('import_test_l2.yaml')).serialize()
self.assertEqual("""BASE <http://example.org/l2/>
PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
PREFIX xsd: <http://www.w3.org/2001/XMLSchema#>
PREFIX l1: <http://example.org/l1/>
PREFIX base: <http://example.org/b/>
l1:Int xsd:integer
base:String xsd:string
base:BaseClass CLOSED {
( $base:BaseClass_tes base:base_slot @base:String ? ;
rdf:type [ base:BaseClass ] ?
)
}
l1:L1Class (
CLOSED {
( $l1:L1Class_tes ( l1:l1_slot1 @base:String ? ;
l1:l1_slot2 @l1:Int ?
) ;
rdf:type [ l1:L1Class ] ?
)
} OR @<L2Class>
)
<L2Class> CLOSED {
( $<L2Class_tes> ( &l1:L1Class_tes ;
rdf:type [ l1:L1Class ] ? ;
<l2_slot1> @base:String ? ;
<l2_slot2> @l1:Int ?
) ;
rdf:type [ <L2Class> ] ?
)
}""", shex.strip())
if __name__ == '__main__':
unittest.main()
| 24.169811 | 79 | 0.613583 | import unittest
from biolinkml.generators.shexgen import ShExGenerator
from tests.test_utils.environment import env
class URLImportTestCase(unittest.TestCase):
@unittest.skipIf(False, "Finish implementing this")
def test_import_from_url(self):
shex = ShExGenerator(env.input_path('import_test_l2.yaml')).serialize()
self.assertEqual("""BASE <http://example.org/l2/>
PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
PREFIX xsd: <http://www.w3.org/2001/XMLSchema#>
PREFIX l1: <http://example.org/l1/>
PREFIX base: <http://example.org/b/>
l1:Int xsd:integer
base:String xsd:string
base:BaseClass CLOSED {
( $base:BaseClass_tes base:base_slot @base:String ? ;
rdf:type [ base:BaseClass ] ?
)
}
l1:L1Class (
CLOSED {
( $l1:L1Class_tes ( l1:l1_slot1 @base:String ? ;
l1:l1_slot2 @l1:Int ?
) ;
rdf:type [ l1:L1Class ] ?
)
} OR @<L2Class>
)
<L2Class> CLOSED {
( $<L2Class_tes> ( &l1:L1Class_tes ;
rdf:type [ l1:L1Class ] ? ;
<l2_slot1> @base:String ? ;
<l2_slot2> @l1:Int ?
) ;
rdf:type [ <L2Class> ] ?
)
}""", shex.strip())
if __name__ == '__main__':
unittest.main()
| true | true |
f72f7ae3ee8c69c4af98bcfa7cfeb00f3ed16fef | 82 | py | Python | demo/demoproject/demoapp/views.py | mrc75/django-easy-reports | 5a92e8e1fd199ee3fd0fdfd5b47d84fe72861a0a | [
"BSD-1-Clause"
] | 2 | 2015-05-28T10:35:54.000Z | 2016-11-18T04:33:26.000Z | demo/demoproject/demoapp/views.py | mrc75/django-easy-reports | 5a92e8e1fd199ee3fd0fdfd5b47d84fe72861a0a | [
"BSD-1-Clause"
] | 1 | 2015-10-25T01:50:04.000Z | 2015-10-25T01:50:04.000Z | demo/demoproject/demoapp/views.py | saxix/django-easy-reports | 81679b0c49d728c198601f7ee3a726a66cae49b5 | [
"BSD-1-Clause"
] | null | null | null | from ereports.views import ReportIndex
class ReportsView(ReportIndex):
pass
| 13.666667 | 38 | 0.792683 | from ereports.views import ReportIndex
class ReportsView(ReportIndex):
pass
| true | true |
f72f7b324f2181b534f98b9140d1f67ec53e65b6 | 597 | py | Python | mms/utils/__init__.py | abhinavs95/mxnet-model-server | 901c1a9a2def8373cd9a91c8d2f47248eed281cc | [
"Apache-2.0"
] | 1 | 2019-01-10T20:56:25.000Z | 2019-01-10T20:56:25.000Z | mms/utils/__init__.py | frankfliu/mxnet-model-server | ce36c9e35efc17efe0fb79bb7019bdf3593131a5 | [
"Apache-2.0"
] | null | null | null | mms/utils/__init__.py | frankfliu/mxnet-model-server | ce36c9e35efc17efe0fb79bb7019bdf3593131a5 | [
"Apache-2.0"
] | null | null | null | # Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
# http://www.apache.org/licenses/LICENSE-2.0
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
"""
Util files for MMS
"""
| 42.642857 | 75 | 0.752094 | true | true | |
f72f7b9ba7292667009bfeeb0edba74ee8da34be | 74,707 | py | Python | lib/galaxy/webapps/galaxy/api/workflows.py | itisAliRH/galaxy | b3b693ea0788f773442c8481472a87f43ccb10d7 | [
"CC-BY-3.0"
] | null | null | null | lib/galaxy/webapps/galaxy/api/workflows.py | itisAliRH/galaxy | b3b693ea0788f773442c8481472a87f43ccb10d7 | [
"CC-BY-3.0"
] | null | null | null | lib/galaxy/webapps/galaxy/api/workflows.py | itisAliRH/galaxy | b3b693ea0788f773442c8481472a87f43ccb10d7 | [
"CC-BY-3.0"
] | null | null | null | """
API operations for Workflows
"""
import hashlib
import json
import logging
import os
from typing import (
Any,
Dict,
List,
Optional,
)
from fastapi import (
Body,
Path,
Query,
Response,
status,
)
from gxformat2._yaml import ordered_dump
from markupsafe import escape
from pydantic import Extra
from galaxy import (
exceptions,
model,
util,
)
from galaxy.files.uris import (
stream_url_to_str,
validate_uri_access,
)
from galaxy.managers.context import ProvidesUserContext
from galaxy.managers.jobs import (
fetch_job_states,
invocation_job_source_iter,
summarize_job_metrics,
)
from galaxy.managers.workflows import (
MissingToolsException,
RefactorRequest,
WorkflowCreateOptions,
WorkflowUpdateOptions,
)
from galaxy.model.item_attrs import UsesAnnotations
from galaxy.schema.fields import EncodedDatabaseIdField
from galaxy.schema.schema import (
AsyncFile,
AsyncTaskResultSummary,
SetSlugPayload,
ShareWithPayload,
ShareWithStatus,
SharingStatus,
StoreContentSource,
WorkflowSortByEnum,
WriteStoreToPayload,
)
from galaxy.structured_app import StructuredApp
from galaxy.tool_shed.galaxy_install.install_manager import InstallRepositoryManager
from galaxy.tools import recommendations
from galaxy.tools.parameters import populate_state
from galaxy.tools.parameters.basic import workflow_building_modes
from galaxy.util.sanitize_html import sanitize_html
from galaxy.version import VERSION
from galaxy.web import (
expose_api,
expose_api_anonymous,
expose_api_anonymous_and_sessionless,
expose_api_raw,
expose_api_raw_anonymous_and_sessionless,
format_return_as_json,
)
from galaxy.webapps.base.controller import (
SharableMixin,
url_for,
UsesStoredWorkflowMixin,
)
from galaxy.webapps.base.webapp import GalaxyWebTransaction
from galaxy.webapps.galaxy.services.base import (
ConsumesModelStores,
ServesExportStores,
)
from galaxy.webapps.galaxy.services.invocations import (
InvocationIndexPayload,
InvocationSerializationParams,
InvocationsService,
PrepareStoreDownloadPayload,
)
from galaxy.webapps.galaxy.services.workflows import (
WorkflowIndexPayload,
WorkflowsService,
)
from galaxy.workflow.extract import extract_workflow
from galaxy.workflow.modules import module_factory
from galaxy.workflow.run import queue_invoke
from galaxy.workflow.run_request import build_workflow_run_configs
from . import (
BaseGalaxyAPIController,
depends,
DependsOnTrans,
IndexQueryTag,
Router,
search_query_param,
)
# Module-level logger for this controller module.
log = logging.getLogger(__name__)

# FastAPI router collecting the workflow API endpoints under the "workflows" tag.
router = Router(tags=["workflows"])
class CreateInvocationFromStore(StoreContentSource):
    """Payload for creating a workflow invocation from a model store.

    Extends ``StoreContentSource`` with an optional target history; extra
    keys are allowed so additional run options can be passed through.
    """

    # Optional encoded id of the history to associate with the invocation.
    history_id: Optional[str]

    class Config:
        # Accept keys beyond the declared fields.
        extra = Extra.allow
class WorkflowsAPIController(
BaseGalaxyAPIController,
UsesStoredWorkflowMixin,
UsesAnnotations,
SharableMixin,
ServesExportStores,
ConsumesModelStores,
):
service: WorkflowsService = depends(WorkflowsService)
invocations_service: InvocationsService = depends(InvocationsService)
def __init__(self, app: StructuredApp):
super().__init__(app)
self.history_manager = app.history_manager
self.workflow_manager = app.workflow_manager
self.workflow_contents_manager = app.workflow_contents_manager
self.tool_recommendations = recommendations.ToolRecommendations()
@expose_api
def get_workflow_menu(self, trans: ProvidesUserContext, **kwd):
"""
Get workflows present in the tools panel
GET /api/workflows/menu
"""
user = trans.user
ids_in_menu = [x.stored_workflow_id for x in user.stored_workflow_menu_entries]
workflows = self.get_workflows_list(trans, **kwd)
return {"ids_in_menu": ids_in_menu, "workflows": workflows}
@expose_api
def set_workflow_menu(self, trans: GalaxyWebTransaction, payload=None, **kwd):
"""
Save workflow menu to be shown in the tool panel
PUT /api/workflows/menu
"""
payload = payload or {}
user = trans.user
workflow_ids = payload.get("workflow_ids")
if workflow_ids is None:
workflow_ids = []
elif type(workflow_ids) != list:
workflow_ids = [workflow_ids]
workflow_ids_decoded = []
# Decode the encoded workflow ids
for ids in workflow_ids:
workflow_ids_decoded.append(trans.security.decode_id(ids))
sess = trans.sa_session
# This explicit remove seems like a hack, need to figure out
# how to make the association do it automatically.
for m in user.stored_workflow_menu_entries:
sess.delete(m)
user.stored_workflow_menu_entries = []
q = sess.query(model.StoredWorkflow)
# To ensure id list is unique
seen_workflow_ids = set()
for wf_id in workflow_ids_decoded:
if wf_id in seen_workflow_ids:
continue
else:
seen_workflow_ids.add(wf_id)
m = model.StoredWorkflowMenuEntry()
m.stored_workflow = q.get(wf_id)
user.stored_workflow_menu_entries.append(m)
sess.flush()
message = "Menu updated."
trans.set_message(message)
return {"message": message, "status": "done"}
def get_workflows_list(
self,
trans: ProvidesUserContext,
missing_tools=False,
show_published=None,
show_shared=None,
show_hidden=False,
show_deleted=False,
**kwd,
):
"""
Displays a collection of workflows.
:param show_published: Optional boolean to include published workflows
If unspecified this behavior depends on whether the request
is coming from an authenticated session. The default is true
for annonymous API requests and false otherwise.
:type show_published: boolean
:param show_hidden: if True, show hidden workflows
:type show_hidden: boolean
:param show_deleted: if True, show deleted workflows
:type show_deleted: boolean
:param show_shared: Optional boolean to include shared workflows.
If unspecified this behavior depends on show_deleted/show_hidden.
Defaulting to false if show_hidden or show_deleted is true or else
false.
:param missing_tools: if True, include a list of missing tools per workflow
:type missing_tools: boolean
"""
show_published = util.string_as_bool_or_none(show_published)
show_hidden = util.string_as_bool(show_hidden)
show_deleted = util.string_as_bool(show_deleted)
missing_tools = util.string_as_bool(missing_tools)
show_shared = util.string_as_bool_or_none(show_shared)
payload = WorkflowIndexPayload(
show_published=show_published,
show_hidden=show_hidden,
show_deleted=show_deleted,
show_shared=show_shared,
missing_tools=missing_tools,
)
workflows, _ = self.service.index(trans, payload)
return workflows
    @expose_api_anonymous_and_sessionless
    def show(self, trans: GalaxyWebTransaction, id, **kwd):
        """
        GET /api/workflows/{encoded_workflow_id}

        Displays information needed to run a workflow.

        :param instance: true if fetch by Workflow ID instead of StoredWorkflow id, false
                         by default.
        :type  instance: boolean
        :param legacy: if truthy, serialize in "legacy" style instead of "instance".
        :param version: explicit version index to serialize; when omitted and
                        ``instance`` is true it is derived from the workflow id.
        :raises exceptions.ItemAccessibilityException: if the workflow is not
            importable and neither owned by nor shared with the current user.
        """
        stored_workflow = self.__get_stored_workflow(trans, id, **kwd)
        # Access control: non-owners (and non-admins) may only view workflows
        # that are importable or explicitly shared with them.
        if stored_workflow.importable is False and stored_workflow.user != trans.user and not trans.user_is_admin:
            if (
                trans.sa_session.query(model.StoredWorkflowUserShareAssociation)
                .filter_by(user=trans.user, stored_workflow=stored_workflow)
                .count()
                == 0
            ):
                message = "Workflow is neither importable, nor owned by or shared with current user"
                raise exceptions.ItemAccessibilityException(message)
        if kwd.get("legacy", False):
            style = "legacy"
        else:
            style = "instance"
        version = kwd.get("version")
        if version is None and util.string_as_bool(kwd.get("instance", "false")):
            # A Workflow instance may not be the latest workflow version attached to StoredWorkflow.
            # This figures out the correct version so that we return the correct Workflow and version.
            workflow_id = self.decode_id(id)
            for i, workflow in enumerate(reversed(stored_workflow.workflows)):
                if workflow.id == workflow_id:
                    version = i
                    break
        return self.workflow_contents_manager.workflow_to_dict(trans, stored_workflow, style=style, version=version)
@expose_api
def show_versions(self, trans: GalaxyWebTransaction, workflow_id, **kwds):
"""
GET /api/workflows/{encoded_workflow_id}/versions
:param instance: true if fetch by Workflow ID instead of StoredWorkflow id, false
by default.
:type instance: boolean
Lists all versions of this workflow.
"""
instance = util.string_as_bool(kwds.get("instance", "false"))
stored_workflow = self.workflow_manager.get_stored_accessible_workflow(
trans, workflow_id, by_stored_id=not instance
)
return [
{"version": i, "update_time": str(w.update_time), "steps": len(w.steps)}
for i, w in enumerate(reversed(stored_workflow.workflows))
]
    @expose_api
    def create(self, trans: GalaxyWebTransaction, payload=None, **kwd):
        """
        POST /api/workflows

        Create workflows in various ways. Exactly one of the creation-mode
        keys (``archive_source``, ``from_history_id``, ``from_path``,
        ``shared_workflow_id``, ``workflow``) must be present in ``payload``.

        :param from_history_id: Id of history to extract a workflow from.
        :type  from_history_id: str
        :param job_ids: If from_history_id is set - optional list of jobs to include when extracting a workflow from history
        :type  job_ids: str
        :param dataset_ids: If from_history_id is set - optional list of HDA "hid"s corresponding to workflow inputs when extracting a workflow from history
        :type  dataset_ids: str
        :param dataset_collection_ids: If from_history_id is set - optional list of HDCA "hid"s corresponding to workflow inputs when extracting a workflow from history
        :type  dataset_collection_ids: str
        :param workflow_name: If from_history_id is set - name of the workflow to create when extracting a workflow from history
        :type  workflow_name: str

        :raises exceptions.RequestParameterMissingException: no creation mode given.
        :raises exceptions.RequestParameterInvalidException: more than one mode given.
        :raises exceptions.RealUserRequiredException: caller is the bootstrap admin.
        """
        # The mutually-exclusive ways a workflow may be created.
        ways_to_create = {
            "archive_source",
            "from_history_id",
            "from_path",
            "shared_workflow_id",
            "workflow",
        }
        if trans.user_is_bootstrap_admin:
            raise exceptions.RealUserRequiredException("Only real users can create or run workflows.")
        # Enforce exactly one creation mode.
        if payload is None or len(ways_to_create.intersection(payload)) == 0:
            message = f"One parameter among - {', '.join(ways_to_create)} - must be specified"
            raise exceptions.RequestParameterMissingException(message)
        if len(ways_to_create.intersection(payload)) > 1:
            message = f"Only one parameter among - {', '.join(ways_to_create)} - must be specified"
            raise exceptions.RequestParameterInvalidException(message)
        if "archive_source" in payload:
            # Import from an archive: a file:// path, a TRS tool reference,
            # a remote URL, or an uploaded file.
            archive_source = payload["archive_source"]
            archive_file = payload.get("archive_file")
            archive_data = None
            if archive_source:
                validate_uri_access(archive_source, trans.user_is_admin, trans.app.config.fetch_url_allowlist_ips)
                if archive_source.startswith("file://"):
                    # Local path: delegate to the from_path import route.
                    workflow_src = {"src": "from_path", "path": archive_source[len("file://") :]}
                    payload["workflow"] = workflow_src
                    return self.__api_import_new_workflow(trans, payload, **kwd)
                elif archive_source == "trs_tool":
                    # Fetch the workflow descriptor from a TRS server.
                    trs_server = payload.get("trs_server")
                    trs_tool_id = payload.get("trs_tool_id")
                    trs_version_id = payload.get("trs_version_id")
                    import_source = None
                    archive_data = self.app.trs_proxy.get_version_descriptor(trs_server, trs_tool_id, trs_version_id)
                else:
                    try:
                        archive_data = stream_url_to_str(
                            archive_source, trans.app.file_sources, prefix="gx_workflow_download"
                        )
                        import_source = "URL"
                    except Exception:
                        raise exceptions.MessageException(f"Failed to open URL '{escape(archive_source)}'.")
            elif hasattr(archive_file, "file"):
                # Uploaded file instead of a URL.
                uploaded_file = archive_file.file
                uploaded_file_name = uploaded_file.name
                if os.path.getsize(os.path.abspath(uploaded_file_name)) > 0:
                    archive_data = util.unicodify(uploaded_file.read())
                    import_source = "uploaded file"
                else:
                    raise exceptions.MessageException("You attempted to upload an empty file.")
            else:
                raise exceptions.MessageException("Please provide a URL or file.")
            return self.__api_import_from_archive(trans, archive_data, import_source, payload=payload)
        if "from_history_id" in payload:
            # Extract a new workflow from an existing history's jobs/datasets.
            from_history_id = payload.get("from_history_id")
            from_history_id = self.decode_id(from_history_id)
            history = self.history_manager.get_accessible(from_history_id, trans.user, current_history=trans.history)
            job_ids = [self.decode_id(_) for _ in payload.get("job_ids", [])]
            dataset_ids = payload.get("dataset_ids", [])
            dataset_collection_ids = payload.get("dataset_collection_ids", [])
            workflow_name = payload["workflow_name"]
            stored_workflow = extract_workflow(
                trans=trans,
                user=trans.user,
                history=history,
                job_ids=job_ids,
                dataset_ids=dataset_ids,
                dataset_collection_ids=dataset_collection_ids,
                workflow_name=workflow_name,
            )
            item = stored_workflow.to_dict(value_mapper={"id": trans.security.encode_id})
            item["url"] = url_for("workflow", id=item["id"])
            return item
        if "from_path" in payload:
            # Import from a server-side path, optionally addressing an object
            # inside a multi-workflow document.
            from_path = payload.get("from_path")
            object_id = payload.get("object_id")
            workflow_src = {"src": "from_path", "path": from_path}
            if object_id is not None:
                workflow_src["object_id"] = object_id
            payload["workflow"] = workflow_src
            return self.__api_import_new_workflow(trans, payload, **kwd)
        if "shared_workflow_id" in payload:
            # Copy a workflow another user shared with this one.
            workflow_id = payload["shared_workflow_id"]
            return self.__api_import_shared_workflow(trans, workflow_id, payload)
        if "workflow" in payload:
            return self.__api_import_new_workflow(trans, payload, **kwd)
        # This was already raised above, but just in case...
        raise exceptions.RequestParameterMissingException("No method for workflow creation supplied.")
    @expose_api_raw_anonymous_and_sessionless
    def workflow_dict(self, trans: GalaxyWebTransaction, workflow_id, **kwd):
        """
        GET /api/workflows/{encoded_workflow_id}/download

        Returns a selected workflow.

        :type  style: str
        :param style: Style of export. The default is 'export', which is the meant to be used
                      with workflow import endpoints. Other formats such as 'instance', 'editor',
                      'run' are more tied to the GUI and should not be considered stable APIs.
                      The default format for 'export' is specified by the
                      admin with the `default_workflow_export_format` config
                      option. Style can be specified as either 'ga' or 'format2' directly
                      to be explicit about which format to download.
        :param instance: true if fetch by Workflow ID instead of StoredWorkflow id, false
                         by default.
        :type  instance: boolean
        :param format: set to 'json-download' to receive the workflow as a
                       file attachment (Content-Disposition header set).
        """
        stored_workflow = self.__get_stored_accessible_workflow(trans, workflow_id, **kwd)
        style = kwd.get("style", "export")
        download_format = kwd.get("format")
        version = kwd.get("version")
        history_id = kwd.get("history_id")
        history = None
        if history_id:
            history = self.history_manager.get_accessible(
                self.decode_id(history_id), trans.user, current_history=trans.history
            )
        ret_dict = self.workflow_contents_manager.workflow_to_dict(
            trans, stored_workflow, style=style, version=version, history=history
        )
        if download_format == "json-download":
            # Build a filesystem-safe filename (invalid chars -> "_", capped at 150).
            sname = stored_workflow.name
            sname = "".join(c in util.FILENAME_VALID_CHARS and c or "_" for c in sname)[0:150]
            # format-version "0.1" is the native .ga format; otherwise format2 JSON.
            if ret_dict.get("format-version", None) == "0.1":
                extension = "ga"
            else:
                extension = "gxwf.json"
            trans.response.headers[
                "Content-Disposition"
            ] = f'attachment; filename="Galaxy-Workflow-{sname}.{extension}"'
            trans.response.set_content_type("application/galaxy-archive")
        # format2 is serialized as YAML unless a JSON download was requested.
        if style == "format2" and download_format != "json-download":
            return ordered_dump(ret_dict)
        else:
            return format_return_as_json(ret_dict, pretty=True)
@expose_api
def delete(self, trans: ProvidesUserContext, id, **kwd):
"""
DELETE /api/workflows/{encoded_workflow_id}
Deletes a specified workflow
Author: rpark
copied from galaxy.web.controllers.workflows.py (delete)
"""
stored_workflow = self.__get_stored_workflow(trans, id, **kwd)
# check to see if user has permissions to selected workflow
if stored_workflow.user != trans.user and not trans.user_is_admin:
raise exceptions.InsufficientPermissionsException()
# Mark a workflow as deleted
stored_workflow.deleted = True
trans.sa_session.flush()
# TODO: Unsure of response message to let api know that a workflow was successfully deleted
return f"Workflow '{stored_workflow.name}' successfully deleted"
@expose_api
def import_new_workflow_deprecated(self, trans: GalaxyWebTransaction, payload, **kwd):
"""
POST /api/workflows/upload
Importing dynamic workflows from the api. Return newly generated workflow id.
Author: rpark
# currently assumes payload['workflow'] is a json representation of a workflow to be inserted into the database
Deprecated in favor to POST /api/workflows with encoded 'workflow' in
payload the same way.
"""
return self.__api_import_new_workflow(trans, payload, **kwd)
    @expose_api
    def update(self, trans: GalaxyWebTransaction, id, payload, **kwds):
        """
        PUT /api/workflows/{id}

        Update the workflow stored with ``id``.

        :type   id:      str
        :param  id:      the encoded id of the workflow to update
        :param  instance: true if fetch by Workflow ID instead of StoredWorkflow id, false by default.
        :type   instance: boolean
        :type   payload: dict
        :param  payload: a dictionary containing any or all the

            :workflow:

                the json description of the workflow as would be
                produced by GET workflows/<id>/download or
                given to `POST workflows`

                The workflow contents will be updated to target this.

            :name:

                optional string name for the workflow, if not present in payload,
                name defaults to existing name

            :annotation:

                optional string annotation for the workflow, if not present in payload,
                annotation defaults to existing annotation

            :menu_entry:

                optional boolean marking if the workflow should appear in the user\'s menu,
                if not present, workflow menu entries are not modified

            :tags:

                optional list containing list of tags to add to the workflow (overwriting
                existing tags), if not present, tags are not modified

            :from_tool_form:

                True iff encoded state coming in is encoded for the tool form.

        :rtype:     dict
        :returns:   serialized version of the workflow
        """
        stored_workflow = self.__get_stored_workflow(trans, id, **kwds)
        workflow_dict = payload.get("workflow", {})
        # Merge top-level payload keys into the workflow dict without
        # overriding keys the workflow dict already carries.
        workflow_dict.update({k: v for k, v in payload.items() if k not in workflow_dict})
        if workflow_dict:
            require_flush = False
            raw_workflow_description = self.__normalize_workflow(trans, workflow_dict)
            workflow_dict = raw_workflow_description.as_dict
            new_workflow_name = workflow_dict.get("name")
            old_workflow = stored_workflow.latest_workflow
            name_updated = new_workflow_name and new_workflow_name != stored_workflow.name
            steps_updated = "steps" in workflow_dict
            if name_updated and not steps_updated:
                # Rename only: create a copy of the latest workflow version
                # carrying the new (sanitized) name.
                sanitized_name = sanitize_html(new_workflow_name or old_workflow.name)
                workflow = old_workflow.copy(user=trans.user)
                workflow.stored_workflow = stored_workflow
                workflow.name = sanitized_name
                stored_workflow.name = sanitized_name
                stored_workflow.latest_workflow = workflow
                trans.sa_session.add(workflow, stored_workflow)
                require_flush = True
            # Flag updates: only touch the record when the value changes.
            if "hidden" in workflow_dict and stored_workflow.hidden != workflow_dict["hidden"]:
                stored_workflow.hidden = workflow_dict["hidden"]
                require_flush = True
            if "published" in workflow_dict and stored_workflow.published != workflow_dict["published"]:
                stored_workflow.published = workflow_dict["published"]
                require_flush = True
            if "importable" in workflow_dict and stored_workflow.importable != workflow_dict["importable"]:
                stored_workflow.importable = workflow_dict["importable"]
                require_flush = True
            if "annotation" in workflow_dict and not steps_updated:
                newAnnotation = sanitize_html(workflow_dict["annotation"])
                self.add_item_annotation(trans.sa_session, trans.user, stored_workflow, newAnnotation)
                require_flush = True
            if "menu_entry" in workflow_dict or "show_in_tool_panel" in workflow_dict:
                # Add or remove the workflow from the user's tool-panel menu.
                show_in_panel = workflow_dict.get("menu_entry") or workflow_dict.get("show_in_tool_panel")
                stored_workflow_menu_entries = trans.user.stored_workflow_menu_entries
                decoded_id = trans.security.decode_id(id)
                if show_in_panel:
                    workflow_ids = [wf.stored_workflow_id for wf in stored_workflow_menu_entries]
                    if decoded_id not in workflow_ids:
                        menu_entry = model.StoredWorkflowMenuEntry()
                        menu_entry.stored_workflow = stored_workflow
                        stored_workflow_menu_entries.append(menu_entry)
                        trans.sa_session.add(menu_entry)
                        require_flush = True
                else:
                    # remove if in list
                    entries = {x.stored_workflow_id: x for x in stored_workflow_menu_entries}
                    if decoded_id in entries:
                        stored_workflow_menu_entries.remove(entries[decoded_id])
                        require_flush = True
            # set tags
            if "tags" in workflow_dict:
                trans.app.tag_handler.set_tags_from_list(
                    user=trans.user, item=stored_workflow, new_tags_list=workflow_dict["tags"]
                )
            if require_flush:
                trans.sa_session.flush()
            if "steps" in workflow_dict:
                # A full workflow-content update (new steps/connections).
                try:
                    workflow_update_options = WorkflowUpdateOptions(**payload)
                    workflow, errors = self.workflow_contents_manager.update_workflow_from_raw_description(
                        trans,
                        stored_workflow,
                        raw_workflow_description,
                        workflow_update_options,
                    )
                except MissingToolsException:
                    raise exceptions.MessageException(
                        "This workflow contains missing tools. It cannot be saved until they have been removed from the workflow or installed."
                    )
        else:
            message = "Updating workflow requires dictionary containing 'workflow' attribute with new JSON description."
            raise exceptions.RequestParameterInvalidException(message)
        return self.workflow_contents_manager.workflow_to_dict(trans, stored_workflow, style="instance")
@expose_api
def refactor(self, trans, id, payload, **kwds):
"""
* PUT /api/workflows/{id}/refactor
updates the workflow stored with ``id``
:type id: str
:param id: the encoded id of the workflow to update
:param instance: true if fetch by Workflow ID instead of StoredWorkflow id, false
by default.
:type instance: boolean
:type payload: dict
:param payload: a dictionary containing list of actions to apply.
:rtype: dict
:returns: serialized version of the workflow
"""
stored_workflow = self.__get_stored_workflow(trans, id, **kwds)
refactor_request = RefactorRequest(**payload)
return self.workflow_contents_manager.refactor(trans, stored_workflow, refactor_request)
@expose_api
def build_module(self, trans: GalaxyWebTransaction, payload=None):
"""
POST /api/workflows/build_module
Builds module models for the workflow editor.
"""
if payload is None:
payload = {}
inputs = payload.get("inputs", {})
trans.workflow_building_mode = workflow_building_modes.ENABLED
module = module_factory.from_dict(trans, payload, from_tool_form=True)
if "tool_state" not in payload:
module_state: Dict[str, Any] = {}
populate_state(trans, module.get_inputs(), inputs, module_state, check=False)
module.recover_state(module_state, from_tool_form=True)
return {
"label": inputs.get("__label", ""),
"annotation": inputs.get("__annotation", ""),
"name": module.get_name(),
"tool_state": module.get_state(),
"content_id": module.get_content_id(),
"inputs": module.get_all_inputs(connectable_only=True),
"outputs": module.get_all_outputs(),
"config_form": module.get_config_form(),
"post_job_actions": module.get_post_job_actions(inputs),
}
@expose_api
def get_tool_predictions(self, trans: ProvidesUserContext, payload, **kwd):
"""
POST /api/workflows/get_tool_predictions
Fetch predicted tools for a workflow
:type payload: dict
:param payload:
a dictionary containing two parameters
'tool_sequence' - comma separated sequence of tool ids
'remote_model_url' - (optional) path to the deep learning model
"""
remote_model_url = payload.get("remote_model_url", trans.app.config.tool_recommendation_model_path)
tool_sequence = payload.get("tool_sequence", "")
if "tool_sequence" not in payload or remote_model_url is None:
return
tool_sequence, recommended_tools = self.tool_recommendations.get_predictions(
trans, tool_sequence, remote_model_url
)
return {"current_tool": tool_sequence, "predicted_data": recommended_tools}
#
# -- Helper methods --
#
def __api_import_from_archive(self, trans: GalaxyWebTransaction, archive_data, source=None, payload=None):
payload = payload or {}
try:
data = json.loads(archive_data)
except Exception:
if "GalaxyWorkflow" in archive_data:
data = {"yaml_content": archive_data}
else:
raise exceptions.MessageException("The data content does not appear to be a valid workflow.")
if not data:
raise exceptions.MessageException("The data content is missing.")
raw_workflow_description = self.__normalize_workflow(trans, data)
workflow_create_options = WorkflowCreateOptions(**payload)
workflow, missing_tool_tups = self._workflow_from_dict(
trans, raw_workflow_description, workflow_create_options, source=source
)
workflow_id = workflow.id
workflow = workflow.latest_workflow
response = {
"message": f"Workflow '{escape(workflow.name)}' imported successfully.",
"status": "success",
"id": trans.security.encode_id(workflow_id),
}
if workflow.has_errors:
response["message"] = "Imported, but some steps in this workflow have validation errors."
response["status"] = "error"
elif len(workflow.steps) == 0:
response["message"] = "Imported, but this workflow has no steps."
response["status"] = "error"
elif workflow.has_cycles:
response["message"] = "Imported, but this workflow contains cycles."
response["status"] = "error"
return response
def __api_import_new_workflow(self, trans: GalaxyWebTransaction, payload, **kwd):
data = payload["workflow"]
raw_workflow_description = self.__normalize_workflow(trans, data)
workflow_create_options = WorkflowCreateOptions(**payload)
workflow, missing_tool_tups = self._workflow_from_dict(
trans,
raw_workflow_description,
workflow_create_options,
)
# galaxy workflow newly created id
workflow_id = workflow.id
# api encoded, id
encoded_id = trans.security.encode_id(workflow_id)
item = workflow.to_dict(value_mapper={"id": trans.security.encode_id})
item["annotations"] = [x.annotation for x in workflow.annotations]
item["url"] = url_for("workflow", id=encoded_id)
item["owner"] = workflow.user.username
item["number_of_steps"] = len(workflow.latest_workflow.steps)
return item
def __normalize_workflow(self, trans: GalaxyWebTransaction, as_dict):
return self.workflow_contents_manager.normalize_workflow_format(trans, as_dict)
@expose_api
def import_shared_workflow_deprecated(self, trans: GalaxyWebTransaction, payload, **kwd):
"""
POST /api/workflows/import
Import a workflow shared by other users.
:param workflow_id: the workflow id (required)
:type workflow_id: str
:raises: exceptions.MessageException, exceptions.ObjectNotFound
"""
# Pull parameters out of payload.
workflow_id = payload.get("workflow_id", None)
if workflow_id is None:
raise exceptions.ObjectAttributeMissingException("Missing required parameter 'workflow_id'.")
self.__api_import_shared_workflow(trans, workflow_id, payload)
def __api_import_shared_workflow(self, trans: GalaxyWebTransaction, workflow_id, payload, **kwd):
try:
stored_workflow = self.get_stored_workflow(trans, workflow_id, check_ownership=False)
except Exception:
raise exceptions.ObjectNotFound(f"Malformed workflow id ( {workflow_id} ) specified.")
if stored_workflow.importable is False:
raise exceptions.ItemAccessibilityException(
"The owner of this workflow has disabled imports via this link."
)
elif stored_workflow.deleted:
raise exceptions.ItemDeletionException("You can't import this workflow because it has been deleted.")
imported_workflow = self._import_shared_workflow(trans, stored_workflow)
item = imported_workflow.to_dict(value_mapper={"id": trans.security.encode_id})
encoded_id = trans.security.encode_id(imported_workflow.id)
item["url"] = url_for("workflow", id=encoded_id)
return item
    @expose_api
    def invoke(self, trans: GalaxyWebTransaction, workflow_id, payload, **kwd):
        """
        POST /api/workflows/{encoded_workflow_id}/invocations

        Schedule the workflow specified by `workflow_id` to run.

        .. note:: This method takes the same arguments as
            :func:`galaxy.webapps.galaxy.api.workflows.WorkflowsAPIController.create` above.

        :returns: a serialized invocation, or a list of them when ``batch`` is set
        :raises: exceptions.MessageException, exceptions.RequestParameterInvalidException
        """
        # Get workflow + accessibility check.
        stored_workflow = self.__get_stored_accessible_workflow(trans, workflow_id, instance=kwd.get("instance", False))
        workflow = stored_workflow.latest_workflow
        run_configs = build_workflow_run_configs(trans, workflow, payload)
        is_batch = payload.get("batch")
        # Multiple run configs are only legal when the caller explicitly opted into batch mode.
        if not is_batch and len(run_configs) != 1:
            raise exceptions.RequestParameterInvalidException("Must specify 'batch' to use batch parameters.")
        require_exact_tool_versions = util.string_as_bool(payload.get("require_exact_tool_versions", "true"))
        # Refuse to invoke if any tool used by the workflow is not installed.
        tools = self.workflow_contents_manager.get_all_tools(workflow)
        missing_tools = [
            tool
            for tool in tools
            if not self.app.toolbox.has_tool(
                tool["tool_id"], tool_version=tool["tool_version"], exact=require_exact_tool_versions
            )
        ]
        if missing_tools:
            missing_tools_message = "Workflow was not invoked; the following required tools are not installed: "
            if require_exact_tool_versions:
                missing_tools_message += ", ".join(
                    [f"{tool['tool_id']} (version {tool['tool_version']})" for tool in missing_tools]
                )
            else:
                missing_tools_message += ", ".join([tool["tool_id"] for tool in missing_tools])
            raise exceptions.MessageException(missing_tools_message)
        invocations = []
        for run_config in run_configs:
            workflow_scheduler_id = payload.get("scheduler", None)
            # TODO: workflow scheduler hints
            work_request_params = dict(scheduler=workflow_scheduler_id)
            # flush=False: defer the database flush so all queued invocations
            # are persisted together below.
            workflow_invocation = queue_invoke(
                trans=trans,
                workflow=workflow,
                workflow_run_config=run_config,
                request_params=work_request_params,
                flush=False,
            )
            invocations.append(workflow_invocation)
        trans.sa_session.flush()
        invocations = [self.encode_all_ids(trans, invocation.to_dict(), recursive=True) for invocation in invocations]
        # Batch requests get the full list; single runs unwrap to one invocation.
        if is_batch:
            return invocations
        else:
            return invocations[0]
@expose_api
def index_invocations(self, trans: GalaxyWebTransaction, **kwd):
"""
GET /api/workflows/{workflow_id}/invocations
GET /api/invocations
Get the list of a user's workflow invocations. If workflow_id is supplied
(either via URL or query parameter) it should be an encoded StoredWorkflow id
and returned invocations will be restricted to that workflow. history_id (an encoded
History id) can be used to further restrict the query. If neither a workflow_id or
history_id is supplied, all the current user's workflow invocations will be indexed
(as determined by the invocation being executed on one of the user's histories).
:param workflow_id: an encoded stored workflow id to restrict query to
:type workflow_id: str
:param instance: true if fetch by Workflow ID instead of StoredWorkflow id, false
by default.
:type instance: boolean
:param history_id: an encoded history id to restrict query to
:type history_id: str
:param job_id: an encoded job id to restrict query to
:type job_id: str
:param user_id: an encoded user id to restrict query to, must be own id if not admin user
:type user_id: str
:param view: level of detail to return per invocation 'element' or 'collection'.
:type view: str
:param step_details: If 'view' is 'element', also include details on individual steps.
:type step_details: bool
:raises: exceptions.MessageException, exceptions.ObjectNotFound
"""
invocation_payload = InvocationIndexPayload(**kwd)
serialization_params = InvocationSerializationParams(**kwd)
invocations, total_matches = self.invocations_service.index(trans, invocation_payload, serialization_params)
trans.response.headers["total_matches"] = total_matches
return invocations
@expose_api_anonymous
def create_invocations_from_store(self, trans, payload, **kwd):
"""
POST /api/invocations/from_store
Create invocation(s) from a supplied model store.
Input can be an archive describing a Galaxy model store containing an
workflow invocation - for instance one created with with write_store
or prepare_store_download endpoint.
"""
create_payload = CreateInvocationFromStore(**payload)
serialization_params = InvocationSerializationParams(**payload)
# refactor into a service...
return self._create_from_store(trans, create_payload, serialization_params)
def _create_from_store(
self, trans, payload: CreateInvocationFromStore, serialization_params: InvocationSerializationParams
):
history = self.history_manager.get_owned(
self.decode_id(payload.history_id), trans.user, current_history=trans.history
)
object_tracker = self.create_objects_from_store(
trans,
payload,
history=history,
)
return self.invocations_service.serialize_workflow_invocations(
object_tracker.invocations_by_key.values(), serialization_params
)
@expose_api
def show_invocation(self, trans: GalaxyWebTransaction, invocation_id, **kwd):
"""
GET /api/workflows/{workflow_id}/invocations/{invocation_id}
GET /api/invocations/{invocation_id}
Get detailed description of workflow invocation
:param invocation_id: the invocation id (required)
:type invocation_id: str
:param step_details: fetch details about individual invocation steps
and populate a steps attribute in the resulting
dictionary. Defaults to false.
:type step_details: bool
:param legacy_job_state: If step_details is true, and this is set to true
populate the invocation step state with the job state
instead of the invocation step state. This will also
produce one step per job in mapping jobs to mimic the
older behavior with respect to collections. Partially
scheduled steps may provide incomplete information
and the listed steps outputs are the mapped over
step outputs but the individual job outputs
when this is set - at least for now.
:type legacy_job_state: bool
:raises: exceptions.MessageException, exceptions.ObjectNotFound
"""
decoded_workflow_invocation_id = self.decode_id(invocation_id)
workflow_invocation = self.workflow_manager.get_invocation(trans, decoded_workflow_invocation_id, eager=True)
if not workflow_invocation:
raise exceptions.ObjectNotFound()
return self.__encode_invocation(workflow_invocation, **kwd)
@expose_api
def cancel_invocation(self, trans: ProvidesUserContext, invocation_id, **kwd):
"""
DELETE /api/workflows/{workflow_id}/invocations/{invocation_id}
DELETE /api/invocations/{invocation_id}
Cancel the specified workflow invocation.
:param invocation_id: the usage id (required)
:type invocation_id: str
:raises: exceptions.MessageException, exceptions.ObjectNotFound
"""
decoded_workflow_invocation_id = self.decode_id(invocation_id)
workflow_invocation = self.workflow_manager.cancel_invocation(trans, decoded_workflow_invocation_id)
return self.__encode_invocation(workflow_invocation, **kwd)
@expose_api
def show_invocation_report(self, trans: GalaxyWebTransaction, invocation_id, **kwd):
"""
GET /api/workflows/{workflow_id}/invocations/{invocation_id}/report
GET /api/invocations/{invocation_id}/report
Get JSON summarizing invocation for reporting.
"""
kwd["format"] = "json"
return self.workflow_manager.get_invocation_report(trans, invocation_id, **kwd)
@expose_api_raw
def show_invocation_report_pdf(self, trans: GalaxyWebTransaction, invocation_id, **kwd):
"""
GET /api/workflows/{workflow_id}/invocations/{invocation_id}/report.pdf
GET /api/invocations/{invocation_id}/report.pdf
Get JSON summarizing invocation for reporting.
"""
kwd["format"] = "pdf"
trans.response.set_content_type("application/pdf")
return self.workflow_manager.get_invocation_report(trans, invocation_id, **kwd)
    def _generate_invocation_bco(self, trans: GalaxyWebTransaction, invocation_id, **kwd):
        """Assemble an IEEE-2791 BioCompute Object (BCO) dict for a workflow invocation.

        Walks the invocation's steps, jobs, datasets, tags, and annotations to
        populate the BCO domains (provenance, description, execution, io,
        parametric, error) and computes the spec-mandated etag over the result.

        :param invocation_id: encoded id of the workflow invocation
        :param kwd: optional BCO fields passed through verbatim (e.g. ``xref``,
            ``orcid``, ``spec_version``, ``prerequisite``, ``empirical_error``) -
            not validated here.
        :returns: the BCO as a plain dict
        """
        decoded_workflow_invocation_id = self.decode_id(invocation_id)
        workflow_invocation = self.workflow_manager.get_invocation(trans, decoded_workflow_invocation_id)
        history = workflow_invocation.history
        workflow = workflow_invocation.workflow
        stored_workflow = workflow.stored_workflow
        # pull in the user info from those who the history and workflow has been shared with
        contributing_users = [stored_workflow.user]
        # may want to extend this to have more reviewers.
        reviewing_users = [stored_workflow.user]
        encoded_workflow_id = trans.security.encode_id(stored_workflow.id)
        encoded_history_id = trans.security.encode_id(history.id)
        dict_workflow = json.loads(self.workflow_dict(trans, encoded_workflow_id))
        spec_version = kwd.get("spec_version", "https://w3id.org/ieee/ieee-2791-schema/2791object.json")
        # Locate this workflow's position among the stored workflow's versions.
        # NOTE(review): `current_version` stays unbound if no version matches, and
        # the loop variable `i` leaks out of this loop and is reused below - verify.
        for i, w in enumerate(reversed(stored_workflow.workflows)):
            if workflow == w:
                current_version = i
        contributors = []
        for contributing_user in contributing_users:
            contributor = {
                "orcid": kwd.get("xref", []),
                "name": contributing_user.username,
                "affiliation": "",
                "contribution": ["authoredBy"],
                "email": contributing_user.email,
            }
            contributors.append(contributor)
        reviewers = []
        for reviewer in reviewing_users:
            # NOTE(review): `contributing_user` here is the leftover loop variable
            # from the contributors loop above, not `reviewer` - looks like a
            # copy-paste slip; TODO confirm intended reviewer identity.
            reviewer = {
                "status": "approved",
                "reviewer_comment": "",
                "date": workflow_invocation.update_time.isoformat(),
                "reviewer": {
                    "orcid": kwd.get("orcid", []),
                    "name": contributing_user.username,
                    "affiliation": "",
                    "contribution": "curatedBy",
                    "email": contributing_user.email,
                },
            }
            reviewers.append(reviewer)
        provenance_domain = {
            "name": workflow.name,
            "version": current_version,
            "review": reviewers,
            "derived_from": url_for("workflow", id=encoded_workflow_id, qualified=True),
            "created": workflow_invocation.create_time.isoformat(),
            "modified": workflow_invocation.update_time.isoformat(),
            "contributors": contributors,
            "license": "https://spdx.org/licenses/CC-BY-4.0.html",
        }
        # Keywords are the union of workflow and history tag names (deduplicated).
        keywords = []
        for tag in stored_workflow.tags:
            keywords.append(tag.user_tname)
        for tag in history.tags:
            if tag.user_tname not in keywords:
                keywords.append(tag.user_tname)
        metrics = {}
        tools, input_subdomain, output_subdomain, pipeline_steps, software_prerequisites = [], [], [], [], []
        for step in workflow_invocation.steps:
            if step.workflow_step.type == "tool":
                workflow_outputs_list, output_list, input_list = set(), [], []
                for wo in step.workflow_step.workflow_outputs:
                    workflow_outputs_list.add(wo.output_name)
                for job in step.jobs:
                    # NOTE(review): `i` is the version index from the enumerate loop
                    # far above, not this step's index - every job overwrites the
                    # same metrics key. Looks like a bug; TODO confirm.
                    metrics[i] = summarize_job_metrics(trans, job)
                    for job_input in job.input_datasets:
                        if hasattr(job_input.dataset, "dataset_id"):
                            encoded_dataset_id = trans.security.encode_id(job_input.dataset.dataset_id)
                            input_obj = {
                                # TODO: that should maybe be a step prefix + element identifier where appropriate.
                                "filename": job_input.dataset.name,
                                "uri": url_for(
                                    "history_content",
                                    history_id=encoded_history_id,
                                    id=encoded_dataset_id,
                                    qualified=True,
                                ),
                                "access_time": job_input.dataset.create_time.isoformat(),
                            }
                            input_list.append(input_obj)
                    for job_output in job.output_datasets:
                        if hasattr(job_output.dataset, "dataset_id"):
                            encoded_dataset_id = trans.security.encode_id(job_output.dataset.dataset_id)
                            output_obj = {
                                "filename": job_output.dataset.name,
                                "uri": url_for(
                                    "history_content",
                                    history_id=encoded_history_id,
                                    id=encoded_dataset_id,
                                    qualified=True,
                                ),
                                "access_time": job_output.dataset.create_time.isoformat(),
                            }
                            output_list.append(output_obj)
                        # Datasets flagged as workflow outputs additionally land in the
                        # BCO io_domain output subdomain.
                        # NOTE(review): `encoded_dataset_id` may be stale here when the
                        # hasattr() guard above was false for this output - verify.
                        if job_output.name in workflow_outputs_list:
                            output = {
                                "mediatype": job_output.dataset.extension,
                                "uri": {
                                    "filename": job_output.dataset.name,
                                    "uri": url_for(
                                        "history_content",
                                        history_id=encoded_history_id,
                                        id=encoded_dataset_id,
                                        qualified=True,
                                    ),
                                    "access_time": job_output.dataset.create_time.isoformat(),
                                },
                            }
                            output_subdomain.append(output)
                workflow_step = step.workflow_step
                step_index = workflow_step.order_index
                current_step = dict_workflow["steps"][str(step_index)]
                pipeline_step = {
                    "step_number": step_index,
                    "name": current_step["name"],
                    "description": current_step["annotation"],
                    "version": current_step["tool_version"],
                    "prerequisite": kwd.get("prerequisite", []),
                    "input_list": input_list,
                    "output_list": output_list,
                }
                pipeline_steps.append(pipeline_step)
                # Best effort: steps missing content_id/tool_version/uuid are skipped.
                try:
                    software_prerequisite = {
                        "name": current_step["content_id"],
                        "version": current_step["tool_version"],
                        "uri": {"uri": current_step["content_id"], "access_time": current_step["uuid"]},
                    }
                    if software_prerequisite["uri"]["uri"] not in tools:
                        software_prerequisites.append(software_prerequisite)
                        tools.append(software_prerequisite["uri"]["uri"])
                except Exception:
                    continue
            # Plain dataset inputs feed the io_domain input subdomain.
            if step.workflow_step.type == "data_input" and step.output_datasets:
                for output_assoc in step.output_datasets:
                    encoded_dataset_id = trans.security.encode_id(output_assoc.dataset_id)
                    input_obj = {
                        "filename": step.workflow_step.label,
                        "uri": url_for(
                            "history_content", history_id=encoded_history_id, id=encoded_dataset_id, qualified=True
                        ),
                        "access_time": step.workflow_step.update_time.isoformat(),
                    }
                    input_subdomain.append(input_obj)
            # Collection inputs likewise, with a dataset_collection type marker in the URI.
            if step.workflow_step.type == "data_collection_input" and step.output_dataset_collections:
                for output_dataset_collection_association in step.output_dataset_collections:
                    encoded_dataset_id = trans.security.encode_id(
                        output_dataset_collection_association.dataset_collection_id
                    )
                    input_obj = {
                        "filename": step.workflow_step.label,
                        "uri": url_for(
                            "history_content",
                            history_id=encoded_history_id,
                            id=encoded_dataset_id,
                            type="dataset_collection",
                            qualified=True,
                        ),
                        "access_time": step.workflow_step.update_time.isoformat(),
                    }
                    input_subdomain.append(input_obj)
        # Free-text usability domain: workflow annotations followed by history annotations.
        usability_domain = []
        for a in stored_workflow.annotations:
            usability_domain.append(a.annotation)
        for h in history.annotations:
            usability_domain.append(h.annotation)
        # Parameters per step; steps without tool_inputs are skipped (best effort).
        parametric_domain = []
        for inv_step in workflow_invocation.steps:
            try:
                for k, v in inv_step.workflow_step.tool_inputs.items():
                    param, value, step = k, v, inv_step.workflow_step.order_index
                    parametric_domain.append({"param": param, "value": value, "step": step})
            except Exception:
                continue
        execution_domain = {
            "script_access_type": "a_galaxy_workflow",
            "script": [url_for("workflows", encoded_workflow_id=encoded_workflow_id, qualified=True)],
            "script_driver": "Galaxy",
            "software_prerequisites": software_prerequisites,
            "external_data_endpoints": [
                {"name": "Access to Galaxy", "url": url_for("/", qualified=True)},
                kwd.get("external_data_endpoints"),
            ],
            "environment_variables": kwd.get("environment_variables", {}),
        }
        extension = [
            {
                "extension_schema": "https://raw.githubusercontent.com/biocompute-objects/extension_domain/6d2cd8482e6075746984662edcf78b57d3d38065/galaxy/galaxy_extension.json",
                "galaxy_extension": {
                    "galaxy_url": url_for("/", qualified=True),
                    "galaxy_version": VERSION,
                    # TODO:
                    # 'aws_estimate': aws_estimate,
                    # 'job_metrics': metrics
                },
            }
        ]
        error_domain = {
            "empirical_error": kwd.get("empirical_error", []),
            "algorithmic_error": kwd.get("algorithmic_error", []),
        }
        bco_dict = {
            "provenance_domain": provenance_domain,
            "usability_domain": usability_domain,
            "extension_domain": extension,
            "description_domain": {
                "keywords": keywords,
                "xref": kwd.get("xref", []),
                "platform": ["Galaxy"],
                "pipeline_steps": pipeline_steps,
            },
            "execution_domain": execution_domain,
            "parametric_domain": parametric_domain,
            "io_domain": {
                "input_subdomain": input_subdomain,
                "output_subdomain": output_subdomain,
            },
            "error_domain": error_domain,
        }
        # Generate etag from the BCO excluding object_id and spec_version, as
        # specified in https://github.com/biocompute-objects/BCO_Specification/blob/main/docs/top-level.md#203-etag-etag
        etag = hashlib.sha256(json.dumps(bco_dict, sort_keys=True).encode()).hexdigest()
        bco_dict.update(
            {
                "object_id": url_for(
                    controller=f"api/invocations/{invocation_id}", action="biocompute", qualified=True
                ),
                "spec_version": spec_version,
                "etag": etag,
            }
        )
        return bco_dict
@expose_api
def export_invocation_bco(self, trans: GalaxyWebTransaction, invocation_id, **kwd):
"""
GET /api/invocations/{invocations_id}/biocompute
Return a BioCompute Object for the workflow invocation.
The BioCompute Object endpoints are in beta - important details such
as how inputs and outputs are represented, how the workflow is encoded,
and how author and version information is encoded, and how URLs are
generated will very likely change in important ways over time.
"""
return self._generate_invocation_bco(trans, invocation_id, **kwd)
@expose_api_raw
def download_invocation_bco(self, trans: GalaxyWebTransaction, invocation_id, **kwd):
"""
GET /api/invocations/{invocations_id}/biocompute/download
Returns a selected BioCompute Object as a file for download (HTTP
headers configured with filename and such).
The BioCompute Object endpoints are in beta - important details such
as how inputs and outputs are represented, how the workflow is encoded,
and how author and version information is encoded, and how URLs are
generated will very likely change in important ways over time.
"""
ret_dict = self._generate_invocation_bco(trans, invocation_id, **kwd)
trans.response.headers["Content-Disposition"] = f'attachment; filename="bco_{invocation_id}.json"'
trans.response.set_content_type("application/json")
return format_return_as_json(ret_dict, pretty=True)
@expose_api
def invocation_step(self, trans, invocation_id, step_id, **kwd):
"""
GET /api/workflows/{workflow_id}/invocations/{invocation_id}/steps/{step_id}
GET /api/invocations/{invocation_id}/steps/{step_id}
:param invocation_id: the invocation id (required)
:type invocation_id: str
:param step_id: encoded id of the WorkflowInvocationStep (required)
:type step_id: str
:param payload: payload containing update action information
for running workflow.
:raises: exceptions.MessageException, exceptions.ObjectNotFound
"""
decoded_invocation_step_id = self.decode_id(step_id)
invocation_step = self.workflow_manager.get_invocation_step(trans, decoded_invocation_step_id)
return self.__encode_invocation_step(trans, invocation_step)
@expose_api_anonymous_and_sessionless
def invocation_step_jobs_summary(self, trans: GalaxyWebTransaction, invocation_id, **kwd):
"""
GET /api/workflows/{workflow_id}/invocations/{invocation_id}/step_jobs_summary
GET /api/invocations/{invocation_id}/step_jobs_summary
return job state summary info aggregated across per step of the workflow invocation
Warning: We allow anyone to fetch job state information about any object they
can guess an encoded ID for - it isn't considered protected data. This keeps
polling IDs as part of state calculation for large histories and collections as
efficient as possible.
:param invocation_id: the invocation id (required)
:type invocation_id: str
:rtype: dict[]
:returns: an array of job summary object dictionaries for each step
"""
decoded_invocation_id = self.decode_id(invocation_id)
ids = []
types = []
for (job_source_type, job_source_id, _) in invocation_job_source_iter(trans.sa_session, decoded_invocation_id):
ids.append(job_source_id)
types.append(job_source_type)
return [self.encode_all_ids(trans, s) for s in fetch_job_states(trans.sa_session, ids, types)]
@expose_api_anonymous_and_sessionless
def invocation_jobs_summary(self, trans: GalaxyWebTransaction, invocation_id, **kwd):
"""
GET /api/workflows/{workflow_id}/invocations/{invocation_id}/jobs_summary
GET /api/invocations/{invocation_id}/jobs_summary
return job state summary info aggregated across all current jobs of workflow invocation
Warning: We allow anyone to fetch job state information about any object they
can guess an encoded ID for - it isn't considered protected data. This keeps
polling IDs as part of state calculation for large histories and collections as
efficient as possible.
:param invocation_id: the invocation id (required)
:type invocation_id: str
:rtype: dict
:returns: a job summary object merged for all steps in workflow invocation
"""
ids = [self.decode_id(invocation_id)]
types = ["WorkflowInvocation"]
return [self.encode_all_ids(trans, s) for s in fetch_job_states(trans.sa_session, ids, types)][0]
@expose_api
def update_invocation_step(self, trans: GalaxyWebTransaction, invocation_id, step_id, payload, **kwd):
"""
PUT /api/workflows/{workflow_id}/invocations/{invocation_id}/steps/{step_id}
PUT /api/invocations/{invocation_id}/steps/{step_id}
Update state of running workflow step invocation - still very nebulous
but this would be for stuff like confirming paused steps can proceed
etc....
:param invocation_id: the usage id (required)
:type invocation_id: str
:param step_id: encoded id of the WorkflowInvocationStep (required)
:type step_id: str
:raises: exceptions.MessageException, exceptions.ObjectNotFound
"""
decoded_invocation_step_id = self.decode_id(step_id)
action = payload.get("action", None)
invocation_step = self.workflow_manager.update_invocation_step(
trans,
decoded_invocation_step_id,
action=action,
)
return self.__encode_invocation_step(trans, invocation_step)
def _workflow_from_dict(self, trans, data, workflow_create_options, source=None):
"""Creates a workflow from a dict.
Created workflow is stored in the database and returned.
"""
publish = workflow_create_options.publish
importable = workflow_create_options.is_importable
if publish and not importable:
raise exceptions.RequestParameterInvalidException("Published workflow must be importable.")
workflow_contents_manager = self.app.workflow_contents_manager
raw_workflow_description = workflow_contents_manager.ensure_raw_description(data)
created_workflow = workflow_contents_manager.build_workflow_from_raw_description(
trans,
raw_workflow_description,
workflow_create_options,
source=source,
)
if importable:
self._make_item_accessible(trans.sa_session, created_workflow.stored_workflow)
trans.sa_session.flush()
self._import_tools_if_needed(trans, workflow_create_options, raw_workflow_description)
return created_workflow.stored_workflow, created_workflow.missing_tools
def _import_tools_if_needed(self, trans, workflow_create_options, raw_workflow_description):
if not workflow_create_options.import_tools:
return
if not trans.user_is_admin:
raise exceptions.AdminRequiredException()
data = raw_workflow_description.as_dict
tools = {}
for key in data["steps"]:
item = data["steps"][key]
if item is not None:
if "tool_shed_repository" in item:
tool_shed_repository = item["tool_shed_repository"]
if (
"owner" in tool_shed_repository
and "changeset_revision" in tool_shed_repository
and "name" in tool_shed_repository
and "tool_shed" in tool_shed_repository
):
toolstr = (
tool_shed_repository["owner"]
+ tool_shed_repository["changeset_revision"]
+ tool_shed_repository["name"]
+ tool_shed_repository["tool_shed"]
)
tools[toolstr] = tool_shed_repository
irm = InstallRepositoryManager(self.app)
install_options = workflow_create_options.install_options
for k in tools:
item = tools[k]
tool_shed_url = f"https://{item['tool_shed']}/"
name = item["name"]
owner = item["owner"]
changeset_revision = item["changeset_revision"]
irm.install(tool_shed_url, name, owner, changeset_revision, install_options)
def __encode_invocation_step(self, trans: ProvidesUserContext, invocation_step):
return self.encode_all_ids(trans, invocation_step.to_dict("element"), True)
def __get_stored_accessible_workflow(self, trans, workflow_id, **kwd):
instance = util.string_as_bool(kwd.get("instance", "false"))
return self.workflow_manager.get_stored_accessible_workflow(trans, workflow_id, by_stored_id=not instance)
def __get_stored_workflow(self, trans, workflow_id, **kwd):
instance = util.string_as_bool(kwd.get("instance", "false"))
return self.workflow_manager.get_stored_workflow(trans, workflow_id, by_stored_id=not instance)
def __encode_invocation(self, invocation, **kwd):
params = InvocationSerializationParams(**kwd)
return self.invocations_service.serialize_workflow_invocation(invocation, params)
# Reusable FastAPI path/query parameter declarations for the workflow endpoints below.
StoredWorkflowIDPathParam: EncodedDatabaseIdField = Path(
    ..., title="Stored Workflow ID", description="The encoded database identifier of the Stored Workflow."
)

InvocationIDPathParam: EncodedDatabaseIdField = Path(
    ..., title="Invocation ID", description="The encoded database identifier of the Invocation."
)

DeletedQueryParam: bool = Query(
    default=False, title="Display deleted", description="Whether to restrict result to deleted workflows."
)

HiddenQueryParam: bool = Query(
    default=False, title="Display hidden", description="Whether to restrict result to hidden workflows."
)

MissingToolsQueryParam: bool = Query(
    default=False,
    title="Display missing tools",
    description="Whether to include a list of missing tools per workflow entry",
)

ShowPublishedQueryParam: Optional[bool] = Query(default=None, title="Include published workflows.", description="")

ShowSharedQueryParam: Optional[bool] = Query(
    default=None, title="Include workflows shared with authenticated user.", description=""
)

SortByQueryParam: Optional[WorkflowSortByEnum] = Query(
    default=None,
    title="Sort workflow index by this attribute",
    # Typo fix: "In unspecified" -> "If unspecified".
    description="If unspecified, default ordering depends on other parameters but generally the user's own workflows appear first based on update time",
)

SortDescQueryParam: Optional[bool] = Query(
    default=None,
    title="Sort Descending",
    description="Sort in descending order?",
)

# Typo fix: this limits the number of workflows returned, not "queries".
LimitQueryParam: Optional[int] = Query(default=None, title="Limit number of workflows to return.")

OffsetQueryParam: Optional[int] = Query(
    default=0,
    title="Number of workflows to skip in sorted query (to enable pagination).",
)

query_tags = [
    IndexQueryTag("name", "The stored workflow's name.", "n"),
    IndexQueryTag(
        "tag",
        "The workflow's tag, if the tag contains a colon an approach will be made to match the key and value of the tag separately.",
        "t",
    ),
    IndexQueryTag("user", "The stored workflow's owner's username.", "u"),
    IndexQueryTag(
        "is:published",
        # Typo fixes: doubled "the the" and awkward "if to include".
        "Include only published workflows in the final result. Be sure the query parameter `show_published` is set to `true` to include all published workflows and not just the requesting user's.",
    ),
    IndexQueryTag(
        "is:share_with_me",
        "Include only workflows shared with the requesting user. Be sure the query parameter `show_shared` is set to `true` to include shared workflows.",
    ),
]

SearchQueryParam: Optional[str] = search_query_param(
    model_name="Stored Workflow",
    tags=query_tags,
    free_text_fields=["name", "tag", "user"],
)

SkipStepCountsQueryParam: bool = Query(
    default=False,
    title="Skip step counts.",
    description="Set this to true to skip joining workflow step counts and optimize the resulting index query. Response objects will not contain step counts.",
)
@router.cbv
class FastAPIWorkflows:
    """FastAPI (class-based view) controller for workflow endpoints."""

    # Services resolved via FastAPI dependency injection.
    service: WorkflowsService = depends(WorkflowsService)
    invocations_service: InvocationsService = depends(InvocationsService)
@router.get(
"/api/workflows",
summary="Lists stored workflows viewable by the user.",
response_description="A list with summary stored workflow information per viewable entry.",
)
def index(
self,
response: Response,
trans: ProvidesUserContext = DependsOnTrans,
show_deleted: bool = DeletedQueryParam,
show_hidden: bool = HiddenQueryParam,
missing_tools: bool = MissingToolsQueryParam,
show_published: Optional[bool] = ShowPublishedQueryParam,
show_shared: Optional[bool] = ShowSharedQueryParam,
sort_by: Optional[WorkflowSortByEnum] = SortByQueryParam,
sort_desc: Optional[bool] = SortDescQueryParam,
limit: Optional[int] = LimitQueryParam,
offset: Optional[int] = OffsetQueryParam,
search: Optional[str] = SearchQueryParam,
skip_step_counts: bool = SkipStepCountsQueryParam,
) -> List[Dict[str, Any]]:
"""Lists stored workflows viewable by the user."""
payload = WorkflowIndexPayload(
show_published=show_published,
show_hidden=show_hidden,
show_deleted=show_deleted,
show_shared=show_shared,
missing_tools=missing_tools,
sort_by=sort_by,
sort_desc=sort_desc,
limit=limit,
offset=offset,
search=search,
skip_step_counts=skip_step_counts,
)
workflows, total_matches = self.service.index(trans, payload, include_total_count=True)
response.headers["total_matches"] = str(total_matches)
return workflows
@router.get(
"/api/workflows/{id}/sharing",
summary="Get the current sharing status of the given item.",
)
def sharing(
self,
trans: ProvidesUserContext = DependsOnTrans,
id: EncodedDatabaseIdField = StoredWorkflowIDPathParam,
) -> SharingStatus:
"""Return the sharing status of the item."""
return self.service.shareable_service.sharing(trans, id)
@router.put(
"/api/workflows/{id}/enable_link_access",
summary="Makes this item accessible by a URL link.",
)
def enable_link_access(
self,
trans: ProvidesUserContext = DependsOnTrans,
id: EncodedDatabaseIdField = StoredWorkflowIDPathParam,
) -> SharingStatus:
"""Makes this item accessible by a URL link and return the current sharing status."""
return self.service.shareable_service.enable_link_access(trans, id)
@router.put(
"/api/workflows/{id}/disable_link_access",
summary="Makes this item inaccessible by a URL link.",
)
def disable_link_access(
self,
trans: ProvidesUserContext = DependsOnTrans,
id: EncodedDatabaseIdField = StoredWorkflowIDPathParam,
) -> SharingStatus:
"""Makes this item inaccessible by a URL link and return the current sharing status."""
return self.service.shareable_service.disable_link_access(trans, id)
@router.put(
"/api/workflows/{id}/publish",
summary="Makes this item public and accessible by a URL link.",
)
def publish(
self,
trans: ProvidesUserContext = DependsOnTrans,
id: EncodedDatabaseIdField = StoredWorkflowIDPathParam,
) -> SharingStatus:
"""Makes this item publicly available by a URL link and return the current sharing status."""
return self.service.shareable_service.publish(trans, id)
@router.put(
"/api/workflows/{id}/unpublish",
summary="Removes this item from the published list.",
)
def unpublish(
self,
trans: ProvidesUserContext = DependsOnTrans,
id: EncodedDatabaseIdField = StoredWorkflowIDPathParam,
) -> SharingStatus:
"""Removes this item from the published list and return the current sharing status."""
return self.service.shareable_service.unpublish(trans, id)
@router.put(
"/api/workflows/{id}/share_with_users",
summary="Share this item with specific users.",
)
def share_with_users(
self,
trans: ProvidesUserContext = DependsOnTrans,
id: EncodedDatabaseIdField = StoredWorkflowIDPathParam,
payload: ShareWithPayload = Body(...),
) -> ShareWithStatus:
"""Shares this item with specific users and return the current sharing status."""
return self.service.shareable_service.share_with_users(trans, id, payload)
@router.put(
"/api/workflows/{id}/slug",
summary="Set a new slug for this shared item.",
status_code=status.HTTP_204_NO_CONTENT,
)
def set_slug(
self,
trans: ProvidesUserContext = DependsOnTrans,
id: EncodedDatabaseIdField = StoredWorkflowIDPathParam,
payload: SetSlugPayload = Body(...),
):
"""Sets a new slug to access this item by URL. The new slug must be unique."""
self.service.shareable_service.set_slug(trans, id, payload)
return Response(status_code=status.HTTP_204_NO_CONTENT)
@router.post(
"/api/invocations/{invocation_id}/prepare_store_download",
summary="Prepare a worklfow invocation export-style download.",
)
def prepare_store_download(
self,
trans: ProvidesUserContext = DependsOnTrans,
invocation_id: EncodedDatabaseIdField = InvocationIDPathParam,
payload: PrepareStoreDownloadPayload = Body(...),
) -> AsyncFile:
return self.invocations_service.prepare_store_download(
trans,
invocation_id,
payload,
)
@router.post(
"/api/invocations/{invocation_id}/write_store",
summary="Prepare a worklfow invocation export-style download and write to supplied URI.",
)
def write_store(
self,
trans: ProvidesUserContext = DependsOnTrans,
invocation_id: EncodedDatabaseIdField = InvocationIDPathParam,
payload: WriteStoreToPayload = Body(...),
) -> AsyncTaskResultSummary:
rval = self.invocations_service.write_store(
trans,
invocation_id,
payload,
)
return rval
| 43.66277 | 204 | 0.625136 |
import hashlib
import json
import logging
import os
from typing import (
Any,
Dict,
List,
Optional,
)
from fastapi import (
Body,
Path,
Query,
Response,
status,
)
from gxformat2._yaml import ordered_dump
from markupsafe import escape
from pydantic import Extra
from galaxy import (
exceptions,
model,
util,
)
from galaxy.files.uris import (
stream_url_to_str,
validate_uri_access,
)
from galaxy.managers.context import ProvidesUserContext
from galaxy.managers.jobs import (
fetch_job_states,
invocation_job_source_iter,
summarize_job_metrics,
)
from galaxy.managers.workflows import (
MissingToolsException,
RefactorRequest,
WorkflowCreateOptions,
WorkflowUpdateOptions,
)
from galaxy.model.item_attrs import UsesAnnotations
from galaxy.schema.fields import EncodedDatabaseIdField
from galaxy.schema.schema import (
AsyncFile,
AsyncTaskResultSummary,
SetSlugPayload,
ShareWithPayload,
ShareWithStatus,
SharingStatus,
StoreContentSource,
WorkflowSortByEnum,
WriteStoreToPayload,
)
from galaxy.structured_app import StructuredApp
from galaxy.tool_shed.galaxy_install.install_manager import InstallRepositoryManager
from galaxy.tools import recommendations
from galaxy.tools.parameters import populate_state
from galaxy.tools.parameters.basic import workflow_building_modes
from galaxy.util.sanitize_html import sanitize_html
from galaxy.version import VERSION
from galaxy.web import (
expose_api,
expose_api_anonymous,
expose_api_anonymous_and_sessionless,
expose_api_raw,
expose_api_raw_anonymous_and_sessionless,
format_return_as_json,
)
from galaxy.webapps.base.controller import (
SharableMixin,
url_for,
UsesStoredWorkflowMixin,
)
from galaxy.webapps.base.webapp import GalaxyWebTransaction
from galaxy.webapps.galaxy.services.base import (
ConsumesModelStores,
ServesExportStores,
)
from galaxy.webapps.galaxy.services.invocations import (
InvocationIndexPayload,
InvocationSerializationParams,
InvocationsService,
PrepareStoreDownloadPayload,
)
from galaxy.webapps.galaxy.services.workflows import (
WorkflowIndexPayload,
WorkflowsService,
)
from galaxy.workflow.extract import extract_workflow
from galaxy.workflow.modules import module_factory
from galaxy.workflow.run import queue_invoke
from galaxy.workflow.run_request import build_workflow_run_configs
from . import (
BaseGalaxyAPIController,
depends,
DependsOnTrans,
IndexQueryTag,
Router,
search_query_param,
)
# Module-level logger for this API module.
log = logging.getLogger(__name__)
# FastAPI router the workflow endpoints register against.
router = Router(tags=["workflows"])
class CreateInvocationFromStore(StoreContentSource):
    """Payload model for creating workflow invocations from a model store."""
    # Encoded id of the history the invocation's objects are imported into.
    history_id: Optional[str]
    class Config:
        # Accept and retain extra payload fields beyond those declared here.
        extra = Extra.allow
class WorkflowsAPIController(
    BaseGalaxyAPIController,
    UsesStoredWorkflowMixin,
    UsesAnnotations,
    SharableMixin,
    ServesExportStores,
    ConsumesModelStores,
):
    """Legacy (WSGI-style) API controller for stored workflows and invocations."""
    # Injected service-layer collaborators.
    service: WorkflowsService = depends(WorkflowsService)
    invocations_service: InvocationsService = depends(InvocationsService)
    def __init__(self, app: StructuredApp):
        """Cache frequently used application managers on the controller."""
        super().__init__(app)
        self.history_manager = app.history_manager
        self.workflow_manager = app.workflow_manager
        self.workflow_contents_manager = app.workflow_contents_manager
        # Helper used by get_tool_predictions to query the recommendation model.
        self.tool_recommendations = recommendations.ToolRecommendations()
@expose_api
def get_workflow_menu(self, trans: ProvidesUserContext, **kwd):
user = trans.user
ids_in_menu = [x.stored_workflow_id for x in user.stored_workflow_menu_entries]
workflows = self.get_workflows_list(trans, **kwd)
return {"ids_in_menu": ids_in_menu, "workflows": workflows}
@expose_api
def set_workflow_menu(self, trans: GalaxyWebTransaction, payload=None, **kwd):
payload = payload or {}
user = trans.user
workflow_ids = payload.get("workflow_ids")
if workflow_ids is None:
workflow_ids = []
elif type(workflow_ids) != list:
workflow_ids = [workflow_ids]
workflow_ids_decoded = []
for ids in workflow_ids:
workflow_ids_decoded.append(trans.security.decode_id(ids))
sess = trans.sa_session
for m in user.stored_workflow_menu_entries:
sess.delete(m)
user.stored_workflow_menu_entries = []
q = sess.query(model.StoredWorkflow)
seen_workflow_ids = set()
for wf_id in workflow_ids_decoded:
if wf_id in seen_workflow_ids:
continue
else:
seen_workflow_ids.add(wf_id)
m = model.StoredWorkflowMenuEntry()
m.stored_workflow = q.get(wf_id)
user.stored_workflow_menu_entries.append(m)
sess.flush()
message = "Menu updated."
trans.set_message(message)
return {"message": message, "status": "done"}
def get_workflows_list(
self,
trans: ProvidesUserContext,
missing_tools=False,
show_published=None,
show_shared=None,
show_hidden=False,
show_deleted=False,
**kwd,
):
show_published = util.string_as_bool_or_none(show_published)
show_hidden = util.string_as_bool(show_hidden)
show_deleted = util.string_as_bool(show_deleted)
missing_tools = util.string_as_bool(missing_tools)
show_shared = util.string_as_bool_or_none(show_shared)
payload = WorkflowIndexPayload(
show_published=show_published,
show_hidden=show_hidden,
show_deleted=show_deleted,
show_shared=show_shared,
missing_tools=missing_tools,
)
workflows, _ = self.service.index(trans, payload)
return workflows
    @expose_api_anonymous_and_sessionless
    def show(self, trans: GalaxyWebTransaction, id, **kwd):
        """Display a stored workflow as a dictionary.

        Access is allowed when the workflow is importable, owned by the
        current user, explicitly shared with them, or the user is an admin.
        """
        stored_workflow = self.__get_stored_workflow(trans, id, **kwd)
        if stored_workflow.importable is False and stored_workflow.user != trans.user and not trans.user_is_admin:
            # Not public and not owned: allow access only via an explicit share.
            if (
                trans.sa_session.query(model.StoredWorkflowUserShareAssociation)
                .filter_by(user=trans.user, stored_workflow=stored_workflow)
                .count()
                == 0
            ):
                message = "Workflow is neither importable, nor owned by or shared with current user"
                raise exceptions.ItemAccessibilityException(message)
        if kwd.get("legacy", False):
            style = "legacy"
        else:
            style = "instance"
        version = kwd.get("version")
        if version is None and util.string_as_bool(kwd.get("instance", "false")):
            # With instance=true the supplied id is a Workflow (version) id;
            # translate it into a version index within the stored workflow.
            workflow_id = self.decode_id(id)
            for i, workflow in enumerate(reversed(stored_workflow.workflows)):
                if workflow.id == workflow_id:
                    version = i
                    break
        return self.workflow_contents_manager.workflow_to_dict(trans, stored_workflow, style=style, version=version)
@expose_api
def show_versions(self, trans: GalaxyWebTransaction, workflow_id, **kwds):
instance = util.string_as_bool(kwds.get("instance", "false"))
stored_workflow = self.workflow_manager.get_stored_accessible_workflow(
trans, workflow_id, by_stored_id=not instance
)
return [
{"version": i, "update_time": str(w.update_time), "steps": len(w.steps)}
for i, w in enumerate(reversed(stored_workflow.workflows))
]
    @expose_api
    def create(self, trans: GalaxyWebTransaction, payload=None, **kwd):
        """Create a workflow through exactly one of several supported methods.

        Exactly one of ``archive_source``, ``from_history_id``, ``from_path``,
        ``shared_workflow_id`` or ``workflow`` must appear in the payload;
        anything else is rejected up front.
        """
        ways_to_create = {
            "archive_source",
            "from_history_id",
            "from_path",
            "shared_workflow_id",
            "workflow",
        }
        if trans.user_is_bootstrap_admin:
            raise exceptions.RealUserRequiredException("Only real users can create or run workflows.")
        # Require exactly one creation method in the payload.
        if payload is None or len(ways_to_create.intersection(payload)) == 0:
            message = f"One parameter among - {', '.join(ways_to_create)} - must be specified"
            raise exceptions.RequestParameterMissingException(message)
        if len(ways_to_create.intersection(payload)) > 1:
            message = f"Only one parameter among - {', '.join(ways_to_create)} - must be specified"
            raise exceptions.RequestParameterInvalidException(message)
        if "archive_source" in payload:
            # Import from a URL, a server-local file:// path, a TRS descriptor,
            # or an uploaded file.
            archive_source = payload["archive_source"]
            archive_file = payload.get("archive_file")
            archive_data = None
            if archive_source:
                validate_uri_access(archive_source, trans.user_is_admin, trans.app.config.fetch_url_allowlist_ips)
                if archive_source.startswith("file://"):
                    workflow_src = {"src": "from_path", "path": archive_source[len("file://") :]}
                    payload["workflow"] = workflow_src
                    return self.__api_import_new_workflow(trans, payload, **kwd)
                elif archive_source == "trs_tool":
                    trs_server = payload.get("trs_server")
                    trs_tool_id = payload.get("trs_tool_id")
                    trs_version_id = payload.get("trs_version_id")
                    import_source = None
                    archive_data = self.app.trs_proxy.get_version_descriptor(trs_server, trs_tool_id, trs_version_id)
                else:
                    try:
                        archive_data = stream_url_to_str(
                            archive_source, trans.app.file_sources, prefix="gx_workflow_download"
                        )
                        import_source = "URL"
                    except Exception:
                        raise exceptions.MessageException(f"Failed to open URL '{escape(archive_source)}'.")
            elif hasattr(archive_file, "file"):
                uploaded_file = archive_file.file
                uploaded_file_name = uploaded_file.name
                if os.path.getsize(os.path.abspath(uploaded_file_name)) > 0:
                    archive_data = util.unicodify(uploaded_file.read())
                    import_source = "uploaded file"
                else:
                    raise exceptions.MessageException("You attempted to upload an empty file.")
            else:
                raise exceptions.MessageException("Please provide a URL or file.")
            return self.__api_import_from_archive(trans, archive_data, import_source, payload=payload)
        if "from_history_id" in payload:
            # Extract a new workflow from an existing history's jobs/datasets.
            from_history_id = payload.get("from_history_id")
            from_history_id = self.decode_id(from_history_id)
            history = self.history_manager.get_accessible(from_history_id, trans.user, current_history=trans.history)
            job_ids = [self.decode_id(_) for _ in payload.get("job_ids", [])]
            dataset_ids = payload.get("dataset_ids", [])
            dataset_collection_ids = payload.get("dataset_collection_ids", [])
            workflow_name = payload["workflow_name"]
            stored_workflow = extract_workflow(
                trans=trans,
                user=trans.user,
                history=history,
                job_ids=job_ids,
                dataset_ids=dataset_ids,
                dataset_collection_ids=dataset_collection_ids,
                workflow_name=workflow_name,
            )
            item = stored_workflow.to_dict(value_mapper={"id": trans.security.encode_id})
            item["url"] = url_for("workflow", id=item["id"])
            return item
        if "from_path" in payload:
            # Import a workflow from a path on the Galaxy server.
            from_path = payload.get("from_path")
            object_id = payload.get("object_id")
            workflow_src = {"src": "from_path", "path": from_path}
            if object_id is not None:
                workflow_src["object_id"] = object_id
            payload["workflow"] = workflow_src
            return self.__api_import_new_workflow(trans, payload, **kwd)
        if "shared_workflow_id" in payload:
            # Import a workflow another user has shared.
            workflow_id = payload["shared_workflow_id"]
            return self.__api_import_shared_workflow(trans, workflow_id, payload)
        if "workflow" in payload:
            return self.__api_import_new_workflow(trans, payload, **kwd)
        raise exceptions.RequestParameterMissingException("No method for workflow creation supplied.")
@expose_api_raw_anonymous_and_sessionless
def workflow_dict(self, trans: GalaxyWebTransaction, workflow_id, **kwd):
stored_workflow = self.__get_stored_accessible_workflow(trans, workflow_id, **kwd)
style = kwd.get("style", "export")
download_format = kwd.get("format")
version = kwd.get("version")
history_id = kwd.get("history_id")
history = None
if history_id:
history = self.history_manager.get_accessible(
self.decode_id(history_id), trans.user, current_history=trans.history
)
ret_dict = self.workflow_contents_manager.workflow_to_dict(
trans, stored_workflow, style=style, version=version, history=history
)
if download_format == "json-download":
sname = stored_workflow.name
sname = "".join(c in util.FILENAME_VALID_CHARS and c or "_" for c in sname)[0:150]
if ret_dict.get("format-version", None) == "0.1":
extension = "ga"
else:
extension = "gxwf.json"
trans.response.headers[
"Content-Disposition"
] = f'attachment; filename="Galaxy-Workflow-{sname}.{extension}"'
trans.response.set_content_type("application/galaxy-archive")
if style == "format2" and download_format != "json-download":
return ordered_dump(ret_dict)
else:
return format_return_as_json(ret_dict, pretty=True)
@expose_api
def delete(self, trans: ProvidesUserContext, id, **kwd):
stored_workflow = self.__get_stored_workflow(trans, id, **kwd)
if stored_workflow.user != trans.user and not trans.user_is_admin:
raise exceptions.InsufficientPermissionsException()
stored_workflow.deleted = True
trans.sa_session.flush()
return f"Workflow '{stored_workflow.name}' successfully deleted"
    @expose_api
    def import_new_workflow_deprecated(self, trans: GalaxyWebTransaction, payload, **kwd):
        """Deprecated thin wrapper: delegates to the shared import implementation.

        Prefer ``create`` with a ``workflow`` payload.
        """
        return self.__api_import_new_workflow(trans, payload, **kwd)
@expose_api
def update(self, trans: GalaxyWebTransaction, id, payload, **kwds):
stored_workflow = self.__get_stored_workflow(trans, id, **kwds)
workflow_dict = payload.get("workflow", {})
workflow_dict.update({k: v for k, v in payload.items() if k not in workflow_dict})
if workflow_dict:
require_flush = False
raw_workflow_description = self.__normalize_workflow(trans, workflow_dict)
workflow_dict = raw_workflow_description.as_dict
new_workflow_name = workflow_dict.get("name")
old_workflow = stored_workflow.latest_workflow
name_updated = new_workflow_name and new_workflow_name != stored_workflow.name
steps_updated = "steps" in workflow_dict
if name_updated and not steps_updated:
sanitized_name = sanitize_html(new_workflow_name or old_workflow.name)
workflow = old_workflow.copy(user=trans.user)
workflow.stored_workflow = stored_workflow
workflow.name = sanitized_name
stored_workflow.name = sanitized_name
stored_workflow.latest_workflow = workflow
trans.sa_session.add(workflow, stored_workflow)
require_flush = True
if "hidden" in workflow_dict and stored_workflow.hidden != workflow_dict["hidden"]:
stored_workflow.hidden = workflow_dict["hidden"]
require_flush = True
if "published" in workflow_dict and stored_workflow.published != workflow_dict["published"]:
stored_workflow.published = workflow_dict["published"]
require_flush = True
if "importable" in workflow_dict and stored_workflow.importable != workflow_dict["importable"]:
stored_workflow.importable = workflow_dict["importable"]
require_flush = True
if "annotation" in workflow_dict and not steps_updated:
newAnnotation = sanitize_html(workflow_dict["annotation"])
self.add_item_annotation(trans.sa_session, trans.user, stored_workflow, newAnnotation)
require_flush = True
if "menu_entry" in workflow_dict or "show_in_tool_panel" in workflow_dict:
show_in_panel = workflow_dict.get("menu_entry") or workflow_dict.get("show_in_tool_panel")
stored_workflow_menu_entries = trans.user.stored_workflow_menu_entries
decoded_id = trans.security.decode_id(id)
if show_in_panel:
workflow_ids = [wf.stored_workflow_id for wf in stored_workflow_menu_entries]
if decoded_id not in workflow_ids:
menu_entry = model.StoredWorkflowMenuEntry()
menu_entry.stored_workflow = stored_workflow
stored_workflow_menu_entries.append(menu_entry)
trans.sa_session.add(menu_entry)
require_flush = True
else:
entries = {x.stored_workflow_id: x for x in stored_workflow_menu_entries}
if decoded_id in entries:
stored_workflow_menu_entries.remove(entries[decoded_id])
require_flush = True
if "tags" in workflow_dict:
trans.app.tag_handler.set_tags_from_list(
user=trans.user, item=stored_workflow, new_tags_list=workflow_dict["tags"]
)
if require_flush:
trans.sa_session.flush()
if "steps" in workflow_dict:
try:
workflow_update_options = WorkflowUpdateOptions(**payload)
workflow, errors = self.workflow_contents_manager.update_workflow_from_raw_description(
trans,
stored_workflow,
raw_workflow_description,
workflow_update_options,
)
except MissingToolsException:
raise exceptions.MessageException(
"This workflow contains missing tools. It cannot be saved until they have been removed from the workflow or installed."
)
else:
message = "Updating workflow requires dictionary containing 'workflow' attribute with new JSON description."
raise exceptions.RequestParameterInvalidException(message)
return self.workflow_contents_manager.workflow_to_dict(trans, stored_workflow, style="instance")
@expose_api
def refactor(self, trans, id, payload, **kwds):
stored_workflow = self.__get_stored_workflow(trans, id, **kwds)
refactor_request = RefactorRequest(**payload)
return self.workflow_contents_manager.refactor(trans, stored_workflow, refactor_request)
    @expose_api
    def build_module(self, trans: GalaxyWebTransaction, payload=None):
        """Build a workflow-editor module model (state, inputs, outputs) from a payload."""
        if payload is None:
            payload = {}
        inputs = payload.get("inputs", {})
        trans.workflow_building_mode = workflow_building_modes.ENABLED
        module = module_factory.from_dict(trans, payload, from_tool_form=True)
        if "tool_state" not in payload:
            # No serialized tool state supplied; recover it from the module's inputs.
            module_state: Dict[str, Any] = {}
            populate_state(trans, module.get_inputs(), inputs, module_state, check=False)
            module.recover_state(module_state, from_tool_form=True)
        return {
            "label": inputs.get("__label", ""),
            "annotation": inputs.get("__annotation", ""),
            "name": module.get_name(),
            "tool_state": module.get_state(),
            "content_id": module.get_content_id(),
            "inputs": module.get_all_inputs(connectable_only=True),
            "outputs": module.get_all_outputs(),
            "config_form": module.get_config_form(),
            "post_job_actions": module.get_post_job_actions(inputs),
        }
@expose_api
def get_tool_predictions(self, trans: ProvidesUserContext, payload, **kwd):
remote_model_url = payload.get("remote_model_url", trans.app.config.tool_recommendation_model_path)
tool_sequence = payload.get("tool_sequence", "")
if "tool_sequence" not in payload or remote_model_url is None:
return
tool_sequence, recommended_tools = self.tool_recommendations.get_predictions(
trans, tool_sequence, remote_model_url
)
return {"current_tool": tool_sequence, "predicted_data": recommended_tools}
    def __api_import_from_archive(self, trans: GalaxyWebTransaction, archive_data, source=None, payload=None):
        """Create a stored workflow from raw archive text (JSON or gxformat2 YAML).

        Returns a ``message``/``status``/``id`` dict; ``status`` is ``error``
        when the imported workflow has validation errors, no steps, or cycles.
        """
        payload = payload or {}
        try:
            data = json.loads(archive_data)
        except Exception:
            # Not JSON — accept gxformat2 YAML when it self-identifies.
            if "GalaxyWorkflow" in archive_data:
                data = {"yaml_content": archive_data}
            else:
                raise exceptions.MessageException("The data content does not appear to be a valid workflow.")
        if not data:
            raise exceptions.MessageException("The data content is missing.")
        raw_workflow_description = self.__normalize_workflow(trans, data)
        workflow_create_options = WorkflowCreateOptions(**payload)
        workflow, missing_tool_tups = self._workflow_from_dict(
            trans, raw_workflow_description, workflow_create_options, source=source
        )
        workflow_id = workflow.id
        workflow = workflow.latest_workflow
        response = {
            "message": f"Workflow '{escape(workflow.name)}' imported successfully.",
            "status": "success",
            "id": trans.security.encode_id(workflow_id),
        }
        if workflow.has_errors:
            response["message"] = "Imported, but some steps in this workflow have validation errors."
            response["status"] = "error"
        elif len(workflow.steps) == 0:
            response["message"] = "Imported, but this workflow has no steps."
            response["status"] = "error"
        elif workflow.has_cycles:
            response["message"] = "Imported, but this workflow contains cycles."
            response["status"] = "error"
        return response
    def __api_import_new_workflow(self, trans: GalaxyWebTransaction, payload, **kwd):
        """Create a stored workflow from ``payload["workflow"]`` and summarize it."""
        data = payload["workflow"]
        raw_workflow_description = self.__normalize_workflow(trans, data)
        workflow_create_options = WorkflowCreateOptions(**payload)
        # missing_tool_tups is not used by this code path.
        workflow, missing_tool_tups = self._workflow_from_dict(
            trans,
            raw_workflow_description,
            workflow_create_options,
        )
        workflow_id = workflow.id
        encoded_id = trans.security.encode_id(workflow_id)
        item = workflow.to_dict(value_mapper={"id": trans.security.encode_id})
        item["annotations"] = [x.annotation for x in workflow.annotations]
        item["url"] = url_for("workflow", id=encoded_id)
        item["owner"] = workflow.user.username
        item["number_of_steps"] = len(workflow.latest_workflow.steps)
        return item
    def __normalize_workflow(self, trans: GalaxyWebTransaction, as_dict):
        """Normalize any supported workflow format into a raw workflow description."""
        return self.workflow_contents_manager.normalize_workflow_format(trans, as_dict)
@expose_api
def import_shared_workflow_deprecated(self, trans: GalaxyWebTransaction, payload, **kwd):
workflow_id = payload.get("workflow_id", None)
if workflow_id is None:
raise exceptions.ObjectAttributeMissingException("Missing required parameter 'workflow_id'.")
self.__api_import_shared_workflow(trans, workflow_id, payload)
    def __api_import_shared_workflow(self, trans: GalaxyWebTransaction, workflow_id, payload, **kwd):
        """Import a workflow shared by another user and return its summary dict.

        Raises when the id is malformed, the owner disabled imports, or the
        workflow has been deleted.
        """
        try:
            stored_workflow = self.get_stored_workflow(trans, workflow_id, check_ownership=False)
        except Exception:
            raise exceptions.ObjectNotFound(f"Malformed workflow id ( {workflow_id} ) specified.")
        if stored_workflow.importable is False:
            raise exceptions.ItemAccessibilityException(
                "The owner of this workflow has disabled imports via this link."
            )
        elif stored_workflow.deleted:
            raise exceptions.ItemDeletionException("You can't import this workflow because it has been deleted.")
        imported_workflow = self._import_shared_workflow(trans, stored_workflow)
        item = imported_workflow.to_dict(value_mapper={"id": trans.security.encode_id})
        encoded_id = trans.security.encode_id(imported_workflow.id)
        item["url"] = url_for("workflow", id=encoded_id)
        return item
    @expose_api
    def invoke(self, trans: GalaxyWebTransaction, workflow_id, payload, **kwd):
        """Schedule one (or, with ``batch``, several) invocations of the workflow.

        Fails up front when tools required by the workflow are missing from
        the tool box.
        """
        # Get workflow + accessibility check.
        stored_workflow = self.__get_stored_accessible_workflow(trans, workflow_id, instance=kwd.get("instance", False))
        workflow = stored_workflow.latest_workflow
        run_configs = build_workflow_run_configs(trans, workflow, payload)
        is_batch = payload.get("batch")
        if not is_batch and len(run_configs) != 1:
            raise exceptions.RequestParameterInvalidException("Must specify 'batch' to use batch parameters.")
        require_exact_tool_versions = util.string_as_bool(payload.get("require_exact_tool_versions", "true"))
        tools = self.workflow_contents_manager.get_all_tools(workflow)
        missing_tools = [
            tool
            for tool in tools
            if not self.app.toolbox.has_tool(
                tool["tool_id"], tool_version=tool["tool_version"], exact=require_exact_tool_versions
            )
        ]
        if missing_tools:
            missing_tools_message = "Workflow was not invoked; the following required tools are not installed: "
            if require_exact_tool_versions:
                missing_tools_message += ", ".join(
                    [f"{tool['tool_id']} (version {tool['tool_version']})" for tool in missing_tools]
                )
            else:
                missing_tools_message += ", ".join([tool["tool_id"] for tool in missing_tools])
            raise exceptions.MessageException(missing_tools_message)
        invocations = []
        for run_config in run_configs:
            workflow_scheduler_id = payload.get("scheduler", None)
            # TODO: workflow scheduler hints
            work_request_params = dict(scheduler=workflow_scheduler_id)
            workflow_invocation = queue_invoke(
                trans=trans,
                workflow=workflow,
                workflow_run_config=run_config,
                request_params=work_request_params,
                flush=False,
            )
            invocations.append(workflow_invocation)
        # Single flush after queueing every invocation.
        trans.sa_session.flush()
        invocations = [self.encode_all_ids(trans, invocation.to_dict(), recursive=True) for invocation in invocations]
        if is_batch:
            return invocations
        else:
            return invocations[0]
@expose_api
def index_invocations(self, trans: GalaxyWebTransaction, **kwd):
invocation_payload = InvocationIndexPayload(**kwd)
serialization_params = InvocationSerializationParams(**kwd)
invocations, total_matches = self.invocations_service.index(trans, invocation_payload, serialization_params)
trans.response.headers["total_matches"] = total_matches
return invocations
@expose_api_anonymous
def create_invocations_from_store(self, trans, payload, **kwd):
create_payload = CreateInvocationFromStore(**payload)
serialization_params = InvocationSerializationParams(**payload)
# refactor into a service...
return self._create_from_store(trans, create_payload, serialization_params)
    def _create_from_store(
        self, trans, payload: CreateInvocationFromStore, serialization_params: InvocationSerializationParams
    ):
        """Import invocations from a model store into the payload's (owned) history."""
        history = self.history_manager.get_owned(
            self.decode_id(payload.history_id), trans.user, current_history=trans.history
        )
        object_tracker = self.create_objects_from_store(
            trans,
            payload,
            history=history,
        )
        return self.invocations_service.serialize_workflow_invocations(
            object_tracker.invocations_by_key.values(), serialization_params
        )
@expose_api
def show_invocation(self, trans: GalaxyWebTransaction, invocation_id, **kwd):
decoded_workflow_invocation_id = self.decode_id(invocation_id)
workflow_invocation = self.workflow_manager.get_invocation(trans, decoded_workflow_invocation_id, eager=True)
if not workflow_invocation:
raise exceptions.ObjectNotFound()
return self.__encode_invocation(workflow_invocation, **kwd)
@expose_api
def cancel_invocation(self, trans: ProvidesUserContext, invocation_id, **kwd):
decoded_workflow_invocation_id = self.decode_id(invocation_id)
workflow_invocation = self.workflow_manager.cancel_invocation(trans, decoded_workflow_invocation_id)
return self.__encode_invocation(workflow_invocation, **kwd)
@expose_api
def show_invocation_report(self, trans: GalaxyWebTransaction, invocation_id, **kwd):
kwd["format"] = "json"
return self.workflow_manager.get_invocation_report(trans, invocation_id, **kwd)
@expose_api_raw
def show_invocation_report_pdf(self, trans: GalaxyWebTransaction, invocation_id, **kwd):
kwd["format"] = "pdf"
trans.response.set_content_type("application/pdf")
return self.workflow_manager.get_invocation_report(trans, invocation_id, **kwd)
    def _generate_invocation_bco(self, trans: GalaxyWebTransaction, invocation_id, **kwd):
        """Assemble a BioCompute Object (IEEE 2791) dictionary for an invocation.

        Walks the invocation's steps/jobs to populate the BCO domains and
        finishes with an etag over the domain content.
        """
        decoded_workflow_invocation_id = self.decode_id(invocation_id)
        workflow_invocation = self.workflow_manager.get_invocation(trans, decoded_workflow_invocation_id)
        history = workflow_invocation.history
        workflow = workflow_invocation.workflow
        stored_workflow = workflow.stored_workflow
        # pull in the user info from those who the history and workflow has been shared with
        contributing_users = [stored_workflow.user]
        # may want to extend this to have more reviewers.
        reviewing_users = [stored_workflow.user]
        encoded_workflow_id = trans.security.encode_id(stored_workflow.id)
        encoded_history_id = trans.security.encode_id(history.id)
        dict_workflow = json.loads(self.workflow_dict(trans, encoded_workflow_id))
        spec_version = kwd.get("spec_version", "https://w3id.org/ieee/ieee-2791-schema/2791object.json")
        # Locate this invocation's workflow among the stored versions.
        # NOTE(review): current_version stays unbound if no version matches, which
        # would raise NameError at provenance_domain below — confirm a match is
        # always guaranteed here.
        for i, w in enumerate(reversed(stored_workflow.workflows)):
            if workflow == w:
                current_version = i
        contributors = []
        for contributing_user in contributing_users:
            contributor = {
                "orcid": kwd.get("xref", []),
                "name": contributing_user.username,
                "affiliation": "",
                "contribution": ["authoredBy"],
                "email": contributing_user.email,
            }
            contributors.append(contributor)
        reviewers = []
        for reviewer in reviewing_users:
            # NOTE(review): the nested "reviewer" dict reads contributing_user (left
            # over from the loop above) rather than this loop's user; equivalent today
            # only because both lists contain the same single user — verify.
            reviewer = {
                "status": "approved",
                "reviewer_comment": "",
                "date": workflow_invocation.update_time.isoformat(),
                "reviewer": {
                    "orcid": kwd.get("orcid", []),
                    "name": contributing_user.username,
                    "affiliation": "",
                    "contribution": "curatedBy",
                    "email": contributing_user.email,
                },
            }
            reviewers.append(reviewer)
        provenance_domain = {
            "name": workflow.name,
            "version": current_version,
            "review": reviewers,
            "derived_from": url_for("workflow", id=encoded_workflow_id, qualified=True),
            "created": workflow_invocation.create_time.isoformat(),
            "modified": workflow_invocation.update_time.isoformat(),
            "contributors": contributors,
            "license": "https://spdx.org/licenses/CC-BY-4.0.html",
        }
        # Keywords are the union of workflow and history tags (deduplicated).
        keywords = []
        for tag in stored_workflow.tags:
            keywords.append(tag.user_tname)
        for tag in history.tags:
            if tag.user_tname not in keywords:
                keywords.append(tag.user_tname)
        metrics = {}
        tools, input_subdomain, output_subdomain, pipeline_steps, software_prerequisites = [], [], [], [], []
        for step in workflow_invocation.steps:
            if step.workflow_step.type == "tool":
                workflow_outputs_list, output_list, input_list = set(), [], []
                for wo in step.workflow_step.workflow_outputs:
                    workflow_outputs_list.add(wo.output_name)
                for job in step.jobs:
                    # NOTE(review): 'i' here is the leftover index from the version scan
                    # above, so every job overwrites the same metrics key — looks like a
                    # bug (probably intended a per-step or per-job key); verify before
                    # relying on 'metrics'. The value is currently only referenced by a
                    # commented-out extension field below.
                    metrics[i] = summarize_job_metrics(trans, job)
                    for job_input in job.input_datasets:
                        if hasattr(job_input.dataset, "dataset_id"):
                            encoded_dataset_id = trans.security.encode_id(job_input.dataset.dataset_id)
                            input_obj = {
                                # TODO: that should maybe be a step prefix + element identifier where appropriate.
                                "filename": job_input.dataset.name,
                                "uri": url_for(
                                    "history_content",
                                    history_id=encoded_history_id,
                                    id=encoded_dataset_id,
                                    qualified=True,
                                ),
                                "access_time": job_input.dataset.create_time.isoformat(),
                            }
                            input_list.append(input_obj)
                    for job_output in job.output_datasets:
                        if hasattr(job_output.dataset, "dataset_id"):
                            encoded_dataset_id = trans.security.encode_id(job_output.dataset.dataset_id)
                            output_obj = {
                                "filename": job_output.dataset.name,
                                "uri": url_for(
                                    "history_content",
                                    history_id=encoded_history_id,
                                    id=encoded_dataset_id,
                                    qualified=True,
                                ),
                                "access_time": job_output.dataset.create_time.isoformat(),
                            }
                            output_list.append(output_obj)
                            if job_output.name in workflow_outputs_list:
                                output = {
                                    "mediatype": job_output.dataset.extension,
                                    "uri": {
                                        "filename": job_output.dataset.name,
                                        "uri": url_for(
                                            "history_content",
                                            history_id=encoded_history_id,
                                            id=encoded_dataset_id,
                                            qualified=True,
                                        ),
                                        "access_time": job_output.dataset.create_time.isoformat(),
                                    },
                                }
                                output_subdomain.append(output)
                workflow_step = step.workflow_step
                step_index = workflow_step.order_index
                current_step = dict_workflow["steps"][str(step_index)]
                pipeline_step = {
                    "step_number": step_index,
                    "name": current_step["name"],
                    "description": current_step["annotation"],
                    "version": current_step["tool_version"],
                    "prerequisite": kwd.get("prerequisite", []),
                    "input_list": input_list,
                    "output_list": output_list,
                }
                pipeline_steps.append(pipeline_step)
                try:
                    # Record each distinct tool once as a software prerequisite.
                    software_prerequisite = {
                        "name": current_step["content_id"],
                        "version": current_step["tool_version"],
                        "uri": {"uri": current_step["content_id"], "access_time": current_step["uuid"]},
                    }
                    if software_prerequisite["uri"]["uri"] not in tools:
                        software_prerequisites.append(software_prerequisite)
                        tools.append(software_prerequisite["uri"]["uri"])
                except Exception:
                    continue
            if step.workflow_step.type == "data_input" and step.output_datasets:
                for output_assoc in step.output_datasets:
                    encoded_dataset_id = trans.security.encode_id(output_assoc.dataset_id)
                    input_obj = {
                        "filename": step.workflow_step.label,
                        "uri": url_for(
                            "history_content", history_id=encoded_history_id, id=encoded_dataset_id, qualified=True
                        ),
                        "access_time": step.workflow_step.update_time.isoformat(),
                    }
                    input_subdomain.append(input_obj)
            if step.workflow_step.type == "data_collection_input" and step.output_dataset_collections:
                for output_dataset_collection_association in step.output_dataset_collections:
                    encoded_dataset_id = trans.security.encode_id(
                        output_dataset_collection_association.dataset_collection_id
                    )
                    input_obj = {
                        "filename": step.workflow_step.label,
                        "uri": url_for(
                            "history_content",
                            history_id=encoded_history_id,
                            id=encoded_dataset_id,
                            type="dataset_collection",
                            qualified=True,
                        ),
                        "access_time": step.workflow_step.update_time.isoformat(),
                    }
                    input_subdomain.append(input_obj)
        # Usability: all annotations on both the workflow and the history.
        usability_domain = []
        for a in stored_workflow.annotations:
            usability_domain.append(a.annotation)
        for h in history.annotations:
            usability_domain.append(h.annotation)
        # Parameters: flatten each step's tool_inputs; steps without them are skipped.
        parametric_domain = []
        for inv_step in workflow_invocation.steps:
            try:
                for k, v in inv_step.workflow_step.tool_inputs.items():
                    param, value, step = k, v, inv_step.workflow_step.order_index
                    parametric_domain.append({"param": param, "value": value, "step": step})
            except Exception:
                continue
        execution_domain = {
            "script_access_type": "a_galaxy_workflow",
            "script": [url_for("workflows", encoded_workflow_id=encoded_workflow_id, qualified=True)],
            "script_driver": "Galaxy",
            "software_prerequisites": software_prerequisites,
            "external_data_endpoints": [
                {"name": "Access to Galaxy", "url": url_for("/", qualified=True)},
                kwd.get("external_data_endpoints"),
            ],
            "environment_variables": kwd.get("environment_variables", {}),
        }
        extension = [
            {
                "extension_schema": "https://raw.githubusercontent.com/biocompute-objects/extension_domain/6d2cd8482e6075746984662edcf78b57d3d38065/galaxy/galaxy_extension.json",
                "galaxy_extension": {
                    "galaxy_url": url_for("/", qualified=True),
                    "galaxy_version": VERSION,
                    # TODO:
                    # 'aws_estimate': aws_estimate,
                    # 'job_metrics': metrics
                },
            }
        ]
        error_domain = {
            "empirical_error": kwd.get("empirical_error", []),
            "algorithmic_error": kwd.get("algorithmic_error", []),
        }
        bco_dict = {
            "provenance_domain": provenance_domain,
            "usability_domain": usability_domain,
            "extension_domain": extension,
            "description_domain": {
                "keywords": keywords,
                "xref": kwd.get("xref", []),
                "platform": ["Galaxy"],
                "pipeline_steps": pipeline_steps,
            },
            "execution_domain": execution_domain,
            "parametric_domain": parametric_domain,
            "io_domain": {
                "input_subdomain": input_subdomain,
                "output_subdomain": output_subdomain,
            },
            "error_domain": error_domain,
        }
        # Generate etag from the BCO excluding object_id and spec_version, as
        # specified in https://github.com/biocompute-objects/BCO_Specification/blob/main/docs/top-level.md#203-etag-etag
        etag = hashlib.sha256(json.dumps(bco_dict, sort_keys=True).encode()).hexdigest()
        bco_dict.update(
            {
                "object_id": url_for(
                    controller=f"api/invocations/{invocation_id}", action="biocompute", qualified=True
                ),
                "spec_version": spec_version,
                "etag": etag,
            }
        )
        return bco_dict
    @expose_api
    def export_invocation_bco(self, trans: GalaxyWebTransaction, invocation_id, **kwd):
        """Return the BioCompute Object for a workflow invocation as a dict.

        Thin API wrapper around ``_generate_invocation_bco``; extra keyword
        arguments are forwarded to the generator unchanged.
        """
        return self._generate_invocation_bco(trans, invocation_id, **kwd)
    @expose_api_raw
    def download_invocation_bco(self, trans: GalaxyWebTransaction, invocation_id, **kwd):
        """Return the invocation's BioCompute Object as a JSON file download.

        Sets ``Content-Disposition`` so browsers save the response as
        ``bco_<invocation_id>.json`` and serializes the BCO pretty-printed.
        """
        ret_dict = self._generate_invocation_bco(trans, invocation_id, **kwd)
        trans.response.headers["Content-Disposition"] = f'attachment; filename="bco_{invocation_id}.json"'
        trans.response.set_content_type("application/json")
        return format_return_as_json(ret_dict, pretty=True)
@expose_api
def invocation_step(self, trans, invocation_id, step_id, **kwd):
decoded_invocation_step_id = self.decode_id(step_id)
invocation_step = self.workflow_manager.get_invocation_step(trans, decoded_invocation_step_id)
return self.__encode_invocation_step(trans, invocation_step)
@expose_api_anonymous_and_sessionless
def invocation_step_jobs_summary(self, trans: GalaxyWebTransaction, invocation_id, **kwd):
decoded_invocation_id = self.decode_id(invocation_id)
ids = []
types = []
for (job_source_type, job_source_id, _) in invocation_job_source_iter(trans.sa_session, decoded_invocation_id):
ids.append(job_source_id)
types.append(job_source_type)
return [self.encode_all_ids(trans, s) for s in fetch_job_states(trans.sa_session, ids, types)]
@expose_api_anonymous_and_sessionless
def invocation_jobs_summary(self, trans: GalaxyWebTransaction, invocation_id, **kwd):
ids = [self.decode_id(invocation_id)]
types = ["WorkflowInvocation"]
return [self.encode_all_ids(trans, s) for s in fetch_job_states(trans.sa_session, ids, types)][0]
    @expose_api
    def update_invocation_step(self, trans: GalaxyWebTransaction, invocation_id, step_id, payload, **kwd):
        """Apply an action to a workflow invocation step and return the
        updated, serialized step.

        Only the ``action`` key of ``payload`` is used here; its semantics are
        interpreted by the workflow manager.
        """
        decoded_invocation_step_id = self.decode_id(step_id)
        action = payload.get("action", None)
        invocation_step = self.workflow_manager.update_invocation_step(
            trans,
            decoded_invocation_step_id,
            action=action,
        )
        return self.__encode_invocation_step(trans, invocation_step)
    def _workflow_from_dict(self, trans, data, workflow_create_options, source=None):
        """Create a stored workflow from a raw dictionary description.

        Builds the workflow via the workflow contents manager, makes it
        accessible (and flushes) when importable was requested, and installs
        missing tool shed tools if the options ask for it.

        :returns: tuple ``(stored_workflow, missing_tools)``
        :raises exceptions.RequestParameterInvalidException: if publishing is
            requested without the workflow also being importable.
        """
        publish = workflow_create_options.publish
        importable = workflow_create_options.is_importable
        if publish and not importable:
            raise exceptions.RequestParameterInvalidException("Published workflow must be importable.")
        workflow_contents_manager = self.app.workflow_contents_manager
        raw_workflow_description = workflow_contents_manager.ensure_raw_description(data)
        created_workflow = workflow_contents_manager.build_workflow_from_raw_description(
            trans,
            raw_workflow_description,
            workflow_create_options,
            source=source,
        )
        if importable:
            self._make_item_accessible(trans.sa_session, created_workflow.stored_workflow)
            trans.sa_session.flush()
        self._import_tools_if_needed(trans, workflow_create_options, raw_workflow_description)
        return created_workflow.stored_workflow, created_workflow.missing_tools
def _import_tools_if_needed(self, trans, workflow_create_options, raw_workflow_description):
if not workflow_create_options.import_tools:
return
if not trans.user_is_admin:
raise exceptions.AdminRequiredException()
data = raw_workflow_description.as_dict
tools = {}
for key in data["steps"]:
item = data["steps"][key]
if item is not None:
if "tool_shed_repository" in item:
tool_shed_repository = item["tool_shed_repository"]
if (
"owner" in tool_shed_repository
and "changeset_revision" in tool_shed_repository
and "name" in tool_shed_repository
and "tool_shed" in tool_shed_repository
):
toolstr = (
tool_shed_repository["owner"]
+ tool_shed_repository["changeset_revision"]
+ tool_shed_repository["name"]
+ tool_shed_repository["tool_shed"]
)
tools[toolstr] = tool_shed_repository
irm = InstallRepositoryManager(self.app)
install_options = workflow_create_options.install_options
for k in tools:
item = tools[k]
tool_shed_url = f"https://{item['tool_shed']}/"
name = item["name"]
owner = item["owner"]
changeset_revision = item["changeset_revision"]
irm.install(tool_shed_url, name, owner, changeset_revision, install_options)
    def __encode_invocation_step(self, trans: ProvidesUserContext, invocation_step):
        """Serialize an invocation step at 'element' detail with encoded ids."""
        return self.encode_all_ids(trans, invocation_step.to_dict("element"), True)
    def __get_stored_accessible_workflow(self, trans, workflow_id, **kwd):
        """Fetch a stored workflow the user may access.

        ``instance=true`` in kwd means ``workflow_id`` refers to a workflow
        instance rather than a stored workflow id.
        """
        instance = util.string_as_bool(kwd.get("instance", "false"))
        return self.workflow_manager.get_stored_accessible_workflow(trans, workflow_id, by_stored_id=not instance)
def __get_stored_workflow(self, trans, workflow_id, **kwd):
instance = util.string_as_bool(kwd.get("instance", "false"))
return self.workflow_manager.get_stored_workflow(trans, workflow_id, by_stored_id=not instance)
    def __encode_invocation(self, invocation, **kwd):
        """Serialize a workflow invocation using serialization params built
        from the remaining keyword arguments."""
        params = InvocationSerializationParams(**kwd)
        return self.invocations_service.serialize_workflow_invocation(invocation, params)
# FastAPI path/query parameter declarations shared by the endpoints below.
StoredWorkflowIDPathParam: EncodedDatabaseIdField = Path(
    ..., title="Stored Workflow ID", description="The encoded database identifier of the Stored Workflow."
)
InvocationIDPathParam: EncodedDatabaseIdField = Path(
    ..., title="Invocation ID", description="The encoded database identifier of the Invocation."
)
DeletedQueryParam: bool = Query(
    default=False, title="Display deleted", description="Whether to restrict result to deleted workflows."
)
HiddenQueryParam: bool = Query(
    default=False, title="Display hidden", description="Whether to restrict result to hidden workflows."
)
MissingToolsQueryParam: bool = Query(
    default=False,
    title="Display missing tools",
    description="Whether to include a list of missing tools per workflow entry",
)
ShowPublishedQueryParam: Optional[bool] = Query(default=None, title="Include published workflows.", description="")
ShowSharedQueryParam: Optional[bool] = Query(
    default=None, title="Include workflows shared with authenticated user.", description=""
)
SortByQueryParam: Optional[WorkflowSortByEnum] = Query(
    default=None,
    title="Sort workflow index by this attribute",
    # Typo fix in user-facing text: "In unspecified" -> "If unspecified".
    description="If unspecified, default ordering depends on other parameters but generally the user's own workflows appear first based on update time",
)
SortDescQueryParam: Optional[bool] = Query(
    default=None,
    title="Sort Descending",
    description="Sort in descending order?",
)
LimitQueryParam: Optional[int] = Query(default=None, title="Limit number of queries.")
OffsetQueryParam: Optional[int] = Query(
    default=0,
    title="Number of workflows to skip in sorted query (to enable pagination).",
)
query_tags = [
    IndexQueryTag("name", "The stored workflow's name.", "n"),
    IndexQueryTag(
        "tag",
        "The workflow's tag, if the tag contains a colon an approach will be made to match the key and value of the tag separately.",
        "t",
    ),
    IndexQueryTag("user", "The stored workflow's owner's username.", "u"),
    IndexQueryTag(
        "is:published",
        # Typo fixes in user-facing text: dropped duplicated "the" and
        # corrected "if to include" -> "to include".
        "Include only published workflows in the final result. Be sure the query parameter `show_published` is set to `true` to include all published workflows and not just the requesting user's.",
    ),
    IndexQueryTag(
        "is:share_with_me",
        "Include only workflows shared with the requesting user. Be sure the query parameter `show_shared` is set to `true` to include shared workflows.",
    ),
]
SearchQueryParam: Optional[str] = search_query_param(
    model_name="Stored Workflow",
    tags=query_tags,
    free_text_fields=["name", "tag", "user"],
)
SkipStepCountsQueryParam: bool = Query(
    default=False,
    title="Skip step counts.",
    description="Set this to true to skip joining workflow step counts and optimize the resulting index query. Response objects will not contain step counts.",
)
@router.cbv
class FastAPIWorkflows:
service: WorkflowsService = depends(WorkflowsService)
invocations_service: InvocationsService = depends(InvocationsService)
    @router.get(
        "/api/workflows",
        summary="Lists stored workflows viewable by the user.",
        response_description="A list with summary stored workflow information per viewable entry.",
    )
    def index(
        self,
        response: Response,
        trans: ProvidesUserContext = DependsOnTrans,
        show_deleted: bool = DeletedQueryParam,
        show_hidden: bool = HiddenQueryParam,
        missing_tools: bool = MissingToolsQueryParam,
        show_published: Optional[bool] = ShowPublishedQueryParam,
        show_shared: Optional[bool] = ShowSharedQueryParam,
        sort_by: Optional[WorkflowSortByEnum] = SortByQueryParam,
        sort_desc: Optional[bool] = SortDescQueryParam,
        limit: Optional[int] = LimitQueryParam,
        offset: Optional[int] = OffsetQueryParam,
        search: Optional[str] = SearchQueryParam,
        skip_step_counts: bool = SkipStepCountsQueryParam,
    ) -> List[Dict[str, Any]]:
        """Return summary dictionaries for workflows matching the query params.

        The total number of matches (before limit/offset are applied) is
        reported in the ``total_matches`` response header.
        """
        payload = WorkflowIndexPayload(
            show_published=show_published,
            show_hidden=show_hidden,
            show_deleted=show_deleted,
            show_shared=show_shared,
            missing_tools=missing_tools,
            sort_by=sort_by,
            sort_desc=sort_desc,
            limit=limit,
            offset=offset,
            search=search,
            skip_step_counts=skip_step_counts,
        )
        workflows, total_matches = self.service.index(trans, payload, include_total_count=True)
        response.headers["total_matches"] = str(total_matches)
        return workflows
    @router.get(
        "/api/workflows/{id}/sharing",
        summary="Get the current sharing status of the given item.",
    )
    def sharing(
        self,
        trans: ProvidesUserContext = DependsOnTrans,
        id: EncodedDatabaseIdField = StoredWorkflowIDPathParam,
    ) -> SharingStatus:
        """Return the sharing status of the stored workflow."""
        return self.service.shareable_service.sharing(trans, id)
    @router.put(
        "/api/workflows/{id}/enable_link_access",
        summary="Makes this item accessible by a URL link.",
    )
    def enable_link_access(
        self,
        trans: ProvidesUserContext = DependsOnTrans,
        id: EncodedDatabaseIdField = StoredWorkflowIDPathParam,
    ) -> SharingStatus:
        """Enable link access for the workflow and return the new sharing status."""
        return self.service.shareable_service.enable_link_access(trans, id)
    @router.put(
        "/api/workflows/{id}/disable_link_access",
        summary="Makes this item inaccessible by a URL link.",
    )
    def disable_link_access(
        self,
        trans: ProvidesUserContext = DependsOnTrans,
        id: EncodedDatabaseIdField = StoredWorkflowIDPathParam,
    ) -> SharingStatus:
        """Disable link access for the workflow and return the new sharing status."""
        return self.service.shareable_service.disable_link_access(trans, id)
    @router.put(
        "/api/workflows/{id}/publish",
        summary="Makes this item public and accessible by a URL link.",
    )
    def publish(
        self,
        trans: ProvidesUserContext = DependsOnTrans,
        id: EncodedDatabaseIdField = StoredWorkflowIDPathParam,
    ) -> SharingStatus:
        """Publish the workflow and return the new sharing status."""
        return self.service.shareable_service.publish(trans, id)
    @router.put(
        "/api/workflows/{id}/unpublish",
        summary="Removes this item from the published list.",
    )
    def unpublish(
        self,
        trans: ProvidesUserContext = DependsOnTrans,
        id: EncodedDatabaseIdField = StoredWorkflowIDPathParam,
    ) -> SharingStatus:
        """Unpublish the workflow and return the new sharing status."""
        return self.service.shareable_service.unpublish(trans, id)
    @router.put(
        "/api/workflows/{id}/share_with_users",
        summary="Share this item with specific users.",
    )
    def share_with_users(
        self,
        trans: ProvidesUserContext = DependsOnTrans,
        id: EncodedDatabaseIdField = StoredWorkflowIDPathParam,
        payload: ShareWithPayload = Body(...),
    ) -> ShareWithStatus:
        """Share the workflow with the users named in the payload and return
        the resulting share status."""
        return self.service.shareable_service.share_with_users(trans, id, payload)
    @router.put(
        "/api/workflows/{id}/slug",
        summary="Set a new slug for this shared item.",
        status_code=status.HTTP_204_NO_CONTENT,
    )
    def set_slug(
        self,
        trans: ProvidesUserContext = DependsOnTrans,
        id: EncodedDatabaseIdField = StoredWorkflowIDPathParam,
        payload: SetSlugPayload = Body(...),
    ):
        """Set the workflow's URL slug; responds with 204 No Content."""
        self.service.shareable_service.set_slug(trans, id, payload)
        return Response(status_code=status.HTTP_204_NO_CONTENT)
@router.post(
"/api/invocations/{invocation_id}/prepare_store_download",
summary="Prepare a worklfow invocation export-style download.",
)
def prepare_store_download(
self,
trans: ProvidesUserContext = DependsOnTrans,
invocation_id: EncodedDatabaseIdField = InvocationIDPathParam,
payload: PrepareStoreDownloadPayload = Body(...),
) -> AsyncFile:
return self.invocations_service.prepare_store_download(
trans,
invocation_id,
payload,
)
@router.post(
"/api/invocations/{invocation_id}/write_store",
summary="Prepare a worklfow invocation export-style download and write to supplied URI.",
)
def write_store(
self,
trans: ProvidesUserContext = DependsOnTrans,
invocation_id: EncodedDatabaseIdField = InvocationIDPathParam,
payload: WriteStoreToPayload = Body(...),
) -> AsyncTaskResultSummary:
rval = self.invocations_service.write_store(
trans,
invocation_id,
payload,
)
return rval
| true | true |
f72f7ca31617049ebeb428ce645c75a60f6eb63c | 22,243 | py | Python | setup.py | kylemannock/python-games | 1cd25fb4b52c2226dae2d77c996c878bad4bcd65 | [
"Python-2.0",
"OLDAP-2.3"
] | null | null | null | setup.py | kylemannock/python-games | 1cd25fb4b52c2226dae2d77c996c878bad4bcd65 | [
"Python-2.0",
"OLDAP-2.3"
] | null | null | null | setup.py | kylemannock/python-games | 1cd25fb4b52c2226dae2d77c996c878bad4bcd65 | [
"Python-2.0",
"OLDAP-2.3"
] | null | null | null | #!/usr/bin/env python
#
# This is the distutils setup script for pygame.
# Full instructions are in https://www.pygame.org/wiki/GettingStarted
#
# To configure, compile, install, just run this script.
# python setup.py install
DESCRIPTION = """Pygame is a Python wrapper module for the
SDL multimedia library. It contains python functions and classes
that will allow you to use SDL's support for playing cdroms,
audio and video output, and keyboard, mouse and joystick input."""
EXTRAS = {}
METADATA = {
"name": "pygame",
"version": "1.9.5.dev0",
"license": "LGPL",
"url": "https://www.pygame.org",
"author": "Pete Shinners, Rene Dudfield, Marcus von Appen, Bob Pendleton, others...",
"author_email": "pygame@seul.org",
"description": "Python Game Development",
"long_description": DESCRIPTION,
}
import sys
import os
def compilation_help():
    """On failure, print a pointer to platform-specific compilation help.

    Detects the platform (refining Linux to Ubuntu/Debian when the
    interpreter still provides ``platform.linux_distribution``, removed in
    Python 3.8) and prints the matching pygame wiki URL plus a link to the
    contribution page.
    """
    import platform
    the_system = platform.system()
    if the_system == 'Linux':
        if hasattr(platform, 'linux_distribution'):
            distro = platform.linux_distribution()
            if distro[0] == 'Ubuntu':
                the_system = 'Ubuntu'
            elif distro[0] == 'Debian':
                the_system = 'Debian'
    help_urls = {
        'Linux': 'https://www.pygame.org/wiki/Compilation',
        'Ubuntu': 'https://www.pygame.org/wiki/CompileUbuntu',
        'Debian': 'https://www.pygame.org/wiki/CompileDebian',
        'Windows': 'https://www.pygame.org/wiki/CompileWindows',
        'Darwin': 'https://www.pygame.org/wiki/MacCompile',
    }
    default = 'https://www.pygame.org/wiki/Compilation'
    # Bug fix: look up the refined ``the_system`` value. Previously this
    # re-queried platform.system(), so the Ubuntu/Debian detection above
    # was dead code and those users got the generic Linux page.
    url = help_urls.get(the_system, default)
    is_pypy = '__pypy__' in sys.builtin_module_names
    if is_pypy:
        url += '\n    https://www.pygame.org/wiki/CompilePyPy'
    print ('---')
    print ('For help with compilation see:')
    print ('    %s' % url)
    print ('To contribute to pygame development see:')
    print ('    https://www.pygame.org/contribute.html')
    print ('---')
if not hasattr(sys, 'version_info') or sys.version_info < (2,7):
compilation_help()
raise SystemExit("Pygame requires Python version 2.7 or above.")
#get us to the correct directory
path = os.path.split(os.path.abspath(sys.argv[0]))[0]
os.chdir(path)
#os.environ["CFLAGS"] = "-W -Wall -Wpointer-arith -Wcast-qual -Winline " + \
# "-Wcast-align -Wconversion -Wstrict-prototypes " + \
# "-Wmissing-prototypes -Wmissing-declarations " + \
# "-Wnested-externs -Wshadow -Wredundant-decls"
if "-warnings" in sys.argv:
os.environ["CFLAGS"] = "-W -Wimplicit-int " + \
"-Wimplicit-function-declaration " + \
"-Wimplicit -Wmain -Wreturn-type -Wunused -Wswitch " + \
"-Wcomment -Wtrigraphs -Wformat -Wchar-subscripts " + \
"-Wuninitialized -Wparentheses " +\
"-Wpointer-arith -Wcast-qual -Winline -Wcast-align " + \
"-Wconversion -Wstrict-prototypes " + \
"-Wmissing-prototypes -Wmissing-declarations " + \
"-Wnested-externs -Wshadow -Wredundant-decls"
sys.argv.remove ("-warnings")
AUTO_CONFIG = False
if '-auto' in sys.argv:
AUTO_CONFIG = True
sys.argv.remove('-auto')
import os.path, glob, stat, shutil
import distutils.sysconfig
from distutils.core import setup, Extension, Command
from distutils.extension import read_setup_file
from distutils.command.install_data import install_data
from distutils.command.sdist import sdist
revision = ''
# Python 3.0 patch
if sys.version_info[0:2] == (3, 0):
import distutils.version
    def _cmp(x, y):
        """Three-way comparison shim for Python 3.0's distutils.version.

        Returns -1/0/1 like Python 2's ``cmp``; NotImplemented when the
        operands are unorderable. Note ``<`` is evaluated before ``==`` so a
        TypeError from either comparison maps to NotImplemented.
        """
        try:
            if x < y:
                return -1
            elif x == y:
                return 0
            return 1
        except TypeError:
            return NotImplemented
distutils.version.cmp = _cmp
del _cmp
def add_datafiles(data_files, dest_dir, pattern):
    """Expand a nested (directory, entries) spec into distutils data_files.

    ``pattern`` is a pair of a source directory and a list of entries; each
    entry is either a glob pattern or a nested pair for a subdirectory.
    Matching files are appended to ``data_files`` as (dest, files) tuples,
    deepest directories first; directories with no matches are omitted.
    """
    def walk(dest_path, src_path, entries):
        matched = []
        for entry in entries:
            if isinstance(entry, list):
                sub_dir, sub_entries = entry
                walk('/'.join([dest_path, sub_dir]),
                     os.path.join(src_path, sub_dir),
                     sub_entries)
            else:
                matched.extend(glob.glob(os.path.join(src_path, entry)))
        if matched:
            data_files.append((dest_path, matched))
    top_src, top_entries = pattern
    walk(dest_dir, top_src, top_entries)
# allow optionally using setuptools for bdist_egg.
if "-setuptools" in sys.argv:
from setuptools import setup, find_packages
sys.argv.remove ("-setuptools")
from setuptools import setup, find_packages
# NOTE: the bdist_mpkg_support is for darwin.
try:
import bdist_mpkg_support
from setuptools import setup, Extension
except ImportError:
pass
else:
EXTRAS.update({
'options': bdist_mpkg_support.options,
'setup_requires': ['bdist_mpkg>=0.4.2'],
#'install_requires': ['pyobjc'],
#'dependency_links': ['http://rene.f0o.com/~rene/stuff/macosx/']
})
#headers to install
headers = glob.glob(os.path.join('src_c', '*.h'))
headers.remove(os.path.join('src_c', 'scale.h'))
# option for not installing the headers.
if "-noheaders" in sys.argv:
headers = []
sys.argv.remove ("-noheaders")
#sanity check for any arguments
if len(sys.argv) == 1 and sys.stdout.isatty():
if sys.version_info[0] >= 3:
reply = input('\nNo Arguments Given, Perform Default Install? [Y/n]')
else:
reply = raw_input('\nNo Arguments Given, Perform Default Install? [Y/n]')
if not reply or reply[0].lower() != 'n':
sys.argv.append('install')
#make sure there is a Setup file
if AUTO_CONFIG or not os.path.isfile('Setup'):
print ('\n\nWARNING, No "Setup" File Exists, Running "buildconfig/config.py"')
import buildconfig.config
buildconfig.config.main(AUTO_CONFIG)
if '-config' in sys.argv:
sys.exit(0)
print ('\nContinuing With "setup.py"')
try:
s_mtime = os.stat("Setup")[stat.ST_MTIME]
sin_mtime = os.stat(os.path.join('buildconfig', 'Setup.SDL1.in'))[stat.ST_MTIME]
if sin_mtime > s_mtime:
print ('\n\nWARNING, "buildconfig/Setup.SDL1.in" newer than "Setup",'
'you might need to modify "Setup".')
except:
pass
# get compile info for all extensions
try:
extensions = read_setup_file('Setup')
except:
print ("""Error with the "Setup" file,
perhaps make a clean copy from "Setup.in".""")
compilation_help()
raise
#decide whether or not to enable new buffer protocol support
enable_newbuf = False
if sys.version_info >= (2, 6, 0):
try:
sys.pypy_version_info
except AttributeError:
enable_newbuf = True
if enable_newbuf:
enable_newbuf_value = '1'
else:
enable_newbuf_value = '0'
for e in extensions:
e.define_macros.append(('ENABLE_NEWBUF', enable_newbuf_value))
#if new buffer protocol support is disabled then remove the testing framework
if not enable_newbuf:
posn = None
for i, e in enumerate(extensions):
if e.name == 'newbuffer':
posn = i
if (posn is not None):
del extensions[posn]
# if not building font, try replacing with ftfont
alternate_font = os.path.join('src_py', 'font.py')
if os.path.exists(alternate_font):
os.remove(alternate_font)
have_font = False
have_freetype = False
for e in extensions:
if e.name == 'font':
have_font = True
if e.name == '_freetype':
have_freetype = True
if not have_font and have_freetype:
shutil.copyfile(os.path.join('src_py', 'ftfont.py'), alternate_font)
#extra files to install
data_path = os.path.join(distutils.sysconfig.get_python_lib(), 'pygame')
pygame_data_files = []
data_files = [('pygame', pygame_data_files)]
#add files in distribution directory
# pygame_data_files.append('LGPL')
# pygame_data_files.append('readme.html')
# pygame_data_files.append('install.html')
#add non .py files in lib directory
for f in glob.glob(os.path.join('src_py', '*')):
if not f[-3:] == '.py' and not f[-4:] == '.doc' and os.path.isfile(f):
pygame_data_files.append(f)
#tests/fixtures
add_datafiles(data_files, 'pygame/tests',
['test',
[['fixtures',
[['xbm_cursors',
['*.xbm']],
['fonts',
['*.ttf', '*.otf', '*.bdf', '*.png']]]]]])
#examples
add_datafiles(data_files, 'pygame/examples',
['examples',
['readme.rst',
['data',
['*']],
['macosx',
['*.py',
['aliens_app_example',
['*.py',
'README.txt',
['English.lproj',
['aliens.icns',
['MainMenu.nib',
['*']]]]]]]]]])
#docs
add_datafiles(data_files, 'pygame/docs',
['docs',
['*.html', # Navigation and help pages
'*.gif', # pygame logos
'*.js', # For doc search
['ref', # pygame reference
['*.html', # Reference pages
'*.js', # Comments script
'*.json']], # Comment data
['c_api', # pygame C API
['*.html']],
['tut', # Tutorials
['*.html',
['tom',
['*.html',
'*.png']]]],
['_static', # Sphinx added support files
['*.css',
'*.png',
'*.ico',
'*.js']],
['_images', # Sphinx added reST ".. image::" refs
['*.jpg',
'*.png',
'*.gif']],
['_sources', # Used for ref search
['*.txt',
['ref',
['*.txt']]]]]])
#generate the version module
def parse_version(ver):
    """Return the first three numeric components of *ver* as 'X, Y, Z'.

    E.g. ``'1.9.5.dev0'`` -> ``'1, 9, 5'``. Used to emit the ``vernum``
    tuple literal in the generated version module.
    """
    from re import findall
    # Raw string for the regex: '\d' in a plain string literal is an
    # invalid escape sequence (DeprecationWarning on modern Python).
    return ', '.join(s for s in findall(r'\d+', ver)[0:3])
def write_version_module(pygame_version, revision):
    """Generate src_py/version.py from the buildconfig template.

    Copies buildconfig/version.py.in verbatim, then appends ``ver`` (the
    full version string), ``vernum`` (comma-separated numeric components via
    ``parse_version``) and ``rev`` (the repository revision string).
    """
    vernum = parse_version(pygame_version)
    with open(os.path.join('buildconfig', 'version.py.in'), 'r') as header_file:
        header = header_file.read()
    with open(os.path.join('src_py', 'version.py'), 'w') as version_file:
        version_file.write(header)
        version_file.write('ver = "' + pygame_version + '"\n')
        version_file.write('vernum = ' + vernum + '\n')
        version_file.write('rev = "' + revision + '"\n')
write_version_module(METADATA['version'], revision)
#required. This will be filled if doing a Windows build.
cmdclass = {}
#try to find DLLs and copy them too (only on windows)
if sys.platform == 'win32':
from distutils.command.build_ext import build_ext
#add dependency DLLs to the project
lib_dependencies = {}
for e in extensions:
if e.name.startswith('COPYLIB_'):
lib_dependencies[e.name[8:]] = e.libraries
    def dependencies(roots):
        """Return the transitive dependency set for the given library roots.

        The result is a dictionary keyed on library root name with all
        values set to 1 (used as a set). Roots not present in
        ``lib_dependencies`` are ignored.
        """
        root_set = {}
        for root in roots:
            try:
                deps = lib_dependencies[root]
            except KeyError:
                # Not a known COPYLIB entry; nothing to record.
                pass
            else:
                root_set[root] = 1
                # Recurse so indirect dependencies are included too.
                root_set.update(dependencies(deps))
        return root_set
the_dlls = {}
required_dlls = {}
for e in extensions:
if e.name.startswith('COPYLIB_'):
the_dlls[e.name[8:]] = e.library_dirs[0]
else:
required_dlls.update(dependencies(e.libraries))
# join the required_dlls and the_dlls keys together.
lib_names = {}
for lib in list(required_dlls.keys()) + list(the_dlls.keys()):
lib_names[lib] = 1
for lib in lib_names.keys():
#next DLL; a distutils bug requires the paths to have Windows separators
f = the_dlls[lib].replace('/', os.sep)
if f == '_':
print ("WARNING, DLL for %s library not found." % lib)
else:
pygame_data_files.append(f)
class WinBuildExt(build_ext):
"""This build_ext sets necessary environment variables for MinGW"""
# __sdl_lib_dir is possible location of msvcrt replacement import
# libraries, if they exist. Pygame module base only links to SDL so
# should have the SDL library directory as its only -L option.
for e in extensions:
if e.name == 'base':
__sdl_lib_dir = e.library_dirs[0].replace('/', os.sep)
break
cmdclass['build_ext'] = WinBuildExt
# Add the precompiled smooth scale MMX functions to transform.
    def replace_scale_mmx():
        """Swap scale_mmx.c for the matching precompiled object in 'transform'.

        Picks the win64 or win32 .obj based on the interpreter bitness and
        removes scale_mmx.c from the extension sources so it is not compiled
        on Windows (presumably the source does not build with the Windows
        toolchain -- confirm).
        """
        for e in extensions:
            if e.name == 'transform':
                if '64 bit' in sys.version:
                    e.extra_objects.append(
                        os.path.join('buildconfig', 'obj', 'win64', 'scale_mmx.obj'))
                else:
                    e.extra_objects.append(
                        os.path.join('buildconfig', 'obj', 'win32', 'scale_mmx.obj'))
                for i in range(len(e.sources)):
                    if e.sources[i].endswith('scale_mmx.c'):
                        del e.sources[i]
                        return
replace_scale_mmx()
#clean up the list of extensions
for e in extensions[:]:
if e.name.startswith('COPYLIB_'):
extensions.remove(e) #don't compile the COPYLIBs, just clean them
else:
e.name = 'pygame.' + e.name #prepend package name on modules
#data installer with improved intelligence over distutils
#data files are copied into the project directory instead
#of willy-nilly
class smart_install_data(install_data):
    """install_data variant that installs data files into the library dir.

    Default distutils behavior scatters data files relative to the install
    prefix; this redirects them next to the installed package instead.
    """
    def run(self):
        # need to change self.install_dir to the actual library dir
        install_cmd = self.get_finalized_command('install')
        self.install_dir = getattr(install_cmd, 'install_lib')
        return install_data.run(self)
cmdclass['install_data'] = smart_install_data
class OurSdist(sdist):
    """sdist variant that reads its manifest template from buildconfig/."""
    def initialize_options(self):
        sdist.initialize_options(self)
        # we do not want MANIFEST.in to appear in the root cluttering up things.
        self.template = os.path.join('buildconfig', 'MANIFEST.in')
cmdclass['sdist'] = OurSdist
if "bdist_msi" in sys.argv:
# if you are making an msi, we want it to overwrite files
# we also want to include the repository revision in the file name
from distutils.command import bdist_msi
import msilib
class bdist_msi_overwrite_on_install(bdist_msi.bdist_msi):
def run(self):
bdist_msi.bdist_msi.run(self)
# Remove obsolete files.
comp = "pygame1" # Pygame component
prop = comp # Directory property
records = [("surfarray.pyd", comp,
"SURFAR~1.PYD|surfarray.pyd", prop, 1),
("sndarray.pyd", comp,
"SNDARRAY.PYD|sndarray.pyd", prop, 1),
("camera.pyd", comp, "CAMERA.PYD|camera.pyd", prop, 1),
("color.py", comp, "COLOR.PY|color.py", prop, 1),
("color.pyc", comp, "COLOR.PYC|color.pyc", prop, 1),
("color.pyo", comp, "COLOR.PYO|color.pyo", prop, 1)]
msilib.add_data(self.db, "RemoveFile", records)
# Overwrite outdated files.
fullname = self.distribution.get_fullname()
installer_name = self.get_installer_filename(fullname)
print ("changing %s to overwrite files on install" % installer_name)
msilib.add_data(self.db, "Property", [("REINSTALLMODE", "amus")])
self.db.Commit()
def get_installer_filename(self, fullname):
if revision:
fullname += '-hg_' + revision
return bdist_msi.bdist_msi.get_installer_filename(self, fullname)
cmdclass['bdist_msi'] = bdist_msi_overwrite_on_install
# test command. For doing 'python setup.py test'
class TestCommand(Command):
    """Run the test suite via `python setup.py test`."""
    user_options = [ ]
    def initialize_options(self):
        self._dir = os.getcwd()
    def finalize_options(self):
        pass
    def run(self):
        '''
        Run the tests with default options by delegating to test/__main__.py
        in a subprocess; returns its exit status.
        '''
        import subprocess
        return subprocess.call([sys.executable, os.path.join('test', '__main__.py')])
cmdclass['test'] = TestCommand
class DocsCommand(Command):
    """ For building the pygame documentation with `python setup.py docs`.

    This generates html, and documentation .h header files.
    """
    user_options = [ ]
    def initialize_options(self):
        self._dir = os.getcwd()
    def finalize_options(self):
        pass
    def run(self):
        '''
        Build the documentation by delegating to buildconfig/makeref.py.
        (Requires Python 3.6+ and sphinx.)
        '''
        docs_help = (
            "Building docs requires Python version 3.6 or above, and sphinx."
        )
        if not hasattr(sys, 'version_info') or sys.version_info < (3, 6):
            raise SystemExit(docs_help)
        import subprocess
        try:
            return subprocess.call([
                sys.executable, os.path.join('buildconfig', 'makeref.py')]
            )
        except:
            # Most likely cause is a missing interpreter/sphinx setup; show
            # the requirements before propagating the error.
            print(docs_help)
            raise
cmdclass['docs'] = DocsCommand
# Prune empty file lists.
# Bug fix: this previously assigned to a misspelled name ``date_files``,
# so the pruned list was dead code and empty entries were still passed
# through to setup().
data_files = [(path, files) for path, files in data_files if files]
#finally,
#call distutils with all needed info
PACKAGEDATA = {
    "cmdclass": cmdclass,
    "packages": ['pygame', 'pygame.gp2x', 'pygame.threads',
                 'pygame.tests',
                 'pygame.tests.test_utils',
                 'pygame.tests.run_tests__tests',
                 'pygame.tests.run_tests__tests.all_ok',
                 'pygame.tests.run_tests__tests.failures1',
                 'pygame.tests.run_tests__tests.incomplete',
                 'pygame.tests.run_tests__tests.infinite_loop',
                 'pygame.tests.run_tests__tests.print_stderr',
                 'pygame.tests.run_tests__tests.print_stdout',
                 'pygame.tests.run_tests__tests.incomplete_todo',
                 'pygame.tests.run_tests__tests.exclude',
                 'pygame.tests.run_tests__tests.timeout',
                 'pygame.tests.run_tests__tests.everything',
                 'pygame.docs',
                 'pygame.examples'],
    "package_dir": {'pygame': 'src_py',
                    'pygame.threads': 'src_py/threads',
                    'pygame.gp2x': 'src_py/gp2x',
                    'pygame.tests': 'test',
                    'pygame.docs': 'docs',
                    'pygame.examples': 'examples'},
    "headers": headers,
    "ext_modules": extensions,
    "data_files": data_files,
    "zip_safe": False,
}
PACKAGEDATA.update(METADATA)
PACKAGEDATA.update(EXTRAS)
try:
    setup(**PACKAGEDATA)
except:
    # Point the user at the compilation help pages before re-raising.
    compilation_help()
    raise
def remove_old_files():
    """Remove obsolete pygame files left over from earlier installs.

    Locates the installed pygame by importing it (the install location can
    be unusual under setuptools, and we must not delete files from another
    installation); falls back to ``data_path`` when the import fails.
    Removal is best effort: each file is reported and failures are printed
    rather than raised.
    """
    try:
        import pygame.base
        use_pygame = 1
    except Exception:
        use_pygame = 0
    if use_pygame:
        install_path = os.path.split(pygame.base.__file__)[0]
        extension_ext = os.path.splitext(pygame.base.__file__)[1]
    else:
        if not os.path.exists(data_path):
            return
        install_path = data_path
        base_file = glob.glob(os.path.join(data_path, "base*"))
        if not base_file:
            return
        extension_ext = os.path.splitext(base_file[0])[1]
    # here are the .so/.pyd files we need to ask to remove.
    ext_to_remove = ["camera"]
    # here are the .py/.pyo/.pyc files we need to ask to remove.
    py_to_remove = ["color"]
    # (A stray no-op `os.path.join(data_path, 'color.py')` statement that
    # used to sit here has been removed.)
    if os.name == "e32":  # Don't warn on Symbian. The color.py is used as a wrapper.
        py_to_remove = []
    # See if any of the files are there.
    extension_files = ["%s%s" % (x, extension_ext) for x in ext_to_remove]
    py_files = ["%s%s" % (x, py_ext)
                for py_ext in [".py", ".pyc", ".pyo"]
                for x in py_to_remove]
    files = py_files + extension_files
    unwanted_files = [os.path.join(install_path, f) for f in files]
    ask_remove = [f for f in unwanted_files if os.path.exists(f)]
    for f in ask_remove:
        try:
            print("trying to remove old file :%s: ..." % f)
            os.remove(f)
            print("Successfully removed :%s:." % f)
        except OSError:
            print("FAILED to remove old file :%s:" % f)
if "install" in sys.argv:
    # remove some old files.
    # only call after a successful install. Should only reach here if there is
    # a successful install... otherwise setup() raises an error.
    try:
        remove_old_files()
    except:
        # Best-effort cleanup: never let leftover-file removal fail the
        # install itself.
        pass
| 32.61437 | 99 | 0.572944 |
DESCRIPTION = """Pygame is a Python wrapper module for the
SDL multimedia library. It contains python functions and classes
that will allow you to use SDL's support for playing cdroms,
audio and video output, and keyboard, mouse and joystick input."""
EXTRAS = {}
METADATA = {
"name": "pygame",
"version": "1.9.5.dev0",
"license": "LGPL",
"url": "https://www.pygame.org",
"author": "Pete Shinners, Rene Dudfield, Marcus von Appen, Bob Pendleton, others...",
"author_email": "pygame@seul.org",
"description": "Python Game Development",
"long_description": DESCRIPTION,
}
import sys
import os
def compilation_help():
import platform
the_system = platform.system()
if the_system == 'Linux':
if hasattr(platform, 'linux_distribution'):
distro = platform.linux_distribution()
if distro[0] == 'Ubuntu':
the_system = 'Ubuntu'
elif distro[0] == 'Debian':
the_system = 'Debian'
help_urls = {
'Linux': 'https://www.pygame.org/wiki/Compilation',
'Ubuntu': 'https://www.pygame.org/wiki/CompileUbuntu',
'Debian': 'https://www.pygame.org/wiki/CompileDebian',
'Windows': 'https://www.pygame.org/wiki/CompileWindows',
'Darwin': 'https://www.pygame.org/wiki/MacCompile',
}
default = 'https://www.pygame.org/wiki/Compilation'
url = help_urls.get(platform.system(), default)
is_pypy = '__pypy__' in sys.builtin_module_names
if is_pypy:
url += '\n https://www.pygame.org/wiki/CompilePyPy'
print ('---')
print ('For help with compilation see:')
print (' %s' % url)
print ('To contribute to pygame development see:')
print (' https://www.pygame.org/contribute.html')
print ('---')
if not hasattr(sys, 'version_info') or sys.version_info < (2,7):
compilation_help()
raise SystemExit("Pygame requires Python version 2.7 or above.")
#get us to the correct directory
path = os.path.split(os.path.abspath(sys.argv[0]))[0]
os.chdir(path)
#os.environ["CFLAGS"] = "-W -Wall -Wpointer-arith -Wcast-qual -Winline " + \
# "-Wcast-align -Wconversion -Wstrict-prototypes " + \
# "-Wmissing-prototypes -Wmissing-declarations " + \
# "-Wnested-externs -Wshadow -Wredundant-decls"
if "-warnings" in sys.argv:
os.environ["CFLAGS"] = "-W -Wimplicit-int " + \
"-Wimplicit-function-declaration " + \
"-Wimplicit -Wmain -Wreturn-type -Wunused -Wswitch " + \
"-Wcomment -Wtrigraphs -Wformat -Wchar-subscripts " + \
"-Wuninitialized -Wparentheses " +\
"-Wpointer-arith -Wcast-qual -Winline -Wcast-align " + \
"-Wconversion -Wstrict-prototypes " + \
"-Wmissing-prototypes -Wmissing-declarations " + \
"-Wnested-externs -Wshadow -Wredundant-decls"
sys.argv.remove ("-warnings")
AUTO_CONFIG = False
if '-auto' in sys.argv:
AUTO_CONFIG = True
sys.argv.remove('-auto')
import os.path, glob, stat, shutil
import distutils.sysconfig
from distutils.core import setup, Extension, Command
from distutils.extension import read_setup_file
from distutils.command.install_data import install_data
from distutils.command.sdist import sdist
revision = ''
# Python 3.0 patch
if sys.version_info[0:2] == (3, 0):
import distutils.version
def _cmp(x, y):
try:
if x < y:
return -1
elif x == y:
return 0
return 1
except TypeError:
return NotImplemented
distutils.version.cmp = _cmp
del _cmp
def add_datafiles(data_files, dest_dir, pattern):
    """Expand a nested (directory, elements) pattern into data_files.

    ``pattern`` is a pair ``(src_dir, elements)`` where each element is
    either a glob string or another nested ``[subdir, elements]`` pair.
    Every directory level that matches at least one file contributes a
    ``(dest_path, [files...])`` entry to ``data_files``.
    """
    top_src, top_elements = pattern

    def walk(dest_path, src_path, elements):
        matched = []
        for entry in elements:
            if isinstance(entry, list):
                # nested [subdir, elements] pair: recurse one level down
                sub_dir, sub_elements = entry
                walk('/'.join([dest_path, sub_dir]),
                     os.path.join(src_path, sub_dir),
                     sub_elements)
            else:
                # plain glob pattern relative to the current source dir
                matched.extend(glob.glob(os.path.join(src_path, entry)))
        if matched:
            data_files.append((dest_path, matched))

    walk(dest_dir, top_src, top_elements)
# allow optionally using setuptools for bdist_egg.
if "-setuptools" in sys.argv:
from setuptools import setup, find_packages
sys.argv.remove ("-setuptools")
from setuptools import setup, find_packages
# NOTE: the bdist_mpkg_support is for darwin.
try:
import bdist_mpkg_support
from setuptools import setup, Extension
except ImportError:
pass
else:
EXTRAS.update({
'options': bdist_mpkg_support.options,
'setup_requires': ['bdist_mpkg>=0.4.2'],
#'install_requires': ['pyobjc'],
#'dependency_links': ['http://rene.f0o.com/~rene/stuff/macosx/']
})
#headers to install
headers = glob.glob(os.path.join('src_c', '*.h'))
headers.remove(os.path.join('src_c', 'scale.h'))
# option for not installing the headers.
if "-noheaders" in sys.argv:
headers = []
sys.argv.remove ("-noheaders")
#sanity check for any arguments
if len(sys.argv) == 1 and sys.stdout.isatty():
if sys.version_info[0] >= 3:
reply = input('\nNo Arguments Given, Perform Default Install? [Y/n]')
else:
reply = raw_input('\nNo Arguments Given, Perform Default Install? [Y/n]')
if not reply or reply[0].lower() != 'n':
sys.argv.append('install')
#make sure there is a Setup file
if AUTO_CONFIG or not os.path.isfile('Setup'):
print ('\n\nWARNING, No "Setup" File Exists, Running "buildconfig/config.py"')
import buildconfig.config
buildconfig.config.main(AUTO_CONFIG)
if '-config' in sys.argv:
sys.exit(0)
print ('\nContinuing With "setup.py"')
try:
s_mtime = os.stat("Setup")[stat.ST_MTIME]
sin_mtime = os.stat(os.path.join('buildconfig', 'Setup.SDL1.in'))[stat.ST_MTIME]
if sin_mtime > s_mtime:
print ('\n\nWARNING, "buildconfig/Setup.SDL1.in" newer than "Setup",'
'you might need to modify "Setup".')
except:
pass
# get compile info for all extensions
try:
extensions = read_setup_file('Setup')
except:
print ("""Error with the "Setup" file,
perhaps make a clean copy from "Setup.in".""")
compilation_help()
raise
#decide whether or not to enable new buffer protocol support
enable_newbuf = False
if sys.version_info >= (2, 6, 0):
try:
sys.pypy_version_info
except AttributeError:
enable_newbuf = True
if enable_newbuf:
enable_newbuf_value = '1'
else:
enable_newbuf_value = '0'
for e in extensions:
e.define_macros.append(('ENABLE_NEWBUF', enable_newbuf_value))
#if new buffer protocol support is disabled then remove the testing framework
if not enable_newbuf:
posn = None
for i, e in enumerate(extensions):
if e.name == 'newbuffer':
posn = i
if (posn is not None):
del extensions[posn]
# if not building font, try replacing with ftfont
alternate_font = os.path.join('src_py', 'font.py')
if os.path.exists(alternate_font):
os.remove(alternate_font)
have_font = False
have_freetype = False
for e in extensions:
if e.name == 'font':
have_font = True
if e.name == '_freetype':
have_freetype = True
if not have_font and have_freetype:
shutil.copyfile(os.path.join('src_py', 'ftfont.py'), alternate_font)
#extra files to install
data_path = os.path.join(distutils.sysconfig.get_python_lib(), 'pygame')
pygame_data_files = []
data_files = [('pygame', pygame_data_files)]
#add files in distribution directory
# pygame_data_files.append('LGPL')
# pygame_data_files.append('readme.html')
# pygame_data_files.append('install.html')
#add non .py files in lib directory
for f in glob.glob(os.path.join('src_py', '*')):
if not f[-3:] == '.py' and not f[-4:] == '.doc' and os.path.isfile(f):
pygame_data_files.append(f)
#tests/fixtures
add_datafiles(data_files, 'pygame/tests',
['test',
[['fixtures',
[['xbm_cursors',
['*.xbm']],
['fonts',
['*.ttf', '*.otf', '*.bdf', '*.png']]]]]])
#examples
add_datafiles(data_files, 'pygame/examples',
['examples',
['readme.rst',
['data',
['*']],
['macosx',
['*.py',
['aliens_app_example',
['*.py',
'README.txt',
['English.lproj',
['aliens.icns',
['MainMenu.nib',
['*']]]]]]]]]])
#docs
add_datafiles(data_files, 'pygame/docs',
['docs',
['*.html', # Navigation and help pages
'*.gif', # pygame logos
'*.js', # For doc search
['ref', # pygame reference
['*.html', # Reference pages
'*.js', # Comments script
'*.json']], # Comment data
['c_api', # pygame C API
['*.html']],
['tut', # Tutorials
['*.html',
['tom',
['*.html',
'*.png']]]],
['_static', # Sphinx added support files
['*.css',
'*.png',
'*.ico',
'*.js']],
['_images', # Sphinx added reST ".. image::" refs
['*.jpg',
'*.png',
'*.gif']],
['_sources', # Used for ref search
['*.txt',
['ref',
['*.txt']]]]]])
#generate the version module
def parse_version(ver):
    """Return the first three numeric components of *ver* as a string.

    E.g. ``"1.9.5.dev0"`` -> ``"1, 9, 5"``, suitable for writing a
    ``vernum = 1, 9, 5`` tuple literal into version.py.
    """
    from re import findall
    # BUG FIX: raw string -- '\d' in a plain literal is an invalid
    # escape sequence (DeprecationWarning, SyntaxWarning on newer
    # Pythons, and slated to become an error).
    return ', '.join(s for s in findall(r'\d+', ver)[0:3])
def write_version_module(pygame_version, revision):
    """Generate src_py/version.py from the buildconfig template.

    Appends ``ver``, ``vernum`` and ``rev`` definitions to the contents
    of buildconfig/version.py.in and writes the result out.
    """
    template_path = os.path.join('buildconfig', 'version.py.in')
    output_path = os.path.join('src_py', 'version.py')
    with open(template_path, 'r') as template:
        contents = template.read()
    parts = [
        contents,
        'ver = "' + pygame_version + '"\n',
        'vernum = ' + parse_version(pygame_version) + '\n',
        'rev = "' + revision + '"\n',
    ]
    with open(output_path, 'w') as out:
        out.write(''.join(parts))
write_version_module(METADATA['version'], revision)
#required. This will be filled if doing a Windows build.
cmdclass = {}
#try to find DLLs and copy them too (only on windows)
if sys.platform == 'win32':
from distutils.command.build_ext import build_ext
#add dependency DLLs to the project
lib_dependencies = {}
for e in extensions:
if e.name.startswith('COPYLIB_'):
lib_dependencies[e.name[8:]] = e.libraries
def dependencies(roots):
root_set = {}
for root in roots:
try:
deps = lib_dependencies[root]
except KeyError:
pass
else:
root_set[root] = 1
root_set.update(dependencies(deps))
return root_set
the_dlls = {}
required_dlls = {}
for e in extensions:
if e.name.startswith('COPYLIB_'):
the_dlls[e.name[8:]] = e.library_dirs[0]
else:
required_dlls.update(dependencies(e.libraries))
# join the required_dlls and the_dlls keys together.
lib_names = {}
for lib in list(required_dlls.keys()) + list(the_dlls.keys()):
lib_names[lib] = 1
for lib in lib_names.keys():
#next DLL; a distutils bug requires the paths to have Windows separators
f = the_dlls[lib].replace('/', os.sep)
if f == '_':
print ("WARNING, DLL for %s library not found." % lib)
else:
pygame_data_files.append(f)
    class WinBuildExt(build_ext):
        """build_ext subclass for Windows builds.

        The class-body loop below runs once, at class-creation time: it
        records the SDL library directory taken from the 'base'
        extension as a class attribute.  Note that because of Python
        name mangling the attribute is actually stored as
        ``_WinBuildExt__sdl_lib_dir``.
        """
        # __sdl_lib_dir is possible location of msvcrt replacement import
        # libraries, if they exist. Pygame module base only links to SDL so
        # should have the SDL library directory as its only -L option.
        for e in extensions:
            if e.name == 'base':
                __sdl_lib_dir = e.library_dirs[0].replace('/', os.sep)
                break
cmdclass['build_ext'] = WinBuildExt
# Add the precompiled smooth scale MMX functions to transform.
def replace_scale_mmx():
for e in extensions:
if e.name == 'transform':
if '64 bit' in sys.version:
e.extra_objects.append(
os.path.join('buildconfig', 'obj', 'win64', 'scale_mmx.obj'))
else:
e.extra_objects.append(
os.path.join('buildconfig', 'obj', 'win32', 'scale_mmx.obj'))
for i in range(len(e.sources)):
if e.sources[i].endswith('scale_mmx.c'):
del e.sources[i]
return
replace_scale_mmx()
#clean up the list of extensions
for e in extensions[:]:
if e.name.startswith('COPYLIB_'):
extensions.remove(e) #don't compile the COPYLIBs, just clean them
else:
e.name = 'pygame.' + e.name
class smart_install_data(install_data):
    """install_data variant that installs the data files into the module
    install directory (install_lib) instead of the default data prefix,
    so the extra files end up inside the pygame package."""
    def run(self):
        # redirect the data install dir to where the modules go
        install_cmd = self.get_finalized_command('install')
        self.install_dir = getattr(install_cmd, 'install_lib')
        return install_data.run(self)
cmdclass['install_data'] = smart_install_data
class OurSdist(sdist):
    """sdist command that reads its manifest template from
    buildconfig/MANIFEST.in rather than the project root."""
    def initialize_options(self):
        sdist.initialize_options(self)
        # use the MANIFEST.in kept under buildconfig/
        self.template = os.path.join('buildconfig', 'MANIFEST.in')
cmdclass['sdist'] = OurSdist
if "bdist_msi" in sys.argv:
from distutils.command import bdist_msi
import msilib
    class bdist_msi_overwrite_on_install(bdist_msi.bdist_msi):
        """bdist_msi subclass that makes the generated MSI remove stale
        files from earlier pygame releases and overwrite on reinstall."""
        def run(self):
            bdist_msi.bdist_msi.run(self)
            # schedule removal of files shipped by older pygame versions
            # via the MSI RemoveFile table
            comp = "pygame1"
            prop = comp
            records = [("surfarray.pyd", comp,
                        "SURFAR~1.PYD|surfarray.pyd", prop, 1),
                       ("sndarray.pyd", comp,
                        "SNDARRAY.PYD|sndarray.pyd", prop, 1),
                       ("camera.pyd", comp, "CAMERA.PYD|camera.pyd", prop, 1),
                       ("color.py", comp, "COLOR.PY|color.py", prop, 1),
                       ("color.pyc", comp, "COLOR.PYC|color.pyc", prop, 1),
                       ("color.pyo", comp, "COLOR.PYO|color.pyo", prop, 1)]
            msilib.add_data(self.db, "RemoveFile", records)
            fullname = self.distribution.get_fullname()
            installer_name = self.get_installer_filename(fullname)
            print ("changing %s to overwrite files on install" % installer_name)
            # REINSTALLMODE property so files are rewritten on reinstall
            msilib.add_data(self.db, "Property", [("REINSTALLMODE", "amus")])
            self.db.Commit()
        def get_installer_filename(self, fullname):
            # append the mercurial revision (when known) to the MSI name
            if revision:
                fullname += '-hg_' + revision
            return bdist_msi.bdist_msi.get_installer_filename(self, fullname)
cmdclass['bdist_msi'] = bdist_msi_overwrite_on_install
class TestCommand(Command):
    """Run the pygame test suite via ``python setup.py test``."""
    user_options = [ ]
    def initialize_options(self):
        # remember the invocation directory (not used within this class)
        self._dir = os.getcwd()
    def finalize_options(self):
        pass
    def run(self):
        # run in a subprocess so a crashing test cannot take setup.py down
        import subprocess
        return subprocess.call([sys.executable, os.path.join('test', '__main__.py')])
cmdclass['test'] = TestCommand
class DocsCommand(Command):
    """Build the documentation via ``python setup.py docs``.

    Delegates to buildconfig/makeref.py, which requires Python >= 3.6
    and sphinx.
    """
    user_options = [ ]
    def initialize_options(self):
        # remember the invocation directory (not used within this class)
        self._dir = os.getcwd()
    def finalize_options(self):
        pass
    def run(self):
        docs_help = (
            "Building docs requires Python version 3.6 or above, and sphinx."
        )
        if not hasattr(sys, 'version_info') or sys.version_info < (3, 6):
            raise SystemExit(docs_help)
        import subprocess
        try:
            return subprocess.call([
                sys.executable, os.path.join('buildconfig', 'makeref.py')]
            )
        except:
            # remind the user of the requirements before re-raising
            print(docs_help)
            raise
cmdclass['docs'] = DocsCommand
date_files = [(path, files) for path, files in data_files if files]
PACKAGEDATA = {
"cmdclass": cmdclass,
"packages": ['pygame', 'pygame.gp2x', 'pygame.threads',
'pygame.tests',
'pygame.tests.test_utils',
'pygame.tests.run_tests__tests',
'pygame.tests.run_tests__tests.all_ok',
'pygame.tests.run_tests__tests.failures1',
'pygame.tests.run_tests__tests.incomplete',
'pygame.tests.run_tests__tests.infinite_loop',
'pygame.tests.run_tests__tests.print_stderr',
'pygame.tests.run_tests__tests.print_stdout',
'pygame.tests.run_tests__tests.incomplete_todo',
'pygame.tests.run_tests__tests.exclude',
'pygame.tests.run_tests__tests.timeout',
'pygame.tests.run_tests__tests.everything',
'pygame.docs',
'pygame.examples'],
"package_dir": {'pygame': 'src_py',
'pygame.threads': 'src_py/threads',
'pygame.gp2x': 'src_py/gp2x',
'pygame.tests': 'test',
'pygame.docs': 'docs',
'pygame.examples': 'examples'},
"headers": headers,
"ext_modules": extensions,
"data_files": data_files,
"zip_safe": False,
}
PACKAGEDATA.update(METADATA)
PACKAGEDATA.update(EXTRAS)
try:
setup(**PACKAGEDATA)
except:
compilation_help()
raise
def remove_old_files():
    """Delete leftover files from previous pygame installations.

    Modules that were removed or converted between C extension and pure
    Python (e.g. camera, color) would otherwise shadow the new versions.
    The install directory is located via an importable pygame if
    possible, falling back to the computed ``data_path``.
    """
    # try to locate an existing install through the pygame package itself
    try:
        import pygame.base
        use_pygame = 1
    except:
        use_pygame = 0

    if use_pygame:
        install_path = os.path.split(pygame.base.__file__)[0]
        extension_ext = os.path.splitext(pygame.base.__file__)[1]
    else:
        if not os.path.exists(data_path):
            return
        install_path = data_path
        # infer the platform's extension suffix from an installed base.*
        base_file = glob.glob(os.path.join(data_path, "base*"))
        if not base_file:
            return
        extension_ext = os.path.splitext(base_file[0])[1]

    # obsolete compiled extensions / pure-python modules to clean up
    ext_to_remove = ["camera"]
    py_to_remove = ["color"]
    # BUG FIX: removed a dead `os.path.join(data_path, 'color.py')`
    # statement whose result was discarded.

    if os.name == "e32":  # Don't warn on Symbian. The color module is used there.
        py_to_remove = []

    # See if any of the files are there.
    extension_files = ["%s%s" % (x, extension_ext) for x in ext_to_remove]

    py_files = ["%s%s" % (x, py_ext)
                for py_ext in [".py", ".pyc", ".pyo"]
                for x in py_to_remove]

    files = py_files + extension_files

    unwanted_files = []
    for f in files:
        unwanted_files.append( os.path.join( install_path, f ) )

    ask_remove = []
    for f in unwanted_files:
        if os.path.exists(f):
            ask_remove.append(f)

    for f in ask_remove:
        try:
            print("trying to remove old file :%s: ..." %f)
            os.remove(f)
            print("Successfully removed :%s:." % f)
        except:
            print("FAILED to remove old file :%s:" % f)
if "install" in sys.argv:
# remove some old files.
# only call after a successful install. Should only reach here if there is
# a successful install... otherwise setup() raises an error.
try:
remove_old_files()
except:
pass
| true | true |
f72f7d71973611e5b1b6ca5c103ced2746353d09 | 2,070 | py | Python | ansible/old/inventory.py | Otus-DevOps-2021-11/ivan32rus_infra | d504a0bf8d98c9850b005390cd93aeea7e81bd1e | [
"MIT"
] | null | null | null | ansible/old/inventory.py | Otus-DevOps-2021-11/ivan32rus_infra | d504a0bf8d98c9850b005390cd93aeea7e81bd1e | [
"MIT"
] | 9 | 2021-12-20T21:16:20.000Z | 2022-02-18T11:56:22.000Z | docker/docker-monolith/infra/ansible/inventory.py | Otus-DevOps-2021-11/ivan32rus_microservices | 88e273124d1e1b39c46c526370b96385779c5a05 | [
"MIT"
] | 1 | 2021-12-18T11:49:30.000Z | 2021-12-18T11:49:30.000Z | #!/usr/bin/env python
'''
Example custom dynamic inventory script for Ansible, in Python.
Works with Python 3.8.10.
used:
https://www.jeffgeerling.com/blog/creating-custom-dynamic-inventories-ansible
'''
import os
import sys
import argparse
try:
import json
except ImportError:
import simplejson as json
class ExampleInventory(object):
    """Minimal Ansible dynamic-inventory provider.

    Instantiating the class parses the command line and prints the
    inventory as JSON; Ansible invokes the script with either ``--list``
    or ``--host <hostname>``.
    """

    def __init__(self):
        self.read_cli_args()
        if self.args.list:
            # full inventory requested
            inventory = self.example_inventory()
        else:
            # --host is unsupported (host vars live in _meta), and with
            # no recognised arguments the inventory is empty as well
            inventory = self.empty_inventory()
        self.inventory = inventory
        print(json.dumps(self.inventory))

    def example_inventory(self):
        """Return the hard-coded demo inventory."""
        host_db = "51.250.4.212"
        host_app = "62.84.113.197"
        group_vars = {
            "ansible_ssh_user": "ubuntu",
            "ansible_ssh_private_key_file": "~/.ssh/id_rsa",
            "example_variable": "value",
        }
        return {
            "group": {
                "hosts": [host_db, host_app],
                "vars": group_vars,
            },
            "_meta": {
                "hostvars": {
                    host_db: {"reddit": "db"},
                    host_app: {"reddit": "app"},
                },
            },
        }

    def empty_inventory(self):
        """Return an inventory with no hosts (used for --host)."""
        return {'_meta': {'hostvars': {}}}

    def read_cli_args(self):
        """Parse the --list / --host arguments Ansible passes in."""
        parser = argparse.ArgumentParser()
        parser.add_argument('--list', action='store_true')
        parser.add_argument('--host', action='store')
        self.args = parser.parse_args()
# Get the inventory.
ExampleInventory()
| 26.883117 | 77 | 0.528986 |
import os
import sys
import argparse
try:
import json
except ImportError:
import simplejson as json
class ExampleInventory(object):
def __init__(self):
self.inventory = {}
self.read_cli_args()
if self.args.list:
self.inventory = self.example_inventory()
elif self.args.host:
self.inventory = self.empty_inventory()
else:
self.inventory = self.empty_inventory()
print (json.dumps(self.inventory))
def example_inventory(self):
return {
"group": {
"hosts": [
"51.250.4.212",
"62.84.113.197"
],
"vars": {
"ansible_ssh_user": "ubuntu",
"ansible_ssh_private_key_file": "~/.ssh/id_rsa",
"example_variable": "value"
}
},
"_meta": {
"hostvars": {
"51.250.4.212": {
"reddit": "db"
},
"62.84.113.197": {
"reddit": "app"
}
}
}
}
def empty_inventory(self):
return {'_meta': {'hostvars': {}}}
def read_cli_args(self):
parser = argparse.ArgumentParser()
parser.add_argument('--list', action = 'store_true')
parser.add_argument('--host', action = 'store')
self.args = parser.parse_args()
ExampleInventory()
| true | true |
f72f7e10becbbc60d0c8669c8b5575f45e441968 | 4,353 | py | Python | pyrelational/models/mcdropout_model.py | RelationRx/pyrelational | 41ededeff84158bd88b76d39006764de3388c821 | [
"Apache-2.0"
] | 42 | 2022-02-09T16:36:37.000Z | 2022-03-25T00:25:34.000Z | pyrelational/models/mcdropout_model.py | RelationRx/pyrelational | 41ededeff84158bd88b76d39006764de3388c821 | [
"Apache-2.0"
] | 4 | 2022-03-22T13:22:38.000Z | 2022-03-25T16:14:40.000Z | pyrelational/models/mcdropout_model.py | RelationRx/pyrelational | 41ededeff84158bd88b76d39006764de3388c821 | [
"Apache-2.0"
] | 3 | 2022-02-15T17:50:30.000Z | 2022-03-10T18:14:16.000Z | import copy
import logging
from abc import ABC
from typing import Dict, Optional, Type, Union
import torch
from pytorch_lightning import LightningModule
from torch.nn.modules import Module
from torch.utils.data import DataLoader
from .generic_model import GenericModel
from .lightning_model import LightningModel
logger = logging.getLogger()
class GenericMCDropoutModel(GenericModel, ABC):
    """
    Generic model wrapper implementing Monte-Carlo Dropout uncertainty
    estimation: dropout layers are kept active at inference time and the
    model is sampled n_estimators times per input.
    """

    def __init__(
        self,
        model_class: Type[Module],
        model_config: Union[str, Dict],
        trainer_config: Union[str, Dict],
        n_estimators: int = 10,
        eval_dropout_prob: float = 0.2,
    ):
        super(GenericMCDropoutModel, self).__init__(model_class, model_config, trainer_config)
        # fail early if the architecture has no dropout layers at all
        _check_mc_dropout_model(model_class, model_config)
        self.n_estimators = n_estimators
        self.eval_dropout_prob = eval_dropout_prob

    def __call__(self, loader: DataLoader) -> torch.Tensor:
        """
        Run n_estimators stochastic forward passes over *loader*.

        :param loader: pytorch dataloader
        :return: stacked tensor with one set of predictions per
            MC-dropout sample along dimension 0
        """
        if self.current_model is None:
            raise ValueError("No current model, call 'train(train_loader, valid_loader)' to train the model first")

        model = self.current_model
        model.eval()
        samples = []
        with torch.no_grad():
            # keep only the dropout layers in train mode
            _enable_only_dropout_layers(model, self.eval_dropout_prob)
            for _ in range(self.n_estimators):
                batch_outputs = [model(x).detach().cpu() for x, _ in loader]
                samples.append(torch.cat(batch_outputs, 0))
        return torch.stack(samples)
class LightningMCDropoutModel(GenericMCDropoutModel, LightningModel):
    r"""
    MC Dropout estimator backed by a pytorch-lightning trainer.

    All the sampling logic lives in GenericMCDropoutModel; this subclass
    only narrows ``model_class`` to a LightningModule and wires in the
    lightning-based training from LightningModel.

    Example:

    .. code-block:: python

        import torch
        import pytorch_lightning as pl

        class PyLModel(pl.LightningModule):
            def __init__(self, in_dim, out_dim):
                super(PyLModel, self).__init__()
                self.linear = torch.nn.Linear(in_dim, out_dim)
        # need to define other train/test steps and optimizers methods required
        # by pytorch-lightning to run this example

        wrapper = LightningMCDropoutModel(
            PyLModel,
            model_config={"in_dim":10, "out_dim":1},
            trainer_config={"epochs":100},
            n_estimators=10,
            eval_dropout_prob=0.2,
        )
        wrapper.train(train_loader, valid_loader)
        predictions = wrapper(loader)
        assert predictions.size(0) == 10
    """

    def __init__(
        self,
        model_class: Type[LightningModule],
        model_config: Union[Dict, str],
        trainer_config: Union[Dict, str],
        n_estimators: int = 10,
        eval_dropout_prob: float = 0.2,
    ):
        # pure pass-through to GenericMCDropoutModel
        super(LightningMCDropoutModel, self).__init__(
            model_class,
            model_config,
            trainer_config,
            n_estimators=n_estimators,
            eval_dropout_prob=eval_dropout_prob,
        )
def _enable_only_dropout_layers(model: Module, p: Optional[float] = None) -> None:
def enable_dropout_on_module(m):
if m.__class__.__name__.startswith("Dropout"):
if isinstance(p, float) and (0 <= p <= 1):
m.p = p
elif isinstance(p, float) and (p < 0 or p > 1):
logger.warning(f"Evaluation dropout probability should be a float between 0 and 1, got {p}")
m.train()
model.apply(enable_dropout_on_module)
def _check_mc_dropout_model(model_class: Type[Module], model_config: Dict) -> None:
model = model_class(**model_config)
def has_dropout_module(model):
is_dropout = []
for m in model.children():
if m.__class__.__name__.startswith("Dropout"):
is_dropout.append(True)
else:
is_dropout += has_dropout_module(m)
return is_dropout
if not any(has_dropout_module(model)):
raise ValueError("Model provided do not contain any torch.nn.Dropout modules, cannot apply MC Dropout")
| 32.977273 | 115 | 0.623938 | import copy
import logging
from abc import ABC
from typing import Dict, Optional, Type, Union
import torch
from pytorch_lightning import LightningModule
from torch.nn.modules import Module
from torch.utils.data import DataLoader
from .generic_model import GenericModel
from .lightning_model import LightningModel
logger = logging.getLogger()
class GenericMCDropoutModel(GenericModel, ABC):
def __init__(
self,
model_class: Type[Module],
model_config: Union[str, Dict],
trainer_config: Union[str, Dict],
n_estimators: int = 10,
eval_dropout_prob: float = 0.2,
):
super(GenericMCDropoutModel, self).__init__(model_class, model_config, trainer_config)
_check_mc_dropout_model(model_class, model_config)
self.n_estimators = n_estimators
self.eval_dropout_prob = eval_dropout_prob
def __call__(self, loader: DataLoader) -> torch.Tensor:
if self.current_model is None:
raise ValueError("No current model, call 'train(train_loader, valid_loader)' to train the model first")
predictions = []
model = self.current_model
model.eval()
with torch.no_grad():
_enable_only_dropout_layers(model, self.eval_dropout_prob)
for _ in range(self.n_estimators):
model_prediction = []
for x, _ in loader:
model_prediction.append(model(x).detach().cpu())
predictions.append(torch.cat(model_prediction, 0))
predictions = torch.stack(predictions)
return predictions
class LightningMCDropoutModel(GenericMCDropoutModel, LightningModel):
def __init__(
self,
model_class: Type[LightningModule],
model_config: Union[Dict, str],
trainer_config: Union[Dict, str],
n_estimators: int = 10,
eval_dropout_prob: float = 0.2,
):
super(LightningMCDropoutModel, self).__init__(
model_class,
model_config,
trainer_config,
n_estimators=n_estimators,
eval_dropout_prob=eval_dropout_prob,
)
def _enable_only_dropout_layers(model: Module, p: Optional[float] = None) -> None:
def enable_dropout_on_module(m):
if m.__class__.__name__.startswith("Dropout"):
if isinstance(p, float) and (0 <= p <= 1):
m.p = p
elif isinstance(p, float) and (p < 0 or p > 1):
logger.warning(f"Evaluation dropout probability should be a float between 0 and 1, got {p}")
m.train()
model.apply(enable_dropout_on_module)
def _check_mc_dropout_model(model_class: Type[Module], model_config: Dict) -> None:
model = model_class(**model_config)
def has_dropout_module(model):
is_dropout = []
for m in model.children():
if m.__class__.__name__.startswith("Dropout"):
is_dropout.append(True)
else:
is_dropout += has_dropout_module(m)
return is_dropout
if not any(has_dropout_module(model)):
raise ValueError("Model provided do not contain any torch.nn.Dropout modules, cannot apply MC Dropout")
| true | true |
f72f7e5086ec62838452027d2f90f7f9292e8954 | 1,602 | py | Python | fairml/tests/test_orthogonal_projection.py | ravish0007/fairml | bdfb707ff9554c1a789dc8de3926c1ef3cfb1fc8 | [
"MIT"
] | 330 | 2017-02-24T08:34:39.000Z | 2022-02-24T15:41:19.000Z | fairml/tests/test_orthogonal_projection.py | ravish0007/fairml | bdfb707ff9554c1a789dc8de3926c1ef3cfb1fc8 | [
"MIT"
] | 14 | 2017-02-02T00:54:16.000Z | 2021-02-19T16:01:20.000Z | fairml/tests/test_orthogonal_projection.py | ravish0007/fairml | bdfb707ff9554c1a789dc8de3926c1ef3cfb1fc8 | [
"MIT"
] | 70 | 2017-01-31T20:51:10.000Z | 2022-02-17T07:38:52.000Z | from __future__ import division
import pytest
import numpy as np
from random import randint
from fairml.orthogonal_projection import audit_model
from fairml.orthogonal_projection import get_orthogonal_vector
from fairml.utils import mse
from fairml.utils import accuracy
from fairml.utils import detect_feature_sign
from fairml.perturbation_strategies import constant_zero
# let's define a black-box function
def black_box_function(input_data):
    """Linear model used as the black box under audit: input.dot(weights).

    NOTE(review): depends on a module-level ``weights`` array that is
    not defined in this file -- presumably assigned by a caller/fixture
    before use; calling it without that raises NameError. Verify.
    """
    # sanity-check that the feature dimension matches the weight vector
    if not (input_data.shape[1] == weights.shape[0]):
        raise Exception("problem, misaligned dimensions")
    output = np.dot(input_data, weights)
    return output
def test_orthogonal_projection(number_of_tries=20, size=10000):
    """Check get_orthogonal_vector output is orthogonal to the input.

    NOTE(review): the assertions compare the *signed* dot product with
    1e-8, so any negative dot product passes; kept as-is to preserve
    the original test's behaviour.
    """
    for _ in range(number_of_tries):
        a = np.random.normal(0, 1, size)
        b = np.random.normal(0, 1, size)
        c = np.random.binomial(10, 0.1, size)
        d = np.random.uniform(0, 10, size)

        # normal vs. normal / binomial / uniform draws, in that order
        for other in (b, c, d):
            projected = get_orthogonal_vector(a, other)
            assert np.dot(projected, a) < 1e-8
def test_mse():
    """mse() should match the hand-computed mean squared error."""
    expected = 0.375
    actual = mse([3, -0.5, 2, 7], [2.5, 0.0, 2, 8])
    assert actual == expected
def test_accuracy():
    """accuracy() should report the fraction of matching positions."""
    predictions = [0, 2, 1, 3]
    labels = [0, 1, 2, 3]
    acc = accuracy(predictions, labels)
    print(acc)
    assert acc == 0.5
| 25.428571 | 63 | 0.671036 | from __future__ import division
import pytest
import numpy as np
from random import randint
from fairml.orthogonal_projection import audit_model
from fairml.orthogonal_projection import get_orthogonal_vector
from fairml.utils import mse
from fairml.utils import accuracy
from fairml.utils import detect_feature_sign
from fairml.perturbation_strategies import constant_zero
def black_box_function(input_data):
if not (input_data.shape[1] == weights.shape[0]):
raise Exception("problem, misaligned dimensions")
output = np.dot(input_data, weights)
return output
def test_orthogonal_projection(number_of_tries=20, size=10000):
for i in range(number_of_tries):
a = np.random.normal(0, 1, size)
b = np.random.normal(0, 1, size)
c = np.random.binomial(10, 0.1, size)
d = np.random.uniform(0, 10, size)
# normal-normal check
orth_b = get_orthogonal_vector(a, b)
assert np.dot(orth_b, a) < 1e-8
# normal- normal check
ortho_c = get_orthogonal_vector(a, c)
assert np.dot(ortho_c, a) < 1e-8
# normal - uniform check
ortho_d = get_orthogonal_vector(a, d)
assert np.dot(ortho_d, a) < 1e-8
def test_mse():
y_true = [3, -0.5, 2, 7]
y_pred = [2.5, 0.0, 2, 8]
test_mse = mse(y_true, y_pred)
assert test_mse == 0.375
def test_accuracy():
y_pred = [0, 2, 1, 3]
y_true = [0, 1, 2, 3]
test_acc = accuracy(y_pred, y_true)
print(test_acc)
assert test_acc == 0.5
| true | true |
f72f8089cc89101fe7a243dfe2b57fdf92cb7ad2 | 22,912 | py | Python | LLDBagility/stubvm.py | killvxk/LLDBagility | 6cc0df91bd49e43997b77048611d1b73f43f7b29 | [
"Apache-2.0"
] | 1 | 2019-07-13T12:04:04.000Z | 2019-07-13T12:04:04.000Z | LLDBagility/stubvm.py | killvxk/LLDBagility | 6cc0df91bd49e43997b77048611d1b73f43f7b29 | [
"Apache-2.0"
] | null | null | null | LLDBagility/stubvm.py | killvxk/LLDBagility | 6cc0df91bd49e43997b77048611d1b73f43f7b29 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
import re
import threading
import lldbagilityutils
from PyFDP.FDP import FDP
from VMSN import VMSN
logger = lldbagilityutils.create_indented_logger(__name__, "/tmp/stubvm.log")
NULL = 0x0
# https://github.com/apple/darwin-xnu/blob/xnu-4903.221.2/osfmk/i386/eflags.h
EFL_TF = 0x00000100
# https://github.com/apple/darwin-xnu/blob/xnu-4903.221.2/osfmk/mach/i386/vm_param.h
I386_PGBYTES = 4096
VM_MIN_KERNEL_ADDRESS = 0xFFFFFF8000000000
VM_MAX_KERNEL_ADDRESS = 0xFFFFFFFFFFFFEFFF
# https://github.com/apple/darwin-xnu/blob/xnu-4903.221.2/EXTERNAL_HEADERS/mach-o/loader.h
MH_MAGIC_64 = 0xFEEDFACF
# https://github.com/apple/darwin-xnu/blob/xnu-4903.221.2/osfmk/mach/exception_types.h
EXC_SOFTWARE = 0x5
EXC_BREAKPOINT = 0x6
EXC_SOFT_SIGNAL = 0x10003
# https://github.com/apple/darwin-xnu/blob/xnu-4903.221.2/osfmk/mach/i386/exception.h
EXC_I386_BPTFLT = 0x3
# https://github.com/apple/darwin-xnu/blob/xnu-4903.221.2/bsd/sys/signal.h
SIGINT = 0x2
# https://github.com/apple/darwin-xnu/blob/xnu-4903.221.2/osfmk/i386/proc_reg.h
MSR_IA32_GS_BASE = 0xC0000101
MSR_IA32_KERNEL_GS_BASE = 0xC0000102
# https://github.com/apple/darwin-xnu/blob/xnu-4903.221.2/osfmk/mach/machine.h
CPU_TYPE_X86 = 0x7
CPU_ARCH_ABI64 = 0x01000000
CPU_TYPE_X86_64 = CPU_TYPE_X86 | CPU_ARCH_ABI64
CPU_SUBTYPE_X86_ARCH1 = 0x4
class STUBVM(object):
    def __init__(self, stub, name):
        # stub: debugger-stub class (e.g. FDP or VMSN) instantiated with
        # the VM name; all VM introspection goes through it
        self.stub = stub(name)
        self.name = name
        # serialises stub accesses (methods below are @synchronized)
        self.lock = threading.RLock()
        # last exception delivered to the debugger, if any (set elsewhere)
        self._exception = None
        # bookkeeping for software breakpoints (populated elsewhere)
        self._soft_breakpoints = {}
        # deferred actions honoured at the next resume (set elsewhere)
        self._interrupt_at_next_resume = False
        self._singlestep_at_next_resume = False
        # state for emulating KDP protocol quirks; _kdp_vaddr is
        # presumably the address of a pending KDP write -- see
        # _store_kdp_at_next_write_virtual_memory (used elsewhere)
        self._kdp_vaddr = None
        self._store_kdp_at_next_write_virtual_memory = False
        # when set, the next rip read is reported incremented by one
        # (see read_register)
        self._return_incremented_at_next_read_register_rip = False
    @lldbagilityutils.indented(logger)
    def _continue_until_kernel_code(self):
        """Resume the VM until RIP points into kernel address space.

        Uses a hardware breakpoint on writes to CR3 (address-space
        switches) to stop near kernel code without knowing any kernel
        address in advance.
        """
        logger.debug("_continue_until_kernel_code()")
        if _in_kernel_space(self.read_register("rip")):
            return
        # set a breakpoint on writes to the CR3 register (with high probability
        # only the kernel is doing it)
        cr3bp_id = self.stub.SetBreakpoint(
            self.stub.CR_HBP,
            0x0,
            self.stub.WRITE_BP,
            self.stub.VIRTUAL_ADDRESS,
            0x3,  # control register number (CR3)
            0x1,
            self.stub.NO_CR3,
        )
        # valid breakpoint ids are in [0, 254]
        assert 0 <= cr3bp_id <= 254
        # resume the VM execution until reaching kernel code
        while True:
            self.stub.Resume()
            self.stub.WaitForStateChanged()
            if _in_kernel_space(self.read_register("rip")):
                logger.debug("> stopping: 0x{:016x}".format(self.read_register("rip")))
                break
            # step past the CR3-writing instruction and keep going
            self.stub.SingleStep()
        self.stub.UnsetBreakpoint(cr3bp_id)
    @lldbagilityutils.indented(logger)
    def _get_active_thread_vaddr(self):
        """Return the kernel virtual address of the active XNU thread.

        Follows XNU's per-CPU data: the cpu_data struct lives at the
        (kernel) GS base, cpu_data->cpu_this is a self-pointer used as a
        sanity check, and cpu_data->cpu_active_thread is at offset 0x8.
        """
        logger.debug("_get_active_thread_vaddr()")

        # https://github.com/apple/darwin-xnu/blob/xnu-4903.221.2/osfmk/i386/cpu_data.h#L392
        def _get_gs_base(self):
            # if GS base does not point into kernel space we are in user
            # mode, so the kernel value is in the swapped KERNEL_GS_BASE
            logger.debug("_get_gs_base()")
            gs_base = self.read_msr64(MSR_IA32_GS_BASE)
            logger.debug("> MSR_IA32_GS_BASE: 0x{:016x}".format(gs_base))
            if not _in_kernel_space(gs_base):
                gs_base = self.read_msr64(MSR_IA32_KERNEL_GS_BASE)
                logger.debug("> MSR_IA32_KERNEL_GS_BASE: 0x{:016x}".format(gs_base))
            return gs_base

        # https://github.com/apple/darwin-xnu/blob/xnu-4903.221.2/osfmk/i386/mp_desc.c#L476
        cpu_data_vaddr = _get_gs_base(self)
        logger.debug("> cpu_data_vaddr: 0x{:016x}".format(cpu_data_vaddr))

        # https://github.com/apple/darwin-xnu/blob/xnu-4903.221.2/osfmk/i386/cpu_data.h#L149
        cpu_this = lldbagilityutils.u64(self.read_virtual_memory(cpu_data_vaddr, 0x8))
        logger.debug("> cpu_this: 0x{:016x}".format(cpu_this))
        # cpu_this is a pointer to the struct itself; mismatch means we
        # did not actually find cpu_data
        assert cpu_data_vaddr == cpu_this

        # https://github.com/apple/darwin-xnu/blob/xnu-4903.221.2/osfmk/i386/cpu_data.h#L150
        cpu_active_thread = lldbagilityutils.u64(
            self.read_virtual_memory(cpu_data_vaddr + 0x8, 0x8)
        )
        logger.debug("> cpu_active_thread: 0x{:016x}".format(cpu_active_thread))
        return cpu_active_thread
    @lldbagilityutils.indented(logger)
    @lldbagilityutils.synchronized
    def complete_attach(self):
        """Halt the VM and compute the kernel facts the debugger needs.

        Determines the kernel CR3, the (slid) kernel load address, the
        KASLR slide and the kernel version banner.  The helpers require
        RIP to be in kernel space, hence _continue_until_kernel_code().
        """
        logger.debug("complete_attach()")
        self.halt()
        self.unset_all_breakpoints()
        self._continue_until_kernel_code()
        assert _in_kernel_space(self.read_register("rip"))

        # CR3 while executing kernel code is the kernel page table root
        self.kernel_cr3 = self.read_register("cr3")
        logger.debug("> kernel_cr3: 0x{:x}".format(self.kernel_cr3))
        self.kernel_load_vaddr = _find_kernel_load_vaddr(self)
        logger.debug("> kernel_load_vaddr: 0x{:016x}".format(self.kernel_load_vaddr))
        # KASLR slide derived from the load address
        self.kernel_slide = _compute_kernel_slide(self.kernel_load_vaddr)
        logger.debug("> kernel_slide: 0x{:x}".format(self.kernel_slide))
        self.kernel_version = _find_kernel_version(self)
        logger.debug("> kernel_version: {}".format(self.kernel_version))
@lldbagilityutils.indented(logger)
@lldbagilityutils.synchronized
def get_num_cpus(self):
logger.debug("get_num_cpus()")
return self.stub.GetCpuCount()
@lldbagilityutils.indented(logger)
@lldbagilityutils.synchronized
def get_host_info(self):
logger.debug("get_host_info()")
# https://github.com/apple/darwin-xnu/blob/xnu-4903.221.2/osfmk/kdp/ml/x86_64/kdp_machdep.c#L256
cpus_mask = 0x0
for i in range(self.get_num_cpus()):
cpus_mask |= 1 << i
cpu_type = CPU_TYPE_X86_64
cpu_subtype = CPU_SUBTYPE_X86_ARCH1
return cpus_mask, cpu_type, cpu_subtype
@lldbagilityutils.indented(logger)
@lldbagilityutils.synchronized
def get_kernel_version(self):
logger.debug("get_kernel_version()")
kernel_version = self.kernel_version
if b"stext" not in kernel_version:
logger.debug("> stext")
# return the known kernel load address to make LLDB do less requests
kernel_version += "; stext=0x{:016x}".format(self.kernel_load_vaddr)
return kernel_version
@lldbagilityutils.indented(logger)
@lldbagilityutils.synchronized
def read_msr64(self, msr):
logger.debug("read_msr64(msr=0x{:x})".format(msr))
return self.stub.ReadMsr(msr, CpuId=self.stub.CPU0)
@lldbagilityutils.indented(logger)
@lldbagilityutils.synchronized
def write_msr64(self, msr, val):
logger.debug("write_msr64(msr=0x{:x}, val=0x{:x})".format(msr, val))
self.stub.WriteMsr(self, msr, val, CpuId=self.stub.CPU0)
    @lldbagilityutils.indented(logger)
    @lldbagilityutils.synchronized
    def read_register(self, reg):
        """Return the value of register *reg* (attribute lookup on the stub).

        When state() has just reported a breakpoint exception, the next read
        of RIP returns value + 1; the flag is armed in state() and the +1
        matches what the referenced LLDB KDP stop-info code expects.
        """
        logger.debug("read_register(reg='{}')".format(reg))
        val = getattr(self.stub, reg)
        if reg == "rip" and self._return_incremented_at_next_read_register_rip:
            logger.debug("> _return_incremented_at_next_read_register_rip")
            self._return_incremented_at_next_read_register_rip = False
            # https://github.com/llvm/llvm-project/tree/llvmorg-8.0.0/lldb/source/Plugins/Process/MacOSX-Kernel/ThreadKDP.cpp#L157
            # https://github.com/llvm/llvm-project/tree/llvmorg-8.0.0/lldb/source/Plugins/Process/Utility/StopInfoMachException.cpp#L571
            return val + 1
        return val
@lldbagilityutils.indented(logger)
@lldbagilityutils.synchronized
def read_registers(self, regs):
logger.debug("read_registers()")
return {reg: self.read_register(reg) for reg in regs}
    @lldbagilityutils.indented(logger)
    @lldbagilityutils.synchronized
    def write_register(self, reg, val):
        """Write *val* to register *reg* (attribute assignment on the stub).

        Writes to RFLAGS are swallowed: if the trap flag (TF) is being set,
        the request is recorded and emulated by single-stepping at the next
        resume() instead of changing the guest's RFLAGS.
        """
        logger.debug("write_register(reg='{}', val=0x{:x})".format(reg, val))
        if reg == "rflags":
            if val & EFL_TF:
                logger.debug("> _singlestep_at_next_resume")
                self._singlestep_at_next_resume = True
            # disallow changes to RFLAGS
            return
        setattr(self.stub, reg, val)
@lldbagilityutils.indented(logger)
@lldbagilityutils.synchronized
def write_registers(self, regs):
logger.debug("write_registers()")
for reg, val in regs.items():
self.write_register(reg, val)
    @lldbagilityutils.indented(logger)
    @lldbagilityutils.synchronized
    def read_virtual_memory(self, vaddr, nbytes):
        """Read *nbytes* of guest virtual memory at *vaddr*.

        Two special cases:
        * a failed read while stopped in user land is retried under the
          cached kernel CR3 (kernel addresses are not mapped by a user pmap);
        * a read overlapping the recorded kdp struct address gets the
          saved_state/kdp_thread fields patched in, faking the struct that
          LLDBmacros expects.

        Returns b"" when the read fails entirely.
        """
        logger.debug(
            "read_virtual_memory(vaddr=0x{:016x}, nbytes=0x{:x})".format(vaddr, nbytes)
        )
        data = self.stub.ReadVirtualMemory(vaddr, nbytes)
        if not data and not _in_kernel_space(self.read_register("rip")):
            # if reading fails, it could be the case that we are trying to read kernel
            # virtual addresses from user space (e.g. when LLDB stops in user land and
            # the user loads or uses lldbmacros)
            # in this case, we try the read again but using the kernel pmap
            logger.debug("> using kernel pmap")
            process_cr3 = self.read_register("cr3")
            # switch to kernel pmap
            self.write_register("cr3", self.kernel_cr3)
            # try the read again
            data = self.stub.ReadVirtualMemory(vaddr, nbytes)
            # switch back to the process pmap
            self.write_register("cr3", process_cr3)
        # NOTE(review): the upper bound is `<= vaddr + nbytes`; a kdp struct
        # starting exactly at the window's end would patch past the read —
        # confirm `<` was not intended.
        if self._kdp_vaddr and vaddr <= self._kdp_vaddr <= vaddr + nbytes:
            # this request has very likely been generated by LLDBmacros
            logger.debug("> fake kdp struct")
            assert data is not None
            # fill some fields of the empty (since the boot-arg "debug" is probably not set) kdp struct
            saved_state = lldbagilityutils.p64(NULL)
            kdp_thread = lldbagilityutils.p64(self._get_active_thread_vaddr())
            fake_partial_kdp_struct = b"".join((saved_state, kdp_thread))
            kdp_struct_offset = self._kdp_vaddr - vaddr
            data = (
                data[:kdp_struct_offset]
                + fake_partial_kdp_struct
                + data[kdp_struct_offset + len(fake_partial_kdp_struct) :]
            )
        data = data if data else b""
        logger.debug("> len(data): 0x{:x}".format(len(data)))
        return data
@lldbagilityutils.indented(logger)
@lldbagilityutils.synchronized
def write_virtual_memory(self, vaddr, data):
logger.debug("write_virtual_memory(vaddr=0x{:016x}, data=...)".format(vaddr))
assert self.is_state_halted()
if self._store_kdp_at_next_write_virtual_memory:
logger.debug("> _store_kdp_at_next_write_virtual_memory")
self._store_kdp_at_next_write_virtual_memory = False
self._kdp_vaddr = vaddr
return
return self.stub.WriteVirtualMemory(vaddr, data)
@lldbagilityutils.indented(logger)
@lldbagilityutils.synchronized
def set_soft_exec_breakpoint(self, vaddr):
logger.debug("set_soft_exec_breakpoint(vaddr=0x{:016x})".format(vaddr))
assert self.is_state_halted()
id = 0x0
length = 0x1
self._soft_breakpoints[vaddr] = self.stub.SetBreakpoint(
self.stub.SOFT_HBP,
id,
self.stub.EXECUTE_BP,
self.stub.VIRTUAL_ADDRESS,
vaddr,
length,
self.stub.NO_CR3,
)
logger.debug("> bp id: {}".format(self._soft_breakpoints[vaddr]))
return self._soft_breakpoints[vaddr]
@lldbagilityutils.indented(logger)
@lldbagilityutils.synchronized
def unset_soft_breakpoint(self, vaddr):
logger.debug("unset_soft_breakpoint(vaddr=0x{:016x})")
assert self.is_state_halted()
try:
id = self._soft_breakpoints[vaddr]
except KeyError:
logger.debug("> no such breakpoint")
else:
del self._soft_breakpoints[vaddr]
return self.stub.UnsetBreakpoint(id)
return False
@lldbagilityutils.indented(logger)
@lldbagilityutils.synchronized
def set_hard_breakpoint(self, trigger, nreg, vaddr):
logger.debug(
"set_hard_exec_breakpoint(trigger='{}', nreg=0x{:016x}, vaddr=0x{:016x})".format(
trigger, nreg, vaddr
)
)
assert self.is_state_halted()
assert trigger in ("e", "w", "rw")
assert 0 <= nreg <= 3
trigger_bitshifts = {nreg: 16 + nreg * 4 for nreg in range(4)}
status_bitshifts = {nreg: nreg * 2 for nreg in range(4)}
ctrl_mask = self.read_register("dr7")
# reset trigger entry for the chosen register to 0b00
ctrl_mask &= ~(0b11 << trigger_bitshifts[nreg])
# set new entry
if trigger == "e":
trigger_entry = 0b00
elif trigger == "w":
trigger_entry = 0b01
elif trigger == "rw":
trigger_entry = 0b11
else:
raise NotImplementedError
ctrl_mask |= trigger_entry << trigger_bitshifts[nreg]
# enable breakpoint globally
ctrl_mask |= 0b10 << status_bitshifts[nreg]
logger.debug("> ctrl_mask: 0b{:032b}".format(ctrl_mask))
self.write_register("dr{}".format(nreg), vaddr)
self.write_register("dr7", ctrl_mask)
@lldbagilityutils.indented(logger)
@lldbagilityutils.synchronized
def unset_hard_breakpoint(self, nreg):
logger.debug("unset_hard_breakpoint(nreg=0x{:016x})".format(nreg))
assert self.is_state_halted()
assert 0 <= nreg <= 3
status_bitshifts = {nreg: nreg * 2 for nreg in range(4)}
ctrl_mask = self.read_register("dr7")
# disable breakpoint globally and locally
ctrl_mask &= ~(0b11 << status_bitshifts[nreg])
logger.debug("> ctrl_mask: 0b{:032b}".format(ctrl_mask))
self.write_register("dr{}".format(nreg), 0x0)
self.write_register("dr7", ctrl_mask)
@lldbagilityutils.indented(logger)
@lldbagilityutils.synchronized
def unset_all_breakpoints(self):
logger.debug("unset_all_breakpoints()")
assert self.is_state_halted()
# remove soft breakpoints
self._soft_breakpoints.clear()
self.stub.UnsetAllBreakpoint()
# remove hard breakpoints
self.write_register("dr0", 0x0)
self.write_register("dr1", 0x0)
self.write_register("dr2", 0x0)
self.write_register("dr3", 0x0)
self.write_register("dr6", 0x0)
self.write_register("dr7", 0x0)
    @lldbagilityutils.indented(logger)
    @lldbagilityutils.synchronized
    def halt(self):
        """Pause the VM via the stub."""
        logger.debug("halt()")
        self.stub.Pause()
    @lldbagilityutils.indented(logger)
    @lldbagilityutils.synchronized
    def interrupt(self):
        """Halt the VM and queue a SIGINT software exception to be delivered
        by the next state() call."""
        logger.debug("interrupt()")
        self._exception = (EXC_SOFTWARE, EXC_SOFT_SIGNAL, SIGINT)
        self.halt()
    @lldbagilityutils.indented(logger)
    @lldbagilityutils.synchronized
    def single_step(self):
        """Execute one instruction and queue an EXC_BREAKPOINT exception to
        be delivered by the next state() call."""
        logger.debug("single_step()")
        self._exception = (EXC_BREAKPOINT, EXC_I386_BPTFLT, 0x0)
        self.stub.SingleStep()
    @lldbagilityutils.indented(logger)
    @lldbagilityutils.synchronized
    def resume(self):
        """Resume execution, honouring deferred requests first.

        Precedence: a pending interrupt wins over a pending single step, and
        both preempt a plain resume.  When the stub currently reports a
        breakpoint hit, a SingleStep is issued before Resume — presumably to
        step past the breakpointed instruction so it can trigger again.
        """
        logger.debug("resume()")
        if self._interrupt_at_next_resume:
            logger.debug("> _interrupt_at_next_resume")
            self._interrupt_at_next_resume = False
            self.interrupt()
            return
        if self._singlestep_at_next_resume:
            logger.debug("> _singlestep_at_next_resume")
            self._singlestep_at_next_resume = False
            self.single_step()
            return
        if self.is_breakpoint_hit():
            logger.debug(
                "> state breakpoint hit: 0x{:016x}".format(self.read_register("rip"))
            )
            self.stub.SingleStep()
        self.stub.Resume()
    @lldbagilityutils.indented(logger)
    @lldbagilityutils.synchronized
    def interrupt_and_take_snapshot(self):
        """Halt the VM (reported as a SIGINT stop) and save a snapshot."""
        logger.debug("interrupt_and_take_snapshot()")
        self.interrupt()
        self.stub.Save()
@lldbagilityutils.indented(logger)
@lldbagilityutils.synchronized
def interrupt_and_restore_last_snapshot(self):
logger.debug("interrupt_and_restore_last_snapshot()")
self.interrupt()
if self.stub.Restore():
# breakpoints are not restored
self._soft_breakpoints.clear()
return True
else:
logger.debug("> could not restore")
return False
    @lldbagilityutils.indented(logger)
    @lldbagilityutils.synchronized
    def state(self):
        """Return (stub state, pending Mach exception) and clear the exception.

        A breakpoint hit is translated into an EXC_BREAKPOINT exception, and
        the next RIP read is armed to return PC + 1 (see read_register()).
        """
        logger.debug("state()")
        if self.is_breakpoint_hit():
            logger.debug("> state breakpoint hit")
            self._exception = (EXC_BREAKPOINT, EXC_I386_BPTFLT, 0x0)
            # the following assumes that the next call to STUBVM.read_register("rip")
            # will be made by LLDB in response to this EXC_BREAKPOINT exception
            self._return_incremented_at_next_read_register_rip = True
        state = (self.stub.GetState(), self._exception)
        self._exception = None
        return state
    @lldbagilityutils.synchronized
    def is_state_changed(self):
        """Truthy when the stub state changed or an exception awaits delivery."""
        return self.stub.GetStateChanged() or self._exception
    @lldbagilityutils.indented(logger)
    @lldbagilityutils.synchronized
    def is_state_halted(self):
        """Truthy when the stub reports the paused state bit."""
        logger.debug("is_state_halted()")
        return self.stub.GetState() & self.stub.STATE_PAUSED
@lldbagilityutils.indented(logger)
@lldbagilityutils.synchronized
def is_breakpoint_hit(self):
logger.debug("is_breakpoint_hit()")
return self.stub.GetState() & (
self.stub.STATE_BREAKPOINT_HIT | self.stub.STATE_HARD_BREAKPOINT_HIT
)
    @lldbagilityutils.indented(logger)
    @lldbagilityutils.synchronized
    def interrupt_at_next_resume(self):
        """Arm a deferred interrupt: the next resume() interrupts instead of
        resuming (see resume())."""
        logger.debug("interrupt_at_next_resume()")
        self._interrupt_at_next_resume = True
    @lldbagilityutils.indented(logger)
    @lldbagilityutils.synchronized
    def store_kdp_at_next_write_virtual_memory(self):
        """Arm capture of the next virtual-memory write's address as the kdp
        struct location (see write_virtual_memory())."""
        logger.debug("store_kdp_at_next_write_virtual_memory()")
        self._store_kdp_at_next_write_virtual_memory = True
    @lldbagilityutils.indented(logger)
    @lldbagilityutils.synchronized
    def abort_store_kdp_at_next_write_virtual_memory(self):
        """Disarm the pending kdp-address capture; asserts that no capture
        has already happened."""
        logger.debug("abort_store_kdp_at_next_write_virtual_memory()")
        assert not self._kdp_vaddr
        self._store_kdp_at_next_write_virtual_memory = False
def _in_kernel_space(addr):
    """True when *addr* lies within the x86-64 kernel virtual address range."""
    return VM_MIN_KERNEL_ADDRESS <= addr and addr <= VM_MAX_KERNEL_ADDRESS
@lldbagilityutils.indented(logger)
def _find_kernel_load_vaddr(vm):
    """Find the kernel Mach-O load address.

    First tries the lowglo page (populated only when the "debug" boot-arg is
    set); otherwise scans backwards, page by page, from the current RIP until
    a 64-bit Mach-O header is found.
    """
    logger.debug("_find_kernel_load_vaddr()")
    assert _in_kernel_space(vm.read_register("rip"))
    @lldbagilityutils.indented(logger)
    def _is_kernel_load_vaddr(vaddr):
        # True when vaddr is a kernel address holding the MH_MAGIC_64 value.
        logger.debug("_is_kernel_load_vaddr()")
        if not _in_kernel_space(vaddr):
            return False
        data = vm.read_virtual_memory(vaddr, 0x4)
        return data and lldbagilityutils.u32(data) == MH_MAGIC_64
    @lldbagilityutils.indented(logger)
    def _get_debug_kernel_load_vaddr():
        # Read the load address from lowglo; returns None when unavailable.
        logger.debug("_get_debug_kernel_load_vaddr()")
        # from the LLDB documentation: "If the debug flag is included in the
        # boot-args nvram setting, the kernel's load address will be noted
        # in the lowglo page at a fixed address"
        # https://github.com/llvm/llvm-project/blob/llvmorg-8.0.0/lldb/source/Plugins/DynamicLoader/Darwin-Kernel/DynamicLoaderDarwinKernel.cpp#L226
        # https://github.com/apple/darwin-xnu/blob/xnu-4903.221.2/osfmk/x86_64/lowglobals.h#L54
        # https://github.com/apple/darwin-xnu/blob/xnu-4903.221.2/osfmk/x86_64/pmap.c#L1175
        lgStext_vaddr = 0xFFFFFF8000002010
        data = vm.read_virtual_memory(lgStext_vaddr, 0x8)
        if data:
            vaddr = lldbagilityutils.u64(data)
            if _is_kernel_load_vaddr(vaddr):
                return vaddr
            else:
                # probably trying to attach to the target before lgStext is initialised
                return None
        else:
            return None
    @lldbagilityutils.indented(logger)
    def _search_kernel_load_vaddr(start_vaddr):
        # Scan page-aligned addresses downwards until a Mach-O header shows
        # up; AssertionError if the whole kernel range is exhausted.
        logger.debug(
            "_search_kernel_load_vaddr(start_vaddr=0x{:016x})".format(start_vaddr)
        )
        # try to find the load address manually
        assert _in_kernel_space(start_vaddr)
        # round down to the containing page boundary
        vaddr = start_vaddr & ~(I386_PGBYTES - 1)
        while vaddr >= VM_MIN_KERNEL_ADDRESS:
            if _is_kernel_load_vaddr(vaddr):
                return vaddr
            vaddr -= I386_PGBYTES
        else:
            # while/else: reached only when the loop exhausts without a hit
            raise AssertionError
    kernel_load_vaddr = _get_debug_kernel_load_vaddr() or _search_kernel_load_vaddr(
        vm.read_register("rip")
    )
    return kernel_load_vaddr
def _compute_kernel_slide(kernel_load_vaddr):
return kernel_load_vaddr - 0xFFFFFF8000200000
@lldbagilityutils.indented(logger)
def _find_kernel_version(vm):
    """Scan the in-memory kernel image for its "Darwin Kernel Version ..."
    string and return it as bytes.

    Reads 2 MiB windows (page by page) starting at the kernel load address,
    re-running the regex after each window; AssertionError if nothing is
    found within 42 MiB.
    """
    logger.debug("_find_kernel_version()")
    kernel_macho = b""
    while len(kernel_macho) < 42 * 1024 * 1024:  # a reasonable upper bound?
        buf = b""
        while len(buf) < 2 * 1024 * 1024:
            vaddr = vm.kernel_load_vaddr + len(kernel_macho) + len(buf)
            # NOTE(review): if read_virtual_memory() ever returns b"" here,
            # this inner loop never terminates — confirm reads cannot fail.
            buf += vm.read_virtual_memory(vaddr, I386_PGBYTES)
        kernel_macho += buf
        try:
            kernel_version = re.search(
                b"(?P<version>Darwin Kernel Version .+?X86_64)\0", kernel_macho
            ).group("version")
        except AttributeError:
            # no match yet (re.search returned None); read another window
            continue
        else:
            return kernel_version
    else:
        # while/else: upper bound exceeded without finding the version string
        raise AssertionError
class FDPSTUB(FDP):
    """FDP-backed VM stub: maps the generic constant names used by STUBVM
    onto their PyFDP equivalents."""
    NO_CR3 = FDP.FDP_NO_CR3
    SOFT_HBP = FDP.FDP_SOFTHBP
    CR_HBP = FDP.FDP_CRHBP
    VIRTUAL_ADDRESS = FDP.FDP_VIRTUAL_ADDRESS
    EXECUTE_BP = FDP.FDP_EXECUTE_BP
    WRITE_BP = FDP.FDP_WRITE_BP
    STATE_PAUSED = FDP.FDP_STATE_PAUSED
    STATE_BREAKPOINT_HIT = FDP.FDP_STATE_BREAKPOINT_HIT
    STATE_HARD_BREAKPOINT_HIT = FDP.FDP_STATE_HARD_BREAKPOINT_HIT
    CPU0 = FDP.FDP_CPU0
    def __init__(self, name):
        """Attach to the FDP-exposed VM *name*; fails fast on multi-CPU guests."""
        super(FDPSTUB, self).__init__(name)
        assert self.GetCpuCount() == 1, (
            "VMs with more than one CPU are not fully supported by FDP! "
            "Decrease the number of processors in the VM settings"
        )
class VMSNSTUB(VMSN):
    """VM-snapshot-backed stub exposing the same constant names as FDPSTUB,
    with hard-coded values."""
    NO_CR3 = 0
    SOFT_HBP = 2
    CR_HBP = 0
    VIRTUAL_ADDRESS = 0
    EXECUTE_BP = 0
    WRITE_BP = 0
    # NOTE(review): STATE_PAUSED and STATE_BREAKPOINT_HIT share value 1 —
    # presumably intentional for a static snapshot; confirm.
    STATE_PAUSED = 1
    STATE_BREAKPOINT_HIT = 1
    STATE_HARD_BREAKPOINT_HIT = 0
    # NOTE(review): reuses the FDP constant rather than a VMSN-specific one —
    # confirm this is intended.
    CPU0 = FDP.FDP_CPU0
    def __init__(self, name):
        """Open the VM snapshot identified by *name*."""
        super(VMSNSTUB, self).__init__(name)
| 37.255285 | 148 | 0.659043 |
import re
import threading
import lldbagilityutils
from PyFDP.FDP import FDP
from VMSN import VMSN
logger = lldbagilityutils.create_indented_logger(__name__, "/tmp/stubvm.log")
NULL = 0x0
EFL_TF = 0x00000100
I386_PGBYTES = 4096
VM_MIN_KERNEL_ADDRESS = 0xFFFFFF8000000000
VM_MAX_KERNEL_ADDRESS = 0xFFFFFFFFFFFFEFFF
MH_MAGIC_64 = 0xFEEDFACF
EXC_SOFTWARE = 0x5
EXC_BREAKPOINT = 0x6
EXC_SOFT_SIGNAL = 0x10003
EXC_I386_BPTFLT = 0x3
SIGINT = 0x2
MSR_IA32_GS_BASE = 0xC0000101
MSR_IA32_KERNEL_GS_BASE = 0xC0000102
CPU_TYPE_X86 = 0x7
CPU_ARCH_ABI64 = 0x01000000
CPU_TYPE_X86_64 = CPU_TYPE_X86 | CPU_ARCH_ABI64
CPU_SUBTYPE_X86_ARCH1 = 0x4
class STUBVM(object):
def __init__(self, stub, name):
self.stub = stub(name)
self.name = name
self.lock = threading.RLock()
self._exception = None
self._soft_breakpoints = {}
self._interrupt_at_next_resume = False
self._singlestep_at_next_resume = False
self._kdp_vaddr = None
self._store_kdp_at_next_write_virtual_memory = False
self._return_incremented_at_next_read_register_rip = False
@lldbagilityutils.indented(logger)
def _continue_until_kernel_code(self):
logger.debug("_continue_until_kernel_code()")
if _in_kernel_space(self.read_register("rip")):
return
cr3bp_id = self.stub.SetBreakpoint(
self.stub.CR_HBP,
0x0,
self.stub.WRITE_BP,
self.stub.VIRTUAL_ADDRESS,
0x3,
0x1,
self.stub.NO_CR3,
)
assert 0 <= cr3bp_id <= 254
while True:
self.stub.Resume()
self.stub.WaitForStateChanged()
if _in_kernel_space(self.read_register("rip")):
logger.debug("> stopping: 0x{:016x}".format(self.read_register("rip")))
break
self.stub.SingleStep()
self.stub.UnsetBreakpoint(cr3bp_id)
@lldbagilityutils.indented(logger)
def _get_active_thread_vaddr(self):
logger.debug("_get_active_thread_vaddr()")
def _get_gs_base(self):
logger.debug("_get_gs_base()")
gs_base = self.read_msr64(MSR_IA32_GS_BASE)
logger.debug("> MSR_IA32_GS_BASE: 0x{:016x}".format(gs_base))
if not _in_kernel_space(gs_base):
gs_base = self.read_msr64(MSR_IA32_KERNEL_GS_BASE)
logger.debug("> MSR_IA32_KERNEL_GS_BASE: 0x{:016x}".format(gs_base))
return gs_base
cpu_data_vaddr = _get_gs_base(self)
logger.debug("> cpu_data_vaddr: 0x{:016x}".format(cpu_data_vaddr))
cpu_this = lldbagilityutils.u64(self.read_virtual_memory(cpu_data_vaddr, 0x8))
logger.debug("> cpu_this: 0x{:016x}".format(cpu_this))
assert cpu_data_vaddr == cpu_this
cpu_active_thread = lldbagilityutils.u64(
self.read_virtual_memory(cpu_data_vaddr + 0x8, 0x8)
)
logger.debug("> cpu_active_thread: 0x{:016x}".format(cpu_active_thread))
return cpu_active_thread
@lldbagilityutils.indented(logger)
@lldbagilityutils.synchronized
def complete_attach(self):
logger.debug("complete_attach()")
self.halt()
self.unset_all_breakpoints()
self._continue_until_kernel_code()
assert _in_kernel_space(self.read_register("rip"))
self.kernel_cr3 = self.read_register("cr3")
logger.debug("> kernel_cr3: 0x{:x}".format(self.kernel_cr3))
self.kernel_load_vaddr = _find_kernel_load_vaddr(self)
logger.debug("> kernel_load_vaddr: 0x{:016x}".format(self.kernel_load_vaddr))
self.kernel_slide = _compute_kernel_slide(self.kernel_load_vaddr)
logger.debug("> kernel_slide: 0x{:x}".format(self.kernel_slide))
self.kernel_version = _find_kernel_version(self)
logger.debug("> kernel_version: {}".format(self.kernel_version))
@lldbagilityutils.indented(logger)
@lldbagilityutils.synchronized
def get_num_cpus(self):
logger.debug("get_num_cpus()")
return self.stub.GetCpuCount()
@lldbagilityutils.indented(logger)
@lldbagilityutils.synchronized
def get_host_info(self):
logger.debug("get_host_info()")
cpus_mask = 0x0
for i in range(self.get_num_cpus()):
cpus_mask |= 1 << i
cpu_type = CPU_TYPE_X86_64
cpu_subtype = CPU_SUBTYPE_X86_ARCH1
return cpus_mask, cpu_type, cpu_subtype
@lldbagilityutils.indented(logger)
@lldbagilityutils.synchronized
def get_kernel_version(self):
logger.debug("get_kernel_version()")
kernel_version = self.kernel_version
if b"stext" not in kernel_version:
logger.debug("> stext")
kernel_version += "; stext=0x{:016x}".format(self.kernel_load_vaddr)
return kernel_version
@lldbagilityutils.indented(logger)
@lldbagilityutils.synchronized
def read_msr64(self, msr):
logger.debug("read_msr64(msr=0x{:x})".format(msr))
return self.stub.ReadMsr(msr, CpuId=self.stub.CPU0)
@lldbagilityutils.indented(logger)
@lldbagilityutils.synchronized
def write_msr64(self, msr, val):
logger.debug("write_msr64(msr=0x{:x}, val=0x{:x})".format(msr, val))
self.stub.WriteMsr(self, msr, val, CpuId=self.stub.CPU0)
@lldbagilityutils.indented(logger)
@lldbagilityutils.synchronized
def read_register(self, reg):
logger.debug("read_register(reg='{}')".format(reg))
val = getattr(self.stub, reg)
if reg == "rip" and self._return_incremented_at_next_read_register_rip:
logger.debug("> _return_incremented_at_next_read_register_rip")
self._return_incremented_at_next_read_register_rip = False
return val + 1
return val
@lldbagilityutils.indented(logger)
@lldbagilityutils.synchronized
def read_registers(self, regs):
logger.debug("read_registers()")
return {reg: self.read_register(reg) for reg in regs}
@lldbagilityutils.indented(logger)
@lldbagilityutils.synchronized
def write_register(self, reg, val):
logger.debug("write_register(reg='{}', val=0x{:x})".format(reg, val))
if reg == "rflags":
if val & EFL_TF:
logger.debug("> _singlestep_at_next_resume")
self._singlestep_at_next_resume = True
return
setattr(self.stub, reg, val)
@lldbagilityutils.indented(logger)
@lldbagilityutils.synchronized
def write_registers(self, regs):
logger.debug("write_registers()")
for reg, val in regs.items():
self.write_register(reg, val)
@lldbagilityutils.indented(logger)
@lldbagilityutils.synchronized
def read_virtual_memory(self, vaddr, nbytes):
logger.debug(
"read_virtual_memory(vaddr=0x{:016x}, nbytes=0x{:x})".format(vaddr, nbytes)
)
data = self.stub.ReadVirtualMemory(vaddr, nbytes)
if not data and not _in_kernel_space(self.read_register("rip")):
logger.debug("> using kernel pmap")
process_cr3 = self.read_register("cr3")
self.write_register("cr3", self.kernel_cr3)
data = self.stub.ReadVirtualMemory(vaddr, nbytes)
self.write_register("cr3", process_cr3)
if self._kdp_vaddr and vaddr <= self._kdp_vaddr <= vaddr + nbytes:
logger.debug("> fake kdp struct")
assert data is not None
saved_state = lldbagilityutils.p64(NULL)
kdp_thread = lldbagilityutils.p64(self._get_active_thread_vaddr())
fake_partial_kdp_struct = b"".join((saved_state, kdp_thread))
kdp_struct_offset = self._kdp_vaddr - vaddr
data = (
data[:kdp_struct_offset]
+ fake_partial_kdp_struct
+ data[kdp_struct_offset + len(fake_partial_kdp_struct) :]
)
data = data if data else b""
logger.debug("> len(data): 0x{:x}".format(len(data)))
return data
@lldbagilityutils.indented(logger)
@lldbagilityutils.synchronized
def write_virtual_memory(self, vaddr, data):
logger.debug("write_virtual_memory(vaddr=0x{:016x}, data=...)".format(vaddr))
assert self.is_state_halted()
if self._store_kdp_at_next_write_virtual_memory:
logger.debug("> _store_kdp_at_next_write_virtual_memory")
self._store_kdp_at_next_write_virtual_memory = False
self._kdp_vaddr = vaddr
return
return self.stub.WriteVirtualMemory(vaddr, data)
@lldbagilityutils.indented(logger)
@lldbagilityutils.synchronized
def set_soft_exec_breakpoint(self, vaddr):
logger.debug("set_soft_exec_breakpoint(vaddr=0x{:016x})".format(vaddr))
assert self.is_state_halted()
id = 0x0
length = 0x1
self._soft_breakpoints[vaddr] = self.stub.SetBreakpoint(
self.stub.SOFT_HBP,
id,
self.stub.EXECUTE_BP,
self.stub.VIRTUAL_ADDRESS,
vaddr,
length,
self.stub.NO_CR3,
)
logger.debug("> bp id: {}".format(self._soft_breakpoints[vaddr]))
return self._soft_breakpoints[vaddr]
@lldbagilityutils.indented(logger)
@lldbagilityutils.synchronized
def unset_soft_breakpoint(self, vaddr):
logger.debug("unset_soft_breakpoint(vaddr=0x{:016x})")
assert self.is_state_halted()
try:
id = self._soft_breakpoints[vaddr]
except KeyError:
logger.debug("> no such breakpoint")
else:
del self._soft_breakpoints[vaddr]
return self.stub.UnsetBreakpoint(id)
return False
@lldbagilityutils.indented(logger)
@lldbagilityutils.synchronized
def set_hard_breakpoint(self, trigger, nreg, vaddr):
logger.debug(
"set_hard_exec_breakpoint(trigger='{}', nreg=0x{:016x}, vaddr=0x{:016x})".format(
trigger, nreg, vaddr
)
)
assert self.is_state_halted()
assert trigger in ("e", "w", "rw")
assert 0 <= nreg <= 3
trigger_bitshifts = {nreg: 16 + nreg * 4 for nreg in range(4)}
status_bitshifts = {nreg: nreg * 2 for nreg in range(4)}
ctrl_mask = self.read_register("dr7")
ctrl_mask &= ~(0b11 << trigger_bitshifts[nreg])
if trigger == "e":
trigger_entry = 0b00
elif trigger == "w":
trigger_entry = 0b01
elif trigger == "rw":
trigger_entry = 0b11
else:
raise NotImplementedError
ctrl_mask |= trigger_entry << trigger_bitshifts[nreg]
ctrl_mask |= 0b10 << status_bitshifts[nreg]
logger.debug("> ctrl_mask: 0b{:032b}".format(ctrl_mask))
self.write_register("dr{}".format(nreg), vaddr)
self.write_register("dr7", ctrl_mask)
@lldbagilityutils.indented(logger)
@lldbagilityutils.synchronized
def unset_hard_breakpoint(self, nreg):
logger.debug("unset_hard_breakpoint(nreg=0x{:016x})".format(nreg))
assert self.is_state_halted()
assert 0 <= nreg <= 3
status_bitshifts = {nreg: nreg * 2 for nreg in range(4)}
ctrl_mask = self.read_register("dr7")
ctrl_mask &= ~(0b11 << status_bitshifts[nreg])
logger.debug("> ctrl_mask: 0b{:032b}".format(ctrl_mask))
self.write_register("dr{}".format(nreg), 0x0)
self.write_register("dr7", ctrl_mask)
@lldbagilityutils.indented(logger)
@lldbagilityutils.synchronized
def unset_all_breakpoints(self):
logger.debug("unset_all_breakpoints()")
assert self.is_state_halted()
self._soft_breakpoints.clear()
self.stub.UnsetAllBreakpoint()
self.write_register("dr0", 0x0)
self.write_register("dr1", 0x0)
self.write_register("dr2", 0x0)
self.write_register("dr3", 0x0)
self.write_register("dr6", 0x0)
self.write_register("dr7", 0x0)
@lldbagilityutils.indented(logger)
@lldbagilityutils.synchronized
def halt(self):
logger.debug("halt()")
self.stub.Pause()
@lldbagilityutils.indented(logger)
@lldbagilityutils.synchronized
def interrupt(self):
logger.debug("interrupt()")
self._exception = (EXC_SOFTWARE, EXC_SOFT_SIGNAL, SIGINT)
self.halt()
@lldbagilityutils.indented(logger)
@lldbagilityutils.synchronized
def single_step(self):
logger.debug("single_step()")
self._exception = (EXC_BREAKPOINT, EXC_I386_BPTFLT, 0x0)
self.stub.SingleStep()
@lldbagilityutils.indented(logger)
@lldbagilityutils.synchronized
def resume(self):
logger.debug("resume()")
if self._interrupt_at_next_resume:
logger.debug("> _interrupt_at_next_resume")
self._interrupt_at_next_resume = False
self.interrupt()
return
if self._singlestep_at_next_resume:
logger.debug("> _singlestep_at_next_resume")
self._singlestep_at_next_resume = False
self.single_step()
return
if self.is_breakpoint_hit():
logger.debug(
"> state breakpoint hit: 0x{:016x}".format(self.read_register("rip"))
)
self.stub.SingleStep()
self.stub.Resume()
@lldbagilityutils.indented(logger)
@lldbagilityutils.synchronized
def interrupt_and_take_snapshot(self):
logger.debug("interrupt_and_take_snapshot()")
self.interrupt()
self.stub.Save()
@lldbagilityutils.indented(logger)
@lldbagilityutils.synchronized
def interrupt_and_restore_last_snapshot(self):
logger.debug("interrupt_and_restore_last_snapshot()")
self.interrupt()
if self.stub.Restore():
self._soft_breakpoints.clear()
return True
else:
logger.debug("> could not restore")
return False
@lldbagilityutils.indented(logger)
@lldbagilityutils.synchronized
def state(self):
logger.debug("state()")
if self.is_breakpoint_hit():
logger.debug("> state breakpoint hit")
self._exception = (EXC_BREAKPOINT, EXC_I386_BPTFLT, 0x0)
self._return_incremented_at_next_read_register_rip = True
state = (self.stub.GetState(), self._exception)
self._exception = None
return state
@lldbagilityutils.synchronized
def is_state_changed(self):
return self.stub.GetStateChanged() or self._exception
@lldbagilityutils.indented(logger)
@lldbagilityutils.synchronized
def is_state_halted(self):
logger.debug("is_state_halted()")
return self.stub.GetState() & self.stub.STATE_PAUSED
@lldbagilityutils.indented(logger)
@lldbagilityutils.synchronized
def is_breakpoint_hit(self):
logger.debug("is_breakpoint_hit()")
return self.stub.GetState() & (
self.stub.STATE_BREAKPOINT_HIT | self.stub.STATE_HARD_BREAKPOINT_HIT
)
@lldbagilityutils.indented(logger)
@lldbagilityutils.synchronized
def interrupt_at_next_resume(self):
logger.debug("interrupt_at_next_resume()")
self._interrupt_at_next_resume = True
@lldbagilityutils.indented(logger)
@lldbagilityutils.synchronized
def store_kdp_at_next_write_virtual_memory(self):
logger.debug("store_kdp_at_next_write_virtual_memory()")
self._store_kdp_at_next_write_virtual_memory = True
@lldbagilityutils.indented(logger)
@lldbagilityutils.synchronized
def abort_store_kdp_at_next_write_virtual_memory(self):
logger.debug("abort_store_kdp_at_next_write_virtual_memory()")
assert not self._kdp_vaddr
self._store_kdp_at_next_write_virtual_memory = False
def _in_kernel_space(addr):
return VM_MIN_KERNEL_ADDRESS <= addr <= VM_MAX_KERNEL_ADDRESS
@lldbagilityutils.indented(logger)
def _find_kernel_load_vaddr(vm):
logger.debug("_find_kernel_load_vaddr()")
assert _in_kernel_space(vm.read_register("rip"))
@lldbagilityutils.indented(logger)
def _is_kernel_load_vaddr(vaddr):
logger.debug("_is_kernel_load_vaddr()")
if not _in_kernel_space(vaddr):
return False
data = vm.read_virtual_memory(vaddr, 0x4)
return data and lldbagilityutils.u32(data) == MH_MAGIC_64
@lldbagilityutils.indented(logger)
def _get_debug_kernel_load_vaddr():
logger.debug("_get_debug_kernel_load_vaddr()")
# boot-args nvram setting, the kernel's load address will be noted
# in the lowglo page at a fixed address"
# https://github.com/llvm/llvm-project/blob/llvmorg-8.0.0/lldb/source/Plugins/DynamicLoader/Darwin-Kernel/DynamicLoaderDarwinKernel.cpp#L226
# https://github.com/apple/darwin-xnu/blob/xnu-4903.221.2/osfmk/x86_64/lowglobals.h#L54
# https://github.com/apple/darwin-xnu/blob/xnu-4903.221.2/osfmk/x86_64/pmap.c#L1175
lgStext_vaddr = 0xFFFFFF8000002010
data = vm.read_virtual_memory(lgStext_vaddr, 0x8)
if data:
vaddr = lldbagilityutils.u64(data)
if _is_kernel_load_vaddr(vaddr):
return vaddr
else:
# probably trying to attach to the target before lgStext is initialised
return None
else:
return None
@lldbagilityutils.indented(logger)
def _search_kernel_load_vaddr(start_vaddr):
logger.debug(
"_search_kernel_load_vaddr(start_vaddr=0x{:016x})".format(start_vaddr)
)
# try to find the load address manually
assert _in_kernel_space(start_vaddr)
vaddr = start_vaddr & ~(I386_PGBYTES - 1)
while vaddr >= VM_MIN_KERNEL_ADDRESS:
if _is_kernel_load_vaddr(vaddr):
return vaddr
vaddr -= I386_PGBYTES
else:
raise AssertionError
kernel_load_vaddr = _get_debug_kernel_load_vaddr() or _search_kernel_load_vaddr(
vm.read_register("rip")
)
return kernel_load_vaddr
def _compute_kernel_slide(kernel_load_vaddr):
return kernel_load_vaddr - 0xFFFFFF8000200000
@lldbagilityutils.indented(logger)
def _find_kernel_version(vm):
logger.debug("_find_kernel_version()")
kernel_macho = b""
while len(kernel_macho) < 42 * 1024 * 1024: # a reasonable upper bound?
buf = b""
while len(buf) < 2 * 1024 * 1024:
vaddr = vm.kernel_load_vaddr + len(kernel_macho) + len(buf)
buf += vm.read_virtual_memory(vaddr, I386_PGBYTES)
kernel_macho += buf
try:
kernel_version = re.search(
b"(?P<version>Darwin Kernel Version .+?X86_64)\0", kernel_macho
).group("version")
except AttributeError:
continue
else:
return kernel_version
else:
raise AssertionError
class FDPSTUB(FDP):
NO_CR3 = FDP.FDP_NO_CR3
SOFT_HBP = FDP.FDP_SOFTHBP
CR_HBP = FDP.FDP_CRHBP
VIRTUAL_ADDRESS = FDP.FDP_VIRTUAL_ADDRESS
EXECUTE_BP = FDP.FDP_EXECUTE_BP
WRITE_BP = FDP.FDP_WRITE_BP
STATE_PAUSED = FDP.FDP_STATE_PAUSED
STATE_BREAKPOINT_HIT = FDP.FDP_STATE_BREAKPOINT_HIT
STATE_HARD_BREAKPOINT_HIT = FDP.FDP_STATE_HARD_BREAKPOINT_HIT
CPU0 = FDP.FDP_CPU0
def __init__(self, name):
super(FDPSTUB, self).__init__(name)
assert self.GetCpuCount() == 1, (
"VMs with more than one CPU are not fully supported by FDP! "
"Decrease the number of processors in the VM settings"
)
class VMSNSTUB(VMSN):
NO_CR3 = 0
SOFT_HBP = 2
CR_HBP = 0
VIRTUAL_ADDRESS = 0
EXECUTE_BP = 0
WRITE_BP = 0
STATE_PAUSED = 1
STATE_BREAKPOINT_HIT = 1
STATE_HARD_BREAKPOINT_HIT = 0
CPU0 = FDP.FDP_CPU0
def __init__(self, name):
super(VMSNSTUB, self).__init__(name)
| true | true |
f72f83101ec2c8b57466741fe18334c3e7fb24c9 | 3,882 | py | Python | dewars/zone6.py | drnasmith/flask-ispyb-logistics | 930b2707fc1d679607fc2d8d3d0895edfc944af6 | [
"Apache-2.0"
] | null | null | null | dewars/zone6.py | drnasmith/flask-ispyb-logistics | 930b2707fc1d679607fc2d8d3d0895edfc944af6 | [
"Apache-2.0"
] | null | null | null | dewars/zone6.py | drnasmith/flask-ispyb-logistics | 930b2707fc1d679607fc2d8d3d0895edfc944af6 | [
"Apache-2.0"
] | 1 | 2018-10-31T13:53:27.000Z | 2018-10-31T13:53:27.000Z | # System imports
from datetime import datetime
import time
import json
import logging
# Package imports
from flask import Blueprint
from flask import render_template
from flask import jsonify
from flask import request
# Local imports
import common
api = Blueprint('zone6', __name__, url_prefix='/zone6')
rack_prefix = 'RACK'
# Rack grid: rows A-R (I and O are skipped) x columns 1-4, then X1-X12.
rack_suffixes = ['{}{}'.format(row, col)
                 for row in ('A', 'B', 'C', 'D', 'E', 'F', 'G', 'H',
                             'J', 'K', 'L', 'M', 'N', 'P', 'Q', 'R')
                 for col in ('1', '2', '3', '4')]
rack_suffixes.extend('X{}'.format(n) for n in range(1, 13))
rack_locations = ['-'.join([rack_prefix, suffix]) for suffix in rack_suffixes]
beamlines = ['i03', 'i04', 'i04-1', 'i24']
beamline_prefix = 'BEAMLINE'
beamline_locations = ['{}-{}'.format(beamline_prefix, b.upper()) for b in beamlines]
# Extra non-beamline destinations dewars can be moved to.
beamline_locations.extend(['USER-COLLECTION', 'STORES-OUT', 'ZONE-6-STORE'])
"""
App to demonstrate use of vuejs
"""
@api.route("/vdewars")
def vdewars():
return render_template('vue-dewars.html', title="Zone6 Dewars", api_prefix="zone6", rack_locations=rack_locations)
@api.route('/')
def index():
    """Render the main (classic) dewar-management page."""
    context = {
        "title": "zone6 Dewar Management",
        "rack_locations": rack_locations,
        "rack_suffixes": rack_suffixes,
        "rack_prefix": rack_prefix,
        "beamlines": beamline_locations,
        "api_prefix": "zone6",
    }
    return render_template('dewars.html', **context)
@api.route('/dewars', methods=["GET", "POST", "DELETE"])
def location():
    """
    API route for dewar management.

    GET    - report which dewar sits at each known rack location
    POST   - record a dewar move (form fields: 'barcode', 'location')
    DELETE - clear a location (form field, or 'location' query parameter)

    Returns a JSON payload plus the HTTP status code from the common layer.
    """
    result = {}
    status_code = 200
    if request.method == "GET":
        # Get any dewar with any rack location
        # There should only be one per location
        # Simple call so use controller directly
        result, status_code = common.find_dewars_by_location(rack_locations)
    elif request.method == "POST":
        # NOTE(review): missing form fields raise KeyError here, which Flask
        # reports as 400 Bad Request - confirm that is the intended contract
        location = request.form['location']
        barcode = request.form['barcode']
        result, status_code = common.update_dewar_location(barcode, location)
    elif request.method == "DELETE":
        try:
            location = request.form['location']
        except KeyError:
            # No form data (used axios?) Try query-string params instead
            location = request.args.get('location')
        result, status_code = common.remove_dewar_from_location(location)
    else:
        # Defensive fallback - in practice Flask answers 405 for any method
        # not listed in the route decorator before this handler runs
        result = {'location': '',
                  'barcode': '',
                  'status': 'fail',
                  'reason': 'Method/route not implemented yet'}
        status_code = 501
    return jsonify(result), status_code
@api.route('/dewars/find', methods=["GET"])
def find():
    """
    Return a list of dewars matching a facility code.
    Request with the code in the URL, e.g. ?fc=DLS-MS-1234.
    The status code is returned explicitly so the front end can show feedback.
    """
    payload, status_code = common.find_dewar(request.args.get('fc'))
    return jsonify(payload), status_code
| 29.409091 | 118 | 0.516486 |
from datetime import datetime
import time
import json
import logging
from flask import Blueprint
from flask import render_template
from flask import jsonify
from flask import request
import common
api = Blueprint('zone6', __name__, url_prefix='/zone6')
rack_prefix = 'RACK'
rack_suffixes = ['A1', 'A2', 'A3', 'A4',
'B1', 'B2', 'B3', 'B4',
'C1', 'C2', 'C3', 'C4',
'D1', 'D2', 'D3', 'D4',
'E1', 'E2', 'E3', 'E4',
'F1', 'F2', 'F3', 'F4',
'G1', 'G2', 'G3', 'G4',
'H1', 'H2', 'H3', 'H4',
'J1', 'J2', 'J3', 'J4',
'K1', 'K2', 'K3', 'K4',
'L1', 'L2', 'L3', 'L4',
'M1', 'M2', 'M3', 'M4',
'N1', 'N2', 'N3', 'N4',
'P1', 'P2', 'P3', 'P4',
'Q1', 'Q2', 'Q3', 'Q4',
'R1', 'R2', 'R3', 'R4',
'X1', 'X2', 'X3', 'X4',
'X5', 'X6', 'X7', 'X8',
'X9', 'X10', 'X11', 'X12',
]
rack_locations = ['-'.join([rack_prefix, suffix])
for suffix in rack_suffixes]
beamlines = ['i03',
'i04',
'i04-1',
'i24',
]
beamline_prefix = 'BEAMLINE'
beamline_locations = ['{}-{}'.format(beamline_prefix, x.upper()) for x in beamlines]
beamline_locations.extend(['USER-COLLECTION',
'STORES-OUT',
'ZONE-6-STORE',
])
@api.route("/vdewars")
def vdewars():
return render_template('vue-dewars.html', title="Zone6 Dewars", api_prefix="zone6", rack_locations=rack_locations)
@api.route('/')
def index():
return render_template('dewars.html',
title="zone6 Dewar Management",
rack_locations=rack_locations,
rack_suffixes=rack_suffixes,
rack_prefix=rack_prefix,
beamlines=beamline_locations,
api_prefix="zone6",
)
@api.route('/dewars', methods=["GET", "POST", "DELETE"])
def location():
result = {}
status_code = 200
if request.method == "GET":
result, status_code = common.find_dewars_by_location(rack_locations)
elif request.method == "POST":
location = request.form['location']
barcode = request.form['barcode']
result, status_code = common.update_dewar_location(barcode, location)
elif request.method == "DELETE":
try:
location = request.form['location']
except KeyError:
location = request.args.get('location')
result, status_code = common.remove_dewar_from_location(location)
else:
result = {'location': '',
'barcode': '',
'status': 'fail',
'reason': 'Method/route not implemented yet'}
status_code = 501
return jsonify(result), status_code
@api.route('/dewars/find', methods=["GET"])
def find():
facilitycode = request.args.get('fc')
result, status_code = common.find_dewar(facilitycode)
return jsonify(result), status_code
| true | true |
f72f834ad520c083812e4cfa1295c11679786a0f | 7,772 | py | Python | video2html.py | stevevai/video2chars | b90a2280d184083a746087d1c12d638afa3da5bb | [
"Apache-2.0"
] | 1 | 2019-04-14T16:28:25.000Z | 2019-04-14T16:28:25.000Z | video2html.py | stevevai/video2chars | b90a2280d184083a746087d1c12d638afa3da5bb | [
"Apache-2.0"
] | null | null | null | video2html.py | stevevai/video2chars | b90a2280d184083a746087d1c12d638afa3da5bb | [
"Apache-2.0"
] | null | null | null | # -*- coding:utf-8 -*-
import json
import os
import cv2
import numpy as np
from time import time
import webbrowser
# JavaScript player embedded by write_html_with_json(): every 1000/fps ms it
# renders the next frame from the global `frames` array into document.body.
play_chars_js = '''
let i = 0;
window.setInterval(function(){
    let img = frames[i++ % frames.length];
    let html = ""
    for(let line of img){
        for(let char of line){
            let [[r,g,b], ch] = char;
            // BUGFIX: append to the html buffer - the original appended to the
            // loop variable `line`, so every frame rendered as empty rows
            html += '<span style="color:rgb(' + r + ', ' + g + ', '+ b + ');">'+ ch + '</span>'
        }
        html += "<br>"
    }
    document.body.innerHTML = html;
}, 1000/fps);
'''
class VideoToHtml:
    """Convert a video file into an HTML page of colored character frames."""

    # Character ramp ordered from sparse to dense: a low gray value maps to a
    # sparse character, a high gray value to a dense one
    pixels = ".,:!+mw1I?2354KE%8B&$WM@#"

    def __init__(self, video_path, fps_for_html=8, time_interval=None):
        """
        :param video_path: string, path of the video file
        :param fps_for_html: frame rate of the generated html
        :param time_interval: (start, end) in seconds, used to clip the video
        """
        # Create a VideoCapture object for the given file
        self.cap = cv2.VideoCapture(video_path)
        # NOTE: OpenCV returns these properties as floats
        self.width = self.cap.get(cv2.CAP_PROP_FRAME_WIDTH)
        self.height = self.cap.get(cv2.CAP_PROP_FRAME_HEIGHT)
        self.frames_count_all = self.cap.get(cv2.CAP_PROP_FRAME_COUNT)
        self.fps = self.cap.get(cv2.CAP_PROP_FPS)
        self.resize_width = None
        self.resize_height = None
        self.frames_count = 0
        self.fps_for_html = fps_for_html
        self.time_interval = time_interval

    def set_width(self, width):
        """Set the output width; only shrinking is allowed, aspect ratio is kept."""
        if width >= self.width:
            return False
        else:
            self.resize_width = width
            self.resize_height = int(self.height * (width / self.width))
            return True

    def set_height(self, height):
        """Set the output height; only shrinking is allowed, aspect ratio is kept."""
        if height >= self.height:
            return False
        else:
            self.resize_height = height
            self.resize_width = int(self.width * (height / self.height))
            return True

    def resize(self, img):
        """
        Scale img to the configured size.
        Principle: only shrink, never enlarge.
        """
        # No target size configured -> nothing to do
        if not self.resize_width or not self.resize_height:
            return img
        else:
            size = (self.resize_width, self.resize_height)
            return cv2.resize(img, size, interpolation=cv2.INTER_AREA)

    def get_img_by_pos(self, pos):
        """Read the frame at index pos; returns (ret, frame)."""
        # Move the read pointer to the requested frame
        self.cap.set(cv2.CAP_PROP_POS_FRAMES, pos)
        # cap.read() returns:
        #   ret   - bool, whether a frame was read
        #   frame - the image as a numpy.ndarray
        ret, frame = self.cap.read()
        return ret, frame

    def get_frame_pos(self):
        """Return the positions of the frames to sample (lazy range)."""
        # Sample every step-th frame; clamp to >= 1 so range() stays valid
        # even when fps_for_html exceeds the source fps (step would be 0)
        step = max(1, int(self.fps / self.fps_for_html))
        if not self.time_interval:
            # No clipping requested: sample the whole video.
            # BUGFIX: cast to int - OpenCV reports the frame count as a float
            # and range() rejects float arguments
            self.frames_count = int(self.frames_count_all / step)  # update count
            return range(0, int(self.frames_count_all), step)
        # Clip to the requested (start, end) interval
        start, end = self.time_interval
        pos_start = int(self.fps * start)
        pos_end = int(self.fps * end)
        self.frames_count = int((pos_end - pos_start) / step)  # update count
        return range(pos_start, pos_end, step)

    def get_imgs(self):
        """Generator over the sampled (and resized) frames."""
        assert self.cap.isOpened()
        for pos in self.get_frame_pos():
            ret, frame = self.get_img_by_pos(pos)
            if not ret:
                print("读取失败,跳出循环")
                break
            yield self.resize(frame)  # lazy evaluation
        # Release the capture when iteration finishes
        self.cap.release()

    def get_char(self, gray):
        """Map a gray value (0-255) to a character from the ramp."""
        percent = gray / 255  # normalize to 0-1
        index = int(percent * (len(self.pixels) - 1))
        return self.pixels[index]

    def get_pix_html(self, r, g, b, gray):
        """Return one pixel as an HTML span: a ramp character colored (r, g, b)."""
        # BUGFIX: this method previously computed the character and returned
        # nothing; it now returns the span it is named after
        char = self.get_char(gray)
        return '<span style="color:rgb({}, {}, {})">{}</span>'.format(r, g, b, char)

    def get_html_pic(self, img, img_id):
        """
        Convert img into one HTML frame made of background-colored blocks.
        :return: a div element; all frames except the first start hidden
        """
        hidden = 'hidden="hidden"' if img_id != 0 else ''
        html_pic = [f'<div id="f-{img_id}" {hidden}>']
        # numpy stores images as (height, width, channel) - note the order
        height, width, channel = img.shape
        for y in range(height):
            for x in range(width):
                # BUGFIX: OpenCV frames are BGR, not RGB - unpack accordingly
                b, g, r = img[y][x]
                # Each pixel is a non-breaking space carrying the background color
                pixel_char = f'<span style="background-color:rgb({r}, {g}, {b})"> </span>'
                html_pic.append(pixel_char)
            html_pic.append("<br>")  # end of row
        html_pic.append('</div>')
        return "".join(html_pic)

    def write_html(self, file_name):
        """Render every sampled frame into file_name as a sequence of divs."""
        time_start = time()
        with open(file_name, 'w') as html:
            # A monospace font is mandatory or columns will not line up;
            # line-height 0.75 compensates the glyph aspect ratio
            html.write('<!DOCTYPE html><html>'
                       f'<script>window.fps = {self.fps_for_html};</script>'
                       '<script src="play_chars.js"></script>'
                       '<body style="font-family: monospace;font-size: xx-small;'
                       'text-align: center;line-height: 0.75;font-weight: bolder;">'
                       )
            try:
                i = 0
                for img in self.get_imgs():
                    html_pic = self.get_html_pic(img, i)
                    html.write(html_pic)
                    # BUGFIX: report progress every 30 frames - the original
                    # condition "if i % 30:" printed on every frame *except* those
                    if i % 30 == 0:
                        print(f"进度:{i/self.frames_count * 100:.2f}%, 已用时:{time() - time_start:.2f}")
                    i += 1
            finally:
                html.write("</body>"
                           "</html>")

    def get_json_pic(self, img):
        """Encode img as JSON rows of [[r, g, b], char] cells. Experimental."""
        json_pic = []
        # numpy stores images as (height, width, channel) - note the order
        height, width, channel = img.shape
        # Grayscale copy used to pick a suitable ramp character
        img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        for y in range(height):
            line = []
            for x in range(width):
                # BUGFIX: OpenCV frames are BGR, not RGB - unpack accordingly
                b, g, r = img[y][x]
                gray = img_gray[y][x]
                char = self.get_char(gray)
                line.append([[int(r), int(g), int(b)], char])
            json_pic.append(line)
        return json.dumps(json_pic)

    def write_html_with_json(self, file_name):
        """Render frames as embedded JSON plus the player script. Experimental."""
        with open(file_name, 'w') as html:
            # A monospace font is mandatory or columns will not line up
            html.write('<!DOCTYPE html>'
                       '<html>'
                       '<body style="font-family: monospace;font-size: xx-small;text-align: center;line-height: 0.7;">'
                       '</body>'
                       '<script>'
                       'var frames=[\n')
            try:
                i = 0
                for img in self.get_imgs():
                    json_pic = self.get_json_pic(img)
                    html.write(f"{json_pic},\n")
                    # BUGFIX: report progress every 20 frames (see write_html)
                    if i % 20 == 0:
                        print(f"进度:{i/self.frames_count * 100:.2f}%")
                    i += 1
            finally:
                html.write('];'
                           f'var fps={self.fps_for_html};'
                           f'{play_chars_js}'
                           '</script>'
                           '</html>')
def get_file_name(file_path):
    """Return the file name from file_path without directory or extension."""
    base_name = os.path.basename(file_path)
    return os.path.splitext(base_name)[0]
def main():
    """Demo entry point: convert a clip of BadApple.mp4 and open the result."""
    # Video path - replace with your own file
    # video_path = "/home/ryan/Downloads/HorribleSubs+Golden+Kamuy+02+1080p.mp4"
    video_path = "BadApple.mp4"
    # Convert seconds 20-30 at 5 html-frames per second, 100 chars wide
    video2html = VideoToHtml(video_path, fps_for_html=5, time_interval=(20, 30))
    video2html.set_width(100)
    html_name = "output/" + get_file_name(video_path) + ".html"
    # NOTE(review): write_html is commented out, so the browser may open a file
    # that was never generated - uncomment the next line to actually produce it
    # video2html.write_html(html_name)
    webbrowser.open(html_name)
if __name__ == "__main__":
    main()
| 28.15942 | 119 | 0.523289 |
import json
import os
import cv2
import numpy as np
from time import time
import webbrowser
play_chars_js = '''
let i = 0;
window.setInterval(function(){
let img = frames[i++];
let html = ""
for(let line of img){
for(let char of line){
let [[r,g,b], ch] = char;
line += '<span style="color:rgb(' + r + ', ' + g + ', '+ b + ');">'+ ch + '</span>'
}
html += "<br>"
}
document.body.innerHTML = html;
}, 1000/fps);
'''
class VideoToHtml:
pixels = ".,:!+mw1I?2354KE%8B&$WM@#"
def __init__(self, video_path, fps_for_html=8, time_interval=None):
self.cap = cv2.VideoCapture(video_path)
self.width = self.cap.get(cv2.CAP_PROP_FRAME_WIDTH)
self.height = self.cap.get(cv2.CAP_PROP_FRAME_HEIGHT)
self.frames_count_all = self.cap.get(cv2.CAP_PROP_FRAME_COUNT)
self.fps = self.cap.get(cv2.CAP_PROP_FPS)
self.resize_width = None
self.resize_height = None
self.frames_count = 0
self.fps_for_html = fps_for_html
self.time_interval = time_interval
def set_width(self, width):
if width >= self.width:
return False
else:
self.resize_width = width
self.resize_height = int(self.height * (width / self.width))
return True
def set_height(self, height):
if height >= self.height:
return False
else:
self.resize_height = height
self.resize_width = int(self.width * (height / self.height))
return True
def resize(self, img):
if not self.resize_width or not self.resize_height:
return img
else:
size = (self.resize_width, self.resize_height)
return cv2.resize(img, size, interpolation=cv2.INTER_AREA)
def get_img_by_pos(self, pos):
self.cap.set(cv2.CAP_PROP_POS_FRAMES, pos)
ret, frame = self.cap.read()
return ret, frame
def get_frame_pos(self):
step = int(self.fps / self.fps_for_html)
if not self.time_interval:
self.frames_count = int(self.frames_count_all / step)
return range(0, self.frames_count_all, step)
start, end = self.time_interval
pos_start = int(self.fps * start)
pos_end = int(self.fps * end)
self.frames_count = int((pos_end - pos_start) / step)
return range(pos_start, pos_end, step)
def get_imgs(self):
assert self.cap.isOpened()
for i in self.get_frame_pos():
ret, frame = self.get_img_by_pos(i)
if not ret:
print("读取失败,跳出循环")
break
yield self.resize(frame)
self.cap.release()
def get_char(self, gray):
percent = gray / 255
index = int(percent * (len(self.pixels) - 1))
return self.pixels[index]
def get_pix_html(self, r, g, b, gray):
char = self.get_char(gray)
def get_html_pic(self, img, img_id):
hidden = 'hidden="hidden"' if img_id != 0 else ''
html_pic = [f'<div id="f-{img_id}" {hidden}>']
height, width, channel = img.shape
img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
for y in range(height):
for x in range(width):
r, g, b = img[y][x]
gray = img_gray[y][x]
pixel_char = f'<span style="background-color:rgb({r}, {g}, {b})"> </span>'
html_pic.append(pixel_char)
html_pic.append("<br>")
html_pic.append('</div>')
return "".join(html_pic)
def write_html(self, file_name):
time_start = time()
with open(file_name, 'w') as html:
html.write('<!DOCTYPE html><html>'
f'<script>window.fps = {self.fps_for_html};</script>'
'<script src="play_chars.js"></script>'
'<body style="font-family: monospace;font-size: xx-small;'
'text-align: center;line-height: 0.75;font-weight: bolder;">'
)
try:
i = 0
for img in self.get_imgs():
html_pic = self.get_html_pic(img, i)
html.write(html_pic)
if i % 30:
print(f"进度:{i/self.frames_count * 100:.2f}%, 已用时:{time() - time_start:.2f}")
i += 1
finally:
html.write("</body>"
"</html>")
def get_json_pic(self, img):
json_pic = []
height, width, channel = img.shape
img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
for y in range(height):
line = []
for x in range(width):
r, g, b = img[y][x]
gray = img_gray[y][x]
char = self.get_char(gray)
line.append([[int(r), int(g), int(b)], char])
json_pic.append(line)
return json.dumps(json_pic)
def write_html_with_json(self, file_name):
with open(file_name, 'w') as html:
html.write('<!DOCTYPE html>'
'<html>'
'<body style="font-family: monospace;font-size: xx-small;text-align: center;line-height: 0.7;">'
'</body>'
'<script>'
'var frames=[\n')
try:
i = 0
for img in self.get_imgs():
json_pic = self.get_json_pic(img)
html.write(f"{json_pic},\n")
if i % 20:
print(f"进度:{i/self.frames_count * 100:.2f}%")
i += 1
finally:
html.write('];'
f'var fps={self.fps_for_html};'
f'{play_chars_js}'
'</script>'
'</html>')
def get_file_name(file_path):
path, file_name_with_extension = os.path.split(file_path)
file_name, file_extension = os.path.splitext(file_name_with_extension)
return file_name
def main():
video_path = "BadApple.mp4"
video2html = VideoToHtml(video_path, fps_for_html=5, time_interval=(20, 30))
video2html.set_width(100)
html_name = "output/" + get_file_name(video_path) + ".html"
webbrowser.open(html_name)
if __name__ == "__main__":
main()
| true | true |
f72f8513b66880f4db7e38933741e9e12297e8ac | 142 | py | Python | data_parse/get_banks_NY.py | hhalim/TargetBanks | 8febd7300f3b01e92641e0f63355d3f66bfe674c | [
"MIT"
] | null | null | null | data_parse/get_banks_NY.py | hhalim/TargetBanks | 8febd7300f3b01e92641e0f63355d3f66bfe674c | [
"MIT"
] | null | null | null | data_parse/get_banks_NY.py | hhalim/TargetBanks | 8febd7300f3b01e92641e0f63355d3f66bfe674c | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from parse_banks import parse_and_insert
# STATE = NY (the original comment said TX, which contradicts this file)
file = '../data/NY_SOD_FDIC.html'
parse_and_insert(file, 'NY')
| 17.75 | 40 | 0.697183 |
from parse_banks import parse_and_insert
file = '../data/NY_SOD_FDIC.html'
parse_and_insert(file, 'NY')
| true | true |
f72f865c6f3e0b0d982fd0cd8b5149aa4a6ebc41 | 12,464 | py | Python | tools/gen_esp_err_to_name.py | jkoelker/esp-idf | 4b91c82cc447640e5b61407e810f1d6f3eabd233 | [
"Apache-2.0"
] | 2 | 2018-06-27T02:28:03.000Z | 2020-12-08T19:33:44.000Z | tools/gen_esp_err_to_name.py | jkoelker/esp-idf | 4b91c82cc447640e5b61407e810f1d6f3eabd233 | [
"Apache-2.0"
] | null | null | null | tools/gen_esp_err_to_name.py | jkoelker/esp-idf | 4b91c82cc447640e5b61407e810f1d6f3eabd233 | [
"Apache-2.0"
] | 1 | 2021-01-09T16:19:22.000Z | 2021-01-09T16:19:22.000Z | #!/usr/bin/env python
#
# Copyright 2018 Espressif Systems (Shanghai) PTE LTD
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import argparse
import re
import fnmatch
import string
import collections
import textwrap
# list files here which should not be parsed (e.g. compatibility shims)
ignore_files = [ 'components/mdns/test_afl_fuzz_host/esp32_compat.h' ]
# macros from these headers have higher priority in case of error-code collisions
priority_headers = [ 'components/esp32/include/esp_err.h' ]
err_dict = collections.defaultdict(list) #identified errors are stored here; mapped by the error code
rev_err_dict = dict() #map of error string to error code
unproc_list = list() #errors with unknown codes which depend on other (BASE) errors
class ErrItem:
    """
    One parsed error definition.

    Attributes:
    - name    - error identifier string (e.g. ESP_ERR_NO_MEM)
    - file    - path of the defining header, relative to the IDF project
    - comment - (optional) trailing comment for the error
    - rel_str - (optional) name of the BASE error this one is derived from
    - rel_off - (optional) offset added to the BASE error
    """
    def __init__(self, name, file, comment, rel_str = "", rel_off = 0):
        self.name = name
        self.file = file
        self.comment = comment
        self.rel_str = rel_str
        self.rel_off = rel_off
    def __str__(self):
        parts = [self.name, " from ", self.file]
        if self.rel_str != "":
            parts.append(" is (" + self.rel_str + " + " + str(self.rel_off) + ")")
        if self.comment != "":
            parts.append(" // " + self.comment)
        return "".join(parts)
    def __cmp__(self, other):
        # Items from priority headers sort before everything else
        self_prio = self.file in priority_headers
        other_prio = other.file in priority_headers
        if self_prio and not other_prio:
            return -1
        if other_prio and not self_prio:
            return 1
        # Within the same file, *_BASE macros sort after regular errors
        if self.file == other.file:
            self_base = self.name.endswith("_BASE")
            other_base = other.name.endswith("_BASE")
            if self_base != other_base:
                return 1 if self_base else -1
        # Fall back to lexicographic order of file+name
        lhs = self.file + self.name
        rhs = other.file + other.name
        return (lhs > rhs) - (lhs < rhs)
class InputError(RuntimeError):
    """Raised when a line of an input header cannot be parsed."""
    def __init__(self, p, e):
        message = "{}: {}".format(p, e)
        super(InputError, self).__init__(message)
def process(line, idf_path):
    """
    Process one "#define" line of text from file idf_path (relative to the
    IDF project) and record the error it defines.

    Errors whose numeric value is directly known are stored in the global
    err_dict/rev_err_dict maps; errors defined relative to a BASE macro whose
    value is not known yet are queued in unproc_list for
    process_remaining_errors().

    Raises InputError when the line cannot be parsed or comes from a C file.
    """
    if idf_path.endswith(".c"):
        # Error macros are expected in header files only
        raise InputError(idf_path, "This line should be in a header file: %s" % line)
    words = re.split(r' +', line, 2)
    # words[1] is the error name
    # words[2] is the rest of the line (value, base + value, comment)
    if len(words) < 2:
        raise InputError(idf_path, "Error at line %s" % line)
    related = None  # name of the BASE macro when the value is "BASE + offset"
    todo_str = words[2]
    comment = ""
    # identify a possible trailing /*!< ... */ comment
    # (str.strip() is used instead of the removed-in-Python-3 string.strip())
    m = re.search(r'/\*!<(.+?(?=\*/))', todo_str)
    if m:
        comment = m.group(1).strip()
        todo_str = todo_str[:m.start()].strip()  # keep just the part before the comment
    # identify possible parentheses ()
    m = re.search(r'\((.+)\)', todo_str)
    if m:
        todo_str = m.group(1)  # keep what is inside the parentheses
    # identify a BASE error code, e.g. from the form BASE + 0x01
    m = re.search(r'\s*(\w+)\s*\+(.+)', todo_str)
    if m:
        related = m.group(1)   # BASE
        todo_str = m.group(2)  # keep and process only what is after "BASE +"
    # try to match a hexadecimal number
    m = re.search(r'0x([0-9A-Fa-f]+)', todo_str)
    if m:
        num = int(m.group(1), 16)
    else:
        # Try to match a decimal number. Negative values are possible for
        # some macros, e.g. ESP_FAIL.
        m = re.search(r'(-?[0-9]+)', todo_str)
        if m:
            num = int(m.group(1), 10)
        elif re.match(r'\w+', todo_str):
            # It is possible that there is no number, e.g. #define ERROR BASE
            related = todo_str  # BASE error
            num = 0             # (BASE + 0)
        else:
            # BUGFIX: the original cleared `line` beforehand, so this message
            # always printed an empty line; it now shows the offending input
            raise InputError(idf_path, "Cannot parse line %s" % line)
    if related is None:
        # The value is known already because it does not depend on a BASE macro
        err_dict[num].append(ErrItem(words[1], idf_path, comment))
        rev_err_dict[words[1]] = num
    else:
        # Store the information available now and compute the error code later
        unproc_list.append(ErrItem(words[1], idf_path, comment, related, num))
def process_remaining_errors():
    """
    Resolve errors that could not be processed before because the numeric
    value of their BASE macro was unknown at the time.

    This works reliably only for one level of indirection, e.g.:
        #define BASE1 0
        #define BASE2 (BASE1 + 10)
        #define ERROR (BASE2 + 10) - ERROR resolves successfully only if it
                                     is processed later than BASE2
    """
    for item in unproc_list:
        if item.rel_str in rev_err_dict:
            # BASE is known now - compute the final code and register it
            num = rev_err_dict[item.rel_str] + item.rel_off
            err_dict[num].append(ErrItem(item.name, item.file, item.comment))
            rev_err_dict[item.name] = num
        else:
            print(item.rel_str + " referenced by " + item.name + " in " + item.file + " is unknown")
    # Clear the queue in place so other references to the list stay valid
    del unproc_list[:]
def path_to_include(path):
    """
    Convert a path (relative to the IDF project) into the form used in a C
    #include directive.

    Using just the filename does not always work because some headers sit
    deeper in the tree. This function looks for an 'include' parent directory
    and keeps everything after it, e.g.
    "components/XY/include/esp32/file.h" becomes "esp32/file.h".

    This only works when the subdirectories are inside an "include"
    directory; other special cases need to be handled here when the compiler
    reports an unknown header file.
    """
    # BUGFIX/portability: string.split() was removed in Python 3 - the str
    # method works on both Python 2 and 3
    spl_path = path.split(os.sep)
    try:
        i = spl_path.index('include')
    except ValueError:
        # no 'include' component in the path -> fall back to the bare filename
        return os.path.basename(path)
    else:
        return os.sep.join(spl_path[i + 1:])  # subdirectories and filename after "include"
def print_warning(error_list, error_code):
    """Report a list of errors that share the same numeric error code."""
    print("[WARNING] The following errors have the same code (%d):" % error_code)
    for err in error_list:
        print("    " + str(err))
def max_string_width():
    """Return the length of the longest error name collected in err_dict."""
    longest = 0
    for items in err_dict.values():
        for item in items:
            name_len = len(item.name)
            if name_len > longest:
                longest = name_len
    return longest
def generate_c_output(fin, fout):
    """
    Write the C output to fout based on the error dictionary err_dict and the
    template file fin.

    The template placeholders @COMMENT@, @HEADERS@ and @ERROR_ITEMS@ are
    expanded; every other template line is copied through unchanged.
    """
    # make includes unique by using a set
    includes = set()
    for k in err_dict.keys():
        for e in err_dict[k]:
            includes.add(path_to_include(e.file))
    # The order in a set in non-deterministic therefore it could happen that the
    # include order will be different in other machines and false difference
    # in the output file could be reported. In order to avoid this, the items
    # are sorted in a list.
    include_list = list(includes)
    include_list.sort()
    max_width = max_string_width() + 17 + 1 # length of " ERR_TBL_IT()," with spaces is 17
    max_decdig = max(len(str(k)) for k in err_dict.keys())
    for line in fin:
        if re.match(r'@COMMENT@', line):
            fout.write("//Do not edit this file because it is autogenerated by " + os.path.basename(__file__) + "\n")
        elif re.match(r'@HEADERS@', line):
            # guard each include so headers of disabled components do not break the build
            for i in include_list:
                fout.write("#if __has_include(\"" + i + "\")\n#include \"" + i + "\"\n#endif\n")
        elif re.match(r'@ERROR_ITEMS@', line):
            last_file = ""
            for k in sorted(err_dict.keys()):
                if len(err_dict[k]) > 1:
                    # several macros share this numeric code: sort them
                    # (priority headers first) and warn the maintainer
                    err_dict[k].sort()
                    print_warning(err_dict[k], k)
                for e in err_dict[k]:
                    if e.file != last_file:
                        # start a new "// <header>" section in the table
                        last_file = e.file
                        fout.write(" // %s\n" % last_file)
                    table_line = (" ERR_TBL_IT(" + e.name + "), ").ljust(max_width) + "/* " + str(k).rjust(max_decdig)
                    fout.write("# ifdef %s\n" % e.name)
                    fout.write(table_line)
                    hexnum_length = 0
                    if k > 0: # negative number and zero should be only ESP_FAIL and ESP_OK
                        hexnum = " 0x%x" % k
                        hexnum_length = len(hexnum)
                        fout.write(hexnum)
                    if e.comment != "":
                        if len(e.comment) < 50:
                            fout.write(" %s" % e.comment)
                        else:
                            # long comments are wrapped and aligned under the entry
                            indent = " " * (len(table_line) + hexnum_length + 1)
                            w = textwrap.wrap(e.comment, width=120, initial_indent = indent, subsequent_indent = indent)
                            # this couldn't be done with initial_indent because there is no initial_width option
                            fout.write(" %s" % w[0].strip())
                            for i in range(1, len(w)):
                                fout.write("\n%s" % w[i])
                    fout.write(" */\n# endif\n")
        else:
            fout.write(line)
def generate_rst_output(fout):
    """Write one reStructuredText definition per error code into fout."""
    for code in sorted(err_dict.keys()):
        first = err_dict[code][0]
        entry = [':c:macro:`{}` '.format(first.name)]
        if code > 0:
            entry.append('**(0x{:x})**'.format(code))
        else:
            entry.append('({:d})'.format(code))
        if len(first.comment) > 0:
            entry.append(': {}'.format(first.comment))
        entry.append('\n\n')
        fout.write(''.join(entry))
def main():
    """
    Scan every .c/.h file under $IDF_PATH for ESP_ERR_*/ESP_OK/ESP_FAIL
    definitions, then generate either esp_err_to_name.c from the template
    (default) or a reStructuredText listing (--rst_output).
    """
    parser = argparse.ArgumentParser(description='ESP32 esp_err_to_name lookup generator for esp_err_t')
    parser.add_argument('--c_input', help='Path to the esp_err_to_name.c.in template input.', default=os.environ['IDF_PATH'] + '/components/esp32/esp_err_to_name.c.in')
    parser.add_argument('--c_output', help='Path to the esp_err_to_name.c output.', default=os.environ['IDF_PATH'] + '/components/esp32/esp_err_to_name.c')
    parser.add_argument('--rst_output', help='Generate .rst output and save it into this file')
    args = parser.parse_args()
    for root, dirnames, filenames in os.walk(os.environ['IDF_PATH']):
        for filename in fnmatch.filter(filenames, '*.[ch]'):
            full_path = os.path.join(root, filename)
            idf_path = os.path.relpath(full_path, os.environ['IDF_PATH'])
            if idf_path in ignore_files:
                continue
            # NOTE(review): "r+" opens read-write although the file is only
            # read here - plain "r" would be safer; confirm before changing
            with open(full_path, "r+") as f:
                for line in f:
                    # match also ESP_OK and ESP_FAIL because some of ESP_ERRs are referencing them
                    if re.match(r"\s*#define\s+(ESP_ERR_|ESP_OK|ESP_FAIL)", line):
                        try:
                            process(str.strip(line), idf_path)
                        except InputError as e:
                            # report the bad line and keep scanning
                            print (e)
    process_remaining_errors()
    if args.rst_output is not None:
        with open(args.rst_output, 'w') as fout:
            generate_rst_output(fout)
    else:
        with open(args.c_input, 'r') as fin, open(args.c_output, 'w') as fout:
            generate_c_output(fin, fout)
if __name__ == "__main__":
    main()
| 39.948718 | 168 | 0.594352 |
import os
import argparse
import re
import fnmatch
import string
import collections
import textwrap
ignore_files = [ 'components/mdns/test_afl_fuzz_host/esp32_compat.h' ]
priority_headers = [ 'components/esp32/include/esp_err.h' ]
err_dict = collections.defaultdict(list)
rev_err_dict = dict()
unproc_list = list()
class ErrItem:
def __init__(self, name, file, comment, rel_str = "", rel_off = 0):
self.name = name
self.file = file
self.comment = comment
self.rel_str = rel_str
self.rel_off = rel_off
def __str__(self):
ret = self.name + " from " + self.file
if (self.rel_str != ""):
ret += " is (" + self.rel_str + " + " + str(self.rel_off) + ")"
if self.comment != "":
ret += " // " + self.comment
return ret
def __cmp__(self, other):
if self.file in priority_headers and other.file not in priority_headers:
return -1
elif self.file not in priority_headers and other.file in priority_headers:
return 1
base = "_BASE"
if self.file == other.file:
if self.name.endswith(base) and not(other.name.endswith(base)):
return 1
elif not(self.name.endswith(base)) and other.name.endswith(base):
return -1
self_key = self.file + self.name
other_key = other.file + other.name
if self_key < other_key:
return -1
elif self_key > other_key:
return 1
else:
return 0
class InputError(RuntimeError):
def __init__(self, p, e):
super(InputError, self).__init__(p + ": " + e)
def process(line, idf_path):
if idf_path.endswith(".c"):
raise InputError(idf_path, "This line should be in a header file: %s" % line)
words = re.split(r' +', line, 2)
if len(words) < 2:
raise InputError(idf_path, "Error at line %s" % line)
line = ""
todo_str = words[2]
comment = ""
m = re.search(r'/\*!<(.+?(?=\*/))', todo_str)
if m:
comment = string.strip(m.group(1))
todo_str = string.strip(todo_str[:m.start()])
m = re.search(r'\((.+)\)', todo_str)
if m:
todo_str = m.group(1)
m = re.search(r'\s*(\w+)\s*\+(.+)', todo_str)
if m:
related = m.group(1)
todo_str = m.group(2)
m = re.search(r'0x([0-9A-Fa-f]+)', todo_str)
if m:
num = int(m.group(1), 16)
else:
m = re.search(r'(-?[0-9]+)', todo_str)
if m:
num = int(m.group(1), 10)
elif re.match(r'\w+', todo_str):
ed = todo_str
num = 0
else:
raise InputError(idf_path, "Cannot parse line %s" % line)
try:
related
except NameError:
err_dict[num].append(ErrItem(words[1], idf_path, comment))
rev_err_dict[words[1]] = num
else:
unproc_list.append(ErrItem(words[1], idf_path, comment, related, num))
def process_remaining_errors():
for item in unproc_list:
if item.rel_str in rev_err_dict:
base_num = rev_err_dict[item.rel_str]
base = err_dict[base_num][0]
num = base_num + item.rel_off
err_dict[num].append(ErrItem(item.name, item.file, item.comment))
rev_err_dict[item.name] = num
else:
print(item.rel_str + " referenced by " + item.name + " in " + item.file + " is unknown")
del unproc_list[:]
def path_to_include(path):
spl_path = string.split(path, os.sep)
try:
i = spl_path.index('include')
except ValueError:
return os.path.basename(path)
else:
return str(os.sep).join(spl_path[i+1:])
def print_warning(error_list, error_code):
print("[WARNING] The following errors have the same code (%d):" % error_code)
for e in error_list:
print(" " + str(e))
def max_string_width():
max = 0
for k in err_dict.keys():
for e in err_dict[k]:
x = len(e.name)
if x > max:
max = x
return max
def generate_c_output(fin, fout):
includes = set()
for k in err_dict.keys():
for e in err_dict[k]:
includes.add(path_to_include(e.file))
include_list = list(includes)
include_list.sort()
max_width = max_string_width() + 17 + 1
max_decdig = max(len(str(k)) for k in err_dict.keys())
for line in fin:
if re.match(r'@COMMENT@', line):
fout.write("//Do not edit this file because it is autogenerated by " + os.path.basename(__file__) + "\n")
elif re.match(r'@HEADERS@', line):
for i in include_list:
fout.write("#if __has_include(\"" + i + "\")\n#include \"" + i + "\"\n#endif\n")
elif re.match(r'@ERROR_ITEMS@', line):
last_file = ""
for k in sorted(err_dict.keys()):
if len(err_dict[k]) > 1:
err_dict[k].sort()
print_warning(err_dict[k], k)
for e in err_dict[k]:
if e.file != last_file:
last_file = e.file
fout.write(" // %s\n" % last_file)
table_line = (" ERR_TBL_IT(" + e.name + "), ").ljust(max_width) + "/* " + str(k).rjust(max_decdig)
fout.write("# ifdef %s\n" % e.name)
fout.write(table_line)
hexnum_length = 0
if k > 0:
hexnum = " 0x%x" % k
hexnum_length = len(hexnum)
fout.write(hexnum)
if e.comment != "":
if len(e.comment) < 50:
fout.write(" %s" % e.comment)
else:
indent = " " * (len(table_line) + hexnum_length + 1)
w = textwrap.wrap(e.comment, width=120, initial_indent = indent, subsequent_indent = indent)
fout.write(" %s" % w[0].strip())
for i in range(1, len(w)):
fout.write("\n%s" % w[i])
fout.write(" */\n# endif\n")
else:
fout.write(line)
def generate_rst_output(fout):
for k in sorted(err_dict.keys()):
v = err_dict[k][0]
fout.write(':c:macro:`{}` '.format(v.name))
if k > 0:
fout.write('**(0x{:x})**'.format(k))
else:
fout.write('({:d})'.format(k))
if len(v.comment) > 0:
fout.write(': {}'.format(v.comment))
fout.write('\n\n')
def main():
    """Scan IDF_PATH for ESP_ERR_* definitions and generate lookup output.

    Walks every ``*.c``/``*.h`` file under ``$IDF_PATH`` (except those in
    ``ignore_files``), feeds each ``#define`` of an error macro to
    ``process()``, then writes either a .rst listing or the generated
    esp_err_to_name.c, depending on the command line.
    """
    parser = argparse.ArgumentParser(description='ESP32 esp_err_to_name lookup generator for esp_err_t')
    parser.add_argument('--c_input', help='Path to the esp_err_to_name.c.in template input.', default=os.environ['IDF_PATH'] + '/components/esp32/esp_err_to_name.c.in')
    parser.add_argument('--c_output', help='Path to the esp_err_to_name.c output.', default=os.environ['IDF_PATH'] + '/components/esp32/esp_err_to_name.c')
    parser.add_argument('--rst_output', help='Generate .rst output and save it into this file')
    args = parser.parse_args()
    for root, dirnames, filenames in os.walk(os.environ['IDF_PATH']):
        for filename in fnmatch.filter(filenames, '*.[ch]'):
            full_path = os.path.join(root, filename)
            idf_path = os.path.relpath(full_path, os.environ['IDF_PATH'])
            if idf_path in ignore_files:
                continue
            with open(full_path, "r+") as f:
                for line in f:
                    # match also ESP_OK and ESP_FAIL because some of ESP_ERRs are referencing them
                    if re.match(r"\s*#define\s+(ESP_ERR_|ESP_OK|ESP_FAIL)", line):
                        try:
                            process(str.strip(line), idf_path)
                        except InputError as e:
                            # best-effort: report the bad define and keep scanning
                            print (e)
    process_remaining_errors()
    if args.rst_output is not None:
        with open(args.rst_output, 'w') as fout:
            generate_rst_output(fout)
    else:
        with open(args.c_input, 'r') as fin, open(args.c_output, 'w') as fout:
            generate_c_output(fin, fout)
# Run the generator only when executed as a script, not on import.
if __name__ == "__main__":
    main()
| true | true |
f72f87aeb2af6ecbc535d203c78b35e72e451ee6 | 40,174 | py | Python | pandas/core/indexes/interval.py | mattboggess/pandas | 5551bcf9d297ea8a0aeffb70b17ae6730e8abf89 | [
"BSD-3-Clause"
] | null | null | null | pandas/core/indexes/interval.py | mattboggess/pandas | 5551bcf9d297ea8a0aeffb70b17ae6730e8abf89 | [
"BSD-3-Clause"
] | null | null | null | pandas/core/indexes/interval.py | mattboggess/pandas | 5551bcf9d297ea8a0aeffb70b17ae6730e8abf89 | [
"BSD-3-Clause"
] | 1 | 2018-10-14T18:27:49.000Z | 2018-10-14T18:27:49.000Z | """ define the IntervalIndex """
import textwrap
import warnings
import numpy as np
from pandas.compat import add_metaclass
from pandas.core.dtypes.missing import isna
from pandas.core.dtypes.cast import find_common_type, maybe_downcast_to_dtype
from pandas.core.dtypes.common import (
ensure_platform_int,
is_list_like,
is_datetime_or_timedelta_dtype,
is_datetime64tz_dtype,
is_integer_dtype,
is_float_dtype,
is_interval_dtype,
is_object_dtype,
is_scalar,
is_float,
is_number,
is_integer)
from pandas.core.indexes.base import (
Index, ensure_index,
default_pprint, _index_shared_docs)
from pandas._libs import Timestamp, Timedelta
from pandas._libs.interval import (
Interval, IntervalMixin, IntervalTree,
)
from pandas.core.indexes.datetimes import date_range
from pandas.core.indexes.timedeltas import timedelta_range
from pandas.core.indexes.multi import MultiIndex
import pandas.core.common as com
from pandas.util._decorators import cache_readonly, Appender
from pandas.util._doctools import _WritableDoc
from pandas.util._exceptions import rewrite_exception
from pandas.core.config import get_option
from pandas.tseries.frequencies import to_offset
from pandas.tseries.offsets import DateOffset
import pandas.core.indexes.base as ibase
from pandas.core.arrays.interval import (IntervalArray,
_interval_shared_docs)
_VALID_CLOSED = {'left', 'right', 'both', 'neither'}
_index_doc_kwargs = dict(ibase._index_doc_kwargs)
_index_doc_kwargs.update(
dict(klass='IntervalIndex',
target_klass='IntervalIndex or list of Intervals',
name=textwrap.dedent("""\
name : object, optional
to be stored in the index.
"""),
))
def _get_next_label(label):
dtype = getattr(label, 'dtype', type(label))
if isinstance(label, (Timestamp, Timedelta)):
dtype = 'datetime64'
if is_datetime_or_timedelta_dtype(dtype) or is_datetime64tz_dtype(dtype):
return label + np.timedelta64(1, 'ns')
elif is_integer_dtype(dtype):
return label + 1
elif is_float_dtype(dtype):
return np.nextafter(label, np.infty)
else:
raise TypeError('cannot determine next label for type {typ!r}'
.format(typ=type(label)))
def _get_prev_label(label):
dtype = getattr(label, 'dtype', type(label))
if isinstance(label, (Timestamp, Timedelta)):
dtype = 'datetime64'
if is_datetime_or_timedelta_dtype(dtype) or is_datetime64tz_dtype(dtype):
return label - np.timedelta64(1, 'ns')
elif is_integer_dtype(dtype):
return label - 1
elif is_float_dtype(dtype):
return np.nextafter(label, -np.infty)
else:
raise TypeError('cannot determine next label for type {typ!r}'
.format(typ=type(label)))
def _get_interval_closed_bounds(interval):
"""
Given an Interval or IntervalIndex, return the corresponding interval with
closed bounds.
"""
left, right = interval.left, interval.right
if interval.open_left:
left = _get_next_label(left)
if interval.open_right:
right = _get_prev_label(right)
return left, right
def _new_IntervalIndex(cls, d):
"""
This is called upon unpickling, rather than the default which doesn't have
arguments and breaks __new__
"""
return cls.from_arrays(**d)
@Appender(_interval_shared_docs['class'] % dict(
    klass="IntervalIndex",
    summary="Immutable index of intervals that are closed on the same side.",
    name=_index_doc_kwargs['name'],
    versionadded="0.20.0",
    extra_methods="contains\n",
    examples=textwrap.dedent("""\
    Examples
    --------
    A new ``IntervalIndex`` is typically constructed using
    :func:`interval_range`:
    >>> pd.interval_range(start=0, end=5)
    IntervalIndex([(0, 1], (1, 2], (2, 3], (3, 4], (4, 5]]
                  closed='right', dtype='interval[int64]')
    It may also be constructed using one of the constructor
    methods: :meth:`IntervalIndex.from_arrays`,
    :meth:`IntervalIndex.from_breaks`, and :meth:`IntervalIndex.from_tuples`.
    See further examples in the doc strings of ``interval_range`` and the
    mentioned constructor methods.
    """),
))
@add_metaclass(_WritableDoc)
class IntervalIndex(IntervalMixin, Index):
    _typ = 'intervalindex'
    _comparables = ['name']
    _attributes = ['name', 'closed']
    # we would like our indexing holder to defer to us
    _defer_to_indexing = True
    # Immutable, so we are able to cache computations like isna in '_mask'
    _mask = None
    def __new__(cls, data, closed=None, dtype=None, copy=False,
                name=None, verify_integrity=True):
        if name is None and hasattr(data, 'name'):
            name = data.name
        with rewrite_exception("IntervalArray", cls.__name__):
            array = IntervalArray(data, closed=closed, copy=copy, dtype=dtype,
                                  verify_integrity=verify_integrity)
        return cls._simple_new(array, name)
    @classmethod
    def _simple_new(cls, array, name, closed=None):
        """
        Construct from an IntervalArray
        Parameters
        ----------
        array : IntervalArray
        name : str
            Attached as result.name
        closed : Any
            Ignored.
        """
        result = IntervalMixin.__new__(cls)
        result._data = array
        result.name = name
        result._reset_identity()
        return result
    @Appender(_index_shared_docs['_shallow_copy'])
    def _shallow_copy(self, left=None, right=None, **kwargs):
        result = self._data._shallow_copy(left=left, right=right)
        attributes = self._get_attributes_dict()
        attributes.update(kwargs)
        return self._simple_new(result, **attributes)
    @cache_readonly
    def _isnan(self):
        """Return a mask indicating if each value is NA"""
        if self._mask is None:
            self._mask = isna(self.left)
        return self._mask
    @cache_readonly
    def _engine(self):
        # lazily-built IntervalTree backing get_loc/get_indexer lookups
        return IntervalTree(self.left, self.right, closed=self.closed)
    def __contains__(self, key):
        """
        return a boolean if this key is IN the index
        We *only* accept an Interval
        Parameters
        ----------
        key : Interval
        Returns
        -------
        boolean
        """
        if not isinstance(key, Interval):
            return False
        try:
            self.get_loc(key)
            return True
        except KeyError:
            return False
    def contains(self, key):
        """
        Return a boolean indicating if the key is IN the index
        We accept / allow keys to be not *just* actual
        objects.
        Parameters
        ----------
        key : int, float, Interval
        Returns
        -------
        boolean
        """
        try:
            self.get_loc(key)
            return True
        except KeyError:
            return False
    @classmethod
    @Appender(_interval_shared_docs['from_breaks'] % _index_doc_kwargs)
    def from_breaks(cls, breaks, closed='right', name=None, copy=False,
                    dtype=None):
        with rewrite_exception("IntervalArray", cls.__name__):
            array = IntervalArray.from_breaks(breaks, closed=closed, copy=copy,
                                              dtype=dtype)
        return cls._simple_new(array, name=name)
    @classmethod
    @Appender(_interval_shared_docs['from_arrays'] % _index_doc_kwargs)
    def from_arrays(cls, left, right, closed='right', name=None, copy=False,
                    dtype=None):
        with rewrite_exception("IntervalArray", cls.__name__):
            array = IntervalArray.from_arrays(left, right, closed, copy=copy,
                                              dtype=dtype)
        return cls._simple_new(array, name=name)
    @classmethod
    @Appender(_interval_shared_docs['from_intervals'] % _index_doc_kwargs)
    def from_intervals(cls, data, closed=None, name=None, copy=False,
                       dtype=None):
        msg = ('IntervalIndex.from_intervals is deprecated and will be '
               'removed in a future version; Use IntervalIndex(...) instead')
        warnings.warn(msg, FutureWarning, stacklevel=2)
        with rewrite_exception("IntervalArray", cls.__name__):
            array = IntervalArray(data, closed=closed, copy=copy, dtype=dtype)
        if name is None and isinstance(data, cls):
            name = data.name
        return cls._simple_new(array, name=name)
    @classmethod
    @Appender(_interval_shared_docs['from_tuples'] % _index_doc_kwargs)
    def from_tuples(cls, data, closed='right', name=None, copy=False,
                    dtype=None):
        with rewrite_exception("IntervalArray", cls.__name__):
            arr = IntervalArray.from_tuples(data, closed=closed, copy=copy,
                                            dtype=dtype)
        return cls._simple_new(arr, name=name)
    @Appender(_interval_shared_docs['to_tuples'] % dict(
        return_type="Index",
        examples="""
        Examples
        --------
        >>> idx = pd.IntervalIndex.from_arrays([0, np.nan, 2], [1, np.nan, 3])
        >>> idx.to_tuples()
        Index([(0.0, 1.0), (nan, nan), (2.0, 3.0)], dtype='object')
        >>> idx.to_tuples(na_tuple=False)
        Index([(0.0, 1.0), nan, (2.0, 3.0)], dtype='object')""",
    ))
    def to_tuples(self, na_tuple=True):
        tuples = self._data.to_tuples(na_tuple=na_tuple)
        return Index(tuples)
    @cache_readonly
    def _multiindex(self):
        # MultiIndex view of (left, right); used for set ops and monotonicity
        return MultiIndex.from_arrays([self.left, self.right],
                                      names=['left', 'right'])
    @property
    def left(self):
        """
        Return the left endpoints of each Interval in the IntervalIndex as
        an Index
        """
        return self._data._left
    @property
    def right(self):
        """
        Return the right endpoints of each Interval in the IntervalIndex as
        an Index
        """
        return self._data._right
    @property
    def closed(self):
        """
        Whether the intervals are closed on the left-side, right-side, both or
        neither
        """
        return self._data._closed
    @Appender(_interval_shared_docs['set_closed'] % _index_doc_kwargs)
    def set_closed(self, closed):
        if closed not in _VALID_CLOSED:
            msg = "invalid option for 'closed': {closed}"
            raise ValueError(msg.format(closed=closed))
        # return self._shallow_copy(closed=closed)
        array = self._data.set_closed(closed)
        return self._simple_new(array, self.name)
    @property
    def length(self):
        """
        Return an Index with entries denoting the length of each Interval in
        the IntervalIndex
        """
        return self._data.length
    @property
    def size(self):
        # Avoid materializing ndarray[Interval]
        return self._data.size
    @property
    def shape(self):
        # Avoid materializing ndarray[Interval]
        return self._data.shape
    @property
    def itemsize(self):
        msg = ('IntervalIndex.itemsize is deprecated and will be removed in '
               'a future version')
        warnings.warn(msg, FutureWarning, stacklevel=2)
        # suppress the warning from the underlying left/right itemsize
        with warnings.catch_warnings():
            warnings.simplefilter('ignore')
            return self.left.itemsize + self.right.itemsize
    def __len__(self):
        return len(self.left)
    @cache_readonly
    def values(self):
        """
        Return the IntervalIndex's data as an IntervalArray.
        """
        return self._data
    @cache_readonly
    def _values(self):
        return self._data
    @cache_readonly
    def _ndarray_values(self):
        return np.array(self._data)
    def __array__(self, result=None):
        """ the array interface, return my values """
        return self._ndarray_values
    def __array_wrap__(self, result, context=None):
        # we don't want the superclass implementation
        return result
    def __reduce__(self):
        d = dict(left=self.left,
                 right=self.right)
        d.update(self._get_attributes_dict())
        return _new_IntervalIndex, (self.__class__, d), None
    @Appender(_index_shared_docs['copy'])
    def copy(self, deep=False, name=None):
        array = self._data.copy(deep=deep)
        attributes = self._get_attributes_dict()
        if name is not None:
            attributes.update(name=name)
        return self._simple_new(array, **attributes)
    @Appender(_index_shared_docs['astype'])
    def astype(self, dtype, copy=True):
        with rewrite_exception('IntervalArray', self.__class__.__name__):
            new_values = self.values.astype(dtype, copy=copy)
        if is_interval_dtype(new_values):
            return self._shallow_copy(new_values.left, new_values.right)
        return super(IntervalIndex, self).astype(dtype, copy=copy)
    @cache_readonly
    def dtype(self):
        """Return the dtype object of the underlying data"""
        return self._data.dtype
    @property
    def inferred_type(self):
        """Return a string of the type inferred from the values"""
        return 'interval'
    @Appender(Index.memory_usage.__doc__)
    def memory_usage(self, deep=False):
        # we don't use an explicit engine
        # so return the bytes here
        return (self.left.memory_usage(deep=deep) +
                self.right.memory_usage(deep=deep))
    @cache_readonly
    def mid(self):
        """
        Return the midpoint of each Interval in the IntervalIndex as an Index
        """
        return self._data.mid
    @cache_readonly
    def is_monotonic(self):
        """
        Return True if the IntervalIndex is monotonic increasing (only equal or
        increasing values), else False
        """
        return self._multiindex.is_monotonic
    @cache_readonly
    def is_monotonic_increasing(self):
        """
        Return True if the IntervalIndex is monotonic increasing (only equal or
        increasing values), else False
        """
        return self._multiindex.is_monotonic_increasing
    @cache_readonly
    def is_monotonic_decreasing(self):
        """
        Return True if the IntervalIndex is monotonic decreasing (only equal or
        decreasing values), else False
        """
        return self._multiindex.is_monotonic_decreasing
    @cache_readonly
    def is_unique(self):
        """
        Return True if the IntervalIndex contains unique elements, else False
        """
        return self._multiindex.is_unique
    @cache_readonly
    def is_non_overlapping_monotonic(self):
        return self._data.is_non_overlapping_monotonic
    @Appender(_index_shared_docs['_convert_scalar_indexer'])
    def _convert_scalar_indexer(self, key, kind=None):
        if kind == 'iloc':
            return super(IntervalIndex, self)._convert_scalar_indexer(
                key, kind=kind)
        return key
    def _maybe_cast_slice_bound(self, label, side, kind):
        return getattr(self, side)._maybe_cast_slice_bound(label, side, kind)
    @Appender(_index_shared_docs['_convert_list_indexer'])
    def _convert_list_indexer(self, keyarr, kind=None):
        """
        we are passed a list-like indexer. Return the
        indexer for matching intervals.
        """
        locs = self.get_indexer_for(keyarr)
        # we have missing values
        if (locs == -1).any():
            raise KeyError
        return locs
    def _maybe_cast_indexed(self, key):
        """
        we need to cast the key, which could be a scalar
        or an array-like to the type of our subtype
        """
        if isinstance(key, IntervalIndex):
            return key
        subtype = self.dtype.subtype
        if is_float_dtype(subtype):
            if is_integer(key):
                key = float(key)
            elif isinstance(key, (np.ndarray, Index)):
                key = key.astype('float64')
        elif is_integer_dtype(subtype):
            if is_integer(key):
                key = int(key)
        return key
    def _check_method(self, method):
        # only exact matching is implemented; reject fill methods explicitly
        if method is None:
            return
        if method in ['bfill', 'backfill', 'pad', 'ffill', 'nearest']:
            msg = 'method {method} not yet implemented for IntervalIndex'
            raise NotImplementedError(msg.format(method=method))
        raise ValueError("Invalid fill method")
    def _searchsorted_monotonic(self, label, side, exclude_label=False):
        if not self.is_non_overlapping_monotonic:
            raise KeyError('can only get slices from an IntervalIndex if '
                           'bounds are non-overlapping and all monotonic '
                           'increasing or decreasing')
        if isinstance(label, IntervalMixin):
            raise NotImplementedError
        # GH 20921: "not is_monotonic_increasing" for the second condition
        # instead of "is_monotonic_decreasing" to account for single element
        # indexes being both increasing and decreasing
        if ((side == 'left' and self.left.is_monotonic_increasing) or
                (side == 'right' and not self.left.is_monotonic_increasing)):
            sub_idx = self.right
            if self.open_right or exclude_label:
                label = _get_next_label(label)
        else:
            sub_idx = self.left
            if self.open_left or exclude_label:
                label = _get_prev_label(label)
        return sub_idx._searchsorted_monotonic(label, side)
    def _get_loc_only_exact_matches(self, key):
        if isinstance(key, Interval):
            if not self.is_unique:
                raise ValueError("cannot index with a slice Interval"
                                 " and a non-unique index")
            # TODO: this expands to a tuple index, see if we can
            # do better
            return Index(self._multiindex.values).get_loc(key)
        raise KeyError
    def _find_non_overlapping_monotonic_bounds(self, key):
        if isinstance(key, IntervalMixin):
            start = self._searchsorted_monotonic(
                key.left, 'left', exclude_label=key.open_left)
            stop = self._searchsorted_monotonic(
                key.right, 'right', exclude_label=key.open_right)
        elif isinstance(key, slice):
            # slice
            start, stop = key.start, key.stop
            if (key.step or 1) != 1:
                raise NotImplementedError("cannot slice with a slice step")
            if start is None:
                start = 0
            else:
                start = self._searchsorted_monotonic(start, 'left')
            if stop is None:
                stop = len(self)
            else:
                stop = self._searchsorted_monotonic(stop, 'right')
        else:
            # scalar or index-like
            start = self._searchsorted_monotonic(key, 'left')
            stop = self._searchsorted_monotonic(key, 'right')
        return start, stop
    def get_loc(self, key, method=None):
        """Get integer location, slice or boolean mask for requested label.
        Parameters
        ----------
        key : label
        method : {None}, optional
            * default: matches where the label is within an interval only.
        Returns
        -------
        loc : int if unique index, slice if monotonic index, else mask
        Examples
        ---------
        >>> i1, i2 = pd.Interval(0, 1), pd.Interval(1, 2)
        >>> index = pd.IntervalIndex([i1, i2])
        >>> index.get_loc(1)
        0
        You can also supply an interval or an location for a point inside an
        interval.
        >>> index.get_loc(pd.Interval(0, 2))
        array([0, 1], dtype=int64)
        >>> index.get_loc(1.5)
        1
        If a label is in several intervals, you get the locations of all the
        relevant intervals.
        >>> i3 = pd.Interval(0, 2)
        >>> overlapping_index = pd.IntervalIndex([i2, i3])
        >>> overlapping_index.get_loc(1.5)
        array([0, 1], dtype=int64)
        """
        self._check_method(method)
        original_key = key
        key = self._maybe_cast_indexed(key)
        if self.is_non_overlapping_monotonic:
            if isinstance(key, Interval):
                left = self._maybe_cast_slice_bound(key.left, 'left', None)
                right = self._maybe_cast_slice_bound(key.right, 'right', None)
                key = Interval(left, right, key.closed)
            else:
                key = self._maybe_cast_slice_bound(key, 'left', None)
            start, stop = self._find_non_overlapping_monotonic_bounds(key)
            if start is None or stop is None:
                return slice(start, stop)
            elif start + 1 == stop:
                return start
            elif start < stop:
                return slice(start, stop)
            else:
                raise KeyError(original_key)
        else:
            # use the interval tree
            if isinstance(key, Interval):
                left, right = _get_interval_closed_bounds(key)
                return self._engine.get_loc_interval(left, right)
            else:
                return self._engine.get_loc(key)
    def get_value(self, series, key):
        if com.is_bool_indexer(key):
            loc = key
        elif is_list_like(key):
            loc = self.get_indexer(key)
        elif isinstance(key, slice):
            if not (key.step is None or key.step == 1):
                raise ValueError("cannot support not-default step in a slice")
            try:
                loc = self.get_loc(key)
            except TypeError:
                # we didn't find exact intervals or are non-unique
                msg = "unable to slice with this key: {key}".format(key=key)
                raise ValueError(msg)
        else:
            loc = self.get_loc(key)
        return series.iloc[loc]
    @Appender(_index_shared_docs['get_indexer'] % _index_doc_kwargs)
    def get_indexer(self, target, method=None, limit=None, tolerance=None):
        self._check_method(method)
        target = ensure_index(target)
        target = self._maybe_cast_indexed(target)
        if self.equals(target):
            return np.arange(len(self), dtype='intp')
        if self.is_non_overlapping_monotonic:
            start, stop = self._find_non_overlapping_monotonic_bounds(target)
            start_plus_one = start + 1
            if not ((start_plus_one < stop).any()):
                return np.where(start_plus_one == stop, start, -1)
        if not self.is_unique:
            raise ValueError("cannot handle non-unique indices")
        # IntervalIndex
        if isinstance(target, IntervalIndex):
            indexer = self._get_reindexer(target)
        # non IntervalIndex
        else:
            indexer = np.concatenate([self.get_loc(i) for i in target])
        return ensure_platform_int(indexer)
    def _get_reindexer(self, target):
        """
        Return an indexer for a target IntervalIndex with self
        """
        # find the left and right indexers
        lindexer = self._engine.get_indexer(target.left.values)
        rindexer = self._engine.get_indexer(target.right.values)
        # we want to return an indexer on the intervals
        # however, our keys could provide overlapping of multiple
        # intervals, so we iterate thru the indexers and construct
        # a set of indexers
        indexer = []
        n = len(self)
        for i, (lhs, rhs) in enumerate(zip(lindexer, rindexer)):
            target_value = target[i]
            # matching on the lhs bound
            if (lhs != -1 and
                    self.closed == 'right' and
                    target_value.left == self[lhs].right):
                lhs += 1
            # matching on the rhs bound
            if (rhs != -1 and
                    self.closed == 'left' and
                    target_value.right == self[rhs].left):
                rhs -= 1
            # not found
            if lhs == -1 and rhs == -1:
                indexer.append(np.array([-1]))
            elif rhs == -1:
                indexer.append(np.arange(lhs, n))
            elif lhs == -1:
                # care about left/right closed here
                value = self[i]
                # target.closed same as self.closed
                if self.closed == target.closed:
                    if target_value.left < value.left:
                        indexer.append(np.array([-1]))
                        continue
                # target.closed == 'left'
                elif self.closed == 'right':
                    if target_value.left <= value.left:
                        indexer.append(np.array([-1]))
                        continue
                # target.closed == 'right'
                elif self.closed == 'left':
                    if target_value.left <= value.left:
                        indexer.append(np.array([-1]))
                        continue
                indexer.append(np.arange(0, rhs + 1))
            else:
                indexer.append(np.arange(lhs, rhs + 1))
        return np.concatenate(indexer)
    @Appender(_index_shared_docs['get_indexer_non_unique'] % _index_doc_kwargs)
    def get_indexer_non_unique(self, target):
        target = self._maybe_cast_indexed(ensure_index(target))
        return super(IntervalIndex, self).get_indexer_non_unique(target)
    @Appender(_index_shared_docs['where'])
    def where(self, cond, other=None):
        if other is None:
            other = self._na_value
        values = np.where(cond, self.values, other)
        return self._shallow_copy(values)
    def delete(self, loc):
        """
        Return a new IntervalIndex with passed location(-s) deleted
        Returns
        -------
        new_index : IntervalIndex
        """
        new_left = self.left.delete(loc)
        new_right = self.right.delete(loc)
        return self._shallow_copy(new_left, new_right)
    def insert(self, loc, item):
        """
        Return a new IntervalIndex inserting new item at location. Follows
        Python list.append semantics for negative values. Only Interval
        objects and NA can be inserted into an IntervalIndex
        Parameters
        ----------
        loc : int
        item : object
        Returns
        -------
        new_index : IntervalIndex
        """
        if isinstance(item, Interval):
            if item.closed != self.closed:
                raise ValueError('inserted item must be closed on the same '
                                 'side as the index')
            left_insert = item.left
            right_insert = item.right
        elif is_scalar(item) and isna(item):
            # GH 18295
            left_insert = right_insert = item
        else:
            raise ValueError('can only insert Interval objects and NA into '
                             'an IntervalIndex')
        new_left = self.left.insert(loc, left_insert)
        new_right = self.right.insert(loc, right_insert)
        return self._shallow_copy(new_left, new_right)
    def _as_like_interval_index(self, other):
        self._assert_can_do_setop(other)
        other = ensure_index(other)
        if not isinstance(other, IntervalIndex):
            msg = ('the other index needs to be an IntervalIndex too, but '
                   'was type {}').format(other.__class__.__name__)
            raise TypeError(msg)
        elif self.closed != other.closed:
            msg = ('can only do set operations between two IntervalIndex '
                   'objects that are closed on the same side')
            raise ValueError(msg)
        return other
    def _concat_same_dtype(self, to_concat, name):
        """
        assert that we all have the same .closed
        we allow a 0-len index here as well
        """
        if not len({i.closed for i in to_concat if len(i)}) == 1:
            msg = ('can only append two IntervalIndex objects '
                   'that are closed on the same side')
            raise ValueError(msg)
        return super(IntervalIndex, self)._concat_same_dtype(to_concat, name)
    @Appender(_index_shared_docs['take'] % _index_doc_kwargs)
    def take(self, indices, axis=0, allow_fill=True,
             fill_value=None, **kwargs):
        result = self._data.take(indices, axis=axis, allow_fill=allow_fill,
                                 fill_value=fill_value, **kwargs)
        attributes = self._get_attributes_dict()
        return self._simple_new(result, **attributes)
    def __getitem__(self, value):
        result = self._data[value]
        if isinstance(result, IntervalArray):
            return self._shallow_copy(result)
        else:
            # scalar
            return result
    # __repr__ associated methods are based on MultiIndex
    def _format_with_header(self, header, **kwargs):
        return header + list(self._format_native_types(**kwargs))
    def _format_native_types(self, na_rep='', quoting=None, **kwargs):
        """ actually format my specific types """
        from pandas.io.formats.format import IntervalArrayFormatter
        return IntervalArrayFormatter(values=self,
                                      na_rep=na_rep,
                                      justify='all').get_result()
    def _format_data(self, name=None):
        # TODO: integrate with categorical and make generic
        # name argument is unused here; just for compat with base / categorical
        n = len(self)
        max_seq_items = min((get_option(
            'display.max_seq_items') or n) // 10, 10)
        formatter = str
        if n == 0:
            summary = '[]'
        elif n == 1:
            first = formatter(self[0])
            summary = '[{first}]'.format(first=first)
        elif n == 2:
            first = formatter(self[0])
            last = formatter(self[-1])
            summary = '[{first}, {last}]'.format(first=first, last=last)
        else:
            if n > max_seq_items:
                n = min(max_seq_items // 2, 10)
                head = [formatter(x) for x in self[:n]]
                tail = [formatter(x) for x in self[-n:]]
                summary = '[{head} ... {tail}]'.format(
                    head=', '.join(head), tail=', '.join(tail))
            else:
                tail = [formatter(x) for x in self]
                summary = '[{tail}]'.format(tail=', '.join(tail))
        return summary + ',' + self._format_space()
    def _format_attrs(self):
        attrs = [('closed', repr(self.closed))]
        if self.name is not None:
            attrs.append(('name', default_pprint(self.name)))
        attrs.append(('dtype', "'{dtype}'".format(dtype=self.dtype)))
        return attrs
    def _format_space(self):
        space = ' ' * (len(self.__class__.__name__) + 1)
        return "\n{space}".format(space=space)
    def argsort(self, *args, **kwargs):
        # lexsort keys are applied last-to-first: primary key is left bound
        return np.lexsort((self.right, self.left))
    def equals(self, other):
        """
        Determines if two IntervalIndex objects contain the same elements
        """
        if self.is_(other):
            return True
        # if we can coerce to an II
        # then we can compare
        if not isinstance(other, IntervalIndex):
            if not is_interval_dtype(other):
                return False
            # NOTE(review): '.values' has a leading dot, so getattr can never
            # find that attribute and always falls back to `other`; this looks
            # like it was meant to be 'values' -- confirm against upstream.
            other = Index(getattr(other, '.values', other))
        return (self.left.equals(other.left) and
                self.right.equals(other.right) and
                self.closed == other.closed)
    def _setop(op_name):
        # factory producing union/intersection/... by delegating to the
        # MultiIndex view of (left, right) tuples
        def func(self, other):
            other = self._as_like_interval_index(other)
            # GH 19016: ensure set op will not return a prohibited dtype
            subtypes = [self.dtype.subtype, other.dtype.subtype]
            common_subtype = find_common_type(subtypes)
            if is_object_dtype(common_subtype):
                msg = ('can only do {op} between two IntervalIndex '
                       'objects that have compatible dtypes')
                raise TypeError(msg.format(op=op_name))
            result = getattr(self._multiindex, op_name)(other._multiindex)
            result_name = self.name if self.name == other.name else None
            # GH 19101: ensure empty results have correct dtype
            if result.empty:
                result = result.values.astype(self.dtype.subtype)
            else:
                result = result.values
            return type(self).from_tuples(result, closed=self.closed,
                                          name=result_name)
        return func
    union = _setop('union')
    intersection = _setop('intersection')
    difference = _setop('difference')
    symmetric_difference = _setop('symmetric_difference')
# TODO: arithmetic operations
# logical reductions (any/all) are meaningless for intervals, so disable them
IntervalIndex._add_logical_methods_disabled()
def _is_valid_endpoint(endpoint):
"""helper for interval_range to check if start/end are valid types"""
return any([is_number(endpoint),
isinstance(endpoint, Timestamp),
isinstance(endpoint, Timedelta),
endpoint is None])
def _is_type_compatible(a, b):
    """helper for interval_range to check type compat of start/end/freq"""
    if com._any_none(a, b):
        return True
    if is_number(a) and is_number(b):
        return True
    ts_like = isinstance(a, (Timestamp, DateOffset)) and \
        isinstance(b, (Timestamp, DateOffset))
    td_like = isinstance(a, (Timedelta, DateOffset)) and \
        isinstance(b, (Timedelta, DateOffset))
    return ts_like or td_like
def interval_range(start=None, end=None, periods=None, freq=None,
                   name=None, closed='right'):
    """
    Return a fixed frequency IntervalIndex
    Parameters
    ----------
    start : numeric or datetime-like, default None
        Left bound for generating intervals
    end : numeric or datetime-like, default None
        Right bound for generating intervals
    periods : integer, default None
        Number of periods to generate
    freq : numeric, string, or DateOffset, default None
        The length of each interval. Must be consistent with the type of start
        and end, e.g. 2 for numeric, or '5H' for datetime-like. Default is 1
        for numeric and 'D' for datetime-like.
    name : string, default None
        Name of the resulting IntervalIndex
    closed : {'left', 'right', 'both', 'neither'}, default 'right'
        Whether the intervals are closed on the left-side, right-side, both
        or neither.
    Notes
    -----
    Of the four parameters ``start``, ``end``, ``periods``, and ``freq``,
    exactly three must be specified. If ``freq`` is omitted, the resulting
    ``IntervalIndex`` will have ``periods`` linearly spaced elements between
    ``start`` and ``end``, inclusively.
    To learn more about datetime-like frequency strings, please see `this link
    <http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases>`__.
    Returns
    -------
    rng : IntervalIndex
    Examples
    --------
    Numeric ``start`` and  ``end`` is supported.
    >>> pd.interval_range(start=0, end=5)
    IntervalIndex([(0, 1], (1, 2], (2, 3], (3, 4], (4, 5]]
                  closed='right', dtype='interval[int64]')
    Additionally, datetime-like input is also supported.
    >>> pd.interval_range(start=pd.Timestamp('2017-01-01'),
                          end=pd.Timestamp('2017-01-04'))
    IntervalIndex([(2017-01-01, 2017-01-02], (2017-01-02, 2017-01-03],
                   (2017-01-03, 2017-01-04]]
                  closed='right', dtype='interval[datetime64[ns]]')
    The ``freq`` parameter specifies the frequency between the left and right.
    endpoints of the individual intervals within the ``IntervalIndex``.  For
    numeric ``start`` and ``end``, the frequency must also be numeric.
    >>> pd.interval_range(start=0, periods=4, freq=1.5)
    IntervalIndex([(0.0, 1.5], (1.5, 3.0], (3.0, 4.5], (4.5, 6.0]]
                  closed='right', dtype='interval[float64]')
    Similarly, for datetime-like ``start`` and ``end``, the frequency must be
    convertible to a DateOffset.
    >>> pd.interval_range(start=pd.Timestamp('2017-01-01'),
                          periods=3, freq='MS')
    IntervalIndex([(2017-01-01, 2017-02-01], (2017-02-01, 2017-03-01],
                   (2017-03-01, 2017-04-01]]
                  closed='right', dtype='interval[datetime64[ns]]')
    Specify ``start``, ``end``, and ``periods``; the frequency is generated
    automatically (linearly spaced).
    >>> pd.interval_range(start=0, end=6, periods=4)
    IntervalIndex([(0.0, 1.5], (1.5, 3.0], (3.0, 4.5], (4.5, 6.0]]
              closed='right',
              dtype='interval[float64]')
    The ``closed`` parameter specifies which endpoints of the individual
    intervals within the ``IntervalIndex`` are closed.
    >>> pd.interval_range(end=5, periods=4, closed='both')
    IntervalIndex([[1, 2], [2, 3], [3, 4], [4, 5]]
                  closed='both', dtype='interval[int64]')
    See Also
    --------
    IntervalIndex : an Index of intervals that are all closed on the same side.
    """
    start = com.maybe_box_datetimelike(start)
    end = com.maybe_box_datetimelike(end)
    # any non-None endpoint determines the numeric vs datetime-like branch
    endpoint = start if start is not None else end
    if freq is None and com._any_none(periods, start, end):
        freq = 1 if is_number(endpoint) else 'D'
    if com.count_not_none(start, end, periods, freq) != 3:
        raise ValueError('Of the four parameters: start, end, periods, and '
                         'freq, exactly three must be specified')
    if not _is_valid_endpoint(start):
        msg = 'start must be numeric or datetime-like, got {start}'
        raise ValueError(msg.format(start=start))
    elif not _is_valid_endpoint(end):
        msg = 'end must be numeric or datetime-like, got {end}'
        raise ValueError(msg.format(end=end))
    if is_float(periods):
        periods = int(periods)
    elif not is_integer(periods) and periods is not None:
        msg = 'periods must be a number, got {periods}'
        raise TypeError(msg.format(periods=periods))
    if freq is not None and not is_number(freq):
        try:
            freq = to_offset(freq)
        except ValueError:
            raise ValueError('freq must be numeric or convertible to '
                             'DateOffset, got {freq}'.format(freq=freq))
    # verify type compatibility
    if not all([_is_type_compatible(start, end),
                _is_type_compatible(start, freq),
                _is_type_compatible(end, freq)]):
        raise TypeError("start, end, freq need to be type compatible")
    # +1 to convert interval count to breaks count (n breaks = n-1 intervals)
    if periods is not None:
        periods += 1
    if is_number(endpoint):
        # force consistency between start/end/freq (lower end if freq skips it)
        if com._all_not_none(start, end, freq):
            end -= (end - start) % freq
        # compute the period/start/end if unspecified (at most one)
        if periods is None:
            periods = int((end - start) // freq) + 1
        elif start is None:
            start = end - (periods - 1) * freq
        elif end is None:
            end = start + (periods - 1) * freq
        breaks = np.linspace(start, end, periods)
        if all(is_integer(x) for x in com._not_none(start, end, freq)):
            # np.linspace always produces float output
            breaks = maybe_downcast_to_dtype(breaks, 'int64')
    else:
        # delegate to the appropriate range function
        if isinstance(endpoint, Timestamp):
            range_func = date_range
        else:
            range_func = timedelta_range
        breaks = range_func(start=start, end=end, periods=periods, freq=freq)
    return IntervalIndex.from_breaks(breaks, name=name, closed=closed)
| 34.543422 | 84 | 0.599218 | import textwrap
import warnings
import numpy as np
from pandas.compat import add_metaclass
from pandas.core.dtypes.missing import isna
from pandas.core.dtypes.cast import find_common_type, maybe_downcast_to_dtype
from pandas.core.dtypes.common import (
ensure_platform_int,
is_list_like,
is_datetime_or_timedelta_dtype,
is_datetime64tz_dtype,
is_integer_dtype,
is_float_dtype,
is_interval_dtype,
is_object_dtype,
is_scalar,
is_float,
is_number,
is_integer)
from pandas.core.indexes.base import (
Index, ensure_index,
default_pprint, _index_shared_docs)
from pandas._libs import Timestamp, Timedelta
from pandas._libs.interval import (
Interval, IntervalMixin, IntervalTree,
)
from pandas.core.indexes.datetimes import date_range
from pandas.core.indexes.timedeltas import timedelta_range
from pandas.core.indexes.multi import MultiIndex
import pandas.core.common as com
from pandas.util._decorators import cache_readonly, Appender
from pandas.util._doctools import _WritableDoc
from pandas.util._exceptions import rewrite_exception
from pandas.core.config import get_option
from pandas.tseries.frequencies import to_offset
from pandas.tseries.offsets import DateOffset
import pandas.core.indexes.base as ibase
from pandas.core.arrays.interval import (IntervalArray,
_interval_shared_docs)
_VALID_CLOSED = {'left', 'right', 'both', 'neither'}
_index_doc_kwargs = dict(ibase._index_doc_kwargs)
_index_doc_kwargs.update(
dict(klass='IntervalIndex',
target_klass='IntervalIndex or list of Intervals',
name=textwrap.dedent("""\
name : object, optional
to be stored in the index.
"""),
))
def _get_next_label(label):
dtype = getattr(label, 'dtype', type(label))
if isinstance(label, (Timestamp, Timedelta)):
dtype = 'datetime64'
if is_datetime_or_timedelta_dtype(dtype) or is_datetime64tz_dtype(dtype):
return label + np.timedelta64(1, 'ns')
elif is_integer_dtype(dtype):
return label + 1
elif is_float_dtype(dtype):
return np.nextafter(label, np.infty)
else:
raise TypeError('cannot determine next label for type {typ!r}'
.format(typ=type(label)))
def _get_prev_label(label):
dtype = getattr(label, 'dtype', type(label))
if isinstance(label, (Timestamp, Timedelta)):
dtype = 'datetime64'
if is_datetime_or_timedelta_dtype(dtype) or is_datetime64tz_dtype(dtype):
return label - np.timedelta64(1, 'ns')
elif is_integer_dtype(dtype):
return label - 1
elif is_float_dtype(dtype):
return np.nextafter(label, -np.infty)
else:
raise TypeError('cannot determine next label for type {typ!r}'
.format(typ=type(label)))
def _get_interval_closed_bounds(interval):
left, right = interval.left, interval.right
if interval.open_left:
left = _get_next_label(left)
if interval.open_right:
right = _get_prev_label(right)
return left, right
def _new_IntervalIndex(cls, d):
return cls.from_arrays(**d)
@Appender(_interval_shared_docs['class'] % dict(
klass="IntervalIndex",
summary="Immutable index of intervals that are closed on the same side.",
name=_index_doc_kwargs['name'],
versionadded="0.20.0",
extra_methods="contains\n",
examples=textwrap.dedent("""\
Examples
--------
A new ``IntervalIndex`` is typically constructed using
:func:`interval_range`:
>>> pd.interval_range(start=0, end=5)
IntervalIndex([(0, 1], (1, 2], (2, 3], (3, 4], (4, 5]]
closed='right', dtype='interval[int64]')
It may also be constructed using one of the constructor
methods: :meth:`IntervalIndex.from_arrays`,
:meth:`IntervalIndex.from_breaks`, and :meth:`IntervalIndex.from_tuples`.
See further examples in the doc strings of ``interval_range`` and the
mentioned constructor methods.
"""),
))
@add_metaclass(_WritableDoc)
class IntervalIndex(IntervalMixin, Index):
_typ = 'intervalindex'
_comparables = ['name']
_attributes = ['name', 'closed']
_defer_to_indexing = True
_mask = None
def __new__(cls, data, closed=None, dtype=None, copy=False,
name=None, verify_integrity=True):
if name is None and hasattr(data, 'name'):
name = data.name
with rewrite_exception("IntervalArray", cls.__name__):
array = IntervalArray(data, closed=closed, copy=copy, dtype=dtype,
verify_integrity=verify_integrity)
return cls._simple_new(array, name)
@classmethod
def _simple_new(cls, array, name, closed=None):
result = IntervalMixin.__new__(cls)
result._data = array
result.name = name
result._reset_identity()
return result
@Appender(_index_shared_docs['_shallow_copy'])
def _shallow_copy(self, left=None, right=None, **kwargs):
result = self._data._shallow_copy(left=left, right=right)
attributes = self._get_attributes_dict()
attributes.update(kwargs)
return self._simple_new(result, **attributes)
@cache_readonly
def _isnan(self):
if self._mask is None:
self._mask = isna(self.left)
return self._mask
@cache_readonly
def _engine(self):
return IntervalTree(self.left, self.right, closed=self.closed)
def __contains__(self, key):
if not isinstance(key, Interval):
return False
try:
self.get_loc(key)
return True
except KeyError:
return False
def contains(self, key):
try:
self.get_loc(key)
return True
except KeyError:
return False
@classmethod
@Appender(_interval_shared_docs['from_breaks'] % _index_doc_kwargs)
def from_breaks(cls, breaks, closed='right', name=None, copy=False,
dtype=None):
with rewrite_exception("IntervalArray", cls.__name__):
array = IntervalArray.from_breaks(breaks, closed=closed, copy=copy,
dtype=dtype)
return cls._simple_new(array, name=name)
@classmethod
@Appender(_interval_shared_docs['from_arrays'] % _index_doc_kwargs)
def from_arrays(cls, left, right, closed='right', name=None, copy=False,
dtype=None):
with rewrite_exception("IntervalArray", cls.__name__):
array = IntervalArray.from_arrays(left, right, closed, copy=copy,
dtype=dtype)
return cls._simple_new(array, name=name)
@classmethod
@Appender(_interval_shared_docs['from_intervals'] % _index_doc_kwargs)
def from_intervals(cls, data, closed=None, name=None, copy=False,
dtype=None):
msg = ('IntervalIndex.from_intervals is deprecated and will be '
'removed in a future version; Use IntervalIndex(...) instead')
warnings.warn(msg, FutureWarning, stacklevel=2)
with rewrite_exception("IntervalArray", cls.__name__):
array = IntervalArray(data, closed=closed, copy=copy, dtype=dtype)
if name is None and isinstance(data, cls):
name = data.name
return cls._simple_new(array, name=name)
@classmethod
@Appender(_interval_shared_docs['from_tuples'] % _index_doc_kwargs)
def from_tuples(cls, data, closed='right', name=None, copy=False,
dtype=None):
with rewrite_exception("IntervalArray", cls.__name__):
arr = IntervalArray.from_tuples(data, closed=closed, copy=copy,
dtype=dtype)
return cls._simple_new(arr, name=name)
@Appender(_interval_shared_docs['to_tuples'] % dict(
return_type="Index",
examples="""
Examples
--------
>>> idx = pd.IntervalIndex.from_arrays([0, np.nan, 2], [1, np.nan, 3])
>>> idx.to_tuples()
Index([(0.0, 1.0), (nan, nan), (2.0, 3.0)], dtype='object')
>>> idx.to_tuples(na_tuple=False)
Index([(0.0, 1.0), nan, (2.0, 3.0)], dtype='object')""",
))
def to_tuples(self, na_tuple=True):
tuples = self._data.to_tuples(na_tuple=na_tuple)
return Index(tuples)
@cache_readonly
def _multiindex(self):
return MultiIndex.from_arrays([self.left, self.right],
names=['left', 'right'])
@property
def left(self):
return self._data._left
@property
def right(self):
return self._data._right
@property
def closed(self):
return self._data._closed
@Appender(_interval_shared_docs['set_closed'] % _index_doc_kwargs)
def set_closed(self, closed):
if closed not in _VALID_CLOSED:
msg = "invalid option for 'closed': {closed}"
raise ValueError(msg.format(closed=closed))
array = self._data.set_closed(closed)
return self._simple_new(array, self.name)
@property
def length(self):
return self._data.length
@property
def size(self):
return self._data.size
@property
def shape(self):
return self._data.shape
@property
def itemsize(self):
msg = ('IntervalIndex.itemsize is deprecated and will be removed in '
'a future version')
warnings.warn(msg, FutureWarning, stacklevel=2)
with warnings.catch_warnings():
warnings.simplefilter('ignore')
return self.left.itemsize + self.right.itemsize
def __len__(self):
return len(self.left)
@cache_readonly
def values(self):
return self._data
@cache_readonly
def _values(self):
return self._data
@cache_readonly
def _ndarray_values(self):
return np.array(self._data)
def __array__(self, result=None):
return self._ndarray_values
def __array_wrap__(self, result, context=None):
return result
def __reduce__(self):
d = dict(left=self.left,
right=self.right)
d.update(self._get_attributes_dict())
return _new_IntervalIndex, (self.__class__, d), None
@Appender(_index_shared_docs['copy'])
def copy(self, deep=False, name=None):
array = self._data.copy(deep=deep)
attributes = self._get_attributes_dict()
if name is not None:
attributes.update(name=name)
return self._simple_new(array, **attributes)
@Appender(_index_shared_docs['astype'])
def astype(self, dtype, copy=True):
with rewrite_exception('IntervalArray', self.__class__.__name__):
new_values = self.values.astype(dtype, copy=copy)
if is_interval_dtype(new_values):
return self._shallow_copy(new_values.left, new_values.right)
return super(IntervalIndex, self).astype(dtype, copy=copy)
@cache_readonly
def dtype(self):
return self._data.dtype
@property
def inferred_type(self):
return 'interval'
@Appender(Index.memory_usage.__doc__)
def memory_usage(self, deep=False):
# we don't use an explicit engine
return (self.left.memory_usage(deep=deep) +
self.right.memory_usage(deep=deep))
@cache_readonly
def mid(self):
return self._data.mid
@cache_readonly
def is_monotonic(self):
return self._multiindex.is_monotonic
@cache_readonly
def is_monotonic_increasing(self):
return self._multiindex.is_monotonic_increasing
@cache_readonly
def is_monotonic_decreasing(self):
return self._multiindex.is_monotonic_decreasing
@cache_readonly
def is_unique(self):
return self._multiindex.is_unique
@cache_readonly
def is_non_overlapping_monotonic(self):
return self._data.is_non_overlapping_monotonic
@Appender(_index_shared_docs['_convert_scalar_indexer'])
def _convert_scalar_indexer(self, key, kind=None):
if kind == 'iloc':
return super(IntervalIndex, self)._convert_scalar_indexer(
key, kind=kind)
return key
def _maybe_cast_slice_bound(self, label, side, kind):
return getattr(self, side)._maybe_cast_slice_bound(label, side, kind)
@Appender(_index_shared_docs['_convert_list_indexer'])
def _convert_list_indexer(self, keyarr, kind=None):
locs = self.get_indexer_for(keyarr)
if (locs == -1).any():
raise KeyError
return locs
def _maybe_cast_indexed(self, key):
if isinstance(key, IntervalIndex):
return key
subtype = self.dtype.subtype
if is_float_dtype(subtype):
if is_integer(key):
key = float(key)
elif isinstance(key, (np.ndarray, Index)):
key = key.astype('float64')
elif is_integer_dtype(subtype):
if is_integer(key):
key = int(key)
return key
def _check_method(self, method):
if method is None:
return
if method in ['bfill', 'backfill', 'pad', 'ffill', 'nearest']:
msg = 'method {method} not yet implemented for IntervalIndex'
raise NotImplementedError(msg.format(method=method))
raise ValueError("Invalid fill method")
def _searchsorted_monotonic(self, label, side, exclude_label=False):
if not self.is_non_overlapping_monotonic:
raise KeyError('can only get slices from an IntervalIndex if '
'bounds are non-overlapping and all monotonic '
'increasing or decreasing')
if isinstance(label, IntervalMixin):
raise NotImplementedError
if ((side == 'left' and self.left.is_monotonic_increasing) or
(side == 'right' and not self.left.is_monotonic_increasing)):
sub_idx = self.right
if self.open_right or exclude_label:
label = _get_next_label(label)
else:
sub_idx = self.left
if self.open_left or exclude_label:
label = _get_prev_label(label)
return sub_idx._searchsorted_monotonic(label, side)
def _get_loc_only_exact_matches(self, key):
if isinstance(key, Interval):
if not self.is_unique:
raise ValueError("cannot index with a slice Interval"
" and a non-unique index")
return Index(self._multiindex.values).get_loc(key)
raise KeyError
def _find_non_overlapping_monotonic_bounds(self, key):
if isinstance(key, IntervalMixin):
start = self._searchsorted_monotonic(
key.left, 'left', exclude_label=key.open_left)
stop = self._searchsorted_monotonic(
key.right, 'right', exclude_label=key.open_right)
elif isinstance(key, slice):
start, stop = key.start, key.stop
if (key.step or 1) != 1:
raise NotImplementedError("cannot slice with a slice step")
if start is None:
start = 0
else:
start = self._searchsorted_monotonic(start, 'left')
if stop is None:
stop = len(self)
else:
stop = self._searchsorted_monotonic(stop, 'right')
else:
start = self._searchsorted_monotonic(key, 'left')
stop = self._searchsorted_monotonic(key, 'right')
return start, stop
def get_loc(self, key, method=None):
self._check_method(method)
original_key = key
key = self._maybe_cast_indexed(key)
if self.is_non_overlapping_monotonic:
if isinstance(key, Interval):
left = self._maybe_cast_slice_bound(key.left, 'left', None)
right = self._maybe_cast_slice_bound(key.right, 'right', None)
key = Interval(left, right, key.closed)
else:
key = self._maybe_cast_slice_bound(key, 'left', None)
start, stop = self._find_non_overlapping_monotonic_bounds(key)
if start is None or stop is None:
return slice(start, stop)
elif start + 1 == stop:
return start
elif start < stop:
return slice(start, stop)
else:
raise KeyError(original_key)
else:
if isinstance(key, Interval):
left, right = _get_interval_closed_bounds(key)
return self._engine.get_loc_interval(left, right)
else:
return self._engine.get_loc(key)
def get_value(self, series, key):
if com.is_bool_indexer(key):
loc = key
elif is_list_like(key):
loc = self.get_indexer(key)
elif isinstance(key, slice):
if not (key.step is None or key.step == 1):
raise ValueError("cannot support not-default step in a slice")
try:
loc = self.get_loc(key)
except TypeError:
msg = "unable to slice with this key: {key}".format(key=key)
raise ValueError(msg)
else:
loc = self.get_loc(key)
return series.iloc[loc]
@Appender(_index_shared_docs['get_indexer'] % _index_doc_kwargs)
def get_indexer(self, target, method=None, limit=None, tolerance=None):
self._check_method(method)
target = ensure_index(target)
target = self._maybe_cast_indexed(target)
if self.equals(target):
return np.arange(len(self), dtype='intp')
if self.is_non_overlapping_monotonic:
start, stop = self._find_non_overlapping_monotonic_bounds(target)
start_plus_one = start + 1
if not ((start_plus_one < stop).any()):
return np.where(start_plus_one == stop, start, -1)
if not self.is_unique:
raise ValueError("cannot handle non-unique indices")
# IntervalIndex
if isinstance(target, IntervalIndex):
indexer = self._get_reindexer(target)
# non IntervalIndex
else:
indexer = np.concatenate([self.get_loc(i) for i in target])
return ensure_platform_int(indexer)
def _get_reindexer(self, target):
# find the left and right indexers
lindexer = self._engine.get_indexer(target.left.values)
rindexer = self._engine.get_indexer(target.right.values)
# we want to return an indexer on the intervals
# however, our keys could provide overlapping of multiple
# intervals, so we iterate thru the indexers and construct
# a set of indexers
indexer = []
n = len(self)
for i, (lhs, rhs) in enumerate(zip(lindexer, rindexer)):
target_value = target[i]
# matching on the lhs bound
if (lhs != -1 and
self.closed == 'right' and
target_value.left == self[lhs].right):
lhs += 1
# matching on the lhs bound
if (rhs != -1 and
self.closed == 'left' and
target_value.right == self[rhs].left):
rhs -= 1
# not found
if lhs == -1 and rhs == -1:
indexer.append(np.array([-1]))
elif rhs == -1:
indexer.append(np.arange(lhs, n))
elif lhs == -1:
# care about left/right closed here
value = self[i]
# target.closed same as self.closed
if self.closed == target.closed:
if target_value.left < value.left:
indexer.append(np.array([-1]))
continue
# target.closed == 'left'
elif self.closed == 'right':
if target_value.left <= value.left:
indexer.append(np.array([-1]))
continue
# target.closed == 'right'
elif self.closed == 'left':
if target_value.left <= value.left:
indexer.append(np.array([-1]))
continue
indexer.append(np.arange(0, rhs + 1))
else:
indexer.append(np.arange(lhs, rhs + 1))
return np.concatenate(indexer)
@Appender(_index_shared_docs['get_indexer_non_unique'] % _index_doc_kwargs)
def get_indexer_non_unique(self, target):
target = self._maybe_cast_indexed(ensure_index(target))
return super(IntervalIndex, self).get_indexer_non_unique(target)
@Appender(_index_shared_docs['where'])
def where(self, cond, other=None):
if other is None:
other = self._na_value
values = np.where(cond, self.values, other)
return self._shallow_copy(values)
def delete(self, loc):
new_left = self.left.delete(loc)
new_right = self.right.delete(loc)
return self._shallow_copy(new_left, new_right)
def insert(self, loc, item):
if isinstance(item, Interval):
if item.closed != self.closed:
raise ValueError('inserted item must be closed on the same '
'side as the index')
left_insert = item.left
right_insert = item.right
elif is_scalar(item) and isna(item):
# GH 18295
left_insert = right_insert = item
else:
raise ValueError('can only insert Interval objects and NA into '
'an IntervalIndex')
new_left = self.left.insert(loc, left_insert)
new_right = self.right.insert(loc, right_insert)
return self._shallow_copy(new_left, new_right)
def _as_like_interval_index(self, other):
self._assert_can_do_setop(other)
other = ensure_index(other)
if not isinstance(other, IntervalIndex):
msg = ('the other index needs to be an IntervalIndex too, but '
'was type {}').format(other.__class__.__name__)
raise TypeError(msg)
elif self.closed != other.closed:
msg = ('can only do set operations between two IntervalIndex '
'objects that are closed on the same side')
raise ValueError(msg)
return other
def _concat_same_dtype(self, to_concat, name):
if not len({i.closed for i in to_concat if len(i)}) == 1:
msg = ('can only append two IntervalIndex objects '
'that are closed on the same side')
raise ValueError(msg)
return super(IntervalIndex, self)._concat_same_dtype(to_concat, name)
@Appender(_index_shared_docs['take'] % _index_doc_kwargs)
def take(self, indices, axis=0, allow_fill=True,
fill_value=None, **kwargs):
result = self._data.take(indices, axis=axis, allow_fill=allow_fill,
fill_value=fill_value, **kwargs)
attributes = self._get_attributes_dict()
return self._simple_new(result, **attributes)
def __getitem__(self, value):
result = self._data[value]
if isinstance(result, IntervalArray):
return self._shallow_copy(result)
else:
# scalar
return result
# __repr__ associated methods are based on MultiIndex
def _format_with_header(self, header, **kwargs):
return header + list(self._format_native_types(**kwargs))
def _format_native_types(self, na_rep='', quoting=None, **kwargs):
from pandas.io.formats.format import IntervalArrayFormatter
return IntervalArrayFormatter(values=self,
na_rep=na_rep,
justify='all').get_result()
def _format_data(self, name=None):
# TODO: integrate with categorical and make generic
# name argument is unused here; just for compat with base / categorical
n = len(self)
max_seq_items = min((get_option(
'display.max_seq_items') or n) // 10, 10)
formatter = str
if n == 0:
summary = '[]'
elif n == 1:
first = formatter(self[0])
summary = '[{first}]'.format(first=first)
elif n == 2:
first = formatter(self[0])
last = formatter(self[-1])
summary = '[{first}, {last}]'.format(first=first, last=last)
else:
if n > max_seq_items:
n = min(max_seq_items // 2, 10)
head = [formatter(x) for x in self[:n]]
tail = [formatter(x) for x in self[-n:]]
summary = '[{head} ... {tail}]'.format(
head=', '.join(head), tail=', '.join(tail))
else:
tail = [formatter(x) for x in self]
summary = '[{tail}]'.format(tail=', '.join(tail))
return summary + ',' + self._format_space()
def _format_attrs(self):
attrs = [('closed', repr(self.closed))]
if self.name is not None:
attrs.append(('name', default_pprint(self.name)))
attrs.append(('dtype', "'{dtype}'".format(dtype=self.dtype)))
return attrs
def _format_space(self):
space = ' ' * (len(self.__class__.__name__) + 1)
return "\n{space}".format(space=space)
def argsort(self, *args, **kwargs):
return np.lexsort((self.right, self.left))
def equals(self, other):
if self.is_(other):
return True
# if we can coerce to an II
# then we can compare
if not isinstance(other, IntervalIndex):
if not is_interval_dtype(other):
return False
other = Index(getattr(other, '.values', other))
return (self.left.equals(other.left) and
self.right.equals(other.right) and
self.closed == other.closed)
def _setop(op_name):
def func(self, other):
other = self._as_like_interval_index(other)
# GH 19016: ensure set op will not return a prohibited dtype
subtypes = [self.dtype.subtype, other.dtype.subtype]
common_subtype = find_common_type(subtypes)
if is_object_dtype(common_subtype):
msg = ('can only do {op} between two IntervalIndex '
'objects that have compatible dtypes')
raise TypeError(msg.format(op=op_name))
result = getattr(self._multiindex, op_name)(other._multiindex)
result_name = self.name if self.name == other.name else None
# GH 19101: ensure empty results have correct dtype
if result.empty:
result = result.values.astype(self.dtype.subtype)
else:
result = result.values
return type(self).from_tuples(result, closed=self.closed,
name=result_name)
return func
union = _setop('union')
intersection = _setop('intersection')
difference = _setop('difference')
symmetric_difference = _setop('symmetric_difference')
# TODO: arithmetic operations
IntervalIndex._add_logical_methods_disabled()
def _is_valid_endpoint(endpoint):
return any([is_number(endpoint),
isinstance(endpoint, Timestamp),
isinstance(endpoint, Timedelta),
endpoint is None])
def _is_type_compatible(a, b):
is_ts_compat = lambda x: isinstance(x, (Timestamp, DateOffset))
is_td_compat = lambda x: isinstance(x, (Timedelta, DateOffset))
return ((is_number(a) and is_number(b)) or
(is_ts_compat(a) and is_ts_compat(b)) or
(is_td_compat(a) and is_td_compat(b)) or
com._any_none(a, b))
def interval_range(start=None, end=None, periods=None, freq=None,
name=None, closed='right'):
start = com.maybe_box_datetimelike(start)
end = com.maybe_box_datetimelike(end)
endpoint = start if start is not None else end
if freq is None and com._any_none(periods, start, end):
freq = 1 if is_number(endpoint) else 'D'
if com.count_not_none(start, end, periods, freq) != 3:
raise ValueError('Of the four parameters: start, end, periods, and '
'freq, exactly three must be specified')
if not _is_valid_endpoint(start):
msg = 'start must be numeric or datetime-like, got {start}'
raise ValueError(msg.format(start=start))
elif not _is_valid_endpoint(end):
msg = 'end must be numeric or datetime-like, got {end}'
raise ValueError(msg.format(end=end))
if is_float(periods):
periods = int(periods)
elif not is_integer(periods) and periods is not None:
msg = 'periods must be a number, got {periods}'
raise TypeError(msg.format(periods=periods))
if freq is not None and not is_number(freq):
try:
freq = to_offset(freq)
except ValueError:
raise ValueError('freq must be numeric or convertible to '
'DateOffset, got {freq}'.format(freq=freq))
# verify type compatibility
if not all([_is_type_compatible(start, end),
_is_type_compatible(start, freq),
_is_type_compatible(end, freq)]):
raise TypeError("start, end, freq need to be type compatible")
# +1 to convert interval count to breaks count (n breaks = n-1 intervals)
if periods is not None:
periods += 1
if is_number(endpoint):
# force consistency between start/end/freq (lower end if freq skips it)
if com._all_not_none(start, end, freq):
end -= (end - start) % freq
# compute the period/start/end if unspecified (at most one)
if periods is None:
periods = int((end - start) // freq) + 1
elif start is None:
start = end - (periods - 1) * freq
elif end is None:
end = start + (periods - 1) * freq
breaks = np.linspace(start, end, periods)
if all(is_integer(x) for x in com._not_none(start, end, freq)):
# np.linspace always produces float output
breaks = maybe_downcast_to_dtype(breaks, 'int64')
else:
# delegate to the appropriate range function
if isinstance(endpoint, Timestamp):
range_func = date_range
else:
range_func = timedelta_range
breaks = range_func(start=start, end=end, periods=periods, freq=freq)
return IntervalIndex.from_breaks(breaks, name=name, closed=closed)
| true | true |
f72f87d18502022289e2800971f93cfb71acdce7 | 50 | py | Python | gateway/__init__.py | RockefellerArchiveCenter/ProjectElectronAPIGateway | 4a35c24fe5c09df4f849d1f74b45af5a49c61ac6 | [
"MIT"
] | null | null | null | gateway/__init__.py | RockefellerArchiveCenter/ProjectElectronAPIGateway | 4a35c24fe5c09df4f849d1f74b45af5a49c61ac6 | [
"MIT"
] | null | null | null | gateway/__init__.py | RockefellerArchiveCenter/ProjectElectronAPIGateway | 4a35c24fe5c09df4f849d1f74b45af5a49c61ac6 | [
"MIT"
] | null | null | null | default_app_config = 'gateway.apps.GatewayConfig'
| 25 | 49 | 0.84 | default_app_config = 'gateway.apps.GatewayConfig'
| true | true |
f72f8822d3d4ac66a54db0bfc296d706ac05a14c | 1,219 | py | Python | setup.py | duyyudus/iconviet-lottery | ade157e050a6dac468a300d37c0c67a1e92e5d30 | [
"MIT"
] | null | null | null | setup.py | duyyudus/iconviet-lottery | ade157e050a6dac468a300d37c0c67a1e92e5d30 | [
"MIT"
] | null | null | null | setup.py | duyyudus/iconviet-lottery | ade157e050a6dac468a300d37c0c67a1e92e5d30 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from setuptools import setup, find_packages
# All dependences
deps = {
'test': [],
'dev': ['iconsdk', 'tbears', 'pylint', 'autopep8', 'rope', 'black',],
}
install_requires = []
extra_requires = deps
test_requires = deps['test']
with open('README.adoc') as readme_file:
long_description = readme_file.read()
setup(
name='megaloop_lottery',
version='0.0.1',
description='A simple and incentived lottery Dapp on ICON network',
long_description=long_description,
long_description_content_type='text/asciidoc',
author='duyyudus',
author_email='duyyudus@gmail.com',
url='https://github.com/duyyudus/megaloop-lottery',
include_package_data=True,
tests_require=test_requires,
install_requires=install_requires,
extras_require=extra_requires,
license='MIT',
zip_safe=False,
keywords='Lottery Dapp',
python_requires='>=3.6',
classifiers=[
'Development Status :: 1 - Planning',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Programming Language :: Python :: 3.6',
'Operating System :: POSIX :: Linux',
],
)
| 28.348837 | 73 | 0.660377 |
from setuptools import setup, find_packages
deps = {
'test': [],
'dev': ['iconsdk', 'tbears', 'pylint', 'autopep8', 'rope', 'black',],
}
install_requires = []
extra_requires = deps
test_requires = deps['test']
with open('README.adoc') as readme_file:
long_description = readme_file.read()
setup(
name='megaloop_lottery',
version='0.0.1',
description='A simple and incentived lottery Dapp on ICON network',
long_description=long_description,
long_description_content_type='text/asciidoc',
author='duyyudus',
author_email='duyyudus@gmail.com',
url='https://github.com/duyyudus/megaloop-lottery',
include_package_data=True,
tests_require=test_requires,
install_requires=install_requires,
extras_require=extra_requires,
license='MIT',
zip_safe=False,
keywords='Lottery Dapp',
python_requires='>=3.6',
classifiers=[
'Development Status :: 1 - Planning',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Programming Language :: Python :: 3.6',
'Operating System :: POSIX :: Linux',
],
)
| true | true |
f72f888a64c2817014cb56a0375b9b4d1a93dade | 749 | py | Python | try.py | charliezjw/Neural-Signal-Decoder | fb0df09ba0314724c7c90141bd47cc8fb0201b7a | [
"MIT"
] | null | null | null | try.py | charliezjw/Neural-Signal-Decoder | fb0df09ba0314724c7c90141bd47cc8fb0201b7a | [
"MIT"
] | null | null | null | try.py | charliezjw/Neural-Signal-Decoder | fb0df09ba0314724c7c90141bd47cc8fb0201b7a | [
"MIT"
] | null | null | null | import numpy as np
import tensorflow as tf
# a = tf.placeholder(tf.int32, [None, 3])
#
# b = tf.convert_to_tensor(tf.argmax(tf.bincount(a[0])))
# b = tf.stack([b, tf.argmax(tf.bincount(a[1]))], 0)
# for i in range(2, 5):
# max_indx = tf.argmax(tf.bincount(a[i]))
# b = tf.concat([b, [max_indx]], 0)
#
# with tf.Session() as sess:
# t1 = np.asarray([[1, 1, 0], [2, 4, 4], [6, 6, 6], [5, 5, 5], [2, 7, 7]])
# t2, t3 = sess.run([b, max_indx], feed_dict={a: t1})
# print(t2)
# print(t3)
a = np.asarray(np.asarray([[1, 1, 0], [2, 4, 4], [6, 6, 6], [5, 5, 5], [2, 7, 7]]))
b = np.zeros(a.shape[0])
c = np.asarray([1, 4, 6, 7, 9])
for i in range(a.shape[0]):
b[i] = np.argmax(np.bincount(a[i]))
print(np.mean(np.equal(b, c))) | 31.208333 | 83 | 0.542056 | import numpy as np
import tensorflow as tf
a = np.asarray(np.asarray([[1, 1, 0], [2, 4, 4], [6, 6, 6], [5, 5, 5], [2, 7, 7]]))
b = np.zeros(a.shape[0])
c = np.asarray([1, 4, 6, 7, 9])
for i in range(a.shape[0]):
b[i] = np.argmax(np.bincount(a[i]))
print(np.mean(np.equal(b, c))) | true | true |
f72f896b8a657f3551e24d80c0f44854acb5a54c | 666 | py | Python | pipeline/compilers/livescript.py | jnis77diver/django-pipeline | 8bac57adae84615d9d79ad19b2b591c2e46879f9 | [
"MIT"
] | 1 | 2019-10-20T02:58:27.000Z | 2019-10-20T02:58:27.000Z | pipeline/compilers/livescript.py | jnis77diver/django-pipeline | 8bac57adae84615d9d79ad19b2b591c2e46879f9 | [
"MIT"
] | 1 | 2021-09-20T22:02:21.000Z | 2021-09-21T13:55:41.000Z | pipeline/compilers/livescript.py | jnis77diver/django-pipeline | 8bac57adae84615d9d79ad19b2b591c2e46879f9 | [
"MIT"
] | 1 | 2021-09-18T01:39:48.000Z | 2021-09-18T01:39:48.000Z | from __future__ import unicode_literals
from pipeline.conf import settings
from pipeline.compilers import SubProcessCompiler
class LiveScriptCompiler(SubProcessCompiler):
output_extension = 'js'
def match_file(self, path):
return path.endswith('.ls')
def compile_file(self, infile, outfile, outdated=False, force=False):
if not outdated and not force:
return # File doesn't need to be recompiled
command = (
settings.LIVE_SCRIPT_BINARY,
"-cp",
settings.LIVE_SCRIPT_ARGUMENTS,
infile,
)
return self.execute_command(command, stdout_captured=outfile)
| 28.956522 | 73 | 0.672673 | from __future__ import unicode_literals
from pipeline.conf import settings
from pipeline.compilers import SubProcessCompiler
class LiveScriptCompiler(SubProcessCompiler):
output_extension = 'js'
def match_file(self, path):
return path.endswith('.ls')
def compile_file(self, infile, outfile, outdated=False, force=False):
if not outdated and not force:
return
command = (
settings.LIVE_SCRIPT_BINARY,
"-cp",
settings.LIVE_SCRIPT_ARGUMENTS,
infile,
)
return self.execute_command(command, stdout_captured=outfile)
| true | true |
f72f8af5b3ccf2010b8feadf774b09fd508c9661 | 32,775 | py | Python | WhoopClient.py | lcintron/WhoopClient | 46ccc6c3e3b98f4b6c82cf8938056d72a22bd6b6 | [
"MIT"
] | null | null | null | WhoopClient.py | lcintron/WhoopClient | 46ccc6c3e3b98f4b6c82cf8938056d72a22bd6b6 | [
"MIT"
] | null | null | null | WhoopClient.py | lcintron/WhoopClient | 46ccc6c3e3b98f4b6c82cf8938056d72a22bd6b6 | [
"MIT"
] | null | null | null | import requests
import pandas as pd
import numpy as np
import configparser
from datetime import datetime
from dateutil import relativedelta, parser, rrule
from dateutil.rrule import WEEKLY
class WhoopClient:
'''A class to allow a user to login and store their authorization code,
then perform pulls using the code in order to access different types of data'''
def __init__(self,
auth_code=None,
whoop_id=None,
current_datetime=datetime.utcnow()):
self.auth_code = auth_code
self.whoop_id = whoop_id
self.current_datetime = current_datetime
self.start_datetime = None
self.all_data = None
self.all_activities = None
self.sport_dict = None
self.all_sleep = None
self.all_sleep_events = None
def reset(self):
self.auth_code = None
self.whoop_id = None
self.current_datetime = datetime.utcnow()
self.start_datetime = None
self.all_data = None
self.all_activities = None
self.sport_dict = None
self.all_sleep = None
self.all_sleep_events = None
def pull_api(self, url, df=False):
auth_code = self.auth_code
headers = {'authorization': auth_code}
pull = requests.get(url, headers=headers)
if pull.status_code == 200 and len(pull.content) > 1:
if df:
d = pd.json_normalize(pull.json())
return d
else:
return pull.json()
else:
return "no response"
def pull_sleep_main(self, sleep_id):
athlete_id = self.whoop_id
sleep = self.pull_api(
'https://api-7.whoop.com/users/{}/sleeps/{}'.format(
athlete_id, sleep_id))
main_df = pd.json_normalize(sleep)
return main_df
def pull_sleep_events(self, sleep_id):
athlete_id = self.whoop_id
sleep = self.pull_api(
'https://api-7.whoop.com/users/{}/sleeps/{}'.format(
athlete_id, sleep_id))
events_df = pd.json_normalize(sleep['events'])
events_df['id'] = sleep_id
return events_df
def get_authorization(self, user_ini):
'''
Function to get the authorization token and user id.
This must be completed before a user can query the api
'''
config = configparser.ConfigParser()
config.read(user_ini)
username = config['whoop']['username']
password = config['whoop']['password']
headers = {
"username": username,
"password": password,
"grant_type": "password",
"issueRefresh": False
}
auth = requests.post("https://api-7.whoop.com/oauth/token",
json=headers)
if auth.status_code == 200:
content = auth.json()
user_id = content['user']['id']
token = content['access_token']
start_time = content['user']['profile']['createdAt']
self.whoop_id = user_id
self.auth_code = 'bearer ' + token
self.start_datetime = start_time
print("Whoop: Authentication successful")
else:
print(
"Authentication failed - please double check your credentials")
    def get_keydata_all(self):
        '''
        This function returns a dataframe of WHOOP metrics for each day of WHOOP membership.
        In the resulting dataframe, each day is a row and contains strain, recovery, and sleep information.
        The result is cached on self.all_data; requires get_authorization to have run.
        '''
        if self.start_datetime:
            if self.all_data is not None:
                ## All data already pulled
                return self.all_data
            else:
                # Membership start as a naive UTC datetime; anchors the pull.
                start_date = parser.isoparse(
                    self.start_datetime).replace(tzinfo=None)
                end_time = 'T23:59:59.999Z'
                start_time = 'T00:00:00.000Z'
                # One-week windows from signup until "now".
                intervals = rrule.rrule(freq=WEEKLY,
                                        interval=1,
                                        until=self.current_datetime,
                                        dtstart=start_date)
                # [start, end] ISO-8601 bounds for each weekly API call.
                date_range = [[
                    d.strftime('%Y-%m-%d') + start_time,
                    (d +
                     relativedelta.relativedelta(weeks=1)).strftime('%Y-%m-%d')
                    + end_time
                ] for d in intervals]
                all_data = pd.DataFrame()
                for dates in date_range:
                    cycle_url = 'https://api-7.whoop.com/users/{}/cycles?end={}&start={}'.format(
                        self.whoop_id, dates[1], dates[0])
                    data = self.pull_api(cycle_url, df=True)
                    all_data = pd.concat([all_data, data])
                all_data.reset_index(drop=True, inplace=True)
                ## fixing the day column so it's not a list
                all_data['days'] = all_data['days'].map(lambda d: d[0])
                all_data.rename(columns={"days": 'day'}, inplace=True)
                ## Putting all time into minutes instead of milliseconds
                sleep_cols = [
                    'qualityDuration', 'needBreakdown.baseline',
                    'needBreakdown.debt', 'needBreakdown.naps',
                    'needBreakdown.strain', 'needBreakdown.total'
                ]
                for sleep_col in sleep_cols:
                    all_data['sleep.' + sleep_col] = all_data[
                        'sleep.' + sleep_col].astype(float).apply(
                            lambda x: np.nan if np.isnan(x) else x / 60000)
                ## Making nap variable
                # Sum nap durations (ms -> minutes); 0 when there are no naps.
                all_data['nap_duration'] = all_data['sleep.naps'].apply(
                    lambda x: x[0]['qualityDuration'] / 60000
                    if len(x) == 1 else (sum([
                        y['qualityDuration'] for y in x
                        if y['qualityDuration'] is not None
                    ]) / 60000 if len(x) > 1 else 0))
                all_data.drop(['sleep.naps'], axis=1, inplace=True)
                ## dropping duplicates subsetting because of list columns
                all_data.drop_duplicates(subset=['day', 'sleep.id'],
                                         inplace=True)
                self.all_data = all_data
                return all_data
        else:
            print("Please run the authorization function first")
def get_activities_all(self):
'''
Activity data is pulled through the get_keydata functions so if the data pull is present, this function
just transforms the activity column into a dataframe of activities, where each activity is a row.
If it has not been pulled, this function runs the key data function then returns the activity dataframe'''
if self.sport_dict:
sport_dict = self.sport_dict
else:
sports = self.pull_api('https://api-7.whoop.com/sports')
sport_dict = {sport['id']: sport['name'] for sport in sports}
self.sport_dict = self.sport_dict
if self.start_datetime:
## process activity data
if self.all_data is not None:
## use existing
data = self.all_data
else:
## pull all data to process activities
data = self.get_keydata_all()
## now process activities data
act_data = pd.json_normalize(
data[data['strain.workouts'].apply(len) > 0]
['strain.workouts'].apply(lambda x: x[0]))
act_data[['during.upper', 'during.lower'
]] = act_data[['during.upper',
'during.lower']].apply(pd.to_datetime)
act_data['total_minutes'] = act_data.apply(
lambda x:
(x['during.upper'] - x['during.lower']).total_seconds() / 60.0,
axis=1)
for z in range(0, 6):
act_data['zone{}_minutes'.format(
z + 1)] = act_data['zones'].apply(lambda x: x[z] / 60000.)
act_data['sport_name'] = act_data.sportId.apply(
lambda x: sport_dict[x])
act_data['day'] = act_data['during.lower'].dt.strftime('%Y-%m-%d')
act_data.drop(['zones', 'during.bounds'], axis=1, inplace=True)
act_data.drop_duplicates(inplace=True)
self.all_activities = act_data
return act_data
else:
print("Whoop: Please run the authorization function first")
    def get_sleep_all(self):
        '''
        This function returns all sleep metrics in a data frame, for the duration of user's WHOOP membership.
        Each row in the data frame represents one night of sleep.
        Durations are converted from milliseconds to minutes. The result is
        cached on self.all_sleep (with the raw 'events' column retained in the
        cache, but dropped from the returned frame).
        '''
        if self.auth_code:
            if self.all_data is not None:
                ## use existing
                data = self.all_data
            else:
                ## pull timeframe data
                data = self.get_keydata_all()
            ## getting all the sleep ids
            if self.all_sleep is not None:
                ## All sleep data already pulled
                return self.all_sleep
            else:
                # Valid sleep ids only (NaN rows are days without sleep data).
                sleep_ids = data['sleep.id'].values.tolist()
                sleep_list = [int(x) for x in sleep_ids if pd.isna(x) == False]
                all_sleep = pd.DataFrame()
                for s in sleep_list:
                    m = self.pull_sleep_main(s)
                    all_sleep = pd.concat([all_sleep, m])
                ## Cleaning sleep data
                sleep_update = [
                    'qualityDuration', 'latency', 'debtPre', 'debtPost',
                    'needFromStrain', 'sleepNeed', 'habitualSleepNeed',
                    'timeInBed', 'lightSleepDuration', 'slowWaveSleepDuration',
                    'remSleepDuration', 'wakeDuration', 'arousalTime',
                    'noDataDuration', 'creditFromNaps', 'projectedSleep'
                ]
                # Milliseconds -> minutes, preserving NaN.
                for col in sleep_update:
                    all_sleep[col] = all_sleep[col].astype(float).apply(
                        lambda x: np.nan if np.isnan(x) else x / 60000)
                all_sleep.drop(['during.bounds'], axis=1, inplace=True)
                # Cache a copy that still has 'events' (used by
                # get_sleep_events_*); the caller gets it without that column.
                self.all_sleep = all_sleep.copy(deep=True)
                all_sleep.drop(['events'], axis=1, inplace=True)
                return all_sleep
        else:
            print("Whoop: Please run the authorization function first")
    def get_sleep_events_all(self):
        '''
        This function returns all sleep events in a data frame, for the duration of user's WHOOP membership.
        Each row in the data frame represents an individual sleep event within an individual night of sleep.
        Sleep events can be joined against the sleep or main datasets by sleep id.
        All sleep times are returned in minutes. The result is cached on
        self.all_sleep_events.
        '''
        if self.auth_code:
            if self.all_data is not None:
                ## use existing
                data = self.all_data
            else:
                ## pull timeframe data
                data = self.get_keydata_all()
            ## getting all the sleep ids
            if self.all_sleep_events is not None:
                ## All sleep data already pulled
                return self.all_sleep_events
            else:
                if self.all_sleep is not None:
                    # Sleep records already cached (with raw 'events' lists):
                    # explode those instead of re-pulling from the API.
                    sleep_events = self.all_sleep[['activityId', 'events']]
                    all_sleep_events = pd.concat([
                        pd.concat([
                            pd.json_normalize(events),
                            pd.DataFrame({'id': len(events) * [sleep]})
                        ],
                                  axis=1) for events, sleep in
                        zip(sleep_events['events'], sleep_events['activityId'])
                    ])
                else:
                    # No cache: pull each sleep's events individually.
                    sleep_ids = data['sleep.id'].values.tolist()
                    sleep_list = [
                        int(x) for x in sleep_ids if pd.isna(x) == False
                    ]
                    all_sleep_events = pd.DataFrame()
                    for s in sleep_list:
                        events = self.pull_sleep_events(s)
                        all_sleep_events = pd.concat(
                            [all_sleep_events, events])
                ## Cleaning sleep events data
                all_sleep_events['during.lower'] = pd.to_datetime(
                    all_sleep_events['during.lower'])
                all_sleep_events['during.upper'] = pd.to_datetime(
                    all_sleep_events['during.upper'])
                all_sleep_events.drop(['during.bounds'], axis=1, inplace=True)
                all_sleep_events['total_minutes'] = all_sleep_events.apply(
                    lambda x: (x['during.upper'] - x['during.lower']
                               ).total_seconds() / 60.0,
                    axis=1)
                self.all_sleep_events = all_sleep_events
                return all_sleep_events
        else:
            print("Whoop: Please run the authorization function first")
    # returnType: None -> list of [date, time, hr]; "df" -> DataFrame;
    # "json" -> list of {'datetime', 'hr'} dicts
    def get_hr_all(self, returnType=None):
        '''
        This function will pull every heart rate measurement recorded for the life of WHOOP membership.
        The default return for this function is a list of lists, where each "row" contains the date, time, and hr value.
        The measurements are spaced out every ~6 seconds on average.
        To return a dataframe, set returnType="df". This will take a bit longer, but will return a data frame.
        NOTE: This api pull takes about 6 seconds per week of data ... or 1 minutes for 10 weeks of data,
        so be careful when you pull, it may take a while.
        '''
        if self.start_datetime:
            athlete_id = self.whoop_id
            # Membership start (naive UTC) anchors the weekly pull windows.
            start_date = parser.isoparse(
                self.start_datetime).replace(tzinfo=None)
            end_time = 'T23:59:59.999Z'
            start_time = 'T00:00:00.000Z'
            intervals = rrule.rrule(freq=WEEKLY,
                                    interval=1,
                                    until=self.current_datetime,
                                    dtstart=start_date)
            # [start, end] ISO bounds for each one-week API request.
            date_range = [[
                d.strftime('%Y-%m-%d') + start_time,
                (d + relativedelta.relativedelta(weeks=1)).strftime('%Y-%m-%d')
                + end_time
            ] for d in intervals]
            hr_list = []
            for dates in date_range:
                start = dates[0]
                end = dates[1]
                ul = '''https://api-7.whoop.com/users/{}/metrics/heart_rate?end={}&order=t&start={}&step=6'''.format(
                    athlete_id, end, start)
                hr_vals = self.pull_api(ul)['values']
                # Epoch milliseconds -> separate date and time parts.
                hr_values = [[
                    datetime.utcfromtimestamp(h['time'] / 1e3).date(),
                    datetime.utcfromtimestamp(h['time'] / 1e3).time(),
                    h['data']
                ] for h in hr_vals]
                hr_list.extend(hr_values)
            if returnType == "df":
                hr_df = pd.DataFrame(hr_list)
                hr_df.columns = ['date', 'time', 'hr']
                return hr_df
            elif returnType == "json":
                hr_json = [{
                    'datetime': str(h[0]) + 'T' + str(h[1]),
                    'hr': h[2]
                } for h in hr_list]
                return hr_json
            else:
                return hr_list
        else:
            print("Please run the authorization function first")
    def get_keydata_timeframe(self,
                              start,
                              end=datetime.strftime(datetime.utcnow(),
                                                    "%Y-%m-%d")):
        '''
        This function returns a dataframe of WHOOP metrics for each day in a specified time period.
        To use this function, provide a start and end date in string format as follows "YYYY-MM-DD".
        If no end date is specified, it will default to today's date.
        In the resulting dataframe, each day is a row and contains strain, recovery, and sleep information
        '''
        # NOTE(review): the `end` default above is evaluated once at
        # class-definition time, so in a long-lived process "today" goes
        # stale — consider `end=None` resolved inside the body.
        st = datetime.strptime(start, '%Y-%m-%d')
        e = datetime.strptime(end, '%Y-%m-%d')
        if st > e:
            if e > datetime.today():
                print("Please enter an end date earlier than tomorrow")
            else:
                print(
                    "Please enter a start date that is earlier than your end date"
                )
        else:
            if self.auth_code:
                end_time = 'T23:59:59.999Z'
                start_time = 'T00:00:00.000Z'
                # One-week windows covering [st, e].
                intervals = rrule.rrule(freq=WEEKLY,
                                        interval=1,
                                        until=e,
                                        dtstart=st)
                date_range = [[
                    d.strftime('%Y-%m-%d') + start_time,
                    (d +
                     relativedelta.relativedelta(weeks=1)).strftime('%Y-%m-%d')
                    + end_time
                ] for d in intervals if d <= e]
                time_data = pd.DataFrame()
                for dates in date_range:
                    cycle_url = 'https://api-7.whoop.com/users/{}/cycles?end={}&start={}'.format(
                        self.whoop_id, dates[1], dates[0])
                    data = self.pull_api(cycle_url, df=True)
                    time_data = pd.concat([time_data, data])
                time_data.reset_index(drop=True, inplace=True)
                ## fixing the day column so it's not a list
                time_data['days'] = time_data['days'].map(lambda d: d[0])
                time_data.rename(columns={"days": 'day'}, inplace=True)
                ## Putting all time into minutes instead of milliseconds
                sleep_cols = [
                    'qualityDuration', 'needBreakdown.baseline',
                    'needBreakdown.debt', 'needBreakdown.naps',
                    'needBreakdown.strain', 'needBreakdown.total'
                ]
                for sleep_col in sleep_cols:
                    time_data['sleep.' + sleep_col] = time_data[
                        'sleep.' + sleep_col].astype(float).apply(
                            lambda x: np.nan if np.isnan(x) else x / 60000)
                ## Making nap variable
                # Sum nap durations (ms -> minutes); 0 when there are no naps.
                time_data['nap_duration'] = time_data['sleep.naps'].apply(
                    lambda x: x[0]['qualityDuration'] / 60000
                    if len(x) == 1 else (sum([
                        y['qualityDuration'] for y in x
                        if y['qualityDuration'] is not None
                    ]) / 60000 if len(x) > 1 else 0))
                time_data.drop(['sleep.naps'], axis=1, inplace=True)
                ## removing duplicates
                time_data.drop_duplicates(subset=['day', 'sleep.id'],
                                          inplace=True)
                return time_data
            else:
                print("Whoop: Please run the authorization function first")
def get_activities_timeframe(self,
start,
end=datetime.strftime(datetime.utcnow(),
"%Y-%m-%d")):
'''
Activity data is pulled through the get_keydata functions so if the data pull is present, this function
just transforms the activity column into a dataframe of activities, where each activity is a row.
If it has not been pulled, this function runs the key data function then returns the activity dataframe
If no end date is specified, it will default to today's date.
'''
st = datetime.strptime(start, '%Y-%m-%d')
e = datetime.strptime(end, '%Y-%m-%d')
if st > e:
if e > datetime.today():
print("Please enter an end date earlier than tomorrow")
else:
print(
"Please enter a start date that is earlier than your end date"
)
else:
if self.auth_code:
if self.sport_dict:
sport_dict = self.sport_dict
else:
sports = self.pull_api('https://api-7.whoop.com/sports')
sport_dict = {
sport['id']: sport['name']
for sport in sports
}
self.sport_dict = self.sport_dict
## process activity data
if self.all_data is not None:
## use existing
data = self.all_data
data = data[(data.day >= start)
& (data.day <= end)].copy(deep=True)
else:
## pull timeframe data
data = self.get_keydata_timeframe(start, end)
## now process activities data
act_data = pd.json_normalize(
data[data['strain.workouts'].apply(len) > 0]
['strain.workouts'].apply(lambda x: x[0]))
act_data[['during.upper', 'during.lower'
]] = act_data[['during.upper',
'during.lower']].apply(pd.to_datetime)
act_data['total_minutes'] = act_data.apply(
lambda x: (x['during.upper'] - x['during.lower']
).total_seconds() / 60.0,
axis=1)
for z in range(0, 6):
act_data['zone{}_minutes'.format(
z +
1)] = act_data['zones'].apply(lambda x: x[z] / 60000.)
act_data['sport_name'] = act_data.sportId.apply(
lambda x: sport_dict[x])
act_data['day'] = act_data['during.lower'].dt.strftime(
'%Y-%m-%d')
act_data.drop(['zones', 'during.bounds'], axis=1, inplace=True)
act_data.drop_duplicates(inplace=True)
self.all_activities = act_data
return act_data
else:
print("Whoop: Please run the authorization function first")
    def get_sleep_timeframe(self,
                            start,
                            end=datetime.strftime(datetime.utcnow(),
                                                  "%Y-%m-%d")):
        '''
        This function returns sleep metrics in a data frame, for timeframe specified by the user.
        Each row in the data frame represents one night of sleep.
        If no end date is specified, it will default to today's date.
        All sleep times are returned in minutes.
        '''
        # NOTE(review): the `end` default above is evaluated once at
        # class-definition time, so in a long-lived process "today" goes
        # stale — consider `end=None` resolved inside the body.
        st = datetime.strptime(start, '%Y-%m-%d')
        e = datetime.strptime(end, '%Y-%m-%d')
        if st > e:
            if e > datetime.today():
                print("Whoop: Please enter an end date earlier than tomorrow")
            else:
                print(
                    "Whoop: Please enter a start date that is earlier than your end date"
                )
        else:
            if self.auth_code:
                if self.all_data is not None:
                    ## use existing, restricted to the requested window
                    data = self.all_data
                    data = data[(data.day >= start)
                                & (data.day <= end)].copy(deep=True)
                else:
                    ## pull timeframe data
                    data = self.get_keydata_timeframe(start, end)
                ## getting all the sleep ids
                sleep_ids = data['sleep.id'].values.tolist()
                sleep_list = [int(x) for x in sleep_ids if pd.isna(x) == False]
                if self.all_sleep is not None:
                    ## All sleep data already pulled so just filter
                    # Cached frame is keyed by activityId (the sleep id).
                    all_sleep = self.all_sleep
                    time_sleep = all_sleep[all_sleep.activityId.isin(
                        sleep_list)]
                    return time_sleep
                else:
                    time_sleep = pd.DataFrame()
                    for s in sleep_list:
                        m = self.pull_sleep_main(s)
                        time_sleep = pd.concat([time_sleep, m])
                    ## Cleaning sleep data
                    sleep_update = [
                        'qualityDuration', 'latency', 'debtPre', 'debtPost',
                        'needFromStrain', 'sleepNeed', 'habitualSleepNeed',
                        'timeInBed', 'lightSleepDuration',
                        'slowWaveSleepDuration', 'remSleepDuration',
                        'wakeDuration', 'arousalTime', 'noDataDuration',
                        'creditFromNaps', 'projectedSleep'
                    ]
                    # Milliseconds -> minutes, preserving NaN.
                    for col in sleep_update:
                        time_sleep[col] = time_sleep[col].astype(float).apply(
                            lambda x: np.nan if np.isnan(x) else x / 60000)
                    time_sleep.drop(['during.bounds', 'events'],
                                    axis=1,
                                    inplace=True)
                    return time_sleep
            else:
                print("Whoop: Please run the authorization function first")
def get_sleep_events_timeframe(self,
start,
end=datetime.strftime(
datetime.utcnow(), "%Y-%m-%d")):
'''
This function returns sleep events in a data frame, for the time frame specified by the user.
Each row in the data frame represents an individual sleep event within an individual night of sleep.
Sleep events can be joined against the sleep or main datasets by sleep id.
If no end date is specified, it will default to today's date.
'''
st = datetime.strptime(start, '%Y-%m-%d')
e = datetime.strptime(end, '%Y-%m-%d')
if st > e:
if e > datetime.today():
print("Whoop: Please enter an end date earlier than tomorrow")
else:
print(
"Whoop: Please enter a start date that is earlier than your end date"
)
else:
if self.auth_code:
if self.all_data is not None:
## use existing
data = self.all_data
data = data[(data.day >= start)
& (data.day <= end)].copy(deep=True)
else:
## pull timeframe data
data = self.get_keydata_timeframe(start, end)
## getting all the sleep ids
sleep_ids = data['sleep.id'].values.tolist()
sleep_list = [int(x) for x in sleep_ids if pd.isna(x) == False]
if self.all_sleep_events is not None:
## All sleep data already pulled so just filter
all_sleep_events = self.all_sleep_events
time_sleep_events = all_sleep_events[
all_sleep_events.id.isin(sleep_list)]
return time_sleep_events
else:
if self.all_sleep is not None:
sleep_events = self.all_sleep[['activityId', 'events']]
time_sleep = sleep_events[sleep_events.id.isin(
sleep_list)]
time_sleep_events = pd.concat([
pd.concat([
pd.json_normalize(events),
pd.DataFrame({'id': len(events) * [sleep]})
],
axis=1) for events, sleep in
zip(time_sleep['events'], time_sleep['activityId'])
])
else:
time_sleep_events = pd.DataFrame()
for s in sleep_list:
events = self.pull_sleep_events(s)
time_sleep_events = pd.concat(
[time_sleep_events, events])
## Cleaning sleep events data
time_sleep_events['during.lower'] = pd.to_datetime(
time_sleep_events['during.lower'])
time_sleep_events['during.upper'] = pd.to_datetime(
time_sleep_events['during.upper'])
time_sleep_events.drop(['during.bounds'],
axis=1,
inplace=True)
time_sleep_events[
'total_minutes'] = time_sleep_events.apply(
lambda x: (x['during.upper'] - x['during.lower']
).total_seconds() / 60.0,
axis=1)
return time_sleep_events
else:
print("Whoop: Please run the authorization function first")
def get_hr_timeframe(self,
start,
end=datetime.strftime(datetime.utcnow(), "%Y-%m-%d"),
returnType=None):
'''
This function will pull every heart rate measurement recorded, for the time frame specified by the user.
The default return for this function is a list of lists, where each "row" contains the date, time, and hr value.
The measurements are spaced out every ~6 seconds on average.
To return a dataframe, set df=True. This will take a bit longer, but will return a data frame.
If no end date is specified, it will default to today's date.
NOTE: This api pull takes about 6 seconds per week of data ... or 1 minutes for 10 weeks of data,
so be careful when you pull, it may take a while.
'''
st = datetime.strptime(start, '%Y-%m-%d')
e = datetime.strptime(end, '%Y-%m-%d')
if st > e:
if e > datetime.today():
print("Whoop: Please enter an end date earlier than tomorrow")
else:
print(
"Whoop: Please enter a start date that is earlier than your end date"
)
else:
if self.start_datetime:
athlete_id = self.whoop_id
start_date = parser.isoparse(
self.start_datetime).replace(tzinfo=None)
end_time = 'T23:59:59.999Z'
start_time = 'T00:00:00.000Z'
## using the st and e since it needs the datetime formatted date
intervals = rrule.rrule(freq=WEEKLY,
interval=1,
until=e,
dtstart=st)
date_range = [[
d.strftime('%Y-%m-%d') + start_time,
(d +
relativedelta.relativedelta(weeks=1)).strftime('%Y-%m-%d')
+ end_time
] for d in intervals]
hr_list = []
for dates in date_range:
start = dates[0]
end = dates[1]
ul = '''https://api-7.whoop.com/users/{}/metrics/heart_rate?end={}&order=t&start={}&step=6'''.format(
athlete_id, end, start)
hr_vals = self.pull_api(ul)['values']
hr_values = [[
str(datetime.utcfromtimestamp(h['time'] / 1e3).date()),
str(datetime.utcfromtimestamp(h['time'] / 1e3).time()),
h['data']
] for h in hr_vals]
hr_list.extend(hr_values)
if returnType == "df":
hr_df = pd.DataFrame(hr_list)
hr_df.columns = ['date', 'time', 'hr']
return hr_df
elif returnType == "json":
hr_json = [{
'datetime': str(h[0]) + 'T' + str(h[1]),
'hr': h[2]
} for h in hr_list]
return hr_json
else:
return hr_list
else:
print("Whoop: Please run the authorization function first")
| 43.993289 | 121 | 0.49251 | import requests
import pandas as pd
import numpy as np
import configparser
from datetime import datetime
from dateutil import relativedelta, parser, rrule
from dateutil.rrule import WEEKLY
class WhoopClient:
def __init__(self,
auth_code=None,
whoop_id=None,
current_datetime=datetime.utcnow()):
self.auth_code = auth_code
self.whoop_id = whoop_id
self.current_datetime = current_datetime
self.start_datetime = None
self.all_data = None
self.all_activities = None
self.sport_dict = None
self.all_sleep = None
self.all_sleep_events = None
def reset(self):
self.auth_code = None
self.whoop_id = None
self.current_datetime = datetime.utcnow()
self.start_datetime = None
self.all_data = None
self.all_activities = None
self.sport_dict = None
self.all_sleep = None
self.all_sleep_events = None
def pull_api(self, url, df=False):
auth_code = self.auth_code
headers = {'authorization': auth_code}
pull = requests.get(url, headers=headers)
if pull.status_code == 200 and len(pull.content) > 1:
if df:
d = pd.json_normalize(pull.json())
return d
else:
return pull.json()
else:
return "no response"
def pull_sleep_main(self, sleep_id):
athlete_id = self.whoop_id
sleep = self.pull_api(
'https://api-7.whoop.com/users/{}/sleeps/{}'.format(
athlete_id, sleep_id))
main_df = pd.json_normalize(sleep)
return main_df
def pull_sleep_events(self, sleep_id):
athlete_id = self.whoop_id
sleep = self.pull_api(
'https://api-7.whoop.com/users/{}/sleeps/{}'.format(
athlete_id, sleep_id))
events_df = pd.json_normalize(sleep['events'])
events_df['id'] = sleep_id
return events_df
def get_authorization(self, user_ini):
config = configparser.ConfigParser()
config.read(user_ini)
username = config['whoop']['username']
password = config['whoop']['password']
headers = {
"username": username,
"password": password,
"grant_type": "password",
"issueRefresh": False
}
auth = requests.post("https://api-7.whoop.com/oauth/token",
json=headers)
if auth.status_code == 200:
content = auth.json()
user_id = content['user']['id']
token = content['access_token']
start_time = content['user']['profile']['createdAt']
self.whoop_id = user_id
self.auth_code = 'bearer ' + token
self.start_datetime = start_time
print("Whoop: Authentication successful")
else:
print(
"Authentication failed - please double check your credentials")
def get_keydata_all(self):
if self.start_datetime:
if self.all_data is not None:
elf.all_data
else:
start_date = parser.isoparse(
self.start_datetime).replace(tzinfo=None)
end_time = 'T23:59:59.999Z'
start_time = 'T00:00:00.000Z'
intervals = rrule.rrule(freq=WEEKLY,
interval=1,
until=self.current_datetime,
dtstart=start_date)
date_range = [[
d.strftime('%Y-%m-%d') + start_time,
(d +
relativedelta.relativedelta(weeks=1)).strftime('%Y-%m-%d')
+ end_time
] for d in intervals]
all_data = pd.DataFrame()
for dates in date_range:
cycle_url = 'https://api-7.whoop.com/users/{}/cycles?end={}&start={}'.format(
self.whoop_id, dates[1], dates[0])
data = self.pull_api(cycle_url, df=True)
all_data = pd.concat([all_data, data])
all_data.reset_index(drop=True, inplace=True)
ta['days'].map(lambda d: d[0])
all_data.rename(columns={"days": 'day'}, inplace=True)
## Putting all time into minutes instead of milliseconds
sleep_cols = [
'qualityDuration', 'needBreakdown.baseline',
'needBreakdown.debt', 'needBreakdown.naps',
'needBreakdown.strain', 'needBreakdown.total'
]
for sleep_col in sleep_cols:
all_data['sleep.' + sleep_col] = all_data[
'sleep.' + sleep_col].astype(float).apply(
lambda x: np.nan if np.isnan(x) else x / 60000)
## Making nap variable
all_data['nap_duration'] = all_data['sleep.naps'].apply(
lambda x: x[0]['qualityDuration'] / 60000
if len(x) == 1 else (sum([
y['qualityDuration'] for y in x
if y['qualityDuration'] is not None
]) / 60000 if len(x) > 1 else 0))
all_data.drop(['sleep.naps'], axis=1, inplace=True)
## dropping duplicates subsetting because of list columns
all_data.drop_duplicates(subset=['day', 'sleep.id'],
inplace=True)
self.all_data = all_data
return all_data
else:
print("Please run the authorization function first")
def get_activities_all(self):
if self.sport_dict:
sport_dict = self.sport_dict
else:
sports = self.pull_api('https://api-7.whoop.com/sports')
sport_dict = {sport['id']: sport['name'] for sport in sports}
self.sport_dict = self.sport_dict
if self.start_datetime:
## process activity data
if self.all_data is not None:
## use existing
data = self.all_data
else:
## pull all data to process activities
data = self.get_keydata_all()
## now process activities data
act_data = pd.json_normalize(
data[data['strain.workouts'].apply(len) > 0]
['strain.workouts'].apply(lambda x: x[0]))
act_data[['during.upper', 'during.lower'
]] = act_data[['during.upper',
'during.lower']].apply(pd.to_datetime)
act_data['total_minutes'] = act_data.apply(
lambda x:
(x['during.upper'] - x['during.lower']).total_seconds() / 60.0,
axis=1)
for z in range(0, 6):
act_data['zone{}_minutes'.format(
z + 1)] = act_data['zones'].apply(lambda x: x[z] / 60000.)
act_data['sport_name'] = act_data.sportId.apply(
lambda x: sport_dict[x])
act_data['day'] = act_data['during.lower'].dt.strftime('%Y-%m-%d')
act_data.drop(['zones', 'during.bounds'], axis=1, inplace=True)
act_data.drop_duplicates(inplace=True)
self.all_activities = act_data
return act_data
else:
print("Whoop: Please run the authorization function first")
def get_sleep_all(self):
if self.auth_code:
if self.all_data is not None:
## use existing
data = self.all_data
else:
## pull timeframe data
data = self.get_keydata_all()
## getting all the sleep ids
if self.all_sleep is not None:
## All sleep data already pulled
return self.all_sleep
else:
sleep_ids = data['sleep.id'].values.tolist()
sleep_list = [int(x) for x in sleep_ids if pd.isna(x) == False]
all_sleep = pd.DataFrame()
for s in sleep_list:
m = self.pull_sleep_main(s)
all_sleep = pd.concat([all_sleep, m])
## Cleaning sleep data
sleep_update = [
'qualityDuration', 'latency', 'debtPre', 'debtPost',
'needFromStrain', 'sleepNeed', 'habitualSleepNeed',
'timeInBed', 'lightSleepDuration', 'slowWaveSleepDuration',
'remSleepDuration', 'wakeDuration', 'arousalTime',
'noDataDuration', 'creditFromNaps', 'projectedSleep'
]
for col in sleep_update:
all_sleep[col] = all_sleep[col].astype(float).apply(
lambda x: np.nan if np.isnan(x) else x / 60000)
all_sleep.drop(['during.bounds'], axis=1, inplace=True)
self.all_sleep = all_sleep.copy(deep=True)
all_sleep.drop(['events'], axis=1, inplace=True)
return all_sleep
else:
print("Whoop: Please run the authorization function first")
def get_sleep_events_all(self):
if self.auth_code:
if self.all_data is not None:
## use existing
data = self.all_data
else:
## pull timeframe data
data = self.get_keydata_all()
## getting all the sleep ids
if self.all_sleep_events is not None:
## All sleep data already pulled
return self.all_sleep_events
else:
if self.all_sleep is not None:
sleep_events = self.all_sleep[['activityId', 'events']]
all_sleep_events = pd.concat([
pd.concat([
pd.json_normalize(events),
pd.DataFrame({'id': len(events) * [sleep]})
],
axis=1) for events, sleep in
zip(sleep_events['events'], sleep_events['activityId'])
])
else:
sleep_ids = data['sleep.id'].values.tolist()
sleep_list = [
int(x) for x in sleep_ids if pd.isna(x) == False
]
all_sleep_events = pd.DataFrame()
for s in sleep_list:
events = self.pull_sleep_events(s)
all_sleep_events = pd.concat(
[all_sleep_events, events])
## Cleaning sleep events data
all_sleep_events['during.lower'] = pd.to_datetime(
all_sleep_events['during.lower'])
all_sleep_events['during.upper'] = pd.to_datetime(
all_sleep_events['during.upper'])
all_sleep_events.drop(['during.bounds'], axis=1, inplace=True)
all_sleep_events['total_minutes'] = all_sleep_events.apply(
lambda x: (x['during.upper'] - x['during.lower']
).total_seconds() / 60.0,
axis=1)
self.all_sleep_events = all_sleep_events
return all_sleep_events
else:
print("Whoop: Please run the authorization function first")
#returnTYpe = df, json
def get_hr_all(self, returnType=None):
if self.start_datetime:
athlete_id = self.whoop_id
start_date = parser.isoparse(
self.start_datetime).replace(tzinfo=None)
end_time = 'T23:59:59.999Z'
start_time = 'T00:00:00.000Z'
intervals = rrule.rrule(freq=WEEKLY,
interval=1,
until=self.current_datetime,
dtstart=start_date)
date_range = [[
d.strftime('%Y-%m-%d') + start_time,
(d + relativedelta.relativedelta(weeks=1)).strftime('%Y-%m-%d')
+ end_time
] for d in intervals]
hr_list = []
for dates in date_range:
start = dates[0]
end = dates[1]
ul = '''https://api-7.whoop.com/users/{}/metrics/heart_rate?end={}&order=t&start={}&step=6'''.format(
athlete_id, end, start)
hr_vals = self.pull_api(ul)['values']
hr_values = [[
datetime.utcfromtimestamp(h['time'] / 1e3).date(),
datetime.utcfromtimestamp(h['time'] / 1e3).time(),
h['data']
] for h in hr_vals]
hr_list.extend(hr_values)
if returnType == "df":
hr_df = pd.DataFrame(hr_list)
hr_df.columns = ['date', 'time', 'hr']
return hr_df
elif returnType == "json":
hr_json = [{
'datetime': str(h[0]) + 'T' + str(h[1]),
'hr': h[2]
} for h in hr_list]
return hr_json
else:
return hr_list
else:
print("Please run the authorization function first")
def get_keydata_timeframe(self,
start,
end=datetime.strftime(datetime.utcnow(),
"%Y-%m-%d")):
st = datetime.strptime(start, '%Y-%m-%d')
e = datetime.strptime(end, '%Y-%m-%d')
if st > e:
if e > datetime.today():
print("Please enter an end date earlier than tomorrow")
else:
print(
"Please enter a start date that is earlier than your end date"
)
else:
if self.auth_code:
end_time = 'T23:59:59.999Z'
start_time = 'T00:00:00.000Z'
intervals = rrule.rrule(freq=WEEKLY,
interval=1,
until=e,
dtstart=st)
date_range = [[
d.strftime('%Y-%m-%d') + start_time,
(d +
relativedelta.relativedelta(weeks=1)).strftime('%Y-%m-%d')
+ end_time
] for d in intervals if d <= e]
time_data = pd.DataFrame()
for dates in date_range:
cycle_url = 'https://api-7.whoop.com/users/{}/cycles?end={}&start={}'.format(
self.whoop_id, dates[1], dates[0])
data = self.pull_api(cycle_url, df=True)
time_data = pd.concat([time_data, data])
time_data.reset_index(drop=True, inplace=True)
## fixing the day column so it's not a list
time_data['days'] = time_data['days'].map(lambda d: d[0])
time_data.rename(columns={"days": 'day'}, inplace=True)
alityDuration', 'needBreakdown.baseline',
'needBreakdown.debt', 'needBreakdown.naps',
'needBreakdown.strain', 'needBreakdown.total'
]
for sleep_col in sleep_cols:
time_data['sleep.' + sleep_col] = time_data[
'sleep.' + sleep_col].astype(float).apply(
lambda x: np.nan if np.isnan(x) else x / 60000)
_data['nap_duration'] = time_data['sleep.naps'].apply(
lambda x: x[0]['qualityDuration'] / 60000
if len(x) == 1 else (sum([
y['qualityDuration'] for y in x
if y['qualityDuration'] is not None
]) / 60000 if len(x) > 1 else 0))
time_data.drop(['sleep.naps'], axis=1, inplace=True)
_data.drop_duplicates(subset=['day', 'sleep.id'],
inplace=True)
return time_data
else:
print("Whoop: Please run the authorization function first")
def get_activities_timeframe(self,
start,
end=datetime.strftime(datetime.utcnow(),
"%Y-%m-%d")):
st = datetime.strptime(start, '%Y-%m-%d')
e = datetime.strptime(end, '%Y-%m-%d')
if st > e:
if e > datetime.today():
print("Please enter an end date earlier than tomorrow")
else:
print(
"Please enter a start date that is earlier than your end date"
)
else:
if self.auth_code:
if self.sport_dict:
sport_dict = self.sport_dict
else:
sports = self.pull_api('https://api-7.whoop.com/sports')
sport_dict = {
sport['id']: sport['name']
for sport in sports
}
self.sport_dict = self.sport_dict
f.all_data is not None:
data = self.all_data
data = data[(data.day >= start)
& (data.day <= end)].copy(deep=True)
else:
data = self.get_keydata_timeframe(start, end)
d.json_normalize(
data[data['strain.workouts'].apply(len) > 0]
['strain.workouts'].apply(lambda x: x[0]))
act_data[['during.upper', 'during.lower'
]] = act_data[['during.upper',
'during.lower']].apply(pd.to_datetime)
act_data['total_minutes'] = act_data.apply(
lambda x: (x['during.upper'] - x['during.lower']
).total_seconds() / 60.0,
axis=1)
for z in range(0, 6):
act_data['zone{}_minutes'.format(
z +
1)] = act_data['zones'].apply(lambda x: x[z] / 60000.)
act_data['sport_name'] = act_data.sportId.apply(
lambda x: sport_dict[x])
act_data['day'] = act_data['during.lower'].dt.strftime(
'%Y-%m-%d')
act_data.drop(['zones', 'during.bounds'], axis=1, inplace=True)
act_data.drop_duplicates(inplace=True)
self.all_activities = act_data
return act_data
else:
print("Whoop: Please run the authorization function first")
def get_sleep_timeframe(self,
start,
end=datetime.strftime(datetime.utcnow(),
"%Y-%m-%d")):
st = datetime.strptime(start, '%Y-%m-%d')
e = datetime.strptime(end, '%Y-%m-%d')
if st > e:
if e > datetime.today():
print("Whoop: Please enter an end date earlier than tomorrow")
else:
print(
"Whoop: Please enter a start date that is earlier than your end date"
)
else:
if self.auth_code:
if self.all_data is not None:
data = self.all_data
data = data[(data.day >= start)
& (data.day <= end)].copy(deep=True)
else:
data = self.get_keydata_timeframe(start, end)
= data['sleep.id'].values.tolist()
sleep_list = [int(x) for x in sleep_ids if pd.isna(x) == False]
if self.all_sleep is not None:
p
time_sleep = all_sleep[all_sleep.activityId.isin(
sleep_list)]
return time_sleep
else:
time_sleep = pd.DataFrame()
for s in sleep_list:
m = self.pull_sleep_main(s)
time_sleep = pd.concat([time_sleep, m])
sleep_update = [
'qualityDuration', 'latency', 'debtPre', 'debtPost',
'needFromStrain', 'sleepNeed', 'habitualSleepNeed',
'timeInBed', 'lightSleepDuration',
'slowWaveSleepDuration', 'remSleepDuration',
'wakeDuration', 'arousalTime', 'noDataDuration',
'creditFromNaps', 'projectedSleep'
]
for col in sleep_update:
time_sleep[col] = time_sleep[col].astype(float).apply(
lambda x: np.nan if np.isnan(x) else x / 60000)
time_sleep.drop(['during.bounds', 'events'],
axis=1,
inplace=True)
return time_sleep
else:
print("Whoop: Please run the authorization function first")
def get_sleep_events_timeframe(self,
start,
end=datetime.strftime(
datetime.utcnow(), "%Y-%m-%d")):
st = datetime.strptime(start, '%Y-%m-%d')
e = datetime.strptime(end, '%Y-%m-%d')
if st > e:
if e > datetime.today():
print("Whoop: Please enter an end date earlier than tomorrow")
else:
print(
"Whoop: Please enter a start date that is earlier than your end date"
)
else:
if self.auth_code:
if self.all_data is not None:
data = self.all_data
data = data[(data.day >= start)
& (data.day <= end)].copy(deep=True)
else:
data = self.get_keydata_timeframe(start, end)
= data['sleep.id'].values.tolist()
sleep_list = [int(x) for x in sleep_ids if pd.isna(x) == False]
if self.all_sleep_events is not None:
ll_sleep_events
time_sleep_events = all_sleep_events[
all_sleep_events.id.isin(sleep_list)]
return time_sleep_events
else:
if self.all_sleep is not None:
sleep_events = self.all_sleep[['activityId', 'events']]
time_sleep = sleep_events[sleep_events.id.isin(
sleep_list)]
time_sleep_events = pd.concat([
pd.concat([
pd.json_normalize(events),
pd.DataFrame({'id': len(events) * [sleep]})
],
axis=1) for events, sleep in
zip(time_sleep['events'], time_sleep['activityId'])
])
else:
time_sleep_events = pd.DataFrame()
for s in sleep_list:
events = self.pull_sleep_events(s)
time_sleep_events = pd.concat(
[time_sleep_events, events])
eep_events['during.lower'] = pd.to_datetime(
time_sleep_events['during.lower'])
time_sleep_events['during.upper'] = pd.to_datetime(
time_sleep_events['during.upper'])
time_sleep_events.drop(['during.bounds'],
axis=1,
inplace=True)
time_sleep_events[
'total_minutes'] = time_sleep_events.apply(
lambda x: (x['during.upper'] - x['during.lower']
).total_seconds() / 60.0,
axis=1)
return time_sleep_events
else:
print("Whoop: Please run the authorization function first")
def get_hr_timeframe(self,
start,
end=datetime.strftime(datetime.utcnow(), "%Y-%m-%d"),
returnType=None):
st = datetime.strptime(start, '%Y-%m-%d')
e = datetime.strptime(end, '%Y-%m-%d')
if st > e:
if e > datetime.today():
print("Whoop: Please enter an end date earlier than tomorrow")
else:
print(
"Whoop: Please enter a start date that is earlier than your end date"
)
else:
if self.start_datetime:
athlete_id = self.whoop_id
start_date = parser.isoparse(
self.start_datetime).replace(tzinfo=None)
end_time = 'T23:59:59.999Z'
start_time = 'T00:00:00.000Z'
interval=1,
until=e,
dtstart=st)
date_range = [[
d.strftime('%Y-%m-%d') + start_time,
(d +
relativedelta.relativedelta(weeks=1)).strftime('%Y-%m-%d')
+ end_time
] for d in intervals]
hr_list = []
for dates in date_range:
start = dates[0]
end = dates[1]
ul = '''https://api-7.whoop.com/users/{}/metrics/heart_rate?end={}&order=t&start={}&step=6'''.format(
athlete_id, end, start)
hr_vals = self.pull_api(ul)['values']
hr_values = [[
str(datetime.utcfromtimestamp(h['time'] / 1e3).date()),
str(datetime.utcfromtimestamp(h['time'] / 1e3).time()),
h['data']
] for h in hr_vals]
hr_list.extend(hr_values)
if returnType == "df":
hr_df = pd.DataFrame(hr_list)
hr_df.columns = ['date', 'time', 'hr']
return hr_df
elif returnType == "json":
hr_json = [{
'datetime': str(h[0]) + 'T' + str(h[1]),
'hr': h[2]
} for h in hr_list]
return hr_json
else:
return hr_list
else:
print("Whoop: Please run the authorization function first")
| true | true |
f72f8c8616ebfc02596b74d7d800b811e4d3bb3f | 18,406 | py | Python | third_party/texar-0.2.0/examples/bert/utils/data_utils.py | swidi/poemo-generation | 3a349ac3a6fc3e82b24410013bced60a24c2d8bf | [
"MIT"
] | null | null | null | third_party/texar-0.2.0/examples/bert/utils/data_utils.py | swidi/poemo-generation | 3a349ac3a6fc3e82b24410013bced60a24c2d8bf | [
"MIT"
] | null | null | null | third_party/texar-0.2.0/examples/bert/utils/data_utils.py | swidi/poemo-generation | 3a349ac3a6fc3e82b24410013bced60a24c2d8bf | [
"MIT"
] | null | null | null | """
This is the Data Loading Pipeline for Sentence Classifier Task from
https://github.com/google-research/bert/blob/master/run_classifier.py
"""
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import csv
import collections
import sys
sys.path.append(os.path.dirname(__file__))
import tokenization
import tensorflow as tf
class InputExample():
    """A single training/test example for simple sequence classification."""

    def __init__(self, guid, text_a, text_b=None, label=None):
        """Constructs a InputExample.

        Args:
            guid: Unique id for the example.
            text_a: string. The untokenized text of the first sequence.
                For single sequence tasks, only this sequence is required.
            text_b: (Optional) string. The untokenized text of the second
                sequence; only needed for sequence-pair tasks.
            label: (Optional) string. The label of the example; set for
                train and dev examples, absent for test examples.
        """
        self.guid = guid
        self.text_a, self.text_b = text_a, text_b
        self.label = label
class InputFeatures():
    """A single set of features of data (padded ids, mask, segments, label)."""

    def __init__(self, input_ids, input_mask, segment_ids, label_id):
        """Store the padded id/mask/segment arrays and the label id."""
        self.input_ids, self.input_mask = input_ids, input_mask
        self.segment_ids, self.label_id = segment_ids, label_id
class DataProcessor(object):
    """Base class for data converters for sequence classification data sets.

    Subclasses implement the four getters below; `_read_tsv` is a shared
    file-reading helper.
    """

    def get_train_examples(self, data_dir):
        """Gets a collection of `InputExample`s for the train set."""
        raise NotImplementedError()

    def get_dev_examples(self, data_dir):
        """Gets a collection of `InputExample`s for the dev set."""
        raise NotImplementedError()

    def get_test_examples(self, data_dir):
        """Gets a collection of `InputExample`s for prediction."""
        raise NotImplementedError()

    def get_labels(self):
        """Gets the list of labels for this data set."""
        raise NotImplementedError()

    @classmethod
    def _read_tsv(cls, input_file, quotechar=None):
        """Reads a tab separated value file into a list of row lists."""
        with tf.gfile.Open(input_file, "r") as f:
            return list(csv.reader(f, delimiter="\t", quotechar=quotechar))
class SSTProcessor(DataProcessor):
    """Processor for the SST-2 data set (GLUE version)."""

    def get_train_examples(self, data_dir):
        """See base class."""
        return self._create_examples(
            self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")

    def get_dev_examples(self, data_dir):
        """See base class."""
        return self._create_examples(
            self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")

    def get_test_examples(self, data_dir):
        """See base class."""
        return self._create_examples(
            self._read_tsv(os.path.join(data_dir, "test.tsv")), "test")

    def get_labels(self):
        """See base class."""
        return ["0", "1"]

    def _create_examples(self, lines, set_type):
        """Creates examples for the training and dev sets."""
        examples = []
        if set_type in ('train', 'dev'):
            for idx, line in enumerate(lines):
                if idx == 0:  # skip the header row
                    continue
                examples.append(
                    InputExample(
                        guid="%s-%s" % (set_type, idx),
                        text_a=tokenization.convert_to_unicode(line[0]),
                        text_b=None,  # single-sentence task
                        label=tokenization.convert_to_unicode(line[1])))
        if set_type == 'test':
            for idx, line in enumerate(lines):
                if idx == 0:  # skip the header row
                    continue
                examples.append(
                    InputExample(
                        guid="%s-%s" % (set_type, idx),
                        text_a=tokenization.convert_to_unicode(line[1]),
                        text_b=None,
                        label='0'))  # arbitrary placeholder label
        return examples
class XnliProcessor(DataProcessor):
    """Processor for the XNLI data set."""

    def __init__(self):
        # XNLI ships many languages; this processor reads Chinese.
        self.language = "zh"

    def get_train_examples(self, data_dir):
        """See base class."""
        lines = self._read_tsv(
            os.path.join(data_dir, "multinli",
                         "multinli.train.%s.tsv" % self.language))
        examples = []
        for idx, line in enumerate(lines):
            if idx == 0:  # header row
                continue
            label = tokenization.convert_to_unicode(line[2])
            # XNLI uses "contradictory" where the canonical label set says
            # "contradiction"; normalize the spelling.
            if label == tokenization.convert_to_unicode("contradictory"):
                label = tokenization.convert_to_unicode("contradiction")
            examples.append(
                InputExample(guid="train-%d" % idx,
                             text_a=tokenization.convert_to_unicode(line[0]),
                             text_b=tokenization.convert_to_unicode(line[1]),
                             label=label))
        return examples

    def get_dev_examples(self, data_dir):
        """See base class."""
        lines = self._read_tsv(os.path.join(data_dir, "xnli.dev.tsv"))
        examples = []
        for idx, line in enumerate(lines):
            if idx == 0:  # header row
                continue
            # The dev file mixes languages; keep only the configured one.
            language = tokenization.convert_to_unicode(line[0])
            if language != tokenization.convert_to_unicode(self.language):
                continue
            examples.append(
                InputExample(guid="dev-%d" % idx,
                             text_a=tokenization.convert_to_unicode(line[6]),
                             text_b=tokenization.convert_to_unicode(line[7]),
                             label=tokenization.convert_to_unicode(line[1])))
        return examples

    def get_labels(self):
        """See base class."""
        return ["contradiction", "entailment", "neutral"]
class MnliProcessor(DataProcessor):
    """Processor for the MultiNLI data set (GLUE version)."""

    def get_train_examples(self, data_dir):
        """See base class."""
        return self._create_examples(
            self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")

    def get_dev_examples(self, data_dir):
        """See base class."""
        return self._create_examples(
            self._read_tsv(os.path.join(data_dir, "dev_matched.tsv")),
            "dev_matched")

    def get_test_examples(self, data_dir):
        """See base class."""
        return self._create_examples(
            self._read_tsv(os.path.join(data_dir, "test_matched.tsv")),
            "test")

    def get_labels(self):
        """See base class."""
        return ["contradiction", "entailment", "neutral"]

    def _create_examples(self, lines, set_type):
        """Creates examples for the training and dev sets."""
        examples = []
        for idx, line in enumerate(lines):
            if idx == 0:  # header row
                continue
            guid = "%s-%s" % (set_type,
                              tokenization.convert_to_unicode(line[0]))
            # Test data ships without gold labels; use a fixed placeholder.
            label = ("contradiction" if set_type == "test"
                     else tokenization.convert_to_unicode(line[-1]))
            examples.append(
                InputExample(guid=guid,
                             text_a=tokenization.convert_to_unicode(line[8]),
                             text_b=tokenization.convert_to_unicode(line[9]),
                             label=label))
        return examples
class MrpcProcessor(DataProcessor):
    """Processor for the MRPC data set (GLUE version)."""

    def get_train_examples(self, data_dir):
        """See base class."""
        return self._create_examples(
            self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")

    def get_dev_examples(self, data_dir):
        """See base class."""
        return self._create_examples(
            self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")

    def get_test_examples(self, data_dir):
        """See base class."""
        return self._create_examples(
            self._read_tsv(os.path.join(data_dir, "test.tsv")), "test")

    def get_labels(self):
        """See base class."""
        return ["0", "1"]

    def _create_examples(self, lines, set_type):
        """Creates examples for the training and dev sets."""
        examples = []
        for idx, line in enumerate(lines):
            if idx == 0:  # header row
                continue
            # Test data ships without gold labels; use a fixed placeholder.
            label = ("0" if set_type == "test"
                     else tokenization.convert_to_unicode(line[0]))
            examples.append(
                InputExample(guid="%s-%s" % (set_type, idx),
                             text_a=tokenization.convert_to_unicode(line[3]),
                             text_b=tokenization.convert_to_unicode(line[4]),
                             label=label))
        return examples
class ColaProcessor(DataProcessor):
    """Processor for the CoLA data set (GLUE version)."""

    def get_train_examples(self, data_dir):
        """See base class."""
        return self._create_examples(
            self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")

    def get_dev_examples(self, data_dir):
        """See base class."""
        return self._create_examples(
            self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")

    def get_test_examples(self, data_dir):
        """See base class."""
        return self._create_examples(
            self._read_tsv(os.path.join(data_dir, "test.tsv")), "test")

    def get_labels(self):
        """See base class."""
        return ["0", "1"]

    def _create_examples(self, lines, set_type):
        """Creates examples for the training and dev sets."""
        examples = []
        for idx, line in enumerate(lines):
            # Only the test split carries a header row.
            if set_type == "test" and idx == 0:
                continue
            if set_type == "test":
                text_a = tokenization.convert_to_unicode(line[1])
                label = "0"  # placeholder; test data has no gold labels
            else:
                text_a = tokenization.convert_to_unicode(line[3])
                label = tokenization.convert_to_unicode(line[1])
            examples.append(
                InputExample(guid="%s-%s" % (set_type, idx),
                             text_a=text_a,
                             text_b=None,
                             label=label))
        return examples
def convert_single_example(ex_index, example, label_list, max_seq_length,
                           tokenizer):
    """Converts a single `InputExample` into a single `InputFeatures`.

    Args:
        ex_index: Index of the example; negative values enable the debug
            logging block below.
        example: The `InputExample` to convert.
        label_list: List of all label strings; position defines the label id.
        max_seq_length: Total sequence length after special tokens/padding.
        tokenizer: WordPiece tokenizer providing `tokenize` and
            `convert_tokens_to_ids`.

    Returns:
        An `InputFeatures` with ids, mask and segment ids all padded to
        exactly `max_seq_length`.
    """
    # Label string -> integer id, ordered by position in label_list.
    label_map = {}
    for (i, label) in enumerate(label_list):
        label_map[label] = i

    tokens_a = tokenizer.tokenize(example.text_a)
    tokens_b = None
    if example.text_b:
        tokens_b = tokenizer.tokenize(example.text_b)

    if tokens_b:
        # Modifies `tokens_a` and `tokens_b` in place so that the total
        # length is less than the specified length.
        # Account for [CLS], [SEP], [SEP] with "- 3"
        _truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)
    else:
        # Account for [CLS] and [SEP] with "- 2"
        if len(tokens_a) > max_seq_length - 2:
            tokens_a = tokens_a[0:(max_seq_length - 2)]

    # The convention rule is:
    # (a) For sequence pairs:
    #  tokens:      [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
    #  segment_ids: 0     0  0    0    0     0        0 0     1  1  1  1  1 1
    # (b) For single sequences:
    #  tokens:      [CLS] the dog is hairy . [SEP]
    #  segment_ids: 0     0   0   0  0     0 0
    #
    # Where "segment_ids" are used to indicate whether this is the first
    # sequence or the second sequence. The embedding vectors for `type=0` and
    # `type=1` were learned during pre-training and are added to the wordpiece
    # embedding vector (and position vector). This is not *strictly* necessary
    # since the [SEP] token unambiguously separates the sequences, but it makes
    # it easier for the model to learn the concept of sequences.
    #
    # For classification tasks, the first vector (corresponding to [CLS]) is
    # used as as the "sentence vector". Note that this only makes sense because
    # the entire model is fine-tuned.
    tokens = []
    segment_ids = []
    tokens.append("[CLS]")
    segment_ids.append(0)
    for token in tokens_a:
        tokens.append(token)
        segment_ids.append(0)
    tokens.append("[SEP]")
    segment_ids.append(0)

    if tokens_b:
        for token in tokens_b:
            tokens.append(token)
            segment_ids.append(1)
        tokens.append("[SEP]")
        segment_ids.append(1)

    input_ids = tokenizer.convert_tokens_to_ids(tokens)

    # The mask has 1 for real tokens and 0 for padding tokens. Only real
    # tokens are attended to.
    input_mask = [1] * len(input_ids)

    # Zero-pad up to the sequence length.
    while len(input_ids) < max_seq_length:
        input_ids.append(0)
        input_mask.append(0)
        segment_ids.append(0)

    assert len(input_ids) == max_seq_length
    assert len(input_mask) == max_seq_length
    assert len(segment_ids) == max_seq_length

    label_id = label_map[example.label]
    # here we disable the verbose printing of the data; `ex_index` is never
    # negative in normal use, so this block is effectively switched off.
    if ex_index < 0:
        tf.logging.info("*** Example ***")
        tf.logging.info("guid: %s" % (example.guid))
        tf.logging.info("tokens: %s" % " ".join(
            [tokenization.printable_text(x) for x in tokens]))
        tf.logging.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
        tf.logging.info("input_ids length: %d" % len(input_ids))
        tf.logging.info("input_mask: %s" %\
            " ".join([str(x) for x in input_mask]))
        tf.logging.info("segment_ids: %s" %\
            " ".join([str(x) for x in segment_ids]))
        tf.logging.info("label: %s (id = %d)" % (example.label, label_id))

    feature = InputFeatures(input_ids=input_ids,
                            input_mask=input_mask,
                            segment_ids=segment_ids,
                            label_id=label_id)
    return feature
def file_based_convert_examples_to_features(
        examples, label_list, max_seq_length, tokenizer, output_file):
    """Convert a set of `InputExample`s to a TFRecord file.

    Args:
        examples: Iterable of `InputExample`s to serialize.
        label_list: List of all label strings (defines label ids).
        max_seq_length: Fixed sequence length for every serialized feature.
        tokenizer: Tokenizer passed through to `convert_single_example`.
        output_file: Path of the TFRecord file to write.
    """
    def create_int_feature(values):
        # Hoisted out of the loop: the helper does not depend on loop state,
        # so there is no reason to re-define it per example.
        return tf.train.Feature(
            int64_list=tf.train.Int64List(value=list(values)))

    writer = tf.python_io.TFRecordWriter(output_file)
    try:
        for (ex_index, example) in enumerate(examples):
            feature = convert_single_example(ex_index, example, label_list,
                                             max_seq_length, tokenizer)
            features = collections.OrderedDict()
            features["input_ids"] = create_int_feature(feature.input_ids)
            features["input_mask"] = create_int_feature(feature.input_mask)
            features["segment_ids"] = create_int_feature(feature.segment_ids)
            features["label_ids"] = create_int_feature([feature.label_id])
            tf_example = tf.train.Example(
                features=tf.train.Features(feature=features))
            writer.write(tf_example.SerializeToString())
    finally:
        # Bug fix: the original never closed the writer, which can leave the
        # final records unflushed on disk.
        writer.close()
def _truncate_seq_pair(tokens_a, tokens_b, max_length):
"""Truncates a sequence pair in place to the maximum length."""
# This is a simple heuristic which will always truncate the longer sequence
# one token at a time. This makes more sense than truncating an equal
# percent of tokens from each, since if one sequence is very short then
# each token that's truncated likely contains more information than a
# longer sequence.
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_length:
break
if len(tokens_a) > len(tokens_b):
tokens_a.pop()
else:
tokens_b.pop()
def prepare_TFRecord_data(processor, tokenizer,
                          data_dir, max_seq_length, output_dir):
    """Serialize train/dev/test splits to TFRecord files.

    Args:
        processor: Data preprocessor; must define get_labels and the
            get_train/dev/test_examples methods.
        tokenizer: The sentence tokenizer (generally a SentencePiece model).
        data_dir: The input data directory.
        max_seq_length: Max sequence length.
        output_dir: The directory to save the TFRecords in.
    """
    label_list = processor.get_labels()

    # (example getter, output record name) for each split, written in the
    # same train -> eval -> predict order as before.
    splits = [
        (processor.get_train_examples, "train.tf_record"),
        (processor.get_dev_examples, "eval.tf_record"),
        (processor.get_test_examples, "predict.tf_record"),
    ]
    for fetch_examples, record_name in splits:
        examples = fetch_examples(data_dir)
        record_file = os.path.join(output_dir, record_name)
        file_based_convert_examples_to_features(
            examples, label_list, max_seq_length, tokenizer, record_file)
| 38.10766 | 80 | 0.60703 |
import os
import csv
import collections
import sys
sys.path.append(os.path.dirname(__file__))
import tokenization
import tensorflow as tf
class InputExample():
def __init__(self, guid, text_a, text_b=None, label=None):
self.guid = guid
self.text_a = text_a
self.text_b = text_b
self.label = label
class InputFeatures():
def __init__(self, input_ids, input_mask, segment_ids, label_id):
self.input_ids = input_ids
self.input_mask = input_mask
self.segment_ids = segment_ids
self.label_id = label_id
class DataProcessor(object):
def get_train_examples(self, data_dir):
raise NotImplementedError()
def get_dev_examples(self, data_dir):
raise NotImplementedError()
def get_test_examples(self, data_dir):
raise NotImplementedError()
def get_labels(self):
raise NotImplementedError()
@classmethod
def _read_tsv(cls, input_file, quotechar=None):
with tf.gfile.Open(input_file, "r") as f:
reader = csv.reader(f, delimiter="\t", quotechar=quotechar)
lines = []
for line in reader:
lines.append(line)
return lines
class SSTProcessor(DataProcessor):
def get_train_examples(self, data_dir):
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
def get_test_examples(self, data_dir):
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "test.tsv")), "test")
def get_labels(self):
return ["0", "1"]
def _create_examples(self, lines, set_type):
examples = []
if set_type == 'train' or set_type == 'dev':
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "%s-%s" % (set_type, i)
text_a = tokenization.convert_to_unicode(line[0])
text_b = None
label = tokenization.convert_to_unicode(line[1])
examples.append(InputExample(guid=guid, text_a=text_a,
text_b=text_b, label=label))
if set_type == 'test':
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "%s-%s" % (set_type, i)
text_a = tokenization.convert_to_unicode(line[1])
# Single sentence classification, text_b doesn't exist
text_b = None
label = '0'
examples.append(InputExample(guid=guid, text_a=text_a,
text_b=text_b, label=label))
return examples
class XnliProcessor(DataProcessor):
def __init__(self):
self.language = "zh"
def get_train_examples(self, data_dir):
lines = self._read_tsv(
os.path.join(data_dir, "multinli",
"multinli.train.%s.tsv" % self.language))
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "train-%d" % (i)
text_a = tokenization.convert_to_unicode(line[0])
text_b = tokenization.convert_to_unicode(line[1])
label = tokenization.convert_to_unicode(line[2])
if label == tokenization.convert_to_unicode("contradictory"):
label = tokenization.convert_to_unicode("contradiction")
examples.append(InputExample(guid=guid, text_a=text_a,
text_b=text_b, label=label))
return examples
def get_dev_examples(self, data_dir):
lines = self._read_tsv(os.path.join(data_dir, "xnli.dev.tsv"))
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "dev-%d" % (i)
language = tokenization.convert_to_unicode(line[0])
if language != tokenization.convert_to_unicode(self.language):
continue
text_a = tokenization.convert_to_unicode(line[6])
text_b = tokenization.convert_to_unicode(line[7])
label = tokenization.convert_to_unicode(line[1])
examples.append(InputExample(guid=guid, text_a=text_a,
text_b=text_b, label=label))
return examples
def get_labels(self):
return ["contradiction", "entailment", "neutral"]
class MnliProcessor(DataProcessor):
def get_train_examples(self, data_dir):
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "dev_matched.tsv")),
"dev_matched")
def get_test_examples(self, data_dir):
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "test_matched.tsv")),
"test")
def get_labels(self):
return ["contradiction", "entailment", "neutral"]
def _create_examples(self, lines, set_type):
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "%s-%s" % (set_type,
tokenization.convert_to_unicode(line[0]))
text_a = tokenization.convert_to_unicode(line[8])
text_b = tokenization.convert_to_unicode(line[9])
if set_type == "test":
label = "contradiction"
else:
label = tokenization.convert_to_unicode(line[-1])
examples.append(InputExample(guid=guid, text_a=text_a,
text_b=text_b, label=label))
return examples
class MrpcProcessor(DataProcessor):
def get_train_examples(self, data_dir):
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "train.tsv")),
"train")
def get_dev_examples(self, data_dir):
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "dev.tsv")),
"dev")
def get_test_examples(self, data_dir):
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "test.tsv")),
"test")
def get_labels(self):
return ["0", "1"]
def _create_examples(self, lines, set_type):
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "%s-%s" % (set_type, i)
text_a = tokenization.convert_to_unicode(line[3])
text_b = tokenization.convert_to_unicode(line[4])
if set_type == "test":
label = "0"
else:
label = tokenization.convert_to_unicode(line[0])
examples.append(InputExample(guid=guid, text_a=text_a,
text_b=text_b, label=label))
return examples
class ColaProcessor(DataProcessor):
def get_train_examples(self, data_dir):
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "train.tsv")),
"train")
def get_dev_examples(self, data_dir):
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "dev.tsv")),
"dev")
def get_test_examples(self, data_dir):
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "test.tsv")),
"test")
def get_labels(self):
return ["0", "1"]
def _create_examples(self, lines, set_type):
examples = []
for (i, line) in enumerate(lines):
if set_type == "test" and i == 0:
continue
guid = "%s-%s" % (set_type, i)
if set_type == "test":
text_a = tokenization.convert_to_unicode(line[1])
label = "0"
else:
text_a = tokenization.convert_to_unicode(line[3])
label = tokenization.convert_to_unicode(line[1])
examples.append(InputExample(guid=guid, text_a=text_a,
text_b=None, label=label))
return examples
def convert_single_example(ex_index, example, label_list, max_seq_length,
tokenizer):
label_map = {}
for (i, label) in enumerate(label_list):
label_map[label] = i
tokens_a = tokenizer.tokenize(example.text_a)
tokens_b = None
if example.text_b:
tokens_b = tokenizer.tokenize(example.text_b)
if tokens_b:
_truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)
else:
if len(tokens_a) > max_seq_length - 2:
tokens_a = tokens_a[0:(max_seq_length - 2)]
s.append(0)
for token in tokens_a:
tokens.append(token)
segment_ids.append(0)
tokens.append("[SEP]")
segment_ids.append(0)
if tokens_b:
for token in tokens_b:
tokens.append(token)
segment_ids.append(1)
tokens.append("[SEP]")
segment_ids.append(1)
input_ids = tokenizer.convert_tokens_to_ids(tokens)
input_mask = [1] * len(input_ids)
while len(input_ids) < max_seq_length:
input_ids.append(0)
input_mask.append(0)
segment_ids.append(0)
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
label_id = label_map[example.label]
if ex_index < 0:
tf.logging.info("*** Example ***")
tf.logging.info("guid: %s" % (example.guid))
tf.logging.info("tokens: %s" % " ".join(
[tokenization.printable_text(x) for x in tokens]))
tf.logging.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
tf.logging.info("input_ids length: %d" % len(input_ids))
tf.logging.info("input_mask: %s" %\
" ".join([str(x) for x in input_mask]))
tf.logging.info("segment_ids: %s" %\
" ".join([str(x) for x in segment_ids]))
tf.logging.info("label: %s (id = %d)" % (example.label, label_id))
feature = InputFeatures(input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids,
label_id=label_id)
return feature
def file_based_convert_examples_to_features(
examples, label_list, max_seq_length, tokenizer, output_file):
writer = tf.python_io.TFRecordWriter(output_file)
for (ex_index, example) in enumerate(examples):
feature = convert_single_example(ex_index, example, label_list,
max_seq_length, tokenizer)
def create_int_feature(values):
return tf.train.Feature(
int64_list=tf.train.Int64List(value=list(values)))
features = collections.OrderedDict()
features["input_ids"] = create_int_feature(feature.input_ids)
features["input_mask"] = create_int_feature(feature.input_mask)
features["segment_ids"] = create_int_feature(feature.segment_ids)
features["label_ids"] = create_int_feature([feature.label_id])
tf_example = tf.train.Example(
features=tf.train.Features(feature=features))
writer.write(tf_example.SerializeToString())
def _truncate_seq_pair(tokens_a, tokens_b, max_length):
# longer sequence.
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_length:
break
if len(tokens_a) > len(tokens_b):
tokens_a.pop()
else:
tokens_b.pop()
def prepare_TFRecord_data(processor, tokenizer,
data_dir, max_seq_length, output_dir):
label_list = processor.get_labels()
train_examples = processor.get_train_examples(data_dir)
train_file = os.path.join(output_dir, "train.tf_record")
file_based_convert_examples_to_features(
train_examples, label_list, max_seq_length,
tokenizer, train_file)
eval_examples = processor.get_dev_examples(data_dir)
eval_file = os.path.join(output_dir, "eval.tf_record")
file_based_convert_examples_to_features(
eval_examples, label_list,
max_seq_length, tokenizer, eval_file)
test_examples = processor.get_test_examples(data_dir)
test_file = os.path.join(output_dir, "predict.tf_record")
file_based_convert_examples_to_features(
test_examples, label_list,
max_seq_length, tokenizer, test_file)
| true | true |
f72f8d11340df1f1d9eb56840c1b60800b76a5a8 | 827 | py | Python | Calculator.py | EdvinAlvarado/Code-Wars | a3a06a44cda004052b5c0930f3693678c5c92e21 | [
"BSD-2-Clause"
] | null | null | null | Calculator.py | EdvinAlvarado/Code-Wars | a3a06a44cda004052b5c0930f3693678c5c92e21 | [
"BSD-2-Clause"
] | null | null | null | Calculator.py | EdvinAlvarado/Code-Wars | a3a06a44cda004052b5c0930f3693678c5c92e21 | [
"BSD-2-Clause"
] | null | null | null |
class Calculator(object):
    """Evaluates arithmetic expressions given as space-separated tokens.

    Supports ``+ - * /`` with standard precedence (``*`` and ``/`` bind
    tighter), each precedence level evaluated left to right, e.g.
    ``evaluate("2 / 2 + 3 * 4 - 6")`` -> ``7.0``.
    """

    def evaluate(self, string):
        """Evaluate `string` (tokens separated by single spaces).

        Integer tokens parse as int, tokens containing ``'.'`` as float;
        division always yields a float. Returns the final value.
        """
        # Tokenize: all-digit tokens -> int, tokens with a '.' -> float,
        # everything else (the operators) stays a string.
        # (Debug print() calls from the original were removed.)
        cmd = [int(s) if s.isdigit() else s for s in string.split(" ")]
        cmd = [float(s) if isinstance(s, str) and '.' in s else s
               for s in cmd]
        # Two reduction passes: first collapse every * and / (left to
        # right), then every + and -.
        for ops in (('*', '/'), ('+', '-')):
            while any(tok in ops for tok in cmd):
                for pos, tok in enumerate(cmd):
                    if tok not in ops:
                        continue
                    left, right = cmd[pos - 1], cmd[pos + 1]
                    if tok == '*':
                        result = left * right
                    elif tok == '/':
                        result = left / right
                    elif tok == '+':
                        result = left + right
                    else:
                        result = left - right
                    # Replace "left op right" with the result and rescan.
                    cmd[pos - 1] = result
                    del cmd[pos:pos + 2]
                    break
        return cmd[0]
class Calculator(object):
    """Evaluates a space-separated arithmetic expression, */ before +-."""

    def evaluate(self, string):
        """Return the value of *string* (e.g. ``"2 + 3 * 4"`` -> ``14``)."""
        # Echo the raw expression and its tokenised form (kept from original).
        print(string)
        tokens = []
        for raw in string.split(" "):
            if raw.isdigit():
                tokens.append(int(raw))
            elif "." in raw:
                tokens.append(float(raw))
            else:
                tokens.append(raw)
        print(tokens)
        # Two passes: multiplicative operators first, then additive ones.
        for level in (("*", "/"), ("+", "-")):
            while any(tok in level for tok in tokens):
                position = next(
                    i for i, tok in enumerate(tokens) if tok in level)
                left, right = tokens[position - 1], tokens[position + 1]
                symbol = tokens[position]
                if symbol == "*":
                    value = left * right
                elif symbol == "/":
                    value = left / right
                elif symbol == "+":
                    value = left + right
                else:
                    value = left - right
                # Replace "left symbol right" with the computed value.
                tokens[position - 1:position + 2] = [value]
        return tokens[0]
f72f8d12e338c378aeef5bdcbc21e4ce4e2a2aa1 | 7,335 | py | Python | src/vmware/azext_vmware/_help.py | sanmishra18/azure-cli-extensions | 05499b7931a1fe4cd4536a6b83fa4f8f13663996 | [
"MIT"
] | 1 | 2021-04-22T09:20:58.000Z | 2021-04-22T09:20:58.000Z | src/vmware/azext_vmware/_help.py | sanmishra18/azure-cli-extensions | 05499b7931a1fe4cd4536a6b83fa4f8f13663996 | [
"MIT"
] | 1 | 2020-07-30T06:44:01.000Z | 2020-07-30T06:44:01.000Z | src/vmware/azext_vmware/_help.py | Juliehzl/azure-cli-extensions | b0b33f4d45c2e4c50ece782851291d967e1f36e2 | [
"MIT"
] | 1 | 2020-11-09T17:17:42.000Z | 2020-11-09T17:17:42.000Z | # coding=utf-8
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from knack.help_files import helps # pylint: disable=unused-import
helps['vmware'] = """
type: group
short-summary: Commands to manage Azure VMware Solution.
"""
helps['vmware private-cloud'] = """
type: group
short-summary: Commands to manage private clouds.
"""
helps['vmware cluster'] = """
type: group
short-summary: Commands to manage clusters in a private cloud.
"""
helps['vmware authorization'] = """
type: group
short-summary: Commands to manage the authorizations of an ExpressRoute Circuit for a private cloud.
"""
helps['vmware hcx-enterprise-site'] = """
type: group
short-summary: Commands to manage HCX Enterprise Sites in a private cloud.
"""
helps['vmware location'] = """
type: group
short-summary: Commands to check availability by location.
"""
helps['vmware datastore'] = """
type: group
short-summary: Commands to manage a datastore in a private cloud cluster.
"""
helps['vmware cluster create'] = """
type: command
short-summary: Create a cluster in a private cloud. The maximum number of clusters is 4.
"""
helps['vmware cluster delete'] = """
type: command
short-summary: Delete a cluster in a private cloud.
"""
helps['vmware cluster list'] = """
type: command
short-summary: List clusters in a private cloud.
"""
helps['vmware cluster show'] = """
type: command
short-summary: Show details of a cluster in a private cloud.
"""
helps['vmware cluster update'] = """
type: command
short-summary: Update a cluster in a private cloud.
"""
helps['vmware private-cloud addidentitysource'] = """
type: command
short-summary: Add a vCenter Single Sign On Identity Source to a private cloud.
"""
helps['vmware private-cloud create'] = """
type: command
short-summary: Create a private cloud.
"""
helps['vmware private-cloud delete'] = """
type: command
short-summary: Delete a private cloud.
"""
helps['vmware private-cloud deleteidentitysource'] = """
type: command
short-summary: Delete a vCenter Single Sign On Identity Source for a private cloud.
"""
helps['vmware private-cloud list'] = """
type: command
short-summary: List the private clouds.
"""
helps['vmware private-cloud listadmincredentials'] = """
type: command
short-summary: List the admin credentials for the private cloud.
"""
helps['vmware private-cloud show'] = """
type: command
short-summary: Show details of a private cloud.
"""
helps['vmware private-cloud update'] = """
type: command
short-summary: Update a private cloud.
"""
helps['vmware private-cloud rotate-vcenter-password'] = """
type: command
short-summary: Rotate the vCenter password.
examples:
- name: Rotate the vCenter password.
text: az vmware private-cloud rotate-vcenter-password --resource-group MyResourceGroup --private-cloud MyPrivateCloud
"""
helps['vmware private-cloud rotate-nsxt-password'] = """
type: command
short-summary: Rotate the NSX-T Manager password.
examples:
- name: Rotate the NSX-T Manager password.
text: az vmware private-cloud rotate-nsxt-password --resource-group MyResourceGroup --private-cloud MyPrivateCloud
"""
helps['vmware authorization create'] = """
type: command
short-summary: Create an authorization for an ExpressRoute Circuit in a private cloud.
"""
helps['vmware authorization list'] = """
type: command
short-summary: List authorizations for an ExpressRoute Circuit in a private cloud.
"""
helps['vmware authorization show'] = """
type: command
short-summary: Show details of an authorization for an ExpressRoute Circuit in a private cloud.
"""
helps['vmware authorization delete'] = """
type: command
short-summary: Delete an authorization for an ExpressRoute Circuit in a private cloud.
"""
helps['vmware hcx-enterprise-site create'] = """
type: command
short-summary: Create an HCX Enterprise Site in a private cloud.
"""
helps['vmware hcx-enterprise-site list'] = """
type: command
short-summary: List HCX Enterprise Sites in a private cloud.
"""
helps['vmware hcx-enterprise-site show'] = """
type: command
short-summary: Show details of an HCX Enterprise Site in a private cloud.
"""
helps['vmware hcx-enterprise-site delete'] = """
type: command
short-summary: Delete an HCX Enterprise Site in a private cloud.
"""
helps['vmware location checkquotaavailability'] = """
type: command
short-summary: Return quota for subscription by region.
"""
helps['vmware location checktrialavailability'] = """
type: command
short-summary: Return trial status for subscription by region.
"""
helps['vmware datastore create'] = """
type: command
short-summary: Create a datastore in a private cloud cluster.
examples:
- name: Create a new Microsoft.StoragePool provided disk pool based iSCSI datastore.
text: az vmware datastore create --name iSCSIDatastore1 --resource-group MyResourceGroup --cluster Cluster-1 --private-cloud MyPrivateCloud --endpoints 10.10.0.1:3260 --lun-name lun0
- name: Create a new Microsoft.StoragePool provided disk pool based iSCSI datastore with multiple endpoints.
text: az vmware datastore create --name iSCSIDatastore1 --resource-group MyResourceGroup --cluster Cluster-1 --private-cloud MyPrivateCloud --endpoints 10.10.0.1:3260 10.10.0.2:3260 --lun-name lun0
- name: Create a new Microsoft.NetApp provided NetApp volume based NFSv3 datastore.
text: az vmware datastore create --name ANFDatastore1 --resource-group MyResourceGroup --cluster Cluster-1 --private-cloud MyPrivateCloud --nfs-file-path ANFVol1FilePath --nfs-provider-ip 10.10.0.1
"""
helps['vmware datastore show'] = """
type: command
short-summary: Show details of a datastore in a private cloud cluster.
examples:
- name: Show the details of an iSCSI or NFS based datastore.
text: az vmware datastore show --name MyCloudSANDatastore1 --resource-group MyResourceGroup --cluster Cluster-1 --private-cloud MyPrivateCloud
"""
helps['vmware datastore list'] = """
type: command
short-summary: List datastores in a private cloud cluster.
examples:
- name: List all iSCSI or NFS based datastores under Cluster-1.
text: az vmware datastore list --resource-group MyResourceGroup --cluster Cluster-1 --private-cloud MyPrivateCloud
"""
helps['vmware datastore delete'] = """
type: command
short-summary: Delete a datastore in a private cloud cluster.
examples:
- name: Delete an iSCSI or NFS based datastore.
text: az vmware datastore delete --name MyCloudSANDatastore1 --resource-group MyResourceGroup --cluster Cluster-1 --private-cloud MyPrivateCloud
"""
| 34.763033 | 204 | 0.671438 |
from knack.help_files import helps
# Help strings follow knack's YAML help format. Each entry body is a
# newline-prefixed ``type`` line, a ``short-summary`` line and, optionally,
# an ``examples`` list; the renderer below emits exactly that layout.
def _help_entry(entry_type, summary, examples=()):
    """Render one knack YAML help entry (identical text to the originals)."""
    lines = ['', '    type: ' + entry_type, '    short-summary: ' + summary]
    if examples:
        lines.append('    examples:')
        for example_name, example_text in examples:
            lines.append('    - name: ' + example_name)
            lines.append('      text: ' + example_text)
    lines.append('')
    return '\n'.join(lines)


# Command groups, in registration order.
_GROUP_HELP = (
    ('vmware', 'Commands to manage Azure VMware Solution.'),
    ('vmware private-cloud', 'Commands to manage private clouds.'),
    ('vmware cluster', 'Commands to manage clusters in a private cloud.'),
    ('vmware authorization',
     'Commands to manage the authorizations of an ExpressRoute Circuit for a private cloud.'),
    ('vmware hcx-enterprise-site',
     'Commands to manage HCX Enterprise Sites in a private cloud.'),
    ('vmware location', 'Commands to check availability by location.'),
    ('vmware datastore',
     'Commands to manage a datastore in a private cloud cluster.'),
)

# Commands: (name, short-summary, ((example name, example text), ...)).
_COMMAND_HELP = (
    ('vmware cluster create',
     'Create a cluster in a private cloud. The maximum number of clusters is 4.', ()),
    ('vmware cluster delete', 'Delete a cluster in a private cloud.', ()),
    ('vmware cluster list', 'List clusters in a private cloud.', ()),
    ('vmware cluster show', 'Show details of a cluster in a private cloud.', ()),
    ('vmware cluster update', 'Update a cluster in a private cloud.', ()),
    ('vmware private-cloud addidentitysource',
     'Add a vCenter Single Sign On Identity Source to a private cloud.', ()),
    ('vmware private-cloud create', 'Create a private cloud.', ()),
    ('vmware private-cloud delete', 'Delete a private cloud.', ()),
    ('vmware private-cloud deleteidentitysource',
     'Delete a vCenter Single Sign On Identity Source for a private cloud.', ()),
    ('vmware private-cloud list', 'List the private clouds.', ()),
    ('vmware private-cloud listadmincredentials',
     'List the admin credentials for the private cloud.', ()),
    ('vmware private-cloud show', 'Show details of a private cloud.', ()),
    ('vmware private-cloud update', 'Update a private cloud.', ()),
    ('vmware private-cloud rotate-vcenter-password',
     'Rotate the vCenter password.',
     (('Rotate the vCenter password.',
       'az vmware private-cloud rotate-vcenter-password --resource-group MyResourceGroup --private-cloud MyPrivateCloud'),)),
    ('vmware private-cloud rotate-nsxt-password',
     'Rotate the NSX-T Manager password.',
     (('Rotate the NSX-T Manager password.',
       'az vmware private-cloud rotate-nsxt-password --resource-group MyResourceGroup --private-cloud MyPrivateCloud'),)),
    ('vmware authorization create',
     'Create an authorization for an ExpressRoute Circuit in a private cloud.', ()),
    ('vmware authorization list',
     'List authorizations for an ExpressRoute Circuit in a private cloud.', ()),
    ('vmware authorization show',
     'Show details of an authorization for an ExpressRoute Circuit in a private cloud.', ()),
    ('vmware authorization delete',
     'Delete an authorization for an ExpressRoute Circuit in a private cloud.', ()),
    ('vmware hcx-enterprise-site create',
     'Create an HCX Enterprise Site in a private cloud.', ()),
    ('vmware hcx-enterprise-site list',
     'List HCX Enterprise Sites in a private cloud.', ()),
    ('vmware hcx-enterprise-site show',
     'Show details of an HCX Enterprise Site in a private cloud.', ()),
    ('vmware hcx-enterprise-site delete',
     'Delete an HCX Enterprise Site in a private cloud.', ()),
    ('vmware location checkquotaavailability',
     'Return quota for subscription by region.', ()),
    ('vmware location checktrialavailability',
     'Return trial status for subscription by region.', ()),
    ('vmware datastore create',
     'Create a datastore in a private cloud cluster.',
     (('Create a new Microsoft.StoragePool provided disk pool based iSCSI datastore.',
       'az vmware datastore create --name iSCSIDatastore1 --resource-group MyResourceGroup --cluster Cluster-1 --private-cloud MyPrivateCloud --endpoints 10.10.0.1:3260 --lun-name lun0'),
      ('Create a new Microsoft.StoragePool provided disk pool based iSCSI datastore with multiple endpoints.',
       'az vmware datastore create --name iSCSIDatastore1 --resource-group MyResourceGroup --cluster Cluster-1 --private-cloud MyPrivateCloud --endpoints 10.10.0.1:3260 10.10.0.2:3260 --lun-name lun0'),
      ('Create a new Microsoft.NetApp provided NetApp volume based NFSv3 datastore.',
       'az vmware datastore create --name ANFDatastore1 --resource-group MyResourceGroup --cluster Cluster-1 --private-cloud MyPrivateCloud --nfs-file-path ANFVol1FilePath --nfs-provider-ip 10.10.0.1'))),
    ('vmware datastore show',
     'Show details of a datastore in a private cloud cluster.',
     (('Show the details of an iSCSI or NFS based datastore.',
       'az vmware datastore show --name MyCloudSANDatastore1 --resource-group MyResourceGroup --cluster Cluster-1 --private-cloud MyPrivateCloud'),)),
    ('vmware datastore list',
     'List datastores in a private cloud cluster.',
     (('List all iSCSI or NFS based datastores under Cluster-1.',
       'az vmware datastore list --resource-group MyResourceGroup --cluster Cluster-1 --private-cloud MyPrivateCloud'),)),
    ('vmware datastore delete',
     'Delete a datastore in a private cloud cluster.',
     (('Delete an iSCSI or NFS based datastore.',
       'az vmware datastore delete --name MyCloudSANDatastore1 --resource-group MyResourceGroup --cluster Cluster-1 --private-cloud MyPrivateCloud'),)),
)

for _name, _summary in _GROUP_HELP:
    helps[_name] = _help_entry('group', _summary)
for _name, _summary, _examples in _COMMAND_HELP:
    helps[_name] = _help_entry('command', _summary, _examples)
| true | true |
f72f8d9198afb6938ccdaa898c7765d5213e95ab | 1,699 | py | Python | docs/source/example_models.py | 2degrees/djeneralize | ef456292397888bd0476786d9f4ab2854b3604ef | [
"BSD-3-Clause"
] | 4 | 2015-10-19T10:49:47.000Z | 2017-03-22T13:06:32.000Z | docs/source/example_models.py | 2degrees/djeneralize | ef456292397888bd0476786d9f4ab2854b3604ef | [
"BSD-3-Clause"
] | 1 | 2015-04-16T17:37:15.000Z | 2015-04-17T08:05:21.000Z | docs/source/example_models.py | 2degrees/djeneralize | ef456292397888bd0476786d9f4ab2854b3604ef | [
"BSD-3-Clause"
] | 3 | 2015-08-12T12:28:03.000Z | 2015-09-30T09:45:40.000Z | from django.db import models
from djeneralize.models import BaseGeneralizationModel
from djeneralize.fields import SpecializedForeignKey
#{ General model
class WritingImplement(BaseGeneralizationModel):
    """General model: the root of the writing-implement specialization tree."""
    name = models.CharField(max_length=30)
    length = models.IntegerField()  # NOTE(review): units are not stated here -- confirm
    # djeneralize FK; presumably dereferences to the holder's most specialized
    # instance -- see the djeneralize documentation. Optional, since an
    # implement may not be stored anywhere.
    holder = SpecializedForeignKey(
        'WritingImplementHolder', null=True, blank=True)
    def __unicode__(self):
        # Python 2-era Django display name.
        return self.name
#{ Direct children of WritingImplement, i.e. first specialization
class Pencil(WritingImplement):
    """First-degree specialization of WritingImplement."""
    lead = models.CharField(max_length=2) # lead grade, i.e. HB, B2, H5
    class Meta:
        specialization = 'pencil'  # key used by djeneralize for this specialization
class Pen(WritingImplement):
    """First-degree specialization of WritingImplement."""
    ink_colour = models.CharField(max_length=30)
    class Meta:
        specialization = 'pen'
#{ Grand-children of WritingImplement, i.e. second degree of specialization
class FountainPen(Pen):
    """Second-degree specialization (WritingImplement -> Pen -> FountainPen)."""
    nib_width = models.DecimalField(max_digits=3, decimal_places=2)
    class Meta:
        specialization = 'fountain_pen'
class BallPointPen(Pen):
    """Second-degree specialization (WritingImplement -> Pen -> BallPointPen)."""
    replaceable_insert = models.BooleanField(default=False)
    class Meta:
        specialization = 'ballpoint_pen'
#{ Writing implement holders general model
class WritingImplementHolder(BaseGeneralizationModel):
    """General model for anything that can hold writing implements."""
    name = models.CharField(max_length=30)
    def __unicode__(self):
        return self.name
#{ Writing implement holders specializations
class StationaryCupboard(WritingImplementHolder):
    """Holder specialization ('Stationary' spelling is part of the API -- keep)."""
    volume = models.FloatField()
    class Meta:
        specialization = 'stationary_cupboard'
class PencilCase(WritingImplementHolder):
    """Holder specialization."""
    colour = models.CharField(max_length=30)
    class Meta:
        specialization = 'pencil_case'
#}
| 18.877778 | 75 | 0.727487 | from django.db import models
from djeneralize.models import BaseGeneralizationModel
from djeneralize.fields import SpecializedForeignKey
class WritingImplement(BaseGeneralizationModel):
    """General (base) model of the writing-implement hierarchy."""
    name = models.CharField(max_length=30)
    length = models.IntegerField()  # NOTE(review): units unspecified -- confirm
    # Optional location; SpecializedForeignKey comes from djeneralize and
    # presumably resolves to the specialized holder instance -- see its docs.
    holder = SpecializedForeignKey(
        'WritingImplementHolder', null=True, blank=True)
    def __unicode__(self):
        # Python 2-era Django string representation.
        return self.name
class Pencil(WritingImplement):
    """Specialization of WritingImplement."""
    lead = models.CharField(max_length=2)  # e.g. HB, B2, H5
    class Meta:
        specialization = 'pencil'  # djeneralize specialization key
class Pen(WritingImplement):
    """Specialization of WritingImplement."""
    ink_colour = models.CharField(max_length=30)
    class Meta:
        specialization = 'pen'
class FountainPen(Pen):
    """Second-degree specialization (via Pen)."""
    nib_width = models.DecimalField(max_digits=3, decimal_places=2)
    class Meta:
        specialization = 'fountain_pen'
class BallPointPen(Pen):
    """Second-degree specialization (via Pen)."""
    replaceable_insert = models.BooleanField(default=False)
    class Meta:
        specialization = 'ballpoint_pen'
class WritingImplementHolder(BaseGeneralizationModel):
    """General model for containers of writing implements."""
    name = models.CharField(max_length=30)
    def __unicode__(self):
        return self.name
class StationaryCupboard(WritingImplementHolder):
    """Holder specialization ('Stationary' spelling kept -- it is part of the API)."""
    volume = models.FloatField()
    class Meta:
        specialization = 'stationary_cupboard'
class PencilCase(WritingImplementHolder):
    """Holder specialization."""
    colour = models.CharField(max_length=30)
    class Meta:
        specialization = 'pencil_case'
| true | true |
f72f8e30417092439e50e50b97f2efb35888640b | 2,204 | py | Python | laikaboss/modules/explode_re_sub.py | sandialabs/laikaboss | 3064ac1176911651d61c5176e9bd83eacec36b16 | [
"Apache-2.0"
] | 2 | 2019-11-02T23:40:23.000Z | 2019-12-01T22:24:57.000Z | laikaboss/modules/explode_re_sub.py | sandialabs/laikaboss | 3064ac1176911651d61c5176e9bd83eacec36b16 | [
"Apache-2.0"
] | null | null | null | laikaboss/modules/explode_re_sub.py | sandialabs/laikaboss | 3064ac1176911651d61c5176e9bd83eacec36b16 | [
"Apache-2.0"
] | 3 | 2017-08-09T23:58:40.000Z | 2019-12-01T22:25:06.000Z | # Copyright 2020 National Technology & Engineering Solutions of Sandia, LLC
# (NTESS). Under the terms of Contract DE-NA0003525 with NTESS, the U.S.
# Government retains certain rights in this software.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import re
import binascii
from laikaboss.si_module import SI_MODULE
from laikaboss.objectmodel import ModuleObject, ExternalVars
import laikaboss.util
class EXPLODE_RE_SUB(SI_MODULE):
    '''
    Laika BOSS module that rewrites an object's buffer with ``re.sub`` and
    explodes the result as a child object.
    '''
    def __init__(self,):
        self.module_name = "EXPLODE_RE_SUB"
        # Compiled regex cache; refreshed whenever the configured pattern
        # changes (see _run).
        self.re_pattern = None
    def _run(self, scanObject, result, depth, args):
        """Apply the configured substitution to the scanned buffer.

        Pattern/replacement come from dispatch args with config-file
        fallbacks; the *_hex variants take precedence so binary patterns can
        be configured. Returns a single-element list containing the child
        ModuleObject.
        """
        moduleResult = []
        data = scanObject.buffer  # renamed from 'buffer' to avoid shadowing the builtin
        pattern = laikaboss.util.get_option(args, 'pattern', 'resub_pattern', "uhm").encode('utf-8')
        # NOTE: the config key 'resub_resplacement' (sic) is kept for
        # compatibility with existing configurations.
        replacement = laikaboss.util.get_option(args, 'replacement', 'resub_resplacement', "").encode('utf-8')
        pattern_hex = laikaboss.util.get_option(args, 'pattern_hex', 'resub_pattern_hex', "")
        if pattern_hex:
            pattern = binascii.unhexlify(pattern_hex)
        replacement_hex = laikaboss.util.get_option(args, 'replacement_hex', 'resub_replacement_hex', "")
        if replacement_hex:
            replacement = binascii.unhexlify(replacement_hex)
        name = laikaboss.util.get_option(args, 'name', 'resub_name', "resub")
        # BUG FIX: previously the regex was compiled only on the first call
        # ('if not self.re_pattern'), so a later dispatch supplying a
        # different 'pattern' silently reused the stale compiled regex.
        # Recompile whenever the effective pattern differs from the cache.
        if self.re_pattern is None or self.re_pattern.pattern != pattern:
            self.re_pattern = re.compile(pattern)
        newdata = self.re_pattern.sub(replacement, data)
        moduleResult.append(ModuleObject(
            buffer=newdata,
            externalVars=ExternalVars(filename=scanObject.filename + "_" + name)))
        return moduleResult
| 36.733333 | 126 | 0.696915 |
import re
import binascii
from laikaboss.si_module import SI_MODULE
from laikaboss.objectmodel import ModuleObject, ExternalVars
import laikaboss.util
class EXPLODE_RE_SUB(SI_MODULE):
    '''Laika BOSS module that applies a configurable re.sub to the scanned
    buffer and emits the rewritten data as a child object.'''
    def __init__(self,):
        self.module_name = "EXPLODE_RE_SUB"
        # Lazily-compiled regex, cached across scans (see note in _run).
        self.re_pattern = None
    def _run(self, scanObject, result, depth, args):
        """Substitute `pattern` -> `replacement` in the object's buffer and
        return a one-element list with the resulting child ModuleObject."""
        moduleResult = []
        buffer = scanObject.buffer
        # Options come from dispatch args with config-file fallbacks.
        pattern = laikaboss.util.get_option(args, 'pattern', 'resub_pattern', "uhm").encode('utf-8')
        # NOTE: config key 'resub_resplacement' (sic) kept for compatibility.
        replacement = laikaboss.util.get_option(args, 'replacement', 'resub_resplacement', "").encode('utf-8')
        pattern_hex = laikaboss.util.get_option(args, 'pattern_hex', 'resub_pattern_hex', "")
        if pattern_hex:
            # Hex-encoded variants take precedence, allowing binary patterns.
            pattern = binascii.unhexlify(pattern_hex)
        replacement_hex = laikaboss.util.get_option(args, 'replacement_hex', 'resub_replacement_hex', "")
        if replacement_hex:
            replacement = binascii.unhexlify(replacement_hex)
        name = laikaboss.util.get_option(args, 'name', 'resub_name', "resub")
        # NOTE(review): the regex is compiled only on the first call; if a
        # later dispatch supplies a different 'pattern', the stale compiled
        # regex is reused -- confirm whether module args are fixed per process.
        if not self.re_pattern:
            self.re_pattern = re.compile(pattern)
        newdata = self.re_pattern.sub(replacement, buffer)
        moduleResult.append(ModuleObject(buffer=newdata,externalVars=ExternalVars(filename=scanObject.filename + "_" + name)))
        return moduleResult
| true | true |
f72f8e94a0df815f7d517e2b81ffc86c5c545f07 | 2,893 | py | Python | tensorflow/contrib/autograph/utils/multiple_dispatch_test.py | tianyapiaozi/tensorflow | fb3ce0467766a8e91f1da0ad7ada7c24fde7a73a | [
"Apache-2.0"
] | 71 | 2017-05-25T16:02:15.000Z | 2021-06-09T16:08:08.000Z | tensorflow/contrib/autograph/utils/multiple_dispatch_test.py | shrikunjsarda/tensorflow | 7e8927e7af0c51ac20a63bd4eab6ff83df1a39ae | [
"Apache-2.0"
] | 133 | 2017-04-26T16:49:49.000Z | 2019-10-15T11:39:26.000Z | tensorflow/contrib/autograph/utils/multiple_dispatch_test.py | shrikunjsarda/tensorflow | 7e8927e7af0c51ac20a63bd4eab6ff83df1a39ae | [
"Apache-2.0"
] | 31 | 2018-09-11T02:17:17.000Z | 2021-12-15T10:33:35.000Z | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for multiple_dispatch."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.autograph.utils import multiple_dispatch
from tensorflow.python.client.session import Session
from tensorflow.python.framework.constant_op import constant
from tensorflow.python.platform import test
class MultipleDispatchTest(test.TestCase):
  """Tests for autograph's multiple_dispatch helpers, exercised both with
  plain Python/NumPy values and with TF tensors inside a TF1 Session."""
  def test_dynamic_is_python(self):
    """dynamic_is / dynamic_is_not follow object identity for Python values."""
    a = np.eye(3)
    also_a = a
    not_actually_a = np.eye(3)  # equal contents but a distinct object
    should_be_true1 = multiple_dispatch.dynamic_is(a, also_a)
    should_be_false1 = multiple_dispatch.dynamic_is_not(a, also_a)
    should_be_true2 = multiple_dispatch.dynamic_is_not(a, not_actually_a)
    should_be_false2 = multiple_dispatch.dynamic_is(a, not_actually_a)
    self.assertTrue(should_be_true1)
    self.assertTrue(should_be_true2)
    self.assertFalse(should_be_false1)
    self.assertFalse(should_be_false2)
  def test_dynamic_is_tf(self):
    """The same identity semantics hold for TF tensors."""
    with Session().as_default():
      a = constant([2.0])
      also_a = a
      not_actually_a = constant([2.0])  # equal value but a distinct tensor
      should_be_true1 = multiple_dispatch.dynamic_is(a, also_a)
      should_be_false1 = multiple_dispatch.dynamic_is_not(a, also_a)
      should_be_true2 = multiple_dispatch.dynamic_is_not(a, not_actually_a)
      should_be_false2 = multiple_dispatch.dynamic_is(a, not_actually_a)
      self.assertTrue(should_be_true1)
      self.assertTrue(should_be_true2)
      self.assertFalse(should_be_false1)
      self.assertFalse(should_be_false2)
  def test_run_cond_python(self):
    """run_cond evaluates the matching branch for plain Python booleans."""
    true_fn = lambda: (2,)
    false_fn = lambda: (3,)
    self.assertEqual(multiple_dispatch.run_cond(True, true_fn, false_fn), 2)
    self.assertEqual(multiple_dispatch.run_cond(False, true_fn, false_fn), 3)
  def test_run_cond_tf(self):
    """With a tensor predicate, run_cond yields a tensor evaluated in the
    session (presumably via tf.cond -- confirm in multiple_dispatch)."""
    true_fn = lambda: (constant(2),)
    false_fn = lambda: (constant(3),)
    with Session() as sess:
      out = multiple_dispatch.run_cond(constant(True), true_fn, false_fn)
      self.assertEqual(sess.run(out), 2)
      out = multiple_dispatch.run_cond(constant(False), true_fn, false_fn)
      self.assertEqual(sess.run(out), 3)
if __name__ == '__main__':
  # Run under TensorFlow's test runner when executed directly.
  test.main()
| 38.065789 | 80 | 0.733495 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.autograph.utils import multiple_dispatch
from tensorflow.python.client.session import Session
from tensorflow.python.framework.constant_op import constant
from tensorflow.python.platform import test
class MultipleDispatchTest(test.TestCase):
  """Unit tests for the multiple_dispatch helpers; each behaviour is checked
  once with Python/NumPy values and once with TF tensors."""
  def test_dynamic_is_python(self):
    """Identity (is / is not) semantics for plain Python objects."""
    a = np.eye(3)
    also_a = a
    not_actually_a = np.eye(3)  # same contents, different object
    should_be_true1 = multiple_dispatch.dynamic_is(a, also_a)
    should_be_false1 = multiple_dispatch.dynamic_is_not(a, also_a)
    should_be_true2 = multiple_dispatch.dynamic_is_not(a, not_actually_a)
    should_be_false2 = multiple_dispatch.dynamic_is(a, not_actually_a)
    self.assertTrue(should_be_true1)
    self.assertTrue(should_be_true2)
    self.assertFalse(should_be_false1)
    self.assertFalse(should_be_false2)
  def test_dynamic_is_tf(self):
    """Identity semantics for TF tensors inside a default session."""
    with Session().as_default():
      a = constant([2.0])
      also_a = a
      not_actually_a = constant([2.0])  # same value, distinct tensor
      should_be_true1 = multiple_dispatch.dynamic_is(a, also_a)
      should_be_false1 = multiple_dispatch.dynamic_is_not(a, also_a)
      should_be_true2 = multiple_dispatch.dynamic_is_not(a, not_actually_a)
      should_be_false2 = multiple_dispatch.dynamic_is(a, not_actually_a)
      self.assertTrue(should_be_true1)
      self.assertTrue(should_be_true2)
      self.assertFalse(should_be_false1)
      self.assertFalse(should_be_false2)
  def test_run_cond_python(self):
    """run_cond picks the matching branch for Python booleans."""
    true_fn = lambda: (2,)
    false_fn = lambda: (3,)
    self.assertEqual(multiple_dispatch.run_cond(True, true_fn, false_fn), 2)
    self.assertEqual(multiple_dispatch.run_cond(False, true_fn, false_fn), 3)
  def test_run_cond_tf(self):
    """run_cond with a tensor predicate produces a session-evaluable result."""
    true_fn = lambda: (constant(2),)
    false_fn = lambda: (constant(3),)
    with Session() as sess:
      out = multiple_dispatch.run_cond(constant(True), true_fn, false_fn)
      self.assertEqual(sess.run(out), 2)
      out = multiple_dispatch.run_cond(constant(False), true_fn, false_fn)
      self.assertEqual(sess.run(out), 3)
if __name__ == '__main__':
  # Entry point for running this test module directly.
  test.main()
| true | true |
f72f8f3096b8b8f70f02fbf592b95d3864e87c01 | 594 | py | Python | papy/misc.py | ArcturusB/papy | 360e4cc6b5c8473f8a5e8bce3153931f1a54a558 | [
"MIT"
] | 3 | 2022-03-10T08:13:07.000Z | 2022-03-10T08:13:13.000Z | papy/misc.py | ArcturusB/papy | 360e4cc6b5c8473f8a5e8bce3153931f1a54a558 | [
"MIT"
] | null | null | null | papy/misc.py | ArcturusB/papy | 360e4cc6b5c8473f8a5e8bce3153931f1a54a558 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import functools
import time
class Chrono():
    """Simple wall-clock stopwatch that prints lap and total elapsed times."""

    def __init__(self, msg=None):
        # Print an optional start-up message before timing begins.
        if msg:
            print(msg)
        self.t0 = time.time()  # overall start time
        self.t = self.t0       # start of the current lap

    def lap(self, name=None):
        """Print the time since the previous lap and since construction,
        optionally prefixed with *name*, then start a new lap."""
        now = time.time()
        if name:
            print(name, end=': ')
        print('{:.2g} s (total: {:.2g} s)'.format(now - self.t, now - self.t0))
        self.t = now
def cached_property(func):
    ''' Decorator composition of @property with @functools.lru_cache().

    NOTE(review): lru_cache keys on the instance, so decorated instances
    are kept alive by the cache (flake8-bugbear B019) -- acceptable here,
    but worth knowing.
    '''
    memoized = functools.lru_cache()(func)
    return property(memoized)
| 24.75 | 74 | 0.552189 |
import functools
import time
class Chrono():
    """Wall-clock stopwatch printing lap and total elapsed times."""
    def __init__(self, msg=None):
        # Optional start-up message, printed before timing begins.
        if msg:
            print(msg)
        # t0: overall start; t: start of the current lap.
        # NOTE(review): time.time() can jump with system clock changes;
        # time.monotonic() would be safer -- confirm before changing.
        self.t0 = time.time()
        self.t = self.t0
    def lap(self, name=None):
        """Print time since the previous lap (and since construction)."""
        now = time.time()
        if name:
            print(name, end=': ')
        msg = '{:.2g} s (total: {:.2g} s)'
        msg = msg.format(now - self.t, now - self.t0)
        print(msg)
        # Start the next lap from now.
        self.t = now
def cached_property(func):
    """Decorator composition of @property with @functools.lru_cache()."""
    return property(functools.lru_cache()(func))
| true | true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.