Dataset schema (one row per Python source file):

- hexsha: string (40 characters)
- size: int64 (10 to 805k)
- ext: string (6 classes)
- lang: string (1 class)
- max_stars_repo_path: string (4 to 176 characters)
- max_stars_repo_name: string (7 to 114 characters)
- max_stars_repo_head_hexsha: string (40 characters)
- max_stars_repo_licenses: list (1 to 10 items)
- max_stars_count: int64 (1 to 191k, nullable)
- max_stars_repo_stars_event_min_datetime: string (24 characters, nullable)
- max_stars_repo_stars_event_max_datetime: string (24 characters, nullable)
- max_issues_repo_path: string (4 to 176 characters)
- max_issues_repo_name: string (7 to 114 characters)
- max_issues_repo_head_hexsha: string (40 characters)
- max_issues_repo_licenses: list (1 to 10 items)
- max_issues_count: int64 (1 to 48.5k, nullable)
- max_issues_repo_issues_event_min_datetime: string (24 characters, nullable)
- max_issues_repo_issues_event_max_datetime: string (24 characters, nullable)
- max_forks_repo_path: string (4 to 176 characters)
- max_forks_repo_name: string (7 to 114 characters)
- max_forks_repo_head_hexsha: string (40 characters)
- max_forks_repo_licenses: list (1 to 10 items)
- max_forks_count: int64 (1 to 105k, nullable)
- max_forks_repo_forks_event_min_datetime: string (24 characters, nullable)
- max_forks_repo_forks_event_max_datetime: string (24 characters, nullable)
- content: string (10 to 805k characters)
- avg_line_length: float64 (5.53 to 11k)
- max_line_length: int64 (10 to 129k)
- alphanum_fraction: float64 (0.13 to 0.93)
- content_no_comment: string (0 to 449k characters)
- is_comment_constant_removed: bool (2 classes)
- is_sharp_comment_removed: bool (1 class)
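The columns above describe one record per Python source file. A minimal sketch of how such an export might be inspected locally, assuming the rows have been saved to a Parquet file (the file name here is hypothetical):

```python
# Sketch: load and filter rows with this schema; the file name is an assumption.
import pandas as pd

df = pd.read_parquet("python_files.parquet")

# Keep reasonably "code-like" files: mostly alphanumeric, no extreme line lengths.
mask = (df["alphanum_fraction"] > 0.5) & (df["max_line_length"] < 200)
print(df.loc[mask, ["max_stars_repo_name", "size", "avg_line_length"]].head())
```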
f71469034f1006ce1d48aedf70acee846deb393a
| 6,692
|
py
|
Python
|
pyelastic/pendulum.py
|
tacox5/elastic_pendulum
|
c2058444ca161a420466b531b008fe247a87db60
|
[
"BSD-2-Clause"
] | null | null | null |
pyelastic/pendulum.py
|
tacox5/elastic_pendulum
|
c2058444ca161a420466b531b008fe247a87db60
|
[
"BSD-2-Clause"
] | 8
|
2021-06-11T15:26:47.000Z
|
2021-07-29T23:52:01.000Z
|
pyelastic/pendulum.py
|
tyler-a-cox/elastic-pendulum
|
c2058444ca161a420466b531b008fe247a87db60
|
[
"BSD-2-Clause"
] | null | null | null |
import os
import glob
import numpy as np
import matplotlib.pyplot as plt
from scipy.integrate import solve_ivp
from scipy.interpolate import interp1d
from .settings import *
class ElasticPendulum:
"""Class that handles the simulation of springy, double pendulums. This class
handles a number of initial conditions from starting angle to pendulum mass"""
def __init__(self, **kwargs):
"""Animate
Args:
alpha_0 : float
Inital angle of the top pendulum in radians
beta_0 : float
Inital angle of the bottom pendulum in radians
alpha_1 : float, default=True
Inital angular velocity of the top pendulum in radians
beta_1 : float, default=True
Inital angular velocity of the top pendulum in radians
k1 : boolean, default=True
Spring constant of the top pendulum in arbitrary units
k2 : boolean, default=True
Spring constant of the top pendulum in arbitrary units
l1 : boolean, default=True
Length of the top pendulum in arbitrary units
l2 : boolean, default=True
Length of the bottom pendulum in arbitrary units
m1 : float, default=1.0
Mass of the top pendulum in arbitrary units
m2 : float, default=1.0
Mass of the bottom pendulum in arbitrary units
a0 : boolean, default=True
b0 : boolean, default=True
a1 : boolean, default=True
b1 : boolean, default=True
t_end : float, default=2
Length of the simulation in seconds
fps : int, default=24
Frame rate of the video simulation. Sets the resolution of the integrator
and helps to visualize the results later
"""
prop_defaults = {
"alpha_0": np.random.uniform(-np.pi, np.pi),
"beta_0": np.random.uniform(-np.pi, np.pi),
"alpha_1": 0.0,
"beta_1": 0.0,
"k1": np.random.uniform(35, 55),
"k2": np.random.uniform(35, 55),
"l1": 1.0,
"l2": 1.0,
"m1": 1.0,
"m2": 1.0,
"a0": 1.0,
"b0": 1.0,
"a1": 1.0,
"b1": 1.0,
"t_end": 2,
"fps": 24,
"g": GRAVITY,
}
for (prop, default) in prop_defaults.items():
setattr(self, prop, kwargs.get(prop, default))
self.dt = 1.0 / self.fps
self.t_eval = np.arange(0, self.t_end, self.dt)
def _spherical_to_cartesian(self, array, interpolate=True):
"""Transforms from 2D spherical coordinate system to a cartesian coordinate system
Args:
array : np.ndarray
Output array from integration function in spherical coordinates
interpolate : boolean, default=True
Returns:
None
"""
x1 = array[:, 2] * np.sin(array[:, 0])
x2 = x1 + array[:, 3] * np.sin(array[:, 1])
y1 = -array[:, 2] * np.cos(array[:, 0])
y2 = y1 - array[:, 3] * np.cos(array[:, 1])
if interpolate:
self.fx1 = interp1d(np.arange(0, x1.shape[0]), x1)
self.fy1 = interp1d(np.arange(0, x1.shape[0]), y1)
self.fx2 = interp1d(np.arange(0, x1.shape[0]), x2)
self.fy2 = interp1d(np.arange(0, x1.shape[0]), y2)
return x1, x2, y1, y2
def _alpha_pp(self, t, Y):
""" """
alpha_0, alpha_1, beta_0, beta_1, a0, a1, b0, _ = Y
return -(
self.g * self.m1 * np.sin(alpha_0)
- self.k2 * self.l2 * np.sin(alpha_0 - beta_0)
+ self.k2 * b0 * np.sin(alpha_0 - beta_0)
+ 2 * self.m1 * a1 * alpha_1
) / (self.m1 * a0)
def _beta_pp(self, t, Y):
""" """
alpha_0, alpha_1, beta_0, beta_1, a0, a1, b0, b1 = Y
return (
-self.k1 * self.l1 * np.sin(alpha_0 - beta_0)
+ self.k1 * a0 * np.sin(alpha_0 - beta_0)
- 2.0 * self.m1 * b1 * beta_1
) / (self.m1 * b0)
def _a_pp(self, t, Y):
""" """
alpha_0, alpha_1, beta_0, beta_1, a0, a1, b0, b1 = Y
return (
self.k1 * self.l1
+ self.g * self.m1 * np.cos(alpha_0)
- self.k2 * self.l2 * np.cos(alpha_0 - beta_0)
+ self.k2 * b0 * np.cos(alpha_0 - beta_0)
+ a0 * (-self.k1 + self.m1 * alpha_1 ** 2)
) / self.m1
def _b_pp(self, t, Y):
""" """
alpha_0, alpha_1, beta_0, beta_1, a0, a1, b0, b1 = Y
return (
self.k2 * self.l2 * self.m1
+ self.k2 * self.l2 * self.m2 * np.cos(alpha_0 - beta_0)
+ self.k1 * self.m2 * a0 * np.cos(alpha_0 - beta_0)
- b0 * (self.k2 * (self.m1 + self.m2) - self.m1 * self.m2 * beta_1 ** 2)
) / (self.m1 * self.m2)
def _lagrangian(self, t, Y):
"""Set of differential equations to integrate to solve the equations of motion
for the pendulum masses. Incorporates
Args:
t : np.ndarray
Evaluation time array
Y : np.ndarray
Initial conditions of the pendulum masses
Returns:
list :
Evaluation of the differential equations
"""
return [
Y[1],
self._alpha_pp(t, Y),
Y[3],
self._beta_pp(t, Y),
Y[5],
self._a_pp(t, Y),
Y[7],
self._b_pp(t, Y),
]
def integrate(self, method="LSODA", interpolate=True):
"""Main
Args:
method : str, default=LSODA
Integrator type to integrate the set of differential equations. Options
are: RK45, RK23, DOP853, Radau, BDF, and LSODA. For more information, see
scipy.integrate.solve_ivp documentation
interpolate : boolean, default=True
Whether to interpolate the final results. Useful for animation
Returns:
None
"""
Y0 = [
self.alpha_0,
self.alpha_1,
self.beta_0,
self.beta_1,
self.a0,
self.a1,
self.b0,
self.b1,
]
self.solution = solve_ivp(
self._lagrangian, [0, self.t_end], Y0, t_eval=self.t_eval, method=method
)
self.x1, self.x2, self.y1, self.y2 = self._spherical_to_cartesian(
self.solution.y[[0, 2, 4, 6]].T, interpolate=interpolate
)
| 34.494845
| 90
| 0.51569
|
import os
import glob
import numpy as np
import matplotlib.pyplot as plt
from scipy.integrate import solve_ivp
from scipy.interpolate import interp1d
from .settings import *
class ElasticPendulum:
def __init__(self, **kwargs):
prop_defaults = {
"alpha_0": np.random.uniform(-np.pi, np.pi),
"beta_0": np.random.uniform(-np.pi, np.pi),
"alpha_1": 0.0,
"beta_1": 0.0,
"k1": np.random.uniform(35, 55),
"k2": np.random.uniform(35, 55),
"l1": 1.0,
"l2": 1.0,
"m1": 1.0,
"m2": 1.0,
"a0": 1.0,
"b0": 1.0,
"a1": 1.0,
"b1": 1.0,
"t_end": 2,
"fps": 24,
"g": GRAVITY,
}
for (prop, default) in prop_defaults.items():
setattr(self, prop, kwargs.get(prop, default))
self.dt = 1.0 / self.fps
self.t_eval = np.arange(0, self.t_end, self.dt)
def _spherical_to_cartesian(self, array, interpolate=True):
x1 = array[:, 2] * np.sin(array[:, 0])
x2 = x1 + array[:, 3] * np.sin(array[:, 1])
y1 = -array[:, 2] * np.cos(array[:, 0])
y2 = y1 - array[:, 3] * np.cos(array[:, 1])
if interpolate:
self.fx1 = interp1d(np.arange(0, x1.shape[0]), x1)
self.fy1 = interp1d(np.arange(0, x1.shape[0]), y1)
self.fx2 = interp1d(np.arange(0, x1.shape[0]), x2)
self.fy2 = interp1d(np.arange(0, x1.shape[0]), y2)
return x1, x2, y1, y2
def _alpha_pp(self, t, Y):
alpha_0, alpha_1, beta_0, beta_1, a0, a1, b0, _ = Y
return -(
self.g * self.m1 * np.sin(alpha_0)
- self.k2 * self.l2 * np.sin(alpha_0 - beta_0)
+ self.k2 * b0 * np.sin(alpha_0 - beta_0)
+ 2 * self.m1 * a1 * alpha_1
) / (self.m1 * a0)
def _beta_pp(self, t, Y):
alpha_0, alpha_1, beta_0, beta_1, a0, a1, b0, b1 = Y
return (
-self.k1 * self.l1 * np.sin(alpha_0 - beta_0)
+ self.k1 * a0 * np.sin(alpha_0 - beta_0)
- 2.0 * self.m1 * b1 * beta_1
) / (self.m1 * b0)
def _a_pp(self, t, Y):
alpha_0, alpha_1, beta_0, beta_1, a0, a1, b0, b1 = Y
return (
self.k1 * self.l1
+ self.g * self.m1 * np.cos(alpha_0)
- self.k2 * self.l2 * np.cos(alpha_0 - beta_0)
+ self.k2 * b0 * np.cos(alpha_0 - beta_0)
+ a0 * (-self.k1 + self.m1 * alpha_1 ** 2)
) / self.m1
def _b_pp(self, t, Y):
alpha_0, alpha_1, beta_0, beta_1, a0, a1, b0, b1 = Y
return (
self.k2 * self.l2 * self.m1
+ self.k2 * self.l2 * self.m2 * np.cos(alpha_0 - beta_0)
+ self.k1 * self.m2 * a0 * np.cos(alpha_0 - beta_0)
- b0 * (self.k2 * (self.m1 + self.m2) - self.m1 * self.m2 * beta_1 ** 2)
) / (self.m1 * self.m2)
def _lagrangian(self, t, Y):
return [
Y[1],
self._alpha_pp(t, Y),
Y[3],
self._beta_pp(t, Y),
Y[5],
self._a_pp(t, Y),
Y[7],
self._b_pp(t, Y),
]
def integrate(self, method="LSODA", interpolate=True):
Y0 = [
self.alpha_0,
self.alpha_1,
self.beta_0,
self.beta_1,
self.a0,
self.a1,
self.b0,
self.b1,
]
self.solution = solve_ivp(
self._lagrangian, [0, self.t_end], Y0, t_eval=self.t_eval, method=method
)
self.x1, self.x2, self.y1, self.y2 = self._spherical_to_cartesian(
self.solution.y[[0, 2, 4, 6]].T, interpolate=interpolate
)
| true
| true
|
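The class above exposes its results as instance attributes once integrate() has run, so a typical session only needs the constructor and one call. A minimal usage sketch, assuming the package is installed and importable as pyelastic.pendulum (an assumption based on the repository path shown above):

```python
# Sketch: simulate and plot one trajectory of the elastic double pendulum.
import matplotlib.pyplot as plt
from pyelastic.pendulum import ElasticPendulum  # import path assumed from the repo layout

pend = ElasticPendulum(alpha_0=1.0, beta_0=-0.5, t_end=10, fps=60)
pend.integrate(method="LSODA", interpolate=False)

# x1/y1 trace the upper mass, x2/y2 the lower mass.
plt.plot(pend.x1, pend.y1, label="upper mass")
plt.plot(pend.x2, pend.y2, label="lower mass")
plt.gca().set_aspect("equal")
plt.legend()
plt.show()
```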
f71469867882f63d4249d507eeda5f87798b2b79
| 535
|
py
|
Python
|
restful_test.py
|
Corey0606/FlaskSite
|
3c547b6e69a955d281451f18a9db8dde65013bd3
|
[
"CC-BY-3.0"
] | null | null | null |
restful_test.py
|
Corey0606/FlaskSite
|
3c547b6e69a955d281451f18a9db8dde65013bd3
|
[
"CC-BY-3.0"
] | null | null | null |
restful_test.py
|
Corey0606/FlaskSite
|
3c547b6e69a955d281451f18a9db8dde65013bd3
|
[
"CC-BY-3.0"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
@Time : 2020/12/11 11:57
@Author : Corey
"""
from flask import Flask, request
from flask_restful import Api, Resource, marshal, fields, reqparse
app = Flask(__name__)
# RESTful API interface
api = Api(app)
class UserApi(Resource):
def get(self):
return 'get restful api data'
def post(self):
return 'update restful api data'
def delete(self):
return 'delete restful api data '
api.add_resource(UserApi, '/users', endpoint='user')
if __name__ == '__main__':
app.run()
| 19.814815
| 66
| 0.648598
|
from flask import Flask, request
from flask_restful import Api, Resource, marshal, fields, reqparse
app = Flask(__name__)
api = Api(app)
class UserApi(Resource):
def get(self):
return 'get restful api data'
def post(self):
return 'update restful api data'
def delete(self):
return 'delete restful api data '
api.add_resource(UserApi, '/users', endpoint='user')
if __name__ == '__main__':
app.run()
| true
| true
|
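The UserApi resource above maps GET, POST, and DELETE onto /users. A quick way to exercise it, assuming the app is started with Flask's default development server on localhost:5000 (an assumption, not stated in the source):

```python
# Sketch: call the /users resource of the running Flask app; host and port are assumed defaults.
import requests

base = "http://127.0.0.1:5000/users"
print(requests.get(base).json())     # -> "get restful api data"
print(requests.post(base).json())    # -> "update restful api data"
print(requests.delete(base).json())  # -> "delete restful api data "
```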
f71469a4c65abe3c8976410b9d79a0dd097398f2
| 379
|
py
|
Python
|
dbaas/physical/admin/vip_instance_group.py
|
amintasvrp/database-as-a-service
|
8221df604f9252ddf877cd2216bdf1e3f76220ba
|
[
"BSD-3-Clause"
] | 303
|
2015-01-08T10:35:54.000Z
|
2022-02-28T08:54:06.000Z
|
dbaas/physical/admin/vip_instance_group.py
|
amintasvrp/database-as-a-service
|
8221df604f9252ddf877cd2216bdf1e3f76220ba
|
[
"BSD-3-Clause"
] | 124
|
2015-01-14T12:56:15.000Z
|
2022-03-22T20:45:11.000Z
|
dbaas/physical/admin/vip_instance_group.py
|
amintasvrp/database-as-a-service
|
8221df604f9252ddf877cd2216bdf1e3f76220ba
|
[
"BSD-3-Clause"
] | 110
|
2015-01-02T11:59:48.000Z
|
2022-02-28T08:54:06.000Z
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from django.contrib import admin
class VipInstanceGroupAdmin(admin.ModelAdmin):
search_fields = ("name", "identifier",)
list_display = ("name", "identifier", "vip", )
search_fields = ("name", "identifier", "vip__infra__name")
list_filter = ('vip__infra',)
#save_on_top = True
| 31.583333
| 62
| 0.699208
|
from __future__ import absolute_import, unicode_literals
from django.contrib import admin
class VipInstanceGroupAdmin(admin.ModelAdmin):
search_fields = ("name", "identifier",)
list_display = ("name", "identifier", "vip", )
search_fields = ("name", "identifier", "vip__infra__name")
list_filter = ('vip__infra',)
| true
| true
|
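To take effect, an admin class like this is registered against its model. A sketch of that wiring; the model import path and registration call are assumptions based on the file's location, not taken from the project:

```python
# Sketch: register the admin class; import paths are assumed, not from the source.
from django.contrib import admin
from physical.models import VipInstanceGroup
from physical.admin.vip_instance_group import VipInstanceGroupAdmin

admin.site.register(VipInstanceGroup, VipInstanceGroupAdmin)
```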
f71469ad5f5395756c71d0408eb3aba396919d37
| 3,078
|
py
|
Python
|
denorm/join_defer.py
|
rivethealth/denorm
|
c9b9070730e3cc7fbe78927d34db7ffa384aed42
|
[
"MIT"
] | 11
|
2021-03-29T14:27:48.000Z
|
2022-01-01T00:31:40.000Z
|
denorm/join_defer.py
|
rivethealth/denorm
|
c9b9070730e3cc7fbe78927d34db7ffa384aed42
|
[
"MIT"
] | null | null | null |
denorm/join_defer.py
|
rivethealth/denorm
|
c9b9070730e3cc7fbe78927d34db7ffa384aed42
|
[
"MIT"
] | null | null | null |
import typing
from pg_sql import SqlId, SqlNumber, SqlObject, SqlString, sql_list
from .format import format
from .join_common import JoinTarget, Key, Structure
from .join_key import KeyConsumer, TargetRefresh
from .sql import SqlTableExpr
from .sql_query import sync_query, upsert_query
from .string import indent
def create_refresh_function(
id: str,
structure: Structure,
refresh: TargetRefresh,
):
refresh_function = structure.refresh_function()
refresh_table = structure.refresh_table()
key_table = structure.key_table()
refresh_sql = refresh.sql(f"TABLE {key_table}", None)
yield f"""
CREATE FUNCTION {refresh_function} () RETURNS trigger
LANGUAGE plpgsql AS $$
BEGIN
-- analyze
ANALYZE {refresh_table};
-- refresh
{indent(str(refresh_sql), 2)}
-- clear refresh
DELETE FROM {refresh_table};
RETURN NULL;
END;
$$
""".strip()
yield f"""
COMMENT ON FUNCTION {refresh_function} IS {SqlString(f'Refresh {id}')}
""".strip()
def create_setup_function(
structure: Structure,
id: str,
key: Key,
target: JoinTarget,
):
key_table = structure.key_table()
refresh_constraint = structure.refresh_constraint()
refresh_function = structure.refresh_function()
refresh_table = structure.refresh_table()
setup_function = structure.setup_function()
yield f"""
CREATE FUNCTION {setup_function} () RETURNS void
LANGUAGE plpgsql AS $$
BEGIN
IF to_regclass({SqlString(str(refresh_table))}) IS NOT NULL THEN
RETURN;
END IF;
CREATE TEMP TABLE {key_table}
ON COMMIT DELETE ROWS
AS {key.definition}
WITH NO DATA;
ALTER TABLE {key_table}
ADD PRIMARY KEY ({sql_list([SqlId(name) for name in key.names])});
CREATE TEMP TABLE {refresh_table} (
) ON COMMIT DELETE ROWS;
CREATE CONSTRAINT TRIGGER {refresh_constraint} AFTER INSERT ON {refresh_table}
DEFERRABLE INITIALLY DEFERRED
FOR EACH ROW EXECUTE PROCEDURE {refresh_function}();
END
$$
""".strip()
yield f"""
COMMENT ON FUNCTION {setup_function} IS {SqlString(f"Set up temp tables for {id}")}
""".strip()
class DeferredKeys(KeyConsumer):
def __init__(self, key: typing.List[str], structure: Structure):
self._key = key
self._structure = structure
def sql(
self,
key_query: str,
table_id: str,
exprs: typing.List[SqlTableExpr] = [],
last_expr: typing.Optional[str] = None,
):
setup_function = self._structure.setup_function()
refresh_table = self._structure.refresh_table()
query = upsert_query(
columns=self._key,
key=self._key,
query=key_query,
target=self._structure.key_table(),
)
for expr in reversed(exprs):
query.prepend(expr)
if last_expr is not None:
query.append(SqlId("_other"), last_expr)
return f"""
PERFORM {setup_function}();
{query};
INSERT INTO {refresh_table}
SELECT
WHERE NOT EXISTS (TABLE {refresh_table});
""".strip()
| 24.428571
| 83
| 0.668291
|
import typing
from pg_sql import SqlId, SqlNumber, SqlObject, SqlString, sql_list
from .format import format
from .join_common import JoinTarget, Key, Structure
from .join_key import KeyConsumer, TargetRefresh
from .sql import SqlTableExpr
from .sql_query import sync_query, upsert_query
from .string import indent
def create_refresh_function(
id: str,
structure: Structure,
refresh: TargetRefresh,
):
refresh_function = structure.refresh_function()
refresh_table = structure.refresh_table()
key_table = structure.key_table()
refresh_sql = refresh.sql(f"TABLE {key_table}", None)
yield f"""
CREATE FUNCTION {refresh_function} () RETURNS trigger
LANGUAGE plpgsql AS $$
BEGIN
-- analyze
ANALYZE {refresh_table};
-- refresh
{indent(str(refresh_sql), 2)}
-- clear refresh
DELETE FROM {refresh_table};
RETURN NULL;
END;
$$
""".strip()
yield f"""
COMMENT ON FUNCTION {refresh_function} IS {SqlString(f'Refresh {id}')}
""".strip()
def create_setup_function(
structure: Structure,
id: str,
key: Key,
target: JoinTarget,
):
key_table = structure.key_table()
refresh_constraint = structure.refresh_constraint()
refresh_function = structure.refresh_function()
refresh_table = structure.refresh_table()
setup_function = structure.setup_function()
yield f"""
CREATE FUNCTION {setup_function} () RETURNS void
LANGUAGE plpgsql AS $$
BEGIN
IF to_regclass({SqlString(str(refresh_table))}) IS NOT NULL THEN
RETURN;
END IF;
CREATE TEMP TABLE {key_table}
ON COMMIT DELETE ROWS
AS {key.definition}
WITH NO DATA;
ALTER TABLE {key_table}
ADD PRIMARY KEY ({sql_list([SqlId(name) for name in key.names])});
CREATE TEMP TABLE {refresh_table} (
) ON COMMIT DELETE ROWS;
CREATE CONSTRAINT TRIGGER {refresh_constraint} AFTER INSERT ON {refresh_table}
DEFERRABLE INITIALLY DEFERRED
FOR EACH ROW EXECUTE PROCEDURE {refresh_function}();
END
$$
""".strip()
yield f"""
COMMENT ON FUNCTION {setup_function} IS {SqlString(f"Set up temp tables for {id}")}
""".strip()
class DeferredKeys(KeyConsumer):
def __init__(self, key: typing.List[str], structure: Structure):
self._key = key
self._structure = structure
def sql(
self,
key_query: str,
table_id: str,
exprs: typing.List[SqlTableExpr] = [],
last_expr: typing.Optional[str] = None,
):
setup_function = self._structure.setup_function()
refresh_table = self._structure.refresh_table()
query = upsert_query(
columns=self._key,
key=self._key,
query=key_query,
target=self._structure.key_table(),
)
for expr in reversed(exprs):
query.prepend(expr)
if last_expr is not None:
query.append(SqlId("_other"), last_expr)
return f"""
PERFORM {setup_function}();
{query};
INSERT INTO {refresh_table}
SELECT
WHERE NOT EXISTS (TABLE {refresh_table});
""".strip()
| true
| true
|
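Both create_refresh_function and create_setup_function follow the same pattern: a generator yields one SQL statement per yield, and the caller joins or executes the statements in order. A self-contained sketch of that consumption pattern; the table and statements here are illustrative only, not denorm's actual output:

```python
# Sketch of the "generator of DDL statements" pattern; names are illustrative.
from typing import Iterator


def create_example_statements(table: str) -> Iterator[str]:
    """Yield DDL statements for a hypothetical scratch table, one per yield."""
    yield f"CREATE TEMP TABLE {table} (id bigint PRIMARY KEY) ON COMMIT DELETE ROWS"
    yield f"COMMENT ON TABLE {table} IS 'scratch table for deferred refresh'"


# Callers typically join the yielded statements into one script (or run each
# through a DB-API cursor) in the order they were produced.
script = ";\n".join(create_example_statements("example_refresh")) + ";"
print(script)
```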
f7146a3b8bd5c67332e642ddcaf6c0846b506b4b
| 1,568
|
py
|
Python
|
harstorage/config/environment.py
|
beenanner/harstorage
|
c45e735d9d28cb951e70d0c783d5678996ef31ad
|
[
"Apache-2.0"
] | null | null | null |
harstorage/config/environment.py
|
beenanner/harstorage
|
c45e735d9d28cb951e70d0c783d5678996ef31ad
|
[
"Apache-2.0"
] | null | null | null |
harstorage/config/environment.py
|
beenanner/harstorage
|
c45e735d9d28cb951e70d0c783d5678996ef31ad
|
[
"Apache-2.0"
] | null | null | null |
import os
from mako.lookup import TemplateLookup
from pylons.configuration import PylonsConfig
from pylons.error import handle_mako_error
import harstorage.lib.app_globals as app_globals
import harstorage.lib.helpers
from harstorage.config.routing import make_map
def load_environment(global_conf, app_conf):
"""Configure the Pylons environment via the ``pylons.config`` object"""
config = PylonsConfig()
# Pylons paths
root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
paths = dict(root=root,
controllers=os.path.join(root, "controllers"),
static_files=os.path.join(root, "public"),
templates=[os.path.join(root, "templates")])
# Initialize config with the basic options
config.init_app(global_conf, app_conf, package="harstorage", paths=paths)
config["routes.map"] = make_map(config)
config["pylons.app_globals"] = app_globals.Globals(config)
config["pylons.h"] = harstorage.lib.helpers
# Setup cache object as early as possible
import pylons
pylons.cache._push_object(config["pylons.app_globals"].cache)
# Create the Mako TemplateLookup, with the default auto-escaping
config["pylons.app_globals"].mako_lookup = TemplateLookup(
directories=paths["templates"],
error_handler=handle_mako_error,
module_directory=os.path.join(app_conf["cache_dir"], "templates"),
input_encoding="utf-8",
default_filters=["escape"],
imports=["from webhelpers.html import escape"])
return config
| 34.844444
| 77
| 0.711097
|
import os
from mako.lookup import TemplateLookup
from pylons.configuration import PylonsConfig
from pylons.error import handle_mako_error
import harstorage.lib.app_globals as app_globals
import harstorage.lib.helpers
from harstorage.config.routing import make_map
def load_environment(global_conf, app_conf):
config = PylonsConfig()
root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
paths = dict(root=root,
controllers=os.path.join(root, "controllers"),
static_files=os.path.join(root, "public"),
templates=[os.path.join(root, "templates")])
config.init_app(global_conf, app_conf, package="harstorage", paths=paths)
config["routes.map"] = make_map(config)
config["pylons.app_globals"] = app_globals.Globals(config)
config["pylons.h"] = harstorage.lib.helpers
import pylons
pylons.cache._push_object(config["pylons.app_globals"].cache)
config["pylons.app_globals"].mako_lookup = TemplateLookup(
directories=paths["templates"],
error_handler=handle_mako_error,
module_directory=os.path.join(app_conf["cache_dir"], "templates"),
input_encoding="utf-8",
default_filters=["escape"],
imports=["from webhelpers.html import escape"])
return config
| true
| true
|
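The TemplateLookup above enables auto-escaping by routing every template expression through an escape default filter (webhelpers' escape, imported into each template module). A small sketch of what that filter setting does; it uses Mako's built-in "h" HTML-escape filter instead of webhelpers so it runs with no extra dependencies:

```python
# Sketch: default_filters-based auto-escaping in Mako ("h" stands in for webhelpers.html.escape).
from mako.template import Template

tmpl = Template("Hello, ${name}!", default_filters=["h"])
print(tmpl.render(name="<script>alert(1)</script>"))
# -> Hello, &lt;script&gt;alert(1)&lt;/script&gt;!
```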
f7146ac1b88ddd7645aeea7aa509f16d3d4bf02a
| 553
|
py
|
Python
|
code/exampleStrats/forgivingGrimTrigger.py
|
Texashokies/PrisonersDilemmaTournament
|
096c131bc774cc5efd29a3b723f9f73ab5a874be
|
[
"MIT"
] | null | null | null |
code/exampleStrats/forgivingGrimTrigger.py
|
Texashokies/PrisonersDilemmaTournament
|
096c131bc774cc5efd29a3b723f9f73ab5a874be
|
[
"MIT"
] | null | null | null |
code/exampleStrats/forgivingGrimTrigger.py
|
Texashokies/PrisonersDilemmaTournament
|
096c131bc774cc5efd29a3b723f9f73ab5a874be
|
[
"MIT"
] | null | null | null |
# Strategy known as "Forrgviing Grim Trigger" or "Grudger".
# We will cooperate repeatedly until our opponent betrays us twice.
# Then, we will get angry and defect for the rest of time.
# Memory is the number of times the strategy has been wronged
def strategy(history, memory):
wronged = memory
if history.shape[1] ==0:
wronged = 0
if history.shape[1] >= 1 and history[1,-1] == 0: # Just got wronged.
wronged += 1
if wronged >= 2:
return 0, wronged
else:
return 1, wronged
| 32.529412
| 73
| 0.625678
|
def strategy(history, memory):
wronged = memory
if history.shape[1] ==0:
wronged = 0
if history.shape[1] >= 1 and history[1,-1] == 0:
wronged += 1
if wronged >= 2:
return 0, wronged
else:
return 1, wronged
| true
| true
|
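history is a 2xN array of past moves (0 = defect, 1 = cooperate); the code only inspects row 1, the opponent's moves, and memory carries the "wronged" counter between rounds. A self-contained check of the two-betrayal trigger, assuming the strategy function above is in scope:

```python
# Sketch: drive the strategy by hand; row 0 holds this player's own (assumed) moves,
# row 1 the opponent's, which is all the strategy reads.
import numpy as np

opponent = [1, 0, 0]          # opponent cooperates once, then defects twice
memory = None
for n in range(len(opponent) + 1):
    history = np.array([[1] * n, opponent[:n]])
    move, memory = strategy(history, memory)
    print(f"round {n}: play {move} (wronged={memory})")
# After the second defection, the strategy returns 0 (defect) from then on.
```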
f7146c422cdc8ea2780342f0d121cce5a78ee0fb
| 516
|
py
|
Python
|
src/apps/datasets/views.py
|
binfeng1018/competitions-v2
|
173ea6053b7eda5de3a9f1a687dfb0d43bfc4e9c
|
[
"Apache-2.0"
] | 19
|
2018-07-27T19:14:10.000Z
|
2021-12-08T16:34:42.000Z
|
src/apps/datasets/views.py
|
binfeng1018/competitions-v2
|
173ea6053b7eda5de3a9f1a687dfb0d43bfc4e9c
|
[
"Apache-2.0"
] | 516
|
2017-07-27T15:45:43.000Z
|
2022-02-10T07:57:46.000Z
|
src/apps/datasets/views.py
|
binfeng1018/competitions-v2
|
173ea6053b7eda5de3a9f1a687dfb0d43bfc4e9c
|
[
"Apache-2.0"
] | 16
|
2018-01-01T19:07:01.000Z
|
2021-09-17T07:59:59.000Z
|
from django.contrib.auth.mixins import LoginRequiredMixin
from django.http import HttpResponseRedirect
from django.shortcuts import get_object_or_404
from django.views.generic import TemplateView
from datasets.models import Data
from utils.data import make_url_sassy
class DataManagement(LoginRequiredMixin, TemplateView):
template_name = 'datasets/management.html'
def download(request, key):
data = get_object_or_404(Data, key=key)
return HttpResponseRedirect(make_url_sassy(data.data_file.name))
| 30.352941
| 68
| 0.827519
|
from django.contrib.auth.mixins import LoginRequiredMixin
from django.http import HttpResponseRedirect
from django.shortcuts import get_object_or_404
from django.views.generic import TemplateView
from datasets.models import Data
from utils.data import make_url_sassy
class DataManagement(LoginRequiredMixin, TemplateView):
template_name = 'datasets/management.html'
def download(request, key):
data = get_object_or_404(Data, key=key)
return HttpResponseRedirect(make_url_sassy(data.data_file.name))
| true
| true
|
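The download view redirects to a signed URL for the Data object looked up by key. A sketch of how it might be wired into a URLconf; the pattern and names are assumptions, not taken from the project's urls.py:

```python
# Sketch: route the download view; the URL pattern and names are assumed.
from django.urls import path
from datasets import views

urlpatterns = [
    path("datasets/download/<key>/", views.download, name="dataset-download"),
]
```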
f7146d634f0c817722831bea4a10f95b951b6a3b
| 310
|
py
|
Python
|
file_and_io/tell.py
|
daiyadong/python2_learn
|
6930ddc3dd2edfc31064b1a8001ad826a775e912
|
[
"Apache-2.0"
] | null | null | null |
file_and_io/tell.py
|
daiyadong/python2_learn
|
6930ddc3dd2edfc31064b1a8001ad826a775e912
|
[
"Apache-2.0"
] | null | null | null |
file_and_io/tell.py
|
daiyadong/python2_learn
|
6930ddc3dd2edfc31064b1a8001ad826a775e912
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python2
# -*- coding: UTF-8 -*-
# Open a file
fo = open("foo.txt", "r+")
str = fo.read(10)
print "String read : ", str
# Find the current position
position = fo.tell()
print "Current file position : ", position
# Reposition the pointer back to the beginning of the file
position = fo.seek(0, 0)
str = fo.read(10)
print "Re-read string : ", str
# Close the opened file
fo.close()
| 17.222222
| 28
| 0.580645
|
fo = open("foo.txt", "r+")
str = fo.read(10)
print "读取的字符串是 : ", str
position = fo.tell()
print "当前文件位置 : ", position
position = fo.seek(0, 0)
str = fo.read(10)
print "重新读取字符串 : ", str
fo.close()
| false
| true
|
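The snippet targets Python 2 (print statements, no context manager). The same tell()/seek() flow in Python 3, for comparison; it still assumes a local foo.txt exists:

```python
# Sketch: Python 3 equivalent of the tell()/seek() demo above (foo.txt assumed to exist).
with open("foo.txt", "r+") as fo:
    text = fo.read(10)
    print("String read :", text)

    position = fo.tell()      # current position in the stream
    print("Current file position :", position)

    fo.seek(0, 0)             # rewind to the start of the file
    print("Re-read string :", fo.read(10))
```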
f7146d93278ba64bba1fb20d47d61c403f8494f0
| 32,956
|
py
|
Python
|
qa/rpc-tests/fundrawtransaction.py
|
Neslin247/Draupnir
|
1ffd83f4d96be293a6bceb5620d6daf7cb892e42
|
[
"MIT"
] | null | null | null |
qa/rpc-tests/fundrawtransaction.py
|
Neslin247/Draupnir
|
1ffd83f4d96be293a6bceb5620d6daf7cb892e42
|
[
"MIT"
] | null | null | null |
qa/rpc-tests/fundrawtransaction.py
|
Neslin247/Draupnir
|
1ffd83f4d96be293a6bceb5620d6daf7cb892e42
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
def get_unspent(listunspent, amount):
for utx in listunspent:
if utx['amount'] == amount:
return utx
raise AssertionError('Could not find unspent with amount={}'.format(amount))
class RawTransactionsTest(BitcoinTestFramework):
def __init__(self):
super().__init__()
self.setup_clean_chain = True
self.num_nodes = 4
def setup_network(self, split=False):
self.nodes = start_nodes(self.num_nodes, self.options.tmpdir)
connect_nodes_bi(self.nodes,0,1)
connect_nodes_bi(self.nodes,1,2)
connect_nodes_bi(self.nodes,0,2)
connect_nodes_bi(self.nodes,0,3)
self.is_network_split=False
self.sync_all()
def run_test(self):
print("Mining blocks...")
min_relay_tx_fee = self.nodes[0].getnetworkinfo()['relayfee']
# This test is not meant to test fee estimation and we'd like
# to be sure all txs are sent at a consistent desired feerate
for node in self.nodes:
node.settxfee(min_relay_tx_fee)
# if the fee's positive delta is higher than this value the tests will fail;
# a negative delta always fails the tests.
# The size of the signature of every input may be at most 2 bytes larger
# than a minimum sized signature.
# = 2 bytes * minRelayTxFeePerByte
feeTolerance = 2 * min_relay_tx_fee/1000
self.nodes[2].generate(1)
self.sync_all()
self.nodes[0].generate(121)
self.sync_all()
# ensure that setting changePosition in fundraw with an exact match is handled properly
rawmatch = self.nodes[2].createrawtransaction([], {self.nodes[2].getnewaddress():500000})
rawmatch = self.nodes[2].fundrawtransaction(rawmatch, {"changePosition":1, "subtractFeeFromOutputs":[0]})
assert_equal(rawmatch["changepos"], -1)
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 15)
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 10)
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 50)
self.nodes[0].generate(1)
self.sync_all()
###############
# simple test #
###############
inputs = [ ]
outputs = { self.nodes[0].getnewaddress() : 1.0 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
assert(len(dec_tx['vin']) > 0) #test that we have enough inputs
##############################
# simple test with two coins #
##############################
inputs = [ ]
outputs = { self.nodes[0].getnewaddress() : 22 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
assert(len(dec_tx['vin']) > 0) #test if we have enough inputs
##############################
# simple test with two coins #
##############################
inputs = [ ]
outputs = { self.nodes[0].getnewaddress() : 26 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
assert(len(dec_tx['vin']) > 0)
assert_equal(dec_tx['vin'][0]['scriptSig']['hex'], '')
################################
# simple test with two outputs #
################################
inputs = [ ]
outputs = { self.nodes[0].getnewaddress() : 26, self.nodes[1].getnewaddress() : 25 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
for out in dec_tx['vout']:
totalOut += out['value']
assert(len(dec_tx['vin']) > 0)
assert_equal(dec_tx['vin'][0]['scriptSig']['hex'], '')
#########################################################################
# test a fundrawtransaction with a VIN greater than the required amount #
#########################################################################
utx = get_unspent(self.nodes[2].listunspent(), 50)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']}]
outputs = { self.nodes[0].getnewaddress() : 10 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
for out in dec_tx['vout']:
totalOut += out['value']
assert_equal(fee + totalOut, utx['amount']) #compare vin total and totalout+fee
#####################################################################
# test a fundrawtransaction with which will not get a change output #
#####################################################################
utx = get_unspent(self.nodes[2].listunspent(), 50)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']}]
# Draupnir: Fee is exact, do not use tolerance
outputs = { self.nodes[0].getnewaddress() : Decimal(50) - fee }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
for out in dec_tx['vout']:
totalOut += out['value']
assert_equal(rawtxfund['changepos'], -1)
assert_equal(fee + totalOut, utx['amount']) #compare vin total and totalout+fee
####################################################
# test a fundrawtransaction with an invalid option #
####################################################
utx = get_unspent(self.nodes[2].listunspent(), 50)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']} ]
outputs = { self.nodes[0].getnewaddress() : Decimal(40) }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
assert_raises_jsonrpc(-3, "Unexpected key foo", self.nodes[2].fundrawtransaction, rawtx, {'foo':'bar'})
############################################################
# test a fundrawtransaction with an invalid change address #
############################################################
utx = get_unspent(self.nodes[2].listunspent(), 50)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']} ]
outputs = { self.nodes[0].getnewaddress() : Decimal(40) }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
assert_raises_jsonrpc(-5, "changeAddress must be a valid draupnir address", self.nodes[2].fundrawtransaction, rawtx, {'changeAddress':'foobar'})
############################################################
# test a fundrawtransaction with a provided change address #
############################################################
utx = get_unspent(self.nodes[2].listunspent(), 50)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']} ]
# Draupnir: Reduce this output so the fee doesn't leave us with no change
outputs = { self.nodes[0].getnewaddress() : Decimal(25) }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
change = self.nodes[2].getnewaddress()
assert_raises_jsonrpc(-8, "changePosition out of bounds", self.nodes[2].fundrawtransaction, rawtx, {'changeAddress':change, 'changePosition':2})
rawtxfund = self.nodes[2].fundrawtransaction(rawtx, {'changeAddress': change, 'changePosition': 0})
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
out = dec_tx['vout'][0]
assert_equal(change, out['scriptPubKey']['addresses'][0])
#########################################################################
# test a fundrawtransaction with a VIN smaller than the required amount #
#########################################################################
utx = get_unspent(self.nodes[2].listunspent(), 10)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']}]
outputs = { self.nodes[0].getnewaddress() : 10 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
# 4-byte version + 1-byte vin count + 36-byte prevout then script_len
rawtx = rawtx[:82] + "0100" + rawtx[84:]
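# (Offset arithmetic: 4-byte version = 8 hex chars, 1-byte vin count = 2, 36-byte
# prevout (txid + vout) = 72, so the scriptSig length byte occupies hex chars 82-83.
# Replacing it with "0100" turns the empty scriptSig into a one-byte 0x00 script,
# which the asserts below check for.)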
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
assert_equal("00", dec_tx['vin'][0]['scriptSig']['hex'])
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
matchingOuts = 0
for i, out in enumerate(dec_tx['vout']):
totalOut += out['value']
if out['scriptPubKey']['addresses'][0] in outputs:
matchingOuts+=1
else:
assert_equal(i, rawtxfund['changepos'])
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
assert_equal("00", dec_tx['vin'][0]['scriptSig']['hex'])
assert_equal(matchingOuts, 1)
assert_equal(len(dec_tx['vout']), 2)
###########################################
# test a fundrawtransaction with two VINs #
###########################################
utx = get_unspent(self.nodes[2].listunspent(), 10)
utx2 = get_unspent(self.nodes[2].listunspent(), 50)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']},{'txid' : utx2['txid'], 'vout' : utx2['vout']} ]
outputs = { self.nodes[0].getnewaddress() : 60 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
matchingOuts = 0
for out in dec_tx['vout']:
totalOut += out['value']
if out['scriptPubKey']['addresses'][0] in outputs:
matchingOuts+=1
assert_equal(matchingOuts, 1)
assert_equal(len(dec_tx['vout']), 2)
matchingIns = 0
for vinOut in dec_tx['vin']:
for vinIn in inputs:
if vinIn['txid'] == vinOut['txid']:
matchingIns+=1
assert_equal(matchingIns, 2) #we now must see two vins identical to vins given as params
#########################################################
# test a fundrawtransaction with two VINs and two vOUTs #
#########################################################
utx = get_unspent(self.nodes[2].listunspent(), 10)
utx2 = get_unspent(self.nodes[2].listunspent(), 50)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']},{'txid' : utx2['txid'], 'vout' : utx2['vout']} ]
outputs = { self.nodes[0].getnewaddress() : 60, self.nodes[0].getnewaddress() : 10 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
matchingOuts = 0
for out in dec_tx['vout']:
totalOut += out['value']
if out['scriptPubKey']['addresses'][0] in outputs:
matchingOuts+=1
assert_equal(matchingOuts, 2)
assert_equal(len(dec_tx['vout']), 3)
##############################################
# test a fundrawtransaction with invalid vin #
##############################################
listunspent = self.nodes[2].listunspent()
inputs = [ {'txid' : "1c7f966dab21119bac53213a2bc7532bff1fa844c124fd750a7d0b1332440bd1", 'vout' : 0} ] #invalid vin!
outputs = { self.nodes[0].getnewaddress() : 10}
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_raises_jsonrpc(-4, "Insufficient funds", self.nodes[2].fundrawtransaction, rawtx)
############################################################
#compare fee of a standard pubkeyhash transaction
inputs = []
outputs = {self.nodes[1].getnewaddress():11}
rawTx = self.nodes[0].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[0].fundrawtransaction(rawTx)
#create same transaction over sendtoaddress
txId = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 11)
signedFee = self.nodes[0].getrawmempool(True)[txId]['fee']
#compare fee
feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
assert(feeDelta >= 0 and feeDelta <= feeTolerance)
############################################################
############################################################
#compare fee of a standard pubkeyhash transaction with multiple outputs
inputs = []
outputs = {self.nodes[1].getnewaddress():110,self.nodes[1].getnewaddress():120,self.nodes[1].getnewaddress():10,self.nodes[1].getnewaddress():130,self.nodes[1].getnewaddress():20,self.nodes[1].getnewaddress():30}
rawTx = self.nodes[0].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[0].fundrawtransaction(rawTx)
#create same transaction over sendtoaddress
txId = self.nodes[0].sendmany("", outputs)
signedFee = self.nodes[0].getrawmempool(True)[txId]['fee']
#compare fee
feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
assert(feeDelta >= 0 and feeDelta <= feeTolerance)
############################################################
############################################################
#compare fee of a 2of2 multisig p2sh transaction
# create 2of2 addr
addr1 = self.nodes[1].getnewaddress()
addr2 = self.nodes[1].getnewaddress()
addr1Obj = self.nodes[1].validateaddress(addr1)
addr2Obj = self.nodes[1].validateaddress(addr2)
mSigObj = self.nodes[1].addmultisigaddress(2, [addr1Obj['pubkey'], addr2Obj['pubkey']])
inputs = []
outputs = {mSigObj:1.1}
rawTx = self.nodes[0].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[0].fundrawtransaction(rawTx)
#create same transaction over sendtoaddress
txId = self.nodes[0].sendtoaddress(mSigObj, 11)
signedFee = self.nodes[0].getrawmempool(True)[txId]['fee']
#compare fee
feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
assert(feeDelta >= 0 and feeDelta <= feeTolerance)
############################################################
############################################################
#compare fee of a standard pubkeyhash transaction
# create 4of5 addr
addr1 = self.nodes[1].getnewaddress()
addr2 = self.nodes[1].getnewaddress()
addr3 = self.nodes[1].getnewaddress()
addr4 = self.nodes[1].getnewaddress()
addr5 = self.nodes[1].getnewaddress()
addr1Obj = self.nodes[1].validateaddress(addr1)
addr2Obj = self.nodes[1].validateaddress(addr2)
addr3Obj = self.nodes[1].validateaddress(addr3)
addr4Obj = self.nodes[1].validateaddress(addr4)
addr5Obj = self.nodes[1].validateaddress(addr5)
mSigObj = self.nodes[1].addmultisigaddress(4, [addr1Obj['pubkey'], addr2Obj['pubkey'], addr3Obj['pubkey'], addr4Obj['pubkey'], addr5Obj['pubkey']])
inputs = []
outputs = {mSigObj:1.1}
rawTx = self.nodes[0].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[0].fundrawtransaction(rawTx)
#create same transaction over sendtoaddress
txId = self.nodes[0].sendtoaddress(mSigObj, 11)
signedFee = self.nodes[0].getrawmempool(True)[txId]['fee']
#compare fee
feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
assert(feeDelta >= 0 and feeDelta <= feeTolerance)
############################################################
############################################################
# spend a 2of2 multisig transaction over fundraw
# create 2of2 addr
addr1 = self.nodes[2].getnewaddress()
addr2 = self.nodes[2].getnewaddress()
addr1Obj = self.nodes[2].validateaddress(addr1)
addr2Obj = self.nodes[2].validateaddress(addr2)
mSigObj = self.nodes[2].addmultisigaddress(2, [addr1Obj['pubkey'], addr2Obj['pubkey']])
# send 1.2 BTC to msig addr
txId = self.nodes[0].sendtoaddress(mSigObj, 12)
self.sync_all()
self.nodes[1].generate(1)
self.sync_all()
oldBalance = self.nodes[1].getbalance()
inputs = []
outputs = {self.nodes[1].getnewaddress():11}
rawTx = self.nodes[2].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[2].fundrawtransaction(rawTx)
signedTx = self.nodes[2].signrawtransaction(fundedTx['hex'])
txId = self.nodes[2].sendrawtransaction(signedTx['hex'])
self.sync_all()
self.nodes[1].generate(1)
self.sync_all()
# make sure funds are received at node1
assert_equal(oldBalance+Decimal('11.0000000'), self.nodes[1].getbalance())
############################################################
# locked wallet test
self.nodes[1].encryptwallet("test")
self.nodes.pop(1)
stop_node(self.nodes[0], 0)
stop_node(self.nodes[1], 2)
stop_node(self.nodes[2], 3)
self.nodes = start_nodes(self.num_nodes, self.options.tmpdir)
# This test is not meant to test fee estimation and we'd like
# to be sure all txs are sent at a consistent desired feerate
for node in self.nodes:
node.settxfee(min_relay_tx_fee)
connect_nodes_bi(self.nodes,0,1)
connect_nodes_bi(self.nodes,1,2)
connect_nodes_bi(self.nodes,0,2)
connect_nodes_bi(self.nodes,0,3)
self.is_network_split=False
self.sync_all()
# drain the keypool
self.nodes[1].getnewaddress()
inputs = []
outputs = {self.nodes[0].getnewaddress():11}
rawTx = self.nodes[1].createrawtransaction(inputs, outputs)
# fund a transaction that requires a new key for the change output
# creating the key must be impossible because the wallet is locked
assert_raises_jsonrpc(-4, "Insufficient funds", self.nodes[1].fundrawtransaction, rawtx)
#refill the keypool
self.nodes[1].walletpassphrase("test", 100)
self.nodes[1].walletlock()
assert_raises_jsonrpc(-13, "walletpassphrase", self.nodes[1].sendtoaddress, self.nodes[0].getnewaddress(), 12)
oldBalance = self.nodes[0].getbalance()
inputs = []
outputs = {self.nodes[0].getnewaddress():11}
rawTx = self.nodes[1].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[1].fundrawtransaction(rawTx)
#now we need to unlock
self.nodes[1].walletpassphrase("test", 600)
signedTx = self.nodes[1].signrawtransaction(fundedTx['hex'])
txId = self.nodes[1].sendrawtransaction(signedTx['hex'])
self.nodes[1].generate(1)
self.sync_all()
# make sure funds are received at node1
assert_equal(oldBalance+Decimal('500011.00000000'), self.nodes[0].getbalance())
###############################################
# multiple (~19) inputs tx test | Compare fee #
###############################################
#empty node1, send some small coins from node0 to node1
self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), self.nodes[1].getbalance(), "", "", True)
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
for i in range(0,20):
self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 20)
self.nodes[0].generate(1)
self.sync_all()
#fund a tx with ~20 small inputs
inputs = []
# Draupnir: TX size rounding gives us a fee of 4 RINGS
outputs = {self.nodes[0].getnewaddress():15,self.nodes[0].getnewaddress():4}
rawTx = self.nodes[1].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[1].fundrawtransaction(rawTx)
#create same transaction over sendtoaddress
txId = self.nodes[1].sendmany("", outputs)
signedFee = self.nodes[1].getrawmempool(True)[txId]['fee']
#compare fee
feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
assert(feeDelta >= 0 and feeDelta <= feeTolerance*19) #~19 inputs
#############################################
# multiple (~19) inputs tx test | sign/send #
#############################################
#again, empty node1, send some small coins from node0 to node1
self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), self.nodes[1].getbalance(), "", "", True)
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
for i in range(0,20):
self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 2)
self.nodes[0].generate(1)
self.sync_all()
#fund a tx with ~20 small inputs
oldBalance = self.nodes[0].getbalance()
inputs = []
outputs = {self.nodes[0].getnewaddress():15,self.nodes[0].getnewaddress():4}
rawTx = self.nodes[1].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[1].fundrawtransaction(rawTx)
fundedAndSignedTx = self.nodes[1].signrawtransaction(fundedTx['hex'])
txId = self.nodes[1].sendrawtransaction(fundedAndSignedTx['hex'])
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
assert_equal(oldBalance+Decimal('500019.00000000'), self.nodes[0].getbalance()) #19+block reward
#####################################################
# test fundrawtransaction with OP_RETURN and no vin #
#####################################################
rawtx = "0100000000010000000000000000066a047465737400000000"
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(len(dec_tx['vin']), 0)
assert_equal(len(dec_tx['vout']), 1)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
assert_greater_than(len(dec_tx['vin']), 0) # at least one vin
assert_equal(len(dec_tx['vout']), 2) # one change output added
##################################################
# test a fundrawtransaction using only watchonly #
##################################################
watchonly_address = self.nodes[0].getnewaddress()
watchonly_pubkey = self.nodes[0].validateaddress(watchonly_address)["pubkey"]
watchonly_amount = Decimal(2000)
self.nodes[3].importpubkey(watchonly_pubkey, "", True)
watchonly_txid = self.nodes[0].sendtoaddress(watchonly_address, watchonly_amount)
self.nodes[0].sendtoaddress(self.nodes[3].getnewaddress(), watchonly_amount / 10)
self.nodes[0].generate(1)
self.sync_all()
inputs = []
outputs = {self.nodes[2].getnewaddress() : watchonly_amount / 2}
rawtx = self.nodes[3].createrawtransaction(inputs, outputs)
result = self.nodes[3].fundrawtransaction(rawtx, {'includeWatching': True })
res_dec = self.nodes[0].decoderawtransaction(result["hex"])
assert_equal(len(res_dec["vin"]), 1)
assert_equal(res_dec["vin"][0]["txid"], watchonly_txid)
assert("fee" in result.keys())
assert_greater_than(result["changepos"], -1)
###############################################################
# test fundrawtransaction using the entirety of watched funds #
###############################################################
inputs = []
outputs = {self.nodes[2].getnewaddress() : watchonly_amount}
rawtx = self.nodes[3].createrawtransaction(inputs, outputs)
# Backward compatibility test (2nd param is includeWatching)
result = self.nodes[3].fundrawtransaction(rawtx, True)
res_dec = self.nodes[0].decoderawtransaction(result["hex"])
assert_equal(len(res_dec["vin"]), 2)
assert(res_dec["vin"][0]["txid"] == watchonly_txid or res_dec["vin"][1]["txid"] == watchonly_txid)
assert_greater_than(result["fee"], 0)
assert_greater_than(result["changepos"], -1)
assert_equal(result["fee"] + res_dec["vout"][result["changepos"]]["value"], watchonly_amount / 10)
signedtx = self.nodes[3].signrawtransaction(result["hex"])
assert(not signedtx["complete"])
signedtx = self.nodes[0].signrawtransaction(signedtx["hex"])
assert(signedtx["complete"])
self.nodes[0].sendrawtransaction(signedtx["hex"])
self.nodes[0].generate(1)
self.sync_all()
#######################
# Test feeRate option #
#######################
# Make sure there is exactly one input so coin selection can't skew the result
assert_equal(len(self.nodes[3].listunspent(1)), 1)
inputs = []
outputs = {self.nodes[3].getnewaddress() : 1}
rawtx = self.nodes[3].createrawtransaction(inputs, outputs)
result = self.nodes[3].fundrawtransaction(rawtx) # uses min_relay_tx_fee (set by settxfee)
result2 = self.nodes[3].fundrawtransaction(rawtx, {"feeRate": 2*min_relay_tx_fee})
result3 = self.nodes[3].fundrawtransaction(rawtx, {"feeRate": 10*min_relay_tx_fee})
result_fee_rate = result['fee'] * 1000 / round_tx_size(count_bytes(result['hex']))
assert_fee_amount(result2['fee'], count_bytes(result2['hex']), 2 * result_fee_rate)
assert_fee_amount(result3['fee'], count_bytes(result3['hex']), 10 * result_fee_rate)
#############################
# Test address reuse option #
#############################
result3 = self.nodes[3].fundrawtransaction(rawtx, {"reserveChangeKey": False})
res_dec = self.nodes[0].decoderawtransaction(result3["hex"])
changeaddress = ""
for out in res_dec['vout']:
if out['value'] > 1.0:
changeaddress += out['scriptPubKey']['addresses'][0]
assert(changeaddress != "")
nextaddr = self.nodes[3].getnewaddress()
# frt should not have removed the key from the keypool
assert(changeaddress == nextaddr)
result3 = self.nodes[3].fundrawtransaction(rawtx)
res_dec = self.nodes[0].decoderawtransaction(result3["hex"])
changeaddress = ""
for out in res_dec['vout']:
if out['value'] > 1.0:
changeaddress += out['scriptPubKey']['addresses'][0]
assert(changeaddress != "")
nextaddr = self.nodes[3].getnewaddress()
# Now the change address key should be removed from the keypool
assert(changeaddress != nextaddr)
######################################
# Test subtractFeeFromOutputs option #
######################################
# Make sure there is exactly one input so coin selection can't skew the result
assert_equal(len(self.nodes[3].listunspent(1)), 1)
inputs = []
outputs = {self.nodes[2].getnewaddress(): 10}
rawtx = self.nodes[3].createrawtransaction(inputs, outputs)
result = [self.nodes[3].fundrawtransaction(rawtx), # uses min_relay_tx_fee (set by settxfee)
self.nodes[3].fundrawtransaction(rawtx, {"subtractFeeFromOutputs": []}), # empty subtraction list
self.nodes[3].fundrawtransaction(rawtx, {"subtractFeeFromOutputs": [0]}), # uses min_relay_tx_fee (set by settxfee)
self.nodes[3].fundrawtransaction(rawtx, {"feeRate": 2*min_relay_tx_fee}),
self.nodes[3].fundrawtransaction(rawtx, {"feeRate": 2*min_relay_tx_fee, "subtractFeeFromOutputs": [0]})]
dec_tx = [self.nodes[3].decoderawtransaction(tx['hex']) for tx in result]
output = [d['vout'][1 - r['changepos']]['value'] for d, r in zip(dec_tx, result)]
change = [d['vout'][r['changepos']]['value'] for d, r in zip(dec_tx, result)]
assert_equal(result[0]['fee'], result[1]['fee'], result[2]['fee'])
assert_equal(result[3]['fee'], result[4]['fee'])
assert_equal(change[0], change[1])
assert_equal(output[0], output[1])
assert_equal(output[0], output[2] + result[2]['fee'])
assert_equal(change[0] + result[0]['fee'], change[2])
assert_equal(output[3], output[4] + result[4]['fee'])
assert_equal(change[3] + result[3]['fee'], change[4])
inputs = []
outputs = {self.nodes[2].getnewaddress(): value for value in (10, 11, 12, 13)}
keys = list(outputs.keys())
rawtx = self.nodes[3].createrawtransaction(inputs, outputs)
result = [self.nodes[3].fundrawtransaction(rawtx),
# split the fee between outputs 0, 2, and 3, but not output 1
self.nodes[3].fundrawtransaction(rawtx, {"subtractFeeFromOutputs": [0, 2, 3]})]
dec_tx = [self.nodes[3].decoderawtransaction(result[0]['hex']),
self.nodes[3].decoderawtransaction(result[1]['hex'])]
# Nested list of non-change output amounts for each transaction
output = [[out['value'] for i, out in enumerate(d['vout']) if i != r['changepos']]
for d, r in zip(dec_tx, result)]
# List of differences in output amounts between normal and subtractFee transactions
share = [o0 - o1 for o0, o1 in zip(output[0], output[1])]
# output 1 is the same in both transactions
assert_equal(share[1], 0)
# the other 3 outputs are smaller as a result of subtractFeeFromOutputs
assert_greater_than(share[0], 0)
assert_greater_than(share[2], 0)
assert_greater_than(share[3], 0)
# outputs 2 and 3 take the same share of the fee
assert_equal(share[2], share[3])
# output 0 takes at least as much share of the fee, and no more than 2 satoshis more, than outputs 2 and 3
assert_greater_than_or_equal(share[0], share[2])
assert_greater_than_or_equal(share[2] + Decimal(2e-8), share[0])
# the fee is the same in both transactions
assert_equal(result[0]['fee'], result[1]['fee'])
# the total subtracted from the outputs is equal to the fee
assert_equal(share[0] + share[2] + share[3], result[0]['fee'])
if __name__ == '__main__':
RawTransactionsTest().main()
| 44.176944
| 220
| 0.571155
|
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
def get_unspent(listunspent, amount):
for utx in listunspent:
if utx['amount'] == amount:
return utx
raise AssertionError('Could not find unspent with amount={}'.format(amount))
class RawTransactionsTest(BitcoinTestFramework):
def __init__(self):
super().__init__()
self.setup_clean_chain = True
self.num_nodes = 4
def setup_network(self, split=False):
self.nodes = start_nodes(self.num_nodes, self.options.tmpdir)
connect_nodes_bi(self.nodes,0,1)
connect_nodes_bi(self.nodes,1,2)
connect_nodes_bi(self.nodes,0,2)
connect_nodes_bi(self.nodes,0,3)
self.is_network_split=False
self.sync_all()
def run_test(self):
print("Mining blocks...")
min_relay_tx_fee = self.nodes[0].getnetworkinfo()['relayfee']
# to be sure all txs are sent at a consistent desired feerate
for node in self.nodes:
node.settxfee(min_relay_tx_fee)
# if the fee's positive delta is higher than this value tests will fail,
feeTolerance = 2 * min_relay_tx_fee/1000
self.nodes[2].generate(1)
self.sync_all()
self.nodes[0].generate(121)
self.sync_all()
rawmatch = self.nodes[2].createrawtransaction([], {self.nodes[2].getnewaddress():500000})
rawmatch = self.nodes[2].fundrawtransaction(rawmatch, {"changePosition":1, "subtractFeeFromOutputs":[0]})
assert_equal(rawmatch["changepos"], -1)
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 15)
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 10)
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 50)
self.nodes[0].generate(1)
self.sync_all()
inputs = [ ]
outputs = { self.nodes[0].getnewaddress() : 1.0 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
assert(len(dec_tx['vin']) > 0)
| true
| true
|
f7146e28902a4ba376e0f04a00b98ce31bf4575b
| 28,745
|
py
|
Python
|
ansible/roles/search_head/files/timestamp_app/lib/splunklib/searchcommands/internals.py
|
umbrio/attack_range
|
8bf1bbe5f6db051d3c8cf5d3f3c07cc38ca85bf6
|
[
"Apache-2.0"
] | 12
|
2017-04-04T11:51:09.000Z
|
2021-11-05T02:07:58.000Z
|
ansible/roles/search_head/files/timestamp_app/lib/splunklib/searchcommands/internals.py
|
umbrio/attack_range
|
8bf1bbe5f6db051d3c8cf5d3f3c07cc38ca85bf6
|
[
"Apache-2.0"
] | 21
|
2017-03-28T04:32:54.000Z
|
2021-09-01T03:52:53.000Z
|
ansible/roles/search_head/files/timestamp_app/lib/splunklib/searchcommands/internals.py
|
umbrio/attack_range
|
8bf1bbe5f6db051d3c8cf5d3f3c07cc38ca85bf6
|
[
"Apache-2.0"
] | 9
|
2018-07-31T04:15:22.000Z
|
2020-10-06T13:43:22.000Z
|
# coding=utf-8
#
# Copyright © 2011-2015 Splunk, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"): you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import, division, print_function
from io import TextIOWrapper
from collections import deque, namedtuple
from splunklib import six
try:
from collections import OrderedDict # must be python 2.7
except ImportError:
from ..ordereddict import OrderedDict
from splunklib.six.moves import StringIO
from itertools import chain
from splunklib.six.moves import map as imap
from json import JSONDecoder, JSONEncoder
from json.encoder import encode_basestring_ascii as json_encode_string
from splunklib.six.moves import urllib
import csv
import gzip
import os
import re
import sys
import warnings
from . import environment
csv.field_size_limit(10485760) # The default value is 128KB; upping to 10MB. See SPL-12117 for background on this issue
def set_binary_mode(fh):
""" Helper method to set up binary mode for file handles.
Emphasis being sys.stdin, sys.stdout, sys.stderr.
For python3, we want to return .buffer
For python2+windows we want to set os.O_BINARY
"""
typefile = TextIOWrapper if sys.version_info >= (3, 0) else file
# check for file handle
if not isinstance(fh, typefile):
return fh
# check for python3 and buffer
if sys.version_info >= (3, 0) and hasattr(fh, 'buffer'):
return fh.buffer
# check for python3
elif sys.version_info >= (3, 0):
pass
# check for windows python2. SPL-175233 -- python3 stdout is already binary
elif sys.platform == 'win32':
# Work around the fact that on Windows '\n' is mapped to '\r\n'. The typical solution is to simply open files in
# binary mode, but stdout is already open, thus this hack. 'CPython' and 'PyPy' work differently. We assume that
# all other Python implementations are compatible with 'CPython'. This might or might not be a valid assumption.
from platform import python_implementation
implementation = python_implementation()
if implementation == 'PyPy':
return os.fdopen(fh.fileno(), 'wb', 0)
else:
import msvcrt
msvcrt.setmode(fh.fileno(), os.O_BINARY)
return fh
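# Illustrative sketch (not part of the original module): RecordWriter below routes its
# output stream through set_binary_mode so raw bytes can be written on every platform.
# The helper name _demo_binary_stdout is hypothetical and exists only for demonstration.
def _demo_binary_stdout():
    out = set_binary_mode(sys.stdout)  # on Python 3 this is sys.stdout.buffer
    out.write(b'chunked 1.0,0,0\n')    # bytes, not text, go over the wire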
class CommandLineParser(object):
r""" Parses the arguments to a search command.
A search command line is described by the following syntax.
**Syntax**::
command = command-name *[wsp option] *[wsp [dquote] field-name [dquote]]
command-name = alpha *( alpha / digit )
option = option-name [wsp] "=" [wsp] option-value
option-name = alpha *( alpha / digit / "_" )
option-value = word / quoted-string
word = 1*( %01-%08 / %0B / %0C / %0E-1F / %21 / %23-%FF ) ; Any character but DQUOTE and WSP
quoted-string = dquote *( word / wsp / "\" dquote / dquote dquote ) dquote
field-name = ( "_" / alpha ) *( alpha / digit / "_" / "." / "-" )
**Note:**
This syntax is constrained to an 8-bit character set.
**Note:**
This syntax does not show that `field-name` values may be comma-separated when in fact they can be. This is
because Splunk strips commas from the command line. A custom search command will never see them.
**Example:**
countmatches fieldname = word_count pattern = \w+ some_text_field
Option names are mapped to properties in the targeted ``SearchCommand``. It is the responsibility of the property
setters to validate the values they receive. Property setters may also produce side effects. For example,
setting the built-in `log_level` immediately changes the `log_level`.
"""
@classmethod
def parse(cls, command, argv):
""" Splits an argument list into an options dictionary and a fieldname
list.
The argument list, `argv`, must be of the form::
*[option]... *[<field-name>]
Options are validated and assigned to items in `command.options`. Field names are validated and stored in the
list of `command.fieldnames`.
#Arguments:
:param command: Search command instance.
:type command: ``SearchCommand``
:param argv: List of search command arguments.
:type argv: ``list``
:return: ``None``
#Exceptions:
``SyntaxError``: Argument list is incorrectly formed.
``ValueError``: Unrecognized option/field name, or an illegal field value.
"""
debug = environment.splunklib_logger.debug
command_class = type(command).__name__
# Prepare
debug('Parsing %s command line: %r', command_class, argv)
command.fieldnames = None
command.options.reset()
argv = ' '.join(argv)
command_args = cls._arguments_re.match(argv)
if command_args is None:
raise SyntaxError('Syntax error: {}'.format(argv))
# Parse options
for option in cls._options_re.finditer(command_args.group('options')):
name, value = option.group('name'), option.group('value')
if name not in command.options:
raise ValueError(
'Unrecognized {} command option: {}={}'.format(command.name, name, json_encode_string(value)))
command.options[name].value = cls.unquote(value)
missing = command.options.get_missing()
if missing is not None:
if len(missing) > 1:
raise ValueError(
'Values for these {} command options are required: {}'.format(command.name, ', '.join(missing)))
raise ValueError('A value for {} command option {} is required'.format(command.name, missing[0]))
# Parse field names
fieldnames = command_args.group('fieldnames')
if fieldnames is None:
command.fieldnames = []
else:
command.fieldnames = [cls.unquote(value.group(0)) for value in cls._fieldnames_re.finditer(fieldnames)]
debug(' %s: %s', command_class, command)
@classmethod
def unquote(cls, string):
""" Removes quotes from a quoted string.
Splunk search command quote rules are applied. The enclosing double-quotes, if present, are removed. Escaped
double-quotes ('\"' or '""') are replaced by a single double-quote ('"').
**NOTE**
We are not using a json.JSONDecoder because Splunk quote rules are different than JSON quote rules. A
json.JSONDecoder does not recognize a pair of double-quotes ('""') as an escaped quote ('"') and will
decode single-quoted strings ("'") in addition to double-quoted ('"') strings.
"""
if len(string) == 0:
return ''
if string[0] == '"':
if len(string) == 1 or string[-1] != '"':
raise SyntaxError('Poorly formed string literal: ' + string)
string = string[1:-1]
if len(string) == 0:
return ''
def replace(match):
value = match.group(0)
if value == '""':
return '"'
if len(value) < 2:
raise SyntaxError('Poorly formed string literal: ' + string)
return value[1]
result = re.sub(cls._escaped_character_re, replace, string)
return result
# region Class variables
_arguments_re = re.compile(r"""
^\s*
(?P<options> # Match a leading set of name/value pairs
(?:
(?:(?=\w)[^\d]\w*) # name
\s*=\s* # =
(?:"(?:\\.|""|[^"])*"|(?:\\.|[^\s"])+)\s* # value
)*
)\s*
(?P<fieldnames> # Match a trailing set of field names
(?:
(?:"(?:\\.|""|[^"])*"|(?:\\.|[^\s"])+)\s*
)*
)\s*$
""", re.VERBOSE | re.UNICODE)
_escaped_character_re = re.compile(r'(\\.|""|[\\"])')
_fieldnames_re = re.compile(r"""("(?:\\.|""|[^"])+"|(?:\\.|[^\s"])+)""")
_options_re = re.compile(r"""
# Captures a set of name/value pairs when used with re.finditer
(?P<name>(?:(?=\w)[^\d]\w*)) # name
\s*=\s* # =
(?P<value>"(?:\\.|""|[^"])*"|(?:\\.|[^\s"])+) # value
""", re.VERBOSE | re.UNICODE)
# endregion
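# Illustrative sketch (not part of the original module): how CommandLineParser.unquote
# applies the Splunk quoting rules documented in the class docstring. The helper name
# _demo_command_line_quoting is hypothetical and exists only for demonstration.
def _demo_command_line_quoting():
    # Enclosing double-quotes are stripped.
    assert CommandLineParser.unquote('"word_count"') == 'word_count'
    # A doubled quote ('""') inside a quoted string collapses to a single quote.
    assert CommandLineParser.unquote('"say ""hi"""') == 'say "hi"'
    # A backslash-escaped quote is unescaped the same way.
    assert CommandLineParser.unquote(r'"say \"hi\""') == 'say "hi"'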
class ConfigurationSettingsType(type):
""" Metaclass for constructing ConfigurationSettings classes.
Instances of :class:`ConfigurationSettingsType` construct :class:`ConfigurationSettings` classes from classes from
a base :class:`ConfigurationSettings` class and a dictionary of configuration settings. The settings in the
dictionary are validated against the settings in the base class. You cannot add settings, you can only change their
backing-field values and you cannot modify settings without backing-field values. These are considered fixed
configuration setting values.
This is an internal class used in two places:
+ :meth:`decorators.Configuration.__call__`
Adds a ConfigurationSettings attribute to a :class:`SearchCommand` class.
+ :meth:`reporting_command.ReportingCommand.fix_up`
Adds a ConfigurationSettings attribute to a :meth:`ReportingCommand.map` method, if there is one.
"""
def __new__(mcs, module, name, bases):
mcs = super(ConfigurationSettingsType, mcs).__new__(mcs, str(name), bases, {})
return mcs
def __init__(cls, module, name, bases):
super(ConfigurationSettingsType, cls).__init__(name, bases, None)
cls.__module__ = module
@staticmethod
def validate_configuration_setting(specification, name, value):
if not isinstance(value, specification.type):
if isinstance(specification.type, type):
type_names = specification.type.__name__
else:
type_names = ', '.join(imap(lambda t: t.__name__, specification.type))
raise ValueError('Expected {} value, not {}={}'.format(type_names, name, repr(value)))
if specification.constraint and not specification.constraint(value):
raise ValueError('Illegal value: {}={}'.format(name, repr(value)))
return value
specification = namedtuple(
'ConfigurationSettingSpecification', (
'type',
'constraint',
'supporting_protocols'))
# P1 [ ] TODO: Review ConfigurationSettingsType.specification_matrix for completeness and correctness
specification_matrix = {
'clear_required_fields': specification(
type=bool,
constraint=None,
supporting_protocols=[1]),
'distributed': specification(
type=bool,
constraint=None,
supporting_protocols=[2]),
'generates_timeorder': specification(
type=bool,
constraint=None,
supporting_protocols=[1]),
'generating': specification(
type=bool,
constraint=None,
supporting_protocols=[1, 2]),
'local': specification(
type=bool,
constraint=None,
supporting_protocols=[1]),
'maxinputs': specification(
type=int,
constraint=lambda value: 0 <= value <= six.MAXSIZE,
supporting_protocols=[2]),
'overrides_timeorder': specification(
type=bool,
constraint=None,
supporting_protocols=[1]),
'required_fields': specification(
type=(list, set, tuple),
constraint=None,
supporting_protocols=[1, 2]),
'requires_preop': specification(
type=bool,
constraint=None,
supporting_protocols=[1]),
'retainsevents': specification(
type=bool,
constraint=None,
supporting_protocols=[1]),
'run_in_preview': specification(
type=bool,
constraint=None,
supporting_protocols=[2]),
'streaming': specification(
type=bool,
constraint=None,
supporting_protocols=[1]),
'streaming_preop': specification(
type=(bytes, six.text_type),
constraint=None,
supporting_protocols=[1, 2]),
'type': specification(
type=(bytes, six.text_type),
constraint=lambda value: value in ('events', 'reporting', 'streaming'),
supporting_protocols=[2])}
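# Illustrative sketch (not part of the original module): how the specification matrix
# above is consulted to validate one configuration setting. The helper name
# _demo_validate_setting is hypothetical and exists only for demonstration.
def _demo_validate_setting():
    spec = ConfigurationSettingsType.specification_matrix['maxinputs']
    # A value of the declared type that satisfies the constraint is returned unchanged.
    assert ConfigurationSettingsType.validate_configuration_setting(spec, 'maxinputs', 50000) == 50000
    # A value of the wrong type raises ValueError.
    try:
        ConfigurationSettingsType.validate_configuration_setting(spec, 'maxinputs', 'lots')
    except ValueError:
        pass
    else:
        raise AssertionError('expected ValueError for a non-int maxinputs value')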
class CsvDialect(csv.Dialect):
""" Describes the properties of Splunk CSV streams """
delimiter = ','
quotechar = '"'
doublequote = True
skipinitialspace = False
lineterminator = '\r\n'
if sys.version_info >= (3, 0) and sys.platform == 'win32':
lineterminator = '\n'
quoting = csv.QUOTE_MINIMAL
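# Illustrative sketch (not part of the original module): writing one row with the Splunk
# CSV dialect defined above. The helper name _demo_csv_dialect is hypothetical.
def _demo_csv_dialect():
    buffer = StringIO()
    writer = csv.writer(buffer, dialect=CsvDialect)
    # QUOTE_MINIMAL quotes only fields containing the delimiter or quote character,
    # and doublequote=True doubles embedded quotes ('"' -> '""').
    writer.writerow(['a', 'b,c', 'say "hi"'])
    return buffer.getvalue()  # e.g. 'a,"b,c","say ""hi"""' followed by the line terminator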
class InputHeader(dict):
""" Represents a Splunk input header as a collection of name/value pairs.
"""
def __str__(self):
return '\n'.join([name + ':' + value for name, value in six.iteritems(self)])
def read(self, ifile):
""" Reads an input header from an input file.
The input header is read as a sequence of *<name>***:***<value>* pairs separated by a newline. The end of the
input header is signalled by an empty line or an end-of-file.
:param ifile: File-like object that supports iteration over lines.
"""
name, value = None, None
for line in ifile:
if line == '\n':
break
item = line.split(':', 1)
if len(item) == 2:
# start of a new item
if name is not None:
self[name] = value[:-1] # value sans trailing newline
name, value = item[0], urllib.parse.unquote(item[1])
elif name is not None:
# continuation of the current item
value += urllib.parse.unquote(line)
if name is not None:
self[name] = value[:-1] if value[-1] == '\n' else value
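# Illustrative sketch (not part of the original module): reading a Splunk input header,
# a sequence of name:value lines terminated by a blank line, as InputHeader.read above
# describes. The helper name _demo_input_header is hypothetical; the paths are made up.
def _demo_input_header():
    header = InputHeader()
    header.read(StringIO('infoPath:/tmp/info.csv\nsentence:hello\n\n'))
    assert header['infoPath'] == '/tmp/info.csv'
    assert header['sentence'] == 'hello'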
Message = namedtuple('Message', ('type', 'text'))
class MetadataDecoder(JSONDecoder):
def __init__(self):
JSONDecoder.__init__(self, object_hook=self._object_hook)
@staticmethod
def _object_hook(dictionary):
object_view = ObjectView(dictionary)
stack = deque()
stack.append((None, None, dictionary))
while len(stack):
instance, member_name, dictionary = stack.popleft()
for name, value in six.iteritems(dictionary):
if isinstance(value, dict):
stack.append((dictionary, name, value))
if instance is not None:
instance[member_name] = ObjectView(dictionary)
return object_view
class MetadataEncoder(JSONEncoder):
def __init__(self):
JSONEncoder.__init__(self, separators=MetadataEncoder._separators)
def default(self, o):
return o.__dict__ if isinstance(o, ObjectView) else JSONEncoder.default(self, o)
_separators = (',', ':')
class ObjectView(object):
def __init__(self, dictionary):
self.__dict__ = dictionary
def __repr__(self):
return repr(self.__dict__)
def __str__(self):
return str(self.__dict__)
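# Illustrative sketch (not part of the original module): MetadataDecoder converts nested
# JSON objects into ObjectView instances so members can be read as attributes. The
# helper name _demo_metadata_decoder is hypothetical; the JSON payload is made up.
def _demo_metadata_decoder():
    metadata = MetadataDecoder().decode('{"searchinfo": {"sid": "1234", "app": "search"}}')
    assert metadata.searchinfo.sid == '1234'
    assert metadata.searchinfo.app == 'search'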
class Recorder(object):
def __init__(self, path, f):
self._recording = gzip.open(path + '.gz', 'wb')
self._file = f
def __getattr__(self, name):
return getattr(self._file, name)
def __iter__(self):
for line in self._file:
self._recording.write(line)
self._recording.flush()
yield line
def read(self, size=None):
value = self._file.read() if size is None else self._file.read(size)
self._recording.write(value)
self._recording.flush()
return value
def readline(self, size=None):
value = self._file.readline() if size is None else self._file.readline(size)
if len(value) > 0:
self._recording.write(value)
self._recording.flush()
return value
def record(self, *args):
for arg in args:
self._recording.write(arg)
def write(self, text):
self._recording.write(text)
self._file.write(text)
self._recording.flush()
class RecordWriter(object):
def __init__(self, ofile, maxresultrows=None):
self._maxresultrows = 50000 if maxresultrows is None else maxresultrows
self._ofile = set_binary_mode(ofile)
self._fieldnames = None
self._buffer = StringIO()
self._writer = csv.writer(self._buffer, dialect=CsvDialect)
self._writerow = self._writer.writerow
self._finished = False
self._flushed = False
self._inspector = OrderedDict()
self._chunk_count = 0
self._pending_record_count = 0
self._committed_record_count = 0
@property
def is_flushed(self):
return self._flushed
@is_flushed.setter
def is_flushed(self, value):
self._flushed = True if value else False
@property
def ofile(self):
return self._ofile
@ofile.setter
def ofile(self, value):
self._ofile = set_binary_mode(value)
@property
def pending_record_count(self):
return self._pending_record_count
@property
def _record_count(self):
warnings.warn(
"_record_count will be deprecated soon. Use pending_record_count instead.",
PendingDeprecationWarning
)
return self.pending_record_count
@property
def committed_record_count(self):
return self._committed_record_count
@property
def _total_record_count(self):
warnings.warn(
"_total_record_count will be deprecated soon. Use committed_record_count instead.",
PendingDeprecationWarning
)
return self.committed_record_count
def write(self, data):
bytes_type = bytes if sys.version_info >= (3, 0) else str
if not isinstance(data, bytes_type):
data = data.encode('utf-8')
self.ofile.write(data)
def flush(self, finished=None, partial=None):
assert finished is None or isinstance(finished, bool)
assert partial is None or isinstance(partial, bool)
assert not (finished is None and partial is None)
assert finished is None or partial is None
self._ensure_validity()
def write_message(self, message_type, message_text, *args, **kwargs):
self._ensure_validity()
self._inspector.setdefault('messages', []).append((message_type, message_text.format(*args, **kwargs)))
def write_record(self, record):
self._ensure_validity()
self._write_record(record)
def write_records(self, records):
self._ensure_validity()
write_record = self._write_record
for record in records:
write_record(record)
def _clear(self):
self._buffer.seek(0)
self._buffer.truncate()
self._inspector.clear()
self._pending_record_count = 0
def _ensure_validity(self):
if self._finished is True:
assert self._record_count == 0 and len(self._inspector) == 0
raise RuntimeError('I/O operation on closed record writer')
def _write_record(self, record):
fieldnames = self._fieldnames
if fieldnames is None:
self._fieldnames = fieldnames = list(record.keys())
value_list = imap(lambda fn: (str(fn), str('__mv_') + str(fn)), fieldnames)
self._writerow(list(chain.from_iterable(value_list)))
get_value = record.get
values = []
for fieldname in fieldnames:
value = get_value(fieldname, None)
if value is None:
values += (None, None)
continue
value_t = type(value)
if issubclass(value_t, (list, tuple)):
if len(value) == 0:
values += (None, None)
continue
if len(value) > 1:
value_list = value
sv = ''
mv = '$'
for value in value_list:
if value is None:
sv += '\n'
mv += '$;$'
continue
value_t = type(value)
if value_t is not bytes:
if value_t is bool:
value = str(value.real)
elif value_t is six.text_type:
value = value
elif isinstance(value, six.integer_types) or value_t is float or value_t is complex:
value = str(value)
elif issubclass(value_t, (dict, list, tuple)):
value = str(''.join(RecordWriter._iterencode_json(value, 0)))
else:
value = repr(value).encode('utf-8', errors='backslashreplace')
sv += value + '\n'
mv += value.replace('$', '$$') + '$;$'
values += (sv[:-1], mv[:-2])
continue
value = value[0]
value_t = type(value)
if value_t is bool:
values += (str(value.real), None)
continue
if value_t is bytes:
values += (value, None)
continue
if value_t is six.text_type:
if six.PY2:
value = value.encode('utf-8')
values += (value, None)
continue
if isinstance(value, six.integer_types) or value_t is float or value_t is complex:
values += (str(value), None)
continue
if issubclass(value_t, dict):
values += (str(''.join(RecordWriter._iterencode_json(value, 0))), None)
continue
values += (repr(value), None)
self._writerow(values)
self._pending_record_count += 1
if self.pending_record_count >= self._maxresultrows:
self.flush(partial=True)
try:
# noinspection PyUnresolvedReferences
from _json import make_encoder
except ImportError:
# We may be running under PyPy 2.5 which does not include the _json module
_iterencode_json = JSONEncoder(separators=(',', ':')).iterencode
else:
# Creating _iterencode_json this way yields a two-fold performance improvement on Python 2.7.9 and 2.7.10
from json.encoder import encode_basestring_ascii
@staticmethod
def _default(o):
raise TypeError(repr(o) + ' is not JSON serializable')
_iterencode_json = make_encoder(
{}, # markers (for detecting circular references)
_default, # object_encoder
encode_basestring_ascii, # string_encoder
None, # indent
':', ',', # separators
False, # sort_keys
False, # skip_keys
True # allow_nan
)
del make_encoder
class RecordWriterV1(RecordWriter):
def flush(self, finished=None, partial=None):
RecordWriter.flush(self, finished, partial) # validates arguments and the state of this instance
if self.pending_record_count > 0 or (self._chunk_count == 0 and 'messages' in self._inspector):
messages = self._inspector.get('messages')
if self._chunk_count == 0:
# Messages are written to the messages header when we write the first chunk of data
# Guarantee: These messages are displayed by splunkweb and the job inspector
if messages is not None:
message_level = RecordWriterV1._message_level.get
for level, text in messages:
self.write(message_level(level, level))
self.write('=')
self.write(text)
self.write('\r\n')
self.write('\r\n')
elif messages is not None:
# Messages are written to the messages header when we write subsequent chunks of data
# Guarantee: These messages are displayed by splunkweb and the job inspector, if and only if the
# command is configured with
#
# stderr_dest = message
#
# stderr_dest is a static configuration setting. This means that it can only be set in commands.conf.
# It cannot be set in code.
stderr = sys.stderr
for level, text in messages:
print(level, text, file=stderr)
self.write(self._buffer.getvalue())
self._chunk_count += 1
self._committed_record_count += self.pending_record_count
self._clear()
self._finished = finished is True
_message_level = {
'DEBUG': 'debug_message',
'ERROR': 'error_message',
'FATAL': 'error_message',
'INFO': 'info_message',
'WARN': 'warn_message'
}
class RecordWriterV2(RecordWriter):
def flush(self, finished=None, partial=None):
RecordWriter.flush(self, finished, partial) # validates arguments and the state of this instance
if partial or not finished:
# Don't flush partial chunks, since the SCP v2 protocol does not
# provide a way to send partial chunks yet.
return
if not self.is_flushed:
self.write_chunk(finished=True)
def write_chunk(self, finished=None):
inspector = self._inspector
self._committed_record_count += self.pending_record_count
self._chunk_count += 1
# TODO: DVPL-6448: splunklib.searchcommands | Add support for partial: true when it is implemented in
# ChunkedExternProcessor (See SPL-103525)
#
# We will need to replace the following block of code with this block:
#
# metadata = [item for item in (('inspector', inspector), ('finished', finished), ('partial', partial))]
#
# if partial is True:
# finished = False
if len(inspector) == 0:
inspector = None
metadata = [item for item in (('inspector', inspector), ('finished', finished))]
self._write_chunk(metadata, self._buffer.getvalue())
self._clear()
def write_metadata(self, configuration):
self._ensure_validity()
metadata = chain(six.iteritems(configuration), (('inspector', self._inspector if self._inspector else None),))
self._write_chunk(metadata, '')
self.write('\n')
self._clear()
def write_metric(self, name, value):
self._ensure_validity()
self._inspector['metric.' + name] = value
def _clear(self):
super(RecordWriterV2, self)._clear()
self._fieldnames = None
def _write_chunk(self, metadata, body):
if metadata:
metadata = str(''.join(self._iterencode_json(dict([(n, v) for n, v in metadata if v is not None]), 0)))
if sys.version_info >= (3, 0):
metadata = metadata.encode('utf-8')
metadata_length = len(metadata)
else:
metadata_length = 0
if sys.version_info >= (3, 0):
body = body.encode('utf-8')
body_length = len(body)
if not (metadata_length > 0 or body_length > 0):
return
start_line = 'chunked 1.0,%s,%s\n' % (metadata_length, body_length)
self.write(start_line)
self.write(metadata)
self.write(body)
self._ofile.flush()
self._flushed = True
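# Illustrative sketch (not part of the original module): the framing produced by
# RecordWriterV2._write_chunk above. Each chunk is a header line of the form
# 'chunked 1.0,<metadata_length>,<body_length>\n' followed by the JSON metadata and the
# CSV body. The helper name and the payload values are made up for demonstration.
def _demo_chunk_framing():
    metadata = b'{"finished":true}'
    body = b'_serial,_time\r\n1,1570000000\r\n'
    header = 'chunked 1.0,%s,%s\n' % (len(metadata), len(body))
    return header.encode('utf-8') + metadata + body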
| 34.017751
| 120
| 0.586085
|
from __future__ import absolute_import, division, print_function
from io import TextIOWrapper
from collections import deque, namedtuple
from splunklib import six
try:
from collections import OrderedDict
except ImportError:
from ..ordereddict import OrderedDict
from splunklib.six.moves import StringIO
from itertools import chain
from splunklib.six.moves import map as imap
from json import JSONDecoder, JSONEncoder
from json.encoder import encode_basestring_ascii as json_encode_string
from splunklib.six.moves import urllib
import csv
import gzip
import os
import re
import sys
import warnings
from . import environment
csv.field_size_limit(10485760)
def set_binary_mode(fh):
typefile = TextIOWrapper if sys.version_info >= (3, 0) else file
if not isinstance(fh, typefile):
return fh
if sys.version_info >= (3, 0) and hasattr(fh, 'buffer'):
return fh.buffer
elif sys.version_info >= (3, 0):
pass
elif sys.platform == 'win32':
from platform import python_implementation
implementation = python_implementation()
if implementation == 'PyPy':
return os.fdopen(fh.fileno(), 'wb', 0)
else:
import msvcrt
msvcrt.setmode(fh.fileno(), os.O_BINARY)
return fh
class CommandLineParser(object):
@classmethod
def parse(cls, command, argv):
debug = environment.splunklib_logger.debug
command_class = type(command).__name__
debug('Parsing %s command line: %r', command_class, argv)
command.fieldnames = None
command.options.reset()
argv = ' '.join(argv)
command_args = cls._arguments_re.match(argv)
if command_args is None:
raise SyntaxError('Syntax error: {}'.format(argv))
for option in cls._options_re.finditer(command_args.group('options')):
name, value = option.group('name'), option.group('value')
if name not in command.options:
raise ValueError(
'Unrecognized {} command option: {}={}'.format(command.name, name, json_encode_string(value)))
command.options[name].value = cls.unquote(value)
missing = command.options.get_missing()
if missing is not None:
if len(missing) > 1:
raise ValueError(
'Values for these {} command options are required: {}'.format(command.name, ', '.join(missing)))
raise ValueError('A value for {} command option {} is required'.format(command.name, missing[0]))
fieldnames = command_args.group('fieldnames')
if fieldnames is None:
command.fieldnames = []
else:
command.fieldnames = [cls.unquote(value.group(0)) for value in cls._fieldnames_re.finditer(fieldnames)]
debug(' %s: %s', command_class, command)
@classmethod
def unquote(cls, string):
if len(string) == 0:
return ''
if string[0] == '"':
if len(string) == 1 or string[-1] != '"':
raise SyntaxError('Poorly formed string literal: ' + string)
string = string[1:-1]
if len(string) == 0:
return ''
def replace(match):
value = match.group(0)
if value == '""':
return '"'
if len(value) < 2:
raise SyntaxError('Poorly formed string literal: ' + string)
return value[1]
result = re.sub(cls._escaped_character_re, replace, string)
return result
# region Class variables
_arguments_re = re.compile(r"""
^\s*
(?P<options> # Match a leading set of name/value pairs
(?:
(?:(?=\w)[^\d]\w*) # name
\s*=\s* # =
(?:"(?:\\.|""|[^"])*"|(?:\\.|[^\s"])+)\s* # value
)*
)\s*
(?P<fieldnames> # Match a trailing set of field names
(?:
(?:"(?:\\.|""|[^"])*"|(?:\\.|[^\s"])+)\s*
)*
)\s*$
""", re.VERBOSE | re.UNICODE)
_escaped_character_re = re.compile(r'(\\.|""|[\\"])')
_fieldnames_re = re.compile(r"""("(?:\\.|""|[^"])+"|(?:\\.|[^\s"])+)""")
_options_re = re.compile(r"""
# Captures a set of name/value pairs when used with re.finditer
(?P<name>(?:(?=\w)[^\d]\w*)) # name
\s*=\s* # =
(?P<value>"(?:\\.|""|[^"])*"|(?:\\.|[^\s"])+) # value
""", re.VERBOSE | re.UNICODE)
class ConfigurationSettingsType(type):
def __new__(mcs, module, name, bases):
mcs = super(ConfigurationSettingsType, mcs).__new__(mcs, str(name), bases, {})
return mcs
def __init__(cls, module, name, bases):
super(ConfigurationSettingsType, cls).__init__(name, bases, None)
cls.__module__ = module
@staticmethod
def validate_configuration_setting(specification, name, value):
if not isinstance(value, specification.type):
if isinstance(specification.type, type):
type_names = specification.type.__name__
else:
type_names = ', '.join(imap(lambda t: t.__name__, specification.type))
raise ValueError('Expected {} value, not {}={}'.format(type_names, name, repr(value)))
if specification.constraint and not specification.constraint(value):
raise ValueError('Illegal value: {}={}'.format(name, repr(value)))
return value
specification = namedtuple(
'ConfigurationSettingSpecification', (
'type',
'constraint',
'supporting_protocols'))
specification_matrix = {
'clear_required_fields': specification(
type=bool,
constraint=None,
supporting_protocols=[1]),
'distributed': specification(
type=bool,
constraint=None,
supporting_protocols=[2]),
'generates_timeorder': specification(
type=bool,
constraint=None,
supporting_protocols=[1]),
'generating': specification(
type=bool,
constraint=None,
supporting_protocols=[1, 2]),
'local': specification(
type=bool,
constraint=None,
supporting_protocols=[1]),
'maxinputs': specification(
type=int,
constraint=lambda value: 0 <= value <= six.MAXSIZE,
supporting_protocols=[2]),
'overrides_timeorder': specification(
type=bool,
constraint=None,
supporting_protocols=[1]),
'required_fields': specification(
type=(list, set, tuple),
constraint=None,
supporting_protocols=[1, 2]),
'requires_preop': specification(
type=bool,
constraint=None,
supporting_protocols=[1]),
'retainsevents': specification(
type=bool,
constraint=None,
supporting_protocols=[1]),
'run_in_preview': specification(
type=bool,
constraint=None,
supporting_protocols=[2]),
'streaming': specification(
type=bool,
constraint=None,
supporting_protocols=[1]),
'streaming_preop': specification(
type=(bytes, six.text_type),
constraint=None,
supporting_protocols=[1, 2]),
'type': specification(
type=(bytes, six.text_type),
constraint=lambda value: value in ('events', 'reporting', 'streaming'),
supporting_protocols=[2])}
class CsvDialect(csv.Dialect):
delimiter = ','
quotechar = '"'
doublequote = True
skipinitialspace = False
lineterminator = '\r\n'
if sys.version_info >= (3, 0) and sys.platform == 'win32':
lineterminator = '\n'
quoting = csv.QUOTE_MINIMAL
class InputHeader(dict):
def __str__(self):
return '\n'.join([name + ':' + value for name, value in six.iteritems(self)])
def read(self, ifile):
name, value = None, None
for line in ifile:
if line == '\n':
break
item = line.split(':', 1)
if len(item) == 2:
# start of a new item
if name is not None:
self[name] = value[:-1] # value sans trailing newline
name, value = item[0], urllib.parse.unquote(item[1])
elif name is not None:
# continuation of the current item
value += urllib.parse.unquote(line)
if name is not None:
self[name] = value[:-1] if value[-1] == '\n' else value
Message = namedtuple('Message', ('type', 'text'))
class MetadataDecoder(JSONDecoder):
def __init__(self):
JSONDecoder.__init__(self, object_hook=self._object_hook)
@staticmethod
def _object_hook(dictionary):
object_view = ObjectView(dictionary)
stack = deque()
stack.append((None, None, dictionary))
while len(stack):
instance, member_name, dictionary = stack.popleft()
for name, value in six.iteritems(dictionary):
if isinstance(value, dict):
stack.append((dictionary, name, value))
if instance is not None:
instance[member_name] = ObjectView(dictionary)
return object_view
class MetadataEncoder(JSONEncoder):
def __init__(self):
JSONEncoder.__init__(self, separators=MetadataEncoder._separators)
def default(self, o):
return o.__dict__ if isinstance(o, ObjectView) else JSONEncoder.default(self, o)
_separators = (',', ':')
class ObjectView(object):
def __init__(self, dictionary):
self.__dict__ = dictionary
def __repr__(self):
return repr(self.__dict__)
def __str__(self):
return str(self.__dict__)
class Recorder(object):
def __init__(self, path, f):
self._recording = gzip.open(path + '.gz', 'wb')
self._file = f
def __getattr__(self, name):
return getattr(self._file, name)
def __iter__(self):
for line in self._file:
self._recording.write(line)
self._recording.flush()
yield line
def read(self, size=None):
value = self._file.read() if size is None else self._file.read(size)
self._recording.write(value)
self._recording.flush()
return value
def readline(self, size=None):
value = self._file.readline() if size is None else self._file.readline(size)
if len(value) > 0:
self._recording.write(value)
self._recording.flush()
return value
def record(self, *args):
for arg in args:
self._recording.write(arg)
def write(self, text):
self._recording.write(text)
self._file.write(text)
self._recording.flush()
class RecordWriter(object):
def __init__(self, ofile, maxresultrows=None):
self._maxresultrows = 50000 if maxresultrows is None else maxresultrows
self._ofile = set_binary_mode(ofile)
self._fieldnames = None
self._buffer = StringIO()
self._writer = csv.writer(self._buffer, dialect=CsvDialect)
self._writerow = self._writer.writerow
self._finished = False
self._flushed = False
self._inspector = OrderedDict()
self._chunk_count = 0
self._pending_record_count = 0
self._committed_record_count = 0
@property
def is_flushed(self):
return self._flushed
@is_flushed.setter
def is_flushed(self, value):
self._flushed = True if value else False
@property
def ofile(self):
return self._ofile
@ofile.setter
def ofile(self, value):
self._ofile = set_binary_mode(value)
@property
def pending_record_count(self):
return self._pending_record_count
@property
def _record_count(self):
warnings.warn(
"_record_count will be deprecated soon. Use pending_record_count instead.",
PendingDeprecationWarning
)
return self.pending_record_count
@property
def committed_record_count(self):
return self._committed_record_count
@property
def _total_record_count(self):
warnings.warn(
"_total_record_count will be deprecated soon. Use committed_record_count instead.",
PendingDeprecationWarning
)
return self.committed_record_count
def write(self, data):
bytes_type = bytes if sys.version_info >= (3, 0) else str
if not isinstance(data, bytes_type):
data = data.encode('utf-8')
self.ofile.write(data)
def flush(self, finished=None, partial=None):
assert finished is None or isinstance(finished, bool)
assert partial is None or isinstance(partial, bool)
assert not (finished is None and partial is None)
assert finished is None or partial is None
self._ensure_validity()
def write_message(self, message_type, message_text, *args, **kwargs):
self._ensure_validity()
self._inspector.setdefault('messages', []).append((message_type, message_text.format(*args, **kwargs)))
def write_record(self, record):
self._ensure_validity()
self._write_record(record)
def write_records(self, records):
self._ensure_validity()
write_record = self._write_record
for record in records:
write_record(record)
def _clear(self):
self._buffer.seek(0)
self._buffer.truncate()
self._inspector.clear()
self._pending_record_count = 0
def _ensure_validity(self):
if self._finished is True:
assert self._record_count == 0 and len(self._inspector) == 0
raise RuntimeError('I/O operation on closed record writer')
def _write_record(self, record):
fieldnames = self._fieldnames
if fieldnames is None:
self._fieldnames = fieldnames = list(record.keys())
value_list = imap(lambda fn: (str(fn), str('__mv_') + str(fn)), fieldnames)
self._writerow(list(chain.from_iterable(value_list)))
get_value = record.get
values = []
for fieldname in fieldnames:
value = get_value(fieldname, None)
if value is None:
values += (None, None)
continue
value_t = type(value)
if issubclass(value_t, (list, tuple)):
if len(value) == 0:
values += (None, None)
continue
if len(value) > 1:
value_list = value
sv = ''
mv = '$'
for value in value_list:
if value is None:
sv += '\n'
mv += '$;$'
continue
value_t = type(value)
if value_t is not bytes:
if value_t is bool:
value = str(value.real)
elif value_t is six.text_type:
value = value
elif isinstance(value, six.integer_types) or value_t is float or value_t is complex:
value = str(value)
elif issubclass(value_t, (dict, list, tuple)):
value = str(''.join(RecordWriter._iterencode_json(value, 0)))
else:
value = repr(value).encode('utf-8', errors='backslashreplace')
sv += value + '\n'
mv += value.replace('$', '$$') + '$;$'
values += (sv[:-1], mv[:-2])
continue
value = value[0]
value_t = type(value)
if value_t is bool:
values += (str(value.real), None)
continue
if value_t is bytes:
values += (value, None)
continue
if value_t is six.text_type:
if six.PY2:
value = value.encode('utf-8')
values += (value, None)
continue
if isinstance(value, six.integer_types) or value_t is float or value_t is complex:
values += (str(value), None)
continue
if issubclass(value_t, dict):
values += (str(''.join(RecordWriter._iterencode_json(value, 0))), None)
continue
values += (repr(value), None)
self._writerow(values)
self._pending_record_count += 1
if self.pending_record_count >= self._maxresultrows:
self.flush(partial=True)
try:
# noinspection PyUnresolvedReferences
from _json import make_encoder
except ImportError:
# We may be running under PyPy 2.5 which does not include the _json module
_iterencode_json = JSONEncoder(separators=(',', ':')).iterencode
else:
# Creating _iterencode_json this way yields a two-fold performance improvement on Python 2.7.9 and 2.7.10
from json.encoder import encode_basestring_ascii
@staticmethod
def _default(o):
raise TypeError(repr(o) + ' is not JSON serializable')
_iterencode_json = make_encoder(
{}, # markers (for detecting circular references)
_default, # object_encoder
encode_basestring_ascii, # string_encoder
None, # indent
':', ',', # separators
False, # sort_keys
False, # skip_keys
True # allow_nan
)
del make_encoder
class RecordWriterV1(RecordWriter):
def flush(self, finished=None, partial=None):
RecordWriter.flush(self, finished, partial) # validates arguments and the state of this instance
if self.pending_record_count > 0 or (self._chunk_count == 0 and 'messages' in self._inspector):
messages = self._inspector.get('messages')
if self._chunk_count == 0:
# Messages are written to the messages header when we write the first chunk of data
# Guarantee: These messages are displayed by splunkweb and the job inspector
if messages is not None:
message_level = RecordWriterV1._message_level.get
for level, text in messages:
self.write(message_level(level, level))
self.write('=')
self.write(text)
self.write('\r\n')
self.write('\r\n')
elif messages is not None:
# Messages are written to the messages header when we write subsequent chunks of data
# Guarantee: These messages are displayed by splunkweb and the job inspector, if and only if the
# command is configured with
#
# stderr_dest = message
#
# stderr_dest is a static configuration setting. This means that it can only be set in commands.conf.
# It cannot be set in code.
stderr = sys.stderr
for level, text in messages:
print(level, text, file=stderr)
self.write(self._buffer.getvalue())
self._chunk_count += 1
self._committed_record_count += self.pending_record_count
self._clear()
self._finished = finished is True
_message_level = {
'DEBUG': 'debug_message',
'ERROR': 'error_message',
'FATAL': 'error_message',
'INFO': 'info_message',
'WARN': 'warn_message'
}
class RecordWriterV2(RecordWriter):
def flush(self, finished=None, partial=None):
RecordWriter.flush(self, finished, partial) # validates arguments and the state of this instance
if partial or not finished:
# Don't flush partial chunks, since the SCP v2 protocol does not
# provide a way to send partial chunks yet.
return
if not self.is_flushed:
self.write_chunk(finished=True)
def write_chunk(self, finished=None):
inspector = self._inspector
self._committed_record_count += self.pending_record_count
self._chunk_count += 1
# TODO: DVPL-6448: splunklib.searchcommands | Add support for partial: true when it is implemented in
# ChunkedExternProcessor (See SPL-103525)
#
# We will need to replace the following block of code with this block:
#
# metadata = [item for item in (('inspector', inspector), ('finished', finished), ('partial', partial))]
#
# if partial is True:
# finished = False
if len(inspector) == 0:
inspector = None
metadata = [item for item in (('inspector', inspector), ('finished', finished))]
self._write_chunk(metadata, self._buffer.getvalue())
self._clear()
def write_metadata(self, configuration):
self._ensure_validity()
metadata = chain(six.iteritems(configuration), (('inspector', self._inspector if self._inspector else None),))
self._write_chunk(metadata, '')
self.write('\n')
self._clear()
def write_metric(self, name, value):
self._ensure_validity()
self._inspector['metric.' + name] = value
def _clear(self):
super(RecordWriterV2, self)._clear()
self._fieldnames = None
def _write_chunk(self, metadata, body):
if metadata:
metadata = str(''.join(self._iterencode_json(dict([(n, v) for n, v in metadata if v is not None]), 0)))
if sys.version_info >= (3, 0):
metadata = metadata.encode('utf-8')
metadata_length = len(metadata)
else:
metadata_length = 0
if sys.version_info >= (3, 0):
body = body.encode('utf-8')
body_length = len(body)
if not (metadata_length > 0 or body_length > 0):
return
start_line = 'chunked 1.0,%s,%s\n' % (metadata_length, body_length)
self.write(start_line)
self.write(metadata)
self.write(body)
self._ofile.flush()
self._flushed = True
| true
| true
|
f7146fa04286aa454a33161690efd3bc2e7b2b70
| 5,851
|
py
|
Python
|
src/compas/robots/model/tool.py
|
ricardoavelino/compas
|
e3c7f004b8839f96bf01f9f6b21a75786c3f59fa
|
[
"MIT"
] | null | null | null |
src/compas/robots/model/tool.py
|
ricardoavelino/compas
|
e3c7f004b8839f96bf01f9f6b21a75786c3f59fa
|
[
"MIT"
] | null | null | null |
src/compas/robots/model/tool.py
|
ricardoavelino/compas
|
e3c7f004b8839f96bf01f9f6b21a75786c3f59fa
|
[
"MIT"
] | null | null | null |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from compas.geometry import Frame
from compas.geometry import Transformation
from compas.robots.model.robot import RobotModel
class ToolModel(RobotModel):
"""Represents a tool to be attached to the robot's flange.
Attributes
----------
visual : :class:`~compas.datastructures.Mesh`
The visual mesh of the tool.
frame : :class:`~compas.geometry.Frame`
The frame of the tool in tool0 frame.
collision : :class:`~compas.datastructures.Mesh`
The collision mesh representation of the tool.
name : str
The name of the `ToolModel`. Defaults to 'attached_tool'.
link_name : str
The name of the `Link` to which the tool is attached. Defaults to ``None``.
Examples
--------
>>> import compas
>>> from compas.datastructures import Mesh
>>> from compas.geometry import Frame
>>> mesh = Mesh.from_stl(compas.get('cone.stl'))
>>> frame = Frame([0.14, 0, 0], [0, 1, 0], [0, 0, 1])
>>> tool = ToolModel(mesh, frame)
"""
def __init__(self, visual, frame_in_tool0_frame, collision=None, name="attached_tool", link_name=None):
collision = collision or visual
super(ToolModel, self).__init__(name)
self.add_link("attached_tool_link", visual_mesh=visual, collision_mesh=collision)
self._rebuild_tree()
self._create(self.root, Transformation())
self.frame = frame_in_tool0_frame
self.link_name = link_name
@classmethod
def from_robot_model(cls, robot, frame_in_tool0_frame, link_name=None):
"""Creates a ``ToolModel`` from a :class:`~compas.robots.RobotModel` instance.
Parameters
----------
robot : :class:`~compas.robots.RobotModel`
frame_in_tool0_frame : str
The frame of the tool in tool0 frame.
link_name : str
The name of the `Link` to which the tool is attached.
Defaults to ``None``.
"""
data = robot.data
data['frame'] = frame_in_tool0_frame.data
data['link_name'] = link_name
return cls.from_data(data)
@property
def data(self):
"""Returns the data dictionary that represents the tool.
Returns
-------
dict
The tool data.
"""
return self._get_data()
def _get_data(self):
data = super(ToolModel, self)._get_data()
data['frame'] = self.frame.data
data['link_name'] = self.link_name
return data
@data.setter
def data(self, data):
self._set_data(data)
def _set_data(self, data):
super(ToolModel, self)._set_data(data)
self.frame = Frame.from_data(data['frame'])
self.name = self.name or 'attached_tool'
self.link_name = data['link_name'] if 'link_name' in data else None
@classmethod
def from_data(cls, data):
"""Construct a `ToolModel` from its data representation.
To be used in conjunction with the :meth:`to_data` method.
Parameters
----------
data : dict
The data dictionary.
Returns
-------
:class:`ToolModel`
The constructed `ToolModel`.
"""
tool = cls(None, None)
tool.data = data
return tool
def from_tcf_to_t0cf(self, frames_tcf):
"""Converts a list of frames at the robot's tool tip (tcf frame) to frames at the robot's flange (tool0 frame).
Parameters
----------
frames_tcf : list[:class:`~compas.geometry.Frame`]
Frames (in WCF) at the robot's tool tip (tcf).
Returns
-------
list[:class:`~compas.geometry.Frame`]
Frames (in WCF) at the robot's flange (tool0).
Examples
--------
>>> import compas
>>> from compas.datastructures import Mesh
>>> from compas.geometry import Frame
>>> mesh = Mesh.from_stl(compas.get('cone.stl'))
>>> frame = Frame([0.14, 0, 0], [0, 1, 0], [0, 0, 1])
>>> tool = ToolModel(mesh, frame)
>>> frames_tcf = [Frame((-0.309, -0.046, -0.266), (0.276, 0.926, -0.256), (0.879, -0.136, 0.456))]
>>> tool.from_tcf_to_t0cf(frames_tcf)
[Frame(Point(-0.363, 0.003, -0.147), Vector(0.388, -0.351, -0.852), Vector(0.276, 0.926, -0.256))]
"""
Te = Transformation.from_frame_to_frame(self.frame, Frame.worldXY())
return [Frame.from_transformation(Transformation.from_frame(f) * Te) for f in frames_tcf]
def from_t0cf_to_tcf(self, frames_t0cf):
"""Converts frames at the robot's flange (tool0 frame) to frames at the robot's tool tip (tcf frame).
Parameters
----------
frames_t0cf : list[:class:`~compas.geometry.Frame`]
Frames (in WCF) at the robot's flange (tool0).
Returns
-------
list[:class:`~compas.geometry.Frame`]
Frames (in WCF) at the robot's tool tip (tcf).
Examples
--------
>>> import compas
>>> from compas.datastructures import Mesh
>>> from compas.geometry import Frame
>>> mesh = Mesh.from_stl(compas.get('cone.stl'))
>>> frame = Frame([0.14, 0, 0], [0, 1, 0], [0, 0, 1])
>>> tool = ToolModel(mesh, frame)
>>> frames_t0cf = [Frame((-0.363, 0.003, -0.147), (0.388, -0.351, -0.852), (0.276, 0.926, -0.256))]
>>> tool.from_t0cf_to_tcf(frames_t0cf)
[Frame(Point(-0.309, -0.046, -0.266), Vector(0.276, 0.926, -0.256), Vector(0.879, -0.136, 0.456))]
"""
Te = Transformation.from_frame_to_frame(Frame.worldXY(), self.frame)
return [Frame.from_transformation(Transformation.from_frame(f) * Te) for f in frames_t0cf]
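# Illustrative sketch (not part of the original module): round-tripping a ToolModel
# through its ``data`` dictionary with ``from_data``, as the docstrings above describe.
# The helper name _demo_tool_roundtrip is hypothetical; the mesh and frame come from the
# class docstring's example.
def _demo_tool_roundtrip():
    import compas
    from compas.datastructures import Mesh
    mesh = Mesh.from_stl(compas.get('cone.stl'))
    frame = Frame([0.14, 0, 0], [0, 1, 0], [0, 0, 1])
    tool = ToolModel(mesh, frame)
    # The reconstructed copy carries the same frame and link_name as the original.
    return ToolModel.from_data(tool.data)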
| 33.820809
| 119
| 0.592719
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from compas.geometry import Frame
from compas.geometry import Transformation
from compas.robots.model.robot import RobotModel
class ToolModel(RobotModel):
def __init__(self, visual, frame_in_tool0_frame, collision=None, name="attached_tool", link_name=None):
collision = collision or visual
super(ToolModel, self).__init__(name)
self.add_link("attached_tool_link", visual_mesh=visual, collision_mesh=collision)
self._rebuild_tree()
self._create(self.root, Transformation())
self.frame = frame_in_tool0_frame
self.link_name = link_name
@classmethod
def from_robot_model(cls, robot, frame_in_tool0_frame, link_name=None):
data = robot.data
data['frame'] = frame_in_tool0_frame.data
data['link_name'] = link_name
return cls.from_data(data)
@property
def data(self):
return self._get_data()
def _get_data(self):
data = super(ToolModel, self)._get_data()
data['frame'] = self.frame.data
data['link_name'] = self.link_name
return data
@data.setter
def data(self, data):
self._set_data(data)
def _set_data(self, data):
super(ToolModel, self)._set_data(data)
self.frame = Frame.from_data(data['frame'])
self.name = self.name or 'attached_tool'
self.link_name = data['link_name'] if 'link_name' in data else None
@classmethod
def from_data(cls, data):
tool = cls(None, None)
tool.data = data
return tool
def from_tcf_to_t0cf(self, frames_tcf):
Te = Transformation.from_frame_to_frame(self.frame, Frame.worldXY())
return [Frame.from_transformation(Transformation.from_frame(f) * Te) for f in frames_tcf]
def from_t0cf_to_tcf(self, frames_t0cf):
Te = Transformation.from_frame_to_frame(Frame.worldXY(), self.frame)
return [Frame.from_transformation(Transformation.from_frame(f) * Te) for f in frames_t0cf]
| true
| true
|
f714701d95b7e531e20aa26475084c4b139b2eb8
| 417
|
py
|
Python
|
mainapp/migrations/0004_auto_20181224_1636.py
|
ploggingdev/finitecoins
|
60f69cc563e1a26be8c659d4400579025219a223
|
[
"MIT"
] | null | null | null |
mainapp/migrations/0004_auto_20181224_1636.py
|
ploggingdev/finitecoins
|
60f69cc563e1a26be8c659d4400579025219a223
|
[
"MIT"
] | 5
|
2020-02-11T23:31:23.000Z
|
2021-06-10T21:03:24.000Z
|
mainapp/migrations/0004_auto_20181224_1636.py
|
ploggingdev/finitecoins
|
60f69cc563e1a26be8c659d4400579025219a223
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.1.4 on 2018-12-24 16:36
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('mainapp', '0003_game_description_html'),
]
operations = [
migrations.AlterField(
model_name='game',
name='description',
field=models.CharField(blank=True, max_length=1000, null=True),
),
]
| 21.947368
| 75
| 0.613909
|
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('mainapp', '0003_game_description_html'),
]
operations = [
migrations.AlterField(
model_name='game',
name='description',
field=models.CharField(blank=True, max_length=1000, null=True),
),
]
| true
| true
|
f7147066a672f718ec342f39cba6fdc815170b9d
| 2,054
|
py
|
Python
|
motion_detector/main.py
|
Mark-Kinyua/python_public
|
25c4eff3a6f93c35a949f94a2f9c3df3202a3113
|
[
"MIT"
] | null | null | null |
motion_detector/main.py
|
Mark-Kinyua/python_public
|
25c4eff3a6f93c35a949f94a2f9c3df3202a3113
|
[
"MIT"
] | null | null | null |
motion_detector/main.py
|
Mark-Kinyua/python_public
|
25c4eff3a6f93c35a949f94a2f9c3df3202a3113
|
[
"MIT"
] | null | null | null |
import numpy as np
import cv2
# An object detector: runs a pre-trained MobileNet-SSD Caffe model on webcam frames.
# Note: the f-strings used below require Python >= 3.6.
image_path = 'room_people.jpg' # Sample photo (unused here; frames are read from the webcam below)
# The model is already trained; it only needs to be loaded into OpenCV's DNN module.
prototxt_path = 'models/MobileNetSSD_deploy.prototxt' # Load Model
model_path = 'models/MobileNetSSD_deploy.caffemodel'
min_confidence = 0.2
# Things it can identify
classes = ["background","aeroplane","bicycle","bird","boat","bottle","bus","car","cat","chair","cow","diningtable","dog","horse",
"motorbike","person","pottedplant","sheep","sofa","train","tvmonitor"]
np.random.seed(543210) # Same Colors
colors = np.random.uniform(0, 255, size=(len(classes), 3))
net = cv2.dnn.readNetFromCaffe(prototxt_path, model_path)
# img = cv2.imread(image_path)
cap = cv2.VideoCapture(0)
while True:
_, img = cap.read()
height, width = img.shape[0], img.shape[1]
blob = cv2.dnn.blobFromImage(cv2.resize(img, (300, 300)), 0.007, (300,300), 130)
net.setInput(blob)
detected_objects = net.forward()
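    # The SSD detection output has shape (1, 1, N, 7); each of the N rows is
    # [image_id, class_index, confidence, x_min, y_min, x_max, y_max], with the box
    # coordinates normalized to the 0-1 range.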
for i in range(detected_objects.shape[2]):
confidence = detected_objects[0][0][i][2]
if confidence > min_confidence:
class_index = int(detected_objects[0,0,i,1])
upper_left_x = int(detected_objects[0, 0, i, 3] * width)
            upper_left_y = int(detected_objects[0, 0, i, 4] * height)
lower_right_x = int(detected_objects[0, 0, i, 5] * width)
lower_right_y = int(detected_objects[0, 0, i, 6] * height)
prediction_text = f"{classes[class_index]}: {confidence:.2f}%"
cv2.rectangle(img, (upper_left_x, upper_left_y), (lower_right_x, lower_right_y), colors[class_index], 3)
cv2.putText(img, prediction_text, (upper_left_x,
upper_left_y- 15 if upper_left_y > 30 else upper_left_y + 15),
cv2.FONT_HERSHEY_SIMPLEX, 0.6, colors[class_index], 2)
cv2.imshow("Detected Objects", img)
cv2.waitKey(5)
cv2.destroyAllWindows()
cap.release()
| 32.09375
| 129
| 0.656767
|
import numpy as np
import cv2
image_path = 'room_people.jpg'
prototxt_path = 'models/MobileNetSSD_deploy.prototxt'
model_path = 'models/MobileNetSSD_deploy.caffemodel'
min_confidence = 0.2
classes = ["background","aeroplane","bicycle","bird","boat","bottle","bus","car","cat","chair","cow","diningtable","dog","horse",
"motorbike","person","pottedplant","sheep","sofa","train","tvmonitor"]
np.random.seed(543210)
colors = np.random.uniform(0, 255, size=(len(classes), 3))
net = cv2.dnn.readNetFromCaffe(prototxt_path, model_path)
cap = cv2.VideoCapture(0)
while True:
_, img = cap.read()
height, width = img.shape[0], img.shape[1]
blob = cv2.dnn.blobFromImage(cv2.resize(img, (300, 300)), 0.007, (300,300), 130)
net.setInput(blob)
detected_objects = net.forward()
for i in range(detected_objects.shape[2]):
confidence = detected_objects[0][0][i][2]
if confidence > min_confidence:
class_index = int(detected_objects[0,0,i,1])
upper_left_x = int(detected_objects[0, 0, i, 3] * width)
            upper_left_y = int(detected_objects[0, 0, i, 4] * height)
lower_right_x = int(detected_objects[0, 0, i, 5] * width)
lower_right_y = int(detected_objects[0, 0, i, 6] * height)
prediction_text = f"{classes[class_index]}: {confidence:.2f}%"
cv2.rectangle(img, (upper_left_x, upper_left_y), (lower_right_x, lower_right_y), colors[class_index], 3)
cv2.putText(img, prediction_text, (upper_left_x,
upper_left_y- 15 if upper_left_y > 30 else upper_left_y + 15),
cv2.FONT_HERSHEY_SIMPLEX, 0.6, colors[class_index], 2)
cv2.imshow("Detected Objects", img)
cv2.waitKey(5)
cv2.destroyAllWindows()
cap.release()
| true
| true
|
f714719a188b3cff193941c546d8d5b9cbeb3c7f
| 227
|
py
|
Python
|
chia/wallet/puzzles/rom_bootstrap_generator.py
|
ForestCrazy/chia-blockchain-remote-plot
|
0ba838b7a8ea2b5410d438ac70295df699a30dae
|
[
"Apache-2.0"
] | 11,902
|
2019-12-05T00:14:29.000Z
|
2022-03-31T23:25:37.000Z
|
chia/wallet/puzzles/rom_bootstrap_generator.py
|
jcteng/ext9-blockchain
|
46506bc5778e14cbc373de39438b0c6f794a49c5
|
[
"Apache-2.0"
] | 5,246
|
2019-12-05T04:00:03.000Z
|
2022-03-31T21:33:30.000Z
|
chia/wallet/puzzles/rom_bootstrap_generator.py
|
jcteng/ext9-blockchain
|
46506bc5778e14cbc373de39438b0c6f794a49c5
|
[
"Apache-2.0"
] | 2,149
|
2019-12-05T11:12:53.000Z
|
2022-03-31T06:08:34.000Z
|
from chia.types.blockchain_format.program import SerializedProgram
from .load_clvm import load_clvm
MOD = SerializedProgram.from_bytes(load_clvm("rom_bootstrap_generator.clvm").as_bin())
def get_generator():
return MOD
| 22.7
| 86
| 0.814978
|
from chia.types.blockchain_format.program import SerializedProgram
from .load_clvm import load_clvm
MOD = SerializedProgram.from_bytes(load_clvm("rom_bootstrap_generator.clvm").as_bin())
def get_generator():
return MOD
| true
| true
|
f71471f3e75074020a7c0fdf86353776fcede027
| 3,324
|
py
|
Python
|
src/Yowsup/ConnectionIO/connectionengine.py
|
philklc/yowsup
|
a1736ccbdadfccbf9066964f3a9cb51f3337c840
|
[
"MIT"
] | 1
|
2018-12-27T23:35:52.000Z
|
2018-12-27T23:35:52.000Z
|
src/Yowsup/ConnectionIO/connectionengine.py
|
philklc/yowsup
|
a1736ccbdadfccbf9066964f3a9cb51f3337c840
|
[
"MIT"
] | null | null | null |
src/Yowsup/ConnectionIO/connectionengine.py
|
philklc/yowsup
|
a1736ccbdadfccbf9066964f3a9cb51f3337c840
|
[
"MIT"
] | null | null | null |
'''
Copyright (c) <2012> Tarek Galal <tare2.galal@gmail.com>
Permission is hereby granted, free of charge, to any person obtaining a copy of this
software and associated documentation files (the "Software"), to deal in the Software
without restriction, including without limitation the rights to use, copy, modify,
merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR
A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
'''
import socket;
import sys
from bintreenode import BinTreeNodeReader, BinTreeNodeWriter
from Yowsup.Common.debugger import Debugger
from ioexceptions import ConnectionClosedException
class ConnectionEngine(socket.socket):
def __init__(self):
Debugger.attach(self)
self.reader = BinTreeNodeReader(self)
self.writer = BinTreeNodeWriter(self)
self.readSize = 1;
self.buf = [];
self.maxBufRead = 0;
self.connected = 0
self.jid = ""
super(ConnectionEngine,self).__init__(socket.AF_INET, socket.SOCK_STREAM);
def getId(self):
return self.id
def setId(self, idx):
self.id = idx
def flush(self):
'''FLUSH'''
self.write();
def getBuffer(self):
return self.buffer;
def reset(self):
self.buffer = "";
def write(self,data):
if type(data) is int:
try:
self.sendall(chr(data));
except:
raise ConnectionClosedException("socket 1 write crashed, reason: %s" % sys.exc_info()[1])
self._d("socket 1 write crashed, reason: %s" % sys.exc_info()[1])
else:
tmp = "";
for d in data:
tmp += chr(d)
try:
self.sendall(tmp);
except:
raise ConnectionClosedException("socket 2 write crashed, reason: %s" % sys.exc_info()[1])
self._d("socket 2 write crashed, reason: %s" % sys.exc_info()[1])
def setReadSize(self,size):
self.readSize = size;
def read(self, socketOnly = 0):
x = ""
try:
x = self.recv(self.readSize);
except:
raise ConnectionClosedException("socket read crashed, reason %s " % sys.exc_info()[1])
self._d("socket read crashed, reason %s " % sys.exc_info()[1])
#x= self.recvX(self.readSize);
if len(x) == 1:
#Utilities.debug("GOT "+str(ord((x))));
return ord(x);
else:
raise ConnectionClosedException("Got 0 bytes, connection closed");
#return x;
def read2(self,b,off,length):
'''reads into a buffer'''
if off < 0 or length < 0 or (off+length)>len(b):
raise Exception("Out of bounds");
if length == 0:
return 0;
if b is None:
raise Exception("XNull pointerX");
count = 0;
while count < length:
#self.read();
#print "OKIIIIIIIIIIII";
#exit();
b[off+count]=self.read(0);
count= count+1;
return count;
| 25.569231
| 93
| 0.691637
|
import socket;
import sys
from bintreenode import BinTreeNodeReader, BinTreeNodeWriter
from Yowsup.Common.debugger import Debugger
from ioexceptions import ConnectionClosedException
class ConnectionEngine(socket.socket):
def __init__(self):
Debugger.attach(self)
self.reader = BinTreeNodeReader(self)
self.writer = BinTreeNodeWriter(self)
self.readSize = 1;
self.buf = [];
self.maxBufRead = 0;
self.connected = 0
self.jid = ""
super(ConnectionEngine,self).__init__(socket.AF_INET, socket.SOCK_STREAM);
def getId(self):
return self.id
def setId(self, idx):
self.id = idx
def flush(self):
self.write();
def getBuffer(self):
return self.buffer;
def reset(self):
self.buffer = "";
def write(self,data):
if type(data) is int:
try:
self.sendall(chr(data));
except:
raise ConnectionClosedException("socket 1 write crashed, reason: %s" % sys.exc_info()[1])
self._d("socket 1 write crashed, reason: %s" % sys.exc_info()[1])
else:
tmp = "";
for d in data:
tmp += chr(d)
try:
self.sendall(tmp);
except:
raise ConnectionClosedException("socket 2 write crashed, reason: %s" % sys.exc_info()[1])
self._d("socket 2 write crashed, reason: %s" % sys.exc_info()[1])
def setReadSize(self,size):
self.readSize = size;
def read(self, socketOnly = 0):
x = ""
try:
x = self.recv(self.readSize);
except:
raise ConnectionClosedException("socket read crashed, reason %s " % sys.exc_info()[1])
self._d("socket read crashed, reason %s " % sys.exc_info()[1])
if len(x) == 1:
return ord(x);
else:
raise ConnectionClosedException("Got 0 bytes, connection closed");
def read2(self,b,off,length):
if off < 0 or length < 0 or (off+length)>len(b):
raise Exception("Out of bounds");
if length == 0:
return 0;
if b is None:
raise Exception("XNull pointerX");
count = 0;
while count < length:
b[off+count]=self.read(0);
count= count+1;
return count;
| true
| true
|
f7147205a286f1365d68f46e5f1a2e5554c58b41
| 1,577
|
py
|
Python
|
share/rpcauth/rpcauth.py
|
bitcointallkcoin/bitcointalkcoin
|
1c4edf2a6397531581663a9d3110a53fee87ca0b
|
[
"MIT"
] | null | null | null |
share/rpcauth/rpcauth.py
|
bitcointallkcoin/bitcointalkcoin
|
1c4edf2a6397531581663a9d3110a53fee87ca0b
|
[
"MIT"
] | null | null | null |
share/rpcauth/rpcauth.py
|
bitcointallkcoin/bitcointalkcoin
|
1c4edf2a6397531581663a9d3110a53fee87ca0b
|
[
"MIT"
] | 1
|
2020-02-27T15:51:47.000Z
|
2020-02-27T15:51:47.000Z
|
#!/usr/bin/env python3
# Copyright (c) 2015-2018 The Talkcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from argparse import ArgumentParser
from base64 import urlsafe_b64encode
from binascii import hexlify
from getpass import getpass
from os import urandom
import hmac
def generate_salt(size):
"""Create size byte hex salt"""
return hexlify(urandom(size)).decode()
def generate_password():
"""Create 32 byte b64 password"""
return urlsafe_b64encode(urandom(32)).decode('utf-8')
def password_to_hmac(salt, password):
m = hmac.new(bytearray(salt, 'utf-8'), bytearray(password, 'utf-8'), 'SHA256')
return m.hexdigest()
def main():
parser = ArgumentParser(description='Create login credentials for a JSON-RPC user')
parser.add_argument('username', help='the username for authentication')
parser.add_argument('password', help='leave empty to generate a random password or specify "-" to prompt for password', nargs='?')
args = parser.parse_args()
if not args.password:
args.password = generate_password()
elif args.password == '-':
args.password = getpass()
# Create 16 byte hex salt
salt = generate_salt(16)
password_hmac = password_to_hmac(salt, args.password)
print('String to be appended to talkcoin.conf:')
print('rpcauth={0}:{1}${2}'.format(args.username, salt, password_hmac))
print('Your password:\n{0}'.format(args.password))
if __name__ == '__main__':
main()
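A minimal verification sketch, not part of the original file: given the password and the rpcauth line the script prints, the HMAC can be recomputed and compared. The helper name verify_rpcauth is an illustrative assumption.
import hmac

def verify_rpcauth(rpcauth_line, password):
    # rpcauth_line has the form: rpcauth=<username>:<salt>$<hmac-hex>
    _, value = rpcauth_line.split('=', 1)
    username, rest = value.split(':', 1)
    salt, expected = rest.split('$', 1)
    computed = hmac.new(bytearray(salt, 'utf-8'), bytearray(password, 'utf-8'), 'SHA256').hexdigest()
    return username, hmac.compare_digest(computed, expected)
For example, verify_rpcauth(printed_line, password) returns the username and whether the stored digest matches the password.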
| 33.553191
| 134
| 0.714648
|
from argparse import ArgumentParser
from base64 import urlsafe_b64encode
from binascii import hexlify
from getpass import getpass
from os import urandom
import hmac
def generate_salt(size):
return hexlify(urandom(size)).decode()
def generate_password():
return urlsafe_b64encode(urandom(32)).decode('utf-8')
def password_to_hmac(salt, password):
m = hmac.new(bytearray(salt, 'utf-8'), bytearray(password, 'utf-8'), 'SHA256')
return m.hexdigest()
def main():
parser = ArgumentParser(description='Create login credentials for a JSON-RPC user')
parser.add_argument('username', help='the username for authentication')
parser.add_argument('password', help='leave empty to generate a random password or specify "-" to prompt for password', nargs='?')
args = parser.parse_args()
if not args.password:
args.password = generate_password()
elif args.password == '-':
args.password = getpass()
salt = generate_salt(16)
password_hmac = password_to_hmac(salt, args.password)
print('String to be appended to talkcoin.conf:')
print('rpcauth={0}:{1}${2}'.format(args.username, salt, password_hmac))
print('Your password:\n{0}'.format(args.password))
if __name__ == '__main__':
main()
| true
| true
|
f71472389ed45e2198b8808678490dfe4d7a408f
| 787
|
py
|
Python
|
script/sklearn_like_toolkit/warpper/skClf_wrapper/skMultinomial_NBClf.py
|
demetoir/MLtools
|
8c42fcd4cc71728333d9c116ade639fe57d50d37
|
[
"MIT"
] | null | null | null |
script/sklearn_like_toolkit/warpper/skClf_wrapper/skMultinomial_NBClf.py
|
demetoir/MLtools
|
8c42fcd4cc71728333d9c116ade639fe57d50d37
|
[
"MIT"
] | null | null | null |
script/sklearn_like_toolkit/warpper/skClf_wrapper/skMultinomial_NBClf.py
|
demetoir/MLtools
|
8c42fcd4cc71728333d9c116ade639fe57d50d37
|
[
"MIT"
] | null | null | null |
from hyperopt import hp
from sklearn.naive_bayes import MultinomialNB as _skMultinomialNB
from script.sklearn_like_toolkit.warpper.base.BaseWrapperClf import BaseWrapperClf
from script.sklearn_like_toolkit.warpper.base.MixIn import MetaBaseWrapperClfWithABC
class skMultinomial_NBClf(BaseWrapperClf, _skMultinomialNB, metaclass=MetaBaseWrapperClfWithABC):
def __init__(self, alpha=1.0, fit_prior=True, class_prior=None):
_skMultinomialNB.__init__(self, alpha, fit_prior, class_prior)
BaseWrapperClf.__init__(self)
HyperOpt_space = {
'alpha': hp.loguniform('alpha', -8, 1),
}
tuning_grid = {
'alpha': [0.00001, 0.0001, 0.001, 0.01, 0.1, 1.0, 10.0],
# 'class_prior': None,
# 'fit_prior': True
}
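An illustrative sketch, not part of the wrapper, of how a search space like HyperOpt_space above can drive hyperopt's fmin over a plain scikit-learn MultinomialNB; the synthetic count data and the evaluation budget are assumptions made for the example.
import numpy as np
from hyperopt import fmin, hp, tpe
from sklearn.model_selection import cross_val_score
from sklearn.naive_bayes import MultinomialNB

rng = np.random.RandomState(0)
X = rng.randint(0, 5, size=(200, 20))   # non-negative, count-like features
y = rng.randint(0, 2, size=200)         # binary labels

def objective(alpha):
    # hyperopt minimises, so return the negated mean cross-validated accuracy
    return -cross_val_score(MultinomialNB(alpha=alpha), X, y, cv=3).mean()

best = fmin(fn=objective, space=hp.loguniform('alpha', -8, 1), algo=tpe.suggest, max_evals=20)
print(best)   # e.g. {'alpha': ...}, the best value found within exp(-8)..exp(1)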
| 35.772727
| 98
| 0.70521
|
from hyperopt import hp
from sklearn.naive_bayes import MultinomialNB as _skMultinomialNB
from script.sklearn_like_toolkit.warpper.base.BaseWrapperClf import BaseWrapperClf
from script.sklearn_like_toolkit.warpper.base.MixIn import MetaBaseWrapperClfWithABC
class skMultinomial_NBClf(BaseWrapperClf, _skMultinomialNB, metaclass=MetaBaseWrapperClfWithABC):
def __init__(self, alpha=1.0, fit_prior=True, class_prior=None):
_skMultinomialNB.__init__(self, alpha, fit_prior, class_prior)
BaseWrapperClf.__init__(self)
HyperOpt_space = {
'alpha': hp.loguniform('alpha', -8, 1),
}
tuning_grid = {
'alpha': [0.00001, 0.0001, 0.001, 0.01, 0.1, 1.0, 10.0],
}
| true
| true
|
f71472b726fa211be0ca1ee4ef01ed7553c09623
| 8,432
|
py
|
Python
|
pandas/tests/indexes/multi/test_formats.py
|
umangino/pandas
|
c492672699110fe711b7f76ded5828ff24bce5ab
|
[
"BSD-3-Clause"
] | 2
|
2022-02-27T04:02:18.000Z
|
2022-03-01T03:48:47.000Z
|
pandas/tests/indexes/multi/test_formats.py
|
umangino/pandas
|
c492672699110fe711b7f76ded5828ff24bce5ab
|
[
"BSD-3-Clause"
] | 1
|
2021-12-01T03:10:17.000Z
|
2021-12-23T20:27:21.000Z
|
pandas/tests/indexes/multi/test_formats.py
|
umangino/pandas
|
c492672699110fe711b7f76ded5828ff24bce5ab
|
[
"BSD-3-Clause"
] | 2
|
2022-02-27T04:02:19.000Z
|
2022-03-01T03:49:21.000Z
|
import warnings
import numpy as np
import pytest
import pandas as pd
from pandas import (
Index,
MultiIndex,
)
def test_format(idx):
idx.format()
idx[:0].format()
def test_format_integer_names():
index = MultiIndex(
levels=[[0, 1], [0, 1]], codes=[[0, 0, 1, 1], [0, 1, 0, 1]], names=[0, 1]
)
index.format(names=True)
def test_format_sparse_config(idx):
warn_filters = warnings.filters
warnings.filterwarnings("ignore", category=FutureWarning, module=".*format")
# GH1538
with pd.option_context("display.multi_sparse", False):
result = idx.format()
assert result[1] == "foo two"
warnings.filters = warn_filters
def test_format_sparse_display():
index = MultiIndex(
levels=[[0, 1], [0, 1], [0, 1], [0]],
codes=[
[0, 0, 0, 1, 1, 1],
[0, 0, 1, 0, 0, 1],
[0, 1, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 0],
],
)
result = index.format()
assert result[3] == "1 0 0 0"
def test_repr_with_unicode_data():
with pd.option_context("display.encoding", "UTF-8"):
d = {"a": ["\u05d0", 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]}
index = pd.DataFrame(d).set_index(["a", "b"]).index
assert "\\" not in repr(index) # we don't want unicode-escaped
def test_repr_roundtrip_raises():
mi = MultiIndex.from_product([list("ab"), range(3)], names=["first", "second"])
msg = "Must pass both levels and codes"
with pytest.raises(TypeError, match=msg):
eval(repr(mi))
def test_unicode_string_with_unicode():
d = {"a": ["\u05d0", 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]}
idx = pd.DataFrame(d).set_index(["a", "b"]).index
str(idx)
def test_repr_max_seq_item_setting(idx):
# GH10182
idx = idx.repeat(50)
with pd.option_context("display.max_seq_items", None):
repr(idx)
assert "..." not in str(idx)
class TestRepr:
def test_unicode_repr_issues(self):
levels = [Index(["a/\u03c3", "b/\u03c3", "c/\u03c3"]), Index([0, 1])]
codes = [np.arange(3).repeat(2), np.tile(np.arange(2), 3)]
index = MultiIndex(levels=levels, codes=codes)
repr(index.levels)
repr(index.get_level_values(1))
def test_repr_max_seq_items_equal_to_n(self, idx):
# display.max_seq_items == n
with pd.option_context("display.max_seq_items", 6):
result = idx.__repr__()
expected = """\
MultiIndex([('foo', 'one'),
('foo', 'two'),
('bar', 'one'),
('baz', 'two'),
('qux', 'one'),
('qux', 'two')],
names=['first', 'second'])"""
assert result == expected
def test_repr(self, idx):
result = idx[:1].__repr__()
expected = """\
MultiIndex([('foo', 'one')],
names=['first', 'second'])"""
assert result == expected
result = idx.__repr__()
expected = """\
MultiIndex([('foo', 'one'),
('foo', 'two'),
('bar', 'one'),
('baz', 'two'),
('qux', 'one'),
('qux', 'two')],
names=['first', 'second'])"""
assert result == expected
with pd.option_context("display.max_seq_items", 5):
result = idx.__repr__()
expected = """\
MultiIndex([('foo', 'one'),
('foo', 'two'),
...
('qux', 'one'),
('qux', 'two')],
names=['first', 'second'], length=6)"""
assert result == expected
# display.max_seq_items == 1
with pd.option_context("display.max_seq_items", 1):
result = idx.__repr__()
expected = """\
MultiIndex([...
('qux', 'two')],
names=['first', ...], length=6)"""
assert result == expected
def test_rjust(self, narrow_multi_index):
mi = narrow_multi_index
result = mi[:1].__repr__()
expected = """\
MultiIndex([('a', 9, '2000-01-01 00:00:00')],
names=['a', 'b', 'dti'])"""
assert result == expected
result = mi[::500].__repr__()
expected = """\
MultiIndex([( 'a', 9, '2000-01-01 00:00:00'),
( 'a', 9, '2000-01-01 00:08:20'),
('abc', 10, '2000-01-01 00:16:40'),
('abc', 10, '2000-01-01 00:25:00')],
names=['a', 'b', 'dti'])"""
assert result == expected
result = mi.__repr__()
expected = """\
MultiIndex([( 'a', 9, '2000-01-01 00:00:00'),
( 'a', 9, '2000-01-01 00:00:01'),
( 'a', 9, '2000-01-01 00:00:02'),
( 'a', 9, '2000-01-01 00:00:03'),
( 'a', 9, '2000-01-01 00:00:04'),
( 'a', 9, '2000-01-01 00:00:05'),
( 'a', 9, '2000-01-01 00:00:06'),
( 'a', 9, '2000-01-01 00:00:07'),
( 'a', 9, '2000-01-01 00:00:08'),
( 'a', 9, '2000-01-01 00:00:09'),
...
('abc', 10, '2000-01-01 00:33:10'),
('abc', 10, '2000-01-01 00:33:11'),
('abc', 10, '2000-01-01 00:33:12'),
('abc', 10, '2000-01-01 00:33:13'),
('abc', 10, '2000-01-01 00:33:14'),
('abc', 10, '2000-01-01 00:33:15'),
('abc', 10, '2000-01-01 00:33:16'),
('abc', 10, '2000-01-01 00:33:17'),
('abc', 10, '2000-01-01 00:33:18'),
('abc', 10, '2000-01-01 00:33:19')],
names=['a', 'b', 'dti'], length=2000)"""
assert result == expected
def test_tuple_width(self, wide_multi_index):
mi = wide_multi_index
result = mi[:1].__repr__()
expected = """MultiIndex([('a', 9, '2000-01-01 00:00:00', '2000-01-01 00:00:00', ...)],
names=['a', 'b', 'dti_1', 'dti_2', 'dti_3'])"""
assert result == expected
result = mi[:10].__repr__()
expected = """\
MultiIndex([('a', 9, '2000-01-01 00:00:00', '2000-01-01 00:00:00', ...),
('a', 9, '2000-01-01 00:00:01', '2000-01-01 00:00:01', ...),
('a', 9, '2000-01-01 00:00:02', '2000-01-01 00:00:02', ...),
('a', 9, '2000-01-01 00:00:03', '2000-01-01 00:00:03', ...),
('a', 9, '2000-01-01 00:00:04', '2000-01-01 00:00:04', ...),
('a', 9, '2000-01-01 00:00:05', '2000-01-01 00:00:05', ...),
('a', 9, '2000-01-01 00:00:06', '2000-01-01 00:00:06', ...),
('a', 9, '2000-01-01 00:00:07', '2000-01-01 00:00:07', ...),
('a', 9, '2000-01-01 00:00:08', '2000-01-01 00:00:08', ...),
('a', 9, '2000-01-01 00:00:09', '2000-01-01 00:00:09', ...)],
names=['a', 'b', 'dti_1', 'dti_2', 'dti_3'])"""
assert result == expected
result = mi.__repr__()
expected = """\
MultiIndex([( 'a', 9, '2000-01-01 00:00:00', '2000-01-01 00:00:00', ...),
( 'a', 9, '2000-01-01 00:00:01', '2000-01-01 00:00:01', ...),
( 'a', 9, '2000-01-01 00:00:02', '2000-01-01 00:00:02', ...),
( 'a', 9, '2000-01-01 00:00:03', '2000-01-01 00:00:03', ...),
( 'a', 9, '2000-01-01 00:00:04', '2000-01-01 00:00:04', ...),
( 'a', 9, '2000-01-01 00:00:05', '2000-01-01 00:00:05', ...),
( 'a', 9, '2000-01-01 00:00:06', '2000-01-01 00:00:06', ...),
( 'a', 9, '2000-01-01 00:00:07', '2000-01-01 00:00:07', ...),
( 'a', 9, '2000-01-01 00:00:08', '2000-01-01 00:00:08', ...),
( 'a', 9, '2000-01-01 00:00:09', '2000-01-01 00:00:09', ...),
...
('abc', 10, '2000-01-01 00:33:10', '2000-01-01 00:33:10', ...),
('abc', 10, '2000-01-01 00:33:11', '2000-01-01 00:33:11', ...),
('abc', 10, '2000-01-01 00:33:12', '2000-01-01 00:33:12', ...),
('abc', 10, '2000-01-01 00:33:13', '2000-01-01 00:33:13', ...),
('abc', 10, '2000-01-01 00:33:14', '2000-01-01 00:33:14', ...),
('abc', 10, '2000-01-01 00:33:15', '2000-01-01 00:33:15', ...),
('abc', 10, '2000-01-01 00:33:16', '2000-01-01 00:33:16', ...),
('abc', 10, '2000-01-01 00:33:17', '2000-01-01 00:33:17', ...),
('abc', 10, '2000-01-01 00:33:18', '2000-01-01 00:33:18', ...),
('abc', 10, '2000-01-01 00:33:19', '2000-01-01 00:33:19', ...)],
names=['a', 'b', 'dti_1', 'dti_2', 'dti_3'], length=2000)"""
assert result == expected
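For reference, a sketch of what the idx fixture exercised above is assumed to contain (a two-level string MultiIndex named 'first'/'second'), together with the option that produces the truncated repr; this is illustrative, not the actual pytest fixture.
import pandas as pd

idx = pd.MultiIndex.from_tuples(
    [("foo", "one"), ("foo", "two"), ("bar", "one"),
     ("baz", "two"), ("qux", "one"), ("qux", "two")],
    names=["first", "second"],
)
with pd.option_context("display.max_seq_items", 5):
    print(repr(idx))   # middle tuples collapse to '...' and a length=6 suffix is shown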
| 36.66087
| 95
| 0.468098
|
import warnings
import numpy as np
import pytest
import pandas as pd
from pandas import (
Index,
MultiIndex,
)
def test_format(idx):
idx.format()
idx[:0].format()
def test_format_integer_names():
index = MultiIndex(
levels=[[0, 1], [0, 1]], codes=[[0, 0, 1, 1], [0, 1, 0, 1]], names=[0, 1]
)
index.format(names=True)
def test_format_sparse_config(idx):
warn_filters = warnings.filters
warnings.filterwarnings("ignore", category=FutureWarning, module=".*format")
with pd.option_context("display.multi_sparse", False):
result = idx.format()
assert result[1] == "foo two"
warnings.filters = warn_filters
def test_format_sparse_display():
index = MultiIndex(
levels=[[0, 1], [0, 1], [0, 1], [0]],
codes=[
[0, 0, 0, 1, 1, 1],
[0, 0, 1, 0, 0, 1],
[0, 1, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 0],
],
)
result = index.format()
assert result[3] == "1 0 0 0"
def test_repr_with_unicode_data():
with pd.option_context("display.encoding", "UTF-8"):
d = {"a": ["\u05d0", 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]}
index = pd.DataFrame(d).set_index(["a", "b"]).index
assert "\\" not in repr(index)
def test_repr_roundtrip_raises():
mi = MultiIndex.from_product([list("ab"), range(3)], names=["first", "second"])
msg = "Must pass both levels and codes"
with pytest.raises(TypeError, match=msg):
eval(repr(mi))
def test_unicode_string_with_unicode():
d = {"a": ["\u05d0", 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]}
idx = pd.DataFrame(d).set_index(["a", "b"]).index
str(idx)
def test_repr_max_seq_item_setting(idx):
# GH10182
idx = idx.repeat(50)
with pd.option_context("display.max_seq_items", None):
repr(idx)
assert "..." not in str(idx)
class TestRepr:
def test_unicode_repr_issues(self):
levels = [Index(["a/\u03c3", "b/\u03c3", "c/\u03c3"]), Index([0, 1])]
codes = [np.arange(3).repeat(2), np.tile(np.arange(2), 3)]
index = MultiIndex(levels=levels, codes=codes)
repr(index.levels)
repr(index.get_level_values(1))
def test_repr_max_seq_items_equal_to_n(self, idx):
# display.max_seq_items == n
with pd.option_context("display.max_seq_items", 6):
result = idx.__repr__()
expected = """\
MultiIndex([('foo', 'one'),
('foo', 'two'),
('bar', 'one'),
('baz', 'two'),
('qux', 'one'),
('qux', 'two')],
names=['first', 'second'])"""
assert result == expected
def test_repr(self, idx):
result = idx[:1].__repr__()
expected = """\
MultiIndex([('foo', 'one')],
names=['first', 'second'])"""
assert result == expected
result = idx.__repr__()
expected = """\
MultiIndex([('foo', 'one'),
('foo', 'two'),
('bar', 'one'),
('baz', 'two'),
('qux', 'one'),
('qux', 'two')],
names=['first', 'second'])"""
assert result == expected
with pd.option_context("display.max_seq_items", 5):
result = idx.__repr__()
expected = """\
MultiIndex([('foo', 'one'),
('foo', 'two'),
...
('qux', 'one'),
('qux', 'two')],
names=['first', 'second'], length=6)"""
assert result == expected
# display.max_seq_items == 1
with pd.option_context("display.max_seq_items", 1):
result = idx.__repr__()
expected = """\
MultiIndex([...
('qux', 'two')],
names=['first', ...], length=6)"""
assert result == expected
def test_rjust(self, narrow_multi_index):
mi = narrow_multi_index
result = mi[:1].__repr__()
expected = """\
MultiIndex([('a', 9, '2000-01-01 00:00:00')],
names=['a', 'b', 'dti'])"""
assert result == expected
result = mi[::500].__repr__()
expected = """\
MultiIndex([( 'a', 9, '2000-01-01 00:00:00'),
( 'a', 9, '2000-01-01 00:08:20'),
('abc', 10, '2000-01-01 00:16:40'),
('abc', 10, '2000-01-01 00:25:00')],
names=['a', 'b', 'dti'])"""
assert result == expected
result = mi.__repr__()
expected = """\
MultiIndex([( 'a', 9, '2000-01-01 00:00:00'),
( 'a', 9, '2000-01-01 00:00:01'),
( 'a', 9, '2000-01-01 00:00:02'),
( 'a', 9, '2000-01-01 00:00:03'),
( 'a', 9, '2000-01-01 00:00:04'),
( 'a', 9, '2000-01-01 00:00:05'),
( 'a', 9, '2000-01-01 00:00:06'),
( 'a', 9, '2000-01-01 00:00:07'),
( 'a', 9, '2000-01-01 00:00:08'),
( 'a', 9, '2000-01-01 00:00:09'),
...
('abc', 10, '2000-01-01 00:33:10'),
('abc', 10, '2000-01-01 00:33:11'),
('abc', 10, '2000-01-01 00:33:12'),
('abc', 10, '2000-01-01 00:33:13'),
('abc', 10, '2000-01-01 00:33:14'),
('abc', 10, '2000-01-01 00:33:15'),
('abc', 10, '2000-01-01 00:33:16'),
('abc', 10, '2000-01-01 00:33:17'),
('abc', 10, '2000-01-01 00:33:18'),
('abc', 10, '2000-01-01 00:33:19')],
names=['a', 'b', 'dti'], length=2000)"""
assert result == expected
def test_tuple_width(self, wide_multi_index):
mi = wide_multi_index
result = mi[:1].__repr__()
expected = """MultiIndex([('a', 9, '2000-01-01 00:00:00', '2000-01-01 00:00:00', ...)],
names=['a', 'b', 'dti_1', 'dti_2', 'dti_3'])"""
assert result == expected
result = mi[:10].__repr__()
expected = """\
MultiIndex([('a', 9, '2000-01-01 00:00:00', '2000-01-01 00:00:00', ...),
('a', 9, '2000-01-01 00:00:01', '2000-01-01 00:00:01', ...),
('a', 9, '2000-01-01 00:00:02', '2000-01-01 00:00:02', ...),
('a', 9, '2000-01-01 00:00:03', '2000-01-01 00:00:03', ...),
('a', 9, '2000-01-01 00:00:04', '2000-01-01 00:00:04', ...),
('a', 9, '2000-01-01 00:00:05', '2000-01-01 00:00:05', ...),
('a', 9, '2000-01-01 00:00:06', '2000-01-01 00:00:06', ...),
('a', 9, '2000-01-01 00:00:07', '2000-01-01 00:00:07', ...),
('a', 9, '2000-01-01 00:00:08', '2000-01-01 00:00:08', ...),
('a', 9, '2000-01-01 00:00:09', '2000-01-01 00:00:09', ...)],
names=['a', 'b', 'dti_1', 'dti_2', 'dti_3'])"""
assert result == expected
result = mi.__repr__()
expected = """\
MultiIndex([( 'a', 9, '2000-01-01 00:00:00', '2000-01-01 00:00:00', ...),
( 'a', 9, '2000-01-01 00:00:01', '2000-01-01 00:00:01', ...),
( 'a', 9, '2000-01-01 00:00:02', '2000-01-01 00:00:02', ...),
( 'a', 9, '2000-01-01 00:00:03', '2000-01-01 00:00:03', ...),
( 'a', 9, '2000-01-01 00:00:04', '2000-01-01 00:00:04', ...),
( 'a', 9, '2000-01-01 00:00:05', '2000-01-01 00:00:05', ...),
( 'a', 9, '2000-01-01 00:00:06', '2000-01-01 00:00:06', ...),
( 'a', 9, '2000-01-01 00:00:07', '2000-01-01 00:00:07', ...),
( 'a', 9, '2000-01-01 00:00:08', '2000-01-01 00:00:08', ...),
( 'a', 9, '2000-01-01 00:00:09', '2000-01-01 00:00:09', ...),
...
('abc', 10, '2000-01-01 00:33:10', '2000-01-01 00:33:10', ...),
('abc', 10, '2000-01-01 00:33:11', '2000-01-01 00:33:11', ...),
('abc', 10, '2000-01-01 00:33:12', '2000-01-01 00:33:12', ...),
('abc', 10, '2000-01-01 00:33:13', '2000-01-01 00:33:13', ...),
('abc', 10, '2000-01-01 00:33:14', '2000-01-01 00:33:14', ...),
('abc', 10, '2000-01-01 00:33:15', '2000-01-01 00:33:15', ...),
('abc', 10, '2000-01-01 00:33:16', '2000-01-01 00:33:16', ...),
('abc', 10, '2000-01-01 00:33:17', '2000-01-01 00:33:17', ...),
('abc', 10, '2000-01-01 00:33:18', '2000-01-01 00:33:18', ...),
('abc', 10, '2000-01-01 00:33:19', '2000-01-01 00:33:19', ...)],
names=['a', 'b', 'dti_1', 'dti_2', 'dti_3'], length=2000)"""
assert result == expected
| true
| true
|
f71474cfffbbc364e0e62d899c3954f135303af3
| 8,393
|
py
|
Python
|
included_dependencies/cloudscraper/interpreters/native.py
|
AlexRiina/FanFicFare
|
2cd6f53f766e74052c6ca7ab5c2eabff24f59742
|
[
"Apache-2.0"
] | 82
|
2020-03-28T02:24:38.000Z
|
2022-03-30T04:18:42.000Z
|
included_dependencies/cloudscraper/interpreters/native.py
|
AlexRiina/FanFicFare
|
2cd6f53f766e74052c6ca7ab5c2eabff24f59742
|
[
"Apache-2.0"
] | 118
|
2020-03-14T17:34:11.000Z
|
2022-03-30T07:07:45.000Z
|
included_dependencies/cloudscraper/interpreters/native.py
|
AlexRiina/FanFicFare
|
2cd6f53f766e74052c6ca7ab5c2eabff24f59742
|
[
"Apache-2.0"
] | 30
|
2020-06-20T15:31:53.000Z
|
2022-03-06T06:23:55.000Z
|
from __future__ import absolute_import
import ast
import re
import operator as op
import pyparsing
from ..exceptions import CloudflareSolveError
from . import JavaScriptInterpreter
# ------------------------------------------------------------------------------- #
_OP_MAP = {
ast.Add: op.add,
ast.Sub: op.sub,
ast.Mult: op.mul,
ast.Div: op.truediv,
ast.Invert: op.neg,
}
# ------------------------------------------------------------------------------- #
class Calc(ast.NodeVisitor):
def visit_BinOp(self, node):
return _OP_MAP[type(node.op)](self.visit(node.left), self.visit(node.right))
# ------------------------------------------------------------------------------- #
def visit_Num(self, node):
return node.n
# ------------------------------------------------------------------------------- #
def visit_Expr(self, node):
return self.visit(node.value)
# ------------------------------------------------------------------------------- #
@classmethod
def doMath(cls, expression):
tree = ast.parse(expression)
calc = cls()
return calc.visit(tree.body[0])
# ------------------------------------------------------------------------------- #
class Parentheses(object):
def fix(self, s):
res = []
self.visited = set([s])
self.dfs(s, self.invalid(s), res)
return res
# ------------------------------------------------------------------------------- #
def dfs(self, s, n, res):
if n == 0:
res.append(s)
return
for i in range(len(s)):
if s[i] in ['(', ')']:
s_new = s[:i] + s[i + 1:]
if s_new not in self.visited and self.invalid(s_new) < n:
self.visited.add(s_new)
self.dfs(s_new, self.invalid(s_new), res)
# ------------------------------------------------------------------------------- #
def invalid(self, s):
plus = minus = 0
memo = {"(": 1, ")": -1}
for c in s:
plus += memo.get(c, 0)
minus += 1 if plus < 0 else 0
plus = max(0, plus)
return plus + minus
# ------------------------------------------------------------------------------- #
class ChallengeInterpreter(JavaScriptInterpreter):
def __init__(self):
super(ChallengeInterpreter, self).__init__('native')
# ------------------------------------------------------------------------------- #
def eval(self, body, domain):
operators = {
'+': op.add,
'-': op.sub,
'*': op.mul,
'/': op.truediv
}
# ------------------------------------------------------------------------------- #
def flatten(lists):
return sum(map(flatten, lists), []) if isinstance(lists, list) else [lists]
# ------------------------------------------------------------------------------- #
def jsfuckToNumber(jsFuck):
# "Clean Up" JSFuck
jsFuck = jsFuck.replace('!+[]', '1').replace('!![]', '1').replace('[]', '0')
jsFuck = jsFuck.lstrip('+').replace('(+', '(').replace(' ', '')
jsFuck = Parentheses().fix(jsFuck)[0]
# Hackery Parser for Math
stack = []
bstack = []
for i in flatten(pyparsing.nestedExpr().parseString(jsFuck).asList()):
if i == '+':
stack.append(bstack)
bstack = []
continue
bstack.append(i)
stack.append(bstack)
return int(''.join([str(Calc.doMath(''.join(i))) for i in stack]))
# ------------------------------------------------------------------------------- #
def divisorMath(payload, needle, domain):
jsfuckMath = payload.split('/')
if needle in jsfuckMath[1]:
expression = re.findall(r"^(.*?)(.)\(function", jsfuckMath[1])[0]
expression_value = operators[expression[1]](
float(jsfuckToNumber(expression[0])),
float(ord(domain[jsfuckToNumber(jsfuckMath[1][
jsfuckMath[1].find('"("+p+")")}') + len('"("+p+")")}'):-2
])]))
)
else:
expression_value = jsfuckToNumber(jsfuckMath[1])
expression_value = jsfuckToNumber(jsfuckMath[0]) / float(expression_value)
return expression_value
# ------------------------------------------------------------------------------- #
def challengeSolve(body, domain):
jschl_answer = 0
try:
jsfuckChallenge = re.search(
r"setTimeout\(function\(\){\s+var.*?f,\s*(?P<variable>\w+).*?:(?P<init>\S+)};"
r".*?\('challenge-form'\);.*?;(?P<challenge>.*?a\.value)\s*=\s*\S+\.toFixed\(10\);",
body,
re.DOTALL | re.MULTILINE
).groupdict()
except AttributeError:
raise CloudflareSolveError('There was an issue extracting "jsfuckChallenge" from the Cloudflare challenge.')
kJSFUCK = re.search(r'(;|)\s*k.=(?P<kJSFUCK>\S+);', jsfuckChallenge['challenge'], re.S | re.M)
if kJSFUCK:
try:
kJSFUCK = jsfuckToNumber(kJSFUCK.group('kJSFUCK'))
except IndexError:
raise CloudflareSolveError('There was an issue extracting "kJSFUCK" from the Cloudflare challenge.')
try:
kID = re.search(r"\s*k\s*=\s*'(?P<kID>\S+)';", body).group('kID')
except IndexError:
raise CloudflareSolveError('There was an issue extracting "kID" from the Cloudflare challenge.')
try:
r = re.compile(r'<div id="{}(?P<id>\d+)">\s*(?P<jsfuck>[^<>]*)</div>'.format(kID))
kValues = {}
for m in r.finditer(body):
kValues[int(m.group('id'))] = m.group('jsfuck')
jsfuckChallenge['k'] = kValues[kJSFUCK]
except (AttributeError, IndexError):
raise CloudflareSolveError('There was an issue extracting "kValues" from the Cloudflare challenge.')
jsfuckChallenge['challenge'] = re.finditer(
r'{}.*?([+\-*/])=(.*?);(?=a\.value|{})'.format(
jsfuckChallenge['variable'],
jsfuckChallenge['variable']
),
jsfuckChallenge['challenge']
)
# ------------------------------------------------------------------------------- #
if '/' in jsfuckChallenge['init']:
val = jsfuckChallenge['init'].split('/')
jschl_answer = jsfuckToNumber(val[0]) / float(jsfuckToNumber(val[1]))
else:
jschl_answer = jsfuckToNumber(jsfuckChallenge['init'])
# ------------------------------------------------------------------------------- #
for expressionMatch in jsfuckChallenge['challenge']:
oper, expression = expressionMatch.groups()
if '/' in expression:
expression_value = divisorMath(expression, 'function(p)', domain)
else:
if 'Element' in expression:
expression_value = divisorMath(jsfuckChallenge['k'], '"("+p+")")}', domain)
else:
expression_value = jsfuckToNumber(expression)
jschl_answer = operators[oper](jschl_answer, expression_value)
# ------------------------------------------------------------------------------- #
# if not jsfuckChallenge['k'] and '+ t.length' in body:
# jschl_answer += len(domain)
# ------------------------------------------------------------------------------- #
return '{0:.10f}'.format(jschl_answer)
# ------------------------------------------------------------------------------- #
return challengeSolve(body, domain)
# ------------------------------------------------------------------------------- #
ChallengeInterpreter()
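A small illustration, not part of the module, of the two helpers defined above on hand-picked inputs; the inputs are assumptions, not fragments of a real challenge, and Calc relies on ast routing numeric constants to visit_Num, which older Python versions still do.
print(Calc.doMath('1+2*3'))            # evaluates the arithmetic AST, giving 7
print(Parentheses().fix('(1+(2+3)'))   # drops parentheses until balanced, e.g. ['1+(2+3)', '(1+2+3)']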
| 35.867521
| 124
| 0.398546
|
from __future__ import absolute_import
import ast
import re
import operator as op
import pyparsing
from ..exceptions import CloudflareSolveError
from . import JavaScriptInterpreter
_OP_MAP = {
ast.Add: op.add,
ast.Sub: op.sub,
ast.Mult: op.mul,
ast.Div: op.truediv,
ast.Invert: op.neg,
}
class Calc(ast.NodeVisitor):
def visit_BinOp(self, node):
return _OP_MAP[type(node.op)](self.visit(node.left), self.visit(node.right))
def visit_Num(self, node):
return node.n
def visit_Expr(self, node):
return self.visit(node.value)
@classmethod
def doMath(cls, expression):
tree = ast.parse(expression)
calc = cls()
return calc.visit(tree.body[0])
class Parentheses(object):
def fix(self, s):
res = []
self.visited = set([s])
self.dfs(s, self.invalid(s), res)
return res
def dfs(self, s, n, res):
if n == 0:
res.append(s)
return
for i in range(len(s)):
if s[i] in ['(', ')']:
s_new = s[:i] + s[i + 1:]
if s_new not in self.visited and self.invalid(s_new) < n:
self.visited.add(s_new)
self.dfs(s_new, self.invalid(s_new), res)
def invalid(self, s):
plus = minus = 0
memo = {"(": 1, ")": -1}
for c in s:
plus += memo.get(c, 0)
minus += 1 if plus < 0 else 0
plus = max(0, plus)
return plus + minus
class ChallengeInterpreter(JavaScriptInterpreter):
def __init__(self):
super(ChallengeInterpreter, self).__init__('native')
def eval(self, body, domain):
operators = {
'+': op.add,
'-': op.sub,
'*': op.mul,
'/': op.truediv
}
def flatten(lists):
return sum(map(flatten, lists), []) if isinstance(lists, list) else [lists]
def jsfuckToNumber(jsFuck):
jsFuck = jsFuck.replace('!+[]', '1').replace('!![]', '1').replace('[]', '0')
jsFuck = jsFuck.lstrip('+').replace('(+', '(').replace(' ', '')
jsFuck = Parentheses().fix(jsFuck)[0]
stack = []
bstack = []
for i in flatten(pyparsing.nestedExpr().parseString(jsFuck).asList()):
if i == '+':
stack.append(bstack)
bstack = []
continue
bstack.append(i)
stack.append(bstack)
return int(''.join([str(Calc.doMath(''.join(i))) for i in stack]))
def divisorMath(payload, needle, domain):
jsfuckMath = payload.split('/')
if needle in jsfuckMath[1]:
expression = re.findall(r"^(.*?)(.)\(function", jsfuckMath[1])[0]
expression_value = operators[expression[1]](
float(jsfuckToNumber(expression[0])),
float(ord(domain[jsfuckToNumber(jsfuckMath[1][
jsfuckMath[1].find('"("+p+")")}') + len('"("+p+")")}'):-2
])]))
)
else:
expression_value = jsfuckToNumber(jsfuckMath[1])
expression_value = jsfuckToNumber(jsfuckMath[0]) / float(expression_value)
return expression_value
def challengeSolve(body, domain):
jschl_answer = 0
try:
jsfuckChallenge = re.search(
r"setTimeout\(function\(\){\s+var.*?f,\s*(?P<variable>\w+).*?:(?P<init>\S+)};"
r".*?\('challenge-form'\);.*?;(?P<challenge>.*?a\.value)\s*=\s*\S+\.toFixed\(10\);",
body,
re.DOTALL | re.MULTILINE
).groupdict()
except AttributeError:
raise CloudflareSolveError('There was an issue extracting "jsfuckChallenge" from the Cloudflare challenge.')
kJSFUCK = re.search(r'(;|)\s*k.=(?P<kJSFUCK>\S+);', jsfuckChallenge['challenge'], re.S | re.M)
if kJSFUCK:
try:
kJSFUCK = jsfuckToNumber(kJSFUCK.group('kJSFUCK'))
except IndexError:
raise CloudflareSolveError('There was an issue extracting "kJSFUCK" from the Cloudflare challenge.')
try:
kID = re.search(r"\s*k\s*=\s*'(?P<kID>\S+)';", body).group('kID')
except IndexError:
raise CloudflareSolveError('There was an issue extracting "kID" from the Cloudflare challenge.')
try:
r = re.compile(r'<div id="{}(?P<id>\d+)">\s*(?P<jsfuck>[^<>]*)</div>'.format(kID))
kValues = {}
for m in r.finditer(body):
kValues[int(m.group('id'))] = m.group('jsfuck')
jsfuckChallenge['k'] = kValues[kJSFUCK]
except (AttributeError, IndexError):
raise CloudflareSolveError('There was an issue extracting "kValues" from the Cloudflare challenge.')
jsfuckChallenge['challenge'] = re.finditer(
r'{}.*?([+\-*/])=(.*?);(?=a\.value|{})'.format(
jsfuckChallenge['variable'],
jsfuckChallenge['variable']
),
jsfuckChallenge['challenge']
)
if '/' in jsfuckChallenge['init']:
val = jsfuckChallenge['init'].split('/')
jschl_answer = jsfuckToNumber(val[0]) / float(jsfuckToNumber(val[1]))
else:
jschl_answer = jsfuckToNumber(jsfuckChallenge['init'])
for expressionMatch in jsfuckChallenge['challenge']:
oper, expression = expressionMatch.groups()
if '/' in expression:
expression_value = divisorMath(expression, 'function(p)', domain)
else:
if 'Element' in expression:
expression_value = divisorMath(jsfuckChallenge['k'], '"("+p+")")}', domain)
else:
expression_value = jsfuckToNumber(expression)
jschl_answer = operators[oper](jschl_answer, expression_value)
return '{0:.10f}'.format(jschl_answer)
return challengeSolve(body, domain)
ChallengeInterpreter()
| true
| true
|
f714758643422dfaacbc46a0e387395c3f0c97c1
| 10,009
|
py
|
Python
|
src/lambda_codebase/initial_commit/bootstrap_repository/adf-build/shared/schema_validation.py
|
StewartW/aws-deployment-framework
|
7511241664c946ce3b045db211a4931b1dbaac6d
|
[
"Apache-2.0"
] | 1
|
2021-11-28T09:27:16.000Z
|
2021-11-28T09:27:16.000Z
|
src/lambda_codebase/initial_commit/bootstrap_repository/adf-build/shared/schema_validation.py
|
StewartW/aws-deployment-framework
|
7511241664c946ce3b045db211a4931b1dbaac6d
|
[
"Apache-2.0"
] | null | null | null |
src/lambda_codebase/initial_commit/bootstrap_repository/adf-build/shared/schema_validation.py
|
StewartW/aws-deployment-framework
|
7511241664c946ce3b045db211a4931b1dbaac6d
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: MIT-0
"""
Schema Validation for Deployment map files
"""
from schema import Schema, And, Use, Or, Optional, Regex
from logger import configure_logger
LOGGER = configure_logger(__name__)
NOTIFICATION_PROPS = {
Optional("target"): str,
Optional("type") : Or("lambda", "chat_bot")
}
# Pipeline Params
PARAM_SCHEMA = {
Optional("notification_endpoint"): Or(str, NOTIFICATION_PROPS),
Optional("schedule"): str,
Optional("restart_execution_on_update"): bool,
Optional("pipeline_type", default="default"): Or("default"),
}
AWS_ACCOUNT_ID_REGEX_STR = r"\A[0-9]{12}\Z"
AWS_ACCOUNT_ID_SCHEMA = Schema(
And(
Or(int, str),
Use(str),
Regex(
AWS_ACCOUNT_ID_REGEX_STR,
error=(
"The specified account id is incorrect. "
"This typically happens when you specify the account id as a "
"number, while the account id starts with a zero. If this is "
"the case, please wrap the account id in quotes to make it a "
"string. An AWS Account Id is a number of 12 digits, which "
"should start with a zero if the Account Id has a zero at "
"the start too. "
"The number shown to not match the regular expression could "
"be interpreted as an octal number due to the leading zero. "
"Therefore, it might not match the account id as specified "
"in the deployment map."
)
)
)
)
# CodeCommit Source
CODECOMMIT_SOURCE_PROPS = {
"account_id": AWS_ACCOUNT_ID_SCHEMA,
Optional("repository"): str,
Optional("branch"): str,
Optional("poll_for_changes"): bool,
Optional("owner"): str,
Optional("role"): str,
Optional("trigger_on_changes"): bool,
Optional("output_artifact_format", default=None): Or("CODEBUILD_CLONE_REF", "CODE_ZIP", None)
}
CODECOMMIT_SOURCE = {
"provider": 'codecommit',
"properties": CODECOMMIT_SOURCE_PROPS
}
# GitHub Source
GITHUB_SOURCE_PROPS = {
Optional("repository"): str,
Optional("branch"): str,
"owner": str,
"oauth_token_path": str,
"json_field": str,
Optional("trigger_on_changes"): bool,
}
GITHUB_SOURCE = {
"provider": 'github',
"properties": GITHUB_SOURCE_PROPS
}
# CodeStar Source
CODESTAR_SOURCE_PROPS = {
Optional("repository"): str,
Optional("branch"): str,
"owner": str,
"codestar_connection_path": str
}
CODESTAR_SOURCE = {
"provider": 'codestar',
"properties": CODESTAR_SOURCE_PROPS
}
# S3 Source
S3_SOURCE_PROPS = {
"account_id": AWS_ACCOUNT_ID_SCHEMA,
"bucket_name": str,
"object_key": str,
Optional("trigger_on_changes"): bool,
}
S3_SOURCE = {
"provider": 's3',
"properties": S3_SOURCE_PROPS
}
# CodeBuild
CODEBUILD_IMAGE_PROPS = {
"repository_arn": str, # arn:aws:ecr:region:012345678910:repository/test
Optional("tag"): str, # defaults to latest
}
CODEBUILD_PROPS = {
Optional("image"): Or(str, CODEBUILD_IMAGE_PROPS),
Optional("size"): Or('small', 'medium', 'large'),
Optional("spec_filename"): str,
Optional("environment_variables"): {Optional(str): Or(str, bool, int, object)},
Optional("role"): str,
Optional("timeout"): int,
Optional("privileged"): bool,
Optional("spec_inline"): object,
}
DEFAULT_CODEBUILD_BUILD = {
Optional("provider"): 'codebuild',
Optional("enabled"): bool,
Optional("properties"): CODEBUILD_PROPS
}
STAGE_CODEBUILD_BUILD = {
Optional("provider"): 'codebuild',
Optional("properties"): CODEBUILD_PROPS
}
# Jenkins
JENKINS_PROPS = {
Optional("project_name"): str,
Optional("server_url"): str,
Optional("provider_name"): str
}
JENKINS_BUILD = {
Optional("provider"): 'jenkins',
Optional("enabled"): bool,
Optional("properties"): JENKINS_PROPS
}
# CloudFormation
PARAM_OVERRIDE_SCHEMA = {
"inputs": str,
"param": str,
"key_name": str
}
CLOUDFORMATION_ACTIONS = Or(
'CHANGE_SET_EXECUTE',
'CHANGE_SET_REPLACE',
'CREATE_UPDATE',
'DELETE_ONLY',
'REPLACE_ON_FAILURE',
'change_set_execute',
'change_set_replace',
'create_update',
'delete_only',
'replace_on_failure'
)
CLOUDFORMATION_PROPS = {
Optional("stack_name"): str,
Optional("template_filename"): str,
Optional("root_dir"): str,
Optional("role"): str,
Optional("action"): CLOUDFORMATION_ACTIONS,
Optional("outputs"): str,
Optional("change_set_approval"): bool,
Optional("param_overrides"): [PARAM_OVERRIDE_SCHEMA]
}
# No need for a stage schema since CFN takes all optional props
DEFAULT_CLOUDFORMATION_DEPLOY = {
"provider": 'cloudformation',
Optional("properties"): CLOUDFORMATION_PROPS
}
# CodeDeploy
CODEDEPLOY_PROPS = {
"application_name": str,
"deployment_group_name": str,
Optional("role"): str
}
STAGE_CODEDEPLOY_DEPLOY = {
Optional("provider"): 'codedeploy',
"properties": CODEDEPLOY_PROPS
}
DEFAULT_CODEDEPLOY_DEPLOY = {
"provider": 'codedeploy',
Optional("properties"): CODEDEPLOY_PROPS
}
# S3
S3_DEPLOY_PROPS = {
"bucket_name": str,
"object_key": str,
Optional("extract"): bool,
Optional("role"): str
}
STAGE_S3_DEPLOY = {
Optional("provider"): 's3',
"properties": S3_DEPLOY_PROPS
}
DEFAULT_S3_DEPLOY = {
"provider": 's3',
Optional("properties"): S3_DEPLOY_PROPS
}
# Service Catalog
SERVICECATALOG_PROPS = {
"product_id": str,
Optional("configuration_file_path"): str
}
STAGE_SERVICECATALOG_DEPLOY = {
Optional("provider"): 'service_catalog',
"properties": SERVICECATALOG_PROPS
}
DEFAULT_SERVICECATALOG_DEPLOY = {
"provider": 'service_catalog',
Optional("properties"): SERVICECATALOG_PROPS
}
# Lambda
LAMBDA_PROPS = {
"function_name": str,
Optional("input"): Or(str, object),
Optional("role"): str
}
STAGE_LAMBDA_INVOKE = {
Optional("provider"): 'lambda',
"properties": LAMBDA_PROPS
}
DEFAULT_LAMBDA_INVOKE = {
"provider": 'lambda',
Optional("properties"): LAMBDA_PROPS
}
# Approval
APPROVAL_PROPS = {
Optional("message"): str,
Optional("notification_endpoint"): str,
Optional("sns_topic_arn"): str
}
DEFAULT_APPROVAL = {
"provider": 'approval',
"properties": APPROVAL_PROPS
}
# Core Schema
PROVIDER_SOURCE_SCHEMAS = {
'codecommit': Schema(CODECOMMIT_SOURCE),
'github': Schema(GITHUB_SOURCE),
's3': Schema(S3_SOURCE),
'codestar': Schema(CODESTAR_SOURCE),
}
PROVIDER_BUILD_SCHEMAS = {
'codebuild': Schema(DEFAULT_CODEBUILD_BUILD),
'jenkins': Schema(JENKINS_BUILD),
}
PROVIDER_DEPLOY_SCHEMAS = {
'cloudformation': Schema(DEFAULT_CLOUDFORMATION_DEPLOY),
's3': Schema(DEFAULT_S3_DEPLOY),
'codedeploy': Schema(DEFAULT_CODEDEPLOY_DEPLOY),
'lambda': Schema(DEFAULT_LAMBDA_INVOKE),
'service_catalog': Schema(DEFAULT_SERVICECATALOG_DEPLOY),
'codebuild': Schema(DEFAULT_CODEBUILD_BUILD),
}
PROVIDER_SCHEMA = {
'source': And(
{
'provider': Or('codecommit', 'github', 's3', 'codestar'),
'properties': dict,
},
lambda x: PROVIDER_SOURCE_SCHEMAS[x['provider']].validate(x), #pylint: disable=W0108
),
Optional('build'): And(
{
Optional('provider'): Or('codebuild', 'jenkins'),
Optional('enabled'): bool,
Optional('properties'): dict,
},
lambda x: PROVIDER_BUILD_SCHEMAS[x.get('provider', 'codebuild')].validate(x), #pylint: disable=W0108
),
Optional('deploy'): And(
{
'provider': Or(
'cloudformation', 's3', 'codedeploy', 'lambda',
'service_catalog', 'codebuild'
),
Optional('enabled'): bool,
Optional('properties'): dict,
},
lambda x: PROVIDER_DEPLOY_SCHEMAS[x['provider']].validate(x), #pylint: disable=W0108
),
}
REGION_SCHEMA = Or(
str,
list
)
TARGET_LIST_SCHEMA = [Or(
str,
int
)]
TARGET_WAVE_SCHEME = {
Optional("size", default=50): int,
}
# Pipeline Params
TARGET_SCHEMA = {
Optional("path"): Or(str, int, TARGET_LIST_SCHEMA),
Optional("tags"): {And(str, Regex(r"\A.{1,128}\Z")): And(str, Regex(r"\A.{0,256}\Z"))},
Optional("target"): Or(str, int, TARGET_LIST_SCHEMA),
Optional("name"): str,
Optional("provider"): Or('lambda', 's3', 'codedeploy', 'cloudformation', 'service_catalog', 'approval', 'codebuild', 'jenkins'),
Optional("properties"): Or(CODEBUILD_PROPS, JENKINS_PROPS, CLOUDFORMATION_PROPS, CODEDEPLOY_PROPS, S3_DEPLOY_PROPS, SERVICECATALOG_PROPS, LAMBDA_PROPS, APPROVAL_PROPS),
Optional("regions"): REGION_SCHEMA,
Optional("exclude", default=[]): [str],
Optional("wave", default={"size": 50}): TARGET_WAVE_SCHEME
}
COMPLETION_TRIGGERS_SCHEMA = {
"pipelines": [str]
}
PIPELINE_TRIGGERS_SCHEMA = {
Optional("code_artifact"): {
"repository": str,
Optional("package"): str,
}
}
TRIGGERS_SCHEMA = {
Optional("on_complete"): COMPLETION_TRIGGERS_SCHEMA,
Optional("triggered_by"): [PIPELINE_TRIGGERS_SCHEMA],
}
PIPELINE_SCHEMA = {
"name": And(str, len),
"default_providers": PROVIDER_SCHEMA,
Optional("params"): PARAM_SCHEMA,
Optional("tags"): dict,
Optional("targets"): [Or(str, int, TARGET_SCHEMA, TARGET_LIST_SCHEMA)],
Optional("regions"): REGION_SCHEMA,
Optional("completion_trigger"): COMPLETION_TRIGGERS_SCHEMA,
Optional("triggers"): TRIGGERS_SCHEMA
}
TOP_LEVEL_SCHEMA = {
"pipelines": [PIPELINE_SCHEMA],
# Allow any toplevel key starting with "x-" or "x_".
# ADF will ignore these, but users can use them to define anchors in one place.
Optional(Regex('^[x][-_].*')): object
}
class SchemaValidation:
def __init__(self, map_input: dict):
self.validated = Schema(TOP_LEVEL_SCHEMA).validate(map_input)
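A minimal, hand-written deployment map that the schema above accepts; the account ids and pipeline name are placeholder assumptions, not values taken from ADF documentation.
example_map = {
    "pipelines": [
        {
            "name": "sample-pipeline",
            "default_providers": {
                "source": {
                    "provider": "codecommit",
                    "properties": {"account_id": "111111111111"},
                },
                "deploy": {"provider": "cloudformation"},
            },
            "targets": ["222222222222"],
        }
    ]
}
validated = SchemaValidation(example_map).validated   # raises schema.SchemaError on an invalid map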
| 28.194366
| 172
| 0.654911
|
from schema import Schema, And, Use, Or, Optional, Regex
from logger import configure_logger
LOGGER = configure_logger(__name__)
NOTIFICATION_PROPS = {
Optional("target"): str,
Optional("type") : Or("lambda", "chat_bot")
}
PARAM_SCHEMA = {
Optional("notification_endpoint"): Or(str, NOTIFICATION_PROPS),
Optional("schedule"): str,
Optional("restart_execution_on_update"): bool,
Optional("pipeline_type", default="default"): Or("default"),
}
AWS_ACCOUNT_ID_REGEX_STR = r"\A[0-9]{12}\Z"
AWS_ACCOUNT_ID_SCHEMA = Schema(
And(
Or(int, str),
Use(str),
Regex(
AWS_ACCOUNT_ID_REGEX_STR,
error=(
"The specified account id is incorrect. "
"This typically happens when you specify the account id as a "
"number, while the account id starts with a zero. If this is "
"the case, please wrap the account id in quotes to make it a "
"string. An AWS Account Id is a number of 12 digits, which "
"should start with a zero if the Account Id has a zero at "
"the start too. "
"The number shown to not match the regular expression could "
"be interpreted as an octal number due to the leading zero. "
"Therefore, it might not match the account id as specified "
"in the deployment map."
)
)
)
)
CODECOMMIT_SOURCE_PROPS = {
"account_id": AWS_ACCOUNT_ID_SCHEMA,
Optional("repository"): str,
Optional("branch"): str,
Optional("poll_for_changes"): bool,
Optional("owner"): str,
Optional("role"): str,
Optional("trigger_on_changes"): bool,
Optional("output_artifact_format", default=None): Or("CODEBUILD_CLONE_REF", "CODE_ZIP", None)
}
CODECOMMIT_SOURCE = {
"provider": 'codecommit',
"properties": CODECOMMIT_SOURCE_PROPS
}
GITHUB_SOURCE_PROPS = {
Optional("repository"): str,
Optional("branch"): str,
"owner": str,
"oauth_token_path": str,
"json_field": str,
Optional("trigger_on_changes"): bool,
}
GITHUB_SOURCE = {
"provider": 'github',
"properties": GITHUB_SOURCE_PROPS
}
CODESTAR_SOURCE_PROPS = {
Optional("repository"): str,
Optional("branch"): str,
"owner": str,
"codestar_connection_path": str
}
CODESTAR_SOURCE = {
"provider": 'codestar',
"properties": CODESTAR_SOURCE_PROPS
}
S3_SOURCE_PROPS = {
"account_id": AWS_ACCOUNT_ID_SCHEMA,
"bucket_name": str,
"object_key": str,
Optional("trigger_on_changes"): bool,
}
S3_SOURCE = {
"provider": 's3',
"properties": S3_SOURCE_PROPS
}
CODEBUILD_IMAGE_PROPS = {
"repository_arn": str,
Optional("tag"): str,
}
CODEBUILD_PROPS = {
Optional("image"): Or(str, CODEBUILD_IMAGE_PROPS),
Optional("size"): Or('small', 'medium', 'large'),
Optional("spec_filename"): str,
Optional("environment_variables"): {Optional(str): Or(str, bool, int, object)},
Optional("role"): str,
Optional("timeout"): int,
Optional("privileged"): bool,
Optional("spec_inline"): object,
}
DEFAULT_CODEBUILD_BUILD = {
Optional("provider"): 'codebuild',
Optional("enabled"): bool,
Optional("properties"): CODEBUILD_PROPS
}
STAGE_CODEBUILD_BUILD = {
Optional("provider"): 'codebuild',
Optional("properties"): CODEBUILD_PROPS
}
JENKINS_PROPS = {
Optional("project_name"): str,
Optional("server_url"): str,
Optional("provider_name"): str
}
JENKINS_BUILD = {
Optional("provider"): 'jenkins',
Optional("enabled"): bool,
Optional("properties"): JENKINS_PROPS
}
PARAM_OVERRIDE_SCHEMA = {
"inputs": str,
"param": str,
"key_name": str
}
CLOUDFORMATION_ACTIONS = Or(
'CHANGE_SET_EXECUTE',
'CHANGE_SET_REPLACE',
'CREATE_UPDATE',
'DELETE_ONLY',
'REPLACE_ON_FAILURE',
'change_set_execute',
'change_set_replace',
'create_update',
'delete_only',
'replace_on_failure'
)
CLOUDFORMATION_PROPS = {
Optional("stack_name"): str,
Optional("template_filename"): str,
Optional("root_dir"): str,
Optional("role"): str,
Optional("action"): CLOUDFORMATION_ACTIONS,
Optional("outputs"): str,
Optional("change_set_approval"): bool,
Optional("param_overrides"): [PARAM_OVERRIDE_SCHEMA]
}
DEFAULT_CLOUDFORMATION_DEPLOY = {
"provider": 'cloudformation',
Optional("properties"): CLOUDFORMATION_PROPS
}
CODEDEPLOY_PROPS = {
"application_name": str,
"deployment_group_name": str,
Optional("role"): str
}
STAGE_CODEDEPLOY_DEPLOY = {
Optional("provider"): 'codedeploy',
"properties": CODEDEPLOY_PROPS
}
DEFAULT_CODEDEPLOY_DEPLOY = {
"provider": 'codedeploy',
Optional("properties"): CODEDEPLOY_PROPS
}
S3_DEPLOY_PROPS = {
"bucket_name": str,
"object_key": str,
Optional("extract"): bool,
Optional("role"): str
}
STAGE_S3_DEPLOY = {
Optional("provider"): 's3',
"properties": S3_DEPLOY_PROPS
}
DEFAULT_S3_DEPLOY = {
"provider": 's3',
Optional("properties"): S3_DEPLOY_PROPS
}
SERVICECATALOG_PROPS = {
"product_id": str,
Optional("configuration_file_path"): str
}
STAGE_SERVICECATALOG_DEPLOY = {
Optional("provider"): 'service_catalog',
"properties": SERVICECATALOG_PROPS
}
DEFAULT_SERVICECATALOG_DEPLOY = {
"provider": 'service_catalog',
Optional("properties"): SERVICECATALOG_PROPS
}
LAMBDA_PROPS = {
"function_name": str,
Optional("input"): Or(str, object),
Optional("role"): str
}
STAGE_LAMBDA_INVOKE = {
Optional("provider"): 'lambda',
"properties": LAMBDA_PROPS
}
DEFAULT_LAMBDA_INVOKE = {
"provider": 'lambda',
Optional("properties"): LAMBDA_PROPS
}
APPROVAL_PROPS = {
Optional("message"): str,
Optional("notification_endpoint"): str,
Optional("sns_topic_arn"): str
}
DEFAULT_APPROVAL = {
"provider": 'approval',
"properties": APPROVAL_PROPS
}
PROVIDER_SOURCE_SCHEMAS = {
'codecommit': Schema(CODECOMMIT_SOURCE),
'github': Schema(GITHUB_SOURCE),
's3': Schema(S3_SOURCE),
'codestar': Schema(CODESTAR_SOURCE),
}
PROVIDER_BUILD_SCHEMAS = {
'codebuild': Schema(DEFAULT_CODEBUILD_BUILD),
'jenkins': Schema(JENKINS_BUILD),
}
PROVIDER_DEPLOY_SCHEMAS = {
'cloudformation': Schema(DEFAULT_CLOUDFORMATION_DEPLOY),
's3': Schema(DEFAULT_S3_DEPLOY),
'codedeploy': Schema(DEFAULT_CODEDEPLOY_DEPLOY),
'lambda': Schema(DEFAULT_LAMBDA_INVOKE),
'service_catalog': Schema(DEFAULT_SERVICECATALOG_DEPLOY),
'codebuild': Schema(DEFAULT_CODEBUILD_BUILD),
}
PROVIDER_SCHEMA = {
'source': And(
{
'provider': Or('codecommit', 'github', 's3', 'codestar'),
'properties': dict,
},
lambda x: PROVIDER_SOURCE_SCHEMAS[x['provider']].validate(x),
),
Optional('build'): And(
{
Optional('provider'): Or('codebuild', 'jenkins'),
Optional('enabled'): bool,
Optional('properties'): dict,
},
lambda x: PROVIDER_BUILD_SCHEMAS[x.get('provider', 'codebuild')].validate(x),
),
Optional('deploy'): And(
{
'provider': Or(
'cloudformation', 's3', 'codedeploy', 'lambda',
'service_catalog', 'codebuild'
),
Optional('enabled'): bool,
Optional('properties'): dict,
},
lambda x: PROVIDER_DEPLOY_SCHEMAS[x['provider']].validate(x),
),
}
REGION_SCHEMA = Or(
str,
list
)
TARGET_LIST_SCHEMA = [Or(
str,
int
)]
TARGET_WAVE_SCHEME = {
Optional("size", default=50): int,
}
TARGET_SCHEMA = {
Optional("path"): Or(str, int, TARGET_LIST_SCHEMA),
Optional("tags"): {And(str, Regex(r"\A.{1,128}\Z")): And(str, Regex(r"\A.{0,256}\Z"))},
Optional("target"): Or(str, int, TARGET_LIST_SCHEMA),
Optional("name"): str,
Optional("provider"): Or('lambda', 's3', 'codedeploy', 'cloudformation', 'service_catalog', 'approval', 'codebuild', 'jenkins'),
Optional("properties"): Or(CODEBUILD_PROPS, JENKINS_PROPS, CLOUDFORMATION_PROPS, CODEDEPLOY_PROPS, S3_DEPLOY_PROPS, SERVICECATALOG_PROPS, LAMBDA_PROPS, APPROVAL_PROPS),
Optional("regions"): REGION_SCHEMA,
Optional("exclude", default=[]): [str],
Optional("wave", default={"size": 50}): TARGET_WAVE_SCHEME
}
COMPLETION_TRIGGERS_SCHEMA = {
"pipelines": [str]
}
PIPELINE_TRIGGERS_SCHEMA = {
Optional("code_artifact"): {
"repository": str,
Optional("package"): str,
}
}
TRIGGERS_SCHEMA = {
Optional("on_complete"): COMPLETION_TRIGGERS_SCHEMA,
Optional("triggered_by"): [PIPELINE_TRIGGERS_SCHEMA],
}
PIPELINE_SCHEMA = {
"name": And(str, len),
"default_providers": PROVIDER_SCHEMA,
Optional("params"): PARAM_SCHEMA,
Optional("tags"): dict,
Optional("targets"): [Or(str, int, TARGET_SCHEMA, TARGET_LIST_SCHEMA)],
Optional("regions"): REGION_SCHEMA,
Optional("completion_trigger"): COMPLETION_TRIGGERS_SCHEMA,
Optional("triggers"): TRIGGERS_SCHEMA
}
TOP_LEVEL_SCHEMA = {
"pipelines": [PIPELINE_SCHEMA],
Optional(Regex('^[x][-_].*')): object
}
class SchemaValidation:
def __init__(self, map_input: dict):
self.validated = Schema(TOP_LEVEL_SCHEMA).validate(map_input)
| true
| true
|
f71475d212b144b5142445b5a0db415640369fb9
| 23
|
py
|
Python
|
instance/config.py
|
randilfernando/bot
|
a193c557a9ce3d9bc9d542e29e50f3077ba716df
|
[
"MIT"
] | null | null | null |
instance/config.py
|
randilfernando/bot
|
a193c557a9ce3d9bc9d542e29e50f3077ba716df
|
[
"MIT"
] | 3
|
2020-04-15T16:06:19.000Z
|
2020-04-15T16:07:49.000Z
|
instance/config.py
|
randilfernando/bot
|
a193c557a9ce3d9bc9d542e29e50f3077ba716df
|
[
"MIT"
] | 1
|
2021-01-27T10:32:47.000Z
|
2021-01-27T10:32:47.000Z
|
INTENT_THRESHOLD = 0.5
| 11.5
| 22
| 0.782609
|
INTENT_THRESHOLD = 0.5
| true
| true
|
f7147793a2e6c2dd68fdd7d5efb9db0e5d179701
| 14,417
|
py
|
Python
|
mi/dataset/dataset_parser.py
|
rmanoni/mi-dataset
|
c1012a0cd8f2ea075e008cdd1ab291ed54f44d43
|
[
"BSD-2-Clause"
] | null | null | null |
mi/dataset/dataset_parser.py
|
rmanoni/mi-dataset
|
c1012a0cd8f2ea075e008cdd1ab291ed54f44d43
|
[
"BSD-2-Clause"
] | null | null | null |
mi/dataset/dataset_parser.py
|
rmanoni/mi-dataset
|
c1012a0cd8f2ea075e008cdd1ab291ed54f44d43
|
[
"BSD-2-Clause"
] | null | null | null |
#!/usr/bin/env python
"""
@package mi.dataset.parser A collection of parsers that strip data blocks
out of files and feed them into the system.
@file mi/dataset/parser.py
@author Steve Foley
@brief Base classes for data set agent parsers
"""
__author__ = 'Steve Foley'
__license__ = 'Apache 2.0'
import time
import ntplib
from mi.core.log import get_logger
log = get_logger()
from mi.core.instrument.chunker import StringChunker
from mi.core.instrument.data_particle import DataParticleKey
from mi.core.exceptions import RecoverableSampleException, SampleEncodingException
from mi.core.exceptions import NotImplementedException, UnexpectedDataException
from mi.core.common import BaseEnum
class DataSetDriverConfigKeys(BaseEnum):
PARTICLE_MODULE = "particle_module"
PARTICLE_CLASS = "particle_class"
PARTICLE_CLASSES_DICT = "particle_classes_dict"
DIRECTORY = "directory"
STORAGE_DIRECTORY = "storage_directory"
PATTERN = "pattern"
FREQUENCY = "frequency"
FILE_MOD_WAIT_TIME = "file_mod_wait_time"
HARVESTER = "harvester"
PARSER = "parser"
MODULE = "module"
CLASS = "class"
URI = "uri"
CLASS_ARGS = "class_args"
class Parser(object):
""" abstract class to show API needed for plugin poller objects """
def __init__(self, config, stream_handle, state, sieve_fn,
state_callback, publish_callback, exception_callback=None):
"""
@param config The configuration parameters to feed into the parser
@param stream_handle An already open file-like filehandle
@param state The location in the file to start parsing from.
This reflects what has already been published.
@param sieve_fn A sieve function that might be added to a handler
            to appropriately filter out the data
@param state_callback The callback method from the agent driver
(ultimately the agent) to call back when a state needs to be
updated
@param publish_callback The callback from the agent driver (and
ultimately from the agent) where we send our sample particle to
be published into ION
@param exception_callback The callback from the agent driver (and
ultimately from the agent) where we send our error events to
be published into ION
"""
self._chunker = StringChunker(sieve_fn)
self._stream_handle = stream_handle
self._state = state
self._state_callback = state_callback
self._publish_callback = publish_callback
self._exception_callback = exception_callback
self._config = config
# Build class from module and class name, then set the state
if config.get(DataSetDriverConfigKeys.PARTICLE_CLASS) is not None:
if config.get(DataSetDriverConfigKeys.PARTICLE_MODULE):
self._particle_module = __import__(config.get(DataSetDriverConfigKeys.PARTICLE_MODULE),
fromlist=[config.get(DataSetDriverConfigKeys.PARTICLE_CLASS)])
                # if there is more than one particle class for this parser, this cannot be used;
                # the particle class then needs to be hard coded in the driver
try:
self._particle_class = getattr(self._particle_module,
config.get(DataSetDriverConfigKeys.PARTICLE_CLASS))
except TypeError:
self._particle_class = None
else:
log.warn("Particle class is specified in config, but no particle module is specified in config")
def get_records(self, max_count):
"""
Returns a list of particles (following the instrument driver structure).
"""
raise NotImplementedException("get_records() not overridden!")
def _publish_sample(self, samples):
"""
Publish the samples with the given publishing callback.
@param samples The list of data particle to publish up to the system
"""
if isinstance(samples, list):
self._publish_callback(samples)
else:
self._publish_callback([samples])
def _extract_sample(self, particle_class, regex, raw_data, timestamp):
"""
Extract sample from a response line if present and publish
parsed particle
@param particle_class The class to instantiate for this specific
data particle. Parameterizing this allows for simple, standard
behavior from this routine
@param regex The regular expression that matches a data sample if regex
is none then process every line
@param raw_data data to input into this particle.
@retval return a raw particle if a sample was found, else None
"""
particle = None
try:
if regex is None or regex.match(raw_data):
particle = particle_class(raw_data, internal_timestamp=timestamp,
preferred_timestamp=DataParticleKey.INTERNAL_TIMESTAMP)
                # need to actually parse the particle fields to find out if there are errors
particle.generate()
encoding_errors = particle.get_encoding_errors()
if encoding_errors:
log.warn("Failed to encode: %s", encoding_errors)
raise SampleEncodingException("Failed to encode: %s" % encoding_errors)
except (RecoverableSampleException, SampleEncodingException) as e:
log.error("Sample exception detected: %s raw data: %s", e, raw_data)
if self._exception_callback:
self._exception_callback(e)
else:
raise e
return particle
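# Illustrative sketch, not part of the original module: the sieve_fn accepted by the
# constructor above is expected to return (start, end) index pairs for candidate records,
# which the StringChunker uses to separate data from non-data. The framing pattern below
# is a hypothetical example, not a real instrument format.
import re

EXAMPLE_RECORD_MATCHER = re.compile('\x02.*?\x03', re.DOTALL)

def example_sieve_function(raw_data):
    # one (start, end) tuple per framed record found in the raw input
    return [(match.start(), match.end()) for match in EXAMPLE_RECORD_MATCHER.finditer(raw_data)]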
class BufferLoadingParser(Parser):
"""
This class loads data values into a record buffer, then offers up
    records from this buffer as they are requested. Parsers don't have
    to operate this way, but it can keep memory in check and smooth out
    stream inputs if they don't all come at once.
"""
def __init__(self, config, stream_handle, state, sieve_fn,
state_callback, publish_callback, exception_callback=None):
"""
@param config The configuration parameters to feed into the parser
@param stream_handle An already open file-like filehandle
@param state The location in the file to start parsing from.
This reflects what has already been published.
@param sieve_fn A sieve function that might be added to a handler
            to appropriately filter out the data
@param state_callback The callback method from the agent driver
(ultimately the agent) to call back when a state needs to be
updated
@param publish_callback The callback from the agent driver (and
ultimately from the agent) where we send our sample particle to
be published into ION
@param exception_callback The callback from the agent driver (and
ultimately from the agent) where we send our error events to
be published into ION
"""
self._record_buffer = []
self._timestamp = 0.0
self.file_complete = False
super(BufferLoadingParser, self).__init__(config, stream_handle, state,
sieve_fn, state_callback,
publish_callback,
exception_callback)
def get_records(self, num_records):
"""
Go ahead and execute the data parsing loop up to a point. This involves
        getting data from the file, stuffing it into the chunker, then parsing
it and publishing.
@param num_records The number of records to gather
@retval Return the list of particles requested, [] if none available
"""
if num_records <= 0:
return []
try:
while len(self._record_buffer) < num_records:
self._load_particle_buffer()
except EOFError:
self._process_end_of_file()
return self._yank_particles(num_records)
def _process_end_of_file(self):
"""
Confirm that the chunker does not have any extra bytes left at the end of the file
"""
(nd_timestamp, non_data) = self._chunker.get_next_non_data()
(timestamp, chunk) = self._chunker.get_next_data()
if non_data and len(non_data) > 0:
log.warn("Have extra unexplained non-data bytes at the end of the file:%s", non_data)
raise UnexpectedDataException("Have extra unexplained non-data bytes at the end of the file:%s" % non_data)
elif chunk and len(chunk) > 0:
log.warn("Have extra unexplained data chunk bytes at the end of the file:%s", chunk)
raise UnexpectedDataException("Have extra unexplained data chunk bytes at the end of the file:%s" % chunk)
def _yank_particles(self, num_records):
"""
Get particles out of the buffer and publish them. Update the state
of what has been published, too.
@param num_records The number of particles to remove from the buffer
@retval A list with num_records elements from the buffer. If num_records
cannot be collected (perhaps due to an EOF), the list will have the
elements it was able to collect.
"""
if len(self._record_buffer) < num_records:
num_to_fetch = len(self._record_buffer)
else:
num_to_fetch = num_records
log.trace("Yanking %s records of %s requested",
num_to_fetch,
num_records)
return_list = []
records_to_return = self._record_buffer[:num_to_fetch]
self._record_buffer = self._record_buffer[num_to_fetch:]
if len(records_to_return) > 0:
self._state = records_to_return[-1][1] # state side of tuple of last entry
# strip the state info off of them now that we have what we need
for item in records_to_return:
log.debug("Record to return: %s", item)
return_list.append(item[0])
self._publish_sample(return_list)
log.trace("Sending parser state [%s] to driver", self._state)
file_ingested = False
if self.file_complete and len(self._record_buffer) == 0:
# file has been read completely and all records pulled out of the record buffer
file_ingested = True
self._state_callback(self._state, file_ingested) # push new state to driver
return return_list
def _load_particle_buffer(self):
"""
Load up the internal record buffer with some particles based on a
gather from the get_block method.
"""
while self.get_block():
result = self.parse_chunks()
self._record_buffer.extend(result)
def get_block(self, size=1024):
"""
Get a block of characters for processing
@param size The size of the block to try to read
        @retval The length of data retrieved
@throws EOFError when the end of the file is reached
"""
# read in some more data
data = self._stream_handle.read(size)
if data:
self._chunker.add_chunk(data, ntplib.system_to_ntp_time(time.time()))
return len(data)
else: # EOF
self.file_complete = True
raise EOFError
def parse_chunks(self):
"""
Parse out any pending data chunks in the chunker. If
it is a valid data piece, build a particle, update the position and
timestamp. Go until the chunker has no more valid data.
@retval a list of tuples with sample particles encountered in this
        parsing, plus the state (ie "(sample, state)"). An empty list if
nothing was parsed.
"""
raise NotImplementedException("Must write parse_chunks()!")
class SimpleParser(Parser):
def __init__(self, config, stream_handle, exception_callback):
"""
Initialize the simple parser, which does not use state or the chunker
and sieve functions.
@param config: The parser configuration dictionary
@param stream_handle: The stream handle of the file to parse
@param exception_callback: The callback to use when an exception occurs
"""
# the record buffer which will store all parsed particles
self._record_buffer = []
# a flag indicating if the file has been parsed or not
self._file_parsed = False
super(SimpleParser, self).__init__(config,
stream_handle,
None, # state not used
None, # sieve_fn not used
None, # state_callback not used
None, # publish_callback not used
exception_callback)
def parse_file(self):
"""
        This method must be overridden. This method should open and read the file and parse the data within, and at
the end of this method self._record_buffer will be filled with all the particles in the file.
"""
raise NotImplementedException("parse_file() not overridden!")
def get_records(self, number_requested=1):
"""
Initiate parsing the file if it has not been done already, and pop particles off the record buffer to
return as many as requested if they are available in the buffer.
@param number_requested the number of records requested to be returned
@return an array of particles, with a length of the number requested or less
"""
particles_to_return = []
if number_requested > 0:
if self._file_parsed is False:
self.parse_file()
self._file_parsed = True
while len(particles_to_return) < number_requested and len(self._record_buffer) > 0:
particles_to_return.append(self._record_buffer.pop(0))
return particles_to_return
| 43.820669
| 119
| 0.634667
|
__author__ = 'Steve Foley'
__license__ = 'Apache 2.0'
import time
import ntplib
from mi.core.log import get_logger
log = get_logger()
from mi.core.instrument.chunker import StringChunker
from mi.core.instrument.data_particle import DataParticleKey
from mi.core.exceptions import RecoverableSampleException, SampleEncodingException
from mi.core.exceptions import NotImplementedException, UnexpectedDataException
from mi.core.common import BaseEnum
class DataSetDriverConfigKeys(BaseEnum):
PARTICLE_MODULE = "particle_module"
PARTICLE_CLASS = "particle_class"
PARTICLE_CLASSES_DICT = "particle_classes_dict"
DIRECTORY = "directory"
STORAGE_DIRECTORY = "storage_directory"
PATTERN = "pattern"
FREQUENCY = "frequency"
FILE_MOD_WAIT_TIME = "file_mod_wait_time"
HARVESTER = "harvester"
PARSER = "parser"
MODULE = "module"
CLASS = "class"
URI = "uri"
CLASS_ARGS = "class_args"
class Parser(object):
def __init__(self, config, stream_handle, state, sieve_fn,
state_callback, publish_callback, exception_callback=None):
self._chunker = StringChunker(sieve_fn)
self._stream_handle = stream_handle
self._state = state
self._state_callback = state_callback
self._publish_callback = publish_callback
self._exception_callback = exception_callback
self._config = config
if config.get(DataSetDriverConfigKeys.PARTICLE_CLASS) is not None:
if config.get(DataSetDriverConfigKeys.PARTICLE_MODULE):
self._particle_module = __import__(config.get(DataSetDriverConfigKeys.PARTICLE_MODULE),
fromlist=[config.get(DataSetDriverConfigKeys.PARTICLE_CLASS)])
try:
self._particle_class = getattr(self._particle_module,
config.get(DataSetDriverConfigKeys.PARTICLE_CLASS))
except TypeError:
self._particle_class = None
else:
log.warn("Particle class is specified in config, but no particle module is specified in config")
def get_records(self, max_count):
raise NotImplementedException("get_records() not overridden!")
def _publish_sample(self, samples):
if isinstance(samples, list):
self._publish_callback(samples)
else:
self._publish_callback([samples])
def _extract_sample(self, particle_class, regex, raw_data, timestamp):
particle = None
try:
if regex is None or regex.match(raw_data):
particle = particle_class(raw_data, internal_timestamp=timestamp,
preferred_timestamp=DataParticleKey.INTERNAL_TIMESTAMP)
particle.generate()
encoding_errors = particle.get_encoding_errors()
if encoding_errors:
log.warn("Failed to encode: %s", encoding_errors)
raise SampleEncodingException("Failed to encode: %s" % encoding_errors)
except (RecoverableSampleException, SampleEncodingException) as e:
log.error("Sample exception detected: %s raw data: %s", e, raw_data)
if self._exception_callback:
self._exception_callback(e)
else:
raise e
return particle
class BufferLoadingParser(Parser):
def __init__(self, config, stream_handle, state, sieve_fn,
state_callback, publish_callback, exception_callback=None):
self._record_buffer = []
self._timestamp = 0.0
self.file_complete = False
super(BufferLoadingParser, self).__init__(config, stream_handle, state,
sieve_fn, state_callback,
publish_callback,
exception_callback)
def get_records(self, num_records):
if num_records <= 0:
return []
try:
while len(self._record_buffer) < num_records:
self._load_particle_buffer()
except EOFError:
self._process_end_of_file()
return self._yank_particles(num_records)
def _process_end_of_file(self):
(nd_timestamp, non_data) = self._chunker.get_next_non_data()
(timestamp, chunk) = self._chunker.get_next_data()
if non_data and len(non_data) > 0:
log.warn("Have extra unexplained non-data bytes at the end of the file:%s", non_data)
raise UnexpectedDataException("Have extra unexplained non-data bytes at the end of the file:%s" % non_data)
elif chunk and len(chunk) > 0:
log.warn("Have extra unexplained data chunk bytes at the end of the file:%s", chunk)
raise UnexpectedDataException("Have extra unexplained data chunk bytes at the end of the file:%s" % chunk)
def _yank_particles(self, num_records):
if len(self._record_buffer) < num_records:
num_to_fetch = len(self._record_buffer)
else:
num_to_fetch = num_records
log.trace("Yanking %s records of %s requested",
num_to_fetch,
num_records)
return_list = []
records_to_return = self._record_buffer[:num_to_fetch]
self._record_buffer = self._record_buffer[num_to_fetch:]
if len(records_to_return) > 0:
self._state = records_to_return[-1][1]
for item in records_to_return:
log.debug("Record to return: %s", item)
return_list.append(item[0])
self._publish_sample(return_list)
log.trace("Sending parser state [%s] to driver", self._state)
file_ingested = False
if self.file_complete and len(self._record_buffer) == 0:
file_ingested = True
self._state_callback(self._state, file_ingested)
return return_list
def _load_particle_buffer(self):
while self.get_block():
result = self.parse_chunks()
self._record_buffer.extend(result)
def get_block(self, size=1024):
data = self._stream_handle.read(size)
if data:
self._chunker.add_chunk(data, ntplib.system_to_ntp_time(time.time()))
return len(data)
else:
self.file_complete = True
raise EOFError
def parse_chunks(self):
raise NotImplementedException("Must write parse_chunks()!")
class SimpleParser(Parser):
def __init__(self, config, stream_handle, exception_callback):
self._record_buffer = []
self._file_parsed = False
super(SimpleParser, self).__init__(config,
stream_handle,
None,
None,
None,
None,
exception_callback)
def parse_file(self):
raise NotImplementedException("parse_file() not overridden!")
def get_records(self, number_requested=1):
particles_to_return = []
if number_requested > 0:
if self._file_parsed is False:
self.parse_file()
self._file_parsed = True
while len(particles_to_return) < number_requested and len(self._record_buffer) > 0:
particles_to_return.append(self._record_buffer.pop(0))
return particles_to_return
| true
| true
|
f7147794bdcdc9985403994d39b45297ef13f697
| 4,281
|
py
|
Python
|
python/algo_mdo_tit_for_2_tat.py
|
Mark-MDO47/PrisonDilemmaTourney
|
8be7127c2c8b506429031dc1b9a2e441370307f4
|
[
"Unlicense"
] | null | null | null |
python/algo_mdo_tit_for_2_tat.py
|
Mark-MDO47/PrisonDilemmaTourney
|
8be7127c2c8b506429031dc1b9a2e441370307f4
|
[
"Unlicense"
] | null | null | null |
python/algo_mdo_tit_for_2_tat.py
|
Mark-MDO47/PrisonDilemmaTourney
|
8be7127c2c8b506429031dc1b9a2e441370307f4
|
[
"Unlicense"
] | null | null | null |
# Author: Mark Olson 2021-11-06 https://github.com/Mark-MDO47/PrisonDilemmaTourney
#
# algo_mdo_tit_for_2_tat.py - Prisoner's Dilemma tournament algorithm file
#
# The algo_mdo_tit_for_2_tat algorithm behaves as follows:
# On the first two moves it returns choices.COOPERATE
# On all subsequent moves:
# if the opponent did choices.DEFECT within the last two moves, it returns choices.DEFECT this move
# else it returns choices.COOPERATE this move
#
# For an algorithm python routine in a file (i.e. with filename algo_mdo_something.py), the calling sequence is
#    algo_mdo_something(selfHist, oppHist, ID)
# I recommend adding your initials (mine are mdo) to your file/algorithm name so we don't have name collisions
# NOTE that the function name is the same as the python filename with the *.py removed
# This template file is named algorithm_template.py so the function name is algorithm_template
# Each call to the algorithm will have the following for parameters:
#          a list of the history of all the choices made by both parties in reverse order (latest choice before this is [0], prev [1])
#       Thus the opponent's choice made in the previous move, assuming this isn't the first move, is oppChoices[0].
# if len(oppChoices) > 0, there was at least one prior move.
# note: len(oppChoices) should be identical to len(myChoices)
# value of each entry in xxxHist is one of choices.DEFECT or choices.COOPERATE
#
# The algorithm will return
# choices.DEFECT or choices.COOPERATE
#
# See https://en.wikipedia.org/wiki/Prisoner%27s_dilemma
# See https://cs.stanford.edu/people/eroberts/courses/soco/projects/1998-99/game-theory/axelrod.html
#
# Merrill Flood and Melvin Dresher from RAND corporation framed the concept in 1950 to show why two completely rational
# individuals might not cooperate, even if it appears that it is in their best interests to do so.
#
# There are many scenarios that can be mapped to this concept, but the famous mapping by Albert W. Tucker called the
# "Prisoner's Dilemma" revolves around two prisoners, "A" and "B", guilty of the same crime and being held in
# separate interrogation rooms.
#
# Due to weak evidence held by the police, if both cooperate (do not betray the other), that will lead to a small sentence
# for each of them. If one cooperates and the other defects, the defector gets off free and the cooperator gets a
# large sentence. If they both defect, they both get an intermediate sentence.
# (spoiler alert) If the game is played exactly one time, the game-theory best choice for each player is to
# defect (or betray the other player).
#
# Robert Axelrod, professor of political science at the University of Michigan, held a tournament of competing
# strategies for the famous Prisoner's Dilemma in 1980.
#
# He had the insight that if the game is played many times in succession, then the history of play allows each player
# to take into account the "reputation" of the other player in making their choice of behavior.
# He invited some game theorists to submit algorithms that would be competed against each other in a computer tournament.
# Later he held another tournament and invited anyone to submit algorithms.
# The "Tit-For-Tat" algorithm seemed to do the best.
import sys
import PrisonersDilemmaTournament as choices # pick up choices.DEFECT and choices.COOPERATE
# The algo_mdo_tit_for_2_tat algorithm behaves as follows:
# On the first two moves it returns choices.COOPERATE
# On all subsequent moves:
# if the opponent did choices.DEFECT in the last two moves, we return choices.DEFECT this move
# else we return choices.COOPERATE this move
#
# note: the function name should be exactly the same as the filename but without the ".py"
# note: len(selfHist) and len(oppHist) should always be the same
#
def algo_mdo_tit_for_2_tat(selfHist, oppHist, ID):
if len(selfHist) <= 1: # first two moves
return choices.COOPERATE
else:
if (choices.DEFECT == oppHist[1]) or (choices.DEFECT == oppHist[0]):
return choices.DEFECT
else:
            return oppHist[0]    # oppHist[0] must be choices.COOPERATE here since neither of the last two opponent moves was a DEFECT
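# Illustrative only (assumed, not part of the tournament framework): each round the
# tournament calls this routine with the reversed history lists, e.g.
#     move = algo_mdo_tit_for_2_tat([choices.COOPERATE, choices.COOPERATE],
#                                   [choices.DEFECT, choices.COOPERATE], ID=1)
#     # the opponent defected on the most recent move, so move == choices.DEFECT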
if __name__ == "__main__":
sys.stderr.write("ERROR - algo_mdo_tit_for_2_tat.py is not intended to be run stand-alone\n")
exit(-1)
| 57.08
| 122
| 0.749825
|
#
# The algo_mdo_tit_for_2_tat algorithm behaves as follows:
# On the first two moves it returns choices.COOPERATE
# On all subsequent moves:
# if the opponent did choices.DEFECT within the last two moves, it returns choices.DEFECT this move
# else it returns choices.COOPERATE this move
#
# For an algorithm python routine in a file (i.e. with filename algo_mdo_something.py), the calling sequence is
#    algo_mdo_something(selfHist, oppHist, ID)
# I recommend adding your initials (mine are mdo) to your file/algorithm name so we don't have name collisions
# if len(oppChoices) > 0, there was at least one prior move.
# note: len(oppChoices) should be identical to len(myChoices)
# value of each entry in xxxHist is one of choices.DEFECT or choices.COOPERATE
#
# The algorithm will return
# choices.DEFECT or choices.COOPERATE
#
# See https://en.wikipedia.org/wiki/Prisoner%27s_dilemma
# See https://cs.stanford.edu/people/eroberts/courses/soco/projects/1998-99/game-theory/axelrod.html
#
# Merrill Flood and Melvin Dresher from RAND corporation framed the concept in 1950 to show why two completely rational
# individuals might not cooperate, even if it appears that it is in their best interests to do so.
#
# There are many scenarios that can be mapped to this concept, but the famous mapping by Albert W. Tucker called the
# "Prisoner's Dilemma" revolves around two prisoners, "A" and "B", guilty of the same crime and being held in
#
# He had the insight that if the game is played many times in succession, then the history of play allows each player
# to take into account the "reputation" of the other player in making their choice of behavior.
# He invited some game theorists to submit algorithms that would be competed against each other in a computer tournament.
# Later he held another tournament and invited anyone to submit algorithms.
# The "Tit-For-Tat" algorithm seemed to do the best.
import sys
import PrisonersDilemmaTournament as choices # pick up choices.DEFECT and choices.COOPERATE
# The algo_mdo_tit_for_2_tat algorithm behaves as follows:
# On the first two moves it returns choices.COOPERATE
# On all subsequent moves:
# if the opponent did choices.DEFECT in the last two moves, we return choices.DEFECT this move
# else we return choices.COOPERATE this move
#
# note: the function name should be exactly the same as the filename but without the ".py"
# note: len(selfHist) and len(oppHist) should always be the same
#
def algo_mdo_tit_for_2_tat(selfHist, oppHist, ID):
if len(selfHist) <= 1: # first two moves
return choices.COOPERATE
else:
if (choices.DEFECT == oppHist[1]) or (choices.DEFECT == oppHist[0]):
return choices.DEFECT
else:
return oppHist[0]
if __name__ == "__main__":
sys.stderr.write("ERROR - algo_mdo_tit_for_2_tat.py is not intended to be run stand-alone\n")
exit(-1)
| true
| true
|
f714788157a6abe32fba2b5d294f2ebd1935e271
| 93
|
py
|
Python
|
app/config.py
|
SilentFan/EvenThing
|
b32054a3c9b53d8dfbdc6a808ff9d00d88d1c876
|
[
"MIT"
] | null | null | null |
app/config.py
|
SilentFan/EvenThing
|
b32054a3c9b53d8dfbdc6a808ff9d00d88d1c876
|
[
"MIT"
] | 1
|
2015-10-23T14:43:09.000Z
|
2015-10-23T14:43:09.000Z
|
app/config.py
|
SilentFan/EvenThing
|
b32054a3c9b53d8dfbdc6a808ff9d00d88d1c876
|
[
"MIT"
] | null | null | null |
__author__ = 'meli'
HOST = '0.0.0.0'
STATIC_PATH = "../static"
TEPLATE_PATH = "../templates"
| 18.6
| 29
| 0.645161
|
__author__ = 'meli'
HOST = '0.0.0.0'
STATIC_PATH = "../static"
TEPLATE_PATH = "../templates"
| true
| true
|
f71478bc18c79e2a61aca2351a0f0fe624d2d8a9
| 1,514
|
py
|
Python
|
cephprimarystorage/cephprimarystorage/cdaemon.py
|
wh872743880/zstack-utility
|
e8ba863514a62a2e5ede6ed27dd50e4307086ce7
|
[
"Apache-2.0"
] | null | null | null |
cephprimarystorage/cephprimarystorage/cdaemon.py
|
wh872743880/zstack-utility
|
e8ba863514a62a2e5ede6ed27dd50e4307086ce7
|
[
"Apache-2.0"
] | null | null | null |
cephprimarystorage/cephprimarystorage/cdaemon.py
|
wh872743880/zstack-utility
|
e8ba863514a62a2e5ede6ed27dd50e4307086ce7
|
[
"Apache-2.0"
] | null | null | null |
'''
@author: frank
'''
import sys, os, os.path
from zstacklib.utils import log
from zstacklib.utils import linux
import zstacklib.utils.iptables as iptables
pidfile = '/var/run/zstack/ceph-primarystorage.pid'
log.configure_log('/var/log/zstack/ceph-primarystorage.log')
logger = log.get_logger(__name__)
import cephagent
def prepare_pid_dir(path):
pdir = os.path.dirname(path)
if not os.path.isdir(pdir):
os.makedirs(pdir)
def main():
usage = 'usage: python -c "from cephprimarystorage import cdaemon; cdaemon.main()" start|stop|restart'
if len(sys.argv) != 2 or not sys.argv[1] in ['start', 'stop', 'restart']:
print usage
sys.exit(1)
global pidfile
prepare_pid_dir(pidfile)
try:
iptc = iptables.from_iptables_save()
iptc.add_rule('-A INPUT -p tcp -m tcp --dport 7762 -j ACCEPT')
iptc.iptable_restore()
cmd = sys.argv[1]
agentdaemon = cephagent.CephDaemon(pidfile)
if cmd == 'start':
logger.debug('zstack-ceph-primarystorage starts')
agentdaemon.start()
elif cmd == 'stop':
logger.debug('zstack-ceph-primarystorage stops')
agentdaemon.stop()
elif cmd == 'restart':
logger.debug('zstack-ceph-primarystorage restarts')
agentdaemon.restart()
sys.exit(0)
except Exception:
logger.warning(linux.get_exception_stacktrace())
sys.exit(1)
if __name__ == '__main__':
main()
| 28.566038
| 106
| 0.632761
|
'''
@author: frank
'''
import sys, os, os.path
from zstacklib.utils import log
from zstacklib.utils import linux
import zstacklib.utils.iptables as iptables
pidfile = '/var/run/zstack/ceph-primarystorage.pid'
log.configure_log('/var/log/zstack/ceph-primarystorage.log')
logger = log.get_logger(__name__)
import cephagent
def prepare_pid_dir(path):
pdir = os.path.dirname(path)
if not os.path.isdir(pdir):
os.makedirs(pdir)
def main():
usage = 'usage: python -c "from cephprimarystorage import cdaemon; cdaemon.main()" start|stop|restart'
if len(sys.argv) != 2 or not sys.argv[1] in ['start', 'stop', 'restart']:
print usage
sys.exit(1)
global pidfile
prepare_pid_dir(pidfile)
try:
iptc = iptables.from_iptables_save()
iptc.add_rule('-A INPUT -p tcp -m tcp --dport 7762 -j ACCEPT')
iptc.iptable_restore()
cmd = sys.argv[1]
agentdaemon = cephagent.CephDaemon(pidfile)
if cmd == 'start':
logger.debug('zstack-ceph-primarystorage starts')
agentdaemon.start()
elif cmd == 'stop':
logger.debug('zstack-ceph-primarystorage stops')
agentdaemon.stop()
elif cmd == 'restart':
logger.debug('zstack-ceph-primarystorage restarts')
agentdaemon.restart()
sys.exit(0)
except Exception:
logger.warning(linux.get_exception_stacktrace())
sys.exit(1)
if __name__ == '__main__':
main()
| false
| true
|
f7147b09abce445b07986e1ce4221073dcb14461
| 2,384
|
py
|
Python
|
dynd/nd/test/test_functional.py
|
mwiebe/dynd-python
|
45ffecaf7887761a5634140f0ed120b33ace58a3
|
[
"BSD-2-Clause"
] | 93
|
2015-01-29T14:00:57.000Z
|
2021-11-23T14:37:27.000Z
|
dynd/nd/test/test_functional.py
|
ContinuumIO/dynd-python
|
bae7afb8eb604b0bce09befc9e896c8ec8357aaa
|
[
"BSD-2-Clause"
] | 143
|
2015-01-04T12:30:24.000Z
|
2016-09-29T18:36:22.000Z
|
dynd/nd/test/test_functional.py
|
ContinuumIO/dynd-python
|
bae7afb8eb604b0bce09befc9e896c8ec8357aaa
|
[
"BSD-2-Clause"
] | 20
|
2015-06-08T11:54:46.000Z
|
2021-03-09T07:57:25.000Z
|
import sys
if sys.version_info >= (2, 7):
import unittest
else:
import unittest2 as unittest
from dynd import annotate, nd, ndt
@unittest.skip('Test disabled since callables were reworked')
class TestApply(unittest.TestCase):
def test_object(self):
@nd.functional.apply(jit = False)
def f(x):
return x
self.assertEqual(f.type, ndt.callable(ndt.scalar, ndt.scalar))
@nd.functional.apply(jit = False)
@annotate(ndt.int32)
def f(x):
return x
self.assertEqual(f.type, ndt.callable(ndt.int32, ndt.scalar))
self.assertEqual(0, f(0))
self.assertEqual(1, f(1))
@nd.functional.apply(jit = False)
@annotate(ndt.int32, ndt.int32)
def f(x):
return x
self.assertEqual(f.type, ndt.callable(ndt.int32, ndt.int32))
self.assertEqual(0, f(0))
self.assertEqual(1, f(1))
"""
def test_numba(self):
try:
import numba
except ImportError as error:
raise unittest.SkipTest(error)
@nd.functional.apply(jit = True)
def f(x):
return x
self.assertEqual(f.type, ndt.callable(ndt.scalar, ndt.scalar))
self.assertEqual(0, f(0))
@nd.functional.apply(jit = True)
def f(x):
return x
self.assertEqual(f.type, ndt.callable(ndt.scalar, ndt.scalar))
self.assertEqual(0, f(0))
"""
@unittest.skip('Test disabled since callables were reworked')
class TestElwise(unittest.TestCase):
def test_unary(self):
@nd.functional.elwise
@annotate(ndt.int32)
def f(x):
return 2 * x
# self.assertEqual(nd.array([2, 4, 6]), f([1, 2, 3]))
@unittest.skip('Test disabled since callables were reworked')
class TestReduction(unittest.TestCase):
def test_unary(self):
@nd.functional.reduction
@annotate(ndt.int32)
def f(x, y):
return max(x, y)
self.assertEqual(3, f([1, 2, 3]))
self.assertEqual(6, f([[1, 2, 3], [4, 5, 6]]))
"""
def multigen(func):
return lambda x: x
class TestMultidispatch(unittest.TestCase):
def test_unary(self):
@nd.functional.multidispatch()
def callables():
yield 5
print callables(3)
"""
if __name__ == '__main__':
unittest.main(verbosity=2)
| 25.361702
| 70
| 0.588926
|
import sys
if sys.version_info >= (2, 7):
import unittest
else:
import unittest2 as unittest
from dynd import annotate, nd, ndt
@unittest.skip('Test disabled since callables were reworked')
class TestApply(unittest.TestCase):
def test_object(self):
@nd.functional.apply(jit = False)
def f(x):
return x
self.assertEqual(f.type, ndt.callable(ndt.scalar, ndt.scalar))
@nd.functional.apply(jit = False)
@annotate(ndt.int32)
def f(x):
return x
self.assertEqual(f.type, ndt.callable(ndt.int32, ndt.scalar))
self.assertEqual(0, f(0))
self.assertEqual(1, f(1))
@nd.functional.apply(jit = False)
@annotate(ndt.int32, ndt.int32)
def f(x):
return x
self.assertEqual(f.type, ndt.callable(ndt.int32, ndt.int32))
self.assertEqual(0, f(0))
self.assertEqual(1, f(1))
@unittest.skip('Test disabled since callables were reworked')
class TestElwise(unittest.TestCase):
def test_unary(self):
@nd.functional.elwise
@annotate(ndt.int32)
def f(x):
return 2 * x
@unittest.skip('Test disabled since callables were reworked')
class TestReduction(unittest.TestCase):
def test_unary(self):
@nd.functional.reduction
@annotate(ndt.int32)
def f(x, y):
return max(x, y)
self.assertEqual(3, f([1, 2, 3]))
self.assertEqual(6, f([[1, 2, 3], [4, 5, 6]]))
if __name__ == '__main__':
unittest.main(verbosity=2)
| true
| true
|
f7147bee2514ba0ec8ce68a3df5e77e31205a226
| 214
|
py
|
Python
|
test.py
|
BlackPhoenixSlo/vislice
|
48fc9160bd857656cd383c7dd0e562bfae5ebf3a
|
[
"MIT"
] | null | null | null |
test.py
|
BlackPhoenixSlo/vislice
|
48fc9160bd857656cd383c7dd0e562bfae5ebf3a
|
[
"MIT"
] | null | null | null |
test.py
|
BlackPhoenixSlo/vislice
|
48fc9160bd857656cd383c7dd0e562bfae5ebf3a
|
[
"MIT"
] | null | null | null |
import math
def pra(n):
    for i in range(2, int(math.sqrt(n)) + 1):  # go up to and including sqrt(n) so perfect squares are not reported as prime
if (n%i==0):
return False
return True
def izpisi():
for i in range (2,200):
if (pra(i)):
print(i)
| 19.454545
| 43
| 0.481308
|
import math
def pra(n):
    for i in range(2, int(math.sqrt(n)) + 1):
if (n%i==0):
return False
return True
def izpisi():
for i in range (2,200):
if (pra(i)):
print(i)
| true
| true
|
f7147c16c55d6759428fde88ad86145632dfa7ae
| 5,378
|
py
|
Python
|
backend/app/literature/crud/cross_reference_crud.py
|
alliance-genome/agr_literature_service
|
2278316422d5c3ab65e21bb97d91e861e48853c5
|
[
"MIT"
] | null | null | null |
backend/app/literature/crud/cross_reference_crud.py
|
alliance-genome/agr_literature_service
|
2278316422d5c3ab65e21bb97d91e861e48853c5
|
[
"MIT"
] | 39
|
2021-10-18T17:02:49.000Z
|
2022-03-28T20:56:24.000Z
|
backend/app/literature/crud/cross_reference_crud.py
|
alliance-genome/agr_literature_service
|
2278316422d5c3ab65e21bb97d91e861e48853c5
|
[
"MIT"
] | 1
|
2021-10-21T00:11:18.000Z
|
2021-10-21T00:11:18.000Z
|
from sqlalchemy.orm import Session
from datetime import datetime
from fastapi import HTTPException
from fastapi import status
from fastapi.encoders import jsonable_encoder
from literature.schemas import CrossReferenceSchema
from literature.schemas import CrossReferenceSchemaUpdate
from literature.models import CrossReferenceModel
from literature.models import ReferenceModel
from literature.models import ResourceModel
from literature.models import ResourceDescriptorModel
from literature.crud.reference_resource import create_obj, add_reference_resource
def create(db: Session, cross_reference: CrossReferenceSchema) -> str:
cross_reference_data = jsonable_encoder(cross_reference)
if db.query(CrossReferenceModel).filter(CrossReferenceModel.curie == cross_reference_data['curie']).first():
raise HTTPException(status_code=status.HTTP_409_CONFLICT,
detail=f"CrossReference with curie {cross_reference_data['curie']} already exists")
db_obj = create_obj(db, CrossReferenceModel, cross_reference_data)
db.add(db_obj)
db.commit()
return "created"
def destroy(db: Session, curie: str) -> None:
cross_reference = db.query(CrossReferenceModel).filter(CrossReferenceModel.curie == curie).first()
if not cross_reference:
raise HTTPException(status_code=status.HTTP_404_NOT_FOUND,
detail=f"Cross Reference with curie {curie} not found")
db.delete(cross_reference)
db.commit()
return None
def patch(db: Session, curie: str, cross_reference_update: CrossReferenceSchemaUpdate) -> dict:
cross_reference_data = jsonable_encoder(cross_reference_update)
cross_reference_db_obj = db.query(CrossReferenceModel).filter(CrossReferenceModel.curie == curie).first()
if not cross_reference_db_obj:
raise HTTPException(status_code=status.HTTP_404_NOT_FOUND,
detail=f"Cross Reference with curie {curie} not found")
add_reference_resource(db, cross_reference_db_obj, cross_reference_update, non_fatal=True)
for field, value in cross_reference_data.items():
setattr(cross_reference_db_obj, field, value)
cross_reference_db_obj.date_updated = datetime.utcnow()
db.commit()
return {"message": "updated"}
def show(db: Session, curie: str, indirect=True) -> dict:
cross_reference = db.query(CrossReferenceModel).filter(CrossReferenceModel.curie == curie).first()
if not cross_reference:
raise HTTPException(status_code=status.HTTP_404_NOT_FOUND,
detail=f"CrossReference with the curie {curie} is not available")
cross_reference_data = jsonable_encoder(cross_reference)
if cross_reference_data['resource_id']:
cross_reference_data['resource_curie'] = db.query(ResourceModel.curie).filter(ResourceModel.resource_id == cross_reference_data['resource_id']).first().curie
del cross_reference_data['resource_id']
if cross_reference_data['reference_id']:
cross_reference_data['reference_curie'] = db.query(ReferenceModel.curie).filter(ReferenceModel.reference_id == cross_reference_data['reference_id']).first().curie
del cross_reference_data['reference_id']
author_ids = []
editor_ids = []
if not indirect:
for author in cross_reference.authors:
author_ids.append(author.author_id)
for editor in cross_reference.editors:
editor_ids.append(editor.editor_id)
cross_reference_data['author_ids'] = author_ids
cross_reference_data['editor_ids'] = editor_ids
[db_prefix, local_id] = curie.split(":", 1)
resource_descriptor = db.query(ResourceDescriptorModel).filter(ResourceDescriptorModel.db_prefix == db_prefix).first()
if resource_descriptor:
default_url = resource_descriptor.default_url.replace("[%s]", local_id)
cross_reference_data['url'] = default_url
if cross_reference_data['pages']:
pages_data = []
for cr_page in cross_reference_data['pages']:
page_url = ""
for rd_page in resource_descriptor.pages:
if rd_page.name == cr_page:
page_url = rd_page.url
break
pages_data.append({"name": cr_page,
"url": page_url.replace("[%s]", local_id)})
cross_reference_data['pages'] = pages_data
elif cross_reference_data['pages']:
pages_data = []
for cr_page in cross_reference_data['pages']:
pages_data.append({"name": cr_page})
cross_reference_data['pages'] = pages_data
return cross_reference_data
def show_changesets(db: Session, curie: str):
cross_reference = db.query(CrossReferenceModel).filter(CrossReferenceModel.curie == curie).first()
if not cross_reference:
raise HTTPException(status_code=status.HTTP_404_NOT_FOUND,
detail=f"Cross Reference with curie {curie} is not available")
history = []
for version in cross_reference.versions:
tx = version.transaction
history.append({'transaction': {'id': tx.id,
'issued_at': tx.issued_at,
'user_id': tx.user_id},
'changeset': version.changeset})
return history
| 42.015625
| 170
| 0.697285
|
from sqlalchemy.orm import Session
from datetime import datetime
from fastapi import HTTPException
from fastapi import status
from fastapi.encoders import jsonable_encoder
from literature.schemas import CrossReferenceSchema
from literature.schemas import CrossReferenceSchemaUpdate
from literature.models import CrossReferenceModel
from literature.models import ReferenceModel
from literature.models import ResourceModel
from literature.models import ResourceDescriptorModel
from literature.crud.reference_resource import create_obj, add_reference_resource
def create(db: Session, cross_reference: CrossReferenceSchema) -> str:
cross_reference_data = jsonable_encoder(cross_reference)
if db.query(CrossReferenceModel).filter(CrossReferenceModel.curie == cross_reference_data['curie']).first():
raise HTTPException(status_code=status.HTTP_409_CONFLICT,
detail=f"CrossReference with curie {cross_reference_data['curie']} already exists")
db_obj = create_obj(db, CrossReferenceModel, cross_reference_data)
db.add(db_obj)
db.commit()
return "created"
def destroy(db: Session, curie: str) -> None:
cross_reference = db.query(CrossReferenceModel).filter(CrossReferenceModel.curie == curie).first()
if not cross_reference:
raise HTTPException(status_code=status.HTTP_404_NOT_FOUND,
detail=f"Cross Reference with curie {curie} not found")
db.delete(cross_reference)
db.commit()
return None
def patch(db: Session, curie: str, cross_reference_update: CrossReferenceSchemaUpdate) -> dict:
cross_reference_data = jsonable_encoder(cross_reference_update)
cross_reference_db_obj = db.query(CrossReferenceModel).filter(CrossReferenceModel.curie == curie).first()
if not cross_reference_db_obj:
raise HTTPException(status_code=status.HTTP_404_NOT_FOUND,
detail=f"Cross Reference with curie {curie} not found")
add_reference_resource(db, cross_reference_db_obj, cross_reference_update, non_fatal=True)
for field, value in cross_reference_data.items():
setattr(cross_reference_db_obj, field, value)
cross_reference_db_obj.date_updated = datetime.utcnow()
db.commit()
return {"message": "updated"}
def show(db: Session, curie: str, indirect=True) -> dict:
cross_reference = db.query(CrossReferenceModel).filter(CrossReferenceModel.curie == curie).first()
if not cross_reference:
raise HTTPException(status_code=status.HTTP_404_NOT_FOUND,
detail=f"CrossReference with the curie {curie} is not available")
cross_reference_data = jsonable_encoder(cross_reference)
if cross_reference_data['resource_id']:
cross_reference_data['resource_curie'] = db.query(ResourceModel.curie).filter(ResourceModel.resource_id == cross_reference_data['resource_id']).first().curie
del cross_reference_data['resource_id']
if cross_reference_data['reference_id']:
cross_reference_data['reference_curie'] = db.query(ReferenceModel.curie).filter(ReferenceModel.reference_id == cross_reference_data['reference_id']).first().curie
del cross_reference_data['reference_id']
author_ids = []
editor_ids = []
if not indirect:
for author in cross_reference.authors:
author_ids.append(author.author_id)
for editor in cross_reference.editors:
editor_ids.append(editor.editor_id)
cross_reference_data['author_ids'] = author_ids
cross_reference_data['editor_ids'] = editor_ids
[db_prefix, local_id] = curie.split(":", 1)
resource_descriptor = db.query(ResourceDescriptorModel).filter(ResourceDescriptorModel.db_prefix == db_prefix).first()
if resource_descriptor:
default_url = resource_descriptor.default_url.replace("[%s]", local_id)
cross_reference_data['url'] = default_url
if cross_reference_data['pages']:
pages_data = []
for cr_page in cross_reference_data['pages']:
page_url = ""
for rd_page in resource_descriptor.pages:
if rd_page.name == cr_page:
page_url = rd_page.url
break
pages_data.append({"name": cr_page,
"url": page_url.replace("[%s]", local_id)})
cross_reference_data['pages'] = pages_data
elif cross_reference_data['pages']:
pages_data = []
for cr_page in cross_reference_data['pages']:
pages_data.append({"name": cr_page})
cross_reference_data['pages'] = pages_data
return cross_reference_data
def show_changesets(db: Session, curie: str):
cross_reference = db.query(CrossReferenceModel).filter(CrossReferenceModel.curie == curie).first()
if not cross_reference:
raise HTTPException(status_code=status.HTTP_404_NOT_FOUND,
detail=f"Cross Reference with curie {curie} is not available")
history = []
for version in cross_reference.versions:
tx = version.transaction
history.append({'transaction': {'id': tx.id,
'issued_at': tx.issued_at,
'user_id': tx.user_id},
'changeset': version.changeset})
return history
| true
| true
|
f7147c364dc2a6ceaf51a2404c99136f6f4bc427
| 3,123
|
py
|
Python
|
tensorflow_graphics/nn/metric/tests/fscore_test.py
|
jackd/graphics
|
736b99a3306e302674a9b7599e3e2857b85fdb74
|
[
"Apache-2.0"
] | null | null | null |
tensorflow_graphics/nn/metric/tests/fscore_test.py
|
jackd/graphics
|
736b99a3306e302674a9b7599e3e2857b85fdb74
|
[
"Apache-2.0"
] | null | null | null |
tensorflow_graphics/nn/metric/tests/fscore_test.py
|
jackd/graphics
|
736b99a3306e302674a9b7599e3e2857b85fdb74
|
[
"Apache-2.0"
] | 1
|
2020-04-11T10:37:36.000Z
|
2020-04-11T10:37:36.000Z
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the fscore metric."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
from tensorflow_graphics.nn.metric import fscore
from tensorflow_graphics.nn.metric import precision
from tensorflow_graphics.nn.metric import recall
from tensorflow_graphics.util import test_case
def random_tensor(tensor_shape):
return np.random.uniform(low=0.0, high=1.0, size=tensor_shape)
def random_tensor_shape():
tensor_size = np.random.randint(5) + 1
return np.random.randint(1, 10, size=(tensor_size)).tolist()
def binary_precision_function(ground_truth, predictions):
return precision.evaluate(ground_truth, predictions, classes=[1])
def binary_recall_function(ground_truth, predictions):
return recall.evaluate(ground_truth, predictions, classes=[1])
class FscoreTest(test_case.TestCase):
@parameterized.parameters(
# Precision = 0.5, Recall = 0.25.
((0, 1, 1, 1, 1), (1, 1, 0, 0, 0), 2 * (0.5 * 0.25) / (0.5 + 0.25)),
# Precision = 1, Recall = 1.
((0, 0, 0, 1, 1, 1, 0, 1), (0, 0, 0, 1, 1, 1, 0, 1), 1),
# Precision = 0, Recall = 0.
((0, 1, 0, 0, 0, 0), (0, 0, 0, 0, 0, 0), 0))
def test_evaluate_preset(self, ground_truth, predictions, expected_fscore):
tensor_shape = random_tensor_shape()
ground_truth_labels = np.tile(ground_truth, tensor_shape + [1])
predicted_labels = np.tile(predictions, tensor_shape + [1])
expected = np.tile(expected_fscore, tensor_shape)
result = fscore.evaluate(
ground_truth_labels,
predicted_labels,
precision_function=binary_precision_function,
recall_function=binary_recall_function)
self.assertAllClose(expected, result)
@parameterized.parameters(
("Not all batch dimensions are broadcast-compatible.", (1, 5, 3), (4, 3)),
("Not all batch dimensions are broadcast-compatible.", (3, 4), (2, 4, 5)),
)
def test_evaluate_shape_exception_raised(self, error_msg, *shape):
"""Tests that the shape exception is raised."""
self.assert_exception_is_raised(fscore.evaluate, error_msg, shape)
@parameterized.parameters(
((1, 5, 3), (2, 5, 1)),
((None, 2, 6), (4, 2, None)),
((3, 1, 1, 2), (3, 5, 8, 2)),
)
def test_evaluate_shape_exception_not_raised(self, *shapes):
"""Tests that the shape exceptions are not raised."""
self.assert_exception_is_not_raised(fscore.evaluate, shapes)
if __name__ == "__main__":
test_case.main()
| 34.7
| 80
| 0.707973
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
from tensorflow_graphics.nn.metric import fscore
from tensorflow_graphics.nn.metric import precision
from tensorflow_graphics.nn.metric import recall
from tensorflow_graphics.util import test_case
def random_tensor(tensor_shape):
return np.random.uniform(low=0.0, high=1.0, size=tensor_shape)
def random_tensor_shape():
tensor_size = np.random.randint(5) + 1
return np.random.randint(1, 10, size=(tensor_size)).tolist()
def binary_precision_function(ground_truth, predictions):
return precision.evaluate(ground_truth, predictions, classes=[1])
def binary_recall_function(ground_truth, predictions):
return recall.evaluate(ground_truth, predictions, classes=[1])
class FscoreTest(test_case.TestCase):
@parameterized.parameters(
((0, 1, 1, 1, 1), (1, 1, 0, 0, 0), 2 * (0.5 * 0.25) / (0.5 + 0.25)),
((0, 0, 0, 1, 1, 1, 0, 1), (0, 0, 0, 1, 1, 1, 0, 1), 1),
((0, 1, 0, 0, 0, 0), (0, 0, 0, 0, 0, 0), 0))
def test_evaluate_preset(self, ground_truth, predictions, expected_fscore):
tensor_shape = random_tensor_shape()
ground_truth_labels = np.tile(ground_truth, tensor_shape + [1])
predicted_labels = np.tile(predictions, tensor_shape + [1])
expected = np.tile(expected_fscore, tensor_shape)
result = fscore.evaluate(
ground_truth_labels,
predicted_labels,
precision_function=binary_precision_function,
recall_function=binary_recall_function)
self.assertAllClose(expected, result)
@parameterized.parameters(
("Not all batch dimensions are broadcast-compatible.", (1, 5, 3), (4, 3)),
("Not all batch dimensions are broadcast-compatible.", (3, 4), (2, 4, 5)),
)
def test_evaluate_shape_exception_raised(self, error_msg, *shape):
self.assert_exception_is_raised(fscore.evaluate, error_msg, shape)
@parameterized.parameters(
((1, 5, 3), (2, 5, 1)),
((None, 2, 6), (4, 2, None)),
((3, 1, 1, 2), (3, 5, 8, 2)),
)
def test_evaluate_shape_exception_not_raised(self, *shapes):
self.assert_exception_is_not_raised(fscore.evaluate, shapes)
if __name__ == "__main__":
test_case.main()
| true
| true
|
f7147c381b59f0fc0e28d456483711cc73e3d0d7
| 10,454
|
py
|
Python
|
stylegan2/run_training.py
|
arita37/pic-recon
|
703f80eb6d191f68441ce71bc0f388556cb3e1bc
|
[
"MIT"
] | 8
|
2021-06-16T20:04:27.000Z
|
2021-12-17T18:57:37.000Z
|
stylegan2/run_training.py
|
comp-imaging-sci/pic-recon
|
703f80eb6d191f68441ce71bc0f388556cb3e1bc
|
[
"MIT"
] | null | null | null |
stylegan2/run_training.py
|
comp-imaging-sci/pic-recon
|
703f80eb6d191f68441ce71bc0f388556cb3e1bc
|
[
"MIT"
] | 2
|
2021-06-19T18:00:58.000Z
|
2021-07-14T05:08:16.000Z
|
# Copyright (c) 2019, NVIDIA Corporation. All rights reserved.
#
# This work is made available under the Nvidia Source Code License-NC.
# To view a copy of this license, visit
# https://nvlabs.github.io/stylegan2/license.html
#
# Modified by Varun A. Kelkar - vak2@illinois.edu
import argparse
import copy
import os
import sys
import dnnlib
from dnnlib import EasyDict
from metrics.metric_defaults import metric_defaults
#----------------------------------------------------------------------------
_valid_configs = [
# Table 1
'config-a', # Baseline StyleGAN
'config-b', # + Weight demodulation
'config-c', # + Lazy regularization
'config-d', # + Path length regularization
'config-e', # + No growing, new G & D arch.
'config-f', # + Large networks (default)
'config-g', # + zero sectioning in dlatent
'config-h', # f + selu nonlinearity
'config-i', # + zero sectioning in dlatent in a different way
'config-j', # h + mix all styles
# Table 2
'config-e-Gorig-Dorig', 'config-e-Gorig-Dresnet', 'config-e-Gorig-Dskip',
'config-e-Gresnet-Dorig', 'config-e-Gresnet-Dresnet', 'config-e-Gresnet-Dskip',
'config-e-Gskip-Dorig', 'config-e-Gskip-Dresnet', 'config-e-Gskip-Dskip',
'config-frgb', 'config-hrgb', 'config-jrgb',
# No latent noise series
'config-frgb-nonoise',
]
#----------------------------------------------------------------------------
def run(dataset, data_dir, result_dir, config_id, num_gpus, total_kimg, gamma, mirror_augment, metrics, resume, resume_path, stall):
train = EasyDict(run_func_name='training.training_loop.training_loop') # Options for training loop.
G = EasyDict(func_name='training.networks_stylegan2.G_main') # Options for generator network.
D = EasyDict(func_name='training.networks_stylegan2.D_stylegan2') # Options for discriminator network.
G_opt = EasyDict(beta1=0.0, beta2=0.99, epsilon=1e-8) # Options for generator optimizer.
D_opt = EasyDict(beta1=0.0, beta2=0.99, epsilon=1e-8) # Options for discriminator optimizer.
G_loss = EasyDict(func_name='training.loss.G_logistic_ns_pathreg') # Options for generator loss.
D_loss = EasyDict(func_name='training.loss.D_logistic_r1') # Options for discriminator loss.
sched = EasyDict() # Options for TrainingSchedule.
grid = EasyDict(size='8k', layout='random') # Options for setup_snapshot_image_grid().
sc = dnnlib.SubmitConfig() # Options for dnnlib.submit_run().
tf_config = {'rnd.np_random_seed': 1000} # Options for tflib.init_tf().
if resume:
train.resume_pkl = resume_path
train.resume_kimg = resume
train.stall = stall
train.data_dir = data_dir
train.total_kimg = total_kimg
train.mirror_augment = mirror_augment
train.image_snapshot_ticks = train.network_snapshot_ticks = 10
sched.G_lrate_base = sched.D_lrate_base = 0.001
sched.minibatch_size_base = 32
sched.minibatch_gpu_base = 4
D_loss.gamma = 10
metrics = [metric_defaults[x] for x in metrics]
desc = f'stylegan2-lr-{sched.D_lrate_base}'
desc += '-' + dataset
# nonoise series
if '-nonoise' in config_id:
desc += '-nonoise'
G.if_latent_noise = False
config_id = config_id.strip('-nonoise')
# for rgb images
if 'rgb' in config_id:
dataset_args = EasyDict(tfrecord_dir=dataset, dtype='uint8', dynamic_range=[0,255])
config_id = config_id.strip('rgb')
else:
dataset_args = EasyDict(tfrecord_dir=dataset, dtype='float32', dynamic_range=[0,1])
assert num_gpus in [1, 2, 4, 8]
sc.num_gpus = num_gpus
desc += '-%dgpu' % num_gpus
assert config_id in _valid_configs
desc += '-' + config_id
# Configs A-E: Shrink networks to match original StyleGAN.
if config_id != 'config-f':
G.fmap_base = D.fmap_base = 8 << 10
# Config E: Set gamma to 100 and override G & D architecture.
if config_id.startswith('config-e'):
D_loss.gamma = 100
if 'Gorig' in config_id: G.architecture = 'orig'
if 'Gskip' in config_id: G.architecture = 'skip' # (default)
if 'Gresnet' in config_id: G.architecture = 'resnet'
if 'Dorig' in config_id: D.architecture = 'orig'
if 'Dskip' in config_id: D.architecture = 'skip'
if 'Dresnet' in config_id: D.architecture = 'resnet' # (default)
# Configs A-D: Enable progressive growing and switch to networks that support it.
if config_id in ['config-a', 'config-b', 'config-c', 'config-d']:
sched.lod_initial_resolution = 8
sched.G_lrate_base = sched.D_lrate_base = 0.001
sched.G_lrate_dict = sched.D_lrate_dict = {128: 0.0015, 256: 0.002, 512: 0.003, 1024: 0.003}
sched.minibatch_size_base = 32 # (default)
sched.minibatch_size_dict = {8: 256, 16: 128, 32: 64, 64: 32}
sched.minibatch_gpu_base = 4 # (default)
sched.minibatch_gpu_dict = {8: 32, 16: 16, 32: 8, 64: 4}
G.synthesis_func = 'G_synthesis_stylegan_revised'
D.func_name = 'training.networks_stylegan2.D_stylegan'
# Configs A-C: Disable path length regularization.
if config_id in ['config-a', 'config-b', 'config-c']:
G_loss = EasyDict(func_name='training.loss.G_logistic_ns')
# Configs A-B: Disable lazy regularization.
if config_id in ['config-a', 'config-b']:
train.lazy_regularization = False
# Config A: Switch to original StyleGAN networks.
if config_id == 'config-a':
G = EasyDict(func_name='training.networks_stylegan.G_style')
D = EasyDict(func_name='training.networks_stylegan.D_basic')
# Config G: Zero sectioning in dlatent
if config_id == 'config-g':
G.zero_section = 2
G.nonlinearity = 'lsoftplus'
G.latent_size = 2048
G.dlatent_size = 2048
# Config H: Use ELU nonlinearity, and no zero sectioning
if config_id == 'config-h':
G.nonlinearity = 'lsoftplus'
# Config I: Zero sectioning in dlatent in a different way
if config_id == 'config-i':
G.zero_section = [0.1, 0.2, 0.3, 0.45, 0.6, 0.8, 0.9, 1.]
G.nonlinearity = 'lsoftplus'
G.latent_size = 2048
G.dlatent_size = 2048
if config_id == 'config-j':
G.mix_all = 1
# G.nonlinearity = 'lsoftplus'
if gamma is not None:
D_loss.gamma = gamma
sc.submit_target = dnnlib.SubmitTarget.LOCAL
sc.local.do_not_copy_source_files = True
kwargs = EasyDict(train)
kwargs.update(G_args=G, D_args=D, G_opt_args=G_opt, D_opt_args=D_opt, G_loss_args=G_loss, D_loss_args=D_loss)
kwargs.update(dataset_args=dataset_args, sched_args=sched, grid_args=grid, metric_arg_list=metrics, tf_config=tf_config)
kwargs.submit_config = copy.deepcopy(sc)
kwargs.submit_config.run_dir_root = result_dir
kwargs.submit_config.run_desc = desc
dnnlib.submit_run(**kwargs)
#----------------------------------------------------------------------------
def _str_to_bool(v):
if isinstance(v, bool):
return v
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
def _parse_comma_sep(s):
if s is None or s.lower() == 'none' or s == '':
return []
return s.split(',')
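# For example (the metric names here are just illustrative strings):
#     _parse_comma_sep("fid50k,ppl_wend") -> ['fid50k', 'ppl_wend']
#     _parse_comma_sep("none") -> []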
#----------------------------------------------------------------------------
_examples = '''examples:
# Train StyleGAN2 using the FFHQ dataset
python %(prog)s --num-gpus=8 --data-dir=~/datasets --config=config-f --dataset=ffhq --mirror-augment=true
valid configs:
''' + ', '.join(_valid_configs) + '''
valid metrics:
''' + ', '.join(sorted([x for x in metric_defaults.keys()])) + '''
'''
def main():
parser = argparse.ArgumentParser(
description='Train StyleGAN2.',
epilog=_examples,
formatter_class=argparse.RawDescriptionHelpFormatter
)
parser.add_argument('--result-dir', help='Root directory for run results (default: %(default)s)', default='results', metavar='DIR')
parser.add_argument('--data-dir', help='Dataset root directory', required=True)
parser.add_argument('--dataset', help='Training dataset', required=True)
parser.add_argument('--config', help='Training config (default: %(default)s)', default='config-f', required=True, dest='config_id', metavar='CONFIG')
parser.add_argument('--num-gpus', help='Number of GPUs (default: %(default)s)', default=1, type=int, metavar='N')
parser.add_argument('--total-kimg', help='Training length in thousands of images (default: %(default)s)', metavar='KIMG', default=25000, type=int)
parser.add_argument('--gamma', help='R1 regularization weight (default is config dependent)', default=None, type=float)
parser.add_argument('--mirror-augment', help='Mirror augment (default: %(default)s)', default=False, metavar='BOOL', type=_str_to_bool)
parser.add_argument('--metrics', help='Comma-separated list of metrics or "none" (default: %(default)s)', default=None, type=_parse_comma_sep)
parser.add_argument('--resume', help='Resume training from. (default: %(default)s)', default=0, type=float, metavar='N')
parser.add_argument('--resume_path', help='Resume training from pkl. (default: %(default)s)', default='', type=str, metavar='N')
parser.add_argument('--stall', help='Pause training (default: %(default)s)', default=False, metavar='BOOL', type=_str_to_bool)
args = parser.parse_args()
if not os.path.exists(args.data_dir):
print ('Error: dataset root directory does not exist.')
sys.exit(1)
if args.config_id not in _valid_configs:
print ('Error: --config value must be one of: ', ', '.join(_valid_configs))
sys.exit(1)
for metric in args.metrics:
if metric not in metric_defaults:
print ('Error: unknown metric \'%s\'' % metric)
sys.exit(1)
run(**vars(args))
#----------------------------------------------------------------------------
if __name__ == "__main__":
main()
#----------------------------------------------------------------------------
| 42.323887
| 153
| 0.621772
|
import argparse
import copy
import os
import sys
import dnnlib
from dnnlib import EasyDict
from metrics.metric_defaults import metric_defaults
_valid_configs = [
'config-a',
'config-b',
'config-c',
'config-d',
'config-e',
'config-f',
'config-g',
'config-h',
'config-i',
'config-j',
'config-e-Gorig-Dorig', 'config-e-Gorig-Dresnet', 'config-e-Gorig-Dskip',
'config-e-Gresnet-Dorig', 'config-e-Gresnet-Dresnet', 'config-e-Gresnet-Dskip',
'config-e-Gskip-Dorig', 'config-e-Gskip-Dresnet', 'config-e-Gskip-Dskip',
'config-frgb', 'config-hrgb', 'config-jrgb',
'config-frgb-nonoise',
]
def run(dataset, data_dir, result_dir, config_id, num_gpus, total_kimg, gamma, mirror_augment, metrics, resume, resume_path, stall):
train = EasyDict(run_func_name='training.training_loop.training_loop')
G = EasyDict(func_name='training.networks_stylegan2.G_main')
D = EasyDict(func_name='training.networks_stylegan2.D_stylegan2')
G_opt = EasyDict(beta1=0.0, beta2=0.99, epsilon=1e-8)
D_opt = EasyDict(beta1=0.0, beta2=0.99, epsilon=1e-8)
G_loss = EasyDict(func_name='training.loss.G_logistic_ns_pathreg')
D_loss = EasyDict(func_name='training.loss.D_logistic_r1')
sched = EasyDict()
grid = EasyDict(size='8k', layout='random')
sc = dnnlib.SubmitConfig()
tf_config = {'rnd.np_random_seed': 1000}
if resume:
train.resume_pkl = resume_path
train.resume_kimg = resume
train.stall = stall
train.data_dir = data_dir
train.total_kimg = total_kimg
train.mirror_augment = mirror_augment
train.image_snapshot_ticks = train.network_snapshot_ticks = 10
sched.G_lrate_base = sched.D_lrate_base = 0.001
sched.minibatch_size_base = 32
sched.minibatch_gpu_base = 4
D_loss.gamma = 10
metrics = [metric_defaults[x] for x in metrics]
desc = f'stylegan2-lr-{sched.D_lrate_base}'
desc += '-' + dataset
if '-nonoise' in config_id:
desc += '-nonoise'
G.if_latent_noise = False
config_id = config_id.strip('-nonoise')
if 'rgb' in config_id:
dataset_args = EasyDict(tfrecord_dir=dataset, dtype='uint8', dynamic_range=[0,255])
config_id = config_id.strip('rgb')
else:
dataset_args = EasyDict(tfrecord_dir=dataset, dtype='float32', dynamic_range=[0,1])
assert num_gpus in [1, 2, 4, 8]
sc.num_gpus = num_gpus
desc += '-%dgpu' % num_gpus
assert config_id in _valid_configs
desc += '-' + config_id
if config_id != 'config-f':
G.fmap_base = D.fmap_base = 8 << 10
if config_id.startswith('config-e'):
D_loss.gamma = 100
if 'Gorig' in config_id: G.architecture = 'orig'
if 'Gskip' in config_id: G.architecture = 'skip'
if 'Gresnet' in config_id: G.architecture = 'resnet'
if 'Dorig' in config_id: D.architecture = 'orig'
if 'Dskip' in config_id: D.architecture = 'skip'
if 'Dresnet' in config_id: D.architecture = 'resnet'
if config_id in ['config-a', 'config-b', 'config-c', 'config-d']:
sched.lod_initial_resolution = 8
sched.G_lrate_base = sched.D_lrate_base = 0.001
sched.G_lrate_dict = sched.D_lrate_dict = {128: 0.0015, 256: 0.002, 512: 0.003, 1024: 0.003}
sched.minibatch_size_base = 32
sched.minibatch_size_dict = {8: 256, 16: 128, 32: 64, 64: 32}
sched.minibatch_gpu_base = 4
sched.minibatch_gpu_dict = {8: 32, 16: 16, 32: 8, 64: 4}
G.synthesis_func = 'G_synthesis_stylegan_revised'
D.func_name = 'training.networks_stylegan2.D_stylegan'
if config_id in ['config-a', 'config-b', 'config-c']:
G_loss = EasyDict(func_name='training.loss.G_logistic_ns')
if config_id in ['config-a', 'config-b']:
train.lazy_regularization = False
if config_id == 'config-a':
G = EasyDict(func_name='training.networks_stylegan.G_style')
D = EasyDict(func_name='training.networks_stylegan.D_basic')
if config_id == 'config-g':
G.zero_section = 2
G.nonlinearity = 'lsoftplus'
G.latent_size = 2048
G.dlatent_size = 2048
if config_id == 'config-h':
G.nonlinearity = 'lsoftplus'
if config_id == 'config-i':
G.zero_section = [0.1, 0.2, 0.3, 0.45, 0.6, 0.8, 0.9, 1.]
G.nonlinearity = 'lsoftplus'
G.latent_size = 2048
G.dlatent_size = 2048
if config_id == 'config-j':
G.mix_all = 1
if gamma is not None:
D_loss.gamma = gamma
sc.submit_target = dnnlib.SubmitTarget.LOCAL
sc.local.do_not_copy_source_files = True
kwargs = EasyDict(train)
kwargs.update(G_args=G, D_args=D, G_opt_args=G_opt, D_opt_args=D_opt, G_loss_args=G_loss, D_loss_args=D_loss)
kwargs.update(dataset_args=dataset_args, sched_args=sched, grid_args=grid, metric_arg_list=metrics, tf_config=tf_config)
kwargs.submit_config = copy.deepcopy(sc)
kwargs.submit_config.run_dir_root = result_dir
kwargs.submit_config.run_desc = desc
dnnlib.submit_run(**kwargs)
def _str_to_bool(v):
if isinstance(v, bool):
return v
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
def _parse_comma_sep(s):
if s is None or s.lower() == 'none' or s == '':
return []
return s.split(',')
_examples = '''examples:
# Train StyleGAN2 using the FFHQ dataset
python %(prog)s --num-gpus=8 --data-dir=~/datasets --config=config-f --dataset=ffhq --mirror-augment=true
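# Resume an interrupted run from a snapshot (the kimg count and pkl path below are hypothetical)
python %(prog)s --num-gpus=8 --data-dir=~/datasets --config=config-f --dataset=ffhq --resume=10000 --resume_path=results/network-snapshot-010000.pkl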
valid configs:
''' + ', '.join(_valid_configs) + '''
valid metrics:
''' + ', '.join(sorted([x for x in metric_defaults.keys()])) + '''
'''
def main():
parser = argparse.ArgumentParser(
description='Train StyleGAN2.',
epilog=_examples,
formatter_class=argparse.RawDescriptionHelpFormatter
)
parser.add_argument('--result-dir', help='Root directory for run results (default: %(default)s)', default='results', metavar='DIR')
parser.add_argument('--data-dir', help='Dataset root directory', required=True)
parser.add_argument('--dataset', help='Training dataset', required=True)
parser.add_argument('--config', help='Training config (default: %(default)s)', default='config-f', required=True, dest='config_id', metavar='CONFIG')
parser.add_argument('--num-gpus', help='Number of GPUs (default: %(default)s)', default=1, type=int, metavar='N')
parser.add_argument('--total-kimg', help='Training length in thousands of images (default: %(default)s)', metavar='KIMG', default=25000, type=int)
parser.add_argument('--gamma', help='R1 regularization weight (default is config dependent)', default=None, type=float)
parser.add_argument('--mirror-augment', help='Mirror augment (default: %(default)s)', default=False, metavar='BOOL', type=_str_to_bool)
parser.add_argument('--metrics', help='Comma-separated list of metrics or "none" (default: %(default)s)', default=None, type=_parse_comma_sep)
    parser.add_argument('--resume', help='Resume training from the given kimg count (default: %(default)s)', default=0, type=float, metavar='N')
    parser.add_argument('--resume_path', help='Network pkl to resume training from (default: %(default)s)', default='', type=str, metavar='PATH')
parser.add_argument('--stall', help='Pause training (default: %(default)s)', default=False, metavar='BOOL', type=_str_to_bool)
args = parser.parse_args()
if not os.path.exists(args.data_dir):
print ('Error: dataset root directory does not exist.')
sys.exit(1)
if args.config_id not in _valid_configs:
print ('Error: --config value must be one of: ', ', '.join(_valid_configs))
sys.exit(1)
for metric in args.metrics:
if metric not in metric_defaults:
print ('Error: unknown metric \'%s\'' % metric)
sys.exit(1)
run(**vars(args))
if __name__ == "__main__":
main()
| true
| true
|
f7147c4f2008f9ebc6684280d9bd9c43be116fb6
| 1,759
|
py
|
Python
|
application/commonApp/markdown_it_extensions.py
|
Marcelotsvaz/vaz-projects
|
8ccc0bf8d25f9276714e1e5ecb0a4e80f07442b4
|
[
"Unlicense"
] | null | null | null |
application/commonApp/markdown_it_extensions.py
|
Marcelotsvaz/vaz-projects
|
8ccc0bf8d25f9276714e1e5ecb0a4e80f07442b4
|
[
"Unlicense"
] | null | null | null |
application/commonApp/markdown_it_extensions.py
|
Marcelotsvaz/vaz-projects
|
8ccc0bf8d25f9276714e1e5ecb0a4e80f07442b4
|
[
"Unlicense"
] | null | null | null |
#
# VAZ Projects
#
#
# Author: Marcelo Tellier Sartori Vaz <marcelotsvaz@gmail.com>
from functools import partial
import re
from django.template import loader
def linkAttributes( self, tokens, index, options, env ):
	'''
	Add rel="noopener" to links.
	'''
tokens[index].attrSet( 'rel', 'noopener' )
return self.renderToken( tokens, index, options, env )
def imageGalleryPlugin( md, markdownImages ):
'''
Plugin for rendering image galleries using Django UserImage.
Syntax: #[cssClass1 cssClass2](identifier1, identifier2, identifier3)
'''
md.block.ruler.before(
'paragraph',
'imageGallery',
partial( imageGallery, markdownImages = markdownImages ),
{ 'alt': [ 'paragraph', 'reference', 'blockquote', 'list' ] }
)
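# A minimal usage sketch (class names and identifiers are hypothetical; assumes
# a configured Django template loader and a markdown_it ``MarkdownIt`` instance):
#
#     md = MarkdownIt()
#     imageGalleryPlugin( md, markdownImages = images )
#     html = md.render( '#[gallery wide](photo-1, photo-2)' )
#
# ``images`` is any iterable of objects with an ``identifier`` attribute; the
# block rule below renders the matching ones through 'commonApp/image_gallery.html'.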
def imageGallery( state, startLine, endLine, silent, markdownImages ):
'''
Rule for image gallery.
'''
lineContent = state.getLines( startLine, startLine + 1, 0, False ).strip()
# Only run the regex if the first two characters match.
if not lineContent.startswith( '#[' ):
return False
match = re.match( r'^#\[(.*)\]\((.*)\)$', lineContent )
if not match:
return False
cssClasses = match[1]
identifiers = match[2]
if not silent:
state.line = startLine + 1
if identifiers.strip() == '*':
images = markdownImages
else:
identifiers = { identifier.strip() for identifier in identifiers.split( ',' ) }
images = [ image for image in markdownImages if image.identifier in identifiers ]
renderedTemplate = loader.render_to_string(
'commonApp/image_gallery.html',
{ 'images': images, 'cssClasses': cssClasses },
)
token = state.push( 'html_block', '', 0 )
token.content = renderedTemplate
token.map = [startLine, state.line]
return True
| 22.551282
| 84
| 0.683911
|
| true
| true
|
f7147f6d980b3546daeecc96d4d8fba4b04a0b48
| 14,109
|
py
|
Python
|
src/src/create_tf_record.py
|
zhaodi-Wen/Child_skin_disease_detect
|
e95045341e8c27161eebb2c9c3b68026a4ea247b
|
[
"Apache-2.0"
] | null | null | null |
src/src/create_tf_record.py
|
zhaodi-Wen/Child_skin_disease_detect
|
e95045341e8c27161eebb2c9c3b68026a4ea247b
|
[
"Apache-2.0"
] | null | null | null |
src/src/create_tf_record.py
|
zhaodi-Wen/Child_skin_disease_detect
|
e95045341e8c27161eebb2c9c3b68026a4ea247b
|
[
"Apache-2.0"
] | null | null | null |
# -*-coding: utf-8 -*-
"""
@Project: create_tfrecord
@File : create_tfrecord.py
@Author : panjq
@E-mail : pan_jinquan@163.com
@Date : 2018-07-27 17:19:54
@desc : Save image data into a single tfrecord file
"""
##########################################################################
import tensorflow as tf
import numpy as np
import os
import cv2
import matplotlib.pyplot as plt
import random
from PIL import Image
os.environ["CUDA_VISIBLE_DEVICES"] = "1"
train_path = './train_new/img'
test_path = './test_new/img'
list = set(os.listdir(test_path))
classes=sorted(list,key=str.lower)
print(classes)
##########################################################################
def _int64_feature(value):
return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))
# Generate a bytes (string) feature
def _bytes_feature(value):
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
# Generate a float feature
def float_list_feature(value):
return tf.train.Feature(float_list=tf.train.FloatList(value=value))
def get_example_nums(tf_records_filenames):
    '''
    Count the number of examples (images) in a tf_records file
    :param tf_records_filenames: path to the tf_records file
    :return:
    '''
nums= 0
for record in tf.python_io.tf_record_iterator(tf_records_filenames):
nums += 1
return nums
def show_image(title,image):
    '''
    Display an image
    :param title: image title
    :param image: image data
    :return:
    '''
# plt.figure("show_image")
# print(image.dtype)
plt.imshow(image)
    plt.axis('on')  # set to 'off' to hide the axes
    plt.title(title)  # image title
plt.show()
# def load_labels_file(filename,labels_num=1,shuffle=False):
# '''
#     Load a txt label file where each line describes one image, space separated: image path label1 label2, e.g. test_image/1.jpg 0 2
#     :param filename:
#     :param labels_num: number of labels
#     :param shuffle: whether to shuffle the order
# :return:images type->list
# :return:labels type->list
# '''
# images=[]
# labels=[]
# with open(filename) as f:
# lines_list=f.readlines()
# if shuffle:
# random.shuffle(lines_list)
#
# for lines in lines_list:
# line=lines.rstrip().split(' ')
# label=[]
# for i in range(labels_num):
# label.append(int(line[i+1]))
# images.append(line[0])
# labels.append(label)
# return images,labels
def load_labels_file(filename,num=1,shuffle=False):
    '''
    Build the image path list and label list by walking the per-class subdirectories of `filename`.
    :param filename: dataset root directory (one subdirectory per class)
    :param num: number of labels per image
    :param shuffle: whether to shuffle the order
    :return: images type->list
    :return: labels type->list
    '''
images=[]
labels=[]
# with open(filename) as f:
# lines_list=f.readlines()
# if shuffle:
# random.shuffle(lines_list)
#
# for lines in lines_list:
# line=lines.rstrip().split(' ')
# label=[]
# for i in range(labels_num):
# label.append(int(line[i+1]))
# images.append(line[0])
# labels.append(label)
# return images,labels
for index,name in enumerate(classes):
# print(index,name)
class_path = filename+'/'+name+'/'
# print(class_path)
for img_name in os.listdir(class_path):
img_path = class_path+img_name
# print(img_path)
images.append(img_path)
labels.append(index)
# img = Image.open(img_path)
# img = img.resize((224,224))
# img_raw = img.tobytes()
# with open(train_label,'a') as f:
# f.write(str(index)+'\n')
randnum = random.randint(0, 100)
random.seed(randnum)
random.shuffle(images)
random.seed(randnum)
random.shuffle(labels)
return images,labels
def read_image(filename, resize_height, resize_width,normalization=False):
    '''
    Read image data; the default return type is uint8 in [0, 255]
    :param filename:
    :param resize_height:
    :param resize_width:
    :param normalization: whether to normalize to [0., 1.0]
    :return: the image data
    '''
bgr_image = cv2.imread(filename)
    if len(bgr_image.shape)==2:  # convert grayscale images to three channels
        print("Warning:gray image",filename)
        bgr_image = cv2.cvtColor(bgr_image, cv2.COLOR_GRAY2BGR)
    rgb_image = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2RGB)  # convert BGR to RGB
# show_image(filename,rgb_image)
# rgb_image=Image.open(filename)
if resize_height>0 and resize_width>0:
rgb_image=cv2.resize(rgb_image,(resize_width,resize_height))
rgb_image=np.asanyarray(rgb_image)
if normalization:
        # do not write rgb_image=rgb_image/255 here (keep the float literal)
rgb_image=rgb_image/255.0
# show_image("src resize image",image)
return rgb_image
def get_batch_images(images,labels,batch_size,labels_nums,one_hot=False,shuffle=False,num_threads=64):
    '''
    :param images: images
    :param labels: labels
    :param batch_size:
    :param labels_nums: number of label classes
    :param one_hot: whether to convert labels to one-hot form
    :param shuffle: whether to shuffle; usually True for training and False for validation
    :return: the batched images and labels
    '''
min_after_dequeue = 200
    capacity = min_after_dequeue + 3 * batch_size  # capacity must be greater than min_after_dequeue
if shuffle:
images_batch, labels_batch = tf.train.shuffle_batch([images,labels],
batch_size=batch_size,
capacity=capacity,
min_after_dequeue=min_after_dequeue,
num_threads=num_threads)
else:
images_batch, labels_batch = tf.train.batch([images,labels],
batch_size=batch_size,
capacity=capacity,
num_threads=num_threads)
if one_hot:
labels_batch = tf.one_hot(labels_batch, labels_nums, 1, 0)
return images_batch,labels_batch
def read_records(filename,resize_height, resize_width,type=None):
    '''
    Parse a record file. The stored image data is RGB, uint8, [0, 255]; as training data it usually needs to be normalized to [0, 1].
    :param filename:
    :param resize_height:
    :param resize_width:
    :param type: how the image data should be returned
            None: default, convert uint8-[0,255] to float32-[0,255]
            normalization: normalize to float32-[0,1]
            centralization: normalize to float32-[0,1], then subtract the mean (0.5) to center
    :return:
    '''
    # create a file queue; no limit on the number of reads
    filename_queue = tf.train.string_input_producer([filename])
    # create a reader from file queue
    reader = tf.TFRecordReader()
    # the reader pulls one serialized example from the file queue
    _, serialized_example = reader.read(filename_queue)
    # get feature from serialized example
    # parse the serialized example
features = tf.parse_single_example(
serialized_example,
features={
'image_raw': tf.FixedLenFeature([], tf.string),
'height': tf.FixedLenFeature([], tf.int64),
'width': tf.FixedLenFeature([], tf.int64),
'depth': tf.FixedLenFeature([], tf.int64),
            'labels': tf.FixedLenFeature([], tf.int64)  # key must match the one written in create_records
}
)
    tf_image = tf.decode_raw(features['image_raw'], tf.uint8)  # get the raw image data
tf_height = features['height']
tf_width = features['width']
tf_depth = features['depth']
    tf_label = tf.cast(features['labels'], tf.int32)
    # NOTE: when restoring the image data, the reshape size must match the shape used when saving, otherwise it fails
    # tf_image=tf.reshape(tf_image, [-1])  # flatten to a row vector
    tf_image=tf.reshape(tf_image, [resize_height, resize_width, 3])  # set the image dimensions
    # resize_images can only be applied after restoring the data: input uint8 -> output float32
    # tf_image=tf.image.resize_images(tf_image,[224, 224])
    # images are stored as uint8, but TensorFlow training data must be tf.float32
if type is None:
tf_image = tf.cast(tf_image, tf.float32)
    elif type=='normalization':  # [1] to normalize, use:
        # only uint8 input data is normalized from [0, 255]
        # tf_image = tf.image.convert_image_dtype(tf_image, tf.float32)
        tf_image = tf.cast(tf_image, tf.float32) * (1. / 255.0)  # normalize
    elif type=='centralization':
        # to normalize and then center, assuming a mean of 0.5, use:
        tf_image = tf.cast(tf_image, tf.float32) * (1. / 255) - 0.5  # centralize
    # only the image and label are returned here
# return tf_image, tf_height,tf_width,tf_depth,tf_label
return tf_image,tf_label
def create_records(image_dir, output_record_dir, resize_height, resize_width,shuffle,log=5):
    '''
    Save the raw image data, label, height, width, etc. into a record file.
    Note: images are read as uint8 and stored as a tf BytesList string; convert the type as needed when parsing.
    :param image_dir: directory of the original images (one subdirectory per class)
    :param output_record_dir: path of the record file to save
    :param resize_height:
    :param resize_width:
    PS: if resize_height or resize_width is 0, no resize is performed
    :param shuffle: whether to shuffle the order
    :param log: interval between progress log messages
    '''
    # load the file list; only one label per image
images_list, labels_list=load_labels_file(image_dir,1,shuffle)
writer = tf.python_io.TFRecordWriter(output_record_dir)
for i, [image_name, labels] in enumerate(zip(images_list, labels_list)):
image_path=image_name
# print(image_path)
# print(labels)
if not os.path.exists(image_path):
print('Err:no image',image_path)
continue
image = read_image(image_path, resize_height, resize_width)
image_raw = image.tostring()
if i%log==0 or i==len(images_list)-1:
print('------------processing:%d-th------------' % (i))
print('current image_path=%s' % (image_path),'shape:{}'.format(image.shape),'labels:{}'.format(labels))
        # only one label is saved here; for multiple labels add more "'label': _int64_feature(label)" entries as needed
label=labels
example = tf.train.Example(features=tf.train.Features(feature={
'image_raw': _bytes_feature(image_raw),
'height': _int64_feature(image.shape[0]),
'width': _int64_feature(image.shape[1]),
'depth': _int64_feature(image.shape[2]),
'labels': _int64_feature(label)
}))
writer.write(example.SerializeToString())
writer.close()
def disp_records(record_file,resize_height, resize_width,show_nums=4):
    '''
    Parse a record file and display show_nums images, mainly to verify that the record file was generated correctly
    :param record_file: path to the record file
    :return:
    '''
    # read-record function
    tf_image, tf_label = read_records(record_file,resize_height,resize_width,type='normalization')
    # display the first show_nums images
init_op = tf.initialize_all_variables()
with tf.Session() as sess:
sess.run(init_op)
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess=sess, coord=coord)
for i in range(show_nums):
            image,label = sess.run([tf_image,tf_label])  # fetch image and label from the session
            # image = tf_image.eval()
            # an image parsed straight from the record is a flat vector and needs reshaping before display
# image = image.reshape([height,width,depth])
#print('shape:{},tpye:{},labels:{}'.format(image.shape,image.dtype,label))
# pilimg = Image.fromarray(np.asarray(image_eval_reshape))
# pilimg.show()
show_image("image:%d"%(label),image)
coord.request_stop()
coord.join(threads)
def batch_test(record_file,resize_height, resize_width):
    '''
    :param record_file: path to the record file
    :param resize_height:
    :param resize_width:
    :return:
    :PS: image_batch, label_batch are usually used as the network input
    '''
    # read-record function
tf_image,tf_label = read_records(record_file,resize_height,resize_width,type='normalization')
image_batch, label_batch= get_batch_images(tf_image,tf_label,batch_size=4,labels_nums=5,one_hot=False,shuffle=False)
init = tf.global_variables_initializer()
    with tf.Session() as sess:  # start a session
sess.run(init)
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(coord=coord)
for i in range(4):
            # fetch images and labels from the session
            images, labels = sess.run([image_batch, label_batch])
            # only show the first image of each batch
            show_image("image", images[0, :, :, :])
            print('shape:{},type:{},labels:{}'.format(images.shape,images.dtype,labels))
        # stop all threads
coord.request_stop()
coord.join(threads)
# if __name__ == '__main__':
# # parameter settings
#
# resize_height = 224 # height of the stored images
# resize_width = 224 # width of the stored images
# shuffle=True
# log=5
# # generate the train.record file
# image_dir='dataset/train'
# train_labels = 'dataset/train.txt' # image paths
# train_record_output = 'dataset/record/train.tfrecords'
# create_records(image_dir,train_labels, train_record_output, resize_height, resize_width,shuffle,log)
# train_nums=get_example_nums(train_record_output)
# print("save train example nums={}".format(train_nums))
#
# # generate the val.record file
# image_dir='dataset/val'
# val_labels = 'dataset/val.txt' # image paths
# val_record_output = 'dataset/record/val.tfrecords'
# create_records(image_dir,val_labels, val_record_output, resize_height, resize_width,shuffle,log)
# val_nums=get_example_nums(val_record_output)
# print("save val example nums={}".format(val_nums))
#
# # test the display functions
# # disp_records(train_record_output,resize_height, resize_width)
# batch_test(train_record_output,resize_height, resize_width)
if __name__ == '__main__':
    # parameter settings
    resize_height = 224  # height of the stored images
    resize_width = 224  # width of the stored images
shuffle=True
log=5
    # generate the train.record file
    image_dir='./train_new/img'
    # train_labels = './onsets/train.txt' # image paths
train_record_output = 'train.tfrecord'
create_records(image_dir, train_record_output, resize_height, resize_width,shuffle,log)
train_nums=get_example_nums(train_record_output)
print("save train example nums={}".format(train_nums))
    # generate the val.record file
    image_dir='./test_new/img'
    # val_labels = './onsets/val.txt' # image paths
val_record_output = 'val.tfrecord'
create_records(image_dir, val_record_output, resize_height, resize_width,shuffle,log)
val_nums=get_example_nums(val_record_output)
print("save val example nums={}".format(val_nums))
    # test the display functions
# disp_records(train_record_output,resize_height, resize_width)
# batch_test(train_record_output,resize_height, resize_width)
| 35.628788
| 120
| 0.633851
|
| true
| true
|
f7147ffd89ad00e230016e38b0ea2a521df527ea
| 558
|
py
|
Python
|
tests/test_trend_locations.py
|
chewett/TrendAnalyser
|
62a13efa9b9d78449535e791b1932cf3fd60f2a6
|
[
"MIT"
] | null | null | null |
tests/test_trend_locations.py
|
chewett/TrendAnalyser
|
62a13efa9b9d78449535e791b1932cf3fd60f2a6
|
[
"MIT"
] | null | null | null |
tests/test_trend_locations.py
|
chewett/TrendAnalyser
|
62a13efa9b9d78449535e791b1932cf3fd60f2a6
|
[
"MIT"
] | 1
|
2020-10-26T15:14:10.000Z
|
2020-10-26T15:14:10.000Z
|
import json
from TwitterAPI import TwitterAPI
from time import gmtime, strftime
def load_conf():
conf = json.load(open("../conf.json"))
return conf
def get_api():
conf = load_conf()
det_file = open(conf['twitter_key_location'])
det = json.load(det_file)
api = TwitterAPI(det['consumer_key'],
det['consumer_secret'],
det['access_token_key'],
det['access_token_secret'])
return api
api = get_api()
r = api.request("trends/available")
print(json.loads(r.text))
| 19.241379
| 49
| 0.614695
|
| false
| true
|
f714815478b554b66febb336cf04f3a3d3a923e6
| 10,162
|
py
|
Python
|
melodic/lib/python2.7/dist-packages/geographic_msgs/msg/_RouteSegment.py
|
Dieptranivsr/Ros_Diep
|
d790e75e6f5da916701b11a2fdf3e03b6a47086b
|
[
"MIT"
] | null | null | null |
melodic/lib/python2.7/dist-packages/geographic_msgs/msg/_RouteSegment.py
|
Dieptranivsr/Ros_Diep
|
d790e75e6f5da916701b11a2fdf3e03b6a47086b
|
[
"MIT"
] | 1
|
2021-07-08T10:26:06.000Z
|
2021-07-08T10:31:11.000Z
|
melodic/lib/python2.7/dist-packages/geographic_msgs/msg/_RouteSegment.py
|
Dieptranivsr/Ros_Diep
|
d790e75e6f5da916701b11a2fdf3e03b6a47086b
|
[
"MIT"
] | null | null | null |
# This Python file uses the following encoding: utf-8
"""autogenerated by genpy from geographic_msgs/RouteSegment.msg. Do not edit."""
import codecs
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
import geographic_msgs.msg
import uuid_msgs.msg
class RouteSegment(genpy.Message):
_md5sum = "8583d1e2ddf1891c3934a5d2ed9a799c"
_type = "geographic_msgs/RouteSegment"
_has_header = False # flag to mark the presence of a Header object
_full_text = """# Route network segment.
#
# This is one directed edge of a RouteNetwork graph. It represents a
# known path from one way point to another. If the path is two-way,
# there will be another RouteSegment with "start" and "end" reversed.
uuid_msgs/UniqueID id # Unique identifier for this segment
uuid_msgs/UniqueID start # beginning way point of segment
uuid_msgs/UniqueID end # ending way point of segment
KeyValue[] props # segment properties
================================================================================
MSG: uuid_msgs/UniqueID
# A universally unique identifier (UUID).
#
# http://en.wikipedia.org/wiki/Universally_unique_identifier
# http://tools.ietf.org/html/rfc4122.html
uint8[16] uuid
================================================================================
MSG: geographic_msgs/KeyValue
# Geographic map tag (key, value) pair
#
# This is equivalent to diagnostic_msgs/KeyValue, repeated here to
# avoid introducing a trivial stack dependency.
string key # tag label
string value # corresponding value
"""
__slots__ = ['id','start','end','props']
_slot_types = ['uuid_msgs/UniqueID','uuid_msgs/UniqueID','uuid_msgs/UniqueID','geographic_msgs/KeyValue[]']
def __init__(self, *args, **kwds):
"""
Constructor. Any message fields that are implicitly/explicitly
       set to None will be assigned a default value. The recommended
use is keyword arguments as this is more robust to future message
changes. You cannot mix in-order arguments and keyword arguments.
The available fields are:
id,start,end,props
:param args: complete set of field values, in .msg order
:param kwds: use keyword arguments corresponding to message field names
to set specific fields.
"""
if args or kwds:
super(RouteSegment, self).__init__(*args, **kwds)
# message fields cannot be None, assign default values for those that are
if self.id is None:
self.id = uuid_msgs.msg.UniqueID()
if self.start is None:
self.start = uuid_msgs.msg.UniqueID()
if self.end is None:
self.end = uuid_msgs.msg.UniqueID()
if self.props is None:
self.props = []
else:
self.id = uuid_msgs.msg.UniqueID()
self.start = uuid_msgs.msg.UniqueID()
self.end = uuid_msgs.msg.UniqueID()
self.props = []
def _get_types(self):
"""
internal API method
"""
return self._slot_types
def serialize(self, buff):
"""
serialize message into buffer
:param buff: buffer, ``StringIO``
"""
try:
_x = self.id.uuid
# - if encoded as a list instead, serialize as bytes instead of string
if type(_x) in [list, tuple]:
buff.write(_get_struct_16B().pack(*_x))
else:
buff.write(_get_struct_16s().pack(_x))
_x = self.start.uuid
# - if encoded as a list instead, serialize as bytes instead of string
if type(_x) in [list, tuple]:
buff.write(_get_struct_16B().pack(*_x))
else:
buff.write(_get_struct_16s().pack(_x))
_x = self.end.uuid
# - if encoded as a list instead, serialize as bytes instead of string
if type(_x) in [list, tuple]:
buff.write(_get_struct_16B().pack(*_x))
else:
buff.write(_get_struct_16s().pack(_x))
length = len(self.props)
buff.write(_struct_I.pack(length))
for val1 in self.props:
_x = val1.key
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.Struct('<I%ss'%length).pack(length, _x))
_x = val1.value
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.Struct('<I%ss'%length).pack(length, _x))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize(self, str):
"""
unpack serialized message in str into this message instance
:param str: byte array of serialized message, ``str``
"""
codecs.lookup_error("rosmsg").msg_type = self._type
try:
if self.id is None:
self.id = uuid_msgs.msg.UniqueID()
if self.start is None:
self.start = uuid_msgs.msg.UniqueID()
if self.end is None:
self.end = uuid_msgs.msg.UniqueID()
if self.props is None:
self.props = None
end = 0
start = end
end += 16
self.id.uuid = str[start:end]
start = end
end += 16
self.start.uuid = str[start:end]
start = end
end += 16
self.end.uuid = str[start:end]
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
self.props = []
for i in range(0, length):
val1 = geographic_msgs.msg.KeyValue()
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
val1.key = str[start:end].decode('utf-8', 'rosmsg')
else:
val1.key = str[start:end]
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
val1.value = str[start:end].decode('utf-8', 'rosmsg')
else:
val1.value = str[start:end]
self.props.append(val1)
return self
except struct.error as e:
raise genpy.DeserializationError(e) # most likely buffer underfill
def serialize_numpy(self, buff, numpy):
"""
serialize message with numpy array types into buffer
:param buff: buffer, ``StringIO``
:param numpy: numpy python module
"""
try:
_x = self.id.uuid
# - if encoded as a list instead, serialize as bytes instead of string
if type(_x) in [list, tuple]:
buff.write(_get_struct_16B().pack(*_x))
else:
buff.write(_get_struct_16s().pack(_x))
_x = self.start.uuid
# - if encoded as a list instead, serialize as bytes instead of string
if type(_x) in [list, tuple]:
buff.write(_get_struct_16B().pack(*_x))
else:
buff.write(_get_struct_16s().pack(_x))
_x = self.end.uuid
# - if encoded as a list instead, serialize as bytes instead of string
if type(_x) in [list, tuple]:
buff.write(_get_struct_16B().pack(*_x))
else:
buff.write(_get_struct_16s().pack(_x))
length = len(self.props)
buff.write(_struct_I.pack(length))
for val1 in self.props:
_x = val1.key
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.Struct('<I%ss'%length).pack(length, _x))
_x = val1.value
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.Struct('<I%ss'%length).pack(length, _x))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize_numpy(self, str, numpy):
"""
unpack serialized message in str into this message instance using numpy for array types
:param str: byte array of serialized message, ``str``
:param numpy: numpy python module
"""
codecs.lookup_error("rosmsg").msg_type = self._type
try:
if self.id is None:
self.id = uuid_msgs.msg.UniqueID()
if self.start is None:
self.start = uuid_msgs.msg.UniqueID()
if self.end is None:
self.end = uuid_msgs.msg.UniqueID()
if self.props is None:
self.props = None
end = 0
start = end
end += 16
self.id.uuid = str[start:end]
start = end
end += 16
self.start.uuid = str[start:end]
start = end
end += 16
self.end.uuid = str[start:end]
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
self.props = []
for i in range(0, length):
val1 = geographic_msgs.msg.KeyValue()
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
val1.key = str[start:end].decode('utf-8', 'rosmsg')
else:
val1.key = str[start:end]
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
val1.value = str[start:end].decode('utf-8', 'rosmsg')
else:
val1.value = str[start:end]
self.props.append(val1)
return self
except struct.error as e:
raise genpy.DeserializationError(e) # most likely buffer underfill
_struct_I = genpy.struct_I
def _get_struct_I():
global _struct_I
return _struct_I
_struct_16B = None
def _get_struct_16B():
global _struct_16B
if _struct_16B is None:
_struct_16B = struct.Struct("<16B")
return _struct_16B
_struct_16s = None
def _get_struct_16s():
global _struct_16s
if _struct_16s is None:
_struct_16s = struct.Struct("<16s")
return _struct_16s
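# A minimal construction sketch (the uuid bytes and property values below are hypothetical):
#
#     import uuid_msgs.msg
#     seg = RouteSegment(
#         id=uuid_msgs.msg.UniqueID(uuid=16 * b'\x00'),
#         start=uuid_msgs.msg.UniqueID(uuid=16 * b'\x01'),
#         end=uuid_msgs.msg.UniqueID(uuid=16 * b'\x02'),
#         props=[geographic_msgs.msg.KeyValue(key='highway', value='residential')],
#     )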
| 33.986622
| 145
| 0.605688
|
| true
| true
|
f7148181728209c0820ba3d6bb65094941aa250a
| 4,393
|
py
|
Python
|
fairseq/tasks/__init__.py
|
HYUNMIN-HWANG/fairseq
|
8094376456f586f119ffe5b83d7af5979066197d
|
[
"MIT"
] | null | null | null |
fairseq/tasks/__init__.py
|
HYUNMIN-HWANG/fairseq
|
8094376456f586f119ffe5b83d7af5979066197d
|
[
"MIT"
] | null | null | null |
fairseq/tasks/__init__.py
|
HYUNMIN-HWANG/fairseq
|
8094376456f586f119ffe5b83d7af5979066197d
|
[
"MIT"
] | null | null | null |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""isort:skip_file"""
import argparse
import importlib
import os
from fairseq.dataclass import FairseqDataclass
from fairseq.dataclass.utils import merge_with_parent
from hydra.core.config_store import ConfigStore
from .fairseq_task import FairseqTask, LegacyFairseqTask # noqa
# register dataclass
TASK_DATACLASS_REGISTRY = {}
TASK_REGISTRY = {}
TASK_CLASS_NAMES = set()
def setup_task(cfg: FairseqDataclass, **kwargs):
TASK_DATACLASS_REGISTRY
task = None
task_name = getattr(cfg, "task", None)
if isinstance(task_name, str):
# legacy tasks
task = TASK_REGISTRY[task_name]
if task_name in TASK_DATACLASS_REGISTRY:
dc = TASK_DATACLASS_REGISTRY[task_name]
cfg = dc.from_namespace(cfg)
else:
task_name = getattr(cfg, "_name", None)
if task_name and task_name in TASK_DATACLASS_REGISTRY:
dc = TASK_DATACLASS_REGISTRY[task_name]
cfg = merge_with_parent(dc(), cfg)
task = TASK_REGISTRY[task_name]
assert (
task is not None
), f"Could not infer task type from {cfg}. Available argparse tasks: {TASK_REGISTRY.keys()}. Available hydra tasks: {TASK_DATACLASS_REGISTRY.keys()}"
return task.setup_task(cfg, **kwargs)
def register_task(name, dataclass=None):
"""
New tasks can be added to fairseq with the
:func:`~fairseq.tasks.register_task` function decorator.
For example::
@register_task('classification')
class ClassificationTask(FairseqTask):
(...)
.. note::
All Tasks must implement the :class:`~fairseq.tasks.FairseqTask`
interface.
Args:
name (str): the name of the task
"""
def register_task_cls(cls):
if name in TASK_REGISTRY:
raise ValueError("Cannot register duplicate task ({})".format(name))
if not issubclass(cls, FairseqTask):
raise ValueError(
"Task ({}: {}) must extend FairseqTask".format(name, cls.__name__)
)
if cls.__name__ in TASK_CLASS_NAMES:
raise ValueError(
"Cannot register task with duplicate class name ({})".format(
cls.__name__
)
)
TASK_REGISTRY[name] = cls
TASK_CLASS_NAMES.add(cls.__name__)
if dataclass is not None and not issubclass(dataclass, FairseqDataclass):
raise ValueError(
"Dataclass {} must extend FairseqDataclass".format(dataclass)
)
cls.__dataclass = dataclass
if dataclass is not None:
TASK_DATACLASS_REGISTRY[name] = dataclass
cs = ConfigStore.instance()
node = dataclass()
node._name = name
cs.store(name=name, group="task", node=node, provider="fairseq")
return cls
return register_task_cls
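# A minimal sketch of registering a task together with a config dataclass
# (``MyTaskConfig`` and ``MyTask`` are hypothetical names):
#
#     from dataclasses import dataclass, field
#
#     @dataclass
#     class MyTaskConfig(FairseqDataclass):
#         data: str = field(default="", metadata={"help": "path to data"})
#
#     @register_task("my_task", dataclass=MyTaskConfig)
#     class MyTask(FairseqTask):
#         ...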
def get_task(name):
return TASK_REGISTRY[name]
def import_tasks(tasks_dir, namespace):
for file in os.listdir(tasks_dir):
path = os.path.join(tasks_dir, file)
if (
not file.startswith("_")
and not file.startswith(".")
and (file.endswith(".py") or os.path.isdir(path))
):
task_name = file[: file.find(".py")] if file.endswith(".py") else file
importlib.import_module(namespace + "." + task_name)
# expose `task_parser` for sphinx
if task_name in TASK_REGISTRY:
parser = argparse.ArgumentParser(add_help=False)
group_task = parser.add_argument_group("Task name")
# fmt: off
group_task.add_argument('--task', metavar=task_name,
help='Enable this task with: ``--task=' + task_name + '``')
# fmt: on
group_args = parser.add_argument_group(
"Additional command-line arguments"
)
TASK_REGISTRY[task_name].add_args(group_args)
globals()[task_name + "_parser"] = parser
# automatically import any Python files in the tasks/ directory
tasks_dir = os.path.dirname(__file__)
import_tasks(tasks_dir, "fairseq.tasks")
| 31.833333
| 153
| 0.619622
|
| true
| true
|
f71482183f909628cf759af5837725bdda0f8c43
| 2,686
|
py
|
Python
|
py/phl/phlsys_workingdircommand__t.py
|
aevri/phabricator-tools
|
ef7501bcaee83e98d168d16f64b3f73e744d3336
|
[
"Apache-2.0"
] | 150
|
2015-01-21T15:52:22.000Z
|
2021-11-09T05:53:36.000Z
|
py/phl/phlsys_workingdircommand__t.py
|
aevri/phabricator-tools
|
ef7501bcaee83e98d168d16f64b3f73e744d3336
|
[
"Apache-2.0"
] | 72
|
2015-05-08T04:33:08.000Z
|
2017-01-27T09:37:36.000Z
|
py/phl/phlsys_workingdircommand__t.py
|
aevri/phabricator-tools
|
ef7501bcaee83e98d168d16f64b3f73e744d3336
|
[
"Apache-2.0"
] | 38
|
2015-01-30T10:33:47.000Z
|
2021-11-09T05:53:30.000Z
|
"""Test suite for phlsys_workingdircommand."""
# =============================================================================
# TEST PLAN
# -----------------------------------------------------------------------------
# Here we detail the things we are concerned to test and specify which tests
# cover those concerns.
#
# Concerns:
# [ A] command is executed correctly
# [ A] working directory is restored after command execution
# -----------------------------------------------------------------------------
# Tests:
# [ A] test_A_command_with_working_directory
# =============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import stat
import tempfile
import unittest
import phlsys_fs
import phlsys_workingdircommand
_PYCAT_COMMAND = """
#! /bin/sh
echo "Hello $1!"
""" .lstrip()
class Test(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_A_command_with_working_directory(self):
working_dir = tempfile.mkdtemp()
with phlsys_fs.chtmpdir_context():
tmp_dir = os.getcwd()
pycat_script_path = os.path.join(tmp_dir, 'pycat.sh')
phlsys_fs.write_text_file(pycat_script_path, _PYCAT_COMMAND)
mode = os.stat(pycat_script_path).st_mode
os.chmod(pycat_script_path, mode | stat.S_IEXEC)
self.assertEqual(os.getcwd(), tmp_dir)
command = phlsys_workingdircommand.CommandWithWorkingDirectory(
pycat_script_path, working_dir)
result = command('Alice')
# [ A] command is executed correctly
self.assertEqual('Hello Alice!\n', result)
# [ A] working directory is restored after command execution
self.assertEqual(os.getcwd(), tmp_dir)
# -----------------------------------------------------------------------------
# Copyright (C) 2015 Bloomberg Finance L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------ END-OF-FILE ----------------------------------
| 34.883117
| 79
| 0.57446
|
| true
| true
|
f7148301c9b51fcf16d83d7b786a76b3a79b2e1f
| 31,201
|
py
|
Python
|
wifiphisher/tests/test_deauth.py
|
burakbozdag/wifiphisher-docker
|
e5f373f63786c1a6cfeed4e9f5b00d0f986ade58
|
[
"Apache-2.0"
] | 3
|
2020-09-04T14:54:30.000Z
|
2022-03-24T19:09:48.000Z
|
wifiphisher/tests/test_deauth.py
|
burakbozdag/wifiphisher-docker
|
e5f373f63786c1a6cfeed4e9f5b00d0f986ade58
|
[
"Apache-2.0"
] | null | null | null |
wifiphisher/tests/test_deauth.py
|
burakbozdag/wifiphisher-docker
|
e5f373f63786c1a6cfeed4e9f5b00d0f986ade58
|
[
"Apache-2.0"
] | 1
|
2020-12-26T20:11:22.000Z
|
2020-12-26T20:11:22.000Z
|
# pylint: skip-file
""" This module tests the deauth module in extensions """
import collections
import unittest
from collections import defaultdict
import mock
import scapy.layers.dot11 as dot11
import wifiphisher.common.constants as constants
import wifiphisher.extensions.deauth as deauth
class TestDeauth(unittest.TestCase):
""" Tests Deauth class """
def setUp(self):
""" Set up the tests """
essid = dot11.Dot11Elt(ID='SSID', info="")
rates = dot11.Dot11Elt(ID='Rates', info="\x03\x12\x96\x18\x24\x30\x48\x60")
dsset = dot11.Dot11Elt(ID='DSset', info='\x06')
self.packet = dot11.RadioTap() / dot11.Dot11() / essid / rates / dsset
custom_tuple = collections.namedtuple("test",
("target_ap_bssid target_ap_channel rogue_ap_mac args "
"target_ap_essid is_freq_hop_allowed"))
self.target_channel = "6"
self.target_bssid = "BB:BB:BB:BB:BB:BB"
self.rogue_mac = "CC:CC:CC:CC:CC:CC"
self.target_essid = "Evil"
self.args = mock.Mock()
self.args.deauth_essid = False
self.args.channel_monitor = False
self.args.deauth_channels = []
data0 = custom_tuple(self.target_bssid, self.target_channel, self.rogue_mac,
self.args, self.target_essid, True)
data1 = custom_tuple(None, self.target_channel, self.rogue_mac,
self.args, self.target_essid, True)
self.deauth_obj0 = deauth.Deauth(data0)
self.deauth_obj1 = deauth.Deauth(data1)
# test for --deauth-essid
self.deauth_obj0._deauth_bssids = dict()
self.deauth_obj1._deauth_bssids = dict()
def test_craft_packet_normal_expected(self):
"""
Test _craft_packet method when given all the normal arguments and
expecting normal results
"""
sender = "00:00:00:00:00:00"
receiver = "11:11:11:11:11:11"
bssid = "00:00:00:00:00:00"
result = self.deauth_obj0._craft_packet(sender, receiver, bssid)
message0 = "Failed to craft a packet for disassociation"
message1 = "Failed to craft a packet for deauthentication"
# check the disassociation packet
self.assertEqual(result[0].addr1, receiver, message0)
self.assertEqual(result[0].addr2, sender, message0)
self.assertEqual(result[0].addr3, bssid, message0)
# check the deauthentication packet
self.assertEqual(result[1].addr1, receiver, message1)
self.assertEqual(result[1].addr2, sender, message1)
self.assertEqual(result[1].addr3, bssid, message1)
def test_get_packet_broadcast(self):
"""
Test get_packet method for crafting the broadcast frame
"""
# setup the packet
sender = "00:00:00:00:00:00"
receiver = "11:11:11:11:11:11"
essid = dot11.Dot11Elt(ID='SSID', info="")
rates = dot11.Dot11Elt(ID='Rates', info="\x03\x12\x96\x18\x24\x30\x48\x60")
dsset = dot11.Dot11Elt(ID='DSset', info='\x06')
packet = dot11.RadioTap() / dot11.Dot11() / dot11.Dot11Beacon() / essid / rates / dsset
packet.addr1 = receiver
packet.addr2 = sender
packet.addr3 = self.target_bssid
packet.FCfield = 0x0
# run the method
pkts_to_send = self.deauth_obj0.get_packet(packet)
message0 = "Failed to return an correct channel"
message1 = "Failed to return an correct packets"
# check channel: target channel should be one key of
# the result
self.assertEqual(self.target_channel in pkts_to_send, True,
message0)
# check the packets
        # check the disassociation packet
result = pkts_to_send[self.target_channel]
self.assertEqual(result[0].subtype, 10, message1)
self.assertEqual(result[0].addr1, constants.WIFI_BROADCAST, message1)
self.assertEqual(result[0].addr2, self.target_bssid, message1)
self.assertEqual(result[0].addr3, self.target_bssid, message1)
# check the deauthentication packet
self.assertEqual(result[1].subtype, 12, message1)
self.assertEqual(result[1].addr1, constants.WIFI_BROADCAST, message1)
self.assertEqual(result[1].addr2, self.target_bssid, message1)
self.assertEqual(result[1].addr3, self.target_bssid, message1)
def test_get_packet_second_run_non_releavent_client_empty(self):
"""
Test get_packet method for the second time when given a packet which
is not related to the target access point and --essid is not used.
        The expected results are a channel list containing the target channel
        and an empty packet list
"""
# setup the packets
sender0 = "00:00:00:00:00:00"
receiver0 = "11:11:11:11:11:11"
bssid0 = "22:22:22:22:22:22:22"
sender1 = "33:33:33:33:33:33"
receiver1 = "44:44:44:44:44:44"
bssid1 = "55:55:55:55:55:55"
self.packet.addr1 = receiver0
self.packet.addr2 = sender0
self.packet.addr3 = bssid0
# run the method twice
self.deauth_obj0.get_packet(self.packet)
# change the values for the next run
self.packet.addr1 = receiver1
self.packet.addr2 = sender1
self.packet.addr3 = bssid1
result = self.deauth_obj0.get_packet(self.packet)
message0 = "Failed to return an correct channel"
message1 = "Failed to return an correct packets"
# check channel
# if the bssid is not in self._deauth_bssids, return empty channel
self.assertEqual(result[0], [], message0)
# check the packets
self.assertEqual(result[1], [], message1)
def test_get_packet_second_run_our_ap_empty(self):
"""
Test get_packet method for the second time when given a packet which
is from our own rouge ap to the target access point and --essid is
not used. The expected result are an channel list containing target
channel and an empty packet list
"""
# setup the packets
sender0 = "00:00:00:00:00:00"
receiver0 = "11:11:11:11:11:11"
bssid0 = "22:22:22:22:22:22:22"
sender1 = "33:33:33:33:33:33"
receiver1 = "44:44:44:44:44:44"
bssid1 = self.rogue_mac
self.packet.addr1 = receiver0
self.packet.addr2 = sender0
self.packet.addr3 = bssid0
# run the method twice
self.deauth_obj0.get_packet(self.packet)
# change the values for the next run
self.packet.addr1 = receiver1
self.packet.addr2 = sender1
self.packet.addr3 = bssid1
result = self.deauth_obj0.get_packet(self.packet)
message0 = "Failed to return an correct channel"
message1 = "Failed to return an correct packets"
# check channel
# return empty channel if the frame is invalid
self.assertEqual(result[0], [], message0)
# check the packets
self.assertEqual(result[1], [], message1)
def test_get_packet_multiple_clients_multiple_packets(self):
"""
        Test get_packet method when run multiple times with valid clients.
        --essid is not used. The expected results are the channel of the
target AP followed by the broadcast packet for the target AP and
all the client packets
"""
# setup the packet
sender0 = self.target_bssid
receiver0 = "11:11:11:11:11:11"
bssid0 = self.target_bssid
sender1 = "33:33:33:33:33:33"
receiver1 = self.target_bssid
bssid1 = self.target_bssid
self.packet.addr1 = receiver0
self.packet.addr2 = sender0
self.packet.addr3 = bssid0
# add target_bssid in the self._deauth_bssids
self.deauth_obj0._deauth_bssids[self.target_bssid] = self.target_channel
# run the method
pkts_to_send0 = self.deauth_obj0.get_packet(self.packet)
result0 = pkts_to_send0[self.target_channel]
# change the values for the next run
self.packet.addr1 = receiver1
self.packet.addr2 = sender1
self.packet.addr3 = bssid1
# result1 will accumulate the result from result 0
pkts_to_send1 = self.deauth_obj0.get_packet(self.packet)
result1 = pkts_to_send1[self.target_channel]
message0 = "Failed to return an correct channel"
message1 = "Failed to return an correct packets"
# check channel
self.assertEqual(self.target_channel in pkts_to_send0, True,
message0)
# check the packets for the first client
# check the disassociation packet
self.assertEqual(result0[0].subtype, 10, message1)
self.assertEqual(result0[0].addr1, self.target_bssid, message1)
self.assertEqual(result0[0].addr2, receiver0, message1)
self.assertEqual(result0[0].addr3, self.target_bssid, message1)
# check the deauthentication packet
self.assertEqual(result0[1].subtype, 12, message1)
self.assertEqual(result0[1].addr1, self.target_bssid, message1)
self.assertEqual(result0[1].addr2, receiver0, message1)
self.assertEqual(result0[1].addr3, self.target_bssid, message1)
# check the disassociation packet
self.assertEqual(result0[2].subtype, 10, message1)
self.assertEqual(result0[2].addr1, receiver0, message1)
self.assertEqual(result0[2].addr2, self.target_bssid, message1)
self.assertEqual(result0[2].addr3, self.target_bssid, message1)
# check the deauthentication packet
self.assertEqual(result0[3].subtype, 12, message1)
self.assertEqual(result0[3].addr1, receiver0, message1)
self.assertEqual(result0[3].addr2, self.target_bssid, message1)
self.assertEqual(result0[3].addr3, self.target_bssid, message1)
# check the packets for the second client
# check the disassociation packet
self.assertEqual(result1[4].subtype, 10, message1)
self.assertEqual(result1[4].addr1, sender1, message1)
self.assertEqual(result1[4].addr2, self.target_bssid, message1)
self.assertEqual(result1[4].addr3, self.target_bssid, message1)
# check the deauthentication packet
self.assertEqual(result1[5].subtype, 12, message1)
self.assertEqual(result1[5].addr1, sender1, message1)
self.assertEqual(result1[5].addr2, self.target_bssid, message1)
self.assertEqual(result1[5].addr3, self.target_bssid, message1)
# check the disassociation packet
self.assertEqual(result1[6].subtype, 10, message1)
self.assertEqual(result1[6].addr1, self.target_bssid, message1)
self.assertEqual(result1[6].addr2, sender1, message1)
self.assertEqual(result1[6].addr3, self.target_bssid, message1)
# check the deauthentication packet
self.assertEqual(result1[7].subtype, 12, message1)
self.assertEqual(result1[7].addr1, self.target_bssid, message1)
self.assertEqual(result1[7].addr2, sender1, message1)
self.assertEqual(result1[7].addr3, self.target_bssid, message1)
def test_get_packet_essid_flag_client_client_packet(self):
"""
Test get_packet method when --essid flag is given. A new
client is given as input, and the proper packets and the
client's channel are expected.
"""
# setup the packet
sender = "22:22:22:22:22:22"
receiver = "11:11:11:11:11:11"
bssid = receiver
self.packet.addr1 = receiver
self.packet.addr2 = sender
self.packet.addr3 = bssid
# add the bssid to the deauth_bssid set
self.deauth_obj1._deauth_bssids[bssid] = self.target_channel
# run the method
pkts_to_send = self.deauth_obj1.get_packet(self.packet)
result = pkts_to_send[self.target_channel]
message0 = "Failed to return an correct channel"
message1 = "Failed to return an correct packets"
# check channel
self.assertEqual(self.target_channel in pkts_to_send, True, message0)
# check the packets
# check the disassociation packet
self.assertEqual(result[0].subtype, 10, message1)
self.assertEqual(result[0].addr1, sender, message1)
self.assertEqual(result[0].addr2, receiver, message1)
self.assertEqual(result[0].addr3, bssid, message1)
# check the deauthentication packet
self.assertEqual(result[1].subtype, 12, message1)
self.assertEqual(result[1].addr1, sender, message1)
self.assertEqual(result[1].addr2, receiver, message1)
self.assertEqual(result[1].addr3, bssid, message1)
# check the disassociation packet
self.assertEqual(result[2].subtype, 10, message1)
self.assertEqual(result[2].addr1, receiver, message1)
self.assertEqual(result[2].addr2, sender, message1)
self.assertEqual(result[2].addr3, bssid, message1)
# check the deauthentication packet
self.assertEqual(result[3].subtype, 12, message1)
self.assertEqual(result[3].addr1, receiver, message1)
self.assertEqual(result[3].addr2, sender, message1)
self.assertEqual(result[3].addr3, bssid, message1)
def test_get_packet_essid_flag_our_own_ap_empty_list(self):
"""
Test get_packet method when --essid flag is given. Our own
client is given as input. An empty list is expected for both
the channel and the packets.
"""
# setup the packet
sender = "00:00:00:00:00:00"
receiver = self.rogue_mac
bssid = self.rogue_mac
self.packet.addr1 = receiver
self.packet.addr2 = sender
self.packet.addr3 = bssid
# run the method
result = self.deauth_obj1.get_packet(self.packet)
message0 = "Failed to return an correct channel"
message1 = "Failed to return an correct packets"
# check channel
self.assertEqual(result[0], [], message0)
# check the packets
# check the disassociation packet
self.assertEqual(result[1], [], message1)
@mock.patch("wifiphisher.extensions.deauth.ord")
def test_get_packet_essid_flag_malformed0_channel_empty_list(self, mock_ord):
"""
Test get_packet method when --essid flag is given. This is the
case when a packet is malformed in the channel section. An empty
list is expected for both the channel and the packets. This tests
the TypeError case.
"""
mock_ord.side_effect = TypeError
# setup the packet
sender = "00:00:00:00:00:00"
receiver = "11:11:11:11:11:11"
bssid = "22:22:22:22:22:22:22"
self.packet.addr1 = receiver
self.packet.addr2 = sender
self.packet.addr3 = bssid
# run the method
result = self.deauth_obj1.get_packet(self.packet)
message0 = "Failed to return an correct channel"
message1 = "Failed to return an correct packets"
# check channel
self.assertEqual(result[0], [], message0)
# check the packets
# check the disassociation packet
self.assertEqual(result[1], [], message1)
@mock.patch("wifiphisher.extensions.deauth.ord")
def test_get_packet_essid_flag_malformed1_channel_empty_list(self, mock_ord):
"""
Test get_packet method when --essid flag is given. This is the
case when a packet is malformed in the channel section. An empty
list is expected for both the channel and the packets. This tests
the IndexError case.
"""
mock_ord.side_effect = IndexError
# setup the packet
sender = "00:00:00:00:00:00"
receiver = "11:11:11:11:11:11"
bssid = "22:22:22:22:22:22:22"
self.packet.addr1 = receiver
self.packet.addr2 = sender
self.packet.addr3 = bssid
# run the method
result = self.deauth_obj1.get_packet(self.packet)
message0 = "Failed to return an correct channel"
message1 = "Failed to return an correct packets"
# check channel
self.assertEqual(result[0], [], message0)
# check the packets
# check the disassociation packet
self.assertEqual(result[1], [], message1)
@mock.patch("wifiphisher.extensions.deauth.ord")
def test_get_packet_essid_flag_malformed2_channel_empty_list(self, mock_ord):
"""
Test get_packet method when --essid flag is given. This is the
case when a packet is malformed in the channel section. Here the
channel reported is out of range, so an empty list is expected
for both the channel and the packets.
"""
mock_ord.return_value = 200
# setup the packet
sender = "33:33:33:33:33:33"
receiver = "11:11:11:11:11:11"
bssid = "22:22:22:22:22:22:22"
self.packet.addr1 = receiver
self.packet.addr2 = sender
self.packet.addr3 = bssid
# run the method
result = self.deauth_obj1.get_packet(self.packet)
message0 = "Failed to return an correct channel"
message1 = "Failed to return an correct packets"
# check channel
self.assertEqual(result[0], [], message0)
# check the packets
# check the disassociation packet
self.assertEqual(result[1], [], message1)
def test_add_client_invalid_sender_none(self):
"""
Test _add_clients when the given sender is in the non-client
addresses. The expected output is None.
"""
# setup the arguments
sender = constants.WIFI_INVALID
receiver = "11:11:11:11:11:11"
bssid = receiver
# run the method
result = self.deauth_obj0._add_clients(sender, receiver, bssid)
# check the result
self.assertIsNone(result)
def test_add_client_invalid_receiver_none(self):
"""
Test _add_clients when the given receiver is in the non-client
addresses. The expected output is None.
"""
# setup the arguments
sender = "11:11:11:11:11:11"
receiver = constants.WIFI_INVALID
bssid = sender
# run the method
result = self.deauth_obj0._add_clients(sender, receiver, bssid)
# check the result
self.assertIsNone(result)
def test_add_client_invalid_sender_receiver_none(self):
"""
Test _add_clients when the given sender and receiver are in the
non-client addresses. The expected output is None.
"""
# setup the arguments
sender = constants.WIFI_INVALID
receiver = constants.WIFI_INVALID
bssid = "22:22:22:22:22:22:22"
# run the method
result = self.deauth_obj0._add_clients(sender, receiver, bssid)
# check the result
self.assertIsNone(result)
def test_add_client_irrelevent_sender_receiver_none(self):
"""
Test _add_clients when neither sender nor receiver is the
BSSID. The expected output is None.
"""
# setup the arguments
sender = "11:11:11:11:11:11"
receiver = "33:33:33:33:33:33"
bssid = "22:22:22:22:22:22:22"
# run the method
result = self.deauth_obj0._add_clients(sender, receiver, bssid)
# check the result
self.assertIsNone(result)
def test_add_client_receiver_is_bssid_packets(self):
"""
Test _add_clients when the given receiver is the BSSID. The
expected output is proper packets for both sender and receiver.
"""
# setup the packet
sender = "22:22:22:22:22:22"
receiver = "11:11:11:11:11:11"
bssid = receiver
# run the method
result = self.deauth_obj1._add_clients(sender, receiver, bssid)
message0 = "Failed to return the correct client"
message1 = "Failed to return an correct packets"
# check the client
self.assertEqual(result[0], sender, message0)
# check the packets
# check the disassociation packet
self.assertEqual(result[1][0].subtype, 10, message1)
self.assertEqual(result[1][0].addr1, sender, message1)
self.assertEqual(result[1][0].addr2, receiver, message1)
self.assertEqual(result[1][0].addr3, bssid, message1)
# check the deauthentication packet
self.assertEqual(result[1][1].subtype, 12, message1)
self.assertEqual(result[1][1].addr1, sender, message1)
self.assertEqual(result[1][1].addr2, receiver, message1)
self.assertEqual(result[1][1].addr3, bssid, message1)
# check the disassociation packet
self.assertEqual(result[1][2].subtype, 10, message1)
self.assertEqual(result[1][2].addr1, receiver, message1)
self.assertEqual(result[1][2].addr2, sender, message1)
self.assertEqual(result[1][2].addr3, bssid, message1)
# check the deauthentication packet
self.assertEqual(result[1][3].subtype, 12, message1)
self.assertEqual(result[1][3].addr1, receiver, message1)
self.assertEqual(result[1][3].addr2, sender, message1)
self.assertEqual(result[1][3].addr3, bssid, message1)
def test_add_client_sender_is_bssid_packets(self):
"""
Test _add_clients when the given sender is the BSSID. The
expected output is proper packets for both sender and receiver.
"""
# setup the packet
sender = "22:22:22:22:22:22"
receiver = "11:11:11:11:11:11"
bssid = sender
# run the method
result = self.deauth_obj1._add_clients(sender, receiver, bssid)
message0 = "Failed to return the correct client"
message1 = "Failed to return an correct packets"
# check the client
self.assertEqual(result[0], receiver, message0)
# check the packets
# check the disassociation packet
self.assertEqual(result[1][0].subtype, 10, message1)
self.assertEqual(result[1][0].addr1, sender, message1)
self.assertEqual(result[1][0].addr2, receiver, message1)
self.assertEqual(result[1][0].addr3, bssid, message1)
# check the deauthentication packet
self.assertEqual(result[1][1].subtype, 12, message1)
self.assertEqual(result[1][1].addr1, sender, message1)
self.assertEqual(result[1][1].addr2, receiver, message1)
self.assertEqual(result[1][1].addr3, bssid, message1)
# check the disassociation packet
self.assertEqual(result[1][2].subtype, 10, message1)
self.assertEqual(result[1][2].addr1, receiver, message1)
self.assertEqual(result[1][2].addr2, sender, message1)
self.assertEqual(result[1][2].addr3, bssid, message1)
# check the deauthentication packet
self.assertEqual(result[1][3].subtype, 12, message1)
self.assertEqual(result[1][3].addr1, receiver, message1)
self.assertEqual(result[1][3].addr2, sender, message1)
self.assertEqual(result[1][3].addr3, bssid, message1)
def test_send_output_no_client_proper(self):
"""
Test send_output method when no client has been detected.
The expected result is an empty message list
"""
message = "Failed to send the proper output"
self.assertEqual(self.deauth_obj1.send_output(), [], message)
def test_send_output_single_client_proper(self):
"""
Test send_output method when a client has already been
detected. The expected result is the proper output
containing that client.
"""
# setup the packet
sender = "44:44:44:44:44:44"
receiver = "55:55:55:55:55:55"
bssid = receiver
self.packet.addr1 = receiver
self.packet.addr2 = sender
self.packet.addr3 = bssid
# run the method
self.deauth_obj1._deauth_bssids[bssid] = self.target_channel
self.deauth_obj1.get_packet(self.packet)
actual = self.deauth_obj1.send_output()
expected = "DEAUTH/DISAS - {}".format(sender)
message = "Failed to send the proper output"
self.assertEqual(expected, actual[0], message)
def test_send_output_multiple_client_proper(self):
"""
Test send_output method when multiple clients have already been
detected. The expected result is the proper output
containing those clients.
"""
# setup the packet
sender0 = "22:22:22:22:22:22"
receiver0 = "11:11:11:11:11:11"
bssid0 = receiver0
sender1 = "33:33:33:33:33:33"
receiver1 = "44:44:44:44:44:44"
bssid1 = sender1
self.packet.addr1 = receiver0
self.packet.addr2 = sender0
self.packet.addr3 = bssid0
# run the method
self.deauth_obj1._deauth_bssids[bssid0] = self.target_channel
self.deauth_obj1.get_packet(self.packet)
# change the packet details
self.packet.addr1 = receiver1
self.packet.addr2 = sender1
self.packet.addr3 = bssid1
# run the method again
self.deauth_obj1._deauth_bssids[bssid1] = self.target_channel
self.deauth_obj1.get_packet(self.packet)
actual = self.deauth_obj1.send_output()
expected0 = "DEAUTH/DISAS - {}".format(sender0)
expected1 = "DEAUTH/DISAS - {}".format(receiver1)
self.assertIn(expected0, actual)
self.assertIn(expected1, actual)
def test_send_channels_non_frenzy_target_channel(self):
"""
Test send_channels method when --essid is not given. The
expected result is the target AP's channel
"""
actual = self.deauth_obj0.send_channels()
message = "Failed to send target AP's channel"
expected = [self.target_channel]
self.assertEqual(expected, actual, message)
def test_send_channels_frenzy_all_channels(self):
"""
Test send_channels method when --essid is given. The expected
result is all channels
"""
actual = self.deauth_obj1.send_channels()
message = "Failed to send all the channels"
expected = [str(ch) for ch in range(1, 14)]
self.assertEqual(expected, actual, message)
def test_extract_bssid_to_ds_0_from_ds_1_addr2(self):
"""
Test _extract_bssid when to_ds is 0 and from_ds is 1.
The case should return packet.addr2
"""
# bit0 is to_ds and bit1 is from_ds
self.packet.FCfield = 2
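# FCfield == 2 sets only the from-DS bit, so the BSSID travels in addr2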
self.packet.addr1 = "11:11:11:11:11:11"
self.packet.addr2 = "22:22:22:22:22:22"
self.packet.addr3 = "33:33:33:33:33:33"
message = "Fail to get correct BSSID as address 2"
actual = self.deauth_obj0._extract_bssid(self.packet)
expected = self.packet.addr2
self.assertEqual(expected, actual, message)
def test_extract_bssid_to_ds_1_from_ds_0_addr1(self):
"""
Test _extract_bssid when to_ds is 1 and from_ds is 0.
The case should return packet.addr1
"""
# bit0 is to_ds and bit1 is from_ds
self.packet.FCfield = 1
self.packet.addr1 = "11:11:11:11:11:11"
self.packet.addr2 = "22:22:22:22:22:22"
self.packet.addr3 = "33:33:33:33:33:33"
message = "Fail to get correct BSSID as address 1"
actual = self.deauth_obj0._extract_bssid(self.packet)
expected = self.packet.addr1
self.assertEqual(expected, actual, message)
def test_extract_bssid_to_ds_0_from_ds_0_addr3(self):
"""
Test _extract_bssid when to_ds is 0 and from_ds is 0.
The case should return packet.addr3
"""
# bit0 is to_ds and bit1 is from_ds
self.packet.FCfield = 0
self.packet.addr1 = "11:11:11:11:11:11"
self.packet.addr2 = "22:22:22:22:22:22"
self.packet.addr3 = "33:33:33:33:33:33"
message = "Fail to get correct BSSID as address 3"
actual = self.deauth_obj0._extract_bssid(self.packet)
expected = self.packet.addr3
self.assertEqual(expected, actual, message)
def test_get_packet_to_ds_1_from_ds_1_empty(self):
"""
Drop the WDS frame in get_packet
"""
self.packet.FCfield = 3
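# FCfield == 3 sets both the to-DS and from-DS bits (a WDS frame), which get_packet drops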
result = self.deauth_obj0.get_packet(self.packet)
message0 = "Failed to return an correct channel"
message1 = "Failed to return an correct packets"
# check channel
self.assertEqual(result[0], [], message0)
# check the packets
self.assertEqual(result[1], [], message1)
def test_get_packet_address_malform_empty(self):
"""
Drop the frame if the address is malformed
"""
packet = mock.Mock(spec=[])
result = self.deauth_obj0.get_packet(packet)
message0 = "Failed to return an correct channel"
message1 = "Failed to return an correct packets"
# check channel
self.assertEqual(result[0], [], message0)
# check the packets
self.assertEqual(result[1], [], message1)
def test_is_target_target_ap_bssid_true(self):
"""
Get the target attacking BSSID for the specific ESSID
when --essid is used
"""
essid = dot11.Dot11Elt(ID='SSID', info="Evil")
packet = dot11.RadioTap() / dot11.Dot11() / dot11.Dot11Beacon() / essid
packet.addr3 = "99:99:99:99:99:99"
self.deauth_obj0._data.args.deauth_essid = "Evil"
result = self.deauth_obj0._is_target(packet)
expected = True
message = "Fail to check the attacking essid: " + self.target_essid
self.assertEqual(result, expected, message)
def test_is_target_essid_non_decodable_error(self):
"""
Assign essid to a constant when it is utf-8 non-decodable
"""
essid = dot11.Dot11Elt(ID='SSID', info='\x99\x87\x33')
packet = dot11.RadioTap() / dot11.Dot11() / dot11.Dot11Beacon() / essid
packet.addr3 = "99:99:99:99:99:99"
result = self.deauth_obj0._is_target(packet)
expected = False
message = 'Failed to raise the UnicodeDecodeError for non-printable essid'
self.assertEqual(result, expected, message)
def test_channel_deauth(self):
"""
Test that we are deauthing on the right channels each time.
"""
# In obj0 we are targeting a specific AP
# Default behavior (e.g. through AP selection phase)
result = self.deauth_obj0.send_channels()
expected = [str(self.deauth_obj0._data.target_ap_channel)]
message = "Fail to receive right channels"
self.assertEqual(result, expected, message)
# In obj1 we set --deauth-channels 1 2 3 4
self.deauth_obj1._data.args.deauth_channels = [1, 2, 3, 4]
result = self.deauth_obj1.send_channels()
expected = ['1', '2', '3', '4']
message = "Fail to receive right channels"
self.assertEqual(result, expected, message)
| 36.154114
| 101
| 0.639659
|
import collections
import unittest
from collections import defaultdict
import mock
import scapy.layers.dot11 as dot11
import wifiphisher.common.constants as constants
import wifiphisher.extensions.deauth as deauth
class TestDeauth(unittest.TestCase):
def setUp(self):
essid = dot11.Dot11Elt(ID='SSID', info="")
rates = dot11.Dot11Elt(ID='Rates', info="\x03\x12\x96\x18\x24\x30\x48\x60")
dsset = dot11.Dot11Elt(ID='DSset', info='\x06')
self.packet = dot11.RadioTap() / dot11.Dot11() / essid / rates / dsset
custom_tuple = collections.namedtuple("test",
("target_ap_bssid target_ap_channel rogue_ap_mac args "
"target_ap_essid is_freq_hop_allowed"))
self.target_channel = "6"
self.target_bssid = "BB:BB:BB:BB:BB:BB"
self.rogue_mac = "CC:CC:CC:CC:CC:CC"
self.target_essid = "Evil"
self.args = mock.Mock()
self.args.deauth_essid = False
self.args.channel_monitor = False
self.args.deauth_channels = []
data0 = custom_tuple(self.target_bssid, self.target_channel, self.rogue_mac,
self.args, self.target_essid, True)
data1 = custom_tuple(None, self.target_channel, self.rogue_mac,
self.args, self.target_essid, True)
self.deauth_obj0 = deauth.Deauth(data0)
self.deauth_obj1 = deauth.Deauth(data1)
self.deauth_obj0._deauth_bssids = dict()
self.deauth_obj1._deauth_bssids = dict()
def test_craft_packet_normal_expected(self):
sender = "00:00:00:00:00:00"
receiver = "11:11:11:11:11:11"
bssid = "00:00:00:00:00:00"
result = self.deauth_obj0._craft_packet(sender, receiver, bssid)
message0 = "Failed to craft a packet for disassociation"
message1 = "Failed to craft a packet for deauthentication"
self.assertEqual(result[0].addr1, receiver, message0)
self.assertEqual(result[0].addr2, sender, message0)
self.assertEqual(result[0].addr3, bssid, message0)
self.assertEqual(result[1].addr1, receiver, message1)
self.assertEqual(result[1].addr2, sender, message1)
self.assertEqual(result[1].addr3, bssid, message1)
def test_get_packet_broadcast(self):
sender = "00:00:00:00:00:00"
receiver = "11:11:11:11:11:11"
essid = dot11.Dot11Elt(ID='SSID', info="")
rates = dot11.Dot11Elt(ID='Rates', info="\x03\x12\x96\x18\x24\x30\x48\x60")
dsset = dot11.Dot11Elt(ID='DSset', info='\x06')
packet = dot11.RadioTap() / dot11.Dot11() / dot11.Dot11Beacon() / essid / rates / dsset
packet.addr1 = receiver
packet.addr2 = sender
packet.addr3 = self.target_bssid
packet.FCfield = 0x0
pkts_to_send = self.deauth_obj0.get_packet(packet)
message0 = "Failed to return an correct channel"
message1 = "Failed to return an correct packets"
self.assertEqual(self.target_channel in pkts_to_send, True,
message0)
result = pkts_to_send[self.target_channel]
self.assertEqual(result[0].subtype, 10, message1)
self.assertEqual(result[0].addr1, constants.WIFI_BROADCAST, message1)
self.assertEqual(result[0].addr2, self.target_bssid, message1)
self.assertEqual(result[0].addr3, self.target_bssid, message1)
self.assertEqual(result[1].subtype, 12, message1)
self.assertEqual(result[1].addr1, constants.WIFI_BROADCAST, message1)
self.assertEqual(result[1].addr2, self.target_bssid, message1)
self.assertEqual(result[1].addr3, self.target_bssid, message1)
def test_get_packet_second_run_non_releavent_client_empty(self):
sender0 = "00:00:00:00:00:00"
receiver0 = "11:11:11:11:11:11"
bssid0 = "22:22:22:22:22:22:22"
sender1 = "33:33:33:33:33:33"
receiver1 = "44:44:44:44:44:44"
bssid1 = "55:55:55:55:55:55"
self.packet.addr1 = receiver0
self.packet.addr2 = sender0
self.packet.addr3 = bssid0
self.deauth_obj0.get_packet(self.packet)
self.packet.addr1 = receiver1
self.packet.addr2 = sender1
self.packet.addr3 = bssid1
result = self.deauth_obj0.get_packet(self.packet)
message0 = "Failed to return an correct channel"
message1 = "Failed to return an correct packets"
self.assertEqual(result[0], [], message0)
self.assertEqual(result[1], [], message1)
def test_get_packet_second_run_our_ap_empty(self):
sender0 = "00:00:00:00:00:00"
receiver0 = "11:11:11:11:11:11"
bssid0 = "22:22:22:22:22:22:22"
sender1 = "33:33:33:33:33:33"
receiver1 = "44:44:44:44:44:44"
bssid1 = self.rogue_mac
self.packet.addr1 = receiver0
self.packet.addr2 = sender0
self.packet.addr3 = bssid0
self.deauth_obj0.get_packet(self.packet)
self.packet.addr1 = receiver1
self.packet.addr2 = sender1
self.packet.addr3 = bssid1
result = self.deauth_obj0.get_packet(self.packet)
message0 = "Failed to return an correct channel"
message1 = "Failed to return an correct packets"
self.assertEqual(result[0], [], message0)
self.assertEqual(result[1], [], message1)
def test_get_packet_multiple_clients_multiple_packets(self):
sender0 = self.target_bssid
receiver0 = "11:11:11:11:11:11"
bssid0 = self.target_bssid
sender1 = "33:33:33:33:33:33"
receiver1 = self.target_bssid
bssid1 = self.target_bssid
self.packet.addr1 = receiver0
self.packet.addr2 = sender0
self.packet.addr3 = bssid0
self.deauth_obj0._deauth_bssids[self.target_bssid] = self.target_channel
pkts_to_send0 = self.deauth_obj0.get_packet(self.packet)
result0 = pkts_to_send0[self.target_channel]
self.packet.addr1 = receiver1
self.packet.addr2 = sender1
self.packet.addr3 = bssid1
pkts_to_send1 = self.deauth_obj0.get_packet(self.packet)
result1 = pkts_to_send1[self.target_channel]
message0 = "Failed to return an correct channel"
message1 = "Failed to return an correct packets"
self.assertEqual(self.target_channel in pkts_to_send0, True,
message0)
self.assertEqual(result0[0].subtype, 10, message1)
self.assertEqual(result0[0].addr1, self.target_bssid, message1)
self.assertEqual(result0[0].addr2, receiver0, message1)
self.assertEqual(result0[0].addr3, self.target_bssid, message1)
self.assertEqual(result0[1].subtype, 12, message1)
self.assertEqual(result0[1].addr1, self.target_bssid, message1)
self.assertEqual(result0[1].addr2, receiver0, message1)
self.assertEqual(result0[1].addr3, self.target_bssid, message1)
self.assertEqual(result0[2].subtype, 10, message1)
self.assertEqual(result0[2].addr1, receiver0, message1)
self.assertEqual(result0[2].addr2, self.target_bssid, message1)
self.assertEqual(result0[2].addr3, self.target_bssid, message1)
self.assertEqual(result0[3].subtype, 12, message1)
self.assertEqual(result0[3].addr1, receiver0, message1)
self.assertEqual(result0[3].addr2, self.target_bssid, message1)
self.assertEqual(result0[3].addr3, self.target_bssid, message1)
self.assertEqual(result1[4].subtype, 10, message1)
self.assertEqual(result1[4].addr1, sender1, message1)
self.assertEqual(result1[4].addr2, self.target_bssid, message1)
self.assertEqual(result1[4].addr3, self.target_bssid, message1)
self.assertEqual(result1[5].subtype, 12, message1)
self.assertEqual(result1[5].addr1, sender1, message1)
self.assertEqual(result1[5].addr2, self.target_bssid, message1)
self.assertEqual(result1[5].addr3, self.target_bssid, message1)
self.assertEqual(result1[6].subtype, 10, message1)
self.assertEqual(result1[6].addr1, self.target_bssid, message1)
self.assertEqual(result1[6].addr2, sender1, message1)
self.assertEqual(result1[6].addr3, self.target_bssid, message1)
self.assertEqual(result1[7].subtype, 12, message1)
self.assertEqual(result1[7].addr1, self.target_bssid, message1)
self.assertEqual(result1[7].addr2, sender1, message1)
self.assertEqual(result1[7].addr3, self.target_bssid, message1)
def test_get_packet_essid_flag_client_client_packet(self):
sender = "22:22:22:22:22:22"
receiver = "11:11:11:11:11:11"
bssid = receiver
self.packet.addr1 = receiver
self.packet.addr2 = sender
self.packet.addr3 = bssid
self.deauth_obj1._deauth_bssids[bssid] = self.target_channel
pkts_to_send = self.deauth_obj1.get_packet(self.packet)
result = pkts_to_send[self.target_channel]
message0 = "Failed to return an correct channel"
message1 = "Failed to return an correct packets"
self.assertEqual(self.target_channel in pkts_to_send, True, message0)
self.assertEqual(result[0].subtype, 10, message1)
self.assertEqual(result[0].addr1, sender, message1)
self.assertEqual(result[0].addr2, receiver, message1)
self.assertEqual(result[0].addr3, bssid, message1)
self.assertEqual(result[1].subtype, 12, message1)
self.assertEqual(result[1].addr1, sender, message1)
self.assertEqual(result[1].addr2, receiver, message1)
self.assertEqual(result[1].addr3, bssid, message1)
self.assertEqual(result[2].subtype, 10, message1)
self.assertEqual(result[2].addr1, receiver, message1)
self.assertEqual(result[2].addr2, sender, message1)
self.assertEqual(result[2].addr3, bssid, message1)
self.assertEqual(result[3].subtype, 12, message1)
self.assertEqual(result[3].addr1, receiver, message1)
self.assertEqual(result[3].addr2, sender, message1)
self.assertEqual(result[3].addr3, bssid, message1)
def test_get_packet_essid_flag_our_own_ap_empty_list(self):
sender = "00:00:00:00:00:00"
receiver = self.rogue_mac
bssid = self.rogue_mac
self.packet.addr1 = receiver
self.packet.addr2 = sender
self.packet.addr3 = bssid
result = self.deauth_obj1.get_packet(self.packet)
message0 = "Failed to return an correct channel"
message1 = "Failed to return an correct packets"
self.assertEqual(result[0], [], message0)
self.assertEqual(result[1], [], message1)
@mock.patch("wifiphisher.extensions.deauth.ord")
def test_get_packet_essid_flag_malformed0_channel_empty_list(self, mock_ord):
mock_ord.side_effect = TypeError
sender = "00:00:00:00:00:00"
receiver = "11:11:11:11:11:11"
bssid = "22:22:22:22:22:22:22"
self.packet.addr1 = receiver
self.packet.addr2 = sender
self.packet.addr3 = bssid
result = self.deauth_obj1.get_packet(self.packet)
message0 = "Failed to return an correct channel"
message1 = "Failed to return an correct packets"
self.assertEqual(result[0], [], message0)
self.assertEqual(result[1], [], message1)
@mock.patch("wifiphisher.extensions.deauth.ord")
def test_get_packet_essid_flag_malformed1_channel_empty_list(self, mock_ord):
mock_ord.side_effect = IndexError
sender = "00:00:00:00:00:00"
receiver = "11:11:11:11:11:11"
bssid = "22:22:22:22:22:22:22"
self.packet.addr1 = receiver
self.packet.addr2 = sender
self.packet.addr3 = bssid
result = self.deauth_obj1.get_packet(self.packet)
message0 = "Failed to return an correct channel"
message1 = "Failed to return an correct packets"
self.assertEqual(result[0], [], message0)
self.assertEqual(result[1], [], message1)
@mock.patch("wifiphisher.extensions.deauth.ord")
def test_get_packet_essid_flag_malformed2_channel_empty_list(self, mock_ord):
mock_ord.return_value = 200
sender = "33:33:33:33:33:33"
receiver = "11:11:11:11:11:11"
bssid = "22:22:22:22:22:22:22"
self.packet.addr1 = receiver
self.packet.addr2 = sender
self.packet.addr3 = bssid
result = self.deauth_obj1.get_packet(self.packet)
message0 = "Failed to return an correct channel"
message1 = "Failed to return an correct packets"
self.assertEqual(result[0], [], message0)
self.assertEqual(result[1], [], message1)
def test_add_client_invalid_sender_none(self):
sender = constants.WIFI_INVALID
receiver = "11:11:11:11:11:11"
bssid = receiver
result = self.deauth_obj0._add_clients(sender, receiver, bssid)
self.assertIsNone(result)
def test_add_client_invalid_receiver_none(self):
sender = "11:11:11:11:11:11"
receiver = constants.WIFI_INVALID
bssid = sender
result = self.deauth_obj0._add_clients(sender, receiver, bssid)
self.assertIsNone(result)
def test_add_client_invalid_sender_receiver_none(self):
sender = constants.WIFI_INVALID
receiver = constants.WIFI_INVALID
bssid = "22:22:22:22:22:22:22"
result = self.deauth_obj0._add_clients(sender, receiver, bssid)
self.assertIsNone(result)
def test_add_client_irrelevent_sender_receiver_none(self):
sender = "11:11:11:11:11:11"
receiver = "33:33:33:33:33:33"
bssid = "22:22:22:22:22:22:22"
result = self.deauth_obj0._add_clients(sender, receiver, bssid)
self.assertIsNone(result)
def test_add_client_receiver_is_bssid_packets(self):
sender = "22:22:22:22:22:22"
receiver = "11:11:11:11:11:11"
bssid = receiver
result = self.deauth_obj1._add_clients(sender, receiver, bssid)
message0 = "Failed to return the correct client"
message1 = "Failed to return an correct packets"
self.assertEqual(result[0], sender, message0)
self.assertEqual(result[1][0].subtype, 10, message1)
self.assertEqual(result[1][0].addr1, sender, message1)
self.assertEqual(result[1][0].addr2, receiver, message1)
self.assertEqual(result[1][0].addr3, bssid, message1)
self.assertEqual(result[1][1].subtype, 12, message1)
self.assertEqual(result[1][1].addr1, sender, message1)
self.assertEqual(result[1][1].addr2, receiver, message1)
self.assertEqual(result[1][1].addr3, bssid, message1)
self.assertEqual(result[1][2].subtype, 10, message1)
self.assertEqual(result[1][2].addr1, receiver, message1)
self.assertEqual(result[1][2].addr2, sender, message1)
self.assertEqual(result[1][2].addr3, bssid, message1)
self.assertEqual(result[1][3].subtype, 12, message1)
self.assertEqual(result[1][3].addr1, receiver, message1)
self.assertEqual(result[1][3].addr2, sender, message1)
self.assertEqual(result[1][3].addr3, bssid, message1)
def test_add_client_sender_is_bssid_packets(self):
sender = "22:22:22:22:22:22"
receiver = "11:11:11:11:11:11"
bssid = sender
result = self.deauth_obj1._add_clients(sender, receiver, bssid)
message0 = "Failed to return the correct client"
message1 = "Failed to return an correct packets"
self.assertEqual(result[0], receiver, message0)
self.assertEqual(result[1][0].subtype, 10, message1)
self.assertEqual(result[1][0].addr1, sender, message1)
self.assertEqual(result[1][0].addr2, receiver, message1)
self.assertEqual(result[1][0].addr3, bssid, message1)
self.assertEqual(result[1][1].subtype, 12, message1)
self.assertEqual(result[1][1].addr1, sender, message1)
self.assertEqual(result[1][1].addr2, receiver, message1)
self.assertEqual(result[1][1].addr3, bssid, message1)
self.assertEqual(result[1][2].subtype, 10, message1)
self.assertEqual(result[1][2].addr1, receiver, message1)
self.assertEqual(result[1][2].addr2, sender, message1)
self.assertEqual(result[1][2].addr3, bssid, message1)
self.assertEqual(result[1][3].subtype, 12, message1)
self.assertEqual(result[1][3].addr1, receiver, message1)
self.assertEqual(result[1][3].addr2, sender, message1)
self.assertEqual(result[1][3].addr3, bssid, message1)
def test_send_output_no_client_proper(self):
message = "Failed to send the proper output"
self.assertEqual(self.deauth_obj1.send_output(), [], message)
def test_send_output_single_client_proper(self):
sender = "44:44:44:44:44:44"
receiver = "55:55:55:55:55:55"
bssid = receiver
self.packet.addr1 = receiver
self.packet.addr2 = sender
self.packet.addr3 = bssid
self.deauth_obj1._deauth_bssids[bssid] = self.target_channel
self.deauth_obj1.get_packet(self.packet)
actual = self.deauth_obj1.send_output()
expected = "DEAUTH/DISAS - {}".format(sender)
message = "Failed to send the proper output"
self.assertEqual(expected, actual[0], message)
def test_send_output_multiple_client_proper(self):
sender0 = "22:22:22:22:22:22"
receiver0 = "11:11:11:11:11:11"
bssid0 = receiver0
sender1 = "33:33:33:33:33:33"
receiver1 = "44:44:44:44:44:44"
bssid1 = sender1
self.packet.addr1 = receiver0
self.packet.addr2 = sender0
self.packet.addr3 = bssid0
self.deauth_obj1._deauth_bssids[bssid0] = self.target_channel
self.deauth_obj1.get_packet(self.packet)
self.packet.addr1 = receiver1
self.packet.addr2 = sender1
self.packet.addr3 = bssid1
self.deauth_obj1._deauth_bssids[bssid1] = self.target_channel
self.deauth_obj1.get_packet(self.packet)
actual = self.deauth_obj1.send_output()
expected0 = "DEAUTH/DISAS - {}".format(sender0)
expected1 = "DEAUTH/DISAS - {}".format(receiver1)
self.assertIn(expected0, actual)
self.assertIn(expected1, actual)
def test_send_channels_non_frenzy_target_channel(self):
actual = self.deauth_obj0.send_channels()
message = "Failed to send target AP's channel"
expected = [self.target_channel]
self.assertEqual(expected, actual, message)
def test_send_channels_frenzy_all_channels(self):
actual = self.deauth_obj1.send_channels()
message = "Failed to send all the channels"
expected = [str(ch) for ch in range(1, 14)]
self.assertEqual(expected, actual, message)
def test_extract_bssid_to_ds_0_from_ds_1_addr2(self):
# bit0 is to_ds and bit1 is from_ds
self.packet.FCfield = 2
self.packet.addr1 = "11:11:11:11:11:11"
self.packet.addr2 = "22:22:22:22:22:22"
self.packet.addr3 = "33:33:33:33:33:33"
message = "Fail to get correct BSSID as address 2"
actual = self.deauth_obj0._extract_bssid(self.packet)
expected = self.packet.addr2
self.assertEqual(expected, actual, message)
def test_extract_bssid_to_ds_1_from_ds_0_addr1(self):
# bit0 is to_ds and bit1 is from_ds
self.packet.FCfield = 1
self.packet.addr1 = "11:11:11:11:11:11"
self.packet.addr2 = "22:22:22:22:22:22"
self.packet.addr3 = "33:33:33:33:33:33"
message = "Fail to get correct BSSID as address 1"
actual = self.deauth_obj0._extract_bssid(self.packet)
expected = self.packet.addr1
self.assertEqual(expected, actual, message)
def test_extract_bssid_to_ds_0_from_ds_0_addr3(self):
# bit0 is to_ds and bit1 is from_ds
self.packet.FCfield = 0
self.packet.addr1 = "11:11:11:11:11:11"
self.packet.addr2 = "22:22:22:22:22:22"
self.packet.addr3 = "33:33:33:33:33:33"
message = "Fail to get correct BSSID as address 3"
actual = self.deauth_obj0._extract_bssid(self.packet)
expected = self.packet.addr3
self.assertEqual(expected, actual, message)
def test_get_packet_to_ds_1_from_ds_1_empty(self):
self.packet.FCfield = 3
result = self.deauth_obj0.get_packet(self.packet)
message0 = "Failed to return an correct channel"
message1 = "Failed to return an correct packets"
# check channel
self.assertEqual(result[0], [], message0)
# check the packets
self.assertEqual(result[1], [], message1)
def test_get_packet_address_malform_empty(self):
packet = mock.Mock(spec=[])
result = self.deauth_obj0.get_packet(packet)
message0 = "Failed to return an correct channel"
message1 = "Failed to return an correct packets"
# check channel
self.assertEqual(result[0], [], message0)
# check the packets
self.assertEqual(result[1], [], message1)
def test_is_target_target_ap_bssid_true(self):
essid = dot11.Dot11Elt(ID='SSID', info="Evil")
packet = dot11.RadioTap() / dot11.Dot11() / dot11.Dot11Beacon() / essid
packet.addr3 = "99:99:99:99:99:99"
self.deauth_obj0._data.args.deauth_essid = "Evil"
result = self.deauth_obj0._is_target(packet)
expected = True
message = "Fail to check the attacking essid: " + self.target_essid
self.assertEqual(result, expected, message)
def test_is_target_essid_non_decodable_error(self):
essid = dot11.Dot11Elt(ID='SSID', info='\x99\x87\x33')
packet = dot11.RadioTap() / dot11.Dot11() / dot11.Dot11Beacon() / essid
packet.addr3 = "99:99:99:99:99:99"
result = self.deauth_obj0._is_target(packet)
expected = False
message = 'Failed to raise the UnicodeDecodeError for non-printable essid'
self.assertEqual(result, expected, message)
def test_channel_deauth(self):
# In obj0 we are targeting a specific AP
# Default behavior (e.g. through AP selection phase)
result = self.deauth_obj0.send_channels()
expected = [str(self.deauth_obj0._data.target_ap_channel)]
message = "Fail to receive right channels"
self.assertEqual(result, expected, message)
# In obj1 we set --deauth-channels 1 2 3 4
self.deauth_obj1._data.args.deauth_channels = [1, 2, 3, 4]
result = self.deauth_obj1.send_channels()
expected = ['1', '2', '3', '4']
message = "Fail to receive right channels"
self.assertEqual(result, expected, message)
| true
| true
|
f71483250f4208d1d6c47fbd42c29ab271f6db11
| 24
|
py
|
Python
|
tests/applicationinsights_tests/exception_tests/__init__.py
|
Rebeccalau/ApplicationInsights-Python
|
cc91fede2d6d6c48acaa5687aa13ca491a17025a
|
[
"MIT"
] | 89
|
2015-05-06T22:02:17.000Z
|
2019-04-22T14:50:33.000Z
|
tests/applicationinsights_tests/exception_tests/__init__.py
|
Rebeccalau/ApplicationInsights-Python
|
cc91fede2d6d6c48acaa5687aa13ca491a17025a
|
[
"MIT"
] | 115
|
2015-04-29T17:44:52.000Z
|
2019-04-25T21:39:02.000Z
|
tests/applicationinsights_tests/exception_tests/__init__.py
|
Rebeccalau/ApplicationInsights-Python
|
cc91fede2d6d6c48acaa5687aa13ca491a17025a
|
[
"MIT"
] | 59
|
2015-04-19T13:34:52.000Z
|
2019-04-25T21:04:02.000Z
|
from . import TestEnable
| 24
| 24
| 0.833333
|
from . import TestEnable
| true
| true
|
f71483376b99d2de664991b4779d8b43020d054e
| 888
|
py
|
Python
|
from_3b1b/on_hold/eop/reusables/eop_helpers.py
|
Tarang74/manim
|
df34d6fc0470916cfba63534b023addb69cdec9a
|
[
"MIT"
] | 1
|
2021-03-26T08:23:35.000Z
|
2021-03-26T08:23:35.000Z
|
from_3b1b/on_hold/eop/reusables/eop_helpers.py
|
Tarang74/manim
|
df34d6fc0470916cfba63534b023addb69cdec9a
|
[
"MIT"
] | null | null | null |
from_3b1b/on_hold/eop/reusables/eop_helpers.py
|
Tarang74/manim
|
df34d6fc0470916cfba63534b023addb69cdec9a
|
[
"MIT"
] | null | null | null |
import numpy as np  # rainbow_color uses np; the star imports below may already provide it
from utils.color import *
from active_projects.eop.reusables.eop_constants import *
def binary(i):
# returns an array of 0s and 1s
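# e.g. binary(6) -> [1, 1, 0] and binary(0) -> []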
if i == 0:
return []
j = i
binary_array = []
while j > 0:
jj = j // 2  # floor division; j / 2 would yield floats (and wrong bits) on Python 3
if jj > 0:
binary_array.append(j % 2)
else:
binary_array.append(1)
j = jj
return binary_array[::-1]
def nb_of_ones(i):
return binary(i).count(1)
def rainbow_color(alpha):
nb_colors = 100
rainbow = color_gradient([RED, ORANGE, YELLOW, GREEN, BLUE, PURPLE],
nb_colors)
rainbow = np.append(rainbow, PURPLE)
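# the appended PURPLE keeps the index in range when alpha == 1.0 (index == nb_colors)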
index = int(alpha * nb_colors)
return rainbow[index]
def graded_color(n, k):
if n != 0:
alpha = float(k) / n
else:
alpha = 0.5
color = interpolate_color(GRADE_COLOR_1, GRADE_COLOR_2, alpha)
return color
| 21.658537
| 72
| 0.574324
|
import numpy as np
from utils.color import *
from active_projects.eop.reusables.eop_constants import *
def binary(i):
if i == 0:
return []
j = i
binary_array = []
while j > 0:
jj = j // 2
if jj > 0:
binary_array.append(j % 2)
else:
binary_array.append(1)
j = jj
return binary_array[::-1]
def nb_of_ones(i):
return binary(i).count(1)
def rainbow_color(alpha):
nb_colors = 100
rainbow = color_gradient([RED, ORANGE, YELLOW, GREEN, BLUE, PURPLE],
nb_colors)
rainbow = np.append(rainbow, PURPLE)
index = int(alpha * nb_colors)
return rainbow[index]
def graded_color(n, k):
if n != 0:
alpha = float(k) / n
else:
alpha = 0.5
color = interpolate_color(GRADE_COLOR_1, GRADE_COLOR_2, alpha)
return color
| true
| true
|
f714837c409dd8d89eb62bb58f903782ce4acb43
| 5,005
|
py
|
Python
|
infomercial/exp/softmeta_bandit.py
|
CoAxLab/infomercial
|
fa5d1c1e5c1351735dda2961a2a94f71cd17e270
|
[
"MIT"
] | 4
|
2019-11-14T03:13:25.000Z
|
2021-01-04T17:30:23.000Z
|
infomercial/exp/softmeta_bandit.py
|
CoAxLab/infomercial
|
fa5d1c1e5c1351735dda2961a2a94f71cd17e270
|
[
"MIT"
] | null | null | null |
infomercial/exp/softmeta_bandit.py
|
CoAxLab/infomercial
|
fa5d1c1e5c1351735dda2961a2a94f71cd17e270
|
[
"MIT"
] | null | null | null |
import os
import fire
import gym
import numpy as np
from scipy.special import softmax
from noboard.csv import SummaryWriter
from copy import deepcopy
from scipy.stats import entropy
from collections import OrderedDict
from infomercial.distance import kl
from infomercial.memory import DiscreteDistribution
from infomercial.models import Critic
from infomercial.models import SoftmaxActor
from infomercial.utils import estimate_regret
from infomercial.utils import load_checkpoint
from infomercial.utils import save_checkpoint
def R_update(state, reward, critic, lr):
"""Really simple TD learning"""
update = lr * (reward - critic(state))
critic.update(state, update)
return critic
def E_update(state, value, critic, lr):
"""Bellman update"""
update = lr * value
critic.replace(state, update)
return critic
def R_homeostasis(reward, total_reward, set_point):
"""Update reward value assuming homeostatic value.
Value based on Keramati and Gutkin, 2014.
https://elifesciences.org/articles/04811
"""
deviance_last = np.abs(set_point - total_reward)
deviance = np.abs(set_point - (total_reward + reward))
reward_value = deviance_last - deviance
return reward_value
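# Worked example: with set_point = 100 and total_reward = 90, a reward of 5
# shrinks the deviance from 10 to 5, so reward_value = 5; once total_reward
# passes the set point, further reward grows the deviance and the value
# turns negative.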
def run(env_name='BanditOneHot10-v0',
num_episodes=1000,
temp=1.0,
tie_threshold=0.0,
tie_break=None,
lr_R=.1,
master_seed=42,
write_to_disk=True,
log_dir=None):
"""Bandit agent - softmax (E, R)"""
# --- Init ---
writer = SummaryWriter(log_dir=log_dir, write_to_disk=write_to_disk)
# -
env = gym.make(env_name)
env.seed(master_seed)
num_actions = env.action_space.n
all_actions = list(range(num_actions))
best_action = env.best
default_reward_value = 0
default_info_value = entropy(np.ones(num_actions) / num_actions)
E_t = default_info_value
R_t = default_reward_value
# --- Agents and memories ---
critic_R = Critic(num_actions, default_value=default_reward_value)
critic_E = Critic(num_actions, default_value=default_info_value)
actor_R = SoftmaxActor(num_actions, temp=temp, seed_value=master_seed)
actor_E = SoftmaxActor(num_actions, temp=temp, seed_value=master_seed)
memories = [DiscreteDistribution() for _ in range(num_actions)]
# -
num_best = 0
total_R = 0.0
total_E = 0.0
total_regret = 0.0
# ------------------------------------------------------------------------
for n in range(num_episodes):
env.reset()
# Meta-greed policy selection
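# the agent explores for information when E_t - tie_threshold exceeds R_t,
# and exploits reward otherwise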
if (E_t - tie_threshold) > R_t:
critic = critic_E
actor = actor_E
policy = 0
else:
critic = critic_R
actor = actor_R
policy = 1
# Choose an action; Choose a bandit
action = actor(list(critic.model.values()))
if action in best_action:
num_best += 1
# Est. regret and save it
regret = estimate_regret(all_actions, action, critic)
# Pull a lever.
state, R_t, _, _ = env.step(action)
R_t = R_homeostasis(R_t, total_R, num_episodes)
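# the episode budget (num_episodes) doubles as the homeostatic set point here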
# Estimate E
old = deepcopy(memories[action])
memories[action].update((int(state), int(R_t)))
new = deepcopy(memories[action])
E_t = kl(new, old, default_info_value)
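# information value: KL divergence between the action's memory after and before the update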
# Learning, both policies.
critic_R = R_update(action, R_t, critic_R, lr_R)
critic_E = E_update(action, E_t, critic_E, lr=1)
# Log data
writer.add_scalar("policy", policy, n)
writer.add_scalar("state", int(state), n)
writer.add_scalar("action", action, n)
writer.add_scalar("regret", regret, n)
writer.add_scalar("score_E", E_t, n)
writer.add_scalar("score_R", R_t, n)
writer.add_scalar("value_E", critic_E(action), n)
writer.add_scalar("value_R", critic_R(action), n)
total_E += E_t
total_R += R_t
total_regret += regret
writer.add_scalar("total_regret", total_regret, n)
writer.add_scalar("total_E", total_E, n)
writer.add_scalar("total_R", total_R, n)
writer.add_scalar("p_bests", num_best / (n + 1), n)
# -- Build the final result, and save or return it ---
writer.close()
result = dict(best=env.best,
num_episodes=num_episodes,
temp=temp,
tie_threshold=tie_threshold,
critic_E=critic_E.state_dict(),
critic_R=critic_R.state_dict(),
total_E=total_E,
total_R=total_R,
total_regret=total_regret,
env_name=env_name,
lr_R=lr_R,
master_seed=master_seed)
if write_to_disk:
save_checkpoint(result,
filename=os.path.join(writer.log_dir, "result.pkl"))
return result
if __name__ == "__main__":
fire.Fire(run)
| 29.441176
| 78
| 0.624575
|
import os
import fire
import gym
import numpy as np
from scipy.special import softmax
from noboard.csv import SummaryWriter
from copy import deepcopy
from scipy.stats import entropy
from collections import OrderedDict
from infomercial.distance import kl
from infomercial.memory import DiscreteDistribution
from infomercial.models import Critic
from infomercial.models import SoftmaxActor
from infomercial.utils import estimate_regret
from infomercial.utils import load_checkpoint
from infomercial.utils import save_checkpoint
def R_update(state, reward, critic, lr):
update = lr * (reward - critic(state))
critic.update(state, update)
return critic
def E_update(state, value, critic, lr):
update = lr * value
critic.replace(state, update)
return critic
def R_homeostasis(reward, total_reward, set_point):
deviance_last = np.abs(set_point - total_reward)
deviance = np.abs(set_point - (total_reward + reward))
reward_value = deviance_last - deviance
return reward_value
def run(env_name='BanditOneHot10-v0',
num_episodes=1000,
temp=1.0,
tie_threshold=0.0,
tie_break=None,
lr_R=.1,
master_seed=42,
write_to_disk=True,
log_dir=None):
writer = SummaryWriter(log_dir=log_dir, write_to_disk=write_to_disk)
env = gym.make(env_name)
env.seed(master_seed)
num_actions = env.action_space.n
all_actions = list(range(num_actions))
best_action = env.best
default_reward_value = 0
default_info_value = entropy(np.ones(num_actions) / num_actions)
E_t = default_info_value
R_t = default_reward_value
critic_R = Critic(num_actions, default_value=default_reward_value)
critic_E = Critic(num_actions, default_value=default_info_value)
actor_R = SoftmaxActor(num_actions, temp=temp, seed_value=master_seed)
actor_E = SoftmaxActor(num_actions, temp=temp, seed_value=master_seed)
memories = [DiscreteDistribution() for _ in range(num_actions)]
num_best = 0
total_R = 0.0
total_E = 0.0
total_regret = 0.0
for n in range(num_episodes):
env.reset()
if (E_t - tie_threshold) > R_t:
critic = critic_E
actor = actor_E
policy = 0
else:
critic = critic_R
actor = actor_R
policy = 1
action = actor(list(critic.model.values()))
if action in best_action:
num_best += 1
regret = estimate_regret(all_actions, action, critic)
state, R_t, _, _ = env.step(action)
R_t = R_homeostasis(R_t, total_R, num_episodes)
old = deepcopy(memories[action])
memories[action].update((int(state), int(R_t)))
new = deepcopy(memories[action])
E_t = kl(new, old, default_info_value)
critic_R = R_update(action, R_t, critic_R, lr_R)
critic_E = E_update(action, E_t, critic_E, lr=1)
writer.add_scalar("policy", policy, n)
writer.add_scalar("state", int(state), n)
writer.add_scalar("action", action, n)
writer.add_scalar("regret", regret, n)
writer.add_scalar("score_E", E_t, n)
writer.add_scalar("score_R", R_t, n)
writer.add_scalar("value_E", critic_E(action), n)
writer.add_scalar("value_R", critic_R(action), n)
total_E += E_t
total_R += R_t
total_regret += regret
writer.add_scalar("total_regret", total_regret, n)
writer.add_scalar("total_E", total_E, n)
writer.add_scalar("total_R", total_R, n)
writer.add_scalar("p_bests", num_best / (n + 1), n)
writer.close()
result = dict(best=env.best,
num_episodes=num_episodes,
temp=temp,
tie_threshold=tie_threshold,
critic_E=critic_E.state_dict(),
critic_R=critic_R.state_dict(),
total_E=total_E,
total_R=total_R,
total_regret=total_regret,
env_name=env_name,
lr_R=lr_R,
master_seed=master_seed)
if write_to_disk:
save_checkpoint(result,
filename=os.path.join(writer.log_dir, "result.pkl"))
return result
if __name__ == "__main__":
fire.Fire(run)
| true
| true
|
f71483e95b29a8c96562e9039fa444d6ed47f2af
| 680
|
py
|
Python
|
powerdnsadmin/default_config.py
|
hivelocity/PowerDNS-Admin
|
0c9ccf0a7e75817d77fcbfabba58ad2c66c04afa
|
[
"MIT"
] | null | null | null |
powerdnsadmin/default_config.py
|
hivelocity/PowerDNS-Admin
|
0c9ccf0a7e75817d77fcbfabba58ad2c66c04afa
|
[
"MIT"
] | null | null | null |
powerdnsadmin/default_config.py
|
hivelocity/PowerDNS-Admin
|
0c9ccf0a7e75817d77fcbfabba58ad2c66c04afa
|
[
"MIT"
] | null | null | null |
import os
basedir = os.path.abspath(os.path.abspath(os.path.dirname(__file__)))
### BASIC APP CONFIG
SALT = '$2b$12$yLUMTIfl21FKJQpTkRQXCu'
SECRET_KEY = 'e951e5a1f4b94151b360f47edf596dd2'
BIND_ADDRESS = '0.0.0.0'
PORT = 9191
HSTS_ENABLED = False
### DATABASE CONFIG
SQLA_DB_USER = 'pdns'
#SQLA_DB_PASSWORD = 'changeme'
SQLA_DB_HOST = '10.42.42.204'
SQLA_DB_NAME = 'pdns'
SQLALCHEMY_TRACK_MODIFICATIONS = True
### DATABASE - MySQL
#SQLALCHEMY_DATABASE_URI = 'mysql://'+SQLA_DB_USER+':'+SQLA_DB_PASSWORD+'@'+SQLA_DB_HOST+'/'+SQLA_DB_NAME
### DATABASE - SQLite
# SQLALCHEMY_DATABASE_URI = 'sqlite:///' + os.path.join(basedir, 'pdns.db')
# SAML Authentication
SAML_ENABLED = False
| 26.153846
| 105
| 0.744118
|
import os
basedir = os.path.abspath(os.path.abspath(os.path.dirname(__file__)))
SALT = '$2b$12$yLUMTIfl21FKJQpTkRQXCu'
SECRET_KEY = 'e951e5a1f4b94151b360f47edf596dd2'
BIND_ADDRESS = '0.0.0.0'
PORT = 9191
HSTS_ENABLED = False
SQLA_DB_USER = 'pdns'
SQLA_DB_HOST = '10.42.42.204'
SQLA_DB_NAME = 'pdns'
SQLALCHEMY_TRACK_MODIFICATIONS = True
SAML_ENABLED = False
| true
| true
|
f714845eb007f7f56466665f162e3ab47ac7470a
| 23,375
|
py
|
Python
|
tests/test_chargen.py
|
jderam/hyperborea3
|
c9a7aced16793f501f9befae15a47b07a8451edc
|
[
"MIT"
] | 2
|
2022-01-07T22:53:19.000Z
|
2022-02-01T07:46:13.000Z
|
tests/test_chargen.py
|
jderam/hyperborea-tools
|
c9a7aced16793f501f9befae15a47b07a8451edc
|
[
"MIT"
] | 60
|
2021-12-29T04:57:27.000Z
|
2022-02-12T09:50:55.000Z
|
tests/test_chargen.py
|
jderam/hyperborea-tools
|
c9a7aced16793f501f9befae15a47b07a8451edc
|
[
"MIT"
] | null | null | null |
import pytest
from hyperborea3.chargen import (
DBPATH,
ac_to_aac,
calculate_ac,
class_id_to_name,
get_alignment,
get_attr,
get_attr_mod,
get_caster_schools,
get_class_id_map,
get_class_level_data,
get_combat_matrix,
get_deity,
get_favoured_weapons,
get_gender,
get_hd,
get_level,
get_qualifying_classes,
get_race_id,
get_random_familiar,
get_random_spell,
get_save_bonuses,
get_spells,
get_starting_armour,
get_starting_gear,
get_starting_money,
get_starting_shield,
get_starting_weapons_melee,
get_starting_weapons_missile,
get_thief_skills,
get_turn_undead_matrix,
get_unskilled_weapon_penalty,
get_xp_to_next,
list_tables,
list_views,
roll_hit_points,
roll_stats,
)
from hyperborea3.valid_data import (
VALID_ABILITY_SCORES,
VALID_ABILITIES,
VALID_ALIGMENTS_SHORT,
VALID_CA,
VALID_CLASS_ID_MAP,
VALID_CLASS_IDS,
VALID_DEITIES,
VALID_DENOMINATIONS,
VALID_DICE_METHODS,
VALID_FA,
VALID_FAMILIARS,
VALID_FAVOURED_WEAPONS,
VALID_GENDERS,
VALID_GP,
VALID_HD_PLUS,
VALID_HD_QTY,
VALID_HD_SIZE,
VALID_LEVELS,
VALID_RACE_IDS,
VALID_SAVES,
VALID_SCHOOLS,
VALID_SCHOOLS_BY_CLASS_ID,
VALID_SPELL_LEVELS,
VALID_SQL_TABLES,
VALID_SQL_VIEWS,
VALID_TA,
VALID_UNSKILLED_PENALTIES,
)
def test_db():
assert DBPATH.is_file()
@pytest.mark.skip(
reason=(
"Currently failing on github "
"'sqlite3.OperationalError: no such table: sqlite_schema'"
)
)
def test_db_tables():
assert list_tables() == VALID_SQL_TABLES
@pytest.mark.skip(
reason=(
"Currently failing on github "
"'sqlite3.OperationalError: no such table: sqlite_schema'"
)
)
def test_db_views():
assert list_views() == VALID_SQL_VIEWS
def test_xp_to_next():
# if character is already at max level, should return None
level = 12
for class_id in VALID_CLASS_IDS:
xp_to_next = get_xp_to_next(class_id, level)
assert xp_to_next is None
def test_roll_stats():
for class_id in VALID_CLASS_IDS:
for i in range(100):
attr = roll_stats(method=6, class_id=class_id)
for stat in attr.keys():
assert stat in VALID_ABILITIES
assert attr[stat]["score"] in VALID_ABILITY_SCORES
for method in VALID_DICE_METHODS[:5]:
for i in range(1000):
attr = roll_stats(method=method)
for stat in attr.keys():
assert stat in VALID_ABILITIES
assert attr[stat]["score"] in VALID_ABILITY_SCORES
def test_get_class_id_map():
class_id_map = get_class_id_map()
assert class_id_map == VALID_CLASS_ID_MAP
@pytest.mark.parametrize(
"class_id,expected",
[(k, v) for k, v in VALID_CLASS_ID_MAP.items()],
)
def test_class_id_to_name(class_id: int, expected: str) -> None:
class_name = class_id_to_name(class_id)
assert class_name == expected
def test_get_qualifying_classes():
subclasses = True
for i in range(1000):
attr = get_attr()
qual_classes = get_qualifying_classes(attr, subclasses)
for c in qual_classes:
assert c in VALID_CLASS_IDS
subclasses = False
for i in range(1000):
attr = get_attr()
qual_classes = get_qualifying_classes(attr, subclasses)
for c in qual_classes:
assert c in range(1, 5)
def test_get_level():
for class_id in VALID_CLASS_IDS:
for xp in range(0, 1000000, 1000):
level = get_level(class_id, xp)
assert level in VALID_LEVELS
def test_get_race_id():
for i in range(1000):
race_id = get_race_id()
assert race_id in VALID_RACE_IDS
def test_get_gender():
for i in range(1000):
gender = get_gender()
assert gender in VALID_GENDERS
def test_get_save_bonuses():
for class_id in VALID_CLASS_IDS:
sv_bonus = get_save_bonuses(class_id)
for k, v in sv_bonus.items():
assert v in [0, 2]
# barbarians, berserkers, and paladins get +2 to all saves
if class_id in [5, 6, 9, 27]:
assert sum([v for v in sv_bonus.values()]) == 10
# all others get +2 to two saves
else:
assert sum([v for v in sv_bonus.values()]) == 4
def test_get_class_level_data():
for class_id in VALID_CLASS_IDS:
for level in VALID_LEVELS:
cl_data = get_class_level_data(class_id, level)
assert cl_data["fa"] in VALID_FA
assert cl_data["ca"] in VALID_CA
assert cl_data["ta"] in VALID_TA
assert cl_data["sv"] in VALID_SAVES
def test_get_hd():
for class_id in VALID_CLASS_IDS:
for level in VALID_LEVELS:
hd = get_hd(class_id, level)
qty = hd.split("d")[0]
# number of dice in 1-9
assert int(qty) in VALID_HD_QTY
part2 = hd.split("d")[1].split("+")
assert len(part2) in [1, 2]
# die size in d4, d6, d8, d10, d12
assert int(part2[0]) in VALID_HD_SIZE
if len(part2) == 2:
# +hp in 1,2,3; 2,4,6; 3,6,9; 4,8,12
assert int(part2[1]) in VALID_HD_PLUS
def test_roll_hit_points():
max_possible_hp = (10 * 12) + (12 * 3) # Barbarian
for class_id in VALID_CLASS_IDS:
for level in VALID_LEVELS:
for cn_score in VALID_ABILITY_SCORES:
mods = get_attr_mod("cn", cn_score)
hp_adj = mods["hp_adj"]
hp = roll_hit_points(class_id, level, hp_adj)
assert level <= hp <= max_possible_hp
def test_get_combat_matrix():
for fa in VALID_FA:
combat_matrix = get_combat_matrix(fa)
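# keys are target ACs from -9 to 9; an AC 0 target is hit on a roll of 20 - FA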
assert list(combat_matrix.keys()) == list(range(-9, 10))
assert combat_matrix[0] == 20 - fa
def test_starting_armour():
for class_id in VALID_CLASS_IDS:
armour = get_starting_armour(class_id)
assert list(armour.keys()) == [
"armour_id",
"armour_type",
"ac",
"dr",
"weight_class",
"mv",
"cost",
"weight",
"description",
]
def test_starting_shield():
for class_id in VALID_CLASS_IDS:
shield = get_starting_shield(class_id)
if class_id in [1, 9, 27]:
assert shield == {
"shield_id": 2,
"shield_type": "Large Shield",
"def_mod": 2,
"cost": 10,
"weight": 10,
}
elif class_id in [5, 7, 24, 26, 31, 32, 33]:
assert shield == {
"shield_id": 1,
"shield_type": "Small Shield",
"def_mod": 1,
"cost": 5,
"weight": 5,
}
else:
assert shield is None
def test_starting_weapons_melee():
for class_id in VALID_CLASS_IDS:
melee_weapons = get_starting_weapons_melee(class_id)
assert 1 <= len(melee_weapons) <= 3
def test_starting_weapons_missile():
for class_id in VALID_CLASS_IDS:
missile_weapons = get_starting_weapons_missile(class_id)
if class_id == 8:
assert len(missile_weapons) == 2
else:
assert len(missile_weapons) in [0, 1]
def test_unskilled_penalty():
for class_id in VALID_CLASS_IDS:
assert (
get_unskilled_weapon_penalty(class_id)
== VALID_UNSKILLED_PENALTIES[class_id]
)
def test_get_favoured_weapons():
for class_id in VALID_CLASS_IDS:
print(f"{class_id=}")
favoured_weapons = get_favoured_weapons(class_id)
actual_melee_wpn_ids = [
x["weapon_id"] for x in favoured_weapons["weapons_melee"]
]
actual_missile_wpn_ids = [
x["weapon_id"] for x in favoured_weapons["weapons_missile"]
]
expected = VALID_FAVOURED_WEAPONS[class_id]
assert favoured_weapons["any"] == expected["any"]
assert actual_melee_wpn_ids == expected["melee_wpns"]
assert actual_missile_wpn_ids == expected["missile_wpns"]
assert favoured_weapons["unskilled_penalty"] == expected["unskilled_penalty"]
def test_get_starting_gear():
for class_id in VALID_CLASS_IDS:
equip = get_starting_gear(class_id)
assert len(equip) > 0
for item in equip:
assert isinstance(item, str)
def test_get_starting_money():
for i in range(100):
money = get_starting_money()
assert list(money.keys()) == VALID_DENOMINATIONS
for k in VALID_DENOMINATIONS:
if k == "gp":
assert money[k] in VALID_GP
else:
assert money[k] == 0
def test_calculate_ac():
for class_id in VALID_CLASS_IDS:
armour = get_starting_armour(class_id)
shield = get_starting_shield(class_id)
shield_def_mod = shield["def_mod"] if shield is not None else 0
for dx_score in VALID_ABILITY_SCORES:
dx_mod = get_attr_mod("dx", dx_score)
ac = calculate_ac(
armour["ac"],
shield_def_mod,
dx_mod["def_adj"],
)
# all AC values for starting characters should be 1 to 11 (level 1)
# This may need updating after we include higher-level PCs,
# depending on if they have any abilities that improve AC
assert ac in range(
1, 12
), f"""invalid ac:
class_id = {class_id}
armour_ac = {armour["ac"]}
shield_def_mod = {shield_def_mod}
dx_score = {dx_score}
dx_def_adj = {dx_mod["def_adj"]}
ac = {ac}
"""
def test_ac_to_aac():
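    # ascending AC mirrors descending AC around 19, i.e. aac = 19 - ac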
for ac in range(-10, 20):
aac = ac_to_aac(ac)
assert ac + aac == 19
def test_get_alignment():
for class_id in VALID_CLASS_IDS:
alignment = get_alignment(class_id)
if class_id in [1, 2, 3, 7, 8, 11, 13, 18, 19]:
allowed_alignments = ["CE", "CG", "LE", "LG", "N"]
elif class_id in [4, 24, 25, 26, 31]:
allowed_alignments = ["CE", "CG", "LE", "N"]
elif class_id == 10:
allowed_alignments = ["CG", "LG", "N"]
elif class_id in [14, 22, 30]:
allowed_alignments = ["CE", "LE", "N"]
elif class_id in [15, 16, 21, 23, 29, 32]:
allowed_alignments = ["CE", "CG", "N"]
elif class_id in [12, 28]:
allowed_alignments = ["LE", "LG", "N"]
elif class_id in [5, 6, 20]:
allowed_alignments = ["CE", "CG"]
elif class_id == 33:
allowed_alignments = ["LE", "N"]
elif class_id == 9:
allowed_alignments = ["LG"]
elif class_id == 27:
allowed_alignments = ["LE"]
elif class_id == 17:
allowed_alignments = ["N"]
else:
raise ValueError(f"Unexpected class_id: {class_id}")
assert (
alignment["short_name"] in allowed_alignments
), f"""
Unexpected alignment '{alignment}' not in
allowed values {allowed_alignments}
"""
@pytest.mark.repeat(20)
def test_get_deity():
for short_align in VALID_ALIGMENTS_SHORT:
deity = get_deity(short_align)
assert deity["deity_name"] in VALID_DEITIES
def test_get_thief_skills():
# classes without thief skills
for class_id in [
1,
2,
3,
7,
9,
11,
12,
13,
14,
15,
16,
17,
19,
20,
21,
27,
28,
29,
30,
]:
thief_skills = get_thief_skills(class_id, 1, 10, 10, 10)
assert (
thief_skills is None
), f"class_id: {class_id} is not supposed to have thief skills"
# level 1 thief with 10's
expected_thief_skills = [
{"thief_skill": "climb", "skill_name": "Climb", "skill_roll": 8, "stat": "dx"},
{
"thief_skill": "decipher_script",
"skill_name": "Decipher Script",
"skill_roll": 0,
"stat": "in",
},
{
"thief_skill": "discern_noise",
"skill_name": "Discern Noise",
"skill_roll": 4,
"stat": "ws",
},
{"thief_skill": "hide", "skill_name": "Hide", "skill_roll": 5, "stat": "dx"},
{
"thief_skill": "manipulate_traps",
"skill_name": "Manipulate Traps",
"skill_roll": 3,
"stat": "dx",
},
{
"thief_skill": "move_silently",
"skill_name": "Move Silently",
"skill_roll": 5,
"stat": "dx",
},
{
"thief_skill": "open_locks",
"skill_name": "Open Locks",
"skill_roll": 3,
"stat": "dx",
},
{
"thief_skill": "pick_pockets",
"skill_name": "Pick Pockets",
"skill_roll": 4,
"stat": "dx",
},
{
"thief_skill": "read_scrolls",
"skill_name": "Read Scrolls",
"skill_roll": None,
"stat": "in",
},
]
thief_skills = get_thief_skills(4, 1, 10, 10, 10)
assert thief_skills == expected_thief_skills
# level 1 thief with 16's
expected_thief_skills = [
{"thief_skill": "climb", "skill_name": "Climb", "skill_roll": 9, "stat": "dx"},
{
"thief_skill": "decipher_script",
"skill_name": "Decipher Script",
"skill_roll": 1,
"stat": "in",
},
{
"thief_skill": "discern_noise",
"skill_name": "Discern Noise",
"skill_roll": 5,
"stat": "ws",
},
{"thief_skill": "hide", "skill_name": "Hide", "skill_roll": 6, "stat": "dx"},
{
"thief_skill": "manipulate_traps",
"skill_name": "Manipulate Traps",
"skill_roll": 4,
"stat": "dx",
},
{
"thief_skill": "move_silently",
"skill_name": "Move Silently",
"skill_roll": 6,
"stat": "dx",
},
{
"thief_skill": "open_locks",
"skill_name": "Open Locks",
"skill_roll": 4,
"stat": "dx",
},
{
"thief_skill": "pick_pockets",
"skill_name": "Pick Pockets",
"skill_roll": 5,
"stat": "dx",
},
{
"thief_skill": "read_scrolls",
"skill_name": "Read Scrolls",
"skill_roll": None,
"stat": "in",
},
]
thief_skills = get_thief_skills(4, 1, 16, 16, 16)
assert thief_skills == expected_thief_skills
# level 12 thief with 10's
expected_thief_skills = [
{"thief_skill": "climb", "skill_name": "Climb", "skill_roll": 10, "stat": "dx"},
{
"thief_skill": "decipher_script",
"skill_name": "Decipher Script",
"skill_roll": 5,
"stat": "in",
},
{
"thief_skill": "discern_noise",
"skill_name": "Discern Noise",
"skill_roll": 9,
"stat": "ws",
},
{"thief_skill": "hide", "skill_name": "Hide", "skill_roll": 10, "stat": "dx"},
{
"thief_skill": "manipulate_traps",
"skill_name": "Manipulate Traps",
"skill_roll": 8,
"stat": "dx",
},
{
"thief_skill": "move_silently",
"skill_name": "Move Silently",
"skill_roll": 10,
"stat": "dx",
},
{
"thief_skill": "open_locks",
"skill_name": "Open Locks",
"skill_roll": 8,
"stat": "dx",
},
{
"thief_skill": "pick_pockets",
"skill_name": "Pick Pockets",
"skill_roll": 9,
"stat": "dx",
},
{
"thief_skill": "read_scrolls",
"skill_name": "Read Scrolls",
"skill_roll": 5,
"stat": "in",
},
]
thief_skills = get_thief_skills(4, 12, 10, 10, 10)
assert thief_skills == expected_thief_skills
# level 12 thief with 16's
expected_thief_skills = [
{"thief_skill": "climb", "skill_name": "Climb", "skill_roll": 11, "stat": "dx"},
{
"thief_skill": "decipher_script",
"skill_name": "Decipher Script",
"skill_roll": 6,
"stat": "in",
},
{
"thief_skill": "discern_noise",
"skill_name": "Discern Noise",
"skill_roll": 10,
"stat": "ws",
},
{"thief_skill": "hide", "skill_name": "Hide", "skill_roll": 11, "stat": "dx"},
{
"thief_skill": "manipulate_traps",
"skill_name": "Manipulate Traps",
"skill_roll": 9,
"stat": "dx",
},
{
"thief_skill": "move_silently",
"skill_name": "Move Silently",
"skill_roll": 11,
"stat": "dx",
},
{
"thief_skill": "open_locks",
"skill_name": "Open Locks",
"skill_roll": 9,
"stat": "dx",
},
{
"thief_skill": "pick_pockets",
"skill_name": "Pick Pockets",
"skill_roll": 10,
"stat": "dx",
},
{
"thief_skill": "read_scrolls",
"skill_name": "Read Scrolls",
"skill_roll": 6,
"stat": "in",
},
]
thief_skills = get_thief_skills(4, 12, 16, 16, 16)
assert thief_skills == expected_thief_skills
def test_get_caster_schools():
for class_id in VALID_CLASS_IDS:
schools = get_caster_schools(class_id)
if class_id == 21:
assert schools in [
["clr", "mag"],
["clr", "nec"],
["drd", "mag"],
["drd", "nec"],
]
else:
assert schools == VALID_SCHOOLS_BY_CLASS_ID[class_id]
def test_get_turn_undead_matrix():
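    # the matrix values appear to encode turning results per undead type:
    # "x:12" reads as a roll-x-or-less-on-d12 chance, "NT" as no turn,
    # "T" as automatic turn, "D" as destroy, and "UD" as a stronger result
    # (labels inferred from the data below, not from the rulebook)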
for ta in VALID_TA:
for turn_adj in [-1, 0, 1]:
turn_undead_matrix = get_turn_undead_matrix(ta, turn_adj)
if ta == 0:
assert turn_undead_matrix is None
if ta == 1 and turn_adj == -1:
assert turn_undead_matrix == {
"undead_type_00": "9:12",
"undead_type_01": "6:12",
"undead_type_02": "3:12",
"undead_type_03": "NT",
"undead_type_04": "NT",
"undead_type_05": "NT",
"undead_type_06": "NT",
"undead_type_07": "NT",
"undead_type_08": "NT",
"undead_type_09": "NT",
"undead_type_10": "NT",
"undead_type_11": "NT",
"undead_type_12": "NT",
"undead_type_13": "NT",
}
if ta == 1 and turn_adj == 0:
assert turn_undead_matrix == {
"undead_type_00": "10:12",
"undead_type_01": "7:12",
"undead_type_02": "4:12",
"undead_type_03": "1:12",
"undead_type_04": "NT",
"undead_type_05": "NT",
"undead_type_06": "NT",
"undead_type_07": "NT",
"undead_type_08": "NT",
"undead_type_09": "NT",
"undead_type_10": "NT",
"undead_type_11": "NT",
"undead_type_12": "NT",
"undead_type_13": "NT",
}
if ta == 12 and turn_adj == 0:
assert turn_undead_matrix == {
"undead_type_00": "UD",
"undead_type_01": "UD",
"undead_type_02": "UD",
"undead_type_03": "UD",
"undead_type_04": "UD",
"undead_type_05": "UD",
"undead_type_06": "D",
"undead_type_07": "D",
"undead_type_08": "D",
"undead_type_09": "T",
"undead_type_10": "T",
"undead_type_11": "10:12",
"undead_type_12": "7:12",
"undead_type_13": "4:12",
}
if ta == 12 and turn_adj == 1:
assert turn_undead_matrix == {
"undead_type_00": "UD",
"undead_type_01": "UD",
"undead_type_02": "UD",
"undead_type_03": "UD",
"undead_type_04": "UD",
"undead_type_05": "UD",
"undead_type_06": "D",
"undead_type_07": "D",
"undead_type_08": "D",
"undead_type_09": "T",
"undead_type_10": "T",
"undead_type_11": "11:12",
"undead_type_12": "8:12",
"undead_type_13": "5:12",
}
def test_spell_data():
for school in VALID_SCHOOLS:
for spell_level in VALID_SPELL_LEVELS:
for d100_roll in range(1, 101):
spell = get_random_spell(school, spell_level, d100_roll)
assert spell is not None
def test_get_random_spell():
for school in VALID_SCHOOLS:
for spell_level in VALID_SPELL_LEVELS:
for i in range(1000):
spell = get_random_spell(school, spell_level)
assert spell["school"] == school
assert spell["spell_level"] == spell_level
assert spell["reversible"] in [None, True, False]
def test_get_spells():
for class_id in VALID_CLASS_IDS:
for level in VALID_LEVELS:
cl_data = get_class_level_data(class_id, level)
ca = cl_data["ca"]
spells = get_spells(class_id, level, ca)
if ca > 0:
assert spells, f"{class_id=} {level=} {spells=}"
schools = list(spells.keys())
else:
schools = []
if ca > 1 and class_id != 21:
assert schools == VALID_SCHOOLS_BY_CLASS_ID[class_id]
elif class_id == 21:
assert schools in [
["clr", "mag"],
["clr", "nec"],
["drd", "mag"],
["drd", "nec"],
]
# classes without spells
if ca == 0:
assert spells is None, f"{class_id=} {level=}"
# classes with no spells at early levels
def test_get_random_familiar():
for i in range(1000):
animal = get_random_familiar()
assert animal in VALID_FAMILIARS, f"{animal=} not in {VALID_FAMILIARS}"
| 30.47588
| 88
| 0.515722
|
import pytest
from hyperborea3.chargen import (
DBPATH,
ac_to_aac,
calculate_ac,
class_id_to_name,
get_alignment,
get_attr,
get_attr_mod,
get_caster_schools,
get_class_id_map,
get_class_level_data,
get_combat_matrix,
get_deity,
get_favoured_weapons,
get_gender,
get_hd,
get_level,
get_qualifying_classes,
get_race_id,
get_random_familiar,
get_random_spell,
get_save_bonuses,
get_spells,
get_starting_armour,
get_starting_gear,
get_starting_money,
get_starting_shield,
get_starting_weapons_melee,
get_starting_weapons_missile,
get_thief_skills,
get_turn_undead_matrix,
get_unskilled_weapon_penalty,
get_xp_to_next,
list_tables,
list_views,
roll_hit_points,
roll_stats,
)
from hyperborea3.valid_data import (
VALID_ABILITY_SCORES,
VALID_ABILITIES,
VALID_ALIGMENTS_SHORT,
VALID_CA,
VALID_CLASS_ID_MAP,
VALID_CLASS_IDS,
VALID_DEITIES,
VALID_DENOMINATIONS,
VALID_DICE_METHODS,
VALID_FA,
VALID_FAMILIARS,
VALID_FAVOURED_WEAPONS,
VALID_GENDERS,
VALID_GP,
VALID_HD_PLUS,
VALID_HD_QTY,
VALID_HD_SIZE,
VALID_LEVELS,
VALID_RACE_IDS,
VALID_SAVES,
VALID_SCHOOLS,
VALID_SCHOOLS_BY_CLASS_ID,
VALID_SPELL_LEVELS,
VALID_SQL_TABLES,
VALID_SQL_VIEWS,
VALID_TA,
VALID_UNSKILLED_PENALTIES,
)
def test_db():
assert DBPATH.is_file()
@pytest.mark.skip(
reason=(
"Currently failing on github "
"'sqlite3.OperationalError: no such table: sqlite_schema'"
)
)
def test_db_tables():
assert list_tables() == VALID_SQL_TABLES
@pytest.mark.skip(
reason=(
"Currently failing on github "
"'sqlite3.OperationalError: no such table: sqlite_schema'"
)
)
def test_db_views():
assert list_views() == VALID_SQL_VIEWS
def test_xp_to_next():
level = 12
for class_id in VALID_CLASS_IDS:
xp_to_next = get_xp_to_next(class_id, level)
assert xp_to_next is None
def test_roll_stats():
for class_id in VALID_CLASS_IDS:
for i in range(100):
attr = roll_stats(method=6, class_id=class_id)
for stat in attr.keys():
assert stat in VALID_ABILITIES
assert attr[stat]["score"] in VALID_ABILITY_SCORES
for method in VALID_DICE_METHODS[:5]:
for i in range(1000):
attr = roll_stats(method=method)
for stat in attr.keys():
assert stat in VALID_ABILITIES
assert attr[stat]["score"] in VALID_ABILITY_SCORES
def test_get_class_id_map():
class_id_map = get_class_id_map()
assert class_id_map == VALID_CLASS_ID_MAP
@pytest.mark.parametrize(
"class_id,expected",
[(k, v) for k, v in VALID_CLASS_ID_MAP.items()],
)
def test_class_id_to_name(class_id: int, expected: str) -> None:
class_name = class_id_to_name(class_id)
assert class_name == expected
def test_get_qualifying_classes():
subclasses = True
for i in range(1000):
attr = get_attr()
qual_classes = get_qualifying_classes(attr, subclasses)
for c in qual_classes:
assert c in VALID_CLASS_IDS
subclasses = False
for i in range(1000):
attr = get_attr()
qual_classes = get_qualifying_classes(attr, subclasses)
for c in qual_classes:
assert c in range(1, 5)
def test_get_level():
for class_id in VALID_CLASS_IDS:
for xp in range(0, 1000000, 1000):
level = get_level(class_id, xp)
assert level in VALID_LEVELS
def test_get_race_id():
for i in range(1000):
race_id = get_race_id()
assert race_id in VALID_RACE_IDS
def test_get_gender():
for i in range(1000):
gender = get_gender()
assert gender in VALID_GENDERS
def test_get_save_bonuses():
for class_id in VALID_CLASS_IDS:
sv_bonus = get_save_bonuses(class_id)
for k, v in sv_bonus.items():
assert v in [0, 2]
if class_id in [5, 6, 9, 27]:
assert sum([v for v in sv_bonus.values()]) == 10
else:
assert sum([v for v in sv_bonus.values()]) == 4
def test_get_class_level_data():
for class_id in VALID_CLASS_IDS:
for level in VALID_LEVELS:
cl_data = get_class_level_data(class_id, level)
assert cl_data["fa"] in VALID_FA
assert cl_data["ca"] in VALID_CA
assert cl_data["ta"] in VALID_TA
assert cl_data["sv"] in VALID_SAVES
def test_get_hd():
for class_id in VALID_CLASS_IDS:
for level in VALID_LEVELS:
hd = get_hd(class_id, level)
qty = hd.split("d")[0]
assert int(qty) in VALID_HD_QTY
part2 = hd.split("d")[1].split("+")
assert len(part2) in [1, 2]
assert int(part2[0]) in VALID_HD_SIZE
if len(part2) == 2:
assert int(part2[1]) in VALID_HD_PLUS
def test_roll_hit_points():
max_possible_hp = (10 * 12) + (12 * 3)
for class_id in VALID_CLASS_IDS:
for level in VALID_LEVELS:
for cn_score in VALID_ABILITY_SCORES:
mods = get_attr_mod("cn", cn_score)
hp_adj = mods["hp_adj"]
hp = roll_hit_points(class_id, level, hp_adj)
assert level <= hp <= max_possible_hp
def test_get_combat_matrix():
for fa in VALID_FA:
combat_matrix = get_combat_matrix(fa)
assert list(combat_matrix.keys()) == list(range(-9, 10))
assert combat_matrix[0] == 20 - fa
def test_starting_armour():
for class_id in VALID_CLASS_IDS:
armour = get_starting_armour(class_id)
assert list(armour.keys()) == [
"armour_id",
"armour_type",
"ac",
"dr",
"weight_class",
"mv",
"cost",
"weight",
"description",
]
def test_starting_shield():
for class_id in VALID_CLASS_IDS:
shield = get_starting_shield(class_id)
if class_id in [1, 9, 27]:
assert shield == {
"shield_id": 2,
"shield_type": "Large Shield",
"def_mod": 2,
"cost": 10,
"weight": 10,
}
elif class_id in [5, 7, 24, 26, 31, 32, 33]:
assert shield == {
"shield_id": 1,
"shield_type": "Small Shield",
"def_mod": 1,
"cost": 5,
"weight": 5,
}
else:
assert shield is None
def test_starting_weapons_melee():
for class_id in VALID_CLASS_IDS:
melee_weapons = get_starting_weapons_melee(class_id)
assert 1 <= len(melee_weapons) <= 3
def test_starting_weapons_missile():
for class_id in VALID_CLASS_IDS:
missile_weapons = get_starting_weapons_missile(class_id)
if class_id == 8:
assert len(missile_weapons) == 2
else:
assert len(missile_weapons) in [0, 1]
def test_unskilled_penalty():
for class_id in VALID_CLASS_IDS:
assert (
get_unskilled_weapon_penalty(class_id)
== VALID_UNSKILLED_PENALTIES[class_id]
)
def test_get_favoured_weapons():
for class_id in VALID_CLASS_IDS:
print(f"{class_id=}")
favoured_weapons = get_favoured_weapons(class_id)
actual_melee_wpn_ids = [
x["weapon_id"] for x in favoured_weapons["weapons_melee"]
]
actual_missile_wpn_ids = [
x["weapon_id"] for x in favoured_weapons["weapons_missile"]
]
expected = VALID_FAVOURED_WEAPONS[class_id]
assert favoured_weapons["any"] == expected["any"]
assert actual_melee_wpn_ids == expected["melee_wpns"]
assert actual_missile_wpn_ids == expected["missile_wpns"]
assert favoured_weapons["unskilled_penalty"] == expected["unskilled_penalty"]
def test_get_starting_gear():
for class_id in VALID_CLASS_IDS:
equip = get_starting_gear(class_id)
assert len(equip) > 0
for item in equip:
assert isinstance(item, str)
def test_get_starting_money():
for i in range(100):
money = get_starting_money()
assert list(money.keys()) == VALID_DENOMINATIONS
for k in VALID_DENOMINATIONS:
if k == "gp":
assert money[k] in VALID_GP
else:
assert money[k] == 0
def test_calculate_ac():
for class_id in VALID_CLASS_IDS:
armour = get_starting_armour(class_id)
shield = get_starting_shield(class_id)
shield_def_mod = shield["def_mod"] if shield is not None else 0
for dx_score in VALID_ABILITY_SCORES:
dx_mod = get_attr_mod("dx", dx_score)
ac = calculate_ac(
armour["ac"],
shield_def_mod,
dx_mod["def_adj"],
)
assert ac in range(
1, 12
), f"""invalid ac:
class_id = {class_id}
armour_ac = {armour["ac"]}
shield_def_mod = {shield_def_mod}
dx_score = {dx_score}
dx_def_adj = {dx_mod["def_adj"]}
ac = {ac}
"""
def test_ac_to_aac():
for ac in range(-10, 20):
aac = ac_to_aac(ac)
assert ac + aac == 19
def test_get_alignment():
for class_id in VALID_CLASS_IDS:
alignment = get_alignment(class_id)
if class_id in [1, 2, 3, 7, 8, 11, 13, 18, 19]:
allowed_alignments = ["CE", "CG", "LE", "LG", "N"]
elif class_id in [4, 24, 25, 26, 31]:
allowed_alignments = ["CE", "CG", "LE", "N"]
elif class_id == 10:
allowed_alignments = ["CG", "LG", "N"]
elif class_id in [14, 22, 30]:
allowed_alignments = ["CE", "LE", "N"]
elif class_id in [15, 16, 21, 23, 29, 32]:
allowed_alignments = ["CE", "CG", "N"]
elif class_id in [12, 28]:
allowed_alignments = ["LE", "LG", "N"]
elif class_id in [5, 6, 20]:
allowed_alignments = ["CE", "CG"]
elif class_id == 33:
allowed_alignments = ["LE", "N"]
elif class_id == 9:
allowed_alignments = ["LG"]
elif class_id == 27:
allowed_alignments = ["LE"]
elif class_id == 17:
allowed_alignments = ["N"]
else:
raise ValueError(f"Unexpected class_id: {class_id}")
assert (
alignment["short_name"] in allowed_alignments
), f"""
Unexpected alignment '{alignment}' not in
allowed values {allowed_alignments}
"""
@pytest.mark.repeat(20)
def test_get_deity():
for short_align in VALID_ALIGMENTS_SHORT:
deity = get_deity(short_align)
assert deity["deity_name"] in VALID_DEITIES
def test_get_thief_skills():
for class_id in [
1,
2,
3,
7,
9,
11,
12,
13,
14,
15,
16,
17,
19,
20,
21,
27,
28,
29,
30,
]:
thief_skills = get_thief_skills(class_id, 1, 10, 10, 10)
assert (
thief_skills is None
), f"class_id: {class_id} is not supposed to have thief skills"
expected_thief_skills = [
{"thief_skill": "climb", "skill_name": "Climb", "skill_roll": 8, "stat": "dx"},
{
"thief_skill": "decipher_script",
"skill_name": "Decipher Script",
"skill_roll": 0,
"stat": "in",
},
{
"thief_skill": "discern_noise",
"skill_name": "Discern Noise",
"skill_roll": 4,
"stat": "ws",
},
{"thief_skill": "hide", "skill_name": "Hide", "skill_roll": 5, "stat": "dx"},
{
"thief_skill": "manipulate_traps",
"skill_name": "Manipulate Traps",
"skill_roll": 3,
"stat": "dx",
},
{
"thief_skill": "move_silently",
"skill_name": "Move Silently",
"skill_roll": 5,
"stat": "dx",
},
{
"thief_skill": "open_locks",
"skill_name": "Open Locks",
"skill_roll": 3,
"stat": "dx",
},
{
"thief_skill": "pick_pockets",
"skill_name": "Pick Pockets",
"skill_roll": 4,
"stat": "dx",
},
{
"thief_skill": "read_scrolls",
"skill_name": "Read Scrolls",
"skill_roll": None,
"stat": "in",
},
]
thief_skills = get_thief_skills(4, 1, 10, 10, 10)
assert thief_skills == expected_thief_skills
# level 1 thief with 16's
expected_thief_skills = [
{"thief_skill": "climb", "skill_name": "Climb", "skill_roll": 9, "stat": "dx"},
{
"thief_skill": "decipher_script",
"skill_name": "Decipher Script",
"skill_roll": 1,
"stat": "in",
},
{
"thief_skill": "discern_noise",
"skill_name": "Discern Noise",
"skill_roll": 5,
"stat": "ws",
},
{"thief_skill": "hide", "skill_name": "Hide", "skill_roll": 6, "stat": "dx"},
{
"thief_skill": "manipulate_traps",
"skill_name": "Manipulate Traps",
"skill_roll": 4,
"stat": "dx",
},
{
"thief_skill": "move_silently",
"skill_name": "Move Silently",
"skill_roll": 6,
"stat": "dx",
},
{
"thief_skill": "open_locks",
"skill_name": "Open Locks",
"skill_roll": 4,
"stat": "dx",
},
{
"thief_skill": "pick_pockets",
"skill_name": "Pick Pockets",
"skill_roll": 5,
"stat": "dx",
},
{
"thief_skill": "read_scrolls",
"skill_name": "Read Scrolls",
"skill_roll": None,
"stat": "in",
},
]
thief_skills = get_thief_skills(4, 1, 16, 16, 16)
assert thief_skills == expected_thief_skills
expected_thief_skills = [
{"thief_skill": "climb", "skill_name": "Climb", "skill_roll": 10, "stat": "dx"},
{
"thief_skill": "decipher_script",
"skill_name": "Decipher Script",
"skill_roll": 5,
"stat": "in",
},
{
"thief_skill": "discern_noise",
"skill_name": "Discern Noise",
"skill_roll": 9,
"stat": "ws",
},
{"thief_skill": "hide", "skill_name": "Hide", "skill_roll": 10, "stat": "dx"},
{
"thief_skill": "manipulate_traps",
"skill_name": "Manipulate Traps",
"skill_roll": 8,
"stat": "dx",
},
{
"thief_skill": "move_silently",
"skill_name": "Move Silently",
"skill_roll": 10,
"stat": "dx",
},
{
"thief_skill": "open_locks",
"skill_name": "Open Locks",
"skill_roll": 8,
"stat": "dx",
},
{
"thief_skill": "pick_pockets",
"skill_name": "Pick Pockets",
"skill_roll": 9,
"stat": "dx",
},
{
"thief_skill": "read_scrolls",
"skill_name": "Read Scrolls",
"skill_roll": 5,
"stat": "in",
},
]
thief_skills = get_thief_skills(4, 12, 10, 10, 10)
assert thief_skills == expected_thief_skills
# level 12 thief with 16's
expected_thief_skills = [
{"thief_skill": "climb", "skill_name": "Climb", "skill_roll": 11, "stat": "dx"},
{
"thief_skill": "decipher_script",
"skill_name": "Decipher Script",
"skill_roll": 6,
"stat": "in",
},
{
"thief_skill": "discern_noise",
"skill_name": "Discern Noise",
"skill_roll": 10,
"stat": "ws",
},
{"thief_skill": "hide", "skill_name": "Hide", "skill_roll": 11, "stat": "dx"},
{
"thief_skill": "manipulate_traps",
"skill_name": "Manipulate Traps",
"skill_roll": 9,
"stat": "dx",
},
{
"thief_skill": "move_silently",
"skill_name": "Move Silently",
"skill_roll": 11,
"stat": "dx",
},
{
"thief_skill": "open_locks",
"skill_name": "Open Locks",
"skill_roll": 9,
"stat": "dx",
},
{
"thief_skill": "pick_pockets",
"skill_name": "Pick Pockets",
"skill_roll": 10,
"stat": "dx",
},
{
"thief_skill": "read_scrolls",
"skill_name": "Read Scrolls",
"skill_roll": 6,
"stat": "in",
},
]
thief_skills = get_thief_skills(4, 12, 16, 16, 16)
assert thief_skills == expected_thief_skills
def test_get_caster_schools():
for class_id in VALID_CLASS_IDS:
schools = get_caster_schools(class_id)
if class_id == 21:
assert schools in [
["clr", "mag"],
["clr", "nec"],
["drd", "mag"],
["drd", "nec"],
]
else:
assert schools == VALID_SCHOOLS_BY_CLASS_ID[class_id]
def test_get_turn_undead_matrix():
for ta in VALID_TA:
for turn_adj in [-1, 0, 1]:
turn_undead_matrix = get_turn_undead_matrix(ta, turn_adj)
if ta == 0:
assert turn_undead_matrix is None
if ta == 1 and turn_adj == -1:
assert turn_undead_matrix == {
"undead_type_00": "9:12",
"undead_type_01": "6:12",
"undead_type_02": "3:12",
"undead_type_03": "NT",
"undead_type_04": "NT",
"undead_type_05": "NT",
"undead_type_06": "NT",
"undead_type_07": "NT",
"undead_type_08": "NT",
"undead_type_09": "NT",
"undead_type_10": "NT",
"undead_type_11": "NT",
"undead_type_12": "NT",
"undead_type_13": "NT",
}
if ta == 1 and turn_adj == 0:
assert turn_undead_matrix == {
"undead_type_00": "10:12",
"undead_type_01": "7:12",
"undead_type_02": "4:12",
"undead_type_03": "1:12",
"undead_type_04": "NT",
"undead_type_05": "NT",
"undead_type_06": "NT",
"undead_type_07": "NT",
"undead_type_08": "NT",
"undead_type_09": "NT",
"undead_type_10": "NT",
"undead_type_11": "NT",
"undead_type_12": "NT",
"undead_type_13": "NT",
}
if ta == 12 and turn_adj == 0:
assert turn_undead_matrix == {
"undead_type_00": "UD",
"undead_type_01": "UD",
"undead_type_02": "UD",
"undead_type_03": "UD",
"undead_type_04": "UD",
"undead_type_05": "UD",
"undead_type_06": "D",
"undead_type_07": "D",
"undead_type_08": "D",
"undead_type_09": "T",
"undead_type_10": "T",
"undead_type_11": "10:12",
"undead_type_12": "7:12",
"undead_type_13": "4:12",
}
if ta == 12 and turn_adj == 1:
assert turn_undead_matrix == {
"undead_type_00": "UD",
"undead_type_01": "UD",
"undead_type_02": "UD",
"undead_type_03": "UD",
"undead_type_04": "UD",
"undead_type_05": "UD",
"undead_type_06": "D",
"undead_type_07": "D",
"undead_type_08": "D",
"undead_type_09": "T",
"undead_type_10": "T",
"undead_type_11": "11:12",
"undead_type_12": "8:12",
"undead_type_13": "5:12",
}
def test_spell_data():
for school in VALID_SCHOOLS:
for spell_level in VALID_SPELL_LEVELS:
for d100_roll in range(1, 101):
spell = get_random_spell(school, spell_level, d100_roll)
assert spell is not None
def test_get_random_spell():
for school in VALID_SCHOOLS:
for spell_level in VALID_SPELL_LEVELS:
for i in range(1000):
spell = get_random_spell(school, spell_level)
assert spell["school"] == school
assert spell["spell_level"] == spell_level
assert spell["reversible"] in [None, True, False]
def test_get_spells():
for class_id in VALID_CLASS_IDS:
for level in VALID_LEVELS:
cl_data = get_class_level_data(class_id, level)
ca = cl_data["ca"]
spells = get_spells(class_id, level, ca)
if ca > 0:
assert spells, f"{class_id=} {level=} {spells=}"
schools = list(spells.keys())
else:
schools = []
if ca > 1 and class_id != 21:
assert schools == VALID_SCHOOLS_BY_CLASS_ID[class_id]
elif class_id == 21:
assert schools in [
["clr", "mag"],
["clr", "nec"],
["drd", "mag"],
["drd", "nec"],
]
if ca == 0:
assert spells is None, f"{class_id=} {level=}"
def test_get_random_familiar():
for i in range(1000):
animal = get_random_familiar()
assert animal in VALID_FAMILIARS, f"{animal=} not in {VALID_FAMILIARS}"
| true
| true
|
f71484d9b44030ff88de3d344a897e3b616df745
| 3,140
|
py
|
Python
|
4_forthproject/forthproject/settings.py
|
merry-hyelyn/LIKE_LION
|
26d6642a88d5c075447c60d43a70a7d0f082fb07
|
[
"MIT"
] | null | null | null |
4_forthproject/forthproject/settings.py
|
merry-hyelyn/LIKE_LION
|
26d6642a88d5c075447c60d43a70a7d0f082fb07
|
[
"MIT"
] | null | null | null |
4_forthproject/forthproject/settings.py
|
merry-hyelyn/LIKE_LION
|
26d6642a88d5c075447c60d43a70a7d0f082fb07
|
[
"MIT"
] | null | null | null |
"""
Django settings for forthproject project.
Generated by 'django-admin startproject' using Django 2.1.8.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '8sjx*ixqizq%)vswdwn82p(w8en&cknd@dey%8h7ex@e&bqx_4'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'student.apps.StudentConfig',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'forthproject.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'forthproject.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
| 25.737705
| 91
| 0.698726
|
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
SECRET_KEY = '8sjx*ixqizq%)vswdwn82p(w8en&cknd@dey%8h7ex@e&bqx_4'
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'student.apps.StudentConfig',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'forthproject.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'forthproject.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
| true
| true
|
f71485e72b788fa273c442546c715e7aa086321c
| 8,018
|
py
|
Python
|
gasex/diff.py
|
dnicholson/gasex-python
|
53b8c3ff4e64e724d8883bdef299d465621b124f
|
[
"MIT"
] | 1
|
2019-04-06T17:52:30.000Z
|
2019-04-06T17:52:30.000Z
|
gasex/diff.py
|
dnicholson/gasex-python
|
53b8c3ff4e64e724d8883bdef299d465621b124f
|
[
"MIT"
] | null | null | null |
gasex/diff.py
|
dnicholson/gasex-python
|
53b8c3ff4e64e724d8883bdef299d465621b124f
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
% Diffusion coeff and Schmidt number for gases in fresh/sea water
%=========================================================================
% Modified by D. Nicholson from MATLAB gas_diffusion Version 2.0 16 July 2013
% Author: Roberta C. Hamme (University of Victoria)
% Diffusion values for 'He','Ne','Ar','Kr','Xe','N2','O2','CH4','H2' and 'CO2' are calculated from
% gas_diffusion Version 2.0 functions
% salinity correction is of the form: D = D0 * (1 - 0.049 * SP / 35.5)
%
%
% Support for additional gases ('CO2','N2O','CH4','RN','SF6','DMS','CFC12','CFC11','CH3BR','CCL4')
% has been added based on Wanninkhof 2014
%
% Table 1:
% Sc = A + B*t + C*t^2 + D*t^3 + E*t^4 (t in °C). The last column is the calculated
% Schmidt number for 20°C. The Schmidt number is the kinematic viscosity of water
% divided by the molecular diffusion coefficient of the gas. The kinematic viscosities
% for fresh water and seawater are from Sharqawy et al. (2010). The diffusion
% coefficients of gases are from the following: 3He, He, Ne, Kr, Xe, CH4, CO2, and Rn
% measured by Jähne et al. (1987); Ar, O2, N2, N2O, and CCl4 fit using Wilke and Chang
% (1955) as adapted by Hayduk and Laudie (1974); SF6 measured by King and Saltzman
% (1995); DMS measured by Saltzman et al. (1993); CFC-11 and CFC-12 measured by
% Zheng et al. (1998); CH3Br measured by De Bruyn and Saltzman (1997a).
%
%
% REFERENCE:
% He, Ne, Kr, Xe, CH4, CO2, H2 freshwater values from Jahne et al., 1987.
% "Measurement of Diffusion Coeffients of Sparingly Soluble Gases in Water"
% J. Geophys. Res., 92(C10), 10767-10776.
% Ar freshwaters values are extrapolated from Jahne et al. 1987
% He, Ne, Kr, Xe values at each temperature were fitted to D vs. mass^-0.5
% relationship to predict Ar at those temperatures, then Ar was fit to a
% ln(D_Ar) vs. 1/T(K) relationship to obtain Eyring equation coefficients
% O2 and N2 freshwater values from Ferrell and Himmelblau, 1967.
% "Diffusion coefficients of nitrogen and oxygen in water"
% J. Chem. Eng. Data, 12(1), 111-115, doi: 10.1021/je60032a036.
% Correction for salinity is based on Jahne's observed average 4.9% decrease in
% diffusivity for H2 and He in 35.5 ppt NaCl solution
%
% for Ne, the Jahne values compare well with and fall between those of
% Wise and Houghton 1968 and Holz et al. 1994
% for Ar, the extrapolated Jahne values compare well with Wise and Houghton 1968,
% O'Brien and Hyslop 1977, and a numerical simulation by Bourg et al. 2008
% but are higher than other reported values
% for Kr, the Jahne values compare well with Wise and Houghton 1968,
% and a numerical simulation by Bourg et al. 2008
% for Xe, the Jahne values compare well with Pollack 1981, and a numerical
% simulation by Bourg et al. 2008, but fall significantly above Wise and Houghton 1968
% and below Weingartner et al. 1992
% for O2, there is general agreement among measurements. The Ferrell and Himmelblau values
% agree reasonably well with Baird and Davidson 1962, Wise and Houghton 1966,
% Duda and Vrentas 1968, O'Brien and Hyslop 1977, and the Wilke and Chang (1955) theory
% as tabulated by Wanninkhof 1992, but lie below Krieger et al 1967
% for N2, there is less agreement. The Ferrell and Himmelblau values
% agree reasonably well with Baird and Davidson 1962, O'Brien and Hyslop 1977,
% and the Wilke and Chang (1955) theory as tabulated by Wanninkhof 1992,
% but lie significantly below the values of Wise and Houghton 1966 and Krieger et al 1967
% for He, I did not investigate comparisons of data, but chose Jahne
% since their work for other gases appears to be the best
% for CO2, CH4 and H2: Jahne 1987
%
%
%
% DISCLAIMER:
% This software is provided "as is" without warranty of any kind.
%=========================================================================
"""
from __future__ import division
import numpy as np
from numpy.polynomial.polynomial import polyval
from ._utilities import match_args_return
from gasex.phys import R as R
from gasex.phys import visc as visc
# Currently supported gases
# TODO: find N2O, CO diffusivities
GAS_LIST = ('HE','NE','AR','KR','XE','N2','O2','CH4','H2','CO2')
@match_args_return
def diff(SP,pt,*,gas=None):
"""
DESCRIPTION
-----------
Diffusion coefficients of various gases in fresh/sea water
PARAMETERS
-----------
SP = practical salinity [PSS-78]
pt = potential temperature [degree C]
    gas = 'He','Ne','Ar','Kr','Xe','N2','O2','CH4','H2' or 'CO2'
OUTPUT:
D = diffusion coefficient [m^2 s-1]
"""
g_up = gas.upper()
if g_up not in GAS_LIST:
raise ValueError("gas: must be one of ", GAS_LIST)
AEa_dict = {'O2': (4.286e-6, 18700),\
'HE': (0.8180e-6, 11700),\
'NE': (1.6080e-6, 14840),\
'AR': (2.227e-6, 16680),\
'KR': (6.3930e-6, 20200),\
'XE': (9.0070e-6, 21610),\
'N2': (3.4120e-6, 18500),\
'CH4':(3.0470e-6, 18360),\
'CO2':(5.0190e-6, 19510),\
'H2': (3.3380e-6, 16060)}
if g_up in AEa_dict.keys():
#freshwater diffusivity
AEa = AEa_dict[g_up]
D0 = AEa[0] * np.exp(-AEa[1] / (R * (pt+273.15)))
#salinity correction
D = D0 * (1 - 0.049 * SP / 35.5)
else:
raise ValueError("gas: must be one of ", AEa_dict.keys())
return D
@match_args_return
def schmidt(SP,pt,*,gas=None):
g_up = gas.upper()
if g_up not in GAS_LIST:
raise ValueError("gas", g_up, " does not match one of ", GAS_LIST)
Sc = visc(SP,pt) / diff(SP,pt,gas=gas)
return Sc
@match_args_return
def schmidt_W14(pt,*,gas=None,sw=True):
"""Schmidt number @ 35 psu based on Wanninkhof 2014 Table 1
Args:
pt ([array like]): potential temperature [degree C]
gas ([string]): abbreviation for gas. Defaults to None.
        sw (bool, optional): if True, calculates for SP = 35; if False,
        calculates for fresh water. Defaults to True.
    Raises:
        ValueError: if gas does not match one of the supported W14 gases
    Returns:
        array like: Schmidt number [dimensionless]
"""
W14_LIST = ('CO2','N2O','CH4','RN','SF6','DMS','CFC12','CFC11','CH3BR','CCL4')
g_up = gas.upper()
if sw:
A_dict = {'CO2': (2116.8,-136.25,4.7353,-0.092307,0.0007555 ),\
'N2O': (2356.2,-166.38,6.3952,-0.13422,0.0011506 ),\
'CH4':(2101.2,-131.54,4.4931,-0.08676,0.00070663),
'RN': (3489.6,-244.56,8.9713,-0.18022,0.0014985 ),
'SF6':(3177.5,-200.57,6.8865,-0.13335,0.0010877 ),
'DMS':(2855.7,-177.63,6.0438,-0.11645,0.00094743),
'CFC12':(3828.1,-249.86, 8.7603, -0.1716, 0.001408 ),
'CFC11':(3579.2, -222.63, 7.5749, -0.14595, 0.0011874 ),
'CH3BR':(2181.8, -138.4, 4.7663, -0.092448, 0.0007547 ),
'CCL4': (4398.7, -308.25, 11.798, -0.24709, 0.0021159) }
else:
A_dict = {'CO2': (1923.6, -125.06, 4.3773, -0.085681, 0.00070284 ),\
'N2O': (2141.2, -152.56, 5.8963, -0.12411, 0.0010655 ),\
'CH4':(1909.4, -120.78, 4.1555, -0.080578, 0.00065777),
'RN': (3171, -224.28, 8.2809, -0.16699, 0.0013915 ),
'SF6':(3035, -196.35, 6.851, -0.13387, 0.0010972 ),
'DMS':(2595, -163.12, 5.5902, -0.10817, 0.00088204),
'CFC12':(3478.6, -229.32, 8.0961, -0.15923, 0.0013095 ),
'CFC11':(3460, -217.49, 7.4537, -0.14423, 0.0011761 ),
'CH3BR':(2109.2, -135.17, 4.6884, -0.091317, 0.00074715 ),
'CCL4': (3997.2, -282.69, 10.88, -0.22855, 0.0019605) }
if g_up in A_dict.keys():
A = A_dict[g_up]
else:
raise ValueError("gas", g_up, " does not match one of ", A_dict.keys())
Sc = polyval(pt,A)
return Sc
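# Minimal usage sketch (illustrative only, not part of the published module).
# It assumes gasex.phys.visc is importable as above; the numbers it would
# return are whatever the Eyring fits and W14 polynomials above imply.
#
#   D_ar = diff(35.0, 20.0, gas='Ar')      # m^2 s-1, with salinity correction
#   Sc_ar = schmidt(35.0, 20.0, gas='Ar')  # visc(SP,pt) / D
#   Sc_co2 = schmidt_W14(20.0, gas='CO2')  # Wanninkhof 2014 fit at SP = 35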
| 43.814208
| 99
| 0.607383
|
from __future__ import division
import numpy as np
from numpy.polynomial.polynomial import polyval
from ._utilities import match_args_return
from gasex.phys import R as R
from gasex.phys import visc as visc
GAS_LIST = ('HE','NE','AR','KR','XE','N2','O2','CH4','H2','CO2')
@match_args_return
def diff(SP,pt,*,gas=None):
g_up = gas.upper()
if g_up not in GAS_LIST:
raise ValueError("gas: must be one of ", GAS_LIST)
AEa_dict = {'O2': (4.286e-6, 18700),\
'HE': (0.8180e-6, 11700),\
'NE': (1.6080e-6, 14840),\
'AR': (2.227e-6, 16680),\
'KR': (6.3930e-6, 20200),\
'XE': (9.0070e-6, 21610),\
'N2': (3.4120e-6, 18500),\
'CH4':(3.0470e-6, 18360),\
'CO2':(5.0190e-6, 19510),\
'H2': (3.3380e-6, 16060)}
if g_up in AEa_dict.keys():
AEa = AEa_dict[g_up]
D0 = AEa[0] * np.exp(-AEa[1] / (R * (pt+273.15)))
D = D0 * (1 - 0.049 * SP / 35.5)
else:
raise ValueError("gas: must be one of ", AEa_dict.keys())
return D
@match_args_return
def schmidt(SP,pt,*,gas=None):
g_up = gas.upper()
if g_up not in GAS_LIST:
raise ValueError("gas", g_up, " does not match one of ", GAS_LIST)
Sc = visc(SP,pt) / diff(SP,pt,gas=gas)
return Sc
@match_args_return
def schmidt_W14(pt,*,gas=None,sw=True):
W14_LIST = ('CO2','N2O','CH4','RN','SF6','DMS','CFC12','CFC11','CH3BR','CCL4')
g_up = gas.upper()
if sw:
A_dict = {'CO2': (2116.8,-136.25,4.7353,-0.092307,0.0007555 ),\
'N2O': (2356.2,-166.38,6.3952,-0.13422,0.0011506 ),\
'CH4':(2101.2,-131.54,4.4931,-0.08676,0.00070663),
'RN': (3489.6,-244.56,8.9713,-0.18022,0.0014985 ),
'SF6':(3177.5,-200.57,6.8865,-0.13335,0.0010877 ),
'DMS':(2855.7,-177.63,6.0438,-0.11645,0.00094743),
'CFC12':(3828.1,-249.86, 8.7603, -0.1716, 0.001408 ),
'CFC11':(3579.2, -222.63, 7.5749, -0.14595, 0.0011874 ),
'CH3BR':(2181.8, -138.4, 4.7663, -0.092448, 0.0007547 ),
'CCL4': (4398.7, -308.25, 11.798, -0.24709, 0.0021159) }
else:
A_dict = {'CO2': (1923.6, -125.06, 4.3773, -0.085681, 0.00070284 ),\
'N2O': (2141.2, -152.56, 5.8963, -0.12411, 0.0010655 ),\
'CH4':(1909.4, -120.78, 4.1555, -0.080578, 0.00065777),
'RN': (3171, -224.28, 8.2809, -0.16699, 0.0013915 ),
'SF6':(3035, -196.35, 6.851, -0.13387, 0.0010972 ),
'DMS':(2595, -163.12, 5.5902, -0.10817, 0.00088204),
'CFC12':(3478.6, -229.32, 8.0961, -0.15923, 0.0013095 ),
'CFC11':(3460, -217.49, 7.4537, -0.14423, 0.0011761 ),
'CH3BR':(2109.2, -135.17, 4.6884, -0.091317, 0.00074715 ),
'CCL4': (3997.2, -282.69, 10.88, -0.22855, 0.0019605) }
if g_up in A_dict.keys():
A = A_dict[g_up]
else:
raise ValueError("gas", g_up, " does not match one of ", A_dict.keys())
Sc = polyval(pt,A)
return Sc
| true
| true
|
f7148669ff553acc8e2aab92e20c57cfaa89f4d4
| 952
|
py
|
Python
|
testing/functional_tests/test_convert_to_onnx.py
|
cakester/ivadomed
|
321a91c7e3c82e6296764895e39695b04a80c8af
|
[
"MIT"
] | null | null | null |
testing/functional_tests/test_convert_to_onnx.py
|
cakester/ivadomed
|
321a91c7e3c82e6296764895e39695b04a80c8af
|
[
"MIT"
] | 6
|
2021-03-24T16:23:29.000Z
|
2021-04-08T15:22:53.000Z
|
testing/functional_tests/test_convert_to_onnx.py
|
cakester/ivadomed
|
321a91c7e3c82e6296764895e39695b04a80c8af
|
[
"MIT"
] | null | null | null |
import logging
import pytest
import os
from functional_tests.t_utils import remove_tmp_dir, create_tmp_dir, __data_testing_dir__
from ivadomed.scripts import convert_to_onnx
from ivadomed.utils import ArgParseException
logger = logging.getLogger(__name__)
__model_path__ = os.path.join(__data_testing_dir__, 'spinegeneric_model.pt')
def setup_function():
create_tmp_dir()
def test_convert_to_onnx():
convert_to_onnx.main(args=['-m', f'{__model_path__}', '-d', '2'])
assert os.path.exists(os.path.join(__data_testing_dir__, 'spinegeneric_model.onnx'))
def test_convert_to_onnx_no_model():
with pytest.raises(ArgParseException, match=r"Error parsing args"):
convert_to_onnx.main(args=['-d', '2'])
def test_convert_to_onnx_no_dimension():
with pytest.raises(ArgParseException, match=r"Error parsing args"):
convert_to_onnx.main(args=['-m', f'{__model_path__}'])
def teardown_function():
remove_tmp_dir()
| 28.848485
| 89
| 0.765756
|
import logging
import pytest
import os
from functional_tests.t_utils import remove_tmp_dir, create_tmp_dir, __data_testing_dir__
from ivadomed.scripts import convert_to_onnx
from ivadomed.utils import ArgParseException
logger = logging.getLogger(__name__)
__model_path__ = os.path.join(__data_testing_dir__, 'spinegeneric_model.pt')
def setup_function():
create_tmp_dir()
def test_convert_to_onnx():
convert_to_onnx.main(args=['-m', f'{__model_path__}', '-d', '2'])
assert os.path.exists(os.path.join(__data_testing_dir__, 'spinegeneric_model.onnx'))
def test_convert_to_onnx_no_model():
with pytest.raises(ArgParseException, match=r"Error parsing args"):
convert_to_onnx.main(args=['-d', '2'])
def test_convert_to_onnx_no_dimension():
with pytest.raises(ArgParseException, match=r"Error parsing args"):
convert_to_onnx.main(args=['-m', f'{__model_path__}'])
def teardown_function():
remove_tmp_dir()
| true
| true
|
f714889560de33dc5cf5c279055a50f15365c010
| 6,460
|
py
|
Python
|
courses/machine_learning/deepdive/05_artandscience/simplernn/trainer/model.py
|
alixhami/training-data-analyst
|
826a9270e784a64ea3bd62c34689518280df71a8
|
[
"Apache-2.0"
] | 1
|
2019-02-12T21:40:03.000Z
|
2019-02-12T21:40:03.000Z
|
courses/machine_learning/deepdive/05_artandscience/simplernn/trainer/model.py
|
alixhami/training-data-analyst
|
826a9270e784a64ea3bd62c34689518280df71a8
|
[
"Apache-2.0"
] | 11
|
2020-01-28T22:39:44.000Z
|
2022-03-11T23:42:53.000Z
|
courses/machine_learning/deepdive/05_artandscience/simplernn/trainer/model.py
|
vega42/GoogleCloudPlatform-training-data-analyst
|
3eb60cb6c8b55fd7f38414c1082da36b8e62558e
|
[
"Apache-2.0"
] | 1
|
2021-01-15T10:20:27.000Z
|
2021-01-15T10:20:27.000Z
|
#!/usr/bin/env python3
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
import tensorflow.contrib.metrics as metrics
import tensorflow.contrib.rnn as rnn
tf.logging.set_verbosity(tf.logging.INFO)
SEQ_LEN = 10
DEFAULTS = [[0.0] for x in range(0, SEQ_LEN)]
BATCH_SIZE = 20
TIMESERIES_INPUT_LAYER = 'rawdata'
TIMESERIES_COL = '{}_input'.format(TIMESERIES_INPUT_LAYER)
# In each sequence, column index 0 to N_INPUTS - 1 are features, and column index N_INPUTS to SEQ_LEN are labels
N_OUTPUTS = 1
N_INPUTS = SEQ_LEN - N_OUTPUTS
LSTM_SIZE = 3  # number of hidden units in the LSTM cell
# Read data and convert to needed format
def read_dataset(filename, mode, batch_size):
def _input_fn():
# Provide the ability to decode a CSV
def decode_csv(line):
# all_data is a list of scalar tensors
all_data = tf.decode_csv(line, record_defaults = DEFAULTS)
inputs = all_data[:len(all_data) - N_OUTPUTS] # first N_INPUTS values
labels = all_data[len(all_data) - N_OUTPUTS:] # last N_OUTPUTS values
# Convert each list of rank R tensors to one rank R+1 tensor
inputs = tf.stack(inputs, axis = 0)
labels = tf.stack(labels, axis = 0)
# Convert input R+1 tensor into a feature dictionary of one R+1 tensor
features = {TIMESERIES_COL: inputs}
return features, labels
# Create list of files that match pattern
file_list = tf.gfile.Glob(filename)
# Create dataset from file list
dataset = tf.data.TextLineDataset(file_list).map(decode_csv)
if mode == tf.estimator.ModeKeys.TRAIN:
num_epochs = None # indefinitely
dataset = dataset.shuffle(buffer_size = 10 * batch_size)
else:
num_epochs = 1 # end-of-input after this
dataset = dataset.repeat(num_epochs).batch(batch_size)
iterator = dataset.make_one_shot_iterator()
batch_features, batch_labels = iterator.get_next()
return batch_features, batch_labels
return _input_fn
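# Illustrative note (not part of the original trainer): each CSV row holds
# SEQ_LEN = 10 floats, so a line like "0.0,0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9"
# decodes into N_INPUTS = 9 feature values plus N_OUTPUTS = 1 label value.
# A hypothetical call would look like:
#   input_fn = read_dataset('train.csv', tf.estimator.ModeKeys.TRAIN, BATCH_SIZE)
#   features, labels = input_fn()
#   # features[TIMESERIES_COL]: [batch, 9], labels: [batch, 1]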
# Create inference model using Keras
# The model here is a dnn regressor
def make_keras_estimator(output_dir):
from tensorflow import keras
model = keras.models.Sequential()
model.add(keras.layers.Dense(32, input_shape=(N_INPUTS,), name=TIMESERIES_INPUT_LAYER))
model.add(keras.layers.Activation('relu'))
model.add(keras.layers.Dense(1))
model.compile(loss = 'mean_squared_error',
optimizer = 'adam',
metrics = ['mae', 'mape']) # mean absolute [percentage] error
return keras.estimator.model_to_estimator(model, model_dir=output_dir)
# Create the inference model
def simple_rnn(features, labels, mode):
# 0. Reformat input shape to become a sequence
x = tf.split(features[TIMESERIES_COL], N_INPUTS, 1)
# 1. Configure the RNN
lstm_cell = rnn.BasicLSTMCell(LSTM_SIZE, forget_bias = 1.0)
outputs, _ = rnn.static_rnn(lstm_cell, x, dtype = tf.float32)
# Slice to keep only the last cell of the RNN
outputs = outputs[-1]
#print('last outputs={}'.format(outputs))
# Output is result of linear activation of last layer of RNN
weight = tf.Variable(tf.random_normal([LSTM_SIZE, N_OUTPUTS]))
bias = tf.Variable(tf.random_normal([N_OUTPUTS]))
predictions = tf.matmul(outputs, weight) + bias
# 2. Loss function, training/eval ops
if mode == tf.estimator.ModeKeys.TRAIN or mode == tf.estimator.ModeKeys.EVAL:
loss = tf.losses.mean_squared_error(labels, predictions)
train_op = tf.contrib.layers.optimize_loss(
loss = loss,
global_step = tf.train.get_global_step(),
learning_rate = 0.01,
optimizer = "SGD")
eval_metric_ops = {
"rmse": tf.metrics.root_mean_squared_error(labels, predictions)
}
else:
loss = None
train_op = None
eval_metric_ops = None
# 3. Create predictions
predictions_dict = {"predicted": predictions}
# 4. Create export outputs
export_outputs = {"predict_export_outputs": tf.estimator.export.PredictOutput(outputs = predictions)}
    # 5. Return EstimatorSpec
return tf.estimator.EstimatorSpec(
mode = mode,
predictions = predictions_dict,
loss = loss,
train_op = train_op,
eval_metric_ops = eval_metric_ops,
export_outputs = export_outputs)
# Create serving input function
def serving_input_fn():
feature_placeholders = {
TIMESERIES_COL: tf.placeholder(tf.float32, [None, N_INPUTS])
}
features = {
key: tf.expand_dims(tensor, -1)
for key, tensor in feature_placeholders.items()
}
features[TIMESERIES_COL] = tf.squeeze(features[TIMESERIES_COL], axis = [2])
return tf.estimator.export.ServingInputReceiver(features, feature_placeholders)
# Create custom estimator's train and evaluate function
def train_and_evaluate(output_dir, use_keras):
if use_keras:
estimator = make_keras_estimator(output_dir)
else:
estimator = tf.estimator.Estimator(model_fn = simple_rnn,
model_dir = output_dir)
train_spec = tf.estimator.TrainSpec(read_dataset('train.csv',
tf.estimator.ModeKeys.TRAIN,
512),
max_steps = 1000)
exporter = tf.estimator.LatestExporter('exporter', serving_input_fn)
eval_spec = tf.estimator.EvalSpec(read_dataset('valid.csv',
tf.estimator.ModeKeys.EVAL,
512),
steps = None,
exporters = exporter)
tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec)
| 39.151515
| 112
| 0.657276
|
import tensorflow as tf
import tensorflow.contrib.metrics as metrics
import tensorflow.contrib.rnn as rnn
tf.logging.set_verbosity(tf.logging.INFO)
SEQ_LEN = 10
DEFAULTS = [[0.0] for x in range(0, SEQ_LEN)]
BATCH_SIZE = 20
TIMESERIES_INPUT_LAYER = 'rawdata'
TIMESERIES_COL = '{}_input'.format(TIMESERIES_INPUT_LAYER)
N_OUTPUTS = 1
N_INPUTS = SEQ_LEN - N_OUTPUTS
LSTM_SIZE = 3
def read_dataset(filename, mode, batch_size):
def _input_fn():
def decode_csv(line):
all_data = tf.decode_csv(line, record_defaults = DEFAULTS)
inputs = all_data[:len(all_data) - N_OUTPUTS]
labels = all_data[len(all_data) - N_OUTPUTS:]
inputs = tf.stack(inputs, axis = 0)
labels = tf.stack(labels, axis = 0)
features = {TIMESERIES_COL: inputs}
return features, labels
file_list = tf.gfile.Glob(filename)
dataset = tf.data.TextLineDataset(file_list).map(decode_csv)
if mode == tf.estimator.ModeKeys.TRAIN:
num_epochs = None
dataset = dataset.shuffle(buffer_size = 10 * batch_size)
else:
num_epochs = 1
dataset = dataset.repeat(num_epochs).batch(batch_size)
iterator = dataset.make_one_shot_iterator()
batch_features, batch_labels = iterator.get_next()
return batch_features, batch_labels
return _input_fn
def make_keras_estimator(output_dir):
from tensorflow import keras
model = keras.models.Sequential()
model.add(keras.layers.Dense(32, input_shape=(N_INPUTS,), name=TIMESERIES_INPUT_LAYER))
model.add(keras.layers.Activation('relu'))
model.add(keras.layers.Dense(1))
model.compile(loss = 'mean_squared_error',
optimizer = 'adam',
metrics = ['mae', 'mape'])
return keras.estimator.model_to_estimator(model, model_dir=output_dir)
def simple_rnn(features, labels, mode):
x = tf.split(features[TIMESERIES_COL], N_INPUTS, 1)
lstm_cell = rnn.BasicLSTMCell(LSTM_SIZE, forget_bias = 1.0)
outputs, _ = rnn.static_rnn(lstm_cell, x, dtype = tf.float32)
outputs = outputs[-1]
weight = tf.Variable(tf.random_normal([LSTM_SIZE, N_OUTPUTS]))
bias = tf.Variable(tf.random_normal([N_OUTPUTS]))
predictions = tf.matmul(outputs, weight) + bias
if mode == tf.estimator.ModeKeys.TRAIN or mode == tf.estimator.ModeKeys.EVAL:
loss = tf.losses.mean_squared_error(labels, predictions)
train_op = tf.contrib.layers.optimize_loss(
loss = loss,
global_step = tf.train.get_global_step(),
learning_rate = 0.01,
optimizer = "SGD")
eval_metric_ops = {
"rmse": tf.metrics.root_mean_squared_error(labels, predictions)
}
else:
loss = None
train_op = None
eval_metric_ops = None
predictions_dict = {"predicted": predictions}
export_outputs = {"predict_export_outputs": tf.estimator.export.PredictOutput(outputs = predictions)}
return tf.estimator.EstimatorSpec(
mode = mode,
predictions = predictions_dict,
loss = loss,
train_op = train_op,
eval_metric_ops = eval_metric_ops,
export_outputs = export_outputs)
def serving_input_fn():
feature_placeholders = {
TIMESERIES_COL: tf.placeholder(tf.float32, [None, N_INPUTS])
}
features = {
key: tf.expand_dims(tensor, -1)
for key, tensor in feature_placeholders.items()
}
features[TIMESERIES_COL] = tf.squeeze(features[TIMESERIES_COL], axis = [2])
return tf.estimator.export.ServingInputReceiver(features, feature_placeholders)
def train_and_evaluate(output_dir, use_keras):
if use_keras:
estimator = make_keras_estimator(output_dir)
else:
estimator = tf.estimator.Estimator(model_fn = simple_rnn,
model_dir = output_dir)
train_spec = tf.estimator.TrainSpec(read_dataset('train.csv',
tf.estimator.ModeKeys.TRAIN,
512),
max_steps = 1000)
exporter = tf.estimator.LatestExporter('exporter', serving_input_fn)
eval_spec = tf.estimator.EvalSpec(read_dataset('valid.csv',
tf.estimator.ModeKeys.EVAL,
512),
steps = None,
exporters = exporter)
tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec)
| true
| true
|
f71488cdf51ba686efdcf99b240b487d5d66ed67
| 16,474
|
py
|
Python
|
env/lib/python2.7/site-packages/pip/cmdoptions.py
|
lindamar/ecclesi
|
cad07fc78daf6facd1b74cc1cb1872aaf4771fa2
|
[
"MIT"
] | 168
|
2015-05-29T13:56:01.000Z
|
2022-02-17T07:38:17.000Z
|
env/lib/python2.7/site-packages/pip/cmdoptions.py
|
lindamar/ecclesi
|
cad07fc78daf6facd1b74cc1cb1872aaf4771fa2
|
[
"MIT"
] | 3,243
|
2017-02-07T15:30:01.000Z
|
2022-03-31T16:42:19.000Z
|
env/lib/python2.7/site-packages/pip/cmdoptions.py
|
lindamar/ecclesi
|
cad07fc78daf6facd1b74cc1cb1872aaf4771fa2
|
[
"MIT"
] | 210
|
2017-09-01T00:10:08.000Z
|
2022-03-19T18:05:12.000Z
|
"""
shared options and groups
The principle here is to define options once, but *not* instantiate them
globally. One reason being that options with action='append' can carry state
between parses. pip parses general options twice internally, and shouldn't
pass on state. To be consistent, all options will follow this design.
"""
from __future__ import absolute_import
from functools import partial
from optparse import OptionGroup, SUPPRESS_HELP, Option
import warnings
from pip.index import (
FormatControl, fmt_ctl_handle_mutual_exclude, fmt_ctl_no_binary,
fmt_ctl_no_use_wheel)
from pip.models import PyPI
from pip.locations import USER_CACHE_DIR, src_prefix
from pip.utils.hashes import STRONG_HASHES
def make_option_group(group, parser):
"""
Return an OptionGroup object
group -- assumed to be dict with 'name' and 'options' keys
parser -- an optparse Parser
"""
option_group = OptionGroup(parser, group['name'])
for option in group['options']:
option_group.add_option(option())
return option_group
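# Illustrative usage sketch (hypothetical, not from pip itself): a group dict
# of the shape described above, built from the partial-wrapped options below.
#
#   from optparse import OptionParser
#   parser = OptionParser()
#   general = {'name': 'General Options', 'options': [verbose, quiet]}
#   parser.add_option_group(make_option_group(general, parser))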
def resolve_wheel_no_use_binary(options):
if not options.use_wheel:
control = options.format_control
fmt_ctl_no_use_wheel(control)
def check_install_build_global(options, check_options=None):
"""Disable wheels if per-setup.py call options are set.
:param options: The OptionParser options to update.
:param check_options: The options to check, if not supplied defaults to
options.
"""
if check_options is None:
check_options = options
def getname(n):
return getattr(check_options, n, None)
names = ["build_options", "global_options", "install_options"]
if any(map(getname, names)):
control = options.format_control
fmt_ctl_no_binary(control)
warnings.warn(
'Disabling all use of wheels due to the use of --build-options '
'/ --global-options / --install-options.', stacklevel=2)
###########
# options #
###########
help_ = partial(
Option,
'-h', '--help',
dest='help',
action='help',
help='Show help.')
isolated_mode = partial(
Option,
"--isolated",
dest="isolated_mode",
action="store_true",
default=False,
help=(
"Run pip in an isolated mode, ignoring environment variables and user "
"configuration."
),
)
require_virtualenv = partial(
Option,
# Run only if inside a virtualenv, bail if not.
'--require-virtualenv', '--require-venv',
dest='require_venv',
action='store_true',
default=False,
help=SUPPRESS_HELP)
verbose = partial(
Option,
'-v', '--verbose',
dest='verbose',
action='count',
default=0,
help='Give more output. Option is additive, and can be used up to 3 times.'
)
version = partial(
Option,
'-V', '--version',
dest='version',
action='store_true',
help='Show version and exit.')
quiet = partial(
Option,
'-q', '--quiet',
dest='quiet',
action='count',
default=0,
help=('Give less output. Option is additive, and can be used up to 3'
' times (corresponding to WARNING, ERROR, and CRITICAL logging'
' levels).')
)
log = partial(
Option,
"--log", "--log-file", "--local-log",
dest="log",
metavar="path",
help="Path to a verbose appending log."
)
no_input = partial(
Option,
# Don't ask for input
'--no-input',
dest='no_input',
action='store_true',
default=False,
help=SUPPRESS_HELP)
proxy = partial(
Option,
'--proxy',
dest='proxy',
type='str',
default='',
help="Specify a proxy in the form [user:passwd@]proxy.server:port.")
retries = partial(
Option,
'--retries',
dest='retries',
type='int',
default=5,
help="Maximum number of retries each connection should attempt "
"(default %default times).")
timeout = partial(
Option,
'--timeout', '--default-timeout',
metavar='sec',
dest='timeout',
type='float',
default=15,
help='Set the socket timeout (default %default seconds).')
default_vcs = partial(
Option,
# The default version control system for editables, e.g. 'svn'
'--default-vcs',
dest='default_vcs',
type='str',
default='',
help=SUPPRESS_HELP)
skip_requirements_regex = partial(
Option,
# A regex to be used to skip requirements
'--skip-requirements-regex',
dest='skip_requirements_regex',
type='str',
default='',
help=SUPPRESS_HELP)
def exists_action():
return Option(
        # Option when path already exists
'--exists-action',
dest='exists_action',
type='choice',
choices=['s', 'i', 'w', 'b', 'a'],
default=[],
action='append',
metavar='action',
help="Default action when a path already exists: "
"(s)witch, (i)gnore, (w)ipe, (b)ackup, (a)bort.")
cert = partial(
Option,
'--cert',
dest='cert',
type='str',
metavar='path',
help="Path to alternate CA bundle.")
client_cert = partial(
Option,
'--client-cert',
dest='client_cert',
type='str',
default=None,
metavar='path',
help="Path to SSL client certificate, a single file containing the "
"private key and the certificate in PEM format.")
index_url = partial(
Option,
'-i', '--index-url', '--pypi-url',
dest='index_url',
metavar='URL',
default=PyPI.simple_url,
help="Base URL of Python Package Index (default %default). "
"This should point to a repository compliant with PEP 503 "
"(the simple repository API) or a local directory laid out "
"in the same format.")
def extra_index_url():
return Option(
'--extra-index-url',
dest='extra_index_urls',
metavar='URL',
action='append',
default=[],
help="Extra URLs of package indexes to use in addition to "
"--index-url. Should follow the same rules as "
"--index-url."
)
no_index = partial(
Option,
'--no-index',
dest='no_index',
action='store_true',
default=False,
help='Ignore package index (only looking at --find-links URLs instead).')
def find_links():
return Option(
'-f', '--find-links',
dest='find_links',
action='append',
default=[],
metavar='url',
help="If a url or path to an html file, then parse for links to "
"archives. If a local path or file:// url that's a directory, "
"then look for archives in the directory listing.")
def allow_external():
return Option(
"--allow-external",
dest="allow_external",
action="append",
default=[],
metavar="PACKAGE",
help=SUPPRESS_HELP,
)
allow_all_external = partial(
Option,
"--allow-all-external",
dest="allow_all_external",
action="store_true",
default=False,
help=SUPPRESS_HELP,
)
def trusted_host():
return Option(
"--trusted-host",
dest="trusted_hosts",
action="append",
metavar="HOSTNAME",
default=[],
help="Mark this host as trusted, even though it does not have valid "
"or any HTTPS.",
)
# Remove after 7.0
no_allow_external = partial(
Option,
"--no-allow-external",
dest="allow_all_external",
action="store_false",
default=False,
help=SUPPRESS_HELP,
)
# Remove --allow-insecure after 7.0
def allow_unsafe():
return Option(
"--allow-unverified", "--allow-insecure",
dest="allow_unverified",
action="append",
default=[],
metavar="PACKAGE",
help=SUPPRESS_HELP,
)
# Remove after 7.0
no_allow_unsafe = partial(
Option,
"--no-allow-insecure",
dest="allow_all_insecure",
action="store_false",
default=False,
help=SUPPRESS_HELP
)
# Remove after 1.5
process_dependency_links = partial(
Option,
"--process-dependency-links",
dest="process_dependency_links",
action="store_true",
default=False,
help="Enable the processing of dependency links.",
)
def constraints():
return Option(
'-c', '--constraint',
dest='constraints',
action='append',
default=[],
metavar='file',
help='Constrain versions using the given constraints file. '
'This option can be used multiple times.')
def requirements():
return Option(
'-r', '--requirement',
dest='requirements',
action='append',
default=[],
metavar='file',
help='Install from the given requirements file. '
'This option can be used multiple times.')
def editable():
return Option(
'-e', '--editable',
dest='editables',
action='append',
default=[],
metavar='path/url',
help=('Install a project in editable mode (i.e. setuptools '
'"develop mode") from a local project path or a VCS url.'),
)
src = partial(
Option,
'--src', '--source', '--source-dir', '--source-directory',
dest='src_dir',
metavar='dir',
default=src_prefix,
help='Directory to check out editable projects into. '
'The default in a virtualenv is "<venv path>/src". '
'The default for global installs is "<current dir>/src".'
)
# XXX: deprecated, remove in 9.0
use_wheel = partial(
Option,
'--use-wheel',
dest='use_wheel',
action='store_true',
default=True,
help=SUPPRESS_HELP,
)
# XXX: deprecated, remove in 9.0
no_use_wheel = partial(
Option,
'--no-use-wheel',
dest='use_wheel',
action='store_false',
default=True,
    help=('Do not find and prefer wheel archives when searching indexes and '
'find-links locations. DEPRECATED in favour of --no-binary.'),
)
def _get_format_control(values, option):
"""Get a format_control object."""
return getattr(values, option.dest)
def _handle_no_binary(option, opt_str, value, parser):
existing = getattr(parser.values, option.dest)
fmt_ctl_handle_mutual_exclude(
value, existing.no_binary, existing.only_binary)
def _handle_only_binary(option, opt_str, value, parser):
existing = getattr(parser.values, option.dest)
fmt_ctl_handle_mutual_exclude(
value, existing.only_binary, existing.no_binary)
def no_binary():
return Option(
"--no-binary", dest="format_control", action="callback",
callback=_handle_no_binary, type="str",
default=FormatControl(set(), set()),
help="Do not use binary packages. Can be supplied multiple times, and "
"each time adds to the existing value. Accepts either :all: to "
"disable all binary packages, :none: to empty the set, or one or "
"more package names with commas between them. Note that some "
"packages are tricky to compile and may fail to install when "
"this option is used on them.")
def only_binary():
return Option(
"--only-binary", dest="format_control", action="callback",
callback=_handle_only_binary, type="str",
default=FormatControl(set(), set()),
help="Do not use source packages. Can be supplied multiple times, and "
"each time adds to the existing value. Accepts either :all: to "
"disable all source packages, :none: to empty the set, or one or "
"more package names with commas between them. Packages without "
"binary distributions will fail to install when this option is "
"used on them.")
cache_dir = partial(
Option,
"--cache-dir",
dest="cache_dir",
default=USER_CACHE_DIR,
metavar="dir",
help="Store the cache data in <dir>."
)
no_cache = partial(
Option,
"--no-cache-dir",
dest="cache_dir",
action="store_false",
help="Disable the cache.",
)
no_deps = partial(
Option,
'--no-deps', '--no-dependencies',
dest='ignore_dependencies',
action='store_true',
default=False,
help="Don't install package dependencies.")
build_dir = partial(
Option,
'-b', '--build', '--build-dir', '--build-directory',
dest='build_dir',
metavar='dir',
help='Directory to unpack packages into and build in.'
)
ignore_requires_python = partial(
Option,
'--ignore-requires-python',
dest='ignore_requires_python',
action='store_true',
help='Ignore the Requires-Python information.')
install_options = partial(
Option,
'--install-option',
dest='install_options',
action='append',
metavar='options',
help="Extra arguments to be supplied to the setup.py install "
"command (use like --install-option=\"--install-scripts=/usr/local/"
"bin\"). Use multiple --install-option options to pass multiple "
"options to setup.py install. If you are using an option with a "
"directory path, be sure to use absolute path.")
global_options = partial(
Option,
'--global-option',
dest='global_options',
action='append',
metavar='options',
help="Extra global options to be supplied to the setup.py "
"call before the install command.")
no_clean = partial(
Option,
'--no-clean',
action='store_true',
default=False,
help="Don't clean up build directories.")
pre = partial(
Option,
'--pre',
action='store_true',
default=False,
help="Include pre-release and development versions. By default, "
"pip only finds stable versions.")
disable_pip_version_check = partial(
Option,
"--disable-pip-version-check",
dest="disable_pip_version_check",
action="store_true",
default=False,
help="Don't periodically check PyPI to determine whether a new version "
"of pip is available for download. Implied with --no-index.")
# Deprecated, Remove later
always_unzip = partial(
Option,
'-Z', '--always-unzip',
dest='always_unzip',
action='store_true',
help=SUPPRESS_HELP,
)
def _merge_hash(option, opt_str, value, parser):
"""Given a value spelled "algo:digest", append the digest to a list
pointed to in a dict by the algo name."""
if not parser.values.hashes:
parser.values.hashes = {}
try:
algo, digest = value.split(':', 1)
except ValueError:
parser.error('Arguments to %s must be a hash name '
'followed by a value, like --hash=sha256:abcde...' %
opt_str)
if algo not in STRONG_HASHES:
parser.error('Allowed hash algorithms for %s are %s.' %
(opt_str, ', '.join(STRONG_HASHES)))
parser.values.hashes.setdefault(algo, []).append(digest)
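# Illustrative behaviour (not taken from the pip docs): after parsing a command
# line such as `--hash=sha256:aaaa --hash=sha256:bbbb`, the callback above
# leaves parser.values.hashes == {'sha256': ['aaaa', 'bbbb']}; any algorithm
# name outside STRONG_HASHES aborts the parse via parser.error().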
hash = partial(
Option,
'--hash',
# Hash values eventually end up in InstallRequirement.hashes due to
# __dict__ copying in process_line().
dest='hashes',
action='callback',
callback=_merge_hash,
type='string',
help="Verify that the package's archive matches this "
'hash before installing. Example: --hash=sha256:abcdef...')
require_hashes = partial(
Option,
'--require-hashes',
dest='require_hashes',
action='store_true',
default=False,
help='Require a hash to check each requirement against, for '
'repeatable installs. This option is implied when any package in a '
'requirements file has a --hash option.')
##########
# groups #
##########
general_group = {
'name': 'General Options',
'options': [
help_,
isolated_mode,
require_virtualenv,
verbose,
version,
quiet,
log,
no_input,
proxy,
retries,
timeout,
default_vcs,
skip_requirements_regex,
exists_action,
trusted_host,
cert,
client_cert,
cache_dir,
no_cache,
disable_pip_version_check,
]
}
non_deprecated_index_group = {
'name': 'Package Index Options',
'options': [
index_url,
extra_index_url,
no_index,
find_links,
process_dependency_links,
]
}
index_group = {
'name': 'Package Index Options (including deprecated options)',
'options': non_deprecated_index_group['options'] + [
allow_external,
allow_all_external,
no_allow_external,
allow_unsafe,
no_allow_unsafe,
]
}
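# --- Illustrative wiring sketch (not part of pip) ---
# A minimal example of how these shared option partials and groups are meant
# to be consumed: each entry is *called* to create a fresh Option, so state
# from a previous parse (e.g. action='append' lists) cannot leak into the next.
def _demo_build_parser():
    from optparse import OptionParser
    parser = OptionParser()
    parser.add_option(verbose())      # partials are instantiated per parser
    parser.add_option(no_cache())
    parser.add_option_group(make_option_group(index_group, parser))
    return parser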
| 25.984227
| 79
| 0.622253
|
from __future__ import absolute_import
from functools import partial
from optparse import OptionGroup, SUPPRESS_HELP, Option
import warnings
from pip.index import (
FormatControl, fmt_ctl_handle_mutual_exclude, fmt_ctl_no_binary,
fmt_ctl_no_use_wheel)
from pip.models import PyPI
from pip.locations import USER_CACHE_DIR, src_prefix
from pip.utils.hashes import STRONG_HASHES
def make_option_group(group, parser):
option_group = OptionGroup(parser, group['name'])
for option in group['options']:
option_group.add_option(option())
return option_group
def resolve_wheel_no_use_binary(options):
if not options.use_wheel:
control = options.format_control
fmt_ctl_no_use_wheel(control)
def check_install_build_global(options, check_options=None):
if check_options is None:
check_options = options
def getname(n):
return getattr(check_options, n, None)
names = ["build_options", "global_options", "install_options"]
if any(map(getname, names)):
control = options.format_control
fmt_ctl_no_binary(control)
warnings.warn(
'Disabling all use of wheels due to the use of --build-options '
'/ --global-options / --install-options.', stacklevel=2)
help_ = partial(
    Option,
    '-h', '--help',
    dest='help',
    action='help',
    help='Show help.')
isolated_mode = partial(
Option,
"--isolated",
dest="isolated_mode",
action="store_true",
default=False,
help=(
"Run pip in an isolated mode, ignoring environment variables and user "
"configuration."
),
)
require_virtualenv = partial(
Option,
'--require-virtualenv', '--require-venv',
dest='require_venv',
action='store_true',
default=False,
help=SUPPRESS_HELP)
verbose = partial(
Option,
'-v', '--verbose',
dest='verbose',
action='count',
default=0,
help='Give more output. Option is additive, and can be used up to 3 times.'
)
version = partial(
Option,
'-V', '--version',
dest='version',
action='store_true',
help='Show version and exit.')
quiet = partial(
Option,
'-q', '--quiet',
dest='quiet',
action='count',
default=0,
help=('Give less output. Option is additive, and can be used up to 3'
' times (corresponding to WARNING, ERROR, and CRITICAL logging'
' levels).')
)
log = partial(
Option,
"--log", "--log-file", "--local-log",
dest="log",
metavar="path",
help="Path to a verbose appending log."
)
no_input = partial(
Option,
'--no-input',
dest='no_input',
action='store_true',
default=False,
help=SUPPRESS_HELP)
proxy = partial(
Option,
'--proxy',
dest='proxy',
type='str',
default='',
help="Specify a proxy in the form [user:passwd@]proxy.server:port.")
retries = partial(
Option,
'--retries',
dest='retries',
type='int',
default=5,
help="Maximum number of retries each connection should attempt "
"(default %default times).")
timeout = partial(
Option,
'--timeout', '--default-timeout',
metavar='sec',
dest='timeout',
type='float',
default=15,
help='Set the socket timeout (default %default seconds).')
default_vcs = partial(
Option,
# The default version control system for editables, e.g. 'svn'
'--default-vcs',
dest='default_vcs',
type='str',
default='',
help=SUPPRESS_HELP)
skip_requirements_regex = partial(
Option,
# A regex to be used to skip requirements
'--skip-requirements-regex',
dest='skip_requirements_regex',
type='str',
default='',
help=SUPPRESS_HELP)
def exists_action():
return Option(
        # Option when path already exists
'--exists-action',
dest='exists_action',
type='choice',
choices=['s', 'i', 'w', 'b', 'a'],
default=[],
action='append',
metavar='action',
help="Default action when a path already exists: "
"(s)witch, (i)gnore, (w)ipe, (b)ackup, (a)bort.")
cert = partial(
Option,
'--cert',
dest='cert',
type='str',
metavar='path',
help="Path to alternate CA bundle.")
client_cert = partial(
Option,
'--client-cert',
dest='client_cert',
type='str',
default=None,
metavar='path',
help="Path to SSL client certificate, a single file containing the "
"private key and the certificate in PEM format.")
index_url = partial(
Option,
'-i', '--index-url', '--pypi-url',
dest='index_url',
metavar='URL',
default=PyPI.simple_url,
help="Base URL of Python Package Index (default %default). "
"This should point to a repository compliant with PEP 503 "
"(the simple repository API) or a local directory laid out "
"in the same format.")
def extra_index_url():
return Option(
'--extra-index-url',
dest='extra_index_urls',
metavar='URL',
action='append',
default=[],
help="Extra URLs of package indexes to use in addition to "
"--index-url. Should follow the same rules as "
"--index-url."
)
no_index = partial(
Option,
'--no-index',
dest='no_index',
action='store_true',
default=False,
help='Ignore package index (only looking at --find-links URLs instead).')
def find_links():
return Option(
'-f', '--find-links',
dest='find_links',
action='append',
default=[],
metavar='url',
help="If a url or path to an html file, then parse for links to "
"archives. If a local path or file:// url that's a directory, "
"then look for archives in the directory listing.")
def allow_external():
return Option(
"--allow-external",
dest="allow_external",
action="append",
default=[],
metavar="PACKAGE",
help=SUPPRESS_HELP,
)
allow_all_external = partial(
Option,
"--allow-all-external",
dest="allow_all_external",
action="store_true",
default=False,
help=SUPPRESS_HELP,
)
def trusted_host():
return Option(
"--trusted-host",
dest="trusted_hosts",
action="append",
metavar="HOSTNAME",
default=[],
help="Mark this host as trusted, even though it does not have valid "
"or any HTTPS.",
)
no_allow_external = partial(
Option,
"--no-allow-external",
dest="allow_all_external",
action="store_false",
default=False,
help=SUPPRESS_HELP,
)
def allow_unsafe():
return Option(
"--allow-unverified", "--allow-insecure",
dest="allow_unverified",
action="append",
default=[],
metavar="PACKAGE",
help=SUPPRESS_HELP,
)
no_allow_unsafe = partial(
Option,
"--no-allow-insecure",
dest="allow_all_insecure",
action="store_false",
default=False,
help=SUPPRESS_HELP
)
process_dependency_links = partial(
Option,
"--process-dependency-links",
dest="process_dependency_links",
action="store_true",
default=False,
help="Enable the processing of dependency links.",
)
def constraints():
return Option(
'-c', '--constraint',
dest='constraints',
action='append',
default=[],
metavar='file',
help='Constrain versions using the given constraints file. '
'This option can be used multiple times.')
def requirements():
return Option(
'-r', '--requirement',
dest='requirements',
action='append',
default=[],
metavar='file',
help='Install from the given requirements file. '
'This option can be used multiple times.')
def editable():
return Option(
'-e', '--editable',
dest='editables',
action='append',
default=[],
metavar='path/url',
help=('Install a project in editable mode (i.e. setuptools '
'"develop mode") from a local project path or a VCS url.'),
)
src = partial(
Option,
'--src', '--source', '--source-dir', '--source-directory',
dest='src_dir',
metavar='dir',
default=src_prefix,
help='Directory to check out editable projects into. '
'The default in a virtualenv is "<venv path>/src". '
'The default for global installs is "<current dir>/src".'
)
use_wheel = partial(
Option,
'--use-wheel',
dest='use_wheel',
action='store_true',
default=True,
help=SUPPRESS_HELP,
)
no_use_wheel = partial(
Option,
'--no-use-wheel',
dest='use_wheel',
action='store_false',
default=True,
    help=('Do not find and prefer wheel archives when searching indexes and '
'find-links locations. DEPRECATED in favour of --no-binary.'),
)
def _get_format_control(values, option):
return getattr(values, option.dest)
def _handle_no_binary(option, opt_str, value, parser):
existing = getattr(parser.values, option.dest)
fmt_ctl_handle_mutual_exclude(
value, existing.no_binary, existing.only_binary)
def _handle_only_binary(option, opt_str, value, parser):
existing = getattr(parser.values, option.dest)
fmt_ctl_handle_mutual_exclude(
value, existing.only_binary, existing.no_binary)
def no_binary():
return Option(
"--no-binary", dest="format_control", action="callback",
callback=_handle_no_binary, type="str",
default=FormatControl(set(), set()),
help="Do not use binary packages. Can be supplied multiple times, and "
"each time adds to the existing value. Accepts either :all: to "
"disable all binary packages, :none: to empty the set, or one or "
"more package names with commas between them. Note that some "
"packages are tricky to compile and may fail to install when "
"this option is used on them.")
def only_binary():
return Option(
"--only-binary", dest="format_control", action="callback",
callback=_handle_only_binary, type="str",
default=FormatControl(set(), set()),
help="Do not use source packages. Can be supplied multiple times, and "
"each time adds to the existing value. Accepts either :all: to "
"disable all source packages, :none: to empty the set, or one or "
"more package names with commas between them. Packages without "
"binary distributions will fail to install when this option is "
"used on them.")
cache_dir = partial(
Option,
"--cache-dir",
dest="cache_dir",
default=USER_CACHE_DIR,
metavar="dir",
help="Store the cache data in <dir>."
)
no_cache = partial(
Option,
"--no-cache-dir",
dest="cache_dir",
action="store_false",
help="Disable the cache.",
)
no_deps = partial(
Option,
'--no-deps', '--no-dependencies',
dest='ignore_dependencies',
action='store_true',
default=False,
help="Don't install package dependencies.")
build_dir = partial(
Option,
'-b', '--build', '--build-dir', '--build-directory',
dest='build_dir',
metavar='dir',
help='Directory to unpack packages into and build in.'
)
ignore_requires_python = partial(
Option,
'--ignore-requires-python',
dest='ignore_requires_python',
action='store_true',
help='Ignore the Requires-Python information.')
install_options = partial(
Option,
'--install-option',
dest='install_options',
action='append',
metavar='options',
help="Extra arguments to be supplied to the setup.py install "
"command (use like --install-option=\"--install-scripts=/usr/local/"
"bin\"). Use multiple --install-option options to pass multiple "
"options to setup.py install. If you are using an option with a "
"directory path, be sure to use absolute path.")
global_options = partial(
Option,
'--global-option',
dest='global_options',
action='append',
metavar='options',
help="Extra global options to be supplied to the setup.py "
"call before the install command.")
no_clean = partial(
Option,
'--no-clean',
action='store_true',
default=False,
help="Don't clean up build directories.")
pre = partial(
Option,
'--pre',
action='store_true',
default=False,
help="Include pre-release and development versions. By default, "
"pip only finds stable versions.")
disable_pip_version_check = partial(
Option,
"--disable-pip-version-check",
dest="disable_pip_version_check",
action="store_true",
default=False,
help="Don't periodically check PyPI to determine whether a new version "
"of pip is available for download. Implied with --no-index.")
# Deprecated, Remove later
always_unzip = partial(
Option,
'-Z', '--always-unzip',
dest='always_unzip',
action='store_true',
help=SUPPRESS_HELP,
)
def _merge_hash(option, opt_str, value, parser):
if not parser.values.hashes:
parser.values.hashes = {}
try:
algo, digest = value.split(':', 1)
except ValueError:
parser.error('Arguments to %s must be a hash name '
'followed by a value, like --hash=sha256:abcde...' %
opt_str)
if algo not in STRONG_HASHES:
parser.error('Allowed hash algorithms for %s are %s.' %
(opt_str, ', '.join(STRONG_HASHES)))
parser.values.hashes.setdefault(algo, []).append(digest)
hash = partial(
Option,
'--hash',
# Hash values eventually end up in InstallRequirement.hashes due to
# __dict__ copying in process_line().
dest='hashes',
action='callback',
callback=_merge_hash,
type='string',
help="Verify that the package's archive matches this "
'hash before installing. Example: --hash=sha256:abcdef...')
require_hashes = partial(
Option,
'--require-hashes',
dest='require_hashes',
action='store_true',
default=False,
help='Require a hash to check each requirement against, for '
'repeatable installs. This option is implied when any package in a '
'requirements file has a --hash option.')
general_group = {
    'name': 'General Options',
    'options': [
    help_,
    isolated_mode,
require_virtualenv,
verbose,
version,
quiet,
log,
no_input,
proxy,
retries,
timeout,
default_vcs,
skip_requirements_regex,
exists_action,
trusted_host,
cert,
client_cert,
cache_dir,
no_cache,
disable_pip_version_check,
]
}
non_deprecated_index_group = {
'name': 'Package Index Options',
'options': [
index_url,
extra_index_url,
no_index,
find_links,
process_dependency_links,
]
}
index_group = {
'name': 'Package Index Options (including deprecated options)',
'options': non_deprecated_index_group['options'] + [
allow_external,
allow_all_external,
no_allow_external,
allow_unsafe,
no_allow_unsafe,
]
}
| true
| true
|
f71489253e48c8415077de99962ccba5eff495e5
| 962
|
py
|
Python
|
xmrig_exporter/exporter.py
|
leonardochaia/xmrig_exporter
|
56cb4a6f4b4a4df3f972fe20478269f79d6da34f
|
[
"MIT"
] | 2
|
2019-09-19T00:15:20.000Z
|
2021-06-25T19:35:03.000Z
|
xmrig_exporter/exporter.py
|
leonardochaia/xmrig_exporter
|
56cb4a6f4b4a4df3f972fe20478269f79d6da34f
|
[
"MIT"
] | 1
|
2019-09-19T02:29:34.000Z
|
2019-09-19T02:41:45.000Z
|
xmrig_exporter/exporter.py
|
leonardochaia/xmrig_exporter
|
56cb4a6f4b4a4df3f972fe20478269f79d6da34f
|
[
"MIT"
] | 2
|
2019-09-19T01:36:19.000Z
|
2021-04-13T01:29:13.000Z
|
import argparse
import http.server
import logging
import sys
import prometheus_client
import xmrig_exporter
def main():
parser = argparse.ArgumentParser("Xmrig Exporter")
parser.add_argument("--port", type=int, default=9189)
parser.add_argument("--bind_address", default="0.0.0.0")
parser.add_argument("--url", required=True)
parser.add_argument("--token")
parser.add_argument("--verbose", "-v", action="count")
args = parser.parse_args()
if args.verbose:
level = logging.DEBUG
else:
level = logging.INFO
logging.basicConfig(stream=sys.stdout, level=level)
collector = xmrig_exporter.XmrigCollector(args.url, token=args.token)
prometheus_client.REGISTRY.register(collector)
handler = prometheus_client.MetricsHandler.factory(
prometheus_client.REGISTRY)
server = http.server.HTTPServer(
(args.bind_address, args.port), handler)
server.serve_forever()
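# Illustrative only (not from the project docs): main() is normally run from a
# console entry point or a guard such as
#
#   if __name__ == "__main__":
#       main()   # e.g. exporter.py --url http://127.0.0.1:44444 --port 9189 -v
#
# where the XMRig API URL and port shown here are assumptions.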
| 25.315789
| 73
| 0.702703
|
import argparse
import http.server
import logging
import sys
import prometheus_client
import xmrig_exporter
def main():
parser = argparse.ArgumentParser("Xmrig Exporter")
parser.add_argument("--port", type=int, default=9189)
parser.add_argument("--bind_address", default="0.0.0.0")
parser.add_argument("--url", required=True)
parser.add_argument("--token")
parser.add_argument("--verbose", "-v", action="count")
args = parser.parse_args()
if args.verbose:
level = logging.DEBUG
else:
level = logging.INFO
logging.basicConfig(stream=sys.stdout, level=level)
collector = xmrig_exporter.XmrigCollector(args.url, token=args.token)
prometheus_client.REGISTRY.register(collector)
handler = prometheus_client.MetricsHandler.factory(
prometheus_client.REGISTRY)
server = http.server.HTTPServer(
(args.bind_address, args.port), handler)
server.serve_forever()
| true
| true
|
f714898b0c1aae3362b96ae223d64cae5cb39b1a
| 442
|
py
|
Python
|
run_search_goods.py
|
nikolay-py/product_optimizer
|
3d7da484984e63791849ce8a12b285a1ba2daacf
|
[
"MIT"
] | null | null | null |
run_search_goods.py
|
nikolay-py/product_optimizer
|
3d7da484984e63791849ce8a12b285a1ba2daacf
|
[
"MIT"
] | 1
|
2021-07-05T13:42:19.000Z
|
2021-07-05T14:14:44.000Z
|
run_search_goods.py
|
nikolay-py/product_optimizer
|
3d7da484984e63791849ce8a12b285a1ba2daacf
|
[
"MIT"
] | null | null | null |
"""A utility for experimenting with searching."""
from project.parsers.crud import get_goods
from project.parsers.search_goods import db
from project.recipes.models import Recipe
if __name__ == "__main__":
recipe = db.query(Recipe).filter(Recipe.id == 2).first().product_list
goods_list = get_goods(recipe,10)
for inhidient in goods_list:
print('----------------------------------------------')
print(inhidient)
| 34
| 73
| 0.649321
|
from project.parsers.crud import get_goods
from project.parsers.search_goods import db
from project.recipes.models import Recipe
if __name__ == "__main__":
recipe = db.query(Recipe).filter(Recipe.id == 2).first().product_list
goods_list = get_goods(recipe,10)
for inhidient in goods_list:
print('----------------------------------------------')
print(inhidient)
| true
| true
|
f71489c3ef52e026137535b36258928677e4ed4f
| 1,562
|
py
|
Python
|
plateo/parsers/file_parsers.py
|
Edinburgh-Genome-Foundry/plateo
|
c9a608658325f3c507788d9b966a3f3c8e516bc5
|
[
"MIT"
] | 22
|
2018-01-29T21:34:25.000Z
|
2021-12-14T15:31:49.000Z
|
plateo/parsers/file_parsers.py
|
Edinburgh-Genome-Foundry/plateo
|
c9a608658325f3c507788d9b966a3f3c8e516bc5
|
[
"MIT"
] | 3
|
2017-09-20T16:08:45.000Z
|
2021-05-28T17:45:14.000Z
|
plateo/parsers/file_parsers.py
|
Edinburgh-Genome-Foundry/plateo
|
c9a608658325f3c507788d9b966a3f3c8e516bc5
|
[
"MIT"
] | 5
|
2018-09-18T08:53:37.000Z
|
2021-04-28T08:44:38.000Z
|
"""Misc. file parsers that are useful for other parsers"""
from xml.sax import saxutils, parse, parseString
class ExcelHandler(saxutils.handler.ContentHandler):
"""
    This class is adapted from the Python Cookbook, so the copyright
    presumably belongs to its authors.
    Note: the handler base class was changed from DefaultHandler to
    ContentHandler, as the former does not appear to exist in Python 2.
"""
def __init__(self):
self.chars = []
self.cells = []
self.rows = []
self.tables = []
def characters(self, content):
self.chars.append(content)
def startElement(self, name, atts):
if name == "Cell":
self.chars = []
elif name == "Row":
self.cells = []
elif name == "Table":
self.rows = []
def endElement(self, name):
if name == "Cell":
self.cells.append(''.join(self.chars))
elif name == "Row":
self.rows.append(self.cells)
elif name == "Table":
self.tables.append(self.rows)
def parse_excel_xml(xml_file=None, xml_string=None):
"""Return a list of the tables (2D arrays) in the Excel XML.
Provide either the path to an XML file, or a string of XML content.
"""
handler = ExcelHandler()
if xml_file is not None:
parse(xml_file, handler)
elif xml_string is not None:
parseString(xml_string, handler)
else:
raise ValueError("At least one of xml_file or xml_string should be"
" provided.")
return handler.tables
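# Minimal usage sketch (the XML snippet below is a made-up example, not taken
# from the package's test data):
if __name__ == "__main__":
    _demo_xml = (
        '<Workbook><Worksheet><Table>'
        '<Row><Cell><Data>hello</Data></Cell></Row>'
        '</Table></Worksheet></Workbook>'
    )
    # On Python < 3.8, xml.sax.parseString may require bytes rather than str.
    print(parse_excel_xml(xml_string=_demo_xml))  # -> [[['hello']]]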
| 28.925926
| 75
| 0.596671
|
from xml.sax import saxutils, parse, parseString
class ExcelHandler(saxutils.handler.ContentHandler):
def __init__(self):
self.chars = []
self.cells = []
self.rows = []
self.tables = []
def characters(self, content):
self.chars.append(content)
def startElement(self, name, atts):
if name == "Cell":
self.chars = []
elif name == "Row":
self.cells = []
elif name == "Table":
self.rows = []
def endElement(self, name):
if name == "Cell":
self.cells.append(''.join(self.chars))
elif name == "Row":
self.rows.append(self.cells)
elif name == "Table":
self.tables.append(self.rows)
def parse_excel_xml(xml_file=None, xml_string=None):
handler = ExcelHandler()
if xml_file is not None:
parse(xml_file, handler)
elif xml_string is not None:
parseString(xml_string, handler)
else:
raise ValueError("At least one of xml_file or xml_string should be"
" provided.")
return handler.tables
| true
| true
|
f71489efe090002bbbd4dcf98ceba635d27bfa14
| 7,361
|
py
|
Python
|
rlcard/games/leducholdem/game.py
|
NiccoloSacchi/rlcard
|
046129e8616b12e25652957869a94ab5fd838ae1
|
[
"MIT"
] | null | null | null |
rlcard/games/leducholdem/game.py
|
NiccoloSacchi/rlcard
|
046129e8616b12e25652957869a94ab5fd838ae1
|
[
"MIT"
] | null | null | null |
rlcard/games/leducholdem/game.py
|
NiccoloSacchi/rlcard
|
046129e8616b12e25652957869a94ab5fd838ae1
|
[
"MIT"
] | 1
|
2020-11-20T16:38:37.000Z
|
2020-11-20T16:38:37.000Z
|
import numpy as np
from copy import copy
from rlcard.games.leducholdem.dealer import LeducholdemDealer as Dealer
from rlcard.games.leducholdem.player import LeducholdemPlayer as Player
from rlcard.games.leducholdem.judger import LeducholdemJudger as Judger
from rlcard.games.leducholdem.round import LeducholdemRound as Round
from rlcard.games.limitholdem.game import LimitholdemGame
class LeducholdemGame(LimitholdemGame):
def __init__(self, allow_step_back=False):
        ''' Initialize the Leduc Hold'em game class
'''
self.allow_step_back = allow_step_back
''' No big/small blind
        # Some configurations of the game
# These arguments are fixed in Leduc Hold'em Game
# Raise amount and allowed times
self.raise_amount = 2
self.allowed_raise_num = 2
self.num_players = 2
'''
        # Some configurations of the game
# These arguments can be specified for creating new games
# Small blind and big blind
self.small_blind = 1
self.big_blind = 2 * self.small_blind
# Raise amount and allowed times
self.raise_amount = self.big_blind
self.allowed_raise_num = 2
self.num_players = 2
def init_game(self):
        ''' Initialize the game of Leduc Hold'em
        This version supports two-player Leduc Hold'em
Returns:
(tuple): Tuple containing:
(dict): The first state of the game
(int): Current player's id
'''
        # Initialize a dealer that can deal cards
self.dealer = Dealer()
        # Initialize two players to play the game
self.players = [Player(i) for i in range(self.num_players)]
# Initialize a judger class which will decide who wins in the end
self.judger = Judger()
# Prepare for the first round
for i in range(self.num_players):
self.players[i].hand = self.dealer.deal_card()
# Randomly choose a small blind and a big blind
s = np.random.randint(0, self.num_players)
b = (s + 1) % self.num_players
self.players[b].in_chips = self.big_blind
self.players[s].in_chips = self.small_blind
self.public_card = None
# The player with small blind plays the first
self.game_pointer = s
        # Initialize a bidding round. In the first round, the big blind and the small blind need to
# be passed to the round for processing.
self.round = Round(raise_amount=self.raise_amount,
allowed_raise_num=self.allowed_raise_num,
num_players=self.num_players)
self.round.start_new_round(game_pointer=self.game_pointer, raised=[p.in_chips for p in self.players])
# Count the round. There are 2 rounds in each game.
self.round_counter = 0
        # Save the history for stepping back to the last state.
self.history = []
state = self.get_state(self.game_pointer)
return state, self.game_pointer
def step(self, action):
''' Get the next state
Args:
action (str): a specific action. (call, raise, fold, or check)
Returns:
(tuple): Tuple containing:
(dict): next player's state
                (int): next player's id
'''
if self.allow_step_back:
# First snapshot the current state
r = copy(self.round)
r_raised = copy(self.round.raised)
gp = self.game_pointer
r_c = self.round_counter
d_deck = copy(self.dealer.deck)
p = copy(self.public_card)
ps = [copy(self.players[i]) for i in range(self.num_players)]
ps_hand = [copy(self.players[i].hand) for i in range(self.num_players)]
self.history.append((r, r_raised, gp, r_c, d_deck, p, ps, ps_hand))
# Then we proceed to the next round
self.game_pointer = self.round.proceed_round(self.players, action)
# If a round is over, we deal more public cards
if self.round.is_over():
# For the first round, we deal 1 card as public card. Double the raise amount for the second round
if self.round_counter == 0:
self.public_card = self.dealer.deal_card()
self.round.raise_amount = 2 * self.raise_amount
self.round_counter += 1
self.round.start_new_round(self.game_pointer)
state = self.get_state(self.game_pointer)
return state, self.game_pointer
def get_state(self, player):
''' Return player's state
Args:
player_id (int): player id
Returns:
(dict): The state of the player
'''
chips = [self.players[i].in_chips for i in range(self.num_players)]
legal_actions = self.get_legal_actions()
state = self.players[player].get_state(self.public_card, chips, legal_actions)
state['current_player'] = self.game_pointer
return state
def is_over(self):
''' Check if the game is over
Returns:
(boolean): True if the game is over
'''
alive_players = [1 if p.status=='alive' else 0 for p in self.players]
# If only one player is alive, the game is over.
if sum(alive_players) == 1:
return True
        # If all rounds are finished
if self.round_counter >= 2:
return True
return False
def get_payoffs(self):
''' Return the payoffs of the game
Returns:
(list): Each entry corresponds to the payoff of one player
'''
chips_payoffs = self.judger.judge_game(self.players, self.public_card)
payoffs = np.array(chips_payoffs) / (self.big_blind)
return payoffs
def step_back(self):
''' Return to the previous state of the game
Returns:
(bool): True if the game steps back successfully
'''
if len(self.history) > 0:
self.round, r_raised, self.game_pointer, self.round_counter, d_deck, self.public_card, self.players, ps_hand = self.history.pop()
self.round.raised = r_raised
self.dealer.deck = d_deck
for i, hand in enumerate(ps_hand):
self.players[i].hand = hand
return True
return False
# Test the game
#if __name__ == "__main__":
# game = LeducholdemGame(allow_step_back=True)
# while True:
# print('New Game')
# state, game_pointer = game.init_game()
# print(game_pointer, state)
# i = 1
# while not game.is_over():
# i += 1
# legal_actions = game.get_legal_actions()
# if i == 4:
# print('Step back')
# print(game.step_back())
# game_pointer = game.get_player_id()
# print(game_pointer)
# state = game.get_state(game_pointer)
# legal_actions = game.get_legal_actions()
# # action = input()
# action = np.random.choice(legal_actions)
# print(game_pointer, action, legal_actions, state)
# state, game_pointer = game.step(action)
# print(game_pointer, state)
#
# print(game.get_payoffs())
| 34.237209
| 141
| 0.603586
|
import numpy as np
from copy import copy
from rlcard.games.leducholdem.dealer import LeducholdemDealer as Dealer
from rlcard.games.leducholdem.player import LeducholdemPlayer as Player
from rlcard.games.leducholdem.judger import LeducholdemJudger as Judger
from rlcard.games.leducholdem.round import LeducholdemRound as Round
from rlcard.games.limitholdem.game import LimitholdemGame
class LeducholdemGame(LimitholdemGame):
def __init__(self, allow_step_back=False):
self.allow_step_back = allow_step_back
self.small_blind = 1
self.big_blind = 2 * self.small_blind
self.raise_amount = self.big_blind
self.allowed_raise_num = 2
self.num_players = 2
def init_game(self):
self.dealer = Dealer()
self.players = [Player(i) for i in range(self.num_players)]
self.judger = Judger()
for i in range(self.num_players):
self.players[i].hand = self.dealer.deal_card()
s = np.random.randint(0, self.num_players)
b = (s + 1) % self.num_players
self.players[b].in_chips = self.big_blind
self.players[s].in_chips = self.small_blind
self.public_card = None
self.game_pointer = s
self.round = Round(raise_amount=self.raise_amount,
allowed_raise_num=self.allowed_raise_num,
num_players=self.num_players)
self.round.start_new_round(game_pointer=self.game_pointer, raised=[p.in_chips for p in self.players])
self.round_counter = 0
self.history = []
state = self.get_state(self.game_pointer)
return state, self.game_pointer
def step(self, action):
if self.allow_step_back:
r = copy(self.round)
r_raised = copy(self.round.raised)
gp = self.game_pointer
r_c = self.round_counter
d_deck = copy(self.dealer.deck)
p = copy(self.public_card)
ps = [copy(self.players[i]) for i in range(self.num_players)]
ps_hand = [copy(self.players[i].hand) for i in range(self.num_players)]
self.history.append((r, r_raised, gp, r_c, d_deck, p, ps, ps_hand))
self.game_pointer = self.round.proceed_round(self.players, action)
if self.round.is_over():
if self.round_counter == 0:
self.public_card = self.dealer.deal_card()
self.round.raise_amount = 2 * self.raise_amount
self.round_counter += 1
self.round.start_new_round(self.game_pointer)
state = self.get_state(self.game_pointer)
return state, self.game_pointer
def get_state(self, player):
chips = [self.players[i].in_chips for i in range(self.num_players)]
legal_actions = self.get_legal_actions()
state = self.players[player].get_state(self.public_card, chips, legal_actions)
state['current_player'] = self.game_pointer
return state
def is_over(self):
alive_players = [1 if p.status=='alive' else 0 for p in self.players]
if sum(alive_players) == 1:
return True
if self.round_counter >= 2:
return True
return False
def get_payoffs(self):
chips_payoffs = self.judger.judge_game(self.players, self.public_card)
payoffs = np.array(chips_payoffs) / (self.big_blind)
return payoffs
def step_back(self):
if len(self.history) > 0:
self.round, r_raised, self.game_pointer, self.round_counter, d_deck, self.public_card, self.players, ps_hand = self.history.pop()
self.round.raised = r_raised
self.dealer.deck = d_deck
for i, hand in enumerate(ps_hand):
self.players[i].hand = hand
return True
return False
| true
| true
|
f7148a75a4e4a85ae3bd6f491ecb0aa85bbc5afc
| 471
|
py
|
Python
|
Python/Functions_base/Functions/replace_ElecNaming.py
|
DanielHuji-RB/RB-article
|
e5a9ba30edfb030db1cd3bcf562c6abff3f9d48e
|
[
"MIT"
] | null | null | null |
Python/Functions_base/Functions/replace_ElecNaming.py
|
DanielHuji-RB/RB-article
|
e5a9ba30edfb030db1cd3bcf562c6abff3f9d48e
|
[
"MIT"
] | null | null | null |
Python/Functions_base/Functions/replace_ElecNaming.py
|
DanielHuji-RB/RB-article
|
e5a9ba30edfb030db1cd3bcf562c6abff3f9d48e
|
[
"MIT"
] | null | null | null |
#Daniel Sand
import pandas as pd
import numpy as np
fileName='/Tscores.csv'
newFileName='/Tscores_v3.csv'
df=pd.read_csv(fileName, sep=',')
# 6 different electrodes
oldFormat=['0-1','0-2','0-3','2-Jan','3-Jan','3-Feb']
newFormat=['0_1','0_2','0_3','2_1','3_1','3_2']
for iCont in range(0, len(oldFormat)):
currElec_old = oldFormat[iCont]
currElec_new = newFormat[iCont]
df.loc[df.Elec==currElec_old,'Elec']=currElec_new
df.to_csv(path_or_buf=newFileName)
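# An equivalent, more idiomatic alternative (not in the original script) would
# be a single vectorised replacement:
#     df['Elec'] = df['Elec'].replace(dict(zip(oldFormat, newFormat)))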
| 22.428571
| 53
| 0.696391
|
import pandas as pd
import numpy as np
fileName='/Tscores.csv'
newFileName='/Tscores_v3.csv'
df=pd.read_csv(fileName, sep=',')
oldFormat=['0-1','0-2','0-3','2-Jan','3-Jan','3-Feb']
newFormat=['0_1','0_2','0_3','2_1','3_1','3_2']
for iCont in range(0, len(oldFormat)):
currElec_old = oldFormat[iCont]
currElec_new = newFormat[iCont]
df.loc[df.Elec==currElec_old,'Elec']=currElec_new
df.to_csv(path_or_buf=newFileName)
| true
| true
|
f7148acd3d12984fc7698af4459254aa88540a51
| 188
|
py
|
Python
|
setup.py
|
jjc2718/mpmp
|
9960d8d3e20e4fc9319e5420e083fece5bfb3d9e
|
[
"BSD-3-Clause"
] | 1
|
2021-11-02T05:47:38.000Z
|
2021-11-02T05:47:38.000Z
|
setup.py
|
jjc2718/mpmp
|
9960d8d3e20e4fc9319e5420e083fece5bfb3d9e
|
[
"BSD-3-Clause"
] | 63
|
2020-12-03T23:55:55.000Z
|
2022-03-29T17:55:29.000Z
|
setup.py
|
jjc2718/mpmp
|
9960d8d3e20e4fc9319e5420e083fece5bfb3d9e
|
[
"BSD-3-Clause"
] | 3
|
2020-12-01T18:50:00.000Z
|
2022-02-18T12:32:38.000Z
|
from setuptools import setup
setup(
name='mpmp',
author='Jake Crawford',
version='0.0.1',
description='Multimodal Pan-cancer Mutation Prediction',
packages=['mpmp']
)
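# Typical local installation (illustrative, not part of the package docs):
#     pip install -e .    # editable/development install from the repo root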
| 18.8
| 60
| 0.670213
|
from setuptools import setup
setup(
name='mpmp',
author='Jake Crawford',
version='0.0.1',
description='Multimodal Pan-cancer Mutation Prediction',
packages=['mpmp']
)
| true
| true
|
f7148c067979d0d62792963a826a42a813df0ab1
| 8,199
|
py
|
Python
|
ludwig/features/image_feature.py
|
ThinkBigAnalytics/ludwig
|
0a3159af4cc91f57251f3dec0cdb863c7003cf00
|
[
"Apache-2.0"
] | 1
|
2019-07-31T19:11:02.000Z
|
2019-07-31T19:11:02.000Z
|
ludwig/features/image_feature.py
|
ThinkBigAnalytics/ludwig
|
0a3159af4cc91f57251f3dec0cdb863c7003cf00
|
[
"Apache-2.0"
] | null | null | null |
ludwig/features/image_feature.py
|
ThinkBigAnalytics/ludwig
|
0a3159af4cc91f57251f3dec0cdb863c7003cf00
|
[
"Apache-2.0"
] | null | null | null |
#! /usr/bin/env python
# coding=utf-8
# Copyright (c) 2019 Uber Technologies, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import logging
import os
import h5py
import numpy as np
import tensorflow as tf
from skimage.io import imread
from ludwig.constants import *
from ludwig.features.base_feature import BaseFeature
from ludwig.features.base_feature import InputFeature
from ludwig.models.modules.image_encoders import ResNetEncoder
from ludwig.models.modules.image_encoders import Stacked2DCNN
from ludwig.utils.image_utils import resize_image
from ludwig.utils.misc import get_from_registry
from ludwig.utils.misc import set_default_value
class ImageBaseFeature(BaseFeature):
def __init__(self, feature):
super().__init__(feature)
self.type = IMAGE
preprocessing_defaults = {
'missing_value_strategy': BACKFILL,
'in_memory': True,
'resize_method': 'crop_or_pad'
}
@staticmethod
def get_feature_meta(column, preprocessing_parameters):
return {
'preprocessing': preprocessing_parameters
}
@staticmethod
def add_feature_data(
feature,
dataset_df,
data,
metadata,
preprocessing_parameters
):
set_default_value(
feature,
'in_memory',
preprocessing_parameters['in_memory']
)
if ('height' in preprocessing_parameters or
'width' in preprocessing_parameters):
should_resize = True
try:
provided_height = int(preprocessing_parameters[HEIGHT])
provided_width = int(preprocessing_parameters[WIDTH])
except ValueError as e:
raise ValueError(
'Image height and width must be set and have '
'positive integer values: ' + str(e)
)
if (provided_height <= 0 or provided_width <= 0):
raise ValueError(
'Image height and width must be positive integers'
)
else:
should_resize = False
csv_path = os.path.dirname(os.path.abspath(dataset_df.csv))
num_images = len(dataset_df)
height = 0
width = 0
num_channels = 1
if num_images > 0:
            # If a width and height have not been specified here, we assume
            # that all images have the same width and height, so the width
            # and height of the first image are taken as those of all the
            # other images.
first_image = imread(
os.path.join(csv_path, dataset_df[feature['name']][0])
)
height = first_image.shape[0]
width = first_image.shape[1]
if first_image.ndim == 2:
num_channels = 1
else:
num_channels = first_image.shape[2]
if should_resize:
height = provided_height
width = provided_width
metadata[feature['name']]['preprocessing']['height'] = height
metadata[feature['name']]['preprocessing']['width'] = width
metadata[feature['name']]['preprocessing'][
'num_channels'] = num_channels
if feature['in_memory']:
data[feature['name']] = np.empty(
(num_images, height, width, num_channels),
dtype=np.int8
)
for i in range(len(dataset_df)):
filename = os.path.join(
csv_path,
dataset_df[feature['name']][i]
)
img = imread(filename)
if img.ndim == 2:
img = img.reshape((img.shape[0], img.shape[1], 1))
if should_resize:
img = resize_image(
img,
(height, width),
preprocessing_parameters['resize_method']
)
data[feature['name']][i, :, :, :] = img
else:
data_fp = os.path.splitext(dataset_df.csv)[0] + '.hdf5'
mode = 'w'
if os.path.isfile(data_fp):
mode = 'r+'
with h5py.File(data_fp, mode) as h5_file:
image_dataset = h5_file.create_dataset(
feature['name'] + '_data',
(num_images, height, width, num_channels),
dtype=np.uint8
)
for i in range(len(dataset_df)):
filename = os.path.join(
csv_path,
dataset_df[feature['name']][i]
)
img = imread(filename)
if img.ndim == 2:
img = img.reshape((img.shape[0], img.shape[1], 1))
if should_resize:
img = resize_image(
img,
(height, width),
preprocessing_parameters['resize_method'],
)
image_dataset[i, :height, :width, :] = img
data[feature['name']] = np.arange(num_images)
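# Illustrative model-definition snippet (the feature name and the 128x128 size
# are assumptions, not taken from the Ludwig docs) showing a preprocessing
# block that would trigger the resize path implemented above:
#
#   input_features:
#       - name: image_path
#         type: image
#         preprocessing:
#             resize_method: crop_or_pad
#             height: 128
#             width: 128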
class ImageInputFeature(ImageBaseFeature, InputFeature):
def __init__(self, feature):
super().__init__(feature)
self.height = 0
self.width = 0
self.num_channels = 0
self.in_memory = True
self.data_hdf5_fp = ''
self.encoder = 'stacked_cnn'
encoder_parameters = self.overwrite_defaults(feature)
self.encoder_obj = self.get_image_encoder(encoder_parameters)
def get_image_encoder(self, encoder_parameters):
return get_from_registry(
self.encoder, image_encoder_registry)(
**encoder_parameters
)
def _get_input_placeholder(self):
# None dimension is for dealing with variable batch size
return tf.placeholder(
tf.float32,
shape=[None, self.height, self.width, self.num_channels],
name=self.name,
)
def build_input(
self,
regularizer,
dropout_rate,
is_training=False,
**kwargs
):
placeholder = self._get_input_placeholder()
logging.debug(' targets_placeholder: {0}'.format(placeholder))
feature_representation, feature_representation_size = self.encoder_obj(
placeholder,
regularizer,
dropout_rate,
is_training,
)
logging.debug(
' feature_representation: {0}'.format(feature_representation)
)
feature_representation = {
'name': self.name,
'type': self.type,
'representation': feature_representation,
'size': feature_representation_size,
'placeholder': placeholder
}
return feature_representation
@staticmethod
def update_model_definition_with_metadata(
input_feature,
feature_metadata,
*args,
**kwargs
):
for dim in ['height', 'width', 'num_channels']:
input_feature[dim] = feature_metadata['preprocessing'][dim]
input_feature['data_hdf5_fp'] = (
kwargs['model_definition']['data_hdf5_fp']
)
@staticmethod
def populate_defaults(input_feature):
set_default_value(input_feature, 'tied_weights', None)
image_encoder_registry = {
'stacked_cnn': Stacked2DCNN,
'resnet': ResNetEncoder
}
| 32.796
| 80
| 0.558971
|
import logging
import os
import h5py
import numpy as np
import tensorflow as tf
from skimage.io import imread
from ludwig.constants import *
from ludwig.features.base_feature import BaseFeature
from ludwig.features.base_feature import InputFeature
from ludwig.models.modules.image_encoders import ResNetEncoder
from ludwig.models.modules.image_encoders import Stacked2DCNN
from ludwig.utils.image_utils import resize_image
from ludwig.utils.misc import get_from_registry
from ludwig.utils.misc import set_default_value
class ImageBaseFeature(BaseFeature):
def __init__(self, feature):
super().__init__(feature)
self.type = IMAGE
preprocessing_defaults = {
'missing_value_strategy': BACKFILL,
'in_memory': True,
'resize_method': 'crop_or_pad'
}
@staticmethod
def get_feature_meta(column, preprocessing_parameters):
return {
'preprocessing': preprocessing_parameters
}
@staticmethod
def add_feature_data(
feature,
dataset_df,
data,
metadata,
preprocessing_parameters
):
set_default_value(
feature,
'in_memory',
preprocessing_parameters['in_memory']
)
if ('height' in preprocessing_parameters or
'width' in preprocessing_parameters):
should_resize = True
try:
provided_height = int(preprocessing_parameters[HEIGHT])
provided_width = int(preprocessing_parameters[WIDTH])
except ValueError as e:
raise ValueError(
'Image height and width must be set and have '
'positive integer values: ' + str(e)
)
if (provided_height <= 0 or provided_width <= 0):
raise ValueError(
'Image height and width must be positive integers'
)
else:
should_resize = False
csv_path = os.path.dirname(os.path.abspath(dataset_df.csv))
num_images = len(dataset_df)
height = 0
width = 0
num_channels = 1
if num_images > 0:
first_image = imread(
os.path.join(csv_path, dataset_df[feature['name']][0])
)
height = first_image.shape[0]
width = first_image.shape[1]
if first_image.ndim == 2:
num_channels = 1
else:
num_channels = first_image.shape[2]
if should_resize:
height = provided_height
width = provided_width
metadata[feature['name']]['preprocessing']['height'] = height
metadata[feature['name']]['preprocessing']['width'] = width
metadata[feature['name']]['preprocessing'][
'num_channels'] = num_channels
if feature['in_memory']:
data[feature['name']] = np.empty(
(num_images, height, width, num_channels),
dtype=np.int8
)
for i in range(len(dataset_df)):
filename = os.path.join(
csv_path,
dataset_df[feature['name']][i]
)
img = imread(filename)
if img.ndim == 2:
img = img.reshape((img.shape[0], img.shape[1], 1))
if should_resize:
img = resize_image(
img,
(height, width),
preprocessing_parameters['resize_method']
)
data[feature['name']][i, :, :, :] = img
else:
data_fp = os.path.splitext(dataset_df.csv)[0] + '.hdf5'
mode = 'w'
if os.path.isfile(data_fp):
mode = 'r+'
with h5py.File(data_fp, mode) as h5_file:
image_dataset = h5_file.create_dataset(
feature['name'] + '_data',
(num_images, height, width, num_channels),
dtype=np.uint8
)
for i in range(len(dataset_df)):
filename = os.path.join(
csv_path,
dataset_df[feature['name']][i]
)
img = imread(filename)
if img.ndim == 2:
img = img.reshape((img.shape[0], img.shape[1], 1))
if should_resize:
img = resize_image(
img,
(height, width),
preprocessing_parameters['resize_method'],
)
image_dataset[i, :height, :width, :] = img
data[feature['name']] = np.arange(num_images)
class ImageInputFeature(ImageBaseFeature, InputFeature):
def __init__(self, feature):
super().__init__(feature)
self.height = 0
self.width = 0
self.num_channels = 0
self.in_memory = True
self.data_hdf5_fp = ''
self.encoder = 'stacked_cnn'
encoder_parameters = self.overwrite_defaults(feature)
self.encoder_obj = self.get_image_encoder(encoder_parameters)
def get_image_encoder(self, encoder_parameters):
return get_from_registry(
self.encoder, image_encoder_registry)(
**encoder_parameters
)
def _get_input_placeholder(self):
return tf.placeholder(
tf.float32,
shape=[None, self.height, self.width, self.num_channels],
name=self.name,
)
def build_input(
self,
regularizer,
dropout_rate,
is_training=False,
**kwargs
):
placeholder = self._get_input_placeholder()
logging.debug(' targets_placeholder: {0}'.format(placeholder))
feature_representation, feature_representation_size = self.encoder_obj(
placeholder,
regularizer,
dropout_rate,
is_training,
)
logging.debug(
' feature_representation: {0}'.format(feature_representation)
)
feature_representation = {
'name': self.name,
'type': self.type,
'representation': feature_representation,
'size': feature_representation_size,
'placeholder': placeholder
}
return feature_representation
@staticmethod
def update_model_definition_with_metadata(
input_feature,
feature_metadata,
*args,
**kwargs
):
for dim in ['height', 'width', 'num_channels']:
input_feature[dim] = feature_metadata['preprocessing'][dim]
input_feature['data_hdf5_fp'] = (
kwargs['model_definition']['data_hdf5_fp']
)
@staticmethod
def populate_defaults(input_feature):
set_default_value(input_feature, 'tied_weights', None)
image_encoder_registry = {
'stacked_cnn': Stacked2DCNN,
'resnet': ResNetEncoder
}
| true
| true
|
f7148ccd4e901a2e9e3c5c5b644f2f81ee5e045c
| 133,646
|
py
|
Python
|
tensorflow/python/keras/layers/convolutional.py
|
devinlife/tensorflow
|
1445444c15a396410f25ae91b7d1c19d724e2afc
|
[
"Apache-2.0"
] | 8
|
2020-07-29T18:50:45.000Z
|
2021-07-25T07:06:43.000Z
|
tensorflow/python/keras/layers/convolutional.py
|
devinlife/tensorflow
|
1445444c15a396410f25ae91b7d1c19d724e2afc
|
[
"Apache-2.0"
] | 203
|
2019-06-14T23:53:10.000Z
|
2022-02-10T02:27:23.000Z
|
tensorflow/python/keras/layers/convolutional.py
|
devinlife/tensorflow
|
1445444c15a396410f25ae91b7d1c19d724e2afc
|
[
"Apache-2.0"
] | 11
|
2020-05-31T13:14:56.000Z
|
2021-12-14T04:39:25.000Z
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras convolution layers and image transformation layers.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.eager import context
from tensorflow.python.framework import tensor_shape
from tensorflow.python.keras import activations
from tensorflow.python.keras import backend
from tensorflow.python.keras import constraints
from tensorflow.python.keras import initializers
from tensorflow.python.keras import regularizers
from tensorflow.python.keras.engine.base_layer import Layer
from tensorflow.python.keras.engine.input_spec import InputSpec
# imports for backwards namespace compatibility
# pylint: disable=unused-import
from tensorflow.python.keras.layers.pooling import AveragePooling1D
from tensorflow.python.keras.layers.pooling import AveragePooling2D
from tensorflow.python.keras.layers.pooling import AveragePooling3D
from tensorflow.python.keras.layers.pooling import MaxPooling1D
from tensorflow.python.keras.layers.pooling import MaxPooling2D
from tensorflow.python.keras.layers.pooling import MaxPooling3D
# pylint: enable=unused-import
from tensorflow.python.keras.utils import conv_utils
from tensorflow.python.keras.utils import tf_utils
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import nn_ops
from tensorflow.python.util.tf_export import keras_export
# pylint: disable=g-classes-have-attributes
class Conv(Layer):
"""Abstract N-D convolution layer (private, used as implementation base).
This layer creates a convolution kernel that is convolved
(actually cross-correlated) with the layer input to produce a tensor of
outputs. If `use_bias` is True (and a `bias_initializer` is provided),
a bias vector is created and added to the outputs. Finally, if
`activation` is not `None`, it is applied to the outputs as well.
Note: layer attributes cannot be modified after the layer has been called
once (except the `trainable` attribute).
Arguments:
rank: An integer, the rank of the convolution, e.g. "2" for 2D convolution.
filters: Integer, the dimensionality of the output space (i.e. the number
of filters in the convolution).
kernel_size: An integer or tuple/list of n integers, specifying the
length of the convolution window.
strides: An integer or tuple/list of n integers,
specifying the stride length of the convolution.
Specifying any stride value != 1 is incompatible with specifying
any `dilation_rate` value != 1.
padding: One of `"valid"`, `"same"`, or `"causal"` (case-insensitive).
data_format: A string, one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch_size, ..., channels)` while `channels_first` corresponds to
inputs with shape `(batch_size, channels, ...)`.
dilation_rate: An integer or tuple/list of n integers, specifying
the dilation rate to use for dilated convolution.
Currently, specifying any `dilation_rate` value != 1 is
incompatible with specifying any `strides` value != 1.
activation: Activation function to use.
If you don't specify anything, no activation is applied.
use_bias: Boolean, whether the layer uses a bias.
kernel_initializer: An initializer for the convolution kernel.
bias_initializer: An initializer for the bias vector. If None, the default
initializer will be used.
kernel_regularizer: Optional regularizer for the convolution kernel.
bias_regularizer: Optional regularizer for the bias vector.
activity_regularizer: Optional regularizer function for the output.
kernel_constraint: Optional projection function to be applied to the
kernel after being updated by an `Optimizer` (e.g. used to implement
norm constraints or value constraints for layer weights). The function
must take as input the unprojected variable and must return the
projected variable (which must have the same shape). Constraints are
not safe to use when doing asynchronous distributed training.
bias_constraint: Optional projection function to be applied to the
bias after being updated by an `Optimizer`.
trainable: Boolean, if `True` the weights of this layer will be marked as
trainable (and listed in `layer.trainable_weights`).
name: A string, the name of the layer.
"""
def __init__(self, rank,
filters,
kernel_size,
strides=1,
padding='valid',
data_format=None,
dilation_rate=1,
activation=None,
use_bias=True,
kernel_initializer='glorot_uniform',
bias_initializer='zeros',
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
trainable=True,
name=None,
**kwargs):
super(Conv, self).__init__(
trainable=trainable,
name=name,
activity_regularizer=regularizers.get(activity_regularizer),
**kwargs)
self.rank = rank
if filters is not None and not isinstance(filters, int):
filters = int(filters)
self.filters = filters
self.kernel_size = conv_utils.normalize_tuple(
kernel_size, rank, 'kernel_size')
if not all(self.kernel_size):
raise ValueError('The argument `kernel_size` cannot contain 0(s). '
'Received: %s' % (kernel_size,))
self.strides = conv_utils.normalize_tuple(strides, rank, 'strides')
self.padding = conv_utils.normalize_padding(padding)
if (self.padding == 'causal' and not isinstance(self,
(Conv1D, SeparableConv1D))):
      raise ValueError('Causal padding is only supported for `Conv1D` '
                       'and `SeparableConv1D`.')
self.data_format = conv_utils.normalize_data_format(data_format)
self.dilation_rate = conv_utils.normalize_tuple(
dilation_rate, rank, 'dilation_rate')
self.activation = activations.get(activation)
self.use_bias = use_bias
self.kernel_initializer = initializers.get(kernel_initializer)
self.bias_initializer = initializers.get(bias_initializer)
self.kernel_regularizer = regularizers.get(kernel_regularizer)
self.bias_regularizer = regularizers.get(bias_regularizer)
self.kernel_constraint = constraints.get(kernel_constraint)
self.bias_constraint = constraints.get(bias_constraint)
self.input_spec = InputSpec(ndim=self.rank + 2)
def build(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape)
input_channel = self._get_input_channel(input_shape)
kernel_shape = self.kernel_size + (input_channel, self.filters)
self.kernel = self.add_weight(
name='kernel',
shape=kernel_shape,
initializer=self.kernel_initializer,
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint,
trainable=True,
dtype=self.dtype)
if self.use_bias:
self.bias = self.add_weight(
name='bias',
shape=(self.filters,),
initializer=self.bias_initializer,
regularizer=self.bias_regularizer,
constraint=self.bias_constraint,
trainable=True,
dtype=self.dtype)
else:
self.bias = None
channel_axis = self._get_channel_axis()
self.input_spec = InputSpec(ndim=self.rank + 2,
axes={channel_axis: input_channel})
self._build_conv_op_input_shape = input_shape
self._build_input_channel = input_channel
self._padding_op = self._get_padding_op()
self._conv_op_data_format = conv_utils.convert_data_format(
self.data_format, self.rank + 2)
self._convolution_op = nn_ops.Convolution(
input_shape,
filter_shape=self.kernel.shape,
dilation_rate=self.dilation_rate,
strides=self.strides,
padding=self._padding_op,
data_format=self._conv_op_data_format)
self.built = True
def call(self, inputs):
if self._recreate_conv_op(inputs):
self._convolution_op = nn_ops.Convolution(
inputs.get_shape(),
filter_shape=self.kernel.shape,
dilation_rate=self.dilation_rate,
strides=self.strides,
padding=self._padding_op,
data_format=self._conv_op_data_format)
self._build_conv_op_input_shape = inputs.get_shape()
# Apply causal padding to inputs for Conv1D.
if self.padding == 'causal' and self.__class__.__name__ == 'Conv1D':
inputs = array_ops.pad(inputs, self._compute_causal_padding())
outputs = self._convolution_op(inputs, self.kernel)
if self.use_bias:
if self.data_format == 'channels_first':
if self.rank == 1:
# nn.bias_add does not accept a 1D input tensor.
bias = array_ops.reshape(self.bias, (1, self.filters, 1))
outputs += bias
else:
outputs = nn.bias_add(outputs, self.bias, data_format='NCHW')
else:
outputs = nn.bias_add(outputs, self.bias, data_format='NHWC')
if self.activation is not None:
return self.activation(outputs)
return outputs
def _spatial_output_shape(self, spatial_input_shape):
return [
conv_utils.conv_output_length(
length,
self.kernel_size[i],
padding=self.padding,
stride=self.strides[i],
dilation=self.dilation_rate[i])
for i, length in enumerate(spatial_input_shape)
]
def compute_output_shape(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape).as_list()
if self.data_format == 'channels_last':
return tensor_shape.TensorShape(
[input_shape[0]] + self._spatial_output_shape(input_shape[1:-1]) +
[self.filters])
else:
return tensor_shape.TensorShape(
[input_shape[0], self.filters] +
self._spatial_output_shape(input_shape[2:]))
def get_config(self):
config = {
'filters': self.filters,
'kernel_size': self.kernel_size,
'strides': self.strides,
'padding': self.padding,
'data_format': self.data_format,
'dilation_rate': self.dilation_rate,
'activation': activations.serialize(self.activation),
'use_bias': self.use_bias,
'kernel_initializer': initializers.serialize(self.kernel_initializer),
'bias_initializer': initializers.serialize(self.bias_initializer),
'kernel_regularizer': regularizers.serialize(self.kernel_regularizer),
'bias_regularizer': regularizers.serialize(self.bias_regularizer),
'activity_regularizer':
regularizers.serialize(self.activity_regularizer),
'kernel_constraint': constraints.serialize(self.kernel_constraint),
'bias_constraint': constraints.serialize(self.bias_constraint)
}
base_config = super(Conv, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
def _compute_causal_padding(self):
"""Calculates padding for 'causal' option for 1-d conv layers."""
left_pad = self.dilation_rate[0] * (self.kernel_size[0] - 1)
if self.data_format == 'channels_last':
causal_padding = [[0, 0], [left_pad, 0], [0, 0]]
else:
causal_padding = [[0, 0], [0, 0], [left_pad, 0]]
return causal_padding
def _get_channel_axis(self):
if self.data_format == 'channels_first':
return 1
else:
return -1
def _get_input_channel(self, input_shape):
channel_axis = self._get_channel_axis()
if input_shape.dims[channel_axis].value is None:
raise ValueError('The channel dimension of the inputs '
'should be defined. Found `None`.')
return int(input_shape[channel_axis])
def _get_padding_op(self):
if self.padding == 'causal':
op_padding = 'valid'
else:
op_padding = self.padding
if not isinstance(op_padding, (list, tuple)):
op_padding = op_padding.upper()
return op_padding
def _recreate_conv_op(self, inputs):
"""Recreate conv_op if necessary.
Check if the input_shape in call() is different from that in build().
If the most-specific input shape describing the build and call shapes is not
equal to the shape we currently built with, then we need to rebuild the
_convolution_op to avoid incorrect behavior.
Args:
inputs: The input data to call() method.
Returns:
`True` or `False` to indicate whether to recreate the conv_op.
"""
call_input_shape = inputs.get_shape()
# If the most specific compatible shape between _build_input_shape and
# call_input_shape is not _build_input_shape then we must re-build.
return self._build_conv_op_input_shape.most_specific_compatible_shape(
call_input_shape) != self._build_conv_op_input_shape
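# Illustrative sketch (not part of the original module): a pure-Python mirror of
# the spatial-size arithmetic that Conv._spatial_output_shape delegates to
# conv_utils.conv_output_length, simplified to the padding modes used here.
def _conv_output_length_sketch(input_length, kernel_size, padding, stride, dilation=1):
  # Effective kernel size once dilation spreads out the taps.
  dilated_kernel = kernel_size + (kernel_size - 1) * (dilation - 1)
  if padding in ('same', 'causal'):
    length = input_length
  else:  # 'valid'
    length = input_length - dilated_kernel + 1
  # Ceiling division by the stride.
  return (length + stride - 1) // stride
# Worked example matching the Conv2D docstring below: a 28-pixel axis with a
# 3-wide kernel and 'valid' padding gives 26; with dilation 2 it gives 24.
# _conv_output_length_sketch(28, 3, 'valid', 1) == 26
# _conv_output_length_sketch(28, 3, 'valid', 1, dilation=2) == 24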
@keras_export('keras.layers.Conv1D', 'keras.layers.Convolution1D')
class Conv1D(Conv):
"""1D convolution layer (e.g. temporal convolution).
This layer creates a convolution kernel that is convolved
with the layer input over a single spatial (or temporal) dimension
to produce a tensor of outputs.
If `use_bias` is True, a bias vector is created and added to the outputs.
Finally, if `activation` is not `None`,
it is applied to the outputs as well.
When using this layer as the first layer in a model,
provide an `input_shape` argument
(tuple of integers or `None`, e.g.
  `(10, 128)` for sequences of 10 vectors of 128 dimensions,
  or `(None, 128)` for variable-length sequences of 128-dimensional vectors).
Examples:
>>> # The inputs are 128-length vectors with 10 timesteps, and the batch size
>>> # is 4.
>>> input_shape = (4, 10, 128)
>>> x = tf.random.normal(input_shape)
>>> y = tf.keras.layers.Conv1D(
... 32, 3, activation='relu',input_shape=input_shape)(x)
>>> print(y.shape)
(4, 8, 32)
Arguments:
filters: Integer, the dimensionality of the output space
(i.e. the number of output filters in the convolution).
kernel_size: An integer or tuple/list of a single integer,
specifying the length of the 1D convolution window.
strides: An integer or tuple/list of a single integer,
specifying the stride length of the convolution.
Specifying any stride value != 1 is incompatible with specifying
any `dilation_rate` value != 1.
padding: One of `"valid"`, `"causal"` or `"same"` (case-insensitive).
`"causal"` results in causal (dilated) convolutions, e.g. `output[t]`
does not depend on `input[t+1:]`. Useful when modeling temporal data
where the model should not violate the temporal order.
See [WaveNet: A Generative Model for Raw Audio, section
2.1](https://arxiv.org/abs/1609.03499).
data_format: A string,
one of `channels_last` (default) or `channels_first`.
dilation_rate: an integer or tuple/list of a single integer, specifying
the dilation rate to use for dilated convolution.
Currently, specifying any `dilation_rate` value != 1 is
incompatible with specifying any `strides` value != 1.
activation: Activation function to use.
If you don't specify anything, no activation is applied (
see `keras.activations`).
use_bias: Boolean, whether the layer uses a bias vector.
kernel_initializer: Initializer for the `kernel` weights matrix (
see `keras.initializers`).
bias_initializer: Initializer for the bias vector (
see `keras.initializers`).
kernel_regularizer: Regularizer function applied to
the `kernel` weights matrix (see `keras.regularizers`).
bias_regularizer: Regularizer function applied to the bias vector (
see `keras.regularizers`).
activity_regularizer: Regularizer function applied to
the output of the layer (its "activation") (
see `keras.regularizers`).
kernel_constraint: Constraint function applied to the kernel matrix (
see `keras.constraints`).
bias_constraint: Constraint function applied to the bias vector (
see `keras.constraints`).
Input shape:
3D tensor with shape: `(batch_size, steps, input_dim)`
Output shape:
3D tensor with shape: `(batch_size, new_steps, filters)`
`steps` value might have changed due to padding or strides.
Returns:
A tensor of rank 3 representing
`activation(conv1d(inputs, kernel) + bias)`.
Raises:
ValueError: when both `strides` > 1 and `dilation_rate` > 1.
"""
def __init__(self,
filters,
kernel_size,
strides=1,
padding='valid',
data_format='channels_last',
dilation_rate=1,
activation=None,
use_bias=True,
kernel_initializer='glorot_uniform',
bias_initializer='zeros',
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
**kwargs):
super(Conv1D, self).__init__(
rank=1,
filters=filters,
kernel_size=kernel_size,
strides=strides,
padding=padding,
data_format=data_format,
dilation_rate=dilation_rate,
activation=activations.get(activation),
use_bias=use_bias,
kernel_initializer=initializers.get(kernel_initializer),
bias_initializer=initializers.get(bias_initializer),
kernel_regularizer=regularizers.get(kernel_regularizer),
bias_regularizer=regularizers.get(bias_regularizer),
activity_regularizer=regularizers.get(activity_regularizer),
kernel_constraint=constraints.get(kernel_constraint),
bias_constraint=constraints.get(bias_constraint),
**kwargs)
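# Illustrative sketch (not part of the original module): causal padding keeps the
# number of timesteps (with stride 1) and only looks backwards in time, so a
# sequence model never peeks at future steps.
#
#   >>> x = tf.random.normal((4, 10, 128))
#   >>> y = tf.keras.layers.Conv1D(32, 3, padding='causal')(x)
#   >>> print(y.shape)
#   (4, 10, 32)
#
# Internally this is the left-only zero padding from _compute_causal_padding:
# left_pad = dilation_rate * (kernel_size - 1), e.g. 2 * (3 - 1) = 4 zeros when
# kernel_size=3 and dilation_rate=2.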
@keras_export('keras.layers.Conv2D', 'keras.layers.Convolution2D')
class Conv2D(Conv):
"""2D convolution layer (e.g. spatial convolution over images).
This layer creates a convolution kernel that is convolved
with the layer input to produce a tensor of
outputs. If `use_bias` is True,
a bias vector is created and added to the outputs. Finally, if
`activation` is not `None`, it is applied to the outputs as well.
When using this layer as the first layer in a model,
provide the keyword argument `input_shape`
(tuple of integers, does not include the sample axis),
e.g. `input_shape=(128, 128, 3)` for 128x128 RGB pictures
in `data_format="channels_last"`.
Examples:
>>> # The inputs are 28x28 RGB images with `channels_last` and the batch
>>> # size is 4.
>>> input_shape = (4, 28, 28, 3)
>>> x = tf.random.normal(input_shape)
>>> y = tf.keras.layers.Conv2D(
... 2, 3, activation='relu', input_shape=input_shape)(x)
>>> print(y.shape)
(4, 26, 26, 2)
>>> # With `dilation_rate` as 2.
>>> input_shape = (4, 28, 28, 3)
>>> x = tf.random.normal(input_shape)
>>> y = tf.keras.layers.Conv2D(
... 2, 3, activation='relu', dilation_rate=2, input_shape=input_shape)(x)
>>> print(y.shape)
(4, 24, 24, 2)
>>> # With `padding` as "same".
>>> input_shape = (4, 28, 28, 3)
>>> x = tf.random.normal(input_shape)
>>> y = tf.keras.layers.Conv2D(
... 2, 3, activation='relu', padding="same", input_shape=input_shape)(x)
>>> print(y.shape)
(4, 28, 28, 2)
Arguments:
filters: Integer, the dimensionality of the output space
(i.e. the number of output filters in the convolution).
kernel_size: An integer or tuple/list of 2 integers, specifying the
height and width of the 2D convolution window.
Can be a single integer to specify the same value for
all spatial dimensions.
strides: An integer or tuple/list of 2 integers,
specifying the strides of the convolution along the height and width.
Can be a single integer to specify the same value for
all spatial dimensions.
Specifying any stride value != 1 is incompatible with specifying
any `dilation_rate` value != 1.
padding: one of `"valid"` or `"same"` (case-insensitive).
data_format: A string,
one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch_size, height, width, channels)` while `channels_first`
corresponds to inputs with shape
`(batch_size, channels, height, width)`.
It defaults to the `image_data_format` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be "channels_last".
dilation_rate: an integer or tuple/list of 2 integers, specifying
the dilation rate to use for dilated convolution.
Can be a single integer to specify the same value for
all spatial dimensions.
Currently, specifying any `dilation_rate` value != 1 is
incompatible with specifying any stride value != 1.
activation: Activation function to use.
If you don't specify anything, no activation is applied (
see `keras.activations`).
use_bias: Boolean, whether the layer uses a bias vector.
kernel_initializer: Initializer for the `kernel` weights matrix (
see `keras.initializers`).
bias_initializer: Initializer for the bias vector (
see `keras.initializers`).
kernel_regularizer: Regularizer function applied to
the `kernel` weights matrix (see `keras.regularizers`).
bias_regularizer: Regularizer function applied to the bias vector (
see `keras.regularizers`).
activity_regularizer: Regularizer function applied to
the output of the layer (its "activation") (
see `keras.regularizers`).
kernel_constraint: Constraint function applied to the kernel matrix (
see `keras.constraints`).
bias_constraint: Constraint function applied to the bias vector (
see `keras.constraints`).
Input shape:
4D tensor with shape:
`(batch_size, channels, rows, cols)` if data_format='channels_first'
or 4D tensor with shape:
`(batch_size, rows, cols, channels)` if data_format='channels_last'.
Output shape:
4D tensor with shape:
`(batch_size, filters, new_rows, new_cols)` if data_format='channels_first'
or 4D tensor with shape:
`(batch_size, new_rows, new_cols, filters)` if data_format='channels_last'.
`rows` and `cols` values might have changed due to padding.
Returns:
A tensor of rank 4 representing
`activation(conv2d(inputs, kernel) + bias)`.
Raises:
ValueError: if `padding` is "causal".
ValueError: when both `strides` > 1 and `dilation_rate` > 1.
"""
def __init__(self,
filters,
kernel_size,
strides=(1, 1),
padding='valid',
data_format=None,
dilation_rate=(1, 1),
activation=None,
use_bias=True,
kernel_initializer='glorot_uniform',
bias_initializer='zeros',
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
**kwargs):
super(Conv2D, self).__init__(
rank=2,
filters=filters,
kernel_size=kernel_size,
strides=strides,
padding=padding,
data_format=data_format,
dilation_rate=dilation_rate,
activation=activations.get(activation),
use_bias=use_bias,
kernel_initializer=initializers.get(kernel_initializer),
bias_initializer=initializers.get(bias_initializer),
kernel_regularizer=regularizers.get(kernel_regularizer),
bias_regularizer=regularizers.get(bias_regularizer),
activity_regularizer=regularizers.get(activity_regularizer),
kernel_constraint=constraints.get(kernel_constraint),
bias_constraint=constraints.get(bias_constraint),
**kwargs)
@keras_export('keras.layers.Conv3D', 'keras.layers.Convolution3D')
class Conv3D(Conv):
"""3D convolution layer (e.g. spatial convolution over volumes).
This layer creates a convolution kernel that is convolved
with the layer input to produce a tensor of
outputs. If `use_bias` is True,
a bias vector is created and added to the outputs. Finally, if
`activation` is not `None`, it is applied to the outputs as well.
When using this layer as the first layer in a model,
provide the keyword argument `input_shape`
(tuple of integers, does not include the sample axis),
e.g. `input_shape=(128, 128, 128, 1)` for 128x128x128 volumes
with a single channel,
in `data_format="channels_last"`.
Examples:
>>> # The inputs are 28x28x28 volumes with a single channel, and the
>>> # batch size is 4
>>> input_shape =(4, 28, 28, 28, 1)
>>> x = tf.random.normal(input_shape)
>>> y = tf.keras.layers.Conv3D(
... 2, 3, activation='relu', input_shape=input_shape)(x)
>>> print(y.shape)
(4, 26, 26, 26, 2)
Arguments:
filters: Integer, the dimensionality of the output space
(i.e. the number of output filters in the convolution).
kernel_size: An integer or tuple/list of 3 integers, specifying the
depth, height and width of the 3D convolution window.
Can be a single integer to specify the same value for
all spatial dimensions.
strides: An integer or tuple/list of 3 integers,
specifying the strides of the convolution along each spatial
dimension.
Can be a single integer to specify the same value for
all spatial dimensions.
Specifying any stride value != 1 is incompatible with specifying
any `dilation_rate` value != 1.
padding: one of `"valid"` or `"same"` (case-insensitive).
data_format: A string,
one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch_size, spatial_dim1, spatial_dim2, spatial_dim3, channels)`
while `channels_first` corresponds to inputs with shape
`(batch_size, channels, spatial_dim1, spatial_dim2, spatial_dim3)`.
It defaults to the `image_data_format` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be "channels_last".
dilation_rate: an integer or tuple/list of 3 integers, specifying
the dilation rate to use for dilated convolution.
Can be a single integer to specify the same value for
all spatial dimensions.
Currently, specifying any `dilation_rate` value != 1 is
incompatible with specifying any stride value != 1.
activation: Activation function to use.
If you don't specify anything, no activation is applied (
see `keras.activations`).
use_bias: Boolean, whether the layer uses a bias vector.
kernel_initializer: Initializer for the `kernel` weights matrix (
see `keras.initializers`).
bias_initializer: Initializer for the bias vector (
see `keras.initializers`).
kernel_regularizer: Regularizer function applied to
the `kernel` weights matrix (
see `keras.regularizers`).
bias_regularizer: Regularizer function applied to the bias vector (
see `keras.regularizers`).
activity_regularizer: Regularizer function applied to
the output of the layer (its "activation") (
see `keras.regularizers`).
kernel_constraint: Constraint function applied to the kernel matrix (
see `keras.constraints`).
bias_constraint: Constraint function applied to the bias vector (
see `keras.constraints`).
Input shape:
5D tensor with shape:
`(batch_size, channels, conv_dim1, conv_dim2, conv_dim3)` if
data_format='channels_first'
or 5D tensor with shape:
`(batch_size, conv_dim1, conv_dim2, conv_dim3, channels)` if
data_format='channels_last'.
Output shape:
5D tensor with shape:
`(batch_size, filters, new_conv_dim1, new_conv_dim2, new_conv_dim3)` if
data_format='channels_first'
or 5D tensor with shape:
`(batch_size, new_conv_dim1, new_conv_dim2, new_conv_dim3, filters)` if
data_format='channels_last'.
`new_conv_dim1`, `new_conv_dim2` and `new_conv_dim3` values might have
changed due to padding.
Returns:
A tensor of rank 5 representing
`activation(conv3d(inputs, kernel) + bias)`.
Raises:
ValueError: if `padding` is "causal".
ValueError: when both `strides` > 1 and `dilation_rate` > 1.
"""
def __init__(self,
filters,
kernel_size,
strides=(1, 1, 1),
padding='valid',
data_format=None,
dilation_rate=(1, 1, 1),
activation=None,
use_bias=True,
kernel_initializer='glorot_uniform',
bias_initializer='zeros',
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
**kwargs):
super(Conv3D, self).__init__(
rank=3,
filters=filters,
kernel_size=kernel_size,
strides=strides,
padding=padding,
data_format=data_format,
dilation_rate=dilation_rate,
activation=activations.get(activation),
use_bias=use_bias,
kernel_initializer=initializers.get(kernel_initializer),
bias_initializer=initializers.get(bias_initializer),
kernel_regularizer=regularizers.get(kernel_regularizer),
bias_regularizer=regularizers.get(bias_regularizer),
activity_regularizer=regularizers.get(activity_regularizer),
kernel_constraint=constraints.get(kernel_constraint),
bias_constraint=constraints.get(bias_constraint),
**kwargs)
@keras_export('keras.layers.Conv1DTranspose',
'keras.layers.Convolution1DTranspose')
class Conv1DTranspose(Conv1D):
"""Transposed convolution layer (sometimes called Deconvolution).
The need for transposed convolutions generally arises
from the desire to use a transformation going in the opposite direction
of a normal convolution, i.e., from something that has the shape of the
output of some convolution to something that has the shape of its input
while maintaining a connectivity pattern that is compatible with
said convolution.
When using this layer as the first layer in a model,
provide the keyword argument `input_shape`
(tuple of integers, does not include the sample axis),
e.g. `input_shape=(128, 3)` for data with 128 time steps and 3 channels.
Arguments:
filters: Integer, the dimensionality of the output space
(i.e. the number of output filters in the convolution).
    kernel_size: An integer specifying the length of the 1D convolution window.
strides: An integer specifying the stride of the convolution along the
time dimension. Specifying a stride value != 1 is incompatible with
specifying a `dilation_rate` value != 1. Defaults to 1.
padding: one of `"valid"` or `"same"` (case-insensitive).
output_padding: An integer specifying the amount of padding along
the time dimension of the output tensor.
The amount of output padding must be lower than the stride.
If set to `None` (default), the output shape is inferred.
data_format: A string, one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch_size, length, channels)` while `channels_first` corresponds to
inputs with shape `(batch_size, channels, length)`.
dilation_rate: an integer, specifying
the dilation rate to use for dilated convolution.
Currently, specifying a `dilation_rate` value != 1 is
incompatible with specifying a stride value != 1.
activation: Activation function to use.
If you don't specify anything, no activation is applied (
see `keras.activations`).
use_bias: Boolean, whether the layer uses a bias vector.
kernel_initializer: Initializer for the `kernel` weights matrix (
see `keras.initializers`).
bias_initializer: Initializer for the bias vector (
see `keras.initializers`).
kernel_regularizer: Regularizer function applied to
the `kernel` weights matrix (see `keras.regularizers`).
bias_regularizer: Regularizer function applied to the bias vector (
see `keras.regularizers`).
activity_regularizer: Regularizer function applied to
the output of the layer (its "activation") (see `keras.regularizers`).
kernel_constraint: Constraint function applied to the kernel matrix (
see `keras.constraints`).
bias_constraint: Constraint function applied to the bias vector (
see `keras.constraints`).
Input shape:
3D tensor with shape:
`(batch_size, steps, channels)`
Output shape:
3D tensor with shape:
`(batch_size, new_steps, filters)`
If `output_padding` is specified:
```
new_timesteps = ((timesteps - 1) * strides + kernel_size -
2 * padding + output_padding)
```
Returns:
A tensor of rank 3 representing
`activation(conv1dtranspose(inputs, kernel) + bias)`.
Raises:
ValueError: if `padding` is "causal".
ValueError: when both `strides` > 1 and `dilation_rate` > 1.
References:
- [A guide to convolution arithmetic for deep learning](
https://arxiv.org/abs/1603.07285v1)
- [Deconvolutional Networks](
https://www.matthewzeiler.com/mattzeiler/deconvolutionalnetworks.pdf)
"""
def __init__(self,
filters,
kernel_size,
strides=1,
padding='valid',
output_padding=None,
data_format=None,
dilation_rate=1,
activation=None,
use_bias=True,
kernel_initializer='glorot_uniform',
bias_initializer='zeros',
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
**kwargs):
super(Conv1DTranspose, self).__init__(
filters=filters,
kernel_size=kernel_size,
strides=strides,
padding=padding,
data_format=data_format,
dilation_rate=dilation_rate,
activation=activations.get(activation),
use_bias=use_bias,
kernel_initializer=initializers.get(kernel_initializer),
bias_initializer=initializers.get(bias_initializer),
kernel_regularizer=regularizers.get(kernel_regularizer),
bias_regularizer=regularizers.get(bias_regularizer),
activity_regularizer=regularizers.get(activity_regularizer),
kernel_constraint=constraints.get(kernel_constraint),
bias_constraint=constraints.get(bias_constraint),
**kwargs)
self.output_padding = output_padding
if self.output_padding is not None:
self.output_padding = conv_utils.normalize_tuple(
self.output_padding, 1, 'output_padding')
for stride, out_pad in zip(self.strides, self.output_padding):
if out_pad >= stride:
raise ValueError('Stride ' + str(self.strides) + ' must be '
'greater than output padding ' +
str(self.output_padding))
def build(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape)
if len(input_shape) != 3:
raise ValueError('Inputs should have rank 3. Received input shape: ' +
str(input_shape))
channel_axis = self._get_channel_axis()
if input_shape.dims[channel_axis].value is None:
raise ValueError('The channel dimension of the inputs '
'should be defined. Found `None`.')
input_dim = int(input_shape[channel_axis])
self.input_spec = InputSpec(ndim=3, axes={channel_axis: input_dim})
kernel_shape = self.kernel_size + (self.filters, input_dim)
self.kernel = self.add_weight(
name='kernel',
shape=kernel_shape,
initializer=self.kernel_initializer,
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint,
trainable=True,
dtype=self.dtype)
if self.use_bias:
self.bias = self.add_weight(
name='bias',
shape=(self.filters,),
initializer=self.bias_initializer,
regularizer=self.bias_regularizer,
constraint=self.bias_constraint,
trainable=True,
dtype=self.dtype)
else:
self.bias = None
self.built = True
def call(self, inputs):
inputs_shape = array_ops.shape(inputs)
batch_size = inputs_shape[0]
if self.data_format == 'channels_first':
t_axis = 2
else:
t_axis = 1
length = inputs_shape[t_axis]
if self.output_padding is None:
output_padding = None
else:
output_padding = self.output_padding[0]
# Infer the dynamic output shape:
out_length = conv_utils.deconv_output_length(
length, self.kernel_size[0], padding=self.padding,
output_padding=output_padding, stride=self.strides[0],
dilation=self.dilation_rate[0])
if self.data_format == 'channels_first':
output_shape = (batch_size, self.filters, out_length)
else:
output_shape = (batch_size, out_length, self.filters)
data_format = conv_utils.convert_data_format(self.data_format, ndim=3)
output_shape_tensor = array_ops.stack(output_shape)
outputs = nn_ops.conv1d_transpose(
inputs,
self.kernel,
output_shape_tensor,
strides=self.strides,
padding=self.padding.upper(),
data_format=data_format,
dilations=self.dilation_rate)
if not context.executing_eagerly():
# Infer the static output shape:
out_shape = self.compute_output_shape(inputs.shape)
outputs.set_shape(out_shape)
if self.use_bias:
outputs = nn.bias_add(
outputs,
self.bias,
data_format=data_format)
if self.activation is not None:
return self.activation(outputs)
return outputs
def compute_output_shape(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape).as_list()
output_shape = list(input_shape)
if self.data_format == 'channels_first':
c_axis, t_axis = 1, 2
else:
c_axis, t_axis = 2, 1
if self.output_padding is None:
output_padding = None
else:
output_padding = self.output_padding[0]
output_shape[c_axis] = self.filters
output_shape[t_axis] = conv_utils.deconv_output_length(
output_shape[t_axis],
self.kernel_size[0],
padding=self.padding,
output_padding=output_padding,
stride=self.strides[0],
dilation=self.dilation_rate[0])
return tensor_shape.TensorShape(output_shape)
def get_config(self):
config = super(Conv1DTranspose, self).get_config()
config['output_padding'] = self.output_padding
return config
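# Illustrative sketch (not part of the original module): with `padding='same'`
# and no `output_padding`, the transposed convolution simply multiplies the
# time dimension by the stride.
#
#   >>> x = tf.random.normal((4, 10, 128))
#   >>> y = tf.keras.layers.Conv1DTranspose(32, 3, strides=2, padding='same')(x)
#   >>> print(y.shape)
#   (4, 20, 32)
#
# With `padding='valid'` the same input gives length 10 * 2 + max(3 - 2, 0) = 21,
# the deconv_output_length rule used in call() and compute_output_shape().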
@keras_export('keras.layers.Conv2DTranspose',
'keras.layers.Convolution2DTranspose')
class Conv2DTranspose(Conv2D):
"""Transposed convolution layer (sometimes called Deconvolution).
The need for transposed convolutions generally arises
from the desire to use a transformation going in the opposite direction
of a normal convolution, i.e., from something that has the shape of the
output of some convolution to something that has the shape of its input
while maintaining a connectivity pattern that is compatible with
said convolution.
When using this layer as the first layer in a model,
provide the keyword argument `input_shape`
(tuple of integers, does not include the sample axis),
e.g. `input_shape=(128, 128, 3)` for 128x128 RGB pictures
in `data_format="channels_last"`.
Arguments:
filters: Integer, the dimensionality of the output space
(i.e. the number of output filters in the convolution).
kernel_size: An integer or tuple/list of 2 integers, specifying the
height and width of the 2D convolution window.
Can be a single integer to specify the same value for
all spatial dimensions.
strides: An integer or tuple/list of 2 integers,
specifying the strides of the convolution along the height and width.
Can be a single integer to specify the same value for
all spatial dimensions.
Specifying any stride value != 1 is incompatible with specifying
any `dilation_rate` value != 1.
padding: one of `"valid"` or `"same"` (case-insensitive).
output_padding: An integer or tuple/list of 2 integers,
specifying the amount of padding along the height and width
of the output tensor.
Can be a single integer to specify the same value for all
spatial dimensions.
The amount of output padding along a given dimension must be
lower than the stride along that same dimension.
If set to `None` (default), the output shape is inferred.
data_format: A string,
one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch_size, height, width, channels)` while `channels_first`
corresponds to inputs with shape
`(batch_size, channels, height, width)`.
It defaults to the `image_data_format` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be "channels_last".
dilation_rate: an integer or tuple/list of 2 integers, specifying
the dilation rate to use for dilated convolution.
Can be a single integer to specify the same value for
all spatial dimensions.
Currently, specifying any `dilation_rate` value != 1 is
incompatible with specifying any stride value != 1.
activation: Activation function to use.
If you don't specify anything, no activation is applied (
see `keras.activations`).
use_bias: Boolean, whether the layer uses a bias vector.
kernel_initializer: Initializer for the `kernel` weights matrix (
see `keras.initializers`).
bias_initializer: Initializer for the bias vector (
see `keras.initializers`).
kernel_regularizer: Regularizer function applied to
the `kernel` weights matrix (see `keras.regularizers`).
bias_regularizer: Regularizer function applied to the bias vector (
see `keras.regularizers`).
activity_regularizer: Regularizer function applied to
the output of the layer (its "activation") (see `keras.regularizers`).
kernel_constraint: Constraint function applied to the kernel matrix (
see `keras.constraints`).
bias_constraint: Constraint function applied to the bias vector (
see `keras.constraints`).
Input shape:
4D tensor with shape:
`(batch_size, channels, rows, cols)` if data_format='channels_first'
or 4D tensor with shape:
`(batch_size, rows, cols, channels)` if data_format='channels_last'.
Output shape:
4D tensor with shape:
`(batch_size, filters, new_rows, new_cols)` if data_format='channels_first'
or 4D tensor with shape:
`(batch_size, new_rows, new_cols, filters)` if data_format='channels_last'.
`rows` and `cols` values might have changed due to padding.
If `output_padding` is specified:
```
new_rows = ((rows - 1) * strides[0] + kernel_size[0] - 2 * padding[0] +
output_padding[0])
new_cols = ((cols - 1) * strides[1] + kernel_size[1] - 2 * padding[1] +
output_padding[1])
```
Returns:
A tensor of rank 4 representing
`activation(conv2dtranspose(inputs, kernel) + bias)`.
Raises:
ValueError: if `padding` is "causal".
ValueError: when both `strides` > 1 and `dilation_rate` > 1.
References:
- [A guide to convolution arithmetic for deep
learning](https://arxiv.org/abs/1603.07285v1)
- [Deconvolutional
Networks](https://www.matthewzeiler.com/mattzeiler/deconvolutionalnetworks.pdf)
"""
def __init__(self,
filters,
kernel_size,
strides=(1, 1),
padding='valid',
output_padding=None,
data_format=None,
dilation_rate=(1, 1),
activation=None,
use_bias=True,
kernel_initializer='glorot_uniform',
bias_initializer='zeros',
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
**kwargs):
super(Conv2DTranspose, self).__init__(
filters=filters,
kernel_size=kernel_size,
strides=strides,
padding=padding,
data_format=data_format,
dilation_rate=dilation_rate,
activation=activations.get(activation),
use_bias=use_bias,
kernel_initializer=initializers.get(kernel_initializer),
bias_initializer=initializers.get(bias_initializer),
kernel_regularizer=regularizers.get(kernel_regularizer),
bias_regularizer=regularizers.get(bias_regularizer),
activity_regularizer=regularizers.get(activity_regularizer),
kernel_constraint=constraints.get(kernel_constraint),
bias_constraint=constraints.get(bias_constraint),
**kwargs)
self.output_padding = output_padding
if self.output_padding is not None:
self.output_padding = conv_utils.normalize_tuple(
self.output_padding, 2, 'output_padding')
for stride, out_pad in zip(self.strides, self.output_padding):
if out_pad >= stride:
raise ValueError('Stride ' + str(self.strides) + ' must be '
'greater than output padding ' +
str(self.output_padding))
def build(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape)
if len(input_shape) != 4:
raise ValueError('Inputs should have rank 4. Received input shape: ' +
str(input_shape))
channel_axis = self._get_channel_axis()
if input_shape.dims[channel_axis].value is None:
raise ValueError('The channel dimension of the inputs '
'should be defined. Found `None`.')
input_dim = int(input_shape[channel_axis])
self.input_spec = InputSpec(ndim=4, axes={channel_axis: input_dim})
kernel_shape = self.kernel_size + (self.filters, input_dim)
self.kernel = self.add_weight(
name='kernel',
shape=kernel_shape,
initializer=self.kernel_initializer,
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint,
trainable=True,
dtype=self.dtype)
if self.use_bias:
self.bias = self.add_weight(
name='bias',
shape=(self.filters,),
initializer=self.bias_initializer,
regularizer=self.bias_regularizer,
constraint=self.bias_constraint,
trainable=True,
dtype=self.dtype)
else:
self.bias = None
self.built = True
def call(self, inputs):
inputs_shape = array_ops.shape(inputs)
batch_size = inputs_shape[0]
if self.data_format == 'channels_first':
h_axis, w_axis = 2, 3
else:
h_axis, w_axis = 1, 2
    # Use the constant height and width when possible.
# TODO(scottzhu): Extract this into a utility function that can be applied
# to all convolutional layers, which currently lost the static shape
# information due to tf.shape().
height, width = None, None
if inputs.shape.rank is not None:
dims = inputs.shape.as_list()
height = dims[h_axis]
width = dims[w_axis]
height = height if height is not None else inputs_shape[h_axis]
width = width if width is not None else inputs_shape[w_axis]
kernel_h, kernel_w = self.kernel_size
stride_h, stride_w = self.strides
if self.output_padding is None:
out_pad_h = out_pad_w = None
else:
out_pad_h, out_pad_w = self.output_padding
# Infer the dynamic output shape:
out_height = conv_utils.deconv_output_length(height,
kernel_h,
padding=self.padding,
output_padding=out_pad_h,
stride=stride_h,
dilation=self.dilation_rate[0])
out_width = conv_utils.deconv_output_length(width,
kernel_w,
padding=self.padding,
output_padding=out_pad_w,
stride=stride_w,
dilation=self.dilation_rate[1])
if self.data_format == 'channels_first':
output_shape = (batch_size, self.filters, out_height, out_width)
else:
output_shape = (batch_size, out_height, out_width, self.filters)
output_shape_tensor = array_ops.stack(output_shape)
outputs = backend.conv2d_transpose(
inputs,
self.kernel,
output_shape_tensor,
strides=self.strides,
padding=self.padding,
data_format=self.data_format,
dilation_rate=self.dilation_rate)
if not context.executing_eagerly():
# Infer the static output shape:
out_shape = self.compute_output_shape(inputs.shape)
outputs.set_shape(out_shape)
if self.use_bias:
outputs = nn.bias_add(
outputs,
self.bias,
data_format=conv_utils.convert_data_format(self.data_format, ndim=4))
if self.activation is not None:
return self.activation(outputs)
return outputs
def compute_output_shape(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape).as_list()
output_shape = list(input_shape)
if self.data_format == 'channels_first':
c_axis, h_axis, w_axis = 1, 2, 3
else:
c_axis, h_axis, w_axis = 3, 1, 2
kernel_h, kernel_w = self.kernel_size
stride_h, stride_w = self.strides
if self.output_padding is None:
out_pad_h = out_pad_w = None
else:
out_pad_h, out_pad_w = self.output_padding
output_shape[c_axis] = self.filters
output_shape[h_axis] = conv_utils.deconv_output_length(
output_shape[h_axis],
kernel_h,
padding=self.padding,
output_padding=out_pad_h,
stride=stride_h,
dilation=self.dilation_rate[0])
output_shape[w_axis] = conv_utils.deconv_output_length(
output_shape[w_axis],
kernel_w,
padding=self.padding,
output_padding=out_pad_w,
stride=stride_w,
dilation=self.dilation_rate[1])
return tensor_shape.TensorShape(output_shape)
def get_config(self):
config = super(Conv2DTranspose, self).get_config()
config['output_padding'] = self.output_padding
return config
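# Illustrative sketch (not part of the original module): Conv2DTranspose is the
# usual building block for upsampling in decoders; `padding='same'` with stride 2
# doubles both spatial dimensions.
#
#   >>> x = tf.random.normal((4, 28, 28, 3))
#   >>> y = tf.keras.layers.Conv2DTranspose(2, 3, strides=2, padding='same')(x)
#   >>> print(y.shape)
#   (4, 56, 56, 2)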
@keras_export('keras.layers.Conv3DTranspose',
'keras.layers.Convolution3DTranspose')
class Conv3DTranspose(Conv3D):
"""Transposed convolution layer (sometimes called Deconvolution).
The need for transposed convolutions generally arises
from the desire to use a transformation going in the opposite direction
of a normal convolution, i.e., from something that has the shape of the
output of some convolution to something that has the shape of its input
while maintaining a connectivity pattern that is compatible with
said convolution.
When using this layer as the first layer in a model,
provide the keyword argument `input_shape`
(tuple of integers, does not include the sample axis),
e.g. `input_shape=(128, 128, 128, 3)` for a 128x128x128 volume with 3 channels
if `data_format="channels_last"`.
Arguments:
filters: Integer, the dimensionality of the output space
(i.e. the number of output filters in the convolution).
kernel_size: An integer or tuple/list of 3 integers, specifying the
depth, height and width of the 3D convolution window.
Can be a single integer to specify the same value for
all spatial dimensions.
strides: An integer or tuple/list of 3 integers,
specifying the strides of the convolution along the depth, height
and width.
Can be a single integer to specify the same value for
all spatial dimensions.
Specifying any stride value != 1 is incompatible with specifying
any `dilation_rate` value != 1.
padding: one of `"valid"` or `"same"` (case-insensitive).
output_padding: An integer or tuple/list of 3 integers,
specifying the amount of padding along the depth, height, and
width.
Can be a single integer to specify the same value for all
spatial dimensions.
The amount of output padding along a given dimension must be
lower than the stride along that same dimension.
If set to `None` (default), the output shape is inferred.
data_format: A string,
one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch_size, depth, height, width, channels)` while `channels_first`
corresponds to inputs with shape
`(batch_size, channels, depth, height, width)`.
It defaults to the `image_data_format` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be "channels_last".
dilation_rate: an integer or tuple/list of 3 integers, specifying
the dilation rate to use for dilated convolution.
Can be a single integer to specify the same value for
all spatial dimensions.
Currently, specifying any `dilation_rate` value != 1 is
incompatible with specifying any stride value != 1.
activation: Activation function to use.
If you don't specify anything, no activation is applied (
see `keras.activations`).
use_bias: Boolean, whether the layer uses a bias vector.
kernel_initializer: Initializer for the `kernel` weights matrix.
bias_initializer: Initializer for the bias vector.
kernel_regularizer: Regularizer function applied to
the `kernel` weights matrix (
see `keras.regularizers`).
bias_regularizer: Regularizer function applied to the bias vector (
see `keras.regularizers`).
activity_regularizer: Regularizer function applied to
the output of the layer (its "activation") (
see `keras.regularizers`).
kernel_constraint: Constraint function applied to the kernel matrix (
see `keras.constraints`).
bias_constraint: Constraint function applied to the bias vector (
see `keras.constraints`).
Input shape:
5D tensor with shape:
`(batch_size, channels, depth, rows, cols)` if data_format='channels_first'
or 5D tensor with shape:
`(batch_size, depth, rows, cols, channels)` if data_format='channels_last'.
Output shape:
5D tensor with shape:
`(batch_size, filters, new_depth, new_rows, new_cols)` if
data_format='channels_first'
or 5D tensor with shape:
`(batch_size, new_depth, new_rows, new_cols, filters)` if
data_format='channels_last'.
`depth` and `rows` and `cols` values might have changed due to padding.
  If `output_padding` is specified:
```
new_depth = ((depth - 1) * strides[0] + kernel_size[0] - 2 * padding[0] +
output_padding[0])
new_rows = ((rows - 1) * strides[1] + kernel_size[1] - 2 * padding[1] +
output_padding[1])
new_cols = ((cols - 1) * strides[2] + kernel_size[2] - 2 * padding[2] +
output_padding[2])
```
Returns:
A tensor of rank 5 representing
`activation(conv3dtranspose(inputs, kernel) + bias)`.
Raises:
ValueError: if `padding` is "causal".
ValueError: when both `strides` > 1 and `dilation_rate` > 1.
References:
- [A guide to convolution arithmetic for deep
learning](https://arxiv.org/abs/1603.07285v1)
- [Deconvolutional
Networks](https://www.matthewzeiler.com/mattzeiler/deconvolutionalnetworks.pdf)
"""
def __init__(self,
filters,
kernel_size,
strides=(1, 1, 1),
padding='valid',
output_padding=None,
data_format=None,
dilation_rate=(1, 1, 1),
activation=None,
use_bias=True,
kernel_initializer='glorot_uniform',
bias_initializer='zeros',
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
**kwargs):
super(Conv3DTranspose, self).__init__(
filters=filters,
kernel_size=kernel_size,
strides=strides,
padding=padding,
data_format=data_format,
dilation_rate=dilation_rate,
activation=activations.get(activation),
use_bias=use_bias,
kernel_initializer=initializers.get(kernel_initializer),
bias_initializer=initializers.get(bias_initializer),
kernel_regularizer=regularizers.get(kernel_regularizer),
bias_regularizer=regularizers.get(bias_regularizer),
activity_regularizer=regularizers.get(activity_regularizer),
kernel_constraint=constraints.get(kernel_constraint),
bias_constraint=constraints.get(bias_constraint),
**kwargs)
self.output_padding = output_padding
if self.output_padding is not None:
self.output_padding = conv_utils.normalize_tuple(
self.output_padding, 3, 'output_padding')
for stride, out_pad in zip(self.strides, self.output_padding):
if out_pad >= stride:
raise ValueError('Stride ' + str(self.strides) + ' must be '
'greater than output padding ' +
str(self.output_padding))
def build(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape)
if len(input_shape) != 5:
      raise ValueError('Inputs should have rank 5. Received input shape: ' +
                       str(input_shape))
channel_axis = self._get_channel_axis()
if input_shape.dims[channel_axis].value is None:
raise ValueError('The channel dimension of the inputs '
'should be defined, found None: ' + str(input_shape))
input_dim = int(input_shape[channel_axis])
kernel_shape = self.kernel_size + (self.filters, input_dim)
self.input_spec = InputSpec(ndim=5, axes={channel_axis: input_dim})
self.kernel = self.add_weight(
'kernel',
shape=kernel_shape,
initializer=self.kernel_initializer,
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint,
trainable=True,
dtype=self.dtype)
if self.use_bias:
self.bias = self.add_weight(
'bias',
shape=(self.filters,),
initializer=self.bias_initializer,
regularizer=self.bias_regularizer,
constraint=self.bias_constraint,
trainable=True,
dtype=self.dtype)
else:
self.bias = None
self.built = True
def call(self, inputs):
inputs_shape = array_ops.shape(inputs)
batch_size = inputs_shape[0]
if self.data_format == 'channels_first':
d_axis, h_axis, w_axis = 2, 3, 4
else:
d_axis, h_axis, w_axis = 1, 2, 3
depth = inputs_shape[d_axis]
height = inputs_shape[h_axis]
width = inputs_shape[w_axis]
kernel_d, kernel_h, kernel_w = self.kernel_size
stride_d, stride_h, stride_w = self.strides
if self.output_padding is None:
out_pad_d = out_pad_h = out_pad_w = None
else:
out_pad_d, out_pad_h, out_pad_w = self.output_padding
# Infer the dynamic output shape:
out_depth = conv_utils.deconv_output_length(depth,
kernel_d,
padding=self.padding,
output_padding=out_pad_d,
stride=stride_d)
out_height = conv_utils.deconv_output_length(height,
kernel_h,
padding=self.padding,
output_padding=out_pad_h,
stride=stride_h)
out_width = conv_utils.deconv_output_length(width,
kernel_w,
padding=self.padding,
output_padding=out_pad_w,
stride=stride_w)
if self.data_format == 'channels_first':
output_shape = (batch_size, self.filters, out_depth, out_height,
out_width)
strides = (1, 1, stride_d, stride_h, stride_w)
else:
output_shape = (batch_size, out_depth, out_height, out_width,
self.filters)
strides = (1, stride_d, stride_h, stride_w, 1)
output_shape_tensor = array_ops.stack(output_shape)
outputs = nn.conv3d_transpose(
inputs,
self.kernel,
output_shape_tensor,
strides,
data_format=conv_utils.convert_data_format(self.data_format, ndim=5),
padding=self.padding.upper())
if not context.executing_eagerly():
# Infer the static output shape:
out_shape = self.compute_output_shape(inputs.shape)
outputs.set_shape(out_shape)
if self.use_bias:
outputs = nn.bias_add(
outputs,
self.bias,
data_format=conv_utils.convert_data_format(self.data_format, ndim=4))
if self.activation is not None:
return self.activation(outputs)
return outputs
def compute_output_shape(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape).as_list()
output_shape = list(input_shape)
if self.data_format == 'channels_first':
c_axis, d_axis, h_axis, w_axis = 1, 2, 3, 4
else:
c_axis, d_axis, h_axis, w_axis = 4, 1, 2, 3
kernel_d, kernel_h, kernel_w = self.kernel_size
stride_d, stride_h, stride_w = self.strides
if self.output_padding is None:
out_pad_d = out_pad_h = out_pad_w = None
else:
out_pad_d, out_pad_h, out_pad_w = self.output_padding
output_shape[c_axis] = self.filters
output_shape[d_axis] = conv_utils.deconv_output_length(
output_shape[d_axis],
kernel_d,
padding=self.padding,
output_padding=out_pad_d,
stride=stride_d)
output_shape[h_axis] = conv_utils.deconv_output_length(
output_shape[h_axis],
kernel_h,
padding=self.padding,
output_padding=out_pad_h,
stride=stride_h)
output_shape[w_axis] = conv_utils.deconv_output_length(
output_shape[w_axis],
kernel_w,
padding=self.padding,
output_padding=out_pad_w,
stride=stride_w)
return tensor_shape.TensorShape(output_shape)
def get_config(self):
config = super(Conv3DTranspose, self).get_config()
config.pop('dilation_rate')
config['output_padding'] = self.output_padding
return config
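# Illustrative sketch (not part of the original module): the 3D case follows the
# same per-axis shape rule, so stride 2 with `padding='same'` doubles depth,
# height and width.
#
#   >>> x = tf.random.normal((4, 8, 8, 8, 3))
#   >>> y = tf.keras.layers.Conv3DTranspose(2, 3, strides=2, padding='same')(x)
#   >>> print(y.shape)
#   (4, 16, 16, 16, 2)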
class SeparableConv(Conv):
"""Abstract base layer for separable nD convolution.
This layer performs a depthwise convolution that acts separately on
channels, followed by a pointwise convolution that mixes channels.
If `use_bias` is True and a bias initializer is provided,
it adds a bias vector to the output.
It then optionally applies an activation function to produce the final output.
Arguments:
rank: An integer, the rank of the convolution, e.g. "2" for 2D convolution.
filters: Integer, the dimensionality of the output space (i.e. the number
of filters in the convolution).
kernel_size: A tuple or list of integers specifying the spatial
dimensions of the filters. Can be a single integer to specify the same
value for all spatial dimensions.
strides: A tuple or list of integers specifying the strides
of the convolution. Can be a single integer to specify the same value for
all spatial dimensions.
Specifying any `stride` value != 1 is incompatible with specifying
any `dilation_rate` value != 1.
padding: One of `"valid"` or `"same"` (case-insensitive).
data_format: A string, one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch_size, ..., channels)` while `channels_first` corresponds to
inputs with shape `(batch_size, channels, ...)`.
dilation_rate: An integer or tuple/list of 2 integers, specifying
the dilation rate to use for dilated convolution.
Can be a single integer to specify the same value for
all spatial dimensions.
Currently, specifying any `dilation_rate` value != 1 is
incompatible with specifying any stride value != 1.
depth_multiplier: The number of depthwise convolution output channels for
each input channel. The total number of depthwise convolution output
channels will be equal to `num_filters_in * depth_multiplier`.
activation: Activation function to use.
If you don't specify anything, no activation is applied (
see `keras.activations`).
use_bias: Boolean, whether the layer uses a bias.
depthwise_initializer: An initializer for the depthwise convolution kernel.
pointwise_initializer: An initializer for the pointwise convolution kernel.
bias_initializer: An initializer for the bias vector. If None, the default
initializer will be used.
depthwise_regularizer: Optional regularizer for the depthwise
convolution kernel.
pointwise_regularizer: Optional regularizer for the pointwise
convolution kernel.
bias_regularizer: Optional regularizer for the bias vector.
activity_regularizer: Optional regularizer function for the output.
depthwise_constraint: Optional projection function to be applied to the
depthwise kernel after being updated by an `Optimizer` (e.g. used for
norm constraints or value constraints for layer weights). The function
must take as input the unprojected variable and must return the
projected variable (which must have the same shape). Constraints are
not safe to use when doing asynchronous distributed training.
pointwise_constraint: Optional projection function to be applied to the
pointwise kernel after being updated by an `Optimizer`.
bias_constraint: Optional projection function to be applied to the
bias after being updated by an `Optimizer`.
trainable: Boolean, if `True` the weights of this layer will be marked as
trainable (and listed in `layer.trainable_weights`).
name: A string, the name of the layer.
"""
def __init__(self,
rank,
filters,
kernel_size,
strides=1,
padding='valid',
data_format=None,
dilation_rate=1,
depth_multiplier=1,
activation=None,
use_bias=True,
depthwise_initializer='glorot_uniform',
pointwise_initializer='glorot_uniform',
bias_initializer='zeros',
depthwise_regularizer=None,
pointwise_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
depthwise_constraint=None,
pointwise_constraint=None,
bias_constraint=None,
trainable=True,
name=None,
**kwargs):
super(SeparableConv, self).__init__(
rank=rank,
filters=filters,
kernel_size=kernel_size,
strides=strides,
padding=padding,
data_format=data_format,
dilation_rate=dilation_rate,
activation=activations.get(activation),
use_bias=use_bias,
bias_initializer=initializers.get(bias_initializer),
bias_regularizer=regularizers.get(bias_regularizer),
activity_regularizer=regularizers.get(activity_regularizer),
bias_constraint=bias_constraint,
trainable=trainable,
name=name,
**kwargs)
self.depth_multiplier = depth_multiplier
self.depthwise_initializer = initializers.get(depthwise_initializer)
self.pointwise_initializer = initializers.get(pointwise_initializer)
self.depthwise_regularizer = regularizers.get(depthwise_regularizer)
self.pointwise_regularizer = regularizers.get(pointwise_regularizer)
self.depthwise_constraint = constraints.get(depthwise_constraint)
self.pointwise_constraint = constraints.get(pointwise_constraint)
def build(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape)
channel_axis = self._get_channel_axis()
if input_shape.dims[channel_axis].value is None:
raise ValueError('The channel dimension of the inputs '
'should be defined. Found `None`.')
input_dim = int(input_shape[channel_axis])
self.input_spec = InputSpec(ndim=self.rank + 2,
axes={channel_axis: input_dim})
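    # Depthwise kernel: `kernel_size` spatial taps for each of the
    # `input_dim` channels, with `depth_multiplier` filters per channel.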
depthwise_kernel_shape = self.kernel_size + (input_dim,
self.depth_multiplier)
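    # Pointwise kernel: a 1x...x1 convolution that mixes the
    # `depth_multiplier * input_dim` depthwise outputs into `filters` channels.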
pointwise_kernel_shape = (
1,) * self.rank + (self.depth_multiplier * input_dim, self.filters)
self.depthwise_kernel = self.add_weight(
name='depthwise_kernel',
shape=depthwise_kernel_shape,
initializer=self.depthwise_initializer,
regularizer=self.depthwise_regularizer,
constraint=self.depthwise_constraint,
trainable=True,
dtype=self.dtype)
self.pointwise_kernel = self.add_weight(
name='pointwise_kernel',
shape=pointwise_kernel_shape,
initializer=self.pointwise_initializer,
regularizer=self.pointwise_regularizer,
constraint=self.pointwise_constraint,
trainable=True,
dtype=self.dtype)
if self.use_bias:
self.bias = self.add_weight(
name='bias',
shape=(self.filters,),
initializer=self.bias_initializer,
regularizer=self.bias_regularizer,
constraint=self.bias_constraint,
trainable=True,
dtype=self.dtype)
else:
self.bias = None
self.built = True
def call(self, inputs):
raise NotImplementedError
def get_config(self):
config = {
'filters':
self.filters,
'kernel_size':
self.kernel_size,
'strides':
self.strides,
'padding':
self.padding,
'data_format':
self.data_format,
'depth_multiplier':
self.depth_multiplier,
'dilation_rate':
self.dilation_rate,
'activation':
activations.serialize(self.activation),
'use_bias':
self.use_bias,
'depthwise_initializer':
initializers.serialize(self.depthwise_initializer),
'pointwise_initializer':
initializers.serialize(self.pointwise_initializer),
'bias_initializer':
initializers.serialize(self.bias_initializer),
'depthwise_regularizer':
regularizers.serialize(self.depthwise_regularizer),
'pointwise_regularizer':
regularizers.serialize(self.pointwise_regularizer),
'bias_regularizer':
regularizers.serialize(self.bias_regularizer),
'activity_regularizer':
regularizers.serialize(self.activity_regularizer),
'depthwise_constraint':
constraints.serialize(self.depthwise_constraint),
'pointwise_constraint':
constraints.serialize(self.pointwise_constraint),
'bias_constraint':
constraints.serialize(self.bias_constraint)
}
base_config = super(SeparableConv, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@keras_export('keras.layers.SeparableConv1D',
'keras.layers.SeparableConvolution1D')
class SeparableConv1D(SeparableConv):
"""Depthwise separable 1D convolution.
This layer performs a depthwise convolution that acts separately on
channels, followed by a pointwise convolution that mixes channels.
If `use_bias` is True and a bias initializer is provided,
it adds a bias vector to the output.
It then optionally applies an activation function to produce the final output.
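  Examples:
  A minimal shape-only sketch (the input sizes and filter count below are
  illustrative assumptions, not values required by the layer):
  >>> x = tf.random.normal((4, 10, 8))
  >>> y = tf.keras.layers.SeparableConv1D(32, 3)(x)
  >>> print(y.shape)
  (4, 8, 32)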
Arguments:
filters: Integer, the dimensionality of the output space (i.e. the number
of filters in the convolution).
kernel_size: A single integer specifying the spatial
dimensions of the filters.
strides: A single integer specifying the strides
of the convolution.
Specifying any `stride` value != 1 is incompatible with specifying
any `dilation_rate` value != 1.
padding: One of `"valid"`, `"same"`, or `"causal"` (case-insensitive).
data_format: A string, one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch_size, length, channels)` while `channels_first` corresponds to
inputs with shape `(batch_size, channels, length)`.
dilation_rate: A single integer, specifying
the dilation rate to use for dilated convolution.
Currently, specifying any `dilation_rate` value != 1 is
incompatible with specifying any stride value != 1.
depth_multiplier: The number of depthwise convolution output channels for
each input channel. The total number of depthwise convolution output
channels will be equal to `num_filters_in * depth_multiplier`.
activation: Activation function to use.
If you don't specify anything, no activation is applied (
see `keras.activations`).
use_bias: Boolean, whether the layer uses a bias.
depthwise_initializer: An initializer for the depthwise convolution kernel (
see `keras.initializers`).
pointwise_initializer: An initializer for the pointwise convolution kernel (
see `keras.initializers`).
bias_initializer: An initializer for the bias vector. If None, the default
initializer will be used (see `keras.initializers`).
depthwise_regularizer: Optional regularizer for the depthwise
convolution kernel (see `keras.regularizers`).
pointwise_regularizer: Optional regularizer for the pointwise
convolution kernel (see `keras.regularizers`).
bias_regularizer: Optional regularizer for the bias vector (
see `keras.regularizers`).
activity_regularizer: Optional regularizer function for the output (
see `keras.regularizers`).
depthwise_constraint: Optional projection function to be applied to the
depthwise kernel after being updated by an `Optimizer` (e.g. used for
norm constraints or value constraints for layer weights). The function
must take as input the unprojected variable and must return the
projected variable (which must have the same shape). Constraints are
not safe to use when doing asynchronous distributed training (
see `keras.constraints`).
pointwise_constraint: Optional projection function to be applied to the
pointwise kernel after being updated by an `Optimizer` (
see `keras.constraints`).
bias_constraint: Optional projection function to be applied to the
bias after being updated by an `Optimizer` (
see `keras.constraints`).
trainable: Boolean, if `True` the weights of this layer will be marked as
trainable (and listed in `layer.trainable_weights`).
name: A string, the name of the layer.
Input shape:
3D tensor with shape:
`(batch_size, channels, steps)` if data_format='channels_first'
    or 3D tensor with shape:
`(batch_size, steps, channels)` if data_format='channels_last'.
Output shape:
3D tensor with shape:
`(batch_size, filters, new_steps)` if data_format='channels_first'
or 3D tensor with shape:
`(batch_size, new_steps, filters)` if data_format='channels_last'.
`new_steps` value might have changed due to padding or strides.
Returns:
A tensor of rank 3 representing
`activation(separableconv1d(inputs, kernel) + bias)`.
Raises:
ValueError: when both `strides` > 1 and `dilation_rate` > 1.
"""
def __init__(self,
filters,
kernel_size,
strides=1,
padding='valid',
data_format=None,
dilation_rate=1,
depth_multiplier=1,
activation=None,
use_bias=True,
depthwise_initializer='glorot_uniform',
pointwise_initializer='glorot_uniform',
bias_initializer='zeros',
depthwise_regularizer=None,
pointwise_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
depthwise_constraint=None,
pointwise_constraint=None,
bias_constraint=None,
**kwargs):
super(SeparableConv1D, self).__init__(
rank=1,
filters=filters,
kernel_size=kernel_size,
strides=strides,
padding=padding,
data_format=data_format,
dilation_rate=dilation_rate,
depth_multiplier=depth_multiplier,
activation=activations.get(activation),
use_bias=use_bias,
depthwise_initializer=initializers.get(depthwise_initializer),
pointwise_initializer=initializers.get(pointwise_initializer),
bias_initializer=initializers.get(bias_initializer),
depthwise_regularizer=regularizers.get(depthwise_regularizer),
pointwise_regularizer=regularizers.get(pointwise_regularizer),
bias_regularizer=regularizers.get(bias_regularizer),
activity_regularizer=regularizers.get(activity_regularizer),
depthwise_constraint=constraints.get(depthwise_constraint),
pointwise_constraint=constraints.get(pointwise_constraint),
bias_constraint=constraints.get(bias_constraint),
**kwargs)
def call(self, inputs):
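    # Causal padding pads only the left end of the time axis, so each output
    # step depends only on the current and earlier input steps.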
if self.padding == 'causal':
inputs = array_ops.pad(inputs, self._compute_causal_padding())
if self.data_format == 'channels_last':
strides = (1,) + self.strides * 2 + (1,)
spatial_start_dim = 1
else:
strides = (1, 1) + self.strides * 2
spatial_start_dim = 2
# Explicitly broadcast inputs and kernels to 4D.
# TODO(fchollet): refactor when a native separable_conv1d op is available.
inputs = array_ops.expand_dims(inputs, spatial_start_dim)
depthwise_kernel = array_ops.expand_dims(self.depthwise_kernel, 0)
pointwise_kernel = array_ops.expand_dims(self.pointwise_kernel, 0)
dilation_rate = (1,) + self.dilation_rate
if self.padding == 'causal':
op_padding = 'valid'
else:
op_padding = self.padding
outputs = nn.separable_conv2d(
inputs,
depthwise_kernel,
pointwise_kernel,
strides=strides,
padding=op_padding.upper(),
rate=dilation_rate,
data_format=conv_utils.convert_data_format(self.data_format, ndim=4))
if self.use_bias:
outputs = nn.bias_add(
outputs,
self.bias,
data_format=conv_utils.convert_data_format(self.data_format, ndim=4))
outputs = array_ops.squeeze(outputs, [spatial_start_dim])
if self.activation is not None:
return self.activation(outputs)
return outputs
@keras_export('keras.layers.SeparableConv2D',
'keras.layers.SeparableConvolution2D')
class SeparableConv2D(SeparableConv):
"""Depthwise separable 2D convolution.
Separable convolutions consist of first performing
a depthwise spatial convolution
(which acts on each input channel separately)
followed by a pointwise convolution which mixes the resulting
output channels. The `depth_multiplier` argument controls how many
output channels are generated per input channel in the depthwise step.
Intuitively, separable convolutions can be understood as
a way to factorize a convolution kernel into two smaller kernels,
or as an extreme version of an Inception block.
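  Examples:
  A minimal shape-only sketch (the input sizes and filter count below are
  illustrative assumptions, not values required by the layer):
  >>> x = tf.random.normal((4, 28, 28, 3))
  >>> y = tf.keras.layers.SeparableConv2D(16, 3)(x)
  >>> print(y.shape)
  (4, 26, 26, 16)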
Arguments:
filters: Integer, the dimensionality of the output space
(i.e. the number of output filters in the convolution).
kernel_size: An integer or tuple/list of 2 integers, specifying the
height and width of the 2D convolution window.
Can be a single integer to specify the same value for
all spatial dimensions.
strides: An integer or tuple/list of 2 integers,
specifying the strides of the convolution along the height and width.
Can be a single integer to specify the same value for
all spatial dimensions.
Specifying any stride value != 1 is incompatible with specifying
any `dilation_rate` value != 1.
padding: one of `"valid"` or `"same"` (case-insensitive).
data_format: A string,
one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch_size, height, width, channels)` while `channels_first`
corresponds to inputs with shape
`(batch_size, channels, height, width)`.
It defaults to the `image_data_format` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be "channels_last".
dilation_rate: An integer or tuple/list of 2 integers, specifying
the dilation rate to use for dilated convolution.
Currently, specifying any `dilation_rate` value != 1 is
incompatible with specifying any `strides` value != 1.
depth_multiplier: The number of depthwise convolution output channels
for each input channel.
The total number of depthwise convolution output
channels will be equal to `filters_in * depth_multiplier`.
activation: Activation function to use.
If you don't specify anything, no activation is applied (
see `keras.activations`).
use_bias: Boolean, whether the layer uses a bias vector.
depthwise_initializer: Initializer for the depthwise kernel matrix (
see `keras.initializers`).
pointwise_initializer: Initializer for the pointwise kernel matrix (
see `keras.initializers`).
bias_initializer: Initializer for the bias vector (
see `keras.initializers`).
depthwise_regularizer: Regularizer function applied to
the depthwise kernel matrix (see `keras.regularizers`).
pointwise_regularizer: Regularizer function applied to
the pointwise kernel matrix (see `keras.regularizers`).
bias_regularizer: Regularizer function applied to the bias vector (
see `keras.regularizers`).
activity_regularizer: Regularizer function applied to
the output of the layer (its "activation") (
see `keras.regularizers`).
depthwise_constraint: Constraint function applied to
the depthwise kernel matrix (
see `keras.constraints`).
pointwise_constraint: Constraint function applied to
the pointwise kernel matrix (
see `keras.constraints`).
bias_constraint: Constraint function applied to the bias vector (
see `keras.constraints`).
Input shape:
4D tensor with shape:
`(batch_size, channels, rows, cols)` if data_format='channels_first'
or 4D tensor with shape:
`(batch_size, rows, cols, channels)` if data_format='channels_last'.
Output shape:
4D tensor with shape:
`(batch_size, filters, new_rows, new_cols)` if data_format='channels_first'
or 4D tensor with shape:
`(batch_size, new_rows, new_cols, filters)` if data_format='channels_last'.
`rows` and `cols` values might have changed due to padding.
Returns:
A tensor of rank 4 representing
`activation(separableconv2d(inputs, kernel) + bias)`.
Raises:
ValueError: if `padding` is "causal".
ValueError: when both `strides` > 1 and `dilation_rate` > 1.
"""
def __init__(self,
filters,
kernel_size,
strides=(1, 1),
padding='valid',
data_format=None,
dilation_rate=(1, 1),
depth_multiplier=1,
activation=None,
use_bias=True,
depthwise_initializer='glorot_uniform',
pointwise_initializer='glorot_uniform',
bias_initializer='zeros',
depthwise_regularizer=None,
pointwise_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
depthwise_constraint=None,
pointwise_constraint=None,
bias_constraint=None,
**kwargs):
super(SeparableConv2D, self).__init__(
rank=2,
filters=filters,
kernel_size=kernel_size,
strides=strides,
padding=padding,
data_format=data_format,
dilation_rate=dilation_rate,
depth_multiplier=depth_multiplier,
activation=activations.get(activation),
use_bias=use_bias,
depthwise_initializer=initializers.get(depthwise_initializer),
pointwise_initializer=initializers.get(pointwise_initializer),
bias_initializer=initializers.get(bias_initializer),
depthwise_regularizer=regularizers.get(depthwise_regularizer),
pointwise_regularizer=regularizers.get(pointwise_regularizer),
bias_regularizer=regularizers.get(bias_regularizer),
activity_regularizer=regularizers.get(activity_regularizer),
depthwise_constraint=constraints.get(depthwise_constraint),
pointwise_constraint=constraints.get(pointwise_constraint),
bias_constraint=constraints.get(bias_constraint),
**kwargs)
def call(self, inputs):
# Apply the actual ops.
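    # nn.separable_conv2d expects 4 stride values; batch and channel strides
    # are fixed to 1.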
if self.data_format == 'channels_last':
strides = (1,) + self.strides + (1,)
else:
strides = (1, 1) + self.strides
outputs = nn.separable_conv2d(
inputs,
self.depthwise_kernel,
self.pointwise_kernel,
strides=strides,
padding=self.padding.upper(),
rate=self.dilation_rate,
data_format=conv_utils.convert_data_format(self.data_format, ndim=4))
if self.use_bias:
outputs = nn.bias_add(
outputs,
self.bias,
data_format=conv_utils.convert_data_format(self.data_format, ndim=4))
if self.activation is not None:
return self.activation(outputs)
return outputs
@keras_export('keras.layers.DepthwiseConv2D')
class DepthwiseConv2D(Conv2D):
"""Depthwise separable 2D convolution.
Depthwise Separable convolutions consist of performing
just the first step in a depthwise spatial convolution
(which acts on each input channel separately).
The `depth_multiplier` argument controls how many
output channels are generated per input channel in the depthwise step.
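  Examples:
  A minimal shape-only sketch (the input sizes below are illustrative
  assumptions): with 3 input channels and `depth_multiplier=2`, the layer
  produces 3 * 2 = 6 output channels.
  >>> x = tf.random.normal((4, 28, 28, 3))
  >>> y = tf.keras.layers.DepthwiseConv2D(3, depth_multiplier=2)(x)
  >>> print(y.shape)
  (4, 26, 26, 6)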
Arguments:
kernel_size: An integer or tuple/list of 2 integers, specifying the
height and width of the 2D convolution window.
Can be a single integer to specify the same value for
all spatial dimensions.
strides: An integer or tuple/list of 2 integers,
specifying the strides of the convolution along the height and width.
Can be a single integer to specify the same value for
all spatial dimensions.
Specifying any stride value != 1 is incompatible with specifying
any `dilation_rate` value != 1.
padding: one of `'valid'` or `'same'` (case-insensitive).
depth_multiplier: The number of depthwise convolution output channels
for each input channel.
The total number of depthwise convolution output
channels will be equal to `filters_in * depth_multiplier`.
data_format: A string,
one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch_size, height, width, channels)` while `channels_first`
corresponds to inputs with shape
`(batch_size, channels, height, width)`.
It defaults to the `image_data_format` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be 'channels_last'.
dilation_rate: An integer or tuple/list of 2 integers, specifying
the dilation rate to use for dilated convolution.
Currently, specifying any `dilation_rate` value != 1 is
incompatible with specifying any `strides` value != 1.
activation: Activation function to use.
If you don't specify anything, no activation is applied (
see `keras.activations`).
use_bias: Boolean, whether the layer uses a bias vector.
depthwise_initializer: Initializer for the depthwise kernel matrix (
see `keras.initializers`).
bias_initializer: Initializer for the bias vector (
see `keras.initializers`).
depthwise_regularizer: Regularizer function applied to
the depthwise kernel matrix (see `keras.regularizers`).
bias_regularizer: Regularizer function applied to the bias vector (
see `keras.regularizers`).
activity_regularizer: Regularizer function applied to
the output of the layer (its 'activation') (
see `keras.regularizers`).
depthwise_constraint: Constraint function applied to
the depthwise kernel matrix (
see `keras.constraints`).
bias_constraint: Constraint function applied to the bias vector (
see `keras.constraints`).
Input shape:
4D tensor with shape:
`[batch_size, channels, rows, cols]` if data_format='channels_first'
or 4D tensor with shape:
`[batch_size, rows, cols, channels]` if data_format='channels_last'.
Output shape:
4D tensor with shape:
    `[batch_size, channels * depth_multiplier, new_rows, new_cols]`
    if data_format='channels_first'
    or 4D tensor with shape:
    `[batch_size, new_rows, new_cols, channels * depth_multiplier]`
    if data_format='channels_last'.
`rows` and `cols` values might have changed due to padding.
Returns:
A tensor of rank 4 representing
`activation(depthwiseconv2d(inputs, kernel) + bias)`.
Raises:
ValueError: if `padding` is "causal".
ValueError: when both `strides` > 1 and `dilation_rate` > 1.
"""
def __init__(self,
kernel_size,
strides=(1, 1),
padding='valid',
depth_multiplier=1,
data_format=None,
dilation_rate=(1, 1),
activation=None,
use_bias=True,
depthwise_initializer='glorot_uniform',
bias_initializer='zeros',
depthwise_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
depthwise_constraint=None,
bias_constraint=None,
**kwargs):
super(DepthwiseConv2D, self).__init__(
filters=None,
kernel_size=kernel_size,
strides=strides,
padding=padding,
data_format=data_format,
dilation_rate=dilation_rate,
activation=activation,
use_bias=use_bias,
bias_regularizer=bias_regularizer,
activity_regularizer=activity_regularizer,
bias_constraint=bias_constraint,
**kwargs)
self.depth_multiplier = depth_multiplier
self.depthwise_initializer = initializers.get(depthwise_initializer)
self.depthwise_regularizer = regularizers.get(depthwise_regularizer)
self.depthwise_constraint = constraints.get(depthwise_constraint)
self.bias_initializer = initializers.get(bias_initializer)
def build(self, input_shape):
if len(input_shape) < 4:
raise ValueError('Inputs to `DepthwiseConv2D` should have rank 4. '
'Received input shape:', str(input_shape))
input_shape = tensor_shape.TensorShape(input_shape)
channel_axis = self._get_channel_axis()
if input_shape.dims[channel_axis].value is None:
raise ValueError('The channel dimension of the inputs to '
'`DepthwiseConv2D` '
'should be defined. Found `None`.')
input_dim = int(input_shape[channel_axis])
depthwise_kernel_shape = (self.kernel_size[0],
self.kernel_size[1],
input_dim,
self.depth_multiplier)
self.depthwise_kernel = self.add_weight(
shape=depthwise_kernel_shape,
initializer=self.depthwise_initializer,
name='depthwise_kernel',
regularizer=self.depthwise_regularizer,
constraint=self.depthwise_constraint)
if self.use_bias:
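      # One bias term per output channel; a depthwise convolution produces
      # `input_dim * depth_multiplier` output channels.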
self.bias = self.add_weight(shape=(input_dim * self.depth_multiplier,),
initializer=self.bias_initializer,
name='bias',
regularizer=self.bias_regularizer,
constraint=self.bias_constraint)
else:
self.bias = None
# Set input spec.
self.input_spec = InputSpec(ndim=4, axes={channel_axis: input_dim})
self.built = True
def call(self, inputs):
outputs = backend.depthwise_conv2d(
inputs,
self.depthwise_kernel,
strides=self.strides,
padding=self.padding,
dilation_rate=self.dilation_rate,
data_format=self.data_format)
if self.use_bias:
outputs = backend.bias_add(
outputs,
self.bias,
data_format=self.data_format)
if self.activation is not None:
return self.activation(outputs)
return outputs
@tf_utils.shape_type_conversion
def compute_output_shape(self, input_shape):
if self.data_format == 'channels_first':
rows = input_shape[2]
cols = input_shape[3]
out_filters = input_shape[1] * self.depth_multiplier
elif self.data_format == 'channels_last':
rows = input_shape[1]
cols = input_shape[2]
out_filters = input_shape[3] * self.depth_multiplier
rows = conv_utils.conv_output_length(rows, self.kernel_size[0],
self.padding,
self.strides[0],
self.dilation_rate[0])
cols = conv_utils.conv_output_length(cols, self.kernel_size[1],
self.padding,
self.strides[1],
self.dilation_rate[1])
if self.data_format == 'channels_first':
return (input_shape[0], out_filters, rows, cols)
elif self.data_format == 'channels_last':
return (input_shape[0], rows, cols, out_filters)
def get_config(self):
config = super(DepthwiseConv2D, self).get_config()
config.pop('filters')
config.pop('kernel_initializer')
config.pop('kernel_regularizer')
config.pop('kernel_constraint')
config['depth_multiplier'] = self.depth_multiplier
config['depthwise_initializer'] = initializers.serialize(
self.depthwise_initializer)
config['depthwise_regularizer'] = regularizers.serialize(
self.depthwise_regularizer)
config['depthwise_constraint'] = constraints.serialize(
self.depthwise_constraint)
return config
@keras_export('keras.layers.UpSampling1D')
class UpSampling1D(Layer):
"""Upsampling layer for 1D inputs.
Repeats each temporal step `size` times along the time axis.
Examples:
>>> input_shape = (2, 2, 3)
>>> x = np.arange(np.prod(input_shape)).reshape(input_shape)
>>> print(x)
[[[ 0 1 2]
[ 3 4 5]]
[[ 6 7 8]
[ 9 10 11]]]
>>> y = tf.keras.layers.UpSampling1D(size=2)(x)
>>> print(y)
tf.Tensor(
[[[ 0 1 2]
[ 0 1 2]
[ 3 4 5]
[ 3 4 5]]
[[ 6 7 8]
[ 6 7 8]
[ 9 10 11]
[ 9 10 11]]], shape=(2, 4, 3), dtype=int64)
Arguments:
size: Integer. Upsampling factor.
Input shape:
3D tensor with shape: `(batch_size, steps, features)`.
Output shape:
3D tensor with shape: `(batch_size, upsampled_steps, features)`.
"""
def __init__(self, size=2, **kwargs):
super(UpSampling1D, self).__init__(**kwargs)
self.size = int(size)
self.input_spec = InputSpec(ndim=3)
def compute_output_shape(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape).as_list()
size = self.size * input_shape[1] if input_shape[1] is not None else None
return tensor_shape.TensorShape([input_shape[0], size, input_shape[2]])
def call(self, inputs):
output = backend.repeat_elements(inputs, self.size, axis=1)
return output
def get_config(self):
config = {'size': self.size}
base_config = super(UpSampling1D, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@keras_export('keras.layers.UpSampling2D')
class UpSampling2D(Layer):
"""Upsampling layer for 2D inputs.
Repeats the rows and columns of the data
by `size[0]` and `size[1]` respectively.
Examples:
>>> input_shape = (2, 2, 1, 3)
>>> x = np.arange(np.prod(input_shape)).reshape(input_shape)
>>> print(x)
[[[[ 0 1 2]]
[[ 3 4 5]]]
[[[ 6 7 8]]
[[ 9 10 11]]]]
>>> y = tf.keras.layers.UpSampling2D(size=(1, 2))(x)
>>> print(y)
tf.Tensor(
[[[[ 0 1 2]
[ 0 1 2]]
[[ 3 4 5]
[ 3 4 5]]]
[[[ 6 7 8]
[ 6 7 8]]
[[ 9 10 11]
[ 9 10 11]]]], shape=(2, 2, 2, 3), dtype=int64)
Arguments:
size: Int, or tuple of 2 integers.
The upsampling factors for rows and columns.
data_format: A string,
one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch_size, height, width, channels)` while `channels_first`
corresponds to inputs with shape
`(batch_size, channels, height, width)`.
It defaults to the `image_data_format` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be "channels_last".
interpolation: A string, one of `nearest` or `bilinear`.
Input shape:
4D tensor with shape:
- If `data_format` is `"channels_last"`:
`(batch_size, rows, cols, channels)`
- If `data_format` is `"channels_first"`:
`(batch_size, channels, rows, cols)`
Output shape:
4D tensor with shape:
- If `data_format` is `"channels_last"`:
`(batch_size, upsampled_rows, upsampled_cols, channels)`
- If `data_format` is `"channels_first"`:
`(batch_size, channels, upsampled_rows, upsampled_cols)`
"""
def __init__(self,
size=(2, 2),
data_format=None,
interpolation='nearest',
**kwargs):
super(UpSampling2D, self).__init__(**kwargs)
self.data_format = conv_utils.normalize_data_format(data_format)
self.size = conv_utils.normalize_tuple(size, 2, 'size')
if interpolation not in {'nearest', 'bilinear'}:
raise ValueError('`interpolation` argument should be one of `"nearest"` '
'or `"bilinear"`.')
self.interpolation = interpolation
self.input_spec = InputSpec(ndim=4)
def compute_output_shape(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape).as_list()
if self.data_format == 'channels_first':
height = self.size[0] * input_shape[
2] if input_shape[2] is not None else None
width = self.size[1] * input_shape[
3] if input_shape[3] is not None else None
return tensor_shape.TensorShape(
[input_shape[0], input_shape[1], height, width])
else:
height = self.size[0] * input_shape[
1] if input_shape[1] is not None else None
width = self.size[1] * input_shape[
2] if input_shape[2] is not None else None
return tensor_shape.TensorShape(
[input_shape[0], height, width, input_shape[3]])
def call(self, inputs):
return backend.resize_images(
inputs, self.size[0], self.size[1], self.data_format,
interpolation=self.interpolation)
def get_config(self):
config = {
'size': self.size,
'data_format': self.data_format,
'interpolation': self.interpolation
}
base_config = super(UpSampling2D, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@keras_export('keras.layers.UpSampling3D')
class UpSampling3D(Layer):
"""Upsampling layer for 3D inputs.
Repeats the 1st, 2nd and 3rd dimensions
of the data by `size[0]`, `size[1]` and `size[2]` respectively.
Examples:
>>> input_shape = (2, 1, 2, 1, 3)
>>> x = tf.constant(1, shape=input_shape)
>>> y = tf.keras.layers.UpSampling3D(size=2)(x)
>>> print(y.shape)
(2, 2, 4, 2, 3)
Arguments:
size: Int, or tuple of 3 integers.
The upsampling factors for dim1, dim2 and dim3.
data_format: A string,
one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch_size, spatial_dim1, spatial_dim2, spatial_dim3, channels)`
while `channels_first` corresponds to inputs with shape
`(batch_size, channels, spatial_dim1, spatial_dim2, spatial_dim3)`.
It defaults to the `image_data_format` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be "channels_last".
Input shape:
5D tensor with shape:
- If `data_format` is `"channels_last"`:
`(batch_size, dim1, dim2, dim3, channels)`
- If `data_format` is `"channels_first"`:
`(batch_size, channels, dim1, dim2, dim3)`
Output shape:
5D tensor with shape:
- If `data_format` is `"channels_last"`:
`(batch_size, upsampled_dim1, upsampled_dim2, upsampled_dim3, channels)`
- If `data_format` is `"channels_first"`:
`(batch_size, channels, upsampled_dim1, upsampled_dim2, upsampled_dim3)`
"""
def __init__(self, size=(2, 2, 2), data_format=None, **kwargs):
self.data_format = conv_utils.normalize_data_format(data_format)
self.size = conv_utils.normalize_tuple(size, 3, 'size')
self.input_spec = InputSpec(ndim=5)
super(UpSampling3D, self).__init__(**kwargs)
def compute_output_shape(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape).as_list()
if self.data_format == 'channels_first':
dim1 = self.size[0] * input_shape[
2] if input_shape[2] is not None else None
dim2 = self.size[1] * input_shape[
3] if input_shape[3] is not None else None
dim3 = self.size[2] * input_shape[
4] if input_shape[4] is not None else None
return tensor_shape.TensorShape(
[input_shape[0], input_shape[1], dim1, dim2, dim3])
else:
dim1 = self.size[0] * input_shape[
1] if input_shape[1] is not None else None
dim2 = self.size[1] * input_shape[
2] if input_shape[2] is not None else None
dim3 = self.size[2] * input_shape[
3] if input_shape[3] is not None else None
return tensor_shape.TensorShape(
[input_shape[0], dim1, dim2, dim3, input_shape[4]])
def call(self, inputs):
return backend.resize_volumes(
inputs, self.size[0], self.size[1], self.size[2], self.data_format)
def get_config(self):
config = {'size': self.size, 'data_format': self.data_format}
base_config = super(UpSampling3D, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@keras_export('keras.layers.ZeroPadding1D')
class ZeroPadding1D(Layer):
"""Zero-padding layer for 1D input (e.g. temporal sequence).
Examples:
>>> input_shape = (2, 2, 3)
>>> x = np.arange(np.prod(input_shape)).reshape(input_shape)
>>> print(x)
[[[ 0 1 2]
[ 3 4 5]]
[[ 6 7 8]
[ 9 10 11]]]
>>> y = tf.keras.layers.ZeroPadding1D(padding=2)(x)
>>> print(y)
tf.Tensor(
[[[ 0 0 0]
[ 0 0 0]
[ 0 1 2]
[ 3 4 5]
[ 0 0 0]
[ 0 0 0]]
[[ 0 0 0]
[ 0 0 0]
[ 6 7 8]
[ 9 10 11]
[ 0 0 0]
[ 0 0 0]]], shape=(2, 6, 3), dtype=int64)
Arguments:
    padding: Int, or tuple of int (length 2).
- If int:
How many zeros to add at the beginning and end of
the padding dimension (axis 1).
- If tuple of int (length 2):
How many zeros to add at the beginning and the end of
the padding dimension (`(left_pad, right_pad)`).
Input shape:
3D tensor with shape `(batch_size, axis_to_pad, features)`
Output shape:
3D tensor with shape `(batch_size, padded_axis, features)`
"""
def __init__(self, padding=1, **kwargs):
super(ZeroPadding1D, self).__init__(**kwargs)
self.padding = conv_utils.normalize_tuple(padding, 2, 'padding')
self.input_spec = InputSpec(ndim=3)
def compute_output_shape(self, input_shape):
if input_shape[1] is not None:
length = input_shape[1] + self.padding[0] + self.padding[1]
else:
length = None
return tensor_shape.TensorShape([input_shape[0], length, input_shape[2]])
def call(self, inputs):
return backend.temporal_padding(inputs, padding=self.padding)
def get_config(self):
config = {'padding': self.padding}
base_config = super(ZeroPadding1D, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@keras_export('keras.layers.ZeroPadding2D')
class ZeroPadding2D(Layer):
"""Zero-padding layer for 2D input (e.g. picture).
This layer can add rows and columns of zeros
at the top, bottom, left and right side of an image tensor.
Examples:
>>> input_shape = (1, 1, 2, 2)
>>> x = np.arange(np.prod(input_shape)).reshape(input_shape)
>>> print(x)
[[[[0 1]
[2 3]]]]
>>> y = tf.keras.layers.ZeroPadding2D(padding=1)(x)
>>> print(y)
tf.Tensor(
[[[[0 0]
[0 0]
[0 0]
[0 0]]
[[0 0]
[0 1]
[2 3]
[0 0]]
[[0 0]
[0 0]
[0 0]
[0 0]]]], shape=(1, 3, 4, 2), dtype=int64)
Arguments:
padding: Int, or tuple of 2 ints, or tuple of 2 tuples of 2 ints.
- If int: the same symmetric padding
is applied to height and width.
- If tuple of 2 ints:
interpreted as two different
symmetric padding values for height and width:
`(symmetric_height_pad, symmetric_width_pad)`.
- If tuple of 2 tuples of 2 ints:
interpreted as
`((top_pad, bottom_pad), (left_pad, right_pad))`
data_format: A string,
one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch_size, height, width, channels)` while `channels_first`
corresponds to inputs with shape
`(batch_size, channels, height, width)`.
It defaults to the `image_data_format` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be "channels_last".
Input shape:
4D tensor with shape:
- If `data_format` is `"channels_last"`:
`(batch_size, rows, cols, channels)`
- If `data_format` is `"channels_first"`:
`(batch_size, channels, rows, cols)`
Output shape:
4D tensor with shape:
- If `data_format` is `"channels_last"`:
`(batch_size, padded_rows, padded_cols, channels)`
- If `data_format` is `"channels_first"`:
`(batch_size, channels, padded_rows, padded_cols)`
"""
def __init__(self, padding=(1, 1), data_format=None, **kwargs):
super(ZeroPadding2D, self).__init__(**kwargs)
self.data_format = conv_utils.normalize_data_format(data_format)
if isinstance(padding, int):
self.padding = ((padding, padding), (padding, padding))
elif hasattr(padding, '__len__'):
if len(padding) != 2:
raise ValueError('`padding` should have two elements. '
'Found: ' + str(padding))
height_padding = conv_utils.normalize_tuple(padding[0], 2,
'1st entry of padding')
width_padding = conv_utils.normalize_tuple(padding[1], 2,
'2nd entry of padding')
self.padding = (height_padding, width_padding)
else:
raise ValueError('`padding` should be either an int, '
'a tuple of 2 ints '
'(symmetric_height_pad, symmetric_width_pad), '
'or a tuple of 2 tuples of 2 ints '
'((top_pad, bottom_pad), (left_pad, right_pad)). '
'Found: ' + str(padding))
self.input_spec = InputSpec(ndim=4)
def compute_output_shape(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape).as_list()
if self.data_format == 'channels_first':
if input_shape[2] is not None:
rows = input_shape[2] + self.padding[0][0] + self.padding[0][1]
else:
rows = None
if input_shape[3] is not None:
cols = input_shape[3] + self.padding[1][0] + self.padding[1][1]
else:
cols = None
return tensor_shape.TensorShape(
[input_shape[0], input_shape[1], rows, cols])
elif self.data_format == 'channels_last':
if input_shape[1] is not None:
rows = input_shape[1] + self.padding[0][0] + self.padding[0][1]
else:
rows = None
if input_shape[2] is not None:
cols = input_shape[2] + self.padding[1][0] + self.padding[1][1]
else:
cols = None
return tensor_shape.TensorShape(
[input_shape[0], rows, cols, input_shape[3]])
def call(self, inputs):
return backend.spatial_2d_padding(
inputs, padding=self.padding, data_format=self.data_format)
def get_config(self):
config = {'padding': self.padding, 'data_format': self.data_format}
base_config = super(ZeroPadding2D, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@keras_export('keras.layers.ZeroPadding3D')
class ZeroPadding3D(Layer):
"""Zero-padding layer for 3D data (spatial or spatio-temporal).
Examples:
>>> input_shape = (1, 1, 2, 2, 3)
>>> x = np.arange(np.prod(input_shape)).reshape(input_shape)
>>> y = tf.keras.layers.ZeroPadding3D(padding=2)(x)
>>> print(y.shape)
(1, 5, 6, 6, 3)
Arguments:
padding: Int, or tuple of 3 ints, or tuple of 3 tuples of 2 ints.
      - If int: the same symmetric padding
        is applied to all three spatial dimensions (depth, height, and width).
      - If tuple of 3 ints:
        interpreted as three different
        symmetric padding values for the three spatial dimensions:
        `(symmetric_dim1_pad, symmetric_dim2_pad, symmetric_dim3_pad)`.
- If tuple of 3 tuples of 2 ints:
interpreted as
`((left_dim1_pad, right_dim1_pad), (left_dim2_pad,
right_dim2_pad), (left_dim3_pad, right_dim3_pad))`
data_format: A string,
one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch_size, spatial_dim1, spatial_dim2, spatial_dim3, channels)`
while `channels_first` corresponds to inputs with shape
`(batch_size, channels, spatial_dim1, spatial_dim2, spatial_dim3)`.
It defaults to the `image_data_format` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be "channels_last".
Input shape:
5D tensor with shape:
- If `data_format` is `"channels_last"`:
`(batch_size, first_axis_to_pad, second_axis_to_pad, third_axis_to_pad,
depth)`
- If `data_format` is `"channels_first"`:
`(batch_size, depth, first_axis_to_pad, second_axis_to_pad,
third_axis_to_pad)`
Output shape:
5D tensor with shape:
- If `data_format` is `"channels_last"`:
      `(batch_size, first_padded_axis, second_padded_axis, third_padded_axis,
depth)`
- If `data_format` is `"channels_first"`:
`(batch_size, depth, first_padded_axis, second_padded_axis,
        third_padded_axis)`
"""
def __init__(self, padding=(1, 1, 1), data_format=None, **kwargs):
super(ZeroPadding3D, self).__init__(**kwargs)
self.data_format = conv_utils.normalize_data_format(data_format)
if isinstance(padding, int):
self.padding = ((padding, padding), (padding, padding), (padding,
padding))
elif hasattr(padding, '__len__'):
if len(padding) != 3:
raise ValueError('`padding` should have 3 elements. '
'Found: ' + str(padding))
dim1_padding = conv_utils.normalize_tuple(padding[0], 2,
'1st entry of padding')
dim2_padding = conv_utils.normalize_tuple(padding[1], 2,
'2nd entry of padding')
dim3_padding = conv_utils.normalize_tuple(padding[2], 2,
'3rd entry of padding')
self.padding = (dim1_padding, dim2_padding, dim3_padding)
else:
raise ValueError(
'`padding` should be either an int, '
'a tuple of 3 ints '
'(symmetric_dim1_pad, symmetric_dim2_pad, symmetric_dim3_pad), '
'or a tuple of 3 tuples of 2 ints '
'((left_dim1_pad, right_dim1_pad),'
' (left_dim2_pad, right_dim2_pad),'
          ' (left_dim3_pad, right_dim3_pad)). '
'Found: ' + str(padding))
self.input_spec = InputSpec(ndim=5)
def compute_output_shape(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape).as_list()
if self.data_format == 'channels_first':
if input_shape[2] is not None:
        dim1 = input_shape[2] + self.padding[0][0] + self.padding[0][1]
else:
dim1 = None
if input_shape[3] is not None:
        dim2 = input_shape[3] + self.padding[1][0] + self.padding[1][1]
else:
dim2 = None
if input_shape[4] is not None:
        dim3 = input_shape[4] + self.padding[2][0] + self.padding[2][1]
else:
dim3 = None
return tensor_shape.TensorShape(
[input_shape[0], input_shape[1], dim1, dim2, dim3])
elif self.data_format == 'channels_last':
if input_shape[1] is not None:
        dim1 = input_shape[1] + self.padding[0][0] + self.padding[0][1]
else:
dim1 = None
if input_shape[2] is not None:
        dim2 = input_shape[2] + self.padding[1][0] + self.padding[1][1]
else:
dim2 = None
if input_shape[3] is not None:
        dim3 = input_shape[3] + self.padding[2][0] + self.padding[2][1]
else:
dim3 = None
return tensor_shape.TensorShape(
[input_shape[0], dim1, dim2, dim3, input_shape[4]])
def call(self, inputs):
return backend.spatial_3d_padding(
inputs, padding=self.padding, data_format=self.data_format)
def get_config(self):
config = {'padding': self.padding, 'data_format': self.data_format}
base_config = super(ZeroPadding3D, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@keras_export('keras.layers.Cropping1D')
class Cropping1D(Layer):
"""Cropping layer for 1D input (e.g. temporal sequence).
It crops along the time dimension (axis 1).
Examples:
>>> input_shape = (2, 3, 2)
>>> x = np.arange(np.prod(input_shape)).reshape(input_shape)
>>> print(x)
[[[ 0 1]
[ 2 3]
[ 4 5]]
[[ 6 7]
[ 8 9]
[10 11]]]
>>> y = tf.keras.layers.Cropping1D(cropping=1)(x)
>>> print(y)
tf.Tensor(
[[[2 3]]
[[8 9]]], shape=(2, 1, 2), dtype=int64)
Arguments:
    cropping: Int or tuple of int (length 2).
How many units should be trimmed off at the beginning and end of
the cropping dimension (axis 1).
If a single int is provided, the same value will be used for both.
Input shape:
3D tensor with shape `(batch_size, axis_to_crop, features)`
Output shape:
3D tensor with shape `(batch_size, cropped_axis, features)`
"""
def __init__(self, cropping=(1, 1), **kwargs):
super(Cropping1D, self).__init__(**kwargs)
self.cropping = conv_utils.normalize_tuple(cropping, 2, 'cropping')
self.input_spec = InputSpec(ndim=3)
def compute_output_shape(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape).as_list()
if input_shape[1] is not None:
length = input_shape[1] - self.cropping[0] - self.cropping[1]
else:
length = None
return tensor_shape.TensorShape([input_shape[0], length, input_shape[2]])
def call(self, inputs):
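    # A slice end of `-0` would produce an empty tensor, so zero cropping on
    # the right is handled as a separate case.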
if self.cropping[1] == 0:
return inputs[:, self.cropping[0]:, :]
else:
return inputs[:, self.cropping[0]:-self.cropping[1], :]
def get_config(self):
config = {'cropping': self.cropping}
base_config = super(Cropping1D, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@keras_export('keras.layers.Cropping2D')
class Cropping2D(Layer):
"""Cropping layer for 2D input (e.g. picture).
It crops along spatial dimensions, i.e. height and width.
Examples:
>>> input_shape = (2, 28, 28, 3)
>>> x = np.arange(np.prod(input_shape)).reshape(input_shape)
>>> y = tf.keras.layers.Cropping2D(cropping=((2, 2), (4, 4)))(x)
>>> print(y.shape)
(2, 24, 20, 3)
Arguments:
cropping: Int, or tuple of 2 ints, or tuple of 2 tuples of 2 ints.
- If int: the same symmetric cropping
is applied to height and width.
- If tuple of 2 ints:
interpreted as two different
symmetric cropping values for height and width:
`(symmetric_height_crop, symmetric_width_crop)`.
- If tuple of 2 tuples of 2 ints:
interpreted as
`((top_crop, bottom_crop), (left_crop, right_crop))`
data_format: A string,
one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch_size, height, width, channels)` while `channels_first`
corresponds to inputs with shape
`(batch_size, channels, height, width)`.
It defaults to the `image_data_format` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be "channels_last".
Input shape:
4D tensor with shape:
- If `data_format` is `"channels_last"`:
`(batch_size, rows, cols, channels)`
- If `data_format` is `"channels_first"`:
`(batch_size, channels, rows, cols)`
Output shape:
4D tensor with shape:
- If `data_format` is `"channels_last"`:
`(batch_size, cropped_rows, cropped_cols, channels)`
- If `data_format` is `"channels_first"`:
`(batch_size, channels, cropped_rows, cropped_cols)`
"""
def __init__(self, cropping=((0, 0), (0, 0)), data_format=None, **kwargs):
super(Cropping2D, self).__init__(**kwargs)
self.data_format = conv_utils.normalize_data_format(data_format)
if isinstance(cropping, int):
self.cropping = ((cropping, cropping), (cropping, cropping))
elif hasattr(cropping, '__len__'):
if len(cropping) != 2:
raise ValueError('`cropping` should have two elements. '
'Found: ' + str(cropping))
height_cropping = conv_utils.normalize_tuple(cropping[0], 2,
'1st entry of cropping')
width_cropping = conv_utils.normalize_tuple(cropping[1], 2,
'2nd entry of cropping')
self.cropping = (height_cropping, width_cropping)
else:
raise ValueError('`cropping` should be either an int, '
'a tuple of 2 ints '
'(symmetric_height_crop, symmetric_width_crop), '
'or a tuple of 2 tuples of 2 ints '
'((top_crop, bottom_crop), (left_crop, right_crop)). '
'Found: ' + str(cropping))
self.input_spec = InputSpec(ndim=4)
def compute_output_shape(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape).as_list()
# pylint: disable=invalid-unary-operand-type
if self.data_format == 'channels_first':
return tensor_shape.TensorShape([
input_shape[0], input_shape[1],
input_shape[2] - self.cropping[0][0] - self.cropping[0][1]
if input_shape[2] else None,
input_shape[3] - self.cropping[1][0] - self.cropping[1][1]
if input_shape[3] else None
])
else:
return tensor_shape.TensorShape([
input_shape[0],
input_shape[1] - self.cropping[0][0] - self.cropping[0][1]
if input_shape[1] else None,
input_shape[2] - self.cropping[1][0] - self.cropping[1][1]
if input_shape[2] else None, input_shape[3]
])
# pylint: enable=invalid-unary-operand-type
def call(self, inputs):
# pylint: disable=invalid-unary-operand-type
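    # A slice end of `-0` would produce an empty tensor, so dimensions with
    # zero right-cropping are sliced without a negative end index.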
if self.data_format == 'channels_first':
if self.cropping[0][1] == self.cropping[1][1] == 0:
return inputs[:, :, self.cropping[0][0]:, self.cropping[1][0]:]
elif self.cropping[0][1] == 0:
return inputs[:, :, self.cropping[0][0]:, self.cropping[1][0]:
-self.cropping[1][1]]
elif self.cropping[1][1] == 0:
return inputs[:, :, self.cropping[0][0]:-self.cropping[0][1],
self.cropping[1][0]:]
return inputs[:, :, self.cropping[0][0]:-self.cropping[0][1],
self.cropping[1][0]:-self.cropping[1][1]]
else:
if self.cropping[0][1] == self.cropping[1][1] == 0:
return inputs[:, self.cropping[0][0]:, self.cropping[1][0]:, :]
elif self.cropping[0][1] == 0:
return inputs[:, self.cropping[0][0]:, self.cropping[1][0]:
-self.cropping[1][1], :]
elif self.cropping[1][1] == 0:
return inputs[:, self.cropping[0][0]:-self.cropping[0][1],
self.cropping[1][0]:, :]
return inputs[:, self.cropping[0][0]:-self.cropping[0][1], self.cropping[
1][0]:-self.cropping[1][1], :] # pylint: disable=invalid-unary-operand-type
# pylint: enable=invalid-unary-operand-type
def get_config(self):
config = {'cropping': self.cropping, 'data_format': self.data_format}
base_config = super(Cropping2D, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@keras_export('keras.layers.Cropping3D')
class Cropping3D(Layer):
"""Cropping layer for 3D data (e.g. spatial or spatio-temporal).
Examples:
>>> input_shape = (2, 28, 28, 10, 3)
>>> x = np.arange(np.prod(input_shape)).reshape(input_shape)
>>> y = tf.keras.layers.Cropping3D(cropping=(2, 4, 2))(x)
>>> print(y.shape)
(2, 24, 20, 6, 3)
Arguments:
cropping: Int, or tuple of 3 ints, or tuple of 3 tuples of 2 ints.
- If int: the same symmetric cropping
is applied to depth, height, and width.
      - If tuple of 3 ints: interpreted as three different
symmetric cropping values for depth, height, and width:
`(symmetric_dim1_crop, symmetric_dim2_crop, symmetric_dim3_crop)`.
- If tuple of 3 tuples of 2 ints: interpreted as
`((left_dim1_crop, right_dim1_crop), (left_dim2_crop,
right_dim2_crop), (left_dim3_crop, right_dim3_crop))`
data_format: A string,
one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch_size, spatial_dim1, spatial_dim2, spatial_dim3, channels)`
while `channels_first` corresponds to inputs with shape
`(batch_size, channels, spatial_dim1, spatial_dim2, spatial_dim3)`.
It defaults to the `image_data_format` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be "channels_last".
Input shape:
5D tensor with shape:
- If `data_format` is `"channels_last"`:
`(batch_size, first_axis_to_crop, second_axis_to_crop, third_axis_to_crop,
depth)`
- If `data_format` is `"channels_first"`:
`(batch_size, depth, first_axis_to_crop, second_axis_to_crop,
third_axis_to_crop)`
Output shape:
5D tensor with shape:
- If `data_format` is `"channels_last"`:
`(batch_size, first_cropped_axis, second_cropped_axis, third_cropped_axis,
depth)`
- If `data_format` is `"channels_first"`:
`(batch_size, depth, first_cropped_axis, second_cropped_axis,
third_cropped_axis)`
"""
def __init__(self,
cropping=((1, 1), (1, 1), (1, 1)),
data_format=None,
**kwargs):
super(Cropping3D, self).__init__(**kwargs)
self.data_format = conv_utils.normalize_data_format(data_format)
if isinstance(cropping, int):
self.cropping = ((cropping, cropping), (cropping, cropping), (cropping,
cropping))
elif hasattr(cropping, '__len__'):
if len(cropping) != 3:
raise ValueError('`cropping` should have 3 elements. '
'Found: ' + str(cropping))
dim1_cropping = conv_utils.normalize_tuple(cropping[0], 2,
'1st entry of cropping')
dim2_cropping = conv_utils.normalize_tuple(cropping[1], 2,
'2nd entry of cropping')
dim3_cropping = conv_utils.normalize_tuple(cropping[2], 2,
'3rd entry of cropping')
self.cropping = (dim1_cropping, dim2_cropping, dim3_cropping)
else:
raise ValueError(
'`cropping` should be either an int, '
'a tuple of 3 ints '
'(symmetric_dim1_crop, symmetric_dim2_crop, symmetric_dim3_crop), '
'or a tuple of 3 tuples of 2 ints '
'((left_dim1_crop, right_dim1_crop),'
' (left_dim2_crop, right_dim2_crop),'
          ' (left_dim3_crop, right_dim3_crop)). '
'Found: ' + str(cropping))
self.input_spec = InputSpec(ndim=5)
def compute_output_shape(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape).as_list()
# pylint: disable=invalid-unary-operand-type
if self.data_format == 'channels_first':
if input_shape[2] is not None:
dim1 = input_shape[2] - self.cropping[0][0] - self.cropping[0][1]
else:
dim1 = None
if input_shape[3] is not None:
dim2 = input_shape[3] - self.cropping[1][0] - self.cropping[1][1]
else:
dim2 = None
if input_shape[4] is not None:
dim3 = input_shape[4] - self.cropping[2][0] - self.cropping[2][1]
else:
dim3 = None
return tensor_shape.TensorShape(
[input_shape[0], input_shape[1], dim1, dim2, dim3])
elif self.data_format == 'channels_last':
if input_shape[1] is not None:
dim1 = input_shape[1] - self.cropping[0][0] - self.cropping[0][1]
else:
dim1 = None
if input_shape[2] is not None:
dim2 = input_shape[2] - self.cropping[1][0] - self.cropping[1][1]
else:
dim2 = None
if input_shape[3] is not None:
dim3 = input_shape[3] - self.cropping[2][0] - self.cropping[2][1]
else:
dim3 = None
return tensor_shape.TensorShape(
[input_shape[0], dim1, dim2, dim3, input_shape[4]])
# pylint: enable=invalid-unary-operand-type
def call(self, inputs):
# pylint: disable=invalid-unary-operand-type
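    # As in `Cropping2D.call`, a slice end of `-0` would produce an empty
    # tensor, so each combination of zero right-cropping is special-cased.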
if self.data_format == 'channels_first':
if self.cropping[0][1] == self.cropping[1][1] == self.cropping[2][1] == 0:
return inputs[:, :, self.cropping[0][0]:, self.cropping[1][0]:,
self.cropping[2][0]:]
elif self.cropping[0][1] == self.cropping[1][1] == 0:
return inputs[:, :, self.cropping[0][0]:, self.cropping[1][0]:,
self.cropping[2][0]:-self.cropping[2][1]]
elif self.cropping[1][1] == self.cropping[2][1] == 0:
return inputs[:, :, self.cropping[0][0]:-self.cropping[0][1],
self.cropping[1][0]:, self.cropping[2][0]:]
elif self.cropping[0][1] == self.cropping[2][1] == 0:
return inputs[:, :, self.cropping[0][0]:, self.cropping[1][0]:
-self.cropping[1][1], self.cropping[2][0]:]
elif self.cropping[0][1] == 0:
return inputs[:, :, self.cropping[0][0]:, self.cropping[1][
0]:-self.cropping[1][1], self.cropping[2][0]:-self.cropping[2][1]]
elif self.cropping[1][1] == 0:
return inputs[:, :, self.cropping[0][0]:-self.cropping[0][1], self.
cropping[1][0]:, self.cropping[2][0]:-self.cropping[2][1]]
elif self.cropping[2][1] == 0:
return inputs[:, :, self.cropping[0][0]:-self.cropping[0][1], self.
cropping[1][0]:-self.cropping[1][1], self.cropping[2][0]:]
return inputs[:, :, self.cropping[0][0]:-self.cropping[0][1],
self.cropping[1][0]:-self.cropping[1][1], self.cropping[2][
0]:-self.cropping[2][1]]
else:
if self.cropping[0][1] == self.cropping[1][1] == self.cropping[2][1] == 0:
return inputs[:, self.cropping[0][0]:, self.cropping[1][0]:,
self.cropping[2][0]:, :]
elif self.cropping[0][1] == self.cropping[1][1] == 0:
return inputs[:, self.cropping[0][0]:, self.cropping[1][0]:,
self.cropping[2][0]:-self.cropping[2][1], :]
elif self.cropping[1][1] == self.cropping[2][1] == 0:
return inputs[:, self.cropping[0][0]:-self.cropping[0][1],
self.cropping[1][0]:, self.cropping[2][0]:, :]
elif self.cropping[0][1] == self.cropping[2][1] == 0:
return inputs[:, self.cropping[0][0]:, self.cropping[1][0]:
-self.cropping[1][1], self.cropping[2][0]:, :]
elif self.cropping[0][1] == 0:
return inputs[:, self.cropping[0][0]:, self.cropping[1][
0]:-self.cropping[1][1], self.cropping[2][0]:
-self.cropping[2][1], :]
elif self.cropping[1][1] == 0:
return inputs[:, self.cropping[0][
0]:-self.cropping[0][1], self.cropping[1][0]:, self.cropping[2][0]:
-self.cropping[2][1], :]
elif self.cropping[2][1] == 0:
return inputs[:, self.cropping[0][0]:-self.cropping[0][1],
self.cropping[1][0]:-self.cropping[1][1], self.cropping[
2][0]:, :]
return inputs[:, self.cropping[0][0]:-self.cropping[0][1], self.cropping[
1][0]:-self.cropping[1][1], self.cropping[2][0]: # pylint: disable=invalid-unary-operand-type
-self.cropping[2][1], :] # pylint: disable=invalid-unary-operand-type
# pylint: enable=invalid-unary-operand-type
def get_config(self):
config = {'cropping': self.cropping, 'data_format': self.data_format}
base_config = super(Cropping3D, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
# Aliases
Convolution1D = Conv1D
Convolution2D = Conv2D
Convolution3D = Conv3D
SeparableConvolution1D = SeparableConv1D
SeparableConvolution2D = SeparableConv2D
Convolution2DTranspose = Conv2DTranspose
Convolution3DTranspose = Conv3DTranspose
Deconvolution2D = Deconv2D = Conv2DTranspose
Deconvolution3D = Deconv3D = Conv3DTranspose
| 40.535638
| 104
| 0.657221
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.eager import context
from tensorflow.python.framework import tensor_shape
from tensorflow.python.keras import activations
from tensorflow.python.keras import backend
from tensorflow.python.keras import constraints
from tensorflow.python.keras import initializers
from tensorflow.python.keras import regularizers
from tensorflow.python.keras.engine.base_layer import Layer
from tensorflow.python.keras.engine.input_spec import InputSpec
from tensorflow.python.keras.layers.pooling import AveragePooling1D
from tensorflow.python.keras.layers.pooling import AveragePooling2D
from tensorflow.python.keras.layers.pooling import AveragePooling3D
from tensorflow.python.keras.layers.pooling import MaxPooling1D
from tensorflow.python.keras.layers.pooling import MaxPooling2D
from tensorflow.python.keras.layers.pooling import MaxPooling3D
from tensorflow.python.keras.utils import conv_utils
from tensorflow.python.keras.utils import tf_utils
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import nn_ops
from tensorflow.python.util.tf_export import keras_export
class Conv(Layer):
def __init__(self, rank,
filters,
kernel_size,
strides=1,
padding='valid',
data_format=None,
dilation_rate=1,
activation=None,
use_bias=True,
kernel_initializer='glorot_uniform',
bias_initializer='zeros',
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
trainable=True,
name=None,
**kwargs):
super(Conv, self).__init__(
trainable=trainable,
name=name,
activity_regularizer=regularizers.get(activity_regularizer),
**kwargs)
self.rank = rank
if filters is not None and not isinstance(filters, int):
filters = int(filters)
self.filters = filters
self.kernel_size = conv_utils.normalize_tuple(
kernel_size, rank, 'kernel_size')
if not all(self.kernel_size):
raise ValueError('The argument `kernel_size` cannot contain 0(s). '
'Received: %s' % (kernel_size,))
self.strides = conv_utils.normalize_tuple(strides, rank, 'strides')
self.padding = conv_utils.normalize_padding(padding)
if (self.padding == 'causal' and not isinstance(self,
(Conv1D, SeparableConv1D))):
      raise ValueError('Causal padding is only supported for `Conv1D` '
                       'and `SeparableConv1D`.')
self.data_format = conv_utils.normalize_data_format(data_format)
self.dilation_rate = conv_utils.normalize_tuple(
dilation_rate, rank, 'dilation_rate')
self.activation = activations.get(activation)
self.use_bias = use_bias
self.kernel_initializer = initializers.get(kernel_initializer)
self.bias_initializer = initializers.get(bias_initializer)
self.kernel_regularizer = regularizers.get(kernel_regularizer)
self.bias_regularizer = regularizers.get(bias_regularizer)
self.kernel_constraint = constraints.get(kernel_constraint)
self.bias_constraint = constraints.get(bias_constraint)
self.input_spec = InputSpec(ndim=self.rank + 2)
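  # build(): once the input shape is known, create a kernel of shape
  # kernel_size + (input_channels, filters) and an optional (filters,) bias,
  # then cache an nn_ops.Convolution op specialized to that input shape.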
def build(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape)
input_channel = self._get_input_channel(input_shape)
kernel_shape = self.kernel_size + (input_channel, self.filters)
self.kernel = self.add_weight(
name='kernel',
shape=kernel_shape,
initializer=self.kernel_initializer,
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint,
trainable=True,
dtype=self.dtype)
if self.use_bias:
self.bias = self.add_weight(
name='bias',
shape=(self.filters,),
initializer=self.bias_initializer,
regularizer=self.bias_regularizer,
constraint=self.bias_constraint,
trainable=True,
dtype=self.dtype)
else:
self.bias = None
channel_axis = self._get_channel_axis()
self.input_spec = InputSpec(ndim=self.rank + 2,
axes={channel_axis: input_channel})
self._build_conv_op_input_shape = input_shape
self._build_input_channel = input_channel
self._padding_op = self._get_padding_op()
self._conv_op_data_format = conv_utils.convert_data_format(
self.data_format, self.rank + 2)
self._convolution_op = nn_ops.Convolution(
input_shape,
filter_shape=self.kernel.shape,
dilation_rate=self.dilation_rate,
strides=self.strides,
padding=self._padding_op,
data_format=self._conv_op_data_format)
self.built = True
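  # call(): re-create the cached convolution op if the input shape changed,
  # apply left-only (causal) padding for Conv1D when requested, convolve,
  # then add the bias and the activation if configured.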
def call(self, inputs):
if self._recreate_conv_op(inputs):
self._convolution_op = nn_ops.Convolution(
inputs.get_shape(),
filter_shape=self.kernel.shape,
dilation_rate=self.dilation_rate,
strides=self.strides,
padding=self._padding_op,
data_format=self._conv_op_data_format)
self._build_conv_op_input_shape = inputs.get_shape()
if self.padding == 'causal' and self.__class__.__name__ == 'Conv1D':
inputs = array_ops.pad(inputs, self._compute_causal_padding())
outputs = self._convolution_op(inputs, self.kernel)
if self.use_bias:
if self.data_format == 'channels_first':
if self.rank == 1:
bias = array_ops.reshape(self.bias, (1, self.filters, 1))
outputs += bias
else:
outputs = nn.bias_add(outputs, self.bias, data_format='NCHW')
else:
outputs = nn.bias_add(outputs, self.bias, data_format='NHWC')
if self.activation is not None:
return self.activation(outputs)
return outputs
def _spatial_output_shape(self, spatial_input_shape):
return [
conv_utils.conv_output_length(
length,
self.kernel_size[i],
padding=self.padding,
stride=self.strides[i],
dilation=self.dilation_rate[i])
for i, length in enumerate(spatial_input_shape)
]
def compute_output_shape(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape).as_list()
if self.data_format == 'channels_last':
return tensor_shape.TensorShape(
[input_shape[0]] + self._spatial_output_shape(input_shape[1:-1]) +
[self.filters])
else:
return tensor_shape.TensorShape(
[input_shape[0], self.filters] +
self._spatial_output_shape(input_shape[2:]))
def get_config(self):
config = {
'filters': self.filters,
'kernel_size': self.kernel_size,
'strides': self.strides,
'padding': self.padding,
'data_format': self.data_format,
'dilation_rate': self.dilation_rate,
'activation': activations.serialize(self.activation),
'use_bias': self.use_bias,
'kernel_initializer': initializers.serialize(self.kernel_initializer),
'bias_initializer': initializers.serialize(self.bias_initializer),
'kernel_regularizer': regularizers.serialize(self.kernel_regularizer),
'bias_regularizer': regularizers.serialize(self.bias_regularizer),
'activity_regularizer':
regularizers.serialize(self.activity_regularizer),
'kernel_constraint': constraints.serialize(self.kernel_constraint),
'bias_constraint': constraints.serialize(self.bias_constraint)
}
base_config = super(Conv, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
def _compute_causal_padding(self):
left_pad = self.dilation_rate[0] * (self.kernel_size[0] - 1)
if self.data_format == 'channels_last':
causal_padding = [[0, 0], [left_pad, 0], [0, 0]]
else:
causal_padding = [[0, 0], [0, 0], [left_pad, 0]]
return causal_padding
def _get_channel_axis(self):
if self.data_format == 'channels_first':
return 1
else:
return -1
def _get_input_channel(self, input_shape):
channel_axis = self._get_channel_axis()
if input_shape.dims[channel_axis].value is None:
raise ValueError('The channel dimension of the inputs '
'should be defined. Found `None`.')
return int(input_shape[channel_axis])
def _get_padding_op(self):
if self.padding == 'causal':
op_padding = 'valid'
else:
op_padding = self.padding
if not isinstance(op_padding, (list, tuple)):
op_padding = op_padding.upper()
return op_padding
def _recreate_conv_op(self, inputs):
call_input_shape = inputs.get_shape()
return self._build_conv_op_input_shape.most_specific_compatible_shape(
call_input_shape) != self._build_conv_op_input_shape
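# A minimal Conv1D usage sketch (hypothetical shapes): Conv1D(32, 3) applied
# to a (batch, 10, 128) input yields (batch, 8, 32) under the default 'valid'
# padding, since 10 - 3 + 1 = 8.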
@keras_export('keras.layers.Conv1D', 'keras.layers.Convolution1D')
class Conv1D(Conv):
def __init__(self,
filters,
kernel_size,
strides=1,
padding='valid',
data_format='channels_last',
dilation_rate=1,
activation=None,
use_bias=True,
kernel_initializer='glorot_uniform',
bias_initializer='zeros',
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
**kwargs):
super(Conv1D, self).__init__(
rank=1,
filters=filters,
kernel_size=kernel_size,
strides=strides,
padding=padding,
data_format=data_format,
dilation_rate=dilation_rate,
activation=activations.get(activation),
use_bias=use_bias,
kernel_initializer=initializers.get(kernel_initializer),
bias_initializer=initializers.get(bias_initializer),
kernel_regularizer=regularizers.get(kernel_regularizer),
bias_regularizer=regularizers.get(bias_regularizer),
activity_regularizer=regularizers.get(activity_regularizer),
kernel_constraint=constraints.get(kernel_constraint),
bias_constraint=constraints.get(bias_constraint),
**kwargs)
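# A minimal Conv2D usage sketch (hypothetical shapes): Conv2D(16, (3, 3)) on a
# (batch, 28, 28, 3) channels_last input yields (batch, 26, 26, 16) with
# 'valid' padding; padding='same' would keep the 28x28 spatial size.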
@keras_export('keras.layers.Conv2D', 'keras.layers.Convolution2D')
class Conv2D(Conv):
def __init__(self,
filters,
kernel_size,
strides=(1, 1),
padding='valid',
data_format=None,
dilation_rate=(1, 1),
activation=None,
use_bias=True,
kernel_initializer='glorot_uniform',
bias_initializer='zeros',
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
**kwargs):
super(Conv2D, self).__init__(
rank=2,
filters=filters,
kernel_size=kernel_size,
strides=strides,
padding=padding,
data_format=data_format,
dilation_rate=dilation_rate,
activation=activations.get(activation),
use_bias=use_bias,
kernel_initializer=initializers.get(kernel_initializer),
bias_initializer=initializers.get(bias_initializer),
kernel_regularizer=regularizers.get(kernel_regularizer),
bias_regularizer=regularizers.get(bias_regularizer),
activity_regularizer=regularizers.get(activity_regularizer),
kernel_constraint=constraints.get(kernel_constraint),
bias_constraint=constraints.get(bias_constraint),
**kwargs)
@keras_export('keras.layers.Conv3D', 'keras.layers.Convolution3D')
class Conv3D(Conv):
def __init__(self,
filters,
kernel_size,
strides=(1, 1, 1),
padding='valid',
data_format=None,
dilation_rate=(1, 1, 1),
activation=None,
use_bias=True,
kernel_initializer='glorot_uniform',
bias_initializer='zeros',
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
**kwargs):
super(Conv3D, self).__init__(
rank=3,
filters=filters,
kernel_size=kernel_size,
strides=strides,
padding=padding,
data_format=data_format,
dilation_rate=dilation_rate,
activation=activations.get(activation),
use_bias=use_bias,
kernel_initializer=initializers.get(kernel_initializer),
bias_initializer=initializers.get(bias_initializer),
kernel_regularizer=regularizers.get(kernel_regularizer),
bias_regularizer=regularizers.get(bias_regularizer),
activity_regularizer=regularizers.get(activity_regularizer),
kernel_constraint=constraints.get(kernel_constraint),
bias_constraint=constraints.get(bias_constraint),
**kwargs)
@keras_export('keras.layers.Conv1DTranspose',
'keras.layers.Convolution1DTranspose')
class Conv1DTranspose(Conv1D):
def __init__(self,
filters,
kernel_size,
strides=1,
padding='valid',
output_padding=None,
data_format=None,
dilation_rate=1,
activation=None,
use_bias=True,
kernel_initializer='glorot_uniform',
bias_initializer='zeros',
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
**kwargs):
super(Conv1DTranspose, self).__init__(
filters=filters,
kernel_size=kernel_size,
strides=strides,
padding=padding,
data_format=data_format,
dilation_rate=dilation_rate,
activation=activations.get(activation),
use_bias=use_bias,
kernel_initializer=initializers.get(kernel_initializer),
bias_initializer=initializers.get(bias_initializer),
kernel_regularizer=regularizers.get(kernel_regularizer),
bias_regularizer=regularizers.get(bias_regularizer),
activity_regularizer=regularizers.get(activity_regularizer),
kernel_constraint=constraints.get(kernel_constraint),
bias_constraint=constraints.get(bias_constraint),
**kwargs)
self.output_padding = output_padding
if self.output_padding is not None:
self.output_padding = conv_utils.normalize_tuple(
self.output_padding, 1, 'output_padding')
for stride, out_pad in zip(self.strides, self.output_padding):
if out_pad >= stride:
raise ValueError('Stride ' + str(self.strides) + ' must be '
'greater than output padding ' +
str(self.output_padding))
def build(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape)
if len(input_shape) != 3:
raise ValueError('Inputs should have rank 3. Received input shape: ' +
str(input_shape))
channel_axis = self._get_channel_axis()
if input_shape.dims[channel_axis].value is None:
raise ValueError('The channel dimension of the inputs '
'should be defined. Found `None`.')
input_dim = int(input_shape[channel_axis])
self.input_spec = InputSpec(ndim=3, axes={channel_axis: input_dim})
kernel_shape = self.kernel_size + (self.filters, input_dim)
self.kernel = self.add_weight(
name='kernel',
shape=kernel_shape,
initializer=self.kernel_initializer,
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint,
trainable=True,
dtype=self.dtype)
if self.use_bias:
self.bias = self.add_weight(
name='bias',
shape=(self.filters,),
initializer=self.bias_initializer,
regularizer=self.bias_regularizer,
constraint=self.bias_constraint,
trainable=True,
dtype=self.dtype)
else:
self.bias = None
self.built = True
def call(self, inputs):
inputs_shape = array_ops.shape(inputs)
batch_size = inputs_shape[0]
if self.data_format == 'channels_first':
t_axis = 2
else:
t_axis = 1
length = inputs_shape[t_axis]
if self.output_padding is None:
output_padding = None
else:
output_padding = self.output_padding[0]
out_length = conv_utils.deconv_output_length(
length, self.kernel_size[0], padding=self.padding,
output_padding=output_padding, stride=self.strides[0],
dilation=self.dilation_rate[0])
if self.data_format == 'channels_first':
output_shape = (batch_size, self.filters, out_length)
else:
output_shape = (batch_size, out_length, self.filters)
data_format = conv_utils.convert_data_format(self.data_format, ndim=3)
output_shape_tensor = array_ops.stack(output_shape)
outputs = nn_ops.conv1d_transpose(
inputs,
self.kernel,
output_shape_tensor,
strides=self.strides,
padding=self.padding.upper(),
data_format=data_format,
dilations=self.dilation_rate)
if not context.executing_eagerly():
out_shape = self.compute_output_shape(inputs.shape)
outputs.set_shape(out_shape)
if self.use_bias:
outputs = nn.bias_add(
outputs,
self.bias,
data_format=data_format)
if self.activation is not None:
return self.activation(outputs)
return outputs
def compute_output_shape(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape).as_list()
output_shape = list(input_shape)
if self.data_format == 'channels_first':
c_axis, t_axis = 1, 2
else:
c_axis, t_axis = 2, 1
if self.output_padding is None:
output_padding = None
else:
output_padding = self.output_padding[0]
output_shape[c_axis] = self.filters
output_shape[t_axis] = conv_utils.deconv_output_length(
output_shape[t_axis],
self.kernel_size[0],
padding=self.padding,
output_padding=output_padding,
stride=self.strides[0],
dilation=self.dilation_rate[0])
return tensor_shape.TensorShape(output_shape)
def get_config(self):
config = super(Conv1DTranspose, self).get_config()
config['output_padding'] = self.output_padding
return config
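# For the transposed variants the spatial output size comes from
# conv_utils.deconv_output_length; with 'valid' padding and no output_padding
# it is roughly input_length * stride + max(kernel_size - stride, 0), so these
# layers upsample rather than downsample.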
@keras_export('keras.layers.Conv2DTranspose',
'keras.layers.Convolution2DTranspose')
class Conv2DTranspose(Conv2D):
def __init__(self,
filters,
kernel_size,
strides=(1, 1),
padding='valid',
output_padding=None,
data_format=None,
dilation_rate=(1, 1),
activation=None,
use_bias=True,
kernel_initializer='glorot_uniform',
bias_initializer='zeros',
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
**kwargs):
super(Conv2DTranspose, self).__init__(
filters=filters,
kernel_size=kernel_size,
strides=strides,
padding=padding,
data_format=data_format,
dilation_rate=dilation_rate,
activation=activations.get(activation),
use_bias=use_bias,
kernel_initializer=initializers.get(kernel_initializer),
bias_initializer=initializers.get(bias_initializer),
kernel_regularizer=regularizers.get(kernel_regularizer),
bias_regularizer=regularizers.get(bias_regularizer),
activity_regularizer=regularizers.get(activity_regularizer),
kernel_constraint=constraints.get(kernel_constraint),
bias_constraint=constraints.get(bias_constraint),
**kwargs)
self.output_padding = output_padding
if self.output_padding is not None:
self.output_padding = conv_utils.normalize_tuple(
self.output_padding, 2, 'output_padding')
for stride, out_pad in zip(self.strides, self.output_padding):
if out_pad >= stride:
raise ValueError('Stride ' + str(self.strides) + ' must be '
'greater than output padding ' +
str(self.output_padding))
def build(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape)
if len(input_shape) != 4:
raise ValueError('Inputs should have rank 4. Received input shape: ' +
str(input_shape))
channel_axis = self._get_channel_axis()
if input_shape.dims[channel_axis].value is None:
raise ValueError('The channel dimension of the inputs '
'should be defined. Found `None`.')
input_dim = int(input_shape[channel_axis])
self.input_spec = InputSpec(ndim=4, axes={channel_axis: input_dim})
kernel_shape = self.kernel_size + (self.filters, input_dim)
self.kernel = self.add_weight(
name='kernel',
shape=kernel_shape,
initializer=self.kernel_initializer,
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint,
trainable=True,
dtype=self.dtype)
if self.use_bias:
self.bias = self.add_weight(
name='bias',
shape=(self.filters,),
initializer=self.bias_initializer,
regularizer=self.bias_regularizer,
constraint=self.bias_constraint,
trainable=True,
dtype=self.dtype)
else:
self.bias = None
self.built = True
def call(self, inputs):
inputs_shape = array_ops.shape(inputs)
batch_size = inputs_shape[0]
if self.data_format == 'channels_first':
h_axis, w_axis = 2, 3
else:
h_axis, w_axis = 1, 2
height, width = None, None
if inputs.shape.rank is not None:
dims = inputs.shape.as_list()
height = dims[h_axis]
width = dims[w_axis]
height = height if height is not None else inputs_shape[h_axis]
width = width if width is not None else inputs_shape[w_axis]
kernel_h, kernel_w = self.kernel_size
stride_h, stride_w = self.strides
if self.output_padding is None:
out_pad_h = out_pad_w = None
else:
out_pad_h, out_pad_w = self.output_padding
out_height = conv_utils.deconv_output_length(height,
kernel_h,
padding=self.padding,
output_padding=out_pad_h,
stride=stride_h,
dilation=self.dilation_rate[0])
out_width = conv_utils.deconv_output_length(width,
kernel_w,
padding=self.padding,
output_padding=out_pad_w,
stride=stride_w,
dilation=self.dilation_rate[1])
if self.data_format == 'channels_first':
output_shape = (batch_size, self.filters, out_height, out_width)
else:
output_shape = (batch_size, out_height, out_width, self.filters)
output_shape_tensor = array_ops.stack(output_shape)
outputs = backend.conv2d_transpose(
inputs,
self.kernel,
output_shape_tensor,
strides=self.strides,
padding=self.padding,
data_format=self.data_format,
dilation_rate=self.dilation_rate)
if not context.executing_eagerly():
out_shape = self.compute_output_shape(inputs.shape)
outputs.set_shape(out_shape)
if self.use_bias:
outputs = nn.bias_add(
outputs,
self.bias,
data_format=conv_utils.convert_data_format(self.data_format, ndim=4))
if self.activation is not None:
return self.activation(outputs)
return outputs
def compute_output_shape(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape).as_list()
output_shape = list(input_shape)
if self.data_format == 'channels_first':
c_axis, h_axis, w_axis = 1, 2, 3
else:
c_axis, h_axis, w_axis = 3, 1, 2
kernel_h, kernel_w = self.kernel_size
stride_h, stride_w = self.strides
if self.output_padding is None:
out_pad_h = out_pad_w = None
else:
out_pad_h, out_pad_w = self.output_padding
output_shape[c_axis] = self.filters
output_shape[h_axis] = conv_utils.deconv_output_length(
output_shape[h_axis],
kernel_h,
padding=self.padding,
output_padding=out_pad_h,
stride=stride_h,
dilation=self.dilation_rate[0])
output_shape[w_axis] = conv_utils.deconv_output_length(
output_shape[w_axis],
kernel_w,
padding=self.padding,
output_padding=out_pad_w,
stride=stride_w,
dilation=self.dilation_rate[1])
return tensor_shape.TensorShape(output_shape)
def get_config(self):
config = super(Conv2DTranspose, self).get_config()
config['output_padding'] = self.output_padding
return config
@keras_export('keras.layers.Conv3DTranspose',
'keras.layers.Convolution3DTranspose')
class Conv3DTranspose(Conv3D):
def __init__(self,
filters,
kernel_size,
strides=(1, 1, 1),
padding='valid',
output_padding=None,
data_format=None,
dilation_rate=(1, 1, 1),
activation=None,
use_bias=True,
kernel_initializer='glorot_uniform',
bias_initializer='zeros',
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
**kwargs):
super(Conv3DTranspose, self).__init__(
filters=filters,
kernel_size=kernel_size,
strides=strides,
padding=padding,
data_format=data_format,
dilation_rate=dilation_rate,
activation=activations.get(activation),
use_bias=use_bias,
kernel_initializer=initializers.get(kernel_initializer),
bias_initializer=initializers.get(bias_initializer),
kernel_regularizer=regularizers.get(kernel_regularizer),
bias_regularizer=regularizers.get(bias_regularizer),
activity_regularizer=regularizers.get(activity_regularizer),
kernel_constraint=constraints.get(kernel_constraint),
bias_constraint=constraints.get(bias_constraint),
**kwargs)
self.output_padding = output_padding
if self.output_padding is not None:
self.output_padding = conv_utils.normalize_tuple(
self.output_padding, 3, 'output_padding')
for stride, out_pad in zip(self.strides, self.output_padding):
if out_pad >= stride:
raise ValueError('Stride ' + str(self.strides) + ' must be '
'greater than output padding ' +
str(self.output_padding))
def build(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape)
    if len(input_shape) != 5:
      raise ValueError('Inputs should have rank 5. Received input shape: ' +
                       str(input_shape))
channel_axis = self._get_channel_axis()
if input_shape.dims[channel_axis].value is None:
raise ValueError('The channel dimension of the inputs '
'should be defined, found None: ' + str(input_shape))
input_dim = int(input_shape[channel_axis])
kernel_shape = self.kernel_size + (self.filters, input_dim)
self.input_spec = InputSpec(ndim=5, axes={channel_axis: input_dim})
self.kernel = self.add_weight(
'kernel',
shape=kernel_shape,
initializer=self.kernel_initializer,
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint,
trainable=True,
dtype=self.dtype)
if self.use_bias:
self.bias = self.add_weight(
'bias',
shape=(self.filters,),
initializer=self.bias_initializer,
regularizer=self.bias_regularizer,
constraint=self.bias_constraint,
trainable=True,
dtype=self.dtype)
else:
self.bias = None
self.built = True
def call(self, inputs):
inputs_shape = array_ops.shape(inputs)
batch_size = inputs_shape[0]
if self.data_format == 'channels_first':
d_axis, h_axis, w_axis = 2, 3, 4
else:
d_axis, h_axis, w_axis = 1, 2, 3
depth = inputs_shape[d_axis]
height = inputs_shape[h_axis]
width = inputs_shape[w_axis]
kernel_d, kernel_h, kernel_w = self.kernel_size
stride_d, stride_h, stride_w = self.strides
if self.output_padding is None:
out_pad_d = out_pad_h = out_pad_w = None
else:
out_pad_d, out_pad_h, out_pad_w = self.output_padding
out_depth = conv_utils.deconv_output_length(depth,
kernel_d,
padding=self.padding,
output_padding=out_pad_d,
stride=stride_d)
out_height = conv_utils.deconv_output_length(height,
kernel_h,
padding=self.padding,
output_padding=out_pad_h,
stride=stride_h)
out_width = conv_utils.deconv_output_length(width,
kernel_w,
padding=self.padding,
output_padding=out_pad_w,
stride=stride_w)
if self.data_format == 'channels_first':
output_shape = (batch_size, self.filters, out_depth, out_height,
out_width)
strides = (1, 1, stride_d, stride_h, stride_w)
else:
output_shape = (batch_size, out_depth, out_height, out_width,
self.filters)
strides = (1, stride_d, stride_h, stride_w, 1)
output_shape_tensor = array_ops.stack(output_shape)
outputs = nn.conv3d_transpose(
inputs,
self.kernel,
output_shape_tensor,
strides,
data_format=conv_utils.convert_data_format(self.data_format, ndim=5),
padding=self.padding.upper())
if not context.executing_eagerly():
out_shape = self.compute_output_shape(inputs.shape)
outputs.set_shape(out_shape)
if self.use_bias:
outputs = nn.bias_add(
outputs,
self.bias,
data_format=conv_utils.convert_data_format(self.data_format, ndim=4))
if self.activation is not None:
return self.activation(outputs)
return outputs
def compute_output_shape(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape).as_list()
output_shape = list(input_shape)
if self.data_format == 'channels_first':
c_axis, d_axis, h_axis, w_axis = 1, 2, 3, 4
else:
c_axis, d_axis, h_axis, w_axis = 4, 1, 2, 3
kernel_d, kernel_h, kernel_w = self.kernel_size
stride_d, stride_h, stride_w = self.strides
if self.output_padding is None:
out_pad_d = out_pad_h = out_pad_w = None
else:
out_pad_d, out_pad_h, out_pad_w = self.output_padding
output_shape[c_axis] = self.filters
output_shape[d_axis] = conv_utils.deconv_output_length(
output_shape[d_axis],
kernel_d,
padding=self.padding,
output_padding=out_pad_d,
stride=stride_d)
output_shape[h_axis] = conv_utils.deconv_output_length(
output_shape[h_axis],
kernel_h,
padding=self.padding,
output_padding=out_pad_h,
stride=stride_h)
output_shape[w_axis] = conv_utils.deconv_output_length(
output_shape[w_axis],
kernel_w,
padding=self.padding,
output_padding=out_pad_w,
stride=stride_w)
return tensor_shape.TensorShape(output_shape)
def get_config(self):
config = super(Conv3DTranspose, self).get_config()
config.pop('dilation_rate')
config['output_padding'] = self.output_padding
return config
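# SeparableConv factors a convolution into a depthwise step (one filter per
# input channel, times depth_multiplier) followed by a 1x1 pointwise
# convolution that mixes the channels down to `filters` outputs.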
class SeparableConv(Conv):
def __init__(self,
rank,
filters,
kernel_size,
strides=1,
padding='valid',
data_format=None,
dilation_rate=1,
depth_multiplier=1,
activation=None,
use_bias=True,
depthwise_initializer='glorot_uniform',
pointwise_initializer='glorot_uniform',
bias_initializer='zeros',
depthwise_regularizer=None,
pointwise_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
depthwise_constraint=None,
pointwise_constraint=None,
bias_constraint=None,
trainable=True,
name=None,
**kwargs):
super(SeparableConv, self).__init__(
rank=rank,
filters=filters,
kernel_size=kernel_size,
strides=strides,
padding=padding,
data_format=data_format,
dilation_rate=dilation_rate,
activation=activations.get(activation),
use_bias=use_bias,
bias_initializer=initializers.get(bias_initializer),
bias_regularizer=regularizers.get(bias_regularizer),
activity_regularizer=regularizers.get(activity_regularizer),
bias_constraint=bias_constraint,
trainable=trainable,
name=name,
**kwargs)
self.depth_multiplier = depth_multiplier
self.depthwise_initializer = initializers.get(depthwise_initializer)
self.pointwise_initializer = initializers.get(pointwise_initializer)
self.depthwise_regularizer = regularizers.get(depthwise_regularizer)
self.pointwise_regularizer = regularizers.get(pointwise_regularizer)
self.depthwise_constraint = constraints.get(depthwise_constraint)
self.pointwise_constraint = constraints.get(pointwise_constraint)
def build(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape)
channel_axis = self._get_channel_axis()
if input_shape.dims[channel_axis].value is None:
raise ValueError('The channel dimension of the inputs '
'should be defined. Found `None`.')
input_dim = int(input_shape[channel_axis])
self.input_spec = InputSpec(ndim=self.rank + 2,
axes={channel_axis: input_dim})
depthwise_kernel_shape = self.kernel_size + (input_dim,
self.depth_multiplier)
pointwise_kernel_shape = (
1,) * self.rank + (self.depth_multiplier * input_dim, self.filters)
self.depthwise_kernel = self.add_weight(
name='depthwise_kernel',
shape=depthwise_kernel_shape,
initializer=self.depthwise_initializer,
regularizer=self.depthwise_regularizer,
constraint=self.depthwise_constraint,
trainable=True,
dtype=self.dtype)
self.pointwise_kernel = self.add_weight(
name='pointwise_kernel',
shape=pointwise_kernel_shape,
initializer=self.pointwise_initializer,
regularizer=self.pointwise_regularizer,
constraint=self.pointwise_constraint,
trainable=True,
dtype=self.dtype)
if self.use_bias:
self.bias = self.add_weight(
name='bias',
shape=(self.filters,),
initializer=self.bias_initializer,
regularizer=self.bias_regularizer,
constraint=self.bias_constraint,
trainable=True,
dtype=self.dtype)
else:
self.bias = None
self.built = True
def call(self, inputs):
raise NotImplementedError
def get_config(self):
config = {
'filters':
self.filters,
'kernel_size':
self.kernel_size,
'strides':
self.strides,
'padding':
self.padding,
'data_format':
self.data_format,
'depth_multiplier':
self.depth_multiplier,
'dilation_rate':
self.dilation_rate,
'activation':
activations.serialize(self.activation),
'use_bias':
self.use_bias,
'depthwise_initializer':
initializers.serialize(self.depthwise_initializer),
'pointwise_initializer':
initializers.serialize(self.pointwise_initializer),
'bias_initializer':
initializers.serialize(self.bias_initializer),
'depthwise_regularizer':
regularizers.serialize(self.depthwise_regularizer),
'pointwise_regularizer':
regularizers.serialize(self.pointwise_regularizer),
'bias_regularizer':
regularizers.serialize(self.bias_regularizer),
'activity_regularizer':
regularizers.serialize(self.activity_regularizer),
'depthwise_constraint':
constraints.serialize(self.depthwise_constraint),
'pointwise_constraint':
constraints.serialize(self.pointwise_constraint),
'bias_constraint':
constraints.serialize(self.bias_constraint)
}
base_config = super(SeparableConv, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@keras_export('keras.layers.SeparableConv1D',
'keras.layers.SeparableConvolution1D')
class SeparableConv1D(SeparableConv):
def __init__(self,
filters,
kernel_size,
strides=1,
padding='valid',
data_format=None,
dilation_rate=1,
depth_multiplier=1,
activation=None,
use_bias=True,
depthwise_initializer='glorot_uniform',
pointwise_initializer='glorot_uniform',
bias_initializer='zeros',
depthwise_regularizer=None,
pointwise_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
depthwise_constraint=None,
pointwise_constraint=None,
bias_constraint=None,
**kwargs):
super(SeparableConv1D, self).__init__(
rank=1,
filters=filters,
kernel_size=kernel_size,
strides=strides,
padding=padding,
data_format=data_format,
dilation_rate=dilation_rate,
depth_multiplier=depth_multiplier,
activation=activations.get(activation),
use_bias=use_bias,
depthwise_initializer=initializers.get(depthwise_initializer),
pointwise_initializer=initializers.get(pointwise_initializer),
bias_initializer=initializers.get(bias_initializer),
depthwise_regularizer=regularizers.get(depthwise_regularizer),
pointwise_regularizer=regularizers.get(pointwise_regularizer),
bias_regularizer=regularizers.get(bias_regularizer),
activity_regularizer=regularizers.get(activity_regularizer),
depthwise_constraint=constraints.get(depthwise_constraint),
pointwise_constraint=constraints.get(pointwise_constraint),
bias_constraint=constraints.get(bias_constraint),
**kwargs)
def call(self, inputs):
if self.padding == 'causal':
inputs = array_ops.pad(inputs, self._compute_causal_padding())
if self.data_format == 'channels_last':
strides = (1,) + self.strides * 2 + (1,)
spatial_start_dim = 1
else:
strides = (1, 1) + self.strides * 2
spatial_start_dim = 2
inputs = array_ops.expand_dims(inputs, spatial_start_dim)
depthwise_kernel = array_ops.expand_dims(self.depthwise_kernel, 0)
pointwise_kernel = array_ops.expand_dims(self.pointwise_kernel, 0)
dilation_rate = (1,) + self.dilation_rate
if self.padding == 'causal':
op_padding = 'valid'
else:
op_padding = self.padding
outputs = nn.separable_conv2d(
inputs,
depthwise_kernel,
pointwise_kernel,
strides=strides,
padding=op_padding.upper(),
rate=dilation_rate,
data_format=conv_utils.convert_data_format(self.data_format, ndim=4))
if self.use_bias:
outputs = nn.bias_add(
outputs,
self.bias,
data_format=conv_utils.convert_data_format(self.data_format, ndim=4))
outputs = array_ops.squeeze(outputs, [spatial_start_dim])
if self.activation is not None:
return self.activation(outputs)
return outputs
@keras_export('keras.layers.SeparableConv2D',
'keras.layers.SeparableConvolution2D')
class SeparableConv2D(SeparableConv):
def __init__(self,
filters,
kernel_size,
strides=(1, 1),
padding='valid',
data_format=None,
dilation_rate=(1, 1),
depth_multiplier=1,
activation=None,
use_bias=True,
depthwise_initializer='glorot_uniform',
pointwise_initializer='glorot_uniform',
bias_initializer='zeros',
depthwise_regularizer=None,
pointwise_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
depthwise_constraint=None,
pointwise_constraint=None,
bias_constraint=None,
**kwargs):
super(SeparableConv2D, self).__init__(
rank=2,
filters=filters,
kernel_size=kernel_size,
strides=strides,
padding=padding,
data_format=data_format,
dilation_rate=dilation_rate,
depth_multiplier=depth_multiplier,
activation=activations.get(activation),
use_bias=use_bias,
depthwise_initializer=initializers.get(depthwise_initializer),
pointwise_initializer=initializers.get(pointwise_initializer),
bias_initializer=initializers.get(bias_initializer),
depthwise_regularizer=regularizers.get(depthwise_regularizer),
pointwise_regularizer=regularizers.get(pointwise_regularizer),
bias_regularizer=regularizers.get(bias_regularizer),
activity_regularizer=regularizers.get(activity_regularizer),
depthwise_constraint=constraints.get(depthwise_constraint),
pointwise_constraint=constraints.get(pointwise_constraint),
bias_constraint=constraints.get(bias_constraint),
**kwargs)
def call(self, inputs):
if self.data_format == 'channels_last':
strides = (1,) + self.strides + (1,)
else:
strides = (1, 1) + self.strides
outputs = nn.separable_conv2d(
inputs,
self.depthwise_kernel,
self.pointwise_kernel,
strides=strides,
padding=self.padding.upper(),
rate=self.dilation_rate,
data_format=conv_utils.convert_data_format(self.data_format, ndim=4))
if self.use_bias:
outputs = nn.bias_add(
outputs,
self.bias,
data_format=conv_utils.convert_data_format(self.data_format, ndim=4))
if self.activation is not None:
return self.activation(outputs)
return outputs
@keras_export('keras.layers.DepthwiseConv2D')
class DepthwiseConv2D(Conv2D):
def __init__(self,
kernel_size,
strides=(1, 1),
padding='valid',
depth_multiplier=1,
data_format=None,
dilation_rate=(1, 1),
activation=None,
use_bias=True,
depthwise_initializer='glorot_uniform',
bias_initializer='zeros',
depthwise_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
depthwise_constraint=None,
bias_constraint=None,
**kwargs):
super(DepthwiseConv2D, self).__init__(
filters=None,
kernel_size=kernel_size,
strides=strides,
padding=padding,
data_format=data_format,
dilation_rate=dilation_rate,
activation=activation,
use_bias=use_bias,
bias_regularizer=bias_regularizer,
activity_regularizer=activity_regularizer,
bias_constraint=bias_constraint,
**kwargs)
self.depth_multiplier = depth_multiplier
self.depthwise_initializer = initializers.get(depthwise_initializer)
self.depthwise_regularizer = regularizers.get(depthwise_regularizer)
self.depthwise_constraint = constraints.get(depthwise_constraint)
self.bias_initializer = initializers.get(bias_initializer)
def build(self, input_shape):
    if len(input_shape) < 4:
      raise ValueError('Inputs to `DepthwiseConv2D` should have rank 4. '
                       'Received input shape: ' + str(input_shape))
input_shape = tensor_shape.TensorShape(input_shape)
channel_axis = self._get_channel_axis()
if input_shape.dims[channel_axis].value is None:
raise ValueError('The channel dimension of the inputs to '
'`DepthwiseConv2D` '
'should be defined. Found `None`.')
input_dim = int(input_shape[channel_axis])
depthwise_kernel_shape = (self.kernel_size[0],
self.kernel_size[1],
input_dim,
self.depth_multiplier)
self.depthwise_kernel = self.add_weight(
shape=depthwise_kernel_shape,
initializer=self.depthwise_initializer,
name='depthwise_kernel',
regularizer=self.depthwise_regularizer,
constraint=self.depthwise_constraint)
if self.use_bias:
self.bias = self.add_weight(shape=(input_dim * self.depth_multiplier,),
initializer=self.bias_initializer,
name='bias',
regularizer=self.bias_regularizer,
constraint=self.bias_constraint)
else:
self.bias = None
self.input_spec = InputSpec(ndim=4, axes={channel_axis: input_dim})
self.built = True
def call(self, inputs):
outputs = backend.depthwise_conv2d(
inputs,
self.depthwise_kernel,
strides=self.strides,
padding=self.padding,
dilation_rate=self.dilation_rate,
data_format=self.data_format)
if self.use_bias:
outputs = backend.bias_add(
outputs,
self.bias,
data_format=self.data_format)
if self.activation is not None:
return self.activation(outputs)
return outputs
@tf_utils.shape_type_conversion
def compute_output_shape(self, input_shape):
if self.data_format == 'channels_first':
rows = input_shape[2]
cols = input_shape[3]
out_filters = input_shape[1] * self.depth_multiplier
elif self.data_format == 'channels_last':
rows = input_shape[1]
cols = input_shape[2]
out_filters = input_shape[3] * self.depth_multiplier
rows = conv_utils.conv_output_length(rows, self.kernel_size[0],
self.padding,
self.strides[0],
self.dilation_rate[0])
cols = conv_utils.conv_output_length(cols, self.kernel_size[1],
self.padding,
self.strides[1],
self.dilation_rate[1])
if self.data_format == 'channels_first':
return (input_shape[0], out_filters, rows, cols)
elif self.data_format == 'channels_last':
return (input_shape[0], rows, cols, out_filters)
def get_config(self):
config = super(DepthwiseConv2D, self).get_config()
config.pop('filters')
config.pop('kernel_initializer')
config.pop('kernel_regularizer')
config.pop('kernel_constraint')
config['depth_multiplier'] = self.depth_multiplier
config['depthwise_initializer'] = initializers.serialize(
self.depthwise_initializer)
config['depthwise_regularizer'] = regularizers.serialize(
self.depthwise_regularizer)
config['depthwise_constraint'] = constraints.serialize(
self.depthwise_constraint)
return config
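# The UpSampling layers enlarge inputs by integer factors, repeating elements
# (or resizing with the chosen interpolation in UpSampling2D); e.g.
# UpSampling2D((2, 2)) turns a (batch, 8, 8, c) input into (batch, 16, 16, c).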
@keras_export('keras.layers.UpSampling1D')
class UpSampling1D(Layer):
def __init__(self, size=2, **kwargs):
super(UpSampling1D, self).__init__(**kwargs)
self.size = int(size)
self.input_spec = InputSpec(ndim=3)
def compute_output_shape(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape).as_list()
size = self.size * input_shape[1] if input_shape[1] is not None else None
return tensor_shape.TensorShape([input_shape[0], size, input_shape[2]])
def call(self, inputs):
output = backend.repeat_elements(inputs, self.size, axis=1)
return output
def get_config(self):
config = {'size': self.size}
base_config = super(UpSampling1D, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@keras_export('keras.layers.UpSampling2D')
class UpSampling2D(Layer):
def __init__(self,
size=(2, 2),
data_format=None,
interpolation='nearest',
**kwargs):
super(UpSampling2D, self).__init__(**kwargs)
self.data_format = conv_utils.normalize_data_format(data_format)
self.size = conv_utils.normalize_tuple(size, 2, 'size')
if interpolation not in {'nearest', 'bilinear'}:
raise ValueError('`interpolation` argument should be one of `"nearest"` '
'or `"bilinear"`.')
self.interpolation = interpolation
self.input_spec = InputSpec(ndim=4)
def compute_output_shape(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape).as_list()
if self.data_format == 'channels_first':
      height = self.size[0] * input_shape[2] if input_shape[2] is not None else None
      width = self.size[1] * input_shape[3] if input_shape[3] is not None else None
return tensor_shape.TensorShape(
[input_shape[0], input_shape[1], height, width])
else:
      height = self.size[0] * input_shape[1] if input_shape[1] is not None else None
      width = self.size[1] * input_shape[2] if input_shape[2] is not None else None
return tensor_shape.TensorShape(
[input_shape[0], height, width, input_shape[3]])
def call(self, inputs):
return backend.resize_images(
inputs, self.size[0], self.size[1], self.data_format,
interpolation=self.interpolation)
def get_config(self):
config = {
'size': self.size,
'data_format': self.data_format,
'interpolation': self.interpolation
}
base_config = super(UpSampling2D, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@keras_export('keras.layers.UpSampling3D')
class UpSampling3D(Layer):
def __init__(self, size=(2, 2, 2), data_format=None, **kwargs):
self.data_format = conv_utils.normalize_data_format(data_format)
self.size = conv_utils.normalize_tuple(size, 3, 'size')
self.input_spec = InputSpec(ndim=5)
super(UpSampling3D, self).__init__(**kwargs)
def compute_output_shape(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape).as_list()
if self.data_format == 'channels_first':
      dim1 = self.size[0] * input_shape[2] if input_shape[2] is not None else None
      dim2 = self.size[1] * input_shape[3] if input_shape[3] is not None else None
      dim3 = self.size[2] * input_shape[4] if input_shape[4] is not None else None
return tensor_shape.TensorShape(
[input_shape[0], input_shape[1], dim1, dim2, dim3])
else:
      dim1 = self.size[0] * input_shape[1] if input_shape[1] is not None else None
      dim2 = self.size[1] * input_shape[2] if input_shape[2] is not None else None
      dim3 = self.size[2] * input_shape[3] if input_shape[3] is not None else None
return tensor_shape.TensorShape(
[input_shape[0], dim1, dim2, dim3, input_shape[4]])
def call(self, inputs):
return backend.resize_volumes(
inputs, self.size[0], self.size[1], self.size[2], self.data_format)
def get_config(self):
config = {'size': self.size, 'data_format': self.data_format}
base_config = super(UpSampling3D, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@keras_export('keras.layers.ZeroPadding1D')
class ZeroPadding1D(Layer):
def __init__(self, padding=1, **kwargs):
super(ZeroPadding1D, self).__init__(**kwargs)
self.padding = conv_utils.normalize_tuple(padding, 2, 'padding')
self.input_spec = InputSpec(ndim=3)
def compute_output_shape(self, input_shape):
if input_shape[1] is not None:
length = input_shape[1] + self.padding[0] + self.padding[1]
else:
length = None
return tensor_shape.TensorShape([input_shape[0], length, input_shape[2]])
def call(self, inputs):
return backend.temporal_padding(inputs, padding=self.padding)
def get_config(self):
config = {'padding': self.padding}
base_config = super(ZeroPadding1D, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@keras_export('keras.layers.ZeroPadding2D')
class ZeroPadding2D(Layer):
def __init__(self, padding=(1, 1), data_format=None, **kwargs):
super(ZeroPadding2D, self).__init__(**kwargs)
self.data_format = conv_utils.normalize_data_format(data_format)
if isinstance(padding, int):
self.padding = ((padding, padding), (padding, padding))
elif hasattr(padding, '__len__'):
if len(padding) != 2:
raise ValueError('`padding` should have two elements. '
'Found: ' + str(padding))
height_padding = conv_utils.normalize_tuple(padding[0], 2,
'1st entry of padding')
width_padding = conv_utils.normalize_tuple(padding[1], 2,
'2nd entry of padding')
self.padding = (height_padding, width_padding)
else:
raise ValueError('`padding` should be either an int, '
'a tuple of 2 ints '
'(symmetric_height_pad, symmetric_width_pad), '
'or a tuple of 2 tuples of 2 ints '
'((top_pad, bottom_pad), (left_pad, right_pad)). '
'Found: ' + str(padding))
self.input_spec = InputSpec(ndim=4)
def compute_output_shape(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape).as_list()
if self.data_format == 'channels_first':
if input_shape[2] is not None:
rows = input_shape[2] + self.padding[0][0] + self.padding[0][1]
else:
rows = None
if input_shape[3] is not None:
cols = input_shape[3] + self.padding[1][0] + self.padding[1][1]
else:
cols = None
return tensor_shape.TensorShape(
[input_shape[0], input_shape[1], rows, cols])
elif self.data_format == 'channels_last':
if input_shape[1] is not None:
rows = input_shape[1] + self.padding[0][0] + self.padding[0][1]
else:
rows = None
if input_shape[2] is not None:
cols = input_shape[2] + self.padding[1][0] + self.padding[1][1]
else:
cols = None
return tensor_shape.TensorShape(
[input_shape[0], rows, cols, input_shape[3]])
def call(self, inputs):
return backend.spatial_2d_padding(
inputs, padding=self.padding, data_format=self.data_format)
def get_config(self):
config = {'padding': self.padding, 'data_format': self.data_format}
base_config = super(ZeroPadding2D, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@keras_export('keras.layers.ZeroPadding3D')
class ZeroPadding3D(Layer):
def __init__(self, padding=(1, 1, 1), data_format=None, **kwargs):
super(ZeroPadding3D, self).__init__(**kwargs)
self.data_format = conv_utils.normalize_data_format(data_format)
if isinstance(padding, int):
self.padding = ((padding, padding), (padding, padding), (padding,
padding))
elif hasattr(padding, '__len__'):
if len(padding) != 3:
raise ValueError('`padding` should have 3 elements. '
'Found: ' + str(padding))
dim1_padding = conv_utils.normalize_tuple(padding[0], 2,
'1st entry of padding')
dim2_padding = conv_utils.normalize_tuple(padding[1], 2,
'2nd entry of padding')
dim3_padding = conv_utils.normalize_tuple(padding[2], 2,
'3rd entry of padding')
self.padding = (dim1_padding, dim2_padding, dim3_padding)
else:
raise ValueError(
'`padding` should be either an int, '
'a tuple of 3 ints '
'(symmetric_dim1_pad, symmetric_dim2_pad, symmetric_dim3_pad), '
'or a tuple of 3 tuples of 2 ints '
'((left_dim1_pad, right_dim1_pad),'
' (left_dim2_pad, right_dim2_pad),'
          ' (left_dim3_pad, right_dim3_pad)). '
'Found: ' + str(padding))
self.input_spec = InputSpec(ndim=5)
def compute_output_shape(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape).as_list()
if self.data_format == 'channels_first':
if input_shape[2] is not None:
dim1 = input_shape[2] + 2 * self.padding[0][0]
else:
dim1 = None
if input_shape[3] is not None:
dim2 = input_shape[3] + 2 * self.padding[1][0]
else:
dim2 = None
if input_shape[4] is not None:
dim3 = input_shape[4] + 2 * self.padding[2][0]
else:
dim3 = None
return tensor_shape.TensorShape(
[input_shape[0], input_shape[1], dim1, dim2, dim3])
elif self.data_format == 'channels_last':
if input_shape[1] is not None:
dim1 = input_shape[1] + 2 * self.padding[0][1]
else:
dim1 = None
if input_shape[2] is not None:
dim2 = input_shape[2] + 2 * self.padding[1][1]
else:
dim2 = None
if input_shape[3] is not None:
dim3 = input_shape[3] + 2 * self.padding[2][1]
else:
dim3 = None
return tensor_shape.TensorShape(
[input_shape[0], dim1, dim2, dim3, input_shape[4]])
def call(self, inputs):
return backend.spatial_3d_padding(
inputs, padding=self.padding, data_format=self.data_format)
def get_config(self):
config = {'padding': self.padding, 'data_format': self.data_format}
base_config = super(ZeroPadding3D, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
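# The Cropping layers mirror the ZeroPadding layers: each `cropping` entry is
# a (start, end) pair of units removed per spatial dimension, so
# Cropping2D(((1, 1), (2, 2))) trims a (batch, 28, 28, c) input to
# (batch, 26, 24, c).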
@keras_export('keras.layers.Cropping1D')
class Cropping1D(Layer):
def __init__(self, cropping=(1, 1), **kwargs):
super(Cropping1D, self).__init__(**kwargs)
self.cropping = conv_utils.normalize_tuple(cropping, 2, 'cropping')
self.input_spec = InputSpec(ndim=3)
def compute_output_shape(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape).as_list()
if input_shape[1] is not None:
length = input_shape[1] - self.cropping[0] - self.cropping[1]
else:
length = None
return tensor_shape.TensorShape([input_shape[0], length, input_shape[2]])
def call(self, inputs):
if self.cropping[1] == 0:
return inputs[:, self.cropping[0]:, :]
else:
return inputs[:, self.cropping[0]:-self.cropping[1], :]
def get_config(self):
config = {'cropping': self.cropping}
base_config = super(Cropping1D, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@keras_export('keras.layers.Cropping2D')
class Cropping2D(Layer):
def __init__(self, cropping=((0, 0), (0, 0)), data_format=None, **kwargs):
super(Cropping2D, self).__init__(**kwargs)
self.data_format = conv_utils.normalize_data_format(data_format)
if isinstance(cropping, int):
self.cropping = ((cropping, cropping), (cropping, cropping))
elif hasattr(cropping, '__len__'):
if len(cropping) != 2:
raise ValueError('`cropping` should have two elements. '
'Found: ' + str(cropping))
height_cropping = conv_utils.normalize_tuple(cropping[0], 2,
'1st entry of cropping')
width_cropping = conv_utils.normalize_tuple(cropping[1], 2,
'2nd entry of cropping')
self.cropping = (height_cropping, width_cropping)
else:
raise ValueError('`cropping` should be either an int, '
'a tuple of 2 ints '
'(symmetric_height_crop, symmetric_width_crop), '
'or a tuple of 2 tuples of 2 ints '
'((top_crop, bottom_crop), (left_crop, right_crop)). '
'Found: ' + str(cropping))
self.input_spec = InputSpec(ndim=4)
def compute_output_shape(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape).as_list()
if self.data_format == 'channels_first':
return tensor_shape.TensorShape([
input_shape[0], input_shape[1],
input_shape[2] - self.cropping[0][0] - self.cropping[0][1]
if input_shape[2] else None,
input_shape[3] - self.cropping[1][0] - self.cropping[1][1]
if input_shape[3] else None
])
else:
return tensor_shape.TensorShape([
input_shape[0],
input_shape[1] - self.cropping[0][0] - self.cropping[0][1]
if input_shape[1] else None,
input_shape[2] - self.cropping[1][0] - self.cropping[1][1]
if input_shape[2] else None, input_shape[3]
])
def call(self, inputs):
if self.data_format == 'channels_first':
if self.cropping[0][1] == self.cropping[1][1] == 0:
return inputs[:, :, self.cropping[0][0]:, self.cropping[1][0]:]
elif self.cropping[0][1] == 0:
return inputs[:, :, self.cropping[0][0]:, self.cropping[1][0]:
-self.cropping[1][1]]
elif self.cropping[1][1] == 0:
return inputs[:, :, self.cropping[0][0]:-self.cropping[0][1],
self.cropping[1][0]:]
return inputs[:, :, self.cropping[0][0]:-self.cropping[0][1],
self.cropping[1][0]:-self.cropping[1][1]]
else:
if self.cropping[0][1] == self.cropping[1][1] == 0:
return inputs[:, self.cropping[0][0]:, self.cropping[1][0]:, :]
elif self.cropping[0][1] == 0:
return inputs[:, self.cropping[0][0]:, self.cropping[1][0]:
-self.cropping[1][1], :]
elif self.cropping[1][1] == 0:
return inputs[:, self.cropping[0][0]:-self.cropping[0][1],
self.cropping[1][0]:, :]
      return inputs[:, self.cropping[0][0]:-self.cropping[0][1],
                    self.cropping[1][0]:-self.cropping[1][1], :]
def get_config(self):
config = {'cropping': self.cropping, 'data_format': self.data_format}
base_config = super(Cropping2D, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@keras_export('keras.layers.Cropping3D')
class Cropping3D(Layer):
def __init__(self,
cropping=((1, 1), (1, 1), (1, 1)),
data_format=None,
**kwargs):
super(Cropping3D, self).__init__(**kwargs)
self.data_format = conv_utils.normalize_data_format(data_format)
if isinstance(cropping, int):
self.cropping = ((cropping, cropping), (cropping, cropping), (cropping,
cropping))
elif hasattr(cropping, '__len__'):
if len(cropping) != 3:
raise ValueError('`cropping` should have 3 elements. '
'Found: ' + str(cropping))
dim1_cropping = conv_utils.normalize_tuple(cropping[0], 2,
'1st entry of cropping')
dim2_cropping = conv_utils.normalize_tuple(cropping[1], 2,
'2nd entry of cropping')
dim3_cropping = conv_utils.normalize_tuple(cropping[2], 2,
'3rd entry of cropping')
self.cropping = (dim1_cropping, dim2_cropping, dim3_cropping)
else:
raise ValueError(
'`cropping` should be either an int, '
'a tuple of 3 ints '
'(symmetric_dim1_crop, symmetric_dim2_crop, symmetric_dim3_crop), '
'or a tuple of 3 tuples of 2 ints '
'((left_dim1_crop, right_dim1_crop),'
' (left_dim2_crop, right_dim2_crop),'
          ' (left_dim3_crop, right_dim3_crop)). '
'Found: ' + str(cropping))
self.input_spec = InputSpec(ndim=5)
def compute_output_shape(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape).as_list()
if self.data_format == 'channels_first':
if input_shape[2] is not None:
dim1 = input_shape[2] - self.cropping[0][0] - self.cropping[0][1]
else:
dim1 = None
if input_shape[3] is not None:
dim2 = input_shape[3] - self.cropping[1][0] - self.cropping[1][1]
else:
dim2 = None
if input_shape[4] is not None:
dim3 = input_shape[4] - self.cropping[2][0] - self.cropping[2][1]
else:
dim3 = None
return tensor_shape.TensorShape(
[input_shape[0], input_shape[1], dim1, dim2, dim3])
elif self.data_format == 'channels_last':
if input_shape[1] is not None:
dim1 = input_shape[1] - self.cropping[0][0] - self.cropping[0][1]
else:
dim1 = None
if input_shape[2] is not None:
dim2 = input_shape[2] - self.cropping[1][0] - self.cropping[1][1]
else:
dim2 = None
if input_shape[3] is not None:
dim3 = input_shape[3] - self.cropping[2][0] - self.cropping[2][1]
else:
dim3 = None
return tensor_shape.TensorShape(
[input_shape[0], dim1, dim2, dim3, input_shape[4]])
def call(self, inputs):
if self.data_format == 'channels_first':
if self.cropping[0][1] == self.cropping[1][1] == self.cropping[2][1] == 0:
return inputs[:, :, self.cropping[0][0]:, self.cropping[1][0]:,
self.cropping[2][0]:]
elif self.cropping[0][1] == self.cropping[1][1] == 0:
return inputs[:, :, self.cropping[0][0]:, self.cropping[1][0]:,
self.cropping[2][0]:-self.cropping[2][1]]
elif self.cropping[1][1] == self.cropping[2][1] == 0:
return inputs[:, :, self.cropping[0][0]:-self.cropping[0][1],
self.cropping[1][0]:, self.cropping[2][0]:]
elif self.cropping[0][1] == self.cropping[2][1] == 0:
return inputs[:, :, self.cropping[0][0]:, self.cropping[1][0]:
-self.cropping[1][1], self.cropping[2][0]:]
      elif self.cropping[0][1] == 0:
        return inputs[:, :, self.cropping[0][0]:,
                      self.cropping[1][0]:-self.cropping[1][1],
                      self.cropping[2][0]:-self.cropping[2][1]]
      elif self.cropping[1][1] == 0:
        return inputs[:, :, self.cropping[0][0]:-self.cropping[0][1],
                      self.cropping[1][0]:,
                      self.cropping[2][0]:-self.cropping[2][1]]
      elif self.cropping[2][1] == 0:
        return inputs[:, :, self.cropping[0][0]:-self.cropping[0][1],
                      self.cropping[1][0]:-self.cropping[1][1],
                      self.cropping[2][0]:]
      return inputs[:, :, self.cropping[0][0]:-self.cropping[0][1],
                    self.cropping[1][0]:-self.cropping[1][1],
                    self.cropping[2][0]:-self.cropping[2][1]]
else:
if self.cropping[0][1] == self.cropping[1][1] == self.cropping[2][1] == 0:
return inputs[:, self.cropping[0][0]:, self.cropping[1][0]:,
self.cropping[2][0]:, :]
elif self.cropping[0][1] == self.cropping[1][1] == 0:
return inputs[:, self.cropping[0][0]:, self.cropping[1][0]:,
self.cropping[2][0]:-self.cropping[2][1], :]
elif self.cropping[1][1] == self.cropping[2][1] == 0:
return inputs[:, self.cropping[0][0]:-self.cropping[0][1],
self.cropping[1][0]:, self.cropping[2][0]:, :]
elif self.cropping[0][1] == self.cropping[2][1] == 0:
return inputs[:, self.cropping[0][0]:, self.cropping[1][0]:
-self.cropping[1][1], self.cropping[2][0]:, :]
      elif self.cropping[0][1] == 0:
        return inputs[:, self.cropping[0][0]:,
                      self.cropping[1][0]:-self.cropping[1][1],
                      self.cropping[2][0]:-self.cropping[2][1], :]
      elif self.cropping[1][1] == 0:
        return inputs[:, self.cropping[0][0]:-self.cropping[0][1],
                      self.cropping[1][0]:,
                      self.cropping[2][0]:-self.cropping[2][1], :]
      elif self.cropping[2][1] == 0:
        return inputs[:, self.cropping[0][0]:-self.cropping[0][1],
                      self.cropping[1][0]:-self.cropping[1][1],
                      self.cropping[2][0]:, :]
      return inputs[:, self.cropping[0][0]:-self.cropping[0][1],
                    self.cropping[1][0]:-self.cropping[1][1],
                    self.cropping[2][0]:-self.cropping[2][1], :]
def get_config(self):
config = {'cropping': self.cropping, 'data_format': self.data_format}
base_config = super(Cropping3D, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
Convolution1D = Conv1D
Convolution2D = Conv2D
Convolution3D = Conv3D
SeparableConvolution1D = SeparableConv1D
SeparableConvolution2D = SeparableConv2D
Convolution2DTranspose = Conv2DTranspose
Convolution3DTranspose = Conv3DTranspose
Deconvolution2D = Deconv2D = Conv2DTranspose
Deconvolution3D = Deconv3D = Conv3DTranspose
| true
| true
|
f7148d6111bc6f731ff2f47e5c14db68aaca4f46
| 590
|
py
|
Python
|
src/aoc/__init__.py
|
CreatingNull/AoC-2021
|
ee0aec7aa2f4d7cb2d62838d39f8c74edae8cc96
|
[
"MIT"
] | null | null | null |
src/aoc/__init__.py
|
CreatingNull/AoC-2021
|
ee0aec7aa2f4d7cb2d62838d39f8c74edae8cc96
|
[
"MIT"
] | null | null | null |
src/aoc/__init__.py
|
CreatingNull/AoC-2021
|
ee0aec7aa2f4d7cb2d62838d39f8c74edae8cc96
|
[
"MIT"
] | null | null | null |
"""Root package for the challenge.
Contains generic functionality not specific to days.
"""
import logging
import sys
from functools import partial
from pathlib import Path
ROOT_PATH = Path(Path(__file__).parents[1])
open_utf8 = partial(open, encoding="UTF-8")  # Open with explicit encoding
# Configuring the global logger
log = logging.getLogger()
log.setLevel(logging.INFO)
__handler = logging.StreamHandler(sys.stdout)
__handler.setLevel(logging.DEBUG)
__handler.setFormatter(
logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
)
log.addHandler(__handler)
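# Typical use from a day module (a sketch; the data path is hypothetical):
#   from aoc import ROOT_PATH, log, open_utf8
#   with open_utf8(ROOT_PATH / "aoc" / "input.txt") as file:
#       log.info("read %d lines", len(file.readlines()))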
| 25.652174
| 77
| 0.766102
|
import logging
import sys
from functools import partial
from pathlib import Path
ROOT_PATH = Path(Path(__file__).parents[1])
open_utf8 = partial(open, encoding="UTF-8")
log = logging.getLogger()
log.setLevel(logging.INFO)
__handler = logging.StreamHandler(sys.stdout)
__handler.setLevel(logging.DEBUG)
__handler.setFormatter(
logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
)
log.addHandler(__handler)
| true
| true
|
f7148e83286d37d30f28041686db57b3c928f8c8
| 5,117
|
py
|
Python
|
command/download.py
|
aungthuphyo21/aungthuphyo22
|
462c515e7dc987aa877dd8e38ccd1e3e6abeab3d
|
[
"MIT"
] | null | null | null |
command/download.py
|
aungthuphyo21/aungthuphyo22
|
462c515e7dc987aa877dd8e38ccd1e3e6abeab3d
|
[
"MIT"
] | null | null | null |
command/download.py
|
aungthuphyo21/aungthuphyo22
|
462c515e7dc987aa877dd8e38ccd1e3e6abeab3d
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
from __future__ import print_function
import os
import sys
import argparse
import subprocess
from bddown_core import Pan
from util import convert_none, parse_url, add_http, logger
from config import global_config
def download_command(filename, savedir, link, cookies, limit=None, output_dir=None):
    # Python 2 idiom: force UTF-8 as the default string encoding.
    reload(sys)
    sys.setdefaultencoding("utf-8")
    if output_dir and not os.path.exists(output_dir):
        os.makedirs(output_dir)
print("\033[32m" + filename + "\033[0m")
pan_ua = 'netdisk;5.2.6;PC;PC-Windows;6.2.9200;WindowsBaiduYunGuanJia'
cmd = 'aria2c -c -d "{savedir}" -o "{filename}" -s10 -x10' \
' --user-agent="{useragent}" --header "Referer:http://pan.baidu.com/disk/home"' \
' {cookies} {limit} {dir}' \
' "{link}"'.format(savedir=savedir, filename=filename, useragent=pan_ua, link=link,
cookies=convert_none("--header \"Cookies: ", cookies),
limit=convert_none('--max-download-limit=', limit),
dir=convert_none('--dir=', output_dir))
print(cmd)
subprocess.call(cmd, shell=True)
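    # The assembled command has this general shape (values illustrative):
    #   aria2c -c -d "<savedir>" -o "<filename>" -s10 -x10 \
    #       --user-agent="netdisk;..." --header "Referer:..." \
    #       --header "Cookies: BDUSS=..." --max-download-limit=<limit> "<link>"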
def select_download(fis):
if len(fis) <= 1:
return fis
print("File list:")
counter = 1
for fi in fis:
savedir = fi.path.replace(fi.parent_path, '', 1)[1:]
print(str(counter) + ')', savedir + "/" + unicode(fi.filename).encode('utf8'))
counter += 1
    input_numbers = raw_input("Please select files to download (e.g., 1,3-5,7):\n")
selected_numbers = []
for part in input_numbers.split(','):
x = part.split('-')
if len(x) == 1:
selected_numbers += [int(x[0])]
elif len(x) == 2:
selected_numbers += range(int(x[0]), int(x[1])+1)
else:
print("Error, your input seems illegal." + str(len(x)))
return None
# ensure no duplicate numbers
selected_numbers = list(set(selected_numbers))
selected_fis = [fis[i-1] for i in selected_numbers]
print("Download list:")
counter = 1
for sfi in selected_fis:
savedir = sfi.path.replace(sfi.parent_path, '', 1)[1:]
print(str(counter) + ')', savedir + "/" + unicode(sfi.filename).encode('utf8'))
counter += 1
return selected_fis
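# Worked illustration (comment added for clarity): for the prompt example
# "1,3-5,7" the parsing loop above yields selected_numbers containing
# 1, 3, 4, 5 and 7: each comma-separated part is either a single index or an
# inclusive "start-end" range expanded with range(start, end + 1).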
def download(args):
limit = global_config.limit
output_dir = global_config.dir
parser = argparse.ArgumentParser(description="download command arg parser")
parser.add_argument('-L', '--limit', action="store", dest='limit', help="Max download speed limit.")
parser.add_argument('-D', '--dir', action="store", dest='output_dir', help="Download task to dir.")
parser.add_argument('-S', '--secret', action="store", dest='secret', help="Retrieval password.", default="")
parser.add_argument('-P', '--partial', action="count", help="Partial download.")
if not args:
parser.print_help()
exit(1)
namespace, links = parser.parse_known_args(args)
secret = namespace.secret
if namespace.limit:
limit = namespace.limit
if namespace.output_dir:
output_dir = namespace.output_dir
# if is wap
links = [link.replace("wap/link", "share/link") for link in links]
# add 'http://'
links = map(add_http, links)
for url in links:
res = parse_url(url)
# normal
if res.get('type') == 1:
pan = Pan()
fis = pan.get_file_infos(url, secret)
if namespace.partial:
while True:
fis = select_download(fis)
if fis is not None:
break
for fi in fis:
cookies = 'BDUSS={0}'.format(pan.bduss) if pan.bduss else ''
if cookies and pan.pcsett:
cookies += ';pcsett={0}'.format(pan.pcsett)
if cookies:
cookies += '"'
savedir = fi.path.replace(fi.parent_path, '', 1)[1:]
download_command(fi.filename, savedir, fi.dlink, cookies=cookies, limit=limit, output_dir=output_dir)
elif res.get('type') == 4:
pan = Pan()
fsid = res.get('fsid')
newUrl = res.get('url')
infos = pan.get_file_infos(newUrl, secret, fsid)
cookies = 'BDUSS={0}'.format(pan.bduss) if pan.bduss else ''
if cookies and pan.pcsett:
cookies += ';pcsett={0}'.format(pan.pcsett)
if cookies:
cookies += '"'
            for info in infos:
                savedir = info.path.replace(info.parent_path, '', 1)[1:]
                download_command(info.filename, savedir, info.dlink, cookies=cookies, limit=limit, output_dir=output_dir)
        # album
        elif res.get('type') == 2:
            raise NotImplementedError('This function has not been implemented.')
        # home
        elif res.get('type') == 3:
            raise NotImplementedError('This function has not been implemented.')
elif res.get('type') == 0:
logger.debug(url, extra={"type": "wrong link", "method": "None"})
continue
else:
continue
sys.exit(0)
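# Hedged note (added; util.convert_none's real implementation may differ): the
# command assembly in download_command only works if the helper behaves like
#   def convert_none(prefix, value):
#       return prefix + str(value) if value else ''
# i.e. it prepends the prefix only for truthy values, which is also why the
# caller appends the closing '"' of the Cookies header itself.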
| 37.07971
| 117
| 0.577682
|
from __future__ import print_function
import os
import sys
import argparse
import subprocess
from bddown_core import Pan
from util import convert_none, parse_url, add_http, logger
from config import global_config
def download_command(filename, savedir, link, cookies, limit=None, output_dir=None):
reload(sys)
sys.setdefaultencoding("utf-8")
    if output_dir and not os.path.exists(output_dir):
        os.makedirs(output_dir)
print("\033[32m" + filename + "\033[0m")
pan_ua = 'netdisk;5.2.6;PC;PC-Windows;6.2.9200;WindowsBaiduYunGuanJia'
cmd = 'aria2c -c -d "{savedir}" -o "{filename}" -s10 -x10' \
' --user-agent="{useragent}" --header "Referer:http://pan.baidu.com/disk/home"' \
' {cookies} {limit} {dir}' \
' "{link}"'.format(savedir=savedir, filename=filename, useragent=pan_ua, link=link,
cookies=convert_none("--header \"Cookies: ", cookies),
limit=convert_none('--max-download-limit=', limit),
dir=convert_none('--dir=', output_dir))
print(cmd)
subprocess.call(cmd, shell=True)
def select_download(fis):
if len(fis) <= 1:
return fis
print("File list:")
counter = 1
for fi in fis:
savedir = fi.path.replace(fi.parent_path, '', 1)[1:]
print(str(counter) + ')', savedir + "/" + unicode(fi.filename).encode('utf8'))
counter += 1
    input_numbers = raw_input("Please select files to download (e.g., 1,3-5,7):\n")
selected_numbers = []
for part in input_numbers.split(','):
x = part.split('-')
if len(x) == 1:
selected_numbers += [int(x[0])]
elif len(x) == 2:
selected_numbers += range(int(x[0]), int(x[1])+1)
else:
print("Error, your input seems illegal." + str(len(x)))
return None
# ensure no duplicate numbers
selected_numbers = list(set(selected_numbers))
selected_fis = [fis[i-1] for i in selected_numbers]
print("Download list:")
counter = 1
for sfi in selected_fis:
savedir = sfi.path.replace(sfi.parent_path, '', 1)[1:]
print(str(counter) + ')', savedir + "/" + unicode(sfi.filename).encode('utf8'))
counter += 1
return selected_fis
def download(args):
limit = global_config.limit
output_dir = global_config.dir
parser = argparse.ArgumentParser(description="download command arg parser")
parser.add_argument('-L', '--limit', action="store", dest='limit', help="Max download speed limit.")
parser.add_argument('-D', '--dir', action="store", dest='output_dir', help="Download task to dir.")
parser.add_argument('-S', '--secret', action="store", dest='secret', help="Retrieval password.", default="")
parser.add_argument('-P', '--partial', action="count", help="Partial download.")
if not args:
parser.print_help()
exit(1)
namespace, links = parser.parse_known_args(args)
secret = namespace.secret
if namespace.limit:
limit = namespace.limit
if namespace.output_dir:
output_dir = namespace.output_dir
# if is wap
links = [link.replace("wap/link", "share/link") for link in links]
# add 'http://'
links = map(add_http, links)
for url in links:
res = parse_url(url)
# normal
if res.get('type') == 1:
pan = Pan()
fis = pan.get_file_infos(url, secret)
if namespace.partial:
while True:
fis = select_download(fis)
if fis is not None:
break
for fi in fis:
cookies = 'BDUSS={0}'.format(pan.bduss) if pan.bduss else ''
if cookies and pan.pcsett:
cookies += ';pcsett={0}'.format(pan.pcsett)
if cookies:
cookies += '"'
savedir = fi.path.replace(fi.parent_path, '', 1)[1:]
download_command(fi.filename, savedir, fi.dlink, cookies=cookies, limit=limit, output_dir=output_dir)
elif res.get('type') == 4:
pan = Pan()
fsid = res.get('fsid')
newUrl = res.get('url')
infos = pan.get_file_infos(newUrl, secret, fsid)
cookies = 'BDUSS={0}'.format(pan.bduss) if pan.bduss else ''
if cookies and pan.pcsett:
cookies += ';pcsett={0}'.format(pan.pcsett)
if cookies:
cookies += '"'
            for info in infos:
                savedir = info.path.replace(info.parent_path, '', 1)[1:]
                download_command(info.filename, savedir, info.dlink, cookies=cookies, limit=limit, output_dir=output_dir)
        # album
        elif res.get('type') == 2:
            raise NotImplementedError('This function has not been implemented.')
        # home
        elif res.get('type') == 3:
            raise NotImplementedError('This function has not been implemented.')
elif res.get('type') == 0:
logger.debug(url, extra={"type": "wrong link", "method": "None"})
continue
else:
continue
sys.exit(0)
| true
| true
|
f7148f1f8278d6d236a00eec569bd331d09b34c0
| 1,299
|
py
|
Python
|
main.py
|
TakeItIsi/Snakehogs-
|
6c92184a564738a1af8bb7e0b9a3bc74689fde49
|
[
"MIT"
] | null | null | null |
main.py
|
TakeItIsi/Snakehogs-
|
6c92184a564738a1af8bb7e0b9a3bc74689fde49
|
[
"MIT"
] | null | null | null |
main.py
|
TakeItIsi/Snakehogs-
|
6c92184a564738a1af8bb7e0b9a3bc74689fde49
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import pygame
import os
from Controlador.Basic_Controller import Controlador
from Controlador.Menu_Controller import Menu
from Controlador.Versus_Controller import Versus_Controlador
__author__ = "Isidora Ulloa"
__license__ = "GPL"
__version__ = "1.0.0"
__email__ = "isidora.ulloa@ug.uchile.cl"
pygame.init()
titulo = Menu("Recursos/menu sprite full.png","", True)
while True:
while titulo.mainloop:
titulo.run()
if titulo.versus.on:
instruccion = Menu("Recursos/instrucciones 12.png","", False)
program = Versus_Controlador(titulo.walls.on)
else:
instruccion = Menu("Recursos/instrucciones 11.png", "", False)
program = Controlador(titulo.walls.on)
while instruccion.mainloop:
instruccion.run()
    while program.run:
program.update()
pygame.time.wait(program.refresh)
if titulo.versus.on:
fin = Menu(program.end,"Cuy: " + str(program.puntaje2.counter) + " Erizo: " + str(program.puntaje1.counter), False)
else:
        fin = Menu(program.end, " Puntos: " + str(program.puntaje.counter), False)
    while fin.running:
        fin.run()
    titulo.mainloop = True
| 34.184211
| 151
| 0.638953
|
import pygame
import os
from Controlador.Basic_Controller import Controlador
from Controlador.Menu_Controller import Menu
from Controlador.Versus_Controller import Versus_Controlador
__author__ = "Isidora Ulloa"
__license__ = "GPL"
__version__ = "1.0.0"
__email__ = "isidora.ulloa@ug.uchile.cl"
pygame.init()
titulo = Menu("Recursos/menu sprite full.png","", True)
while True:
while titulo.mainloop:
titulo.run()
if titulo.versus.on:
instruccion = Menu("Recursos/instrucciones 12.png","", False)
program = Versus_Controlador(titulo.walls.on)
else:
instruccion = Menu("Recursos/instrucciones 11.png", "", False)
program = Controlador(titulo.walls.on)
while instruccion.mainloop:
instruccion.run()
    while program.run:
program.update()
pygame.time.wait(program.refresh)
if titulo.versus.on:
fin = Menu(program.end,"Cuy: " + str(program.puntaje2.counter) + " Erizo: " + str(program.puntaje1.counter), False)
else:
        fin = Menu(program.end, " Puntos: " + str(program.puntaje.counter), False)
    while fin.running:
        fin.run()
    titulo.mainloop = True
| true
| true
|
f7148f4a83cab36a038bcd04077b0e24ca251006
| 1,137
|
py
|
Python
|
migrations/versions/8b25b23d386f_chquery_the_comments_migration.py
|
macc254/Personal-Blog
|
3af6083f78e65636a9cf6caa1a3598b13b3b0134
|
[
"Unlicense"
] | null | null | null |
migrations/versions/8b25b23d386f_chquery_the_comments_migration.py
|
macc254/Personal-Blog
|
3af6083f78e65636a9cf6caa1a3598b13b3b0134
|
[
"Unlicense"
] | null | null | null |
migrations/versions/8b25b23d386f_chquery_the_comments_migration.py
|
macc254/Personal-Blog
|
3af6083f78e65636a9cf6caa1a3598b13b3b0134
|
[
"Unlicense"
] | null | null | null |
"""ChQuery the comments Migration
Revision ID: 8b25b23d386f
Revises: a21b8da19c7d
Create Date: 2022-03-12 09:58:47.379065
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '8b25b23d386f'
down_revision = 'a21b8da19c7d'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('comments', sa.Column('blogs_id', sa.Integer(), nullable=False))
op.drop_constraint('comments_pitch_id_fkey', 'comments', type_='foreignkey')
op.create_foreign_key(None, 'comments', 'blogs', ['blogs_id'], ['id'])
op.drop_column('comments', 'pitch_id')
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('comments', sa.Column('pitch_id', sa.INTEGER(), autoincrement=False, nullable=False))
op.drop_constraint(None, 'comments', type_='foreignkey')
op.create_foreign_key('comments_pitch_id_fkey', 'comments', 'blogs', ['pitch_id'], ['id'])
op.drop_column('comments', 'blogs_id')
# ### end Alembic commands ###
| 32.485714
| 103
| 0.700967
|
from alembic import op
import sqlalchemy as sa
revision = '8b25b23d386f'
down_revision = 'a21b8da19c7d'
branch_labels = None
depends_on = None
def upgrade():
    op.add_column('comments', sa.Column('blogs_id', sa.Integer(), nullable=False))
    op.drop_constraint('comments_pitch_id_fkey', 'comments', type_='foreignkey')
    op.create_foreign_key(None, 'comments', 'blogs', ['blogs_id'], ['id'])
    op.drop_column('comments', 'pitch_id')
def downgrade():
    op.add_column('comments', sa.Column('pitch_id', sa.INTEGER(), autoincrement=False, nullable=False))
    op.drop_constraint(None, 'comments', type_='foreignkey')
    op.create_foreign_key('comments_pitch_id_fkey', 'comments', 'blogs', ['pitch_id'], ['id'])
    op.drop_column('comments', 'blogs_id')
| true
| true
|
f714902580ef72ea76cb338fc2ad31571450e93b
| 2,584
|
py
|
Python
|
openGaussBase/testcase/SYSTEM_CATALOGS&SYSTEM_VIEWS/SYSTEM_VIEW/Opengauss_Function_System_View_Case0044.py
|
opengauss-mirror/Yat
|
aef107a8304b94e5d99b4f1f36eb46755eb8919e
|
[
"MulanPSL-1.0"
] | null | null | null |
openGaussBase/testcase/SYSTEM_CATALOGS&SYSTEM_VIEWS/SYSTEM_VIEW/Opengauss_Function_System_View_Case0044.py
|
opengauss-mirror/Yat
|
aef107a8304b94e5d99b4f1f36eb46755eb8919e
|
[
"MulanPSL-1.0"
] | null | null | null |
openGaussBase/testcase/SYSTEM_CATALOGS&SYSTEM_VIEWS/SYSTEM_VIEW/Opengauss_Function_System_View_Case0044.py
|
opengauss-mirror/Yat
|
aef107a8304b94e5d99b4f1f36eb46755eb8919e
|
[
"MulanPSL-1.0"
] | null | null | null |
"""
Copyright (c) 2022 Huawei Technologies Co.,Ltd.
openGauss is licensed under Mulan PSL v2.
You can use this software according to the terms and conditions of the Mulan PSL v2.
You may obtain a copy of Mulan PSL v2 at:
http://license.coscl.org.cn/MulanPSL2
THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
See the Mulan PSL v2 for more details.
"""
"""
Case Type   : system view
Case Name   : Verify the columns and data types of system view PG_STAT_DATABASE
Description :
    1. Inspect the structure of system view PG_STAT_DATABASE
    2. Check that each view column has the expected data type
Expect      :
    1. The structure of PG_STAT_DATABASE is listed successfully
    2. Every column matches its expected data type
History :
"""
import unittest
from testcase.utils.Common import Common
from testcase.utils.CommonSH import CommonSH
from testcase.utils.Logger import Logger
LOG = Logger()
class SystemView(unittest.TestCase):
def setUp(self):
LOG.info('----------------this is setup-----------------------')
LOG.info(
            '------Opengauss_Function_System_View_Case0044 starts execution----------')
self.com = Common()
self.comsh = CommonSH('dbuser')
self.expect_result_dict = {
'Column': ['datid', 'datname', 'numbackends', 'xact_commit',
'xact_rollback', 'blks_read', 'blks_hit',
'tup_returned', 'tup_fetched', 'tup_inserted',
'tup_updated', 'tup_deleted', 'conflicts',
'temp_files', 'temp_bytes', 'deadlocks',
'blk_read_time', 'blk_write_time', 'stats_reset'],
'Type': ['oid', 'name', 'integer', 'bigint', 'bigint', 'bigint',
'bigint', 'bigint', 'bigint', 'bigint',
'bigint', 'bigint', 'bigint', 'bigint', 'bigint',
'bigint', 'double precision', 'double precision',
'timestamp with time zone']}
def test_index_file_damaged(self):
LOG.info(
            '--------------------inspect table structure--------------------------')
        msg = self.comsh.execut_db_sql(r'\d PG_STAT_DATABASE')
LOG.info(msg)
result_dict = self.com.format_sql_result(msg)
LOG.info(result_dict)
del result_dict['Modifiers']
self.assertDictEqual(self.expect_result_dict, result_dict)
def tearDown(self):
LOG.info('----------------this is tearDown-----------------------')
        # No environment cleanup is required
LOG.info(
            '---Opengauss_Function_System_View_Case0044 finished execution------------')
| 36.394366
| 84
| 0.589783
|
import unittest
from testcase.utils.Common import Common
from testcase.utils.CommonSH import CommonSH
from testcase.utils.Logger import Logger
LOG = Logger()
class SystemView(unittest.TestCase):
def setUp(self):
LOG.info('----------------this is setup-----------------------')
LOG.info(
            '------Opengauss_Function_System_View_Case0044 starts execution----------')
self.com = Common()
self.comsh = CommonSH('dbuser')
self.expect_result_dict = {
'Column': ['datid', 'datname', 'numbackends', 'xact_commit',
'xact_rollback', 'blks_read', 'blks_hit',
'tup_returned', 'tup_fetched', 'tup_inserted',
'tup_updated', 'tup_deleted', 'conflicts',
'temp_files', 'temp_bytes', 'deadlocks',
'blk_read_time', 'blk_write_time', 'stats_reset'],
'Type': ['oid', 'name', 'integer', 'bigint', 'bigint', 'bigint',
'bigint', 'bigint', 'bigint', 'bigint',
'bigint', 'bigint', 'bigint', 'bigint', 'bigint',
'bigint', 'double precision', 'double precision',
'timestamp with time zone']}
def test_index_file_damaged(self):
LOG.info(
            '--------------------inspect table structure--------------------------')
        msg = self.comsh.execut_db_sql(r'\d PG_STAT_DATABASE')
LOG.info(msg)
result_dict = self.com.format_sql_result(msg)
LOG.info(result_dict)
del result_dict['Modifiers']
self.assertDictEqual(self.expect_result_dict, result_dict)
def tearDown(self):
LOG.info('----------------this is tearDown-----------------------')
LOG.info(
            '---Opengauss_Function_System_View_Case0044 finished execution------------')
| true
| true
|
f7149050818fa7e7d8741a281c8eedd9cd01f51f
| 185
|
py
|
Python
|
tests/resource/test_module.py
|
asyncee/pycamunda
|
f4834d224ff99fcf80874efeaedf68a8a2efa926
|
[
"MIT"
] | null | null | null |
tests/resource/test_module.py
|
asyncee/pycamunda
|
f4834d224ff99fcf80874efeaedf68a8a2efa926
|
[
"MIT"
] | null | null | null |
tests/resource/test_module.py
|
asyncee/pycamunda
|
f4834d224ff99fcf80874efeaedf68a8a2efa926
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
def test_all_contains_only_valid_names():
import pycamunda.resource
for name in pycamunda.resource.__all__:
getattr(pycamunda.resource, name)
| 20.555556
| 43
| 0.708108
|
def test_all_contains_only_valid_names():
import pycamunda.resource
for name in pycamunda.resource.__all__:
getattr(pycamunda.resource, name)
| true
| true
|
f71491334e653edf6dda365969aee2cd5e3794d8
| 531
|
py
|
Python
|
ex14.py
|
YunMeMeThaw/python_exercises
|
151d5d3695d578059611ac09c94b3677442197d7
|
[
"MIT"
] | null | null | null |
ex14.py
|
YunMeMeThaw/python_exercises
|
151d5d3695d578059611ac09c94b3677442197d7
|
[
"MIT"
] | null | null | null |
ex14.py
|
YunMeMeThaw/python_exercises
|
151d5d3695d578059611ac09c94b3677442197d7
|
[
"MIT"
] | null | null | null |
from sys import argv
script, user_name = argv
prompt = '> '
print("Hi %s, I'm the %s script." % (user_name, script))
print("I'd like to ask you a few questions.")
print("Do you like me %s?" % user_name)
likes = input(prompt)
print("Where do you live %s?" % user_name)
lives = input(prompt)
print("What kind of computer do you hava?")
computer = input(prompt)
print("""
Alright, so you said %r about liking me.
You live in %r. Not sure where that is.
And you have a %r computer. Nice.
""" %(likes, lives, computer))
| 31.235294
| 56
| 0.6629
|
from sys import argv
script, user_name = argv
prompt = '> '
print("Hi %s, I'm the %s script." % (user_name, script))
print("I'd like to ask you a few questions.")
print("Do you like me %s?" % user_name)
likes = input(prompt)
print("Where do you live %s?" % user_name)
lives = input(prompt)
print("What kind of computer do you hava?")
computer = input(prompt)
print("""
Alright, so you said %r about liking me.
You live in %r. Not sure where that is.
And you have a %r computer. Nice.
""" %(likes, lives, computer))
| true
| true
|
f714924a2eb8a90be404ca381f44409a550c4985
| 8,049
|
py
|
Python
|
python/mxnet/base.py
|
ChidanandKumarKS/mxnet
|
1ed8b19849046bce92fd3d4a390b2adc405b584a
|
[
"Apache-2.0"
] | 1
|
2018-09-08T05:58:17.000Z
|
2018-09-08T05:58:17.000Z
|
python/mxnet/base.py
|
ChidanandKumarKS/mxnet
|
1ed8b19849046bce92fd3d4a390b2adc405b584a
|
[
"Apache-2.0"
] | null | null | null |
python/mxnet/base.py
|
ChidanandKumarKS/mxnet
|
1ed8b19849046bce92fd3d4a390b2adc405b584a
|
[
"Apache-2.0"
] | 1
|
2018-09-04T10:46:25.000Z
|
2018-09-04T10:46:25.000Z
|
# coding: utf-8
# pylint: disable=invalid-name, no-member
"""ctypes library of mxnet and helper functions."""
from __future__ import absolute_import
import sys
import ctypes
import atexit
import warnings
import inspect
import numpy as np
from . import libinfo
warnings.filterwarnings('default', category=DeprecationWarning)
__all__ = ['MXNetError']
#----------------------------
# library loading
#----------------------------
if sys.version_info[0] == 3:
string_types = str,
numeric_types = (float, int, np.float32, np.int32)
integer_types = int
# this function is needed for python3
# to convert ctypes.char_p .value back to python str
py_str = lambda x: x.decode('utf-8')
else:
string_types = basestring,
numeric_types = (float, int, long, np.float32, np.int32)
integer_types = (int, long)
py_str = lambda x: x
class _NullType(object):
"""Placeholder for arguments"""
def __repr__(self):
return '_Null'
_Null = _NullType()
class MXNetError(Exception):
"""Error that will be throwed by all mxnet functions."""
pass
class NotImplementedForSymbol(MXNetError):
def __init__(self, function, alias, *args):
super(NotImplementedForSymbol, self).__init__()
self.function = function.__name__
self.alias = alias
self.args = [str(type(a)) for a in args]
def __str__(self):
msg = 'Function {}'.format(self.function)
if self.alias:
msg += ' (namely operator "{}")'.format(self.alias)
if self.args:
msg += ' with arguments ({})'.format(', '.join(self.args))
msg += ' is not implemented for Symbol and only available in NDArray.'
return msg
def _load_lib():
"""Load library by searching possible path."""
lib_path = libinfo.find_lib_path()
lib = ctypes.CDLL(lib_path[0], ctypes.RTLD_LOCAL)
# DMatrix functions
lib.MXGetLastError.restype = ctypes.c_char_p
return lib
# version number
__version__ = libinfo.__version__
# library instance of mxnet
_LIB = _load_lib()
# type definitions
mx_uint = ctypes.c_uint
mx_float = ctypes.c_float
mx_float_p = ctypes.POINTER(mx_float)
mx_real_t = np.float32
NDArrayHandle = ctypes.c_void_p
FunctionHandle = ctypes.c_void_p
OpHandle = ctypes.c_void_p
CachedOpHandle = ctypes.c_void_p
SymbolHandle = ctypes.c_void_p
ExecutorHandle = ctypes.c_void_p
DataIterCreatorHandle = ctypes.c_void_p
DataIterHandle = ctypes.c_void_p
KVStoreHandle = ctypes.c_void_p
RecordIOHandle = ctypes.c_void_p
RtcHandle = ctypes.c_void_p
#----------------------------
# helper function definition
#----------------------------
def check_call(ret):
"""Check the return value of C API call.
This function will raise an exception when an error occurs.
Wrap every API call with this function.
Parameters
----------
ret : int
return value from API calls.
"""
if ret != 0:
raise MXNetError(py_str(_LIB.MXGetLastError()))
if sys.version_info[0] < 3:
def c_str(string):
"""Create ctypes char * from a Python string.
Parameters
----------
string : string type
Python string.
Returns
-------
str : c_char_p
A char pointer that can be passed to C API.
Examples
--------
>>> x = mx.base.c_str("Hello, World")
>>> print x.value
Hello, World
"""
return ctypes.c_char_p(string)
else:
def c_str(string):
"""Create ctypes char * from a Python string.
Parameters
----------
string : string type
Python string.
Returns
-------
str : c_char_p
A char pointer that can be passed to C API.
Examples
--------
>>> x = mx.base.c_str("Hello, World")
>>> print x.value
Hello, World
"""
return ctypes.c_char_p(string.encode('utf-8'))
def c_array(ctype, values):
"""Create ctypes array from a Python array.
Parameters
----------
ctype : ctypes data type
Data type of the array we want to convert to, such as mx_float.
values : tuple or list
Data content.
Returns
-------
out : ctypes array
Created ctypes array.
Examples
--------
>>> x = mx.base.c_array(mx.base.mx_float, [1, 2, 3])
>>> print len(x)
3
>>> x[1]
2.0
"""
return (ctype * len(values))(*values)
def ctypes2buffer(cptr, length):
"""Convert ctypes pointer to buffer type.
Parameters
----------
cptr : ctypes.POINTER(ctypes.c_char)
Pointer to the raw memory region.
length : int
The length of the buffer.
Returns
-------
buffer : bytearray
The raw byte memory buffer.
"""
if not isinstance(cptr, ctypes.POINTER(ctypes.c_char)):
raise TypeError('expected char pointer')
res = bytearray(length)
rptr = (ctypes.c_char * length).from_buffer(res)
if not ctypes.memmove(rptr, cptr, length):
raise RuntimeError('memmove failed')
return res
def ctypes2numpy_shared(cptr, shape):
"""Convert a ctypes pointer to a numpy array.
The resulting NumPy array shares the memory with the pointer.
Parameters
----------
cptr : ctypes.POINTER(mx_float)
pointer to the memory region
shape : tuple
Shape of target `NDArray`.
Returns
-------
out : numpy_array
A numpy array : numpy array.
"""
if not isinstance(cptr, ctypes.POINTER(mx_float)):
raise RuntimeError('expected float pointer')
size = 1
for s in shape:
size *= s
dbuffer = (mx_float * size).from_address(ctypes.addressof(cptr.contents))
return np.frombuffer(dbuffer, dtype=np.float32).reshape(shape)
def build_param_doc(arg_names, arg_types, arg_descs, remove_dup=True):
"""Build argument docs in python style.
arg_names : list of str
Argument names.
arg_types : list of str
Argument type information.
arg_descs : list of str
Argument description information.
remove_dup : boolean, optional
Whether remove duplication or not.
Returns
-------
docstr : str
Python docstring of parameter sections.
"""
param_keys = set()
param_str = []
for key, type_info, desc in zip(arg_names, arg_types, arg_descs):
if key in param_keys and remove_dup:
continue
if key == 'num_args':
continue
param_keys.add(key)
ret = '%s : %s' % (key, type_info)
if len(desc) != 0:
ret += '\n ' + desc
param_str.append(ret)
doc_str = ('Parameters\n' +
'----------\n' +
'%s\n')
doc_str = doc_str % ('\n'.join(param_str))
return doc_str
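# Illustrative example (added; the names are invented):
#   build_param_doc(['alpha'], ['float'], ['Learning rate.'])
# returns a NumPy-style block that starts with
#   Parameters
#   ----------
#   alpha : float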
def _notify_shutdown():
"""Notify MXNet about a shutdown."""
check_call(_LIB.MXNotifyShutdown())
atexit.register(_notify_shutdown)
def add_fileline_to_docstring(module, incursive=True):
"""Append the definition position to each function contained in module.
Examples
--------
# Put the following codes at the end of a file
add_fileline_to_docstring(__name__)
"""
def _add_fileline(obj):
"""Add fileinto to a object.
"""
if obj.__doc__ is None or 'From:' in obj.__doc__:
return
fname = inspect.getsourcefile(obj)
if fname is None:
return
try:
line = inspect.getsourcelines(obj)[-1]
except IOError:
return
obj.__doc__ += '\n\nFrom:%s:%d' % (fname, line)
if isinstance(module, str):
module = sys.modules[module]
for _, obj in inspect.getmembers(module):
if inspect.isbuiltin(obj):
continue
if inspect.isfunction(obj):
_add_fileline(obj)
if inspect.ismethod(obj):
_add_fileline(obj.__func__)
if inspect.isclass(obj) and incursive:
add_fileline_to_docstring(obj, False)
| 26.564356
| 78
| 0.60815
|
from __future__ import absolute_import
import sys
import ctypes
import atexit
import warnings
import inspect
import numpy as np
from . import libinfo
warnings.filterwarnings('default', category=DeprecationWarning)
__all__ = ['MXNetError']
if sys.version_info[0] == 3:
string_types = str,
numeric_types = (float, int, np.float32, np.int32)
integer_types = int
py_str = lambda x: x.decode('utf-8')
else:
string_types = basestring,
numeric_types = (float, int, long, np.float32, np.int32)
integer_types = (int, long)
py_str = lambda x: x
class _NullType(object):
def __repr__(self):
return '_Null'
_Null = _NullType()
class MXNetError(Exception):
pass
class NotImplementedForSymbol(MXNetError):
def __init__(self, function, alias, *args):
super(NotImplementedForSymbol, self).__init__()
self.function = function.__name__
self.alias = alias
self.args = [str(type(a)) for a in args]
def __str__(self):
msg = 'Function {}'.format(self.function)
if self.alias:
msg += ' (namely operator "{}")'.format(self.alias)
if self.args:
msg += ' with arguments ({})'.format(', '.join(self.args))
msg += ' is not implemented for Symbol and only available in NDArray.'
return msg
def _load_lib():
lib_path = libinfo.find_lib_path()
lib = ctypes.CDLL(lib_path[0], ctypes.RTLD_LOCAL)
lib.MXGetLastError.restype = ctypes.c_char_p
return lib
__version__ = libinfo.__version__
_LIB = _load_lib()
mx_uint = ctypes.c_uint
mx_float = ctypes.c_float
mx_float_p = ctypes.POINTER(mx_float)
mx_real_t = np.float32
NDArrayHandle = ctypes.c_void_p
FunctionHandle = ctypes.c_void_p
OpHandle = ctypes.c_void_p
CachedOpHandle = ctypes.c_void_p
SymbolHandle = ctypes.c_void_p
ExecutorHandle = ctypes.c_void_p
DataIterCreatorHandle = ctypes.c_void_p
DataIterHandle = ctypes.c_void_p
KVStoreHandle = ctypes.c_void_p
RecordIOHandle = ctypes.c_void_p
RtcHandle = ctypes.c_void_p
def check_call(ret):
if ret != 0:
raise MXNetError(py_str(_LIB.MXGetLastError()))
if sys.version_info[0] < 3:
def c_str(string):
return ctypes.c_char_p(string)
else:
def c_str(string):
"""Create ctypes char * from a Python string.
Parameters
----------
string : string type
Python string.
Returns
-------
str : c_char_p
A char pointer that can be passed to C API.
Examples
--------
>>> x = mx.base.c_str("Hello, World")
>>> print x.value
Hello, World
"""
return ctypes.c_char_p(string.encode('utf-8'))
def c_array(ctype, values):
return (ctype * len(values))(*values)
def ctypes2buffer(cptr, length):
if not isinstance(cptr, ctypes.POINTER(ctypes.c_char)):
raise TypeError('expected char pointer')
res = bytearray(length)
rptr = (ctypes.c_char * length).from_buffer(res)
if not ctypes.memmove(rptr, cptr, length):
raise RuntimeError('memmove failed')
return res
def ctypes2numpy_shared(cptr, shape):
if not isinstance(cptr, ctypes.POINTER(mx_float)):
raise RuntimeError('expected float pointer')
size = 1
for s in shape:
size *= s
dbuffer = (mx_float * size).from_address(ctypes.addressof(cptr.contents))
return np.frombuffer(dbuffer, dtype=np.float32).reshape(shape)
def build_param_doc(arg_names, arg_types, arg_descs, remove_dup=True):
param_keys = set()
param_str = []
for key, type_info, desc in zip(arg_names, arg_types, arg_descs):
if key in param_keys and remove_dup:
continue
if key == 'num_args':
continue
param_keys.add(key)
ret = '%s : %s' % (key, type_info)
if len(desc) != 0:
ret += '\n ' + desc
param_str.append(ret)
doc_str = ('Parameters\n' +
'----------\n' +
'%s\n')
doc_str = doc_str % ('\n'.join(param_str))
return doc_str
def _notify_shutdown():
check_call(_LIB.MXNotifyShutdown())
atexit.register(_notify_shutdown)
def add_fileline_to_docstring(module, incursive=True):
def _add_fileline(obj):
if obj.__doc__ is None or 'From:' in obj.__doc__:
return
fname = inspect.getsourcefile(obj)
if fname is None:
return
try:
line = inspect.getsourcelines(obj)[-1]
except IOError:
return
obj.__doc__ += '\n\nFrom:%s:%d' % (fname, line)
if isinstance(module, str):
module = sys.modules[module]
for _, obj in inspect.getmembers(module):
if inspect.isbuiltin(obj):
continue
if inspect.isfunction(obj):
_add_fileline(obj)
if inspect.ismethod(obj):
_add_fileline(obj.__func__)
if inspect.isclass(obj) and incursive:
add_fileline_to_docstring(obj, False)
| true
| true
|
f7149262bdb3bfb5bea3bb847f496514f2be6606
| 4,880
|
py
|
Python
|
scripts/automation.py
|
Alexey19/Python-UIAutomation-for-Windows
|
43d33bed99da66c31bc8471694422352291ae1fb
|
[
"Apache-2.0"
] | null | null | null |
scripts/automation.py
|
Alexey19/Python-UIAutomation-for-Windows
|
43d33bed99da66c31bc8471694422352291ae1fb
|
[
"Apache-2.0"
] | null | null | null |
scripts/automation.py
|
Alexey19/Python-UIAutomation-for-Windows
|
43d33bed99da66c31bc8471694422352291ae1fb
|
[
"Apache-2.0"
] | null | null | null |
#!python3
# -*- coding:utf-8 -*-
import sys
import time
from uiautomation import (Win32API, Logger, ControlFromCursor, GetRootControl, GetFocusedControl,
LogControl, EnumAndLogControlAncestors, EnumAndLogControl, ConsoleColor)
from uiautomation import VERSION
def usage():
Logger.ColorfulWrite("""usage
<Color=Cyan>-h</Color> show command <Color=Cyan>help</Color>
<Color=Cyan>-t</Color> delay <Color=Cyan>time</Color>, default 3 seconds, begin to enumerate after Value seconds, this must be an integer
you can delay a few seconds and make a window active so automation can enumerate the active window
<Color=Cyan>-d</Color> enumerate tree <Color=Cyan>depth</Color>, this must be an integer, if it is null, enumerate the whole tree
<Color=Cyan>-r</Color> enumerate from <Color=Cyan>root</Color>:Desktop window, if it is null, enumerate from foreground window
<Color=Cyan>-f</Color> enumerate from <Color=Cyan>focused</Color> control, if it is null, enumerate from foreground window
<Color=Cyan>-c</Color> enumerate the control under <Color=Cyan>cursor</Color>, if depth is < 0, enumerate from its ancestor up to depth
<Color=Cyan>-a</Color> show <Color=Cyan>ancestors</Color> of the control under cursor
<Color=Cyan>-n</Color> show control full <Color=Cyan>name</Color>
<Color=Cyan>-m</Color> show <Color=Cyan>more</Color> properties
if <Color=Red>UnicodeError</Color> or <Color=Red>LookupError</Color> occurred when printing,
try to change the active code page of console window by using <Color=Cyan>chcp</Color> or see the log file <Color=Cyan>@AutomationLog.txt</Color>
chcp, get current active code page
chcp 936, set active code page to gbk
chcp 65001, set active code page to utf-8
examples:
automation.py -t3
automation.py -t3 -r -d1 -m -n
automation.py -c -t3
""", writeToFile = False)
def main():
# if not IsPy3 and sys.getdefaultencoding() == 'ascii':
# reload(sys)
# sys.setdefaultencoding('utf-8')
import getopt
Logger.Write('UIAutomation {} (Python {}.{}.{}, {} bit)\n'.format(VERSION, sys.version_info.major, sys.version_info.minor, sys.version_info.micro, 64 if sys.maxsize > 0xFFFFFFFF else 32))
options, args = getopt.getopt(sys.argv[1:], 'hrfcamnd:t:',
['help', 'root', 'focus', 'cursor', 'ancestor', 'showMore', 'showAllName', 'depth=',
'time='])
root = False
focus = False
cursor = False
ancestor = False
foreground = True
showAllName = False
showMore = False
depth = 0xFFFFFFFF
seconds = 3
for (o, v) in options:
        if o in ('-h', '--help'):
            usage()
            exit(0)
        elif o in ('-r', '--root'):
            root = True
            foreground = False
        elif o in ('-f', '--focus'):
            focus = True
            foreground = False
        elif o in ('-c', '--cursor'):
            cursor = True
            foreground = False
        elif o in ('-a', '--ancestor'):
            ancestor = True
            foreground = False
        elif o in ('-n', '--showAllName'):
            showAllName = True
        elif o in ('-m', '--showMore'):
            showMore = True
        elif o in ('-d', '--depth'):
            depth = int(v)
        elif o in ('-t', '--time'):
            seconds = int(v)
if seconds > 0:
Logger.Write('please wait for {0} seconds\n\n'.format(seconds), writeToFile = False)
time.sleep(seconds)
Logger.Log('Starts, Current Cursor Position: {}'.format(Win32API.GetCursorPos()))
control = None
if root:
control = GetRootControl()
if focus:
control = GetFocusedControl()
if cursor:
control = ControlFromCursor()
if depth < 0:
while depth < 0 and control:
control = control.GetParentControl()
depth += 1
depth = 0xFFFFFFFF
if ancestor:
control = ControlFromCursor()
if control:
EnumAndLogControlAncestors(control, showAllName, showMore)
else:
Logger.Write('IUIAutomation returns null element under cursor\n', ConsoleColor.Yellow)
else:
indent = 0
if not control:
control = GetFocusedControl()
controlList = []
while control:
controlList.insert(0, control)
control = control.GetParentControl()
if len(controlList) == 1:
control = controlList[0]
else:
control = controlList[1]
if foreground:
indent = 1
LogControl(controlList[0], 0, showAllName, showMore)
EnumAndLogControl(control, depth, showAllName, showMore, startIndent = indent)
Logger.Log('Ends\n')
if __name__ == '__main__':
main()
| 40
| 191
| 0.604713
|
import sys
import time
from uiautomation import (Win32API, Logger, ControlFromCursor, GetRootControl, GetFocusedControl,
LogControl, EnumAndLogControlAncestors, EnumAndLogControl, ConsoleColor)
from uiautomation import VERSION
def usage():
Logger.ColorfulWrite("""usage
<Color=Cyan>-h</Color> show command <Color=Cyan>help</Color>
<Color=Cyan>-t</Color> delay <Color=Cyan>time</Color>, default 3 seconds, begin to enumerate after Value seconds, this must be an integer
you can delay a few seconds and make a window active so automation can enumerate the active window
<Color=Cyan>-d</Color> enumerate tree <Color=Cyan>depth</Color>, this must be an integer, if it is null, enumerate the whole tree
<Color=Cyan>-r</Color> enumerate from <Color=Cyan>root</Color>:Desktop window, if it is null, enumerate from foreground window
<Color=Cyan>-f</Color> enumerate from <Color=Cyan>focused</Color> control, if it is null, enumerate from foreground window
<Color=Cyan>-c</Color> enumerate the control under <Color=Cyan>cursor</Color>, if depth is < 0, enumerate from its ancestor up to depth
<Color=Cyan>-a</Color> show <Color=Cyan>ancestors</Color> of the control under cursor
<Color=Cyan>-n</Color> show control full <Color=Cyan>name</Color>
<Color=Cyan>-m</Color> show <Color=Cyan>more</Color> properties
if <Color=Red>UnicodeError</Color> or <Color=Red>LookupError</Color> occurred when printing,
try to change the active code page of console window by using <Color=Cyan>chcp</Color> or see the log file <Color=Cyan>@AutomationLog.txt</Color>
chcp, get current active code page
chcp 936, set active code page to gbk
chcp 65001, set active code page to utf-8
examples:
automation.py -t3
automation.py -t3 -r -d1 -m -n
automation.py -c -t3
""", writeToFile = False)
def main():
import getopt
Logger.Write('UIAutomation {} (Python {}.{}.{}, {} bit)\n'.format(VERSION, sys.version_info.major, sys.version_info.minor, sys.version_info.micro, 64 if sys.maxsize > 0xFFFFFFFF else 32))
options, args = getopt.getopt(sys.argv[1:], 'hrfcamnd:t:',
['help', 'root', 'focus', 'cursor', 'ancestor', 'showMore', 'showAllName', 'depth=',
'time='])
root = False
focus = False
cursor = False
ancestor = False
foreground = True
showAllName = False
showMore = False
depth = 0xFFFFFFFF
seconds = 3
for (o, v) in options:
        if o in ('-h', '--help'):
            usage()
            exit(0)
        elif o in ('-r', '--root'):
            root = True
            foreground = False
        elif o in ('-f', '--focus'):
            focus = True
            foreground = False
        elif o in ('-c', '--cursor'):
            cursor = True
            foreground = False
        elif o in ('-a', '--ancestor'):
            ancestor = True
            foreground = False
        elif o in ('-n', '--showAllName'):
            showAllName = True
        elif o in ('-m', '--showMore'):
            showMore = True
        elif o in ('-d', '--depth'):
            depth = int(v)
        elif o in ('-t', '--time'):
            seconds = int(v)
if seconds > 0:
Logger.Write('please wait for {0} seconds\n\n'.format(seconds), writeToFile = False)
time.sleep(seconds)
Logger.Log('Starts, Current Cursor Position: {}'.format(Win32API.GetCursorPos()))
control = None
if root:
control = GetRootControl()
if focus:
control = GetFocusedControl()
if cursor:
control = ControlFromCursor()
if depth < 0:
while depth < 0 and control:
control = control.GetParentControl()
depth += 1
depth = 0xFFFFFFFF
if ancestor:
control = ControlFromCursor()
if control:
EnumAndLogControlAncestors(control, showAllName, showMore)
else:
Logger.Write('IUIAutomation returns null element under cursor\n', ConsoleColor.Yellow)
else:
indent = 0
if not control:
control = GetFocusedControl()
controlList = []
while control:
controlList.insert(0, control)
control = control.GetParentControl()
if len(controlList) == 1:
control = controlList[0]
else:
control = controlList[1]
if foreground:
indent = 1
LogControl(controlList[0], 0, showAllName, showMore)
EnumAndLogControl(control, depth, showAllName, showMore, startIndent = indent)
Logger.Log('Ends\n')
if __name__ == '__main__':
main()
| true
| true
|
f71492be13cc39a498d67a30628c4004f0f40da6
| 172
|
py
|
Python
|
1541/solution.py
|
bossm0n5t3r/BOJ
|
03132388a0c76ef66d6b0dec2053aeca65c4aee6
|
[
"MIT"
] | 2
|
2020-01-14T07:27:25.000Z
|
2020-02-12T07:49:58.000Z
|
1541/solution.py
|
bossm0n5t3r/BOJ
|
03132388a0c76ef66d6b0dec2053aeca65c4aee6
|
[
"MIT"
] | 1
|
2020-01-14T07:29:30.000Z
|
2021-11-28T11:29:08.000Z
|
1541/solution.py
|
bossm0n5t3r/BOJ
|
03132388a0c76ef66d6b0dec2053aeca65c4aee6
|
[
"MIT"
] | null | null | null |
def sol():
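    # Greedy insight (comment added for clarity): after the first '-', every
    # later '+' group can be parenthesized so that it is subtracted, so sum
    # each '+' group and subtract all groups after the first one.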
expression = [sum(map(int, x.split("+"))) for x in input().split("-")]
print(expression[0] - sum(expression[1:]))
if __name__ == "__main__":
sol()
| 21.5
| 74
| 0.569767
|
def sol():
expression = [sum(map(int, x.split("+"))) for x in input().split("-")]
print(expression[0] - sum(expression[1:]))
if __name__ == "__main__":
sol()
| true
| true
|
f7149435f8090b28b434d5fbe89c07cf906c1832
| 2,207
|
py
|
Python
|
arc852/opencv_utils.py
|
athenian-robotics/common-robotics-python
|
a2ede8fb3072cf1baa53672f76081aa6bfde397f
|
[
"MIT"
] | 1
|
2019-02-20T22:59:59.000Z
|
2019-02-20T22:59:59.000Z
|
arc852/opencv_utils.py
|
athenian-robotics/common-robotics
|
a2ede8fb3072cf1baa53672f76081aa6bfde397f
|
[
"MIT"
] | null | null | null |
arc852/opencv_utils.py
|
athenian-robotics/common-robotics
|
a2ede8fb3072cf1baa53672f76081aa6bfde397f
|
[
"MIT"
] | 1
|
2020-05-23T09:08:42.000Z
|
2020-05-23T09:08:42.000Z
|
import datetime
import logging
import math
import cv2
RED = (0, 0, 255)
GREEN = (0, 255, 0)
BLUE = (255, 0, 0)
YELLOW = (0, 255, 255)
logger = logging.getLogger(__name__)
def get_moment(contour):
moment1 = cv2.moments(contour)
area = int(moment1["m00"])
x = int(moment1["m10"] / area)
y = int(moment1["m01"] / area)
return contour, area, x, y
def get_center(contour):
momt = cv2.moments(contour)
area = int(momt["m00"])
return int(momt["m10"] / area), int(momt["m01"] / area)
def contains(contour, point):
return cv2.pointPolygonTest(contour, point, False) != -1
def contains_in_list(contour_list, point):
for i in contour_list:
if contains(i, point):
return True
return False
def write_image(frame, file_name=None, log_info=False):
fname = file_name if file_name else "ct-{0}.png".format(datetime.datetime.now().strftime("%H-%M-%S"))
    cv2.imwrite(fname, frame)
if log_info:
logger.info("Wrote image to %s", fname)
def encode_image(frame, ext=".jpg"):
retval, buf = cv2.imencode(ext, frame)
return retval, buf
def distance(point1, point2):
xsqr = (point2[0] - point1[0]) ** 2
ysqr = (point2[1] - point1[1]) ** 2
return int(math.sqrt(xsqr + ysqr))
def contour_slope_degrees(contour):
rect = cv2.minAreaRect(contour)
box = cv2.boxPoints(rect)
# if self.__display:
# cv2.drawContours(image, [np.int0(box)], 0, RED, 2)
point_lr = box[0]
point_ll = box[1]
point_ul = box[2]
point_ur = box[3]
line1 = distance(point_lr, point_ur)
line2 = distance(point_ur, point_ul)
if line1 < line2:
point_lr = box[1]
point_ll = box[2]
point_ul = box[3]
point_ur = box[0]
line_width = line1
else:
line_width = line2
delta_y = point_lr[1] - point_ur[1]
delta_x = point_lr[0] - point_ur[0]
# Calculate angle of line
if delta_x == 0:
# Vertical line
slope = None
degrees = 90
else:
# Non-vertical line
slope = delta_y / delta_x
radians = math.atan(slope)
degrees = int(math.degrees(radians)) * -1
return slope, degrees
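# Minimal usage sketch (added; the contour points are synthetic):
#   import numpy as np
#   box = np.array([[[0, 0]], [[40, 10]], [[38, 18]], [[-2, 8]]], dtype=np.int32)
#   slope, degrees = contour_slope_degrees(box)
# For a vertical edge slope is None and degrees is 90, as handled above.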
| 22.752577
| 105
| 0.61169
|
import datetime
import logging
import math
import cv2
RED = (0, 0, 255)
GREEN = (0, 255, 0)
BLUE = (255, 0, 0)
YELLOW = (0, 255, 255)
logger = logging.getLogger(__name__)
def get_moment(contour):
moment1 = cv2.moments(contour)
area = int(moment1["m00"])
x = int(moment1["m10"] / area)
y = int(moment1["m01"] / area)
return contour, area, x, y
def get_center(contour):
momt = cv2.moments(contour)
area = int(momt["m00"])
return int(momt["m10"] / area), int(momt["m01"] / area)
def contains(contour, point):
return cv2.pointPolygonTest(contour, point, False) != -1
def contains_in_list(contour_list, point):
for i in contour_list:
if contains(i, point):
return True
return False
def write_image(frame, file_name=None, log_info=False):
fname = file_name if file_name else "ct-{0}.png".format(datetime.datetime.now().strftime("%H-%M-%S"))
    cv2.imwrite(fname, frame)
if log_info:
logger.info("Wrote image to %s", fname)
def encode_image(frame, ext=".jpg"):
retval, buf = cv2.imencode(ext, frame)
return retval, buf
def distance(point1, point2):
xsqr = (point2[0] - point1[0]) ** 2
ysqr = (point2[1] - point1[1]) ** 2
return int(math.sqrt(xsqr + ysqr))
def contour_slope_degrees(contour):
rect = cv2.minAreaRect(contour)
box = cv2.boxPoints(rect)
point_lr = box[0]
point_ll = box[1]
point_ul = box[2]
point_ur = box[3]
line1 = distance(point_lr, point_ur)
line2 = distance(point_ur, point_ul)
if line1 < line2:
point_lr = box[1]
point_ll = box[2]
point_ul = box[3]
point_ur = box[0]
line_width = line1
else:
line_width = line2
delta_y = point_lr[1] - point_ur[1]
delta_x = point_lr[0] - point_ur[0]
if delta_x == 0:
slope = None
degrees = 90
else:
slope = delta_y / delta_x
radians = math.atan(slope)
degrees = int(math.degrees(radians)) * -1
return slope, degrees
| true
| true
|
f71494b465a073d5dd94888bf5f7ef9ae0951d01
| 959
|
py
|
Python
|
tests/v2/test_0879-non-primitive-with-field.py
|
jpivarski/awkward-1.0
|
49a3ff13ef90b8778a80573211d58c544729eaa5
|
[
"BSD-3-Clause"
] | 2
|
2019-09-12T03:07:23.000Z
|
2019-09-27T05:32:07.000Z
|
tests/v2/test_0879-non-primitive-with-field.py
|
jpivarski/awkward-1.0
|
49a3ff13ef90b8778a80573211d58c544729eaa5
|
[
"BSD-3-Clause"
] | 1
|
2019-09-26T17:57:45.000Z
|
2019-09-26T17:57:45.000Z
|
tests/v2/test_0879-non-primitive-with-field.py
|
jpivarski/awkward-1.0
|
49a3ff13ef90b8778a80573211d58c544729eaa5
|
[
"BSD-3-Clause"
] | null | null | null |
# BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE
import pytest # noqa: F401
import numpy as np # noqa: F401
import awkward as ak # noqa: F401
def test_unknown_type():
array = ak._v2.Array({"x": np.arange(10)})
array = ak._v2.operations.with_field(base=array, what=None, where="unknown field1")
array = ak._v2.operations.with_field(
base=array, what=[None], where="unknown field2"
)
# Try to access the type of a single element
# This raises a ValueError in #879
tpe1 = array["unknown field1"].type
tpe2 = array["unknown field2"].type
assert str(tpe1) == "10 * ?unknown"
assert str(tpe2) == "10 * ?unknown"
def test_in_place_wrapper_broadcasting():
array = ak._v2.Array({"x": np.arange(3)})
array["unknown field"] = None
assert array["unknown field"].tolist() == [None, None, None]
assert ak._v2.operations.fields(array) == ["x", "unknown field"]
| 33.068966
| 87
| 0.663191
|
import pytest
import numpy as np
import awkward as ak
def test_unknown_type():
array = ak._v2.Array({"x": np.arange(10)})
array = ak._v2.operations.with_field(base=array, what=None, where="unknown field1")
array = ak._v2.operations.with_field(
base=array, what=[None], where="unknown field2"
)
tpe1 = array["unknown field1"].type
tpe2 = array["unknown field2"].type
assert str(tpe1) == "10 * ?unknown"
assert str(tpe2) == "10 * ?unknown"
def test_in_place_wrapper_broadcasting():
array = ak._v2.Array({"x": np.arange(3)})
array["unknown field"] = None
assert array["unknown field"].tolist() == [None, None, None]
assert ak._v2.operations.fields(array) == ["x", "unknown field"]
| true
| true
|
f714958b669c31b21b54bd0ddb5714e078f4ab0f
| 4,265
|
py
|
Python
|
empower/cli/projects.py
|
joncnet/empower-runtime
|
c04d9c7621fdb97dc3bd4ace5cb2d8f7194d540c
|
[
"Apache-2.0"
] | null | null | null |
empower/cli/projects.py
|
joncnet/empower-runtime
|
c04d9c7621fdb97dc3bd4ace5cb2d8f7194d540c
|
[
"Apache-2.0"
] | null | null | null |
empower/cli/projects.py
|
joncnet/empower-runtime
|
c04d9c7621fdb97dc3bd4ace5cb2d8f7194d540c
|
[
"Apache-2.0"
] | 2
|
2018-09-24T09:44:19.000Z
|
2018-10-12T09:57:36.000Z
|
#!/usr/bin/env python3
#
# Copyright (c) 2019 Roberto Riggio
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Projects CLI tools."""
import uuid
import argparse
import empower.cli.command as command
from empower.core.plmnid import PLMNID
from empower.core.ssid import SSID
def pa_delete_project(args, cmd):
"""Delete project parser method. """
usage = "%s <options>" % command.USAGE.format(cmd)
desc = command.DESCS[cmd]
parser = argparse.ArgumentParser(usage=usage, description=desc)
required = parser.add_argument_group('required named arguments')
required.add_argument('-p', '--project_id', help='The project id',
required=True, type=uuid.UUID)
(args, leftovers) = parser.parse_known_args(args)
return args, leftovers
def do_delete_project(gargs, args, _):
"""Delete a project. """
url = '/api/v1/projects/%s' % args.project_id
command.connect(gargs, ('DELETE', url), 204)
print(args.project_id)
def pa_create_project(args, cmd):
"""Create project parser method. """
usage = "%s <options>" % command.USAGE.format(cmd)
desc = command.DESCS[cmd]
parser = argparse.ArgumentParser(usage=usage, description=desc)
required = parser.add_argument_group('required named arguments')
required.add_argument('-d', '--desc', help='The project description',
required=True, type=str, dest="desc")
required.add_argument('-o', '--owner', help='The project owner',
required=True, type=str, dest="owner")
parser.add_argument("-c", "--mcc", dest="mcc", default=None,
help="The network MCC; default=None",
type=str)
parser.add_argument("-n", "--mnc", dest="mcc", default=None,
help="The network MNC; default=None",
type=str)
parser.add_argument("-s", "--ssid", dest="ssid", default=None,
help="The network SSID; default=None",
type=SSID)
parser.add_argument("-t", "--ssid_type", dest="ssid_type",
default="unique", choices=["unique", "shared"],
help="The network SSID type; default=unique")
(args, leftovers) = parser.parse_known_args(args)
return args, leftovers
def do_create_project(gargs, args, _):
""" Add a new Project """
request = {
"version": "1.0",
"desc": args.desc,
"owner": args.owner
}
if args.ssid:
request["wifi_props"] = {
"bssid_type": args.ssid_type,
"ssid": args.ssid
}
if args.mcc and args.mnc:
plmnid = PLMNID(args.mcc, args.mnc)
request["lte_props"] = {
"plmnid": plmnid.to_dict()
}
headers = command.get_headers(gargs)
url = '/api/v1/projects'
response, _ = command.connect(gargs, ('POST', url), 201, request,
headers=headers)
location = response.headers['Location']
tokens = location.split("/")
project_id = tokens[-1]
print(project_id)
def do_list_projects(gargs, *_):
"""List currently running workers. """
_, data = command.connect(gargs, ('GET', '/api/v1/projects'), 200)
for entry in data.values():
accum = []
accum.append("project_id ")
accum.append(entry['project_id'])
accum.append(" desc \"%s\"" % entry['desc'])
if 'wifi_props' in entry and entry['wifi_props']:
accum.append(" ssid \"%s\"" % entry['wifi_props']['ssid'])
if 'lte_props' in entry and entry['lte_props']:
accum.append(" plmnid \"%s\"" % entry['lte_props']['plmnid'])
print(''.join(accum))
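# Hedged wiring sketch (added; the dispatch and `gargs` namespace are assumed
# to live in empower.cli.command): each pa_* parser feeds its matching do_*
# action, roughly:
#   args, leftovers = pa_create_project(cli_args, 'create-project')
#   do_create_project(gargs, args, leftovers)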
| 28.245033
| 73
| 0.605393
|
import uuid
import argparse
import empower.cli.command as command
from empower.core.plmnid import PLMNID
from empower.core.ssid import SSID
def pa_delete_project(args, cmd):
usage = "%s <options>" % command.USAGE.format(cmd)
desc = command.DESCS[cmd]
parser = argparse.ArgumentParser(usage=usage, description=desc)
required = parser.add_argument_group('required named arguments')
required.add_argument('-p', '--project_id', help='The project id',
required=True, type=uuid.UUID)
(args, leftovers) = parser.parse_known_args(args)
return args, leftovers
def do_delete_project(gargs, args, _):
url = '/api/v1/projects/%s' % args.project_id
command.connect(gargs, ('DELETE', url), 204)
print(args.project_id)
def pa_create_project(args, cmd):
usage = "%s <options>" % command.USAGE.format(cmd)
desc = command.DESCS[cmd]
parser = argparse.ArgumentParser(usage=usage, description=desc)
required = parser.add_argument_group('required named arguments')
required.add_argument('-d', '--desc', help='The project description',
required=True, type=str, dest="desc")
required.add_argument('-o', '--owner', help='The project owner',
required=True, type=str, dest="owner")
parser.add_argument("-c", "--mcc", dest="mcc", default=None,
help="The network MCC; default=None",
type=str)
parser.add_argument("-n", "--mnc", dest="mcc", default=None,
help="The network MNC; default=None",
type=str)
parser.add_argument("-s", "--ssid", dest="ssid", default=None,
help="The network SSID; default=None",
type=SSID)
parser.add_argument("-t", "--ssid_type", dest="ssid_type",
default="unique", choices=["unique", "shared"],
help="The network SSID type; default=unique")
(args, leftovers) = parser.parse_known_args(args)
return args, leftovers
def do_create_project(gargs, args, _):
request = {
"version": "1.0",
"desc": args.desc,
"owner": args.owner
}
if args.ssid:
request["wifi_props"] = {
"bssid_type": args.ssid_type,
"ssid": args.ssid
}
if args.mcc and args.mnc:
plmnid = PLMNID(args.mcc, args.mnc)
request["lte_props"] = {
"plmnid": plmnid.to_dict()
}
headers = command.get_headers(gargs)
url = '/api/v1/projects'
response, _ = command.connect(gargs, ('POST', url), 201, request,
headers=headers)
location = response.headers['Location']
tokens = location.split("/")
project_id = tokens[-1]
print(project_id)
def do_list_projects(gargs, *_):
_, data = command.connect(gargs, ('GET', '/api/v1/projects'), 200)
for entry in data.values():
accum = []
accum.append("project_id ")
accum.append(entry['project_id'])
accum.append(" desc \"%s\"" % entry['desc'])
if 'wifi_props' in entry and entry['wifi_props']:
accum.append(" ssid \"%s\"" % entry['wifi_props']['ssid'])
if 'lte_props' in entry and entry['lte_props']:
accum.append(" plmnid \"%s\"" % entry['lte_props']['plmnid'])
print(''.join(accum))
| true
| true
|
f7149618008b2c55257eac69dff2390850219ebb
| 1,584
|
py
|
Python
|
bin/list-keys.py
|
fenglsuc/PatCit
|
6f2585dac156a69ff94f002d387c75bd723529df
|
[
"MIT"
] | 1
|
2020-04-10T09:18:27.000Z
|
2020-04-10T09:18:27.000Z
|
bin/list-keys.py
|
fenglsuc/PatCit
|
6f2585dac156a69ff94f002d387c75bd723529df
|
[
"MIT"
] | null | null | null |
bin/list-keys.py
|
fenglsuc/PatCit
|
6f2585dac156a69ff94f002d387c75bd723529df
|
[
"MIT"
] | null | null | null |
import json
import lzma
from glob import glob
from pprint import pprint
import click
import smart_open
from tqdm import tqdm
@click.command()
@click.option("--path", help="Path. Wilcard '*' enabled")
@click.option("--tar", default=False, help="True for .xz files")
@click.option(
"--flavor",
default="sm",
help="Examples reported if <flavor> is lg. Default " "<falvor> is sm.",
)
@click.option("--limit", default=None, type=int, help="Break after <limit> iterations")
def main(path, tar, flavor, limit):
assert flavor in ["sm", "lg"]
key_val = {}
i = 0
for file in tqdm(glob(path)):
if tar:
_open = lzma.open
else:
_open = smart_open.open
with _open(file) as f:
for l in tqdm(f):
i += 1
for k, v in json.loads(l).items():
if k in key_val.keys():
if flavor == "lg":
key_val.update(
{k: (key_val[k][0] + 1, key_val[k][1], key_val[k][2])}
)
else:
key_val.update({k: (key_val[k][0] + 1, key_val[k][1])})
else:
if flavor == "lg":
key_val.update({k: (1, type(v), v)})
else:
key_val.update({k: (1, type(v))})
if limit:
if i > limit:
break
pprint(key_val)
if __name__ == "__main__":
main()
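# Hedged usage sketch (added; the glob pattern is invented): the command can
# also be exercised without a shell via click's test runner:
#   from click.testing import CliRunner
#   CliRunner().invoke(main, ["--path", "data/*.jsonl", "--flavor", "lg", "--limit", "100"])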
| 29.333333
| 87
| 0.454545
|
import json
import lzma
from glob import glob
from pprint import pprint
import click
import smart_open
from tqdm import tqdm
@click.command()
@click.option("--path", help="Path. Wilcard '*' enabled")
@click.option("--tar", default=False, help="True for .xz files")
@click.option(
"--flavor",
default="sm",
help="Examples reported if <flavor> is lg. Default " "<falvor> is sm.",
)
@click.option("--limit", default=None, type=int, help="Break after <limit> iterations")
def main(path, tar, flavor, limit):
assert flavor in ["sm", "lg"]
key_val = {}
i = 0
for file in tqdm(glob(path)):
if tar:
_open = lzma.open
else:
_open = smart_open.open
with _open(file) as f:
for l in tqdm(f):
i += 1
for k, v in json.loads(l).items():
if k in key_val.keys():
if flavor == "lg":
key_val.update(
{k: (key_val[k][0] + 1, key_val[k][1], key_val[k][2])}
)
else:
key_val.update({k: (key_val[k][0] + 1, key_val[k][1])})
else:
if flavor == "lg":
key_val.update({k: (1, type(v), v)})
else:
key_val.update({k: (1, type(v))})
if limit:
if i > limit:
break
pprint(key_val)
if __name__ == "__main__":
main()
| true
| true
|
f714961d576161dc58be3cf12b089bd26a129c85
| 1,215
|
py
|
Python
|
ex3_collatz.py
|
grace-burke/Grace-Burke-Programming-and-Scripting-GMIT-2018
|
b676ebf81388ec3ed46a2d2c9b897ee466598b0b
|
[
"Apache-2.0"
] | null | null | null |
ex3_collatz.py
|
grace-burke/Grace-Burke-Programming-and-Scripting-GMIT-2018
|
b676ebf81388ec3ed46a2d2c9b897ee466598b0b
|
[
"Apache-2.0"
] | null | null | null |
ex3_collatz.py
|
grace-burke/Grace-Burke-Programming-and-Scripting-GMIT-2018
|
b676ebf81388ec3ed46a2d2c9b897ee466598b0b
|
[
"Apache-2.0"
] | null | null | null |
# Grace Burke
# 08/02/2018
# Exercise 3:
# Complete the exercise discussed in the Collatz conjecture video by writing a single Python script that starts with an integer and repeatedly applies the Collatz function (divide by 2 if even, multiply by three and add 1 if odd) using a while loop and an if statement.
# At each iteration, the current value of the integer should be printed to the screen.
# You can specify in your code the starting value of 17.
# If you wish to enhance your program, have the program ask the user for the integer instead of specifying a value at the start of your code.
# https://en.wikipedia.org/wiki/Collatz_conjecture
x = int(input("Please enter an integer: "))
# This line requests an input value from the user.
while x > 1:
# This while loop ensures that the operations below are carried out until the input value reaches 1, as specified.
if x % 2 == 0:
# This if statement checks if the value is even, if so it carries out the operation for even values.
x = x//2
else:
# If value is not even, if statement carries out operation for odd values.
x = 3*x + 1
print (x)
# This line prints the value at each cycle of the while loop.
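# Equivalent function form (added illustration, not part of the original
# exercise): count how many Collatz steps a value takes to reach 1.
def collatz_steps(n):
    steps = 0
    while n > 1:
        n = n // 2 if n % 2 == 0 else 3 * n + 1
        steps += 1
    return steps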
| 48.6
| 263
| 0.719342
|
x = int(input("Please enter an integer: "))
while x > 1:
if x % 2 == 0:
x = x//2
else:
x = 3*x + 1
print (x)
| true
| true
|
f7149705d601e6aca77921bd8014f22187956ca9
| 13,102
|
py
|
Python
|
xoa/__init__.py
|
VACUMM/xoa
|
c6a0d860528cf33ae15c77fa111f95daab0321c0
|
[
"Apache-2.0"
] | 7
|
2021-04-08T08:46:30.000Z
|
2022-02-07T11:19:51.000Z
|
xoa/__init__.py
|
VACUMM/xoa
|
c6a0d860528cf33ae15c77fa111f95daab0321c0
|
[
"Apache-2.0"
] | 29
|
2021-02-18T10:27:26.000Z
|
2022-03-25T08:29:04.000Z
|
xoa/__init__.py
|
VACUMM/xoa
|
c6a0d860528cf33ae15c77fa111f95daab0321c0
|
[
"Apache-2.0"
] | 2
|
2020-04-30T17:20:46.000Z
|
2022-03-18T14:29:14.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
xarray-based ocean analysis library
The successor of Vacumm.
"""
# Copyright 2020-2021 Shom
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import re
import warnings
import platform
import pkg_resources
import appdirs
import configobj
import validate
# Taken from xarray
try:
__version__ = pkg_resources.get_distribution("xoa").version
except Exception:
# Local copy or not installed with setuptools.
# Disable minimum version checks on downstream libraries.
__version__ = "999"
_RE_OPTION_MATCH = re.compile(r"^(\w+)\W(\w+)$").match
#: Specifications of configuration options
CONFIG_SPECS = """
[cf] # cf module
cache=boolean(default=True) # use the :mod:`~xoa.cf` in memory and file caches
[plot] # plot parameters
cmapdiv = string(default="cmo.balance") # default diverging colormap
cmappos = string(default="cmo.amp") # default positive colormap
cmapneg = string(default="cmo.tempo_r") # default negative colormap
cmapcyc = string(default="cmo.phase") # default cyclic colormap
"""
#: Default xoa user configuration file
DEFAULT_USER_CONFIG_FILE = os.path.join(
appdirs.user_config_dir("xoa"), "xoa.cfg"
)
# Directory of sample files
_SAMPLE_DIR = os.path.join(os.path.dirname(__file__), '_samples')
_PACKAGES = [
"appdirs",
"cartopy",
"cmocean",
"configobj",
"matplotlib",
"numpy",
"pandas",
"scipy",
"xarray",
"xesmf"
]
class XoaError(Exception):
pass
class XoaConfigError(XoaError):
pass
class XoaWarning(UserWarning):
pass
def xoa_warn(message, stacklevel=2):
"""Issue a :class:`XoaWarning` warning
Example
-------
.. ipython:: python
:okwarning:
@suppress
from xoa import xoa_warn
xoa_warn('Be careful!')
"""
warnings.warn(message, XoaWarning, stacklevel=stacklevel)
def _get_cache_():
from . import __init__
if not hasattr(__init__, "_XOA_CACHE"):
__init__._XOA_CACHE = {}
return __init__._XOA_CACHE
def load_options(cfgfile=None):
"""Load specified options
Parameters
----------
cfgfile: file, list(str), dict
Example
-------
.. ipython:: python
@suppress
from xoa import load_options
# Dict
load_options({'plot': {'cmappos': 'mycmap'}})
# Lines
optlines = "[plot]\\n cmappos=mycmap".split('\\n')
load_options(optlines)
"""
    xoa_cache = _get_cache_()
if "cfgspecs" not in xoa_cache:
xoa_cache["cfgspecs"] = configobj.ConfigObj(
CONFIG_SPECS.split("\n"),
list_values=False,
interpolation=False,
raise_errors=True,
file_error=True,
)
if "options" not in xoa_cache:
xoa_cache["options"] = configobj.ConfigObj(
(
DEFAULT_USER_CONFIG_FILE
if os.path.exists(DEFAULT_USER_CONFIG_FILE)
else None
),
configspec=xoa_cache["cfgspecs"],
file_error=False,
raise_errors=True,
list_values=True,
)
if cfgfile:
xoa_cache["options"].merge(
configobj.ConfigObj(
cfgfile, file_error=True, raise_errors=True, list_values=True
)
)
xoa_cache["options"].validate(validate.Validator(), copy=True)
def _get_options_():
xoa_cache = _get_cache_()
if "options" not in xoa_cache:
load_options()
return xoa_cache["options"]
def get_option(section, option=None):
"""Get a config option
Example
-------
.. ipython:: python
@suppress
from xoa import get_option
print(get_option('plot', 'cmapdiv'))
print(get_option('plot.cmapdiv'))
"""
options = _get_options_()
if option is None:
m = _RE_OPTION_MATCH(section)
if m:
section, option = m.groups()
else:
raise XoaConfigError(
"You must provide an option name to get_option"
)
try:
value = options[section][option]
except Exception:
        raise XoaConfigError(f"Invalid section/option: {section}/{option}")
return value
class set_options(object):
"""Set configuration options
Parameters
----------
section: str, None
**options: dict
If a key is in the format "<section>.<option>", then the section
is overwritten.
Example
-------
.. ipython:: python
@suppress
from xoa import set_options, get_option
# Classic: for the session
set_options('plot', cmapdiv='cmo.balance', cmappos='cmo.amp')
# With dict
opts = {"plot.cmapdiv": "cmo.balance"}
set_options(**opts)
# Context: temporary
with set_options('plot', cmapdiv='cmo.delta'):
print('within context:', get_option('plot.cmapdiv'))
print('after context:', get_option('plot.cmapdiv'))
"""
def __init__(self, section=None, **options):
# Format before being ingested
self.xoa_cache = _get_cache_()
self.old_options = self.xoa_cache.get("options")
if "options" in self.xoa_cache:
del self.xoa_cache["options"]
opts = {}
for option, value in options.items():
m = _RE_OPTION_MATCH(option)
if m:
sec, option = m.groups()
else:
if section is None:
raise XoaConfigError(
"You must specify the section explicitly or through the option name")
sec = section
opts.setdefault(sec, {})[option] = value
# Ingest options
load_options(opts)
def __enter__(self):
return self.xoa_cache["options"]
def __exit__(self, type, value, traceback):
if self.old_options:
self.xoa_cache["options"] = self.old_options
else:
del self.xoa_cache["options"]
def set_option(option, value):
"""Set a single option using the flat format, i.e ``section.option``
Parameters
----------
option: str
Option name in the ``section.option`` format
value:
Value to set
Example
-------
.. ipython:: python
@suppress
from xoa import set_option
set_option('plot.cmapdiv', 'cmo.balance');
"""
return set_options(None, **{option: value})
def reset_options():
"""Restore options to their default values in the current session
Example
-------
.. ipython:: python
@suppress
from xoa import get_option, set_options, reset_options
print(get_option('plot.cmapdiv'))
set_options('plot', cmapdiv='mycmap')
print(get_option('plot.cmapdiv'))
reset_options()
print(get_option('plot.cmapdiv'))
"""
xoa_cache = _get_cache_()
del xoa_cache['options']
def show_options(specs=False):
"""Print current xoa configuration
Parameters
----------
specs: bool
Print option specifications instead
Example
-------
.. ipython:: python
@suppress
from xoa import show_options
show_options()
show_options(specs=True)
"""
if specs:
print(CONFIG_SPECS.strip("\n"))
else:
print("\n".join(_get_options_().write())
.strip("\n").replace('#', ' #'))
def _parse_requirements_(reqfile):
re_match_specs_match = re.compile(r"^(\w+)(\W+.+)?$").match
reqs = {}
with open(reqfile) as f:
for line in f:
line = line.strip().strip("\n")
if line and not line.startswith("#"):
m = re_match_specs_match(line)
if m:
reqs[m.group(1)] = m.group(2)
return reqs
def show_versions():
"""Print the versions of xoa and of some dependencies
Example
-------
.. ipython:: python
:okexcept:
@suppress
from xoa import show_versions
show_versions()
"""
print("- python:", platform.python_version())
print("- xoa:", __version__)
for package in _PACKAGES:
try:
version = pkg_resources.get_distribution(package).version
except pkg_resources.DistributionNotFound:
version = "NOT INSTALLED or UKNOWN"
print(f"- {package}: {version}")
def show_paths():
"""Print some xoa paths
Example
-------
.. ipython:: python
:okexcept:
@suppress
from xoa import show_paths
show_paths()
"""
print("- xoa library dir:", os.path.dirname(__file__))
from . import cf
asterix = False
for label, path in [("user config file", DEFAULT_USER_CONFIG_FILE),
("user CF specs file", cf.USER_CF_FILE),
("user CF cache file", cf.USER_CF_CACHE_FILE)]:
if not os.path.exists(path):
asterix = True
path = path + " [*]"
print("-", label+":", path)
print("- data samples:", " ".join(get_data_sample()))
if asterix:
print("*: file not present")
def show_info(opt_specs=True):
"""Print xoa related info
Example
-------
.. ipython:: python
:okexcept:
@suppress
from xoa import show_info
show_info()
"""
print("# VERSIONS")
show_versions()
print("\n# FILES AND DIRECTORIES")
show_paths()
print("\n# OPTIONS")
show_options(specs=opt_specs)
def get_data_sample(filename=None):
"""Get the absolute path to a sample file
Parameters
----------
filename: str, None
        Name of the sample. If omitted, a list of available sample
        names is returned.
Returns
-------
str OR list(str)
Example
-------
    .. ipython:: python
@suppress
from xoa import get_data_sample
get_data_sample("croco.south-africa.surf.nc")
get_data_sample()
See also
--------
show_data_samples
open_data_sample
"""
if not os.path.exists(_SAMPLE_DIR):
filenames = []
else:
filenames = os.listdir(_SAMPLE_DIR)
if filename is None:
return filenames
if filename not in filenames:
raise XoaError("Invalid data sample: "+filename)
return os.path.join(_SAMPLE_DIR, filename)
def open_data_sample(filename, **kwargs):
"""Open a data sample with :func:`xarray.open_dataset` or :func:`pandas.read_csv`
A shortcut to::
xr.open_dataset(get_data_sample(filename))
Parameters
----------
filename: str
File name of the sample
Returns
-------
xarray.Dataset, pandas.DataFrame
Example
-------
    .. ipython:: python
@suppress
from xoa import open_data_sample
open_data_sample("croco.south-africa.nc")
See also
--------
get_data_sample
show_data_samples
"""
fname = get_data_sample(filename)
if fname.endswith("nc"):
import xarray as xr
return xr.open_dataset(fname, **kwargs)
import pandas as pd
return pd.read_csv(fname, **kwargs)
def show_data_samples():
"""Print the list of data samples
Example
-------
.. ipython:: python
@suppress
from xoa import show_data_samples
show_data_samples()
See also
--------
get_data_samples
open_data_sample
"""
print(' '.join(get_data_sample()))
def register_accessors(xoa=True, xcf=False, decode_sigma=False):
"""Register xarray accessors
Parameters
----------
xoa: bool, str
Register the main accessors with
:func:`~xoa.cf.register_xoa_accessors`.
xcf: bool, str
Register the :mod:`xoa.cf` module accessors with
:func:`~xoa.cf.register_cf_accessors`.
decode_sigma: bool, str
Register the :mod:`xoa.sigma` module accessor with
:func:`~xoa.cf.register_sigma_accessor`.
See also
--------
xoa.accessors
"""
if xoa:
from .accessors import register_xoa_accessors
kw = {"name": xoa} if isinstance(xoa, str) else {}
register_xoa_accessors(**kw)
if xcf:
from .accessors import register_cf_accessors
kw = {"name": xcf} if isinstance(xcf, str) else {}
register_cf_accessors(**kw)
if decode_sigma:
from .accessors import register_sigma_accessor
kw = {"name": decode_sigma} if isinstance(decode_sigma, str) else {}
register_sigma_accessor(**kw)
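A short usage sketch tying the helpers above together (assumes xoa and its dependencies are installed; the accessor name "xcf" is arbitrary):

import xoa

xoa.register_accessors(xoa=True, xcf="xcf")  # register the xarray accessors
xoa.set_option("plot.cmapdiv", "cmo.delta")  # flat "section.option" form
print(xoa.get_option("plot.cmapdiv"))        # -> cmo.delta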
| 24.48972
| 93
| 0.598611
|
import os
import re
import warnings
import platform
import pkg_resources
import appdirs
import configobj
import validate
try:
__version__ = pkg_resources.get_distribution("xoa").version
except Exception:
__version__ = "999"
_RE_OPTION_MATCH = re.compile(r"^(\w+)\W(\w+)$").match
CONFIG_SPECS = """
[cf] # cf module
cache=boolean(default=True) # use the :mod:`~xoa.cf` in memory and file caches
[plot] # plot parameters
cmapdiv = string(default="cmo.balance") # default diverging colormap
cmappos = string(default="cmo.amp") # default positive colormap
cmapneg = string(default="cmo.tempo_r") # default negative colormap
cmapcyc = string(default="cmo.phase") # default cyclic colormap
"""
DEFAULT_USER_CONFIG_FILE = os.path.join(
appdirs.user_config_dir("xoa"), "xoa.cfg"
)
_SAMPLE_DIR = os.path.join(os.path.dirname(__file__), '_samples')
_PACKAGES = [
"appdirs",
"cartopy",
"cmocean",
"configobj",
"matplotlib",
"numpy",
"pandas",
"scipy",
"xarray",
"xesmf"
]
class XoaError(Exception):
pass
class XoaConfigError(XoaError):
pass
class XoaWarning(UserWarning):
pass
def xoa_warn(message, stacklevel=2):
warnings.warn(message, XoaWarning, stacklevel=stacklevel)
def _get_cache_():
from . import __init__
if not hasattr(__init__, "_XOA_CACHE"):
__init__._XOA_CACHE = {}
return __init__._XOA_CACHE
def load_options(cfgfile=None):
    xoa_cache = _get_cache_()
if "cfgspecs" not in xoa_cache:
xoa_cache["cfgspecs"] = configobj.ConfigObj(
CONFIG_SPECS.split("\n"),
list_values=False,
interpolation=False,
raise_errors=True,
file_error=True,
)
if "options" not in xoa_cache:
xoa_cache["options"] = configobj.ConfigObj(
(
DEFAULT_USER_CONFIG_FILE
if os.path.exists(DEFAULT_USER_CONFIG_FILE)
else None
),
configspec=xoa_cache["cfgspecs"],
file_error=False,
raise_errors=True,
list_values=True,
)
if cfgfile:
xoa_cache["options"].merge(
configobj.ConfigObj(
cfgfile, file_error=True, raise_errors=True, list_values=True
)
)
xoa_cache["options"].validate(validate.Validator(), copy=True)
def _get_options_():
xoa_cache = _get_cache_()
if "options" not in xoa_cache:
load_options()
return xoa_cache["options"]
def get_option(section, option=None):
options = _get_options_()
if option is None:
m = _RE_OPTION_MATCH(section)
if m:
section, option = m.groups()
else:
raise XoaConfigError(
"You must provide an option name to get_option"
)
try:
value = options[section][option]
except Exception:
        raise XoaConfigError(f"Invalid section/option: {section}/{option}")
return value
class set_options(object):
def __init__(self, section=None, **options):
self.xoa_cache = _get_cache_()
self.old_options = self.xoa_cache.get("options")
if "options" in self.xoa_cache:
del self.xoa_cache["options"]
opts = {}
for option, value in options.items():
m = _RE_OPTION_MATCH(option)
if m:
sec, option = m.groups()
else:
if section is None:
raise XoaConfigError(
"You must specify the section explicitly or through the option name")
sec = section
opts.setdefault(sec, {})[option] = value
load_options(opts)
def __enter__(self):
return self.xoa_cache["options"]
def __exit__(self, type, value, traceback):
if self.old_options:
self.xoa_cache["options"] = self.old_options
else:
del self.xoa_cache["options"]
def set_option(option, value):
return set_options(None, **{option: value})
def reset_options():
xoa_cache = _get_cache_()
del xoa_cache['options']
def show_options(specs=False):
if specs:
print(CONFIG_SPECS.strip("\n"))
else:
print("\n".join(_get_options_().write())
.strip("\n").replace('#', ' #'))
def _parse_requirements_(reqfile):
re_match_specs_match = re.compile(r"^(\w+)(\W+.+)?$").match
reqs = {}
with open(reqfile) as f:
for line in f:
line = line.strip().strip("\n")
if line and not line.startswith("#"):
m = re_match_specs_match(line)
if m:
reqs[m.group(1)] = m.group(2)
return reqs
def show_versions():
print("- python:", platform.python_version())
print("- xoa:", __version__)
for package in _PACKAGES:
try:
version = pkg_resources.get_distribution(package).version
except pkg_resources.DistributionNotFound:
version = "NOT INSTALLED or UKNOWN"
print(f"- {package}: {version}")
def show_paths():
print("- xoa library dir:", os.path.dirname(__file__))
from . import cf
asterix = False
for label, path in [("user config file", DEFAULT_USER_CONFIG_FILE),
("user CF specs file", cf.USER_CF_FILE),
("user CF cache file", cf.USER_CF_CACHE_FILE)]:
if not os.path.exists(path):
asterix = True
path = path + " [*]"
print("-", label+":", path)
print("- data samples:", " ".join(get_data_sample()))
if asterix:
print("*: file not present")
def show_info(opt_specs=True):
print("# VERSIONS")
show_versions()
print("\n# FILES AND DIRECTORIES")
show_paths()
print("\n# OPTIONS")
show_options(specs=opt_specs)
def get_data_sample(filename=None):
if not os.path.exists(_SAMPLE_DIR):
filenames = []
else:
filenames = os.listdir(_SAMPLE_DIR)
if filename is None:
return filenames
if filename not in filenames:
raise XoaError("Invalid data sample: "+filename)
return os.path.join(_SAMPLE_DIR, filename)
def open_data_sample(filename, **kwargs):
fname = get_data_sample(filename)
if fname.endswith("nc"):
import xarray as xr
return xr.open_dataset(fname, **kwargs)
import pandas as pd
return pd.read_csv(fname, **kwargs)
def show_data_samples():
print(' '.join(get_data_sample()))
def register_accessors(xoa=True, xcf=False, decode_sigma=False):
if xoa:
from .accessors import register_xoa_accessors
kw = {"name": xoa} if isinstance(xoa, str) else {}
register_xoa_accessors(**kw)
if xcf:
from .accessors import register_cf_accessors
kw = {"name": xcf} if isinstance(xcf, str) else {}
register_cf_accessors(**kw)
if decode_sigma:
from .accessors import register_sigma_accessor
kw = {"name": decode_sigma} if isinstance(decode_sigma, str) else {}
register_sigma_accessor(**kw)
| true
| true
|
f7149723918e0c4f1a80671f68d3b10a2ce44e1c
| 457
|
py
|
Python
|
ftm_service/app.py
|
cansik/FIFATournamentManager
|
ee86673fe2d754aee35ef277f152fa48e39281d8
|
[
"MIT"
] | null | null | null |
ftm_service/app.py
|
cansik/FIFATournamentManager
|
ee86673fe2d754aee35ef277f152fa48e39281d8
|
[
"MIT"
] | null | null | null |
ftm_service/app.py
|
cansik/FIFATournamentManager
|
ee86673fe2d754aee35ef277f152fa48e39281d8
|
[
"MIT"
] | null | null | null |
import connexion
import logging
from flask_cors import CORS  # flask.ext.* was removed in Flask 1.0; import the extension package directly
from data.data_source import FTMDataSource
data_source = FTMDataSource()
def start_server():
logging.basicConfig(level=logging.INFO)
app = connexion.App(__name__, port=8080, specification_dir='swagger/', server='gevent')
app.add_api('ftm_service.yaml')
app.debug = True
CORS(app.app)
app.run()
data_source.close()
if __name__ == '__main__':
start_server()
| 21.761905
| 91
| 0.724289
|
import connexion
import logging
from flask_cors import CORS
from data.data_source import FTMDataSource
data_source = FTMDataSource()
def start_server():
logging.basicConfig(level=logging.INFO)
app = connexion.App(__name__, port=8080, specification_dir='swagger/', server='gevent')
app.add_api('ftm_service.yaml')
app.debug = True
CORS(app.app)
app.run()
data_source.close()
if __name__ == '__main__':
start_server()
| true
| true
|
f71498faa11249dfd60ec67c988ac1d9a4251dca
| 4,390
|
py
|
Python
|
homeassistant/components/unifi_direct/device_tracker.py
|
MrDelik/core
|
93a66cc357b226389967668441000498a10453bb
|
[
"Apache-2.0"
] | 30,023
|
2016-04-13T10:17:53.000Z
|
2020-03-02T12:56:31.000Z
|
homeassistant/components/unifi_direct/device_tracker.py
|
MrDelik/core
|
93a66cc357b226389967668441000498a10453bb
|
[
"Apache-2.0"
] | 24,710
|
2016-04-13T08:27:26.000Z
|
2020-03-02T12:59:13.000Z
|
homeassistant/components/unifi_direct/device_tracker.py
|
MrDelik/core
|
93a66cc357b226389967668441000498a10453bb
|
[
"Apache-2.0"
] | 11,956
|
2016-04-13T18:42:31.000Z
|
2020-03-02T09:32:12.000Z
|
"""Support for Unifi AP direct access."""
from __future__ import annotations
import json
import logging
from pexpect import exceptions, pxssh
import voluptuous as vol
from homeassistant.components.device_tracker import (
DOMAIN,
PLATFORM_SCHEMA as PARENT_PLATFORM_SCHEMA,
DeviceScanner,
)
from homeassistant.const import CONF_HOST, CONF_PASSWORD, CONF_PORT, CONF_USERNAME
from homeassistant.core import HomeAssistant
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.typing import ConfigType
_LOGGER = logging.getLogger(__name__)
DEFAULT_SSH_PORT = 22
UNIFI_COMMAND = 'mca-dump | tr -d "\n"'
UNIFI_SSID_TABLE = "vap_table"
UNIFI_CLIENT_TABLE = "sta_table"
PLATFORM_SCHEMA = PARENT_PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_HOST): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
vol.Required(CONF_USERNAME): cv.string,
vol.Optional(CONF_PORT, default=DEFAULT_SSH_PORT): cv.port,
}
)
def get_scanner(hass: HomeAssistant, config: ConfigType) -> DeviceScanner | None:
"""Validate the configuration and return a Unifi direct scanner."""
scanner = UnifiDeviceScanner(config[DOMAIN])
if not scanner.connected:
return None
return scanner
class UnifiDeviceScanner(DeviceScanner):
"""This class queries Unifi wireless access point."""
def __init__(self, config):
"""Initialize the scanner."""
self.host = config[CONF_HOST]
self.username = config[CONF_USERNAME]
self.password = config[CONF_PASSWORD]
self.port = config[CONF_PORT]
self.ssh = None
self.connected = False
self.last_results = {}
self._connect()
def scan_devices(self):
"""Scan for new devices and return a list with found device IDs."""
result = _response_to_json(self._get_update())
if result:
self.last_results = result
return self.last_results.keys()
def get_device_name(self, device):
"""Return the name of the given device or None if we don't know."""
hostname = next(
(
value.get("hostname")
for key, value in self.last_results.items()
if key.upper() == device.upper()
),
None,
)
if hostname is not None:
hostname = str(hostname)
return hostname
def _connect(self):
"""Connect to the Unifi AP SSH server."""
self.ssh = pxssh.pxssh()
try:
self.ssh.login(
self.host, self.username, password=self.password, port=self.port
)
self.connected = True
except exceptions.EOF:
_LOGGER.error("Connection refused. SSH enabled?")
self._disconnect()
def _disconnect(self):
"""Disconnect the current SSH connection."""
try:
self.ssh.logout()
except Exception: # pylint: disable=broad-except
pass
finally:
self.ssh = None
self.connected = False
def _get_update(self):
try:
if not self.connected:
self._connect()
# If we still aren't connected at this point
# don't try to send anything to the AP.
if not self.connected:
return None
self.ssh.sendline(UNIFI_COMMAND)
self.ssh.prompt()
return self.ssh.before
except pxssh.ExceptionPxssh as err:
_LOGGER.error("Unexpected SSH error: %s", str(err))
self._disconnect()
return None
except (AssertionError, exceptions.EOF) as err:
_LOGGER.error("Connection to AP unavailable: %s", str(err))
self._disconnect()
return None
def _response_to_json(response):
try:
json_response = json.loads(str(response)[31:-1].replace("\\", ""))
_LOGGER.debug(str(json_response))
ssid_table = json_response.get(UNIFI_SSID_TABLE)
active_clients = {}
for ssid in ssid_table:
client_table = ssid.get(UNIFI_CLIENT_TABLE)
for client in client_table:
active_clients[client.get("mac")] = client
return active_clients
except (ValueError, TypeError):
_LOGGER.error("Failed to decode response from AP")
return {}
| 30.915493
| 82
| 0.622551
|
from __future__ import annotations
import json
import logging
from pexpect import exceptions, pxssh
import voluptuous as vol
from homeassistant.components.device_tracker import (
DOMAIN,
PLATFORM_SCHEMA as PARENT_PLATFORM_SCHEMA,
DeviceScanner,
)
from homeassistant.const import CONF_HOST, CONF_PASSWORD, CONF_PORT, CONF_USERNAME
from homeassistant.core import HomeAssistant
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.typing import ConfigType
_LOGGER = logging.getLogger(__name__)
DEFAULT_SSH_PORT = 22
UNIFI_COMMAND = 'mca-dump | tr -d "\n"'
UNIFI_SSID_TABLE = "vap_table"
UNIFI_CLIENT_TABLE = "sta_table"
PLATFORM_SCHEMA = PARENT_PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_HOST): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
vol.Required(CONF_USERNAME): cv.string,
vol.Optional(CONF_PORT, default=DEFAULT_SSH_PORT): cv.port,
}
)
def get_scanner(hass: HomeAssistant, config: ConfigType) -> DeviceScanner | None:
scanner = UnifiDeviceScanner(config[DOMAIN])
if not scanner.connected:
return None
return scanner
class UnifiDeviceScanner(DeviceScanner):
def __init__(self, config):
self.host = config[CONF_HOST]
self.username = config[CONF_USERNAME]
self.password = config[CONF_PASSWORD]
self.port = config[CONF_PORT]
self.ssh = None
self.connected = False
self.last_results = {}
self._connect()
def scan_devices(self):
result = _response_to_json(self._get_update())
if result:
self.last_results = result
return self.last_results.keys()
def get_device_name(self, device):
hostname = next(
(
value.get("hostname")
for key, value in self.last_results.items()
if key.upper() == device.upper()
),
None,
)
if hostname is not None:
hostname = str(hostname)
return hostname
def _connect(self):
self.ssh = pxssh.pxssh()
try:
self.ssh.login(
self.host, self.username, password=self.password, port=self.port
)
self.connected = True
except exceptions.EOF:
_LOGGER.error("Connection refused. SSH enabled?")
self._disconnect()
def _disconnect(self):
try:
self.ssh.logout()
except Exception:
pass
finally:
self.ssh = None
self.connected = False
def _get_update(self):
try:
if not self.connected:
self._connect()
# don't try to send anything to the AP.
if not self.connected:
return None
self.ssh.sendline(UNIFI_COMMAND)
self.ssh.prompt()
return self.ssh.before
except pxssh.ExceptionPxssh as err:
_LOGGER.error("Unexpected SSH error: %s", str(err))
self._disconnect()
return None
except (AssertionError, exceptions.EOF) as err:
_LOGGER.error("Connection to AP unavailable: %s", str(err))
self._disconnect()
return None
def _response_to_json(response):
try:
json_response = json.loads(str(response)[31:-1].replace("\\", ""))
_LOGGER.debug(str(json_response))
ssid_table = json_response.get(UNIFI_SSID_TABLE)
active_clients = {}
for ssid in ssid_table:
client_table = ssid.get(UNIFI_CLIENT_TABLE)
for client in client_table:
active_clients[client.get("mac")] = client
return active_clients
except (ValueError, TypeError):
_LOGGER.error("Failed to decode response from AP")
return {}
| true
| true
|
f7149918e61836615e0176df95bfc1ed42c287dc
| 821
|
py
|
Python
|
spider/code/bs_spider01.py
|
mama2100/knowledge
|
5a18a3c243d7411f5135ec680dc5bd95d92be056
|
[
"MIT"
] | 881
|
2018-03-20T09:19:14.000Z
|
2022-03-24T10:17:33.000Z
|
spider/code/bs_spider01.py
|
mama2100/knowledge
|
5a18a3c243d7411f5135ec680dc5bd95d92be056
|
[
"MIT"
] | null | null | null |
spider/code/bs_spider01.py
|
mama2100/knowledge
|
5a18a3c243d7411f5135ec680dc5bd95d92be056
|
[
"MIT"
] | 248
|
2018-05-31T01:06:15.000Z
|
2022-03-14T06:52:25.000Z
|
import urllib3
# You need a PoolManager instance to make requests; it handles connection
# pooling and all thread-safety details for you, with no manual work required
http = urllib3.PoolManager()
# The request() method issues a GET request for the Baidu page; the returned r is an HTTPResponse object
r = http.request('GET', 'https://www.baidu.com')
# Print the response status
print(r.status)
# Print the content of the fetched page
print(r.data)
'''
Accept: text/html, */*;q=0.8 # content types the browser will accept; */* means any type is acceptable
Accept-Encoding: gzip, deflate # tells the server the payload may be compressed; the page decompresses it before rendering. Best left out when writing a crawler
Accept-Language: zh-CN,zh;q=0.9 # language preference
Cache-Control: max-age=0 #
Connection: keep-alive # keep the connection alive
Cookie: Hm_lvt_3bfcc098e0da26d58c321ba579b04b2f=1527581188,1528137133
Host: www.cdtopspeed.com # domain name
Upgrade-Insecure-Requests: 1
# User agent: lets the server identify that the request came from a browser; includes the browser name/version, etc.
User-Agent: Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3325.181 Safari/537.36
'''
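A short follow-up sketch: the headers discussed above can be supplied through the headers keyword of request() (User-Agent string copied from the notes; everything else as in the example):

import urllib3

http = urllib3.PoolManager()
r = http.request(
    'GET',
    'https://www.baidu.com',
    headers={
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) '
                      'AppleWebKit/537.36 (KHTML, like Gecko) '
                      'Chrome/65.0.3325.181 Safari/537.36',
    },
)
print(r.status)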
| 28.310345
| 121
| 0.773447
|
import urllib3
http = urllib3.PoolManager()
r = http.request('GET', 'https://www.baidu.com')
print(r.status)
print(r.data)
| true
| true
|
f71499c7f2f926ce80c69f47e36b2d1191d1b667
| 22,070
|
py
|
Python
|
nipy/labs/spatial_models/hroi.py
|
fabianp/nipy
|
40e89f3ca7f34df05631623807993026134e6de3
|
[
"BSD-3-Clause"
] | 1
|
2020-01-02T01:50:19.000Z
|
2020-01-02T01:50:19.000Z
|
nipy/labs/spatial_models/hroi.py
|
fabianp/nipy
|
40e89f3ca7f34df05631623807993026134e6de3
|
[
"BSD-3-Clause"
] | null | null | null |
nipy/labs/spatial_models/hroi.py
|
fabianp/nipy
|
40e89f3ca7f34df05631623807993026134e6de3
|
[
"BSD-3-Clause"
] | null | null | null |
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""
This module contains the specification of the 'hierarchical ROI' object,
which is used in spatial models of the library such as structural analysis.
The connection with other classes is not completely satisfactory at the moment:
there should be some intermediate classes between 'Fields' and 'hroi'
Author : Bertrand Thirion, 2009-2011
Virgile Fritsch <virgile.fritsch@inria.fr>
"""
import numpy as np
from nipy.algorithms.graph.graph import WeightedGraph
from nipy.algorithms.graph.forest import Forest
from nipy.algorithms.graph.field import field_from_coo_matrix_and_data
from .mroi import SubDomains
NINF = - np.inf
def hroi_agglomeration(input_hroi, criterion='size', smin=0):
"""Performs an agglomeration then a selection of regions
so that a certain size or volume criterion is satisfied.
Parameters
----------
input_hroi: HierarchicalROI instance
The input hROI
criterion: str, optional
To be chosen among 'size' or 'volume'
smin: float, optional
The applied criterion
Returns
-------
output_hroi: HierarchicalROI instance
"""
if criterion not in ['size', 'volume']:
        raise ValueError('unknown criterion')
output_hroi = input_hroi.copy()
k = 2 * output_hroi.k
if criterion == 'size':
value = output_hroi.get_size()
if criterion == 'volume':
value = output_hroi.get_volume()
# iteratively agglomerate regions that are too small
while k > output_hroi.k:
k = output_hroi.k
# regions agglomeration
output_hroi.merge_ascending(output_hroi.get_id()[value <= smin])
# suppress parents nodes having only one child
output_hroi.merge_descending()
# early stopping 1
if output_hroi.k == 0:
break
# early stopping 2
if criterion == 'size':
value = output_hroi.get_size()
if criterion == 'volume':
value = output_hroi.get_volume()
if value.max() < smin:
break
# finally remove those regions for which the criterion cannot be matched
output_hroi.select_roi(output_hroi.get_id()[value > smin])
return output_hroi
def HROI_as_discrete_domain_blobs(domain, data, threshold=NINF, smin=0,
criterion='size'):
"""Instantiate an HierarchicalROI as the blob decomposition
of data in a certain domain.
Parameters
----------
domain : discrete_domain.StructuredDomain instance,
Definition of the spatial context.
data : array of shape (domain.size)
The corresponding data field.
threshold : float, optional
Thresholding level.
criterion : string, optional
To be chosen among 'size' or 'volume'.
smin: float, optional
A threshold on the criterion.
Returns
-------
    nroi: HierarchicalROI instance with a `signal` feature.
"""
if threshold > data.max():
# return an empty HROI structure
label = - np.ones(data.shape)
parents = np.array([])
return HierarchicalROI(domain, label, parents)
# check size
df = field_from_coo_matrix_and_data(domain.topology, data)
idx, parents, label = df.threshold_bifurcations(th=threshold)
nroi = HierarchicalROI(domain, label, parents)
# create a signal feature
data = np.ravel(data)
signal = [data[nroi.select_id(id, roi=False)] for id in nroi.get_id()]
nroi.set_feature('signal', signal)
# agglomerate regions in order to compact the structure if necessary
nroi = hroi_agglomeration(nroi, criterion=criterion, smin=smin)
return nroi
def HROI_from_watershed(domain, data, threshold=NINF):
"""Instantiate an HierarchicalROI as the watershed of a certain dataset
Parameters
----------
domain: discrete_domain.StructuredDomain instance
Definition of the spatial context.
data: array of shape (domain.size)
The corresponding data field.
threshold: float, optional
Thresholding level.
Returns
-------
    nroi : ``HierarchicalROI`` instance
        The HierarchicalROI instance with a ``seed`` feature.
"""
if threshold > data.max():
# return an empty HROI structure
label = - np.ones(data.shape)
parents = np.array([])
return HierarchicalROI(domain, label, parents)
df = field_from_coo_matrix_and_data(domain.topology, data)
idx, label = df.custom_watershed(0, threshold)
parents = np.arange(idx.size).astype(int)
nroi = HierarchicalROI(domain, label, parents)
nroi.set_roi_feature('seed', idx)
return nroi
########################################################################
# Hierarchical ROI
########################################################################
class HierarchicalROI(SubDomains):
"""Class that handles hierarchical ROIs
Parameters
----------
k : int
Number of ROI in the SubDomains object
label : array of shape (domain.size), dtype=np.int
An array use to define which voxel belongs to which ROI.
The label values greater than -1 correspond to subregions
labelling. The labels are recomputed so as to be consecutive
integers.
The labels should not be accessed outside this class. One has to
use the API mapping methods instead.
features : dict {str: list of object, length=self.k}
Describe the voxels features, grouped by ROI
roi_features : dict {str: array-like, shape=(self.k, roi_feature_dim)
Describe the ROI features. A special feature, `id`, is read-only and
        is used to give a unique identifier per region, which is persistent
        through MROI object manipulations. One should access the different
        ROIs' features using ids.
parents : np.ndarray, shape(self.k)
self.parents[i] is the index of the parent of the i-th ROI.
TODO: have the parents as a list of id rather than a list of indices.
"""
def __init__(self, domain, label, parents, id=None):
"""Building the HierarchicalROI
"""
SubDomains.__init__(self, domain, label, id=id)
self.parents = np.ravel(parents).astype(np.int)
###
# Getters for very basic features or roi features
###
def get_volume(self, id=None, ignore_children=True):
"""Get ROI volume
Parameters
----------
id: any hashable type, optional
Id of the ROI from which we want to get the volume.
Can be None (default) if we want all ROIs's volumes.
ignore_children : bool, optional
Specify if the volume of the node should include
(ignore_children = False) or not the one of its children
(ignore_children = True).
Returns
-------
volume : float
if an id is provided,
or list of float
if no id provided (default)
"""
if ignore_children:
# volume of the children is not included
volume = SubDomains.get_volume(self, id)
else:
# volume of the children is included
if id is not None:
volume = SubDomains.get_volume(self, id)
desc = self.make_forest().get_descendents(
self.select_id(id), exclude_self=True)
# get children volume
for k in desc:
volume = volume + SubDomains.get_volume(
self, self.get_id()[k])
else:
volume = []
for id in self.get_id():
roi_volume = SubDomains.get_volume(self, id)
desc = self.make_forest().get_descendents(
self.select_id(id), exclude_self=True)
# get children volume
for k in desc:
roi_volume = roi_volume + SubDomains.get_volume(
self, self.get_id()[k])
volume.append(roi_volume)
return volume
def get_size(self, id=None, ignore_children=True):
"""Get ROI size (counted in terms of voxels)
Parameters
----------
id: any hashable type, optional
Id of the ROI from which we want to get the size.
Can be None (default) if we want all ROIs's sizes.
ignore_children: bool, optional
Specify if the size of the node should include
(ignore_children = False) or not the one of its children
(ignore_children = True).
Returns
-------
size: int
if an id is provided,
or list of int
if no id provided (default)
"""
if ignore_children:
# size of the children is not included
size = SubDomains.get_size(self, id)
else:
# size of the children is included
if id is not None:
size = SubDomains.get_size(self, id)
desc = self.make_forest().get_descendents(
self.select_id(id), exclude_self=True)
# get children size
for k in desc:
size = size + SubDomains.get_size(self, self.get_id()[k])
else:
size = []
for id in self.get_id():
roi_size = SubDomains.get_size(self, id)
desc = self.make_forest().get_descendents(
self.select_id(id), exclude_self=True)
# get children size
for k in desc:
roi_size = roi_size + SubDomains.get_size(
self, self.get_id()[k])
size.append(roi_size)
return size
def select_roi(self, id_list):
"""Returns an instance of HROI with only the subset of chosen ROIs.
The hierarchy is set accordingly.
Parameters
----------
id_list: list of id (any hashable type)
The id of the ROI to be kept in the structure.
"""
valid = np.asarray([int(i in id_list) for i in self.get_id()])
if np.size(id_list) == 0:
# handle the case of an empty selection
new_parents = np.array([])
self = HierarchicalROI(
self.domain, -np.ones(self.label.size), np.array([]))
else:
# get new parents
new_parents = Forest(self.k, self.parents).subforest(
valid.astype(np.bool)).parents.astype(np.int)
SubDomains.select_roi(self, id_list)
self.parents = new_parents
self.recompute_labels()
def make_graph(self):
"""Output an nipy graph structure to represent the ROI hierarchy.
"""
if self.k == 0:
return None
weights = np.ones(self.k)
edges = (np.vstack((np.arange(self.k), self.parents))).T
return WeightedGraph(self.k, edges, weights)
def make_forest(self):
"""Output an nipy forest structure to represent the ROI hierarchy.
"""
if self.k == 0:
return None
G = Forest(self.k, self.parents)
return G
def merge_ascending(self, id_list, pull_features=None):
"""Remove the non-valid ROIs by including them in
their parents when it exists.
Parameters
----------
id_list: list of id (any hashable type)
The id of the ROI to be merged into their parents.
Nodes that are their own parent are unmodified.
pull_features: list of str
List of the ROI features that will be pooled from the children
when they are merged into their parents. Otherwise, the receiving
parent would keep its own ROI feature.
"""
if pull_features is None:
pull_features = []
if self.k == 0:
return
id_list = [k for k in self.get_id() if k in id_list]
# relabel maps old labels to new labels
relabel = np.arange(self.k)
# merge nodes, one at a time
for c_id in id_list:
# define alias for clearer indexing
c_pos = self.select_id(c_id)
p_pos = self.parents[c_pos]
p_id = self.get_id()[p_pos]
if p_pos != c_pos:
# this will be used in many places
mask_pos = np.ones(self.k, np.bool)
mask_pos[c_pos] = False
# set new parents
self.parents = self.parents[mask_pos]
self.parents[self.parents == c_pos] = p_pos
self.parents[self.parents > c_pos] -= 1
self.k -= 1
# merge labels
relabel[relabel == c_id] = p_id
# compute new features
for fid in self.features.keys():
# replace feature
# (without the API since self is in an inconsistent state)
dj = self.get_feature(fid)
dj[p_pos] = np.hstack((dj[self.select_id(c_id)],
dj[self.select_id(p_id)]))
del dj[c_pos]
self.features[fid] = dj
# compute new roi features
for fid in self.roi_features.keys():
dj = self.get_roi_feature(fid)
if fid in pull_features:
# modify only if `pull` requested
dj[p_pos] = dj[c_pos]
self.roi_features[fid] = dj[mask_pos]
# update the labels
self.label[self.label > -1] = relabel[self.label[self.label > - 1]]
self.recompute_labels()
def merge_descending(self, pull_features=None):
""" Remove the items with only one son by including them in their son
Parameters
----------
        pull_features: list of str, optional
            ROI features that are pulled from the merged parent into its
            only child; other ROI features keep the child's value.
Caveat
------
if roi_features have been defined, they will be removed
"""
if pull_features is None:
pull_features = []
if self.k == 0:
return
# relabel maps old labels to new labels
relabel = np.arange(self.k)
# merge nodes, one at a time
id_list = self.get_id()[:: - 1]
for p_id in id_list:
p_pos = self.select_id(p_id)
p_children = np.nonzero(self.parents == p_pos)[0]
if p_pos in p_children:
# remove current node from its children list
p_children = p_children[p_children != p_pos]
if p_children.size == 1:
# merge node if it has only one child
c_pos = p_children[0]
c_id = self.get_id()[c_pos]
mask_pos = np.ones(self.k, np.bool)
mask_pos[p_pos] = False
# set new parents
self.parents[c_pos] = self.parents[p_pos]
if self.parents[c_pos] == p_pos:
self.parents[c_pos] = c_pos
self.parents = self.parents[mask_pos]
self.parents[self.parents > p_pos] -= 1
# merge labels
relabel[relabel == p_pos] = relabel[c_pos]
self.k -= 1
# compute new features
for fid in self.features.keys():
# replace feature
# (without the API since self is in an inconsistent state)
dj = self.get_feature(fid)
dj[c_pos] = np.hstack((dj[self.select_id(c_id)],
dj[self.select_id(p_id)]))
del dj[p_pos]
self.features[fid] = dj
# compute new roi features
for fid in self.roi_features.keys():
dj = self.get_roi_feature(fid)
if fid in pull_features:
# modify only if `pull` requested
dj[c_pos] = dj[p_pos]
self.roi_features[fid] = dj[mask_pos]
# update HROI structure
self.label[self.label > -1] = relabel[self.label[self.label > - 1]]
self.recompute_labels()
def get_parents(self):
"""Return the parent of each node in the hierarchy
The parents are represented by their position in the nodes flat list.
TODO:
The purpose of this class API is not to rely on this order, so
we should have self.parents as a list of ids instead of a list of
positions
"""
return self.parents
def get_leaves_id(self):
"""Return the ids of the leaves.
"""
if self.k == 0:
return np.array([])
# locate the positions of the children of each node
is_leaf_aux = [np.where(self.parents == k)[0] for k in range(self.k)]
# select nodes that has no child (different from themselves)
is_leaf = np.asarray(
[(len(child) == 0) or (len(child) == 1 and child[0] == i)
for i, child in enumerate(is_leaf_aux)])
        # finally return ids
return self.get_id()[is_leaf]
def reduce_to_leaves(self):
"""Create a new set of rois which are only the leaves of self.
        Modification of the structure is done in place. One may therefore
        want to work on a copy of a given HROI object.
"""
if self.k == 0:
            # handle the empty HROI case
return HierarchicalROI(
self.domain, -np.ones(self.domain.size), np.array([]))
leaves_id = self.get_leaves_id()
self.select_roi(leaves_id)
def copy(self):
""" Returns a copy of self.
self.domain is not copied.
"""
cp = HierarchicalROI(
self.domain, self.label.copy(), self.parents.copy(), self.get_id())
# copy features
for fid in self.features.keys():
cp.set_feature(fid, self.get_feature(fid))
# copy ROI features
for fid in self.roi_features.keys():
cp.set_roi_feature(fid, self.get_roi_feature(fid))
return cp
def representative_feature(self, fid, method='mean', id=None,
ignore_children=True, assess_quality=True):
"""Compute a ROI representative of a given feature.
Parameters
----------
fid: str,
Feature id
method: str,
Method used to compute a representative.
Chosen among 'mean' (default), 'max', 'median', 'min',
'weighted mean'.
id: any hashable type
Id of the ROI from which we want to extract a representative feature.
Can be None (default) if we want to get all ROIs's representatives.
ignore_children: bool,
Specify if the volume of the node should include
(ignore_children = False) or not the one of its children
(ignore_children = True).
assess_quality: bool
If True, a new roi feature is created, which represent the quality
of the feature representative (the number of non-nan value for the
feature over the ROI size).
Default is False.
"""
rf = []
eps = 1.e-15
feature_quality = np.zeros(self.k)
for i, k in enumerate(self.get_id()):
f = self.get_feature(fid, k)
p_pos = self.select_id(k)
if not ignore_children:
# also include the children features
desc = np.nonzero(self.parents == p_pos)[0]
if p_pos in desc:
desc = desc[desc != p_pos]
for c in desc:
f = np.concatenate(
(f, self.get_feature(fid, self.get_id()[c])))
# NaN-resistant representative
if f.ndim == 2:
nan = np.isnan(f.sum(1))
else:
nan = np.isnan(f)
# feature quality
feature_quality[i] = (~nan).sum() / float(nan.size)
# compute representative
if method == "mean":
rf.append(np.mean(f[~nan], 0))
if method == "weighted mean":
lvk = self.get_local_volume(k)
if not ignore_children:
# append weights for children's voxels
for c in desc:
lvk = np.concatenate(
(lvk,
self.get_local_volume(fid, self.select_id(c))))
tmp = np.dot(lvk[~nan], f[~nan].reshape((-1, 1))) / \
np.maximum(eps, np.sum(lvk[~nan]))
rf.append(tmp)
if method == "min":
rf.append(np.min(f[~nan]))
if method == "max":
rf.append(np.max(f[~nan]))
if method == "median":
rf.append(np.median(f[~nan], 0))
if id is not None:
summary_feature = rf[self.select_id(id)]
else:
summary_feature = rf
if assess_quality:
self.set_roi_feature('%s_quality' % fid, feature_quality)
return np.array(summary_feature)
def make_hroi_from_subdomain(sub_domain, parents):
"""Instantiate an HROi from a SubDomain instance and parents
"""
hroi = HierarchicalROI(sub_domain.domain, sub_domain.label, parents)
# set features
for fid in sub_domain.features.keys():
hroi.set_feature(fid, sub_domain.get_feature(fid))
# set ROI features
for fid in sub_domain.roi_features.keys():
hroi.set_roi_feature(fid, sub_domain.get_roi_feature(fid))
return hroi
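An illustration of the parents encoding described in the HierarchicalROI docstring (a hypothetical 4-region hierarchy; a root is its own parent):

import numpy as np

parents = np.array([2, 2, 3, 3])  # 0 and 1 are children of 2; 2 is a child of 3; 3 is the root
# Forest(4, parents) is the structure that make_forest() builds for such a hierarchy.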
| 36.419142
| 79
| 0.561305
|
import numpy as np
from nipy.algorithms.graph.graph import WeightedGraph
from nipy.algorithms.graph.forest import Forest
from nipy.algorithms.graph.field import field_from_coo_matrix_and_data
from .mroi import SubDomains
NINF = - np.inf
def hroi_agglomeration(input_hroi, criterion='size', smin=0):
if criterion not in ['size', 'volume']:
        raise ValueError('unknown criterion')
output_hroi = input_hroi.copy()
k = 2 * output_hroi.k
if criterion == 'size':
value = output_hroi.get_size()
if criterion == 'volume':
value = output_hroi.get_volume()
while k > output_hroi.k:
k = output_hroi.k
output_hroi.merge_ascending(output_hroi.get_id()[value <= smin])
output_hroi.merge_descending()
if output_hroi.k == 0:
break
if criterion == 'size':
value = output_hroi.get_size()
if criterion == 'volume':
value = output_hroi.get_volume()
if value.max() < smin:
break
output_hroi.select_roi(output_hroi.get_id()[value > smin])
return output_hroi
def HROI_as_discrete_domain_blobs(domain, data, threshold=NINF, smin=0,
criterion='size'):
if threshold > data.max():
label = - np.ones(data.shape)
parents = np.array([])
return HierarchicalROI(domain, label, parents)
df = field_from_coo_matrix_and_data(domain.topology, data)
idx, parents, label = df.threshold_bifurcations(th=threshold)
nroi = HierarchicalROI(domain, label, parents)
data = np.ravel(data)
signal = [data[nroi.select_id(id, roi=False)] for id in nroi.get_id()]
nroi.set_feature('signal', signal)
nroi = hroi_agglomeration(nroi, criterion=criterion, smin=smin)
return nroi
def HROI_from_watershed(domain, data, threshold=NINF):
if threshold > data.max():
label = - np.ones(data.shape)
parents = np.array([])
return HierarchicalROI(domain, label, parents)
df = field_from_coo_matrix_and_data(domain.topology, data)
idx, label = df.custom_watershed(0, threshold)
parents = np.arange(idx.size).astype(int)
nroi = HierarchicalROI(domain, label, parents)
nroi.set_roi_feature('seed', idx)
return nroi
        relabel = np.arange(self.k)
id_list = self.get_id()[:: - 1]
for p_id in id_list:
p_pos = self.select_id(p_id)
p_children = np.nonzero(self.parents == p_pos)[0]
if p_pos in p_children:
p_children = p_children[p_children != p_pos]
if p_children.size == 1:
c_pos = p_children[0]
c_id = self.get_id()[c_pos]
mask_pos = np.ones(self.k, np.bool)
mask_pos[p_pos] = False
self.parents[c_pos] = self.parents[p_pos]
if self.parents[c_pos] == p_pos:
self.parents[c_pos] = c_pos
self.parents = self.parents[mask_pos]
self.parents[self.parents > p_pos] -= 1
relabel[relabel == p_pos] = relabel[c_pos]
self.k -= 1
for fid in self.features.keys():
dj = self.get_feature(fid)
dj[c_pos] = np.hstack((dj[self.select_id(c_id)],
dj[self.select_id(p_id)]))
del dj[p_pos]
self.features[fid] = dj
for fid in self.roi_features.keys():
dj = self.get_roi_feature(fid)
if fid in pull_features:
dj[c_pos] = dj[p_pos]
self.roi_features[fid] = dj[mask_pos]
self.label[self.label > -1] = relabel[self.label[self.label > - 1]]
self.recompute_labels()
def get_parents(self):
return self.parents
def get_leaves_id(self):
if self.k == 0:
return np.array([])
is_leaf_aux = [np.where(self.parents == k)[0] for k in range(self.k)]
is_leaf = np.asarray(
[(len(child) == 0) or (len(child) == 1 and child[0] == i)
for i, child in enumerate(is_leaf_aux)])
return self.get_id()[is_leaf]
def reduce_to_leaves(self):
if self.k == 0:
return HierarchicalROI(
self.domain, -np.ones(self.domain.size), np.array([]))
leaves_id = self.get_leaves_id()
self.select_roi(leaves_id)
def copy(self):
cp = HierarchicalROI(
self.domain, self.label.copy(), self.parents.copy(), self.get_id())
for fid in self.features.keys():
cp.set_feature(fid, self.get_feature(fid))
for fid in self.roi_features.keys():
cp.set_roi_feature(fid, self.get_roi_feature(fid))
return cp
def representative_feature(self, fid, method='mean', id=None,
ignore_children=True, assess_quality=True):
rf = []
eps = 1.e-15
feature_quality = np.zeros(self.k)
for i, k in enumerate(self.get_id()):
f = self.get_feature(fid, k)
p_pos = self.select_id(k)
if not ignore_children:
desc = np.nonzero(self.parents == p_pos)[0]
if p_pos in desc:
desc = desc[desc != p_pos]
for c in desc:
f = np.concatenate(
(f, self.get_feature(fid, self.get_id()[c])))
if f.ndim == 2:
nan = np.isnan(f.sum(1))
else:
nan = np.isnan(f)
feature_quality[i] = (~nan).sum() / float(nan.size)
if method == "mean":
rf.append(np.mean(f[~nan], 0))
if method == "weighted mean":
lvk = self.get_local_volume(k)
if not ignore_children:
for c in desc:
lvk = np.concatenate(
(lvk,
self.get_local_volume(fid, self.select_id(c))))
tmp = np.dot(lvk[~nan], f[~nan].reshape((-1, 1))) / \
np.maximum(eps, np.sum(lvk[~nan]))
rf.append(tmp)
if method == "min":
rf.append(np.min(f[~nan]))
if method == "max":
rf.append(np.max(f[~nan]))
if method == "median":
rf.append(np.median(f[~nan], 0))
if id is not None:
summary_feature = rf[self.select_id(id)]
else:
summary_feature = rf
if assess_quality:
self.set_roi_feature('%s_quality' % fid, feature_quality)
return np.array(summary_feature)
def make_hroi_from_subdomain(sub_domain, parents):
hroi = HierarchicalROI(sub_domain.domain, sub_domain.label, parents)
# set features
for fid in sub_domain.features.keys():
hroi.set_feature(fid, sub_domain.get_feature(fid))
# set ROI features
for fid in sub_domain.roi_features.keys():
hroi.set_roi_feature(fid, sub_domain.get_roi_feature(fid))
return hroi
| true
| true
|
f71499f5deb8f014468d50ba23386afe547af396
| 5,599
|
py
|
Python
|
utils.py
|
lgraesser/MCER
|
250aa6965064dbc73462eb5edb559bf9ce949b70
|
[
"Apache-2.0"
] | null | null | null |
utils.py
|
lgraesser/MCER
|
250aa6965064dbc73462eb5edb559bf9ce949b70
|
[
"Apache-2.0"
] | null | null | null |
utils.py
|
lgraesser/MCER
|
250aa6965064dbc73462eb5edb559bf9ce949b70
|
[
"Apache-2.0"
] | null | null | null |
import json
import logging
import matplotlib.pyplot as plt
import os
import tensorflow as tf
from sklearn.utils import shuffle
import model
import train
logger = logging.getLogger('utils')
logger.setLevel(logging.INFO)
def get_data_path():
'''Returns the path to the image and annotation data.
Downloads the data if it doesn't exist.
'''
# Download caption annotation files
annotation_folder = '/data/train_data/annotations/'
if not os.path.exists(os.path.abspath('.') + annotation_folder):
logger.info('Downloading captions file.')
annotation_zip = tf.keras.utils.get_file('captions.zip',
cache_subdir=os.path.abspath('./data/train_data'),
origin = 'http://images.cocodataset.org/annotations/annotations_trainval2014.zip',
extract = True)
annotation_file_path = os.path.dirname(annotation_zip)+'/annotations/captions_train2014.json'
os.remove(annotation_zip)
else:
annotation_file_path = os.path.abspath('.') + annotation_folder + 'captions_train2014.json'
logger.info(f'Captions file already exists here {annotation_file_path}.')
# Download image files
image_folder = '/data/train_data/train2014/'
if not os.path.exists(os.path.abspath('.') + image_folder):
logger.info('Downloading image data. This may take a while.')
image_zip = tf.keras.utils.get_file('train2014.zip',
cache_subdir=os.path.abspath('./data/train_data'),
origin = 'http://images.cocodataset.org/zips/train2014.zip',
extract = True)
image_file_path = os.path.dirname(image_zip) + image_folder
os.remove(image_zip)
else:
image_file_path = os.path.abspath('.') + image_folder
logger.info(f'Image data already exists here {image_file_path}.')
return image_file_path, annotation_file_path
def get_caption_image_names(annotation_file_path, image_file_path, shuffle_data=True):
'''Returns a shuffled list of the captions and the corresponding image names.'''
# Read the json file
with open(annotation_file_path, 'r') as f:
annotations = json.load(f)
logger.info('Loaded the annotations file.')
# Store captions and image names in vectors
all_captions = []
all_img_name_vector = []
for annot in annotations['annotations']:
caption = '<start> ' + annot['caption'] + ' <end>'
image_id = annot['image_id']
full_coco_image_path = image_file_path + 'COCO_train2014_' + '%012d.jpg' % (image_id)
all_img_name_vector.append(full_coco_image_path)
all_captions.append(caption)
# Shuffle captions and image_names together
# Set a random state
if shuffle_data:
logger.info('Shuffling the data...')
train_captions, img_name_vector = shuffle(all_captions,
all_img_name_vector,
random_state=1)
else:
train_captions = all_captions
img_name_vector = all_img_name_vector
return train_captions, img_name_vector
def get_top_k(train_captions, img_name_vector, num_examples):
'''Selects the first k examples from the data.'''
assert len(train_captions) == len(img_name_vector)
original_cap_length = len(train_captions)
if num_examples > original_cap_length:
logger.warning(f'Desired num examples {num_examples} > actual number examples {original_cap_length}, using whole training set')
num_examples = original_cap_length
train_captions = train_captions[:num_examples]
img_name_vector = img_name_vector[:num_examples]
logger.info(f'Num train captions: {len(train_captions)}, num all captions: {original_cap_length}')
return train_captions, img_name_vector
def calc_max_length(tensor):
"""Find the maximum length of any tensor"""
return max(len(t) for t in tensor)
def load_image(image_path):
img = tf.io.read_file(image_path)
img = tf.image.decode_jpeg(img, channels=3)
img = tf.image.resize(img, (299, 299))
img = tf.keras.applications.inception_v3.preprocess_input(img)
return img, image_path
def plot_loss(loss_data):
    plt.plot(loss_data)
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.title('Loss Plot')
plt.show()
def save_loss_plot(loss_data, figname, data_label):
plt.figure(figsize=(10, 10))
plt.plot(loss_data, label=data_label)
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.title('Loss Plot')
plt.legend(loc='upper left')
plt.savefig(figname)
plt.close()
def build_model(model_logdir, vocab_size):
embedding_dim = 256
units = 512
    # embedding_dim and units size the encoder embedding and the decoder RNN;
    # the feature vector extracted from InceptionV3 has shape (64, 2048)
encoder = model.CNN_Encoder(embedding_dim)
decoder = model.RNN_Decoder(embedding_dim, units, vocab_size)
# get optim, and checkpoint manager
optimizer = train.get_optimizer()
loss_object = train.get_loss_object()
ckpt_manager, ckpt = train.get_checkpoint_manager(encoder, decoder, optimizer, path=model_logdir)
# Restore tokenizer
with open(os.path.join(model_logdir, 'tokenizer.json')) as f:
data = json.load(f)
tokenizer = tf.keras.preprocessing.text.tokenizer_from_json(data)
return encoder, decoder, tokenizer, ckpt_manager, ckpt
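A minimal sketch of how load_image is typically mapped over the image paths with tf.data (img_name_vector as returned by the helpers above; the batch size is arbitrary):

import tensorflow as tf

encode_train = sorted(set(img_name_vector))
image_dataset = tf.data.Dataset.from_tensor_slices(encode_train)
image_dataset = image_dataset.map(
    load_image, num_parallel_calls=tf.data.experimental.AUTOTUNE).batch(16)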
| 37.577181
| 135
| 0.666905
|
import json
import logging
import matplotlib.pyplot as plt
import os
import tensorflow as tf
from sklearn.utils import shuffle
import model
import train
logger = logging.getLogger('utils')
logger.setLevel(logging.INFO)
def get_data_path():
annotation_folder = '/data/train_data/annotations/'
if not os.path.exists(os.path.abspath('.') + annotation_folder):
logger.info('Downloading captions file.')
annotation_zip = tf.keras.utils.get_file('captions.zip',
cache_subdir=os.path.abspath('./data/train_data'),
origin = 'http://images.cocodataset.org/annotations/annotations_trainval2014.zip',
extract = True)
annotation_file_path = os.path.dirname(annotation_zip)+'/annotations/captions_train2014.json'
os.remove(annotation_zip)
else:
annotation_file_path = os.path.abspath('.') + annotation_folder + 'captions_train2014.json'
logger.info(f'Captions file already exists here {annotation_file_path}.')
image_folder = '/data/train_data/train2014/'
if not os.path.exists(os.path.abspath('.') + image_folder):
logger.info('Downloading image data. This may take a while.')
image_zip = tf.keras.utils.get_file('train2014.zip',
cache_subdir=os.path.abspath('./data/train_data'),
origin = 'http://images.cocodataset.org/zips/train2014.zip',
extract = True)
image_file_path = os.path.dirname(image_zip) + image_folder
os.remove(image_zip)
else:
image_file_path = os.path.abspath('.') + image_folder
logger.info(f'Image data already exists here {image_file_path}.')
return image_file_path, annotation_file_path
def get_caption_image_names(annotation_file_path, image_file_path, shuffle_data=True):
with open(annotation_file_path, 'r') as f:
annotations = json.load(f)
logger.info('Loaded the annotations file.')
all_captions = []
all_img_name_vector = []
for annot in annotations['annotations']:
caption = '<start> ' + annot['caption'] + ' <end>'
image_id = annot['image_id']
full_coco_image_path = image_file_path + 'COCO_train2014_' + '%012d.jpg' % (image_id)
all_img_name_vector.append(full_coco_image_path)
all_captions.append(caption)
if shuffle_data:
logger.info('Shuffling the data...')
train_captions, img_name_vector = shuffle(all_captions,
all_img_name_vector,
random_state=1)
else:
train_captions = all_captions
img_name_vector = all_img_name_vector
return train_captions, img_name_vector
def get_top_k(train_captions, img_name_vector, num_examples):
assert len(train_captions) == len(img_name_vector)
original_cap_length = len(train_captions)
if num_examples > original_cap_length:
logger.warning(f'Desired num examples {num_examples} > actual number examples {original_cap_length}, using whole training set')
num_examples = original_cap_length
train_captions = train_captions[:num_examples]
img_name_vector = img_name_vector[:num_examples]
logger.info(f'Num train captions: {len(train_captions)}, num all captions: {original_cap_length}')
return train_captions, img_name_vector
def calc_max_length(tensor):
return max(len(t) for t in tensor)
def load_image(image_path):
img = tf.io.read_file(image_path)
img = tf.image.decode_jpeg(img, channels=3)
img = tf.image.resize(img, (299, 299))
img = tf.keras.applications.inception_v3.preprocess_input(img)
return img, image_path
def plot_loss(loss_data):
    plt.plot(loss_data)
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.title('Loss Plot')
plt.show()
def save_loss_plot(loss_data, figname, data_label):
plt.figure(figsize=(10, 10))
plt.plot(loss_data, label=data_label)
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.title('Loss Plot')
plt.legend(loc='upper left')
plt.savefig(figname)
plt.close()
def build_model(model_logdir, vocab_size):
embedding_dim = 256
units = 512
encoder = model.CNN_Encoder(embedding_dim)
decoder = model.RNN_Decoder(embedding_dim, units, vocab_size)
optimizer = train.get_optimizer()
loss_object = train.get_loss_object()
ckpt_manager, ckpt = train.get_checkpoint_manager(encoder, decoder, optimizer, path=model_logdir)
with open(os.path.join(model_logdir, 'tokenizer.json')) as f:
data = json.load(f)
tokenizer = tf.keras.preprocessing.text.tokenizer_from_json(data)
return encoder, decoder, tokenizer, ckpt_manager, ckpt
| true
| true
|
f7149a066af83fb2bde594eb67a847e0095f1a45
| 8,345
|
py
|
Python
|
apischema/conversions/visitor.py
|
klauer/apischema
|
0da9b96b74dabe8704e2dcfca4502aed98500799
|
[
"MIT"
] | null | null | null |
apischema/conversions/visitor.py
|
klauer/apischema
|
0da9b96b74dabe8704e2dcfca4502aed98500799
|
[
"MIT"
] | null | null | null |
apischema/conversions/visitor.py
|
klauer/apischema
|
0da9b96b74dabe8704e2dcfca4502aed98500799
|
[
"MIT"
] | null | null | null |
from contextlib import contextmanager, suppress
from dataclasses import replace
from functools import lru_cache
from types import new_class
from typing import (
Any,
ClassVar,
Collection,
Generic,
Iterable,
Optional,
Sequence,
Tuple,
Type,
TypeVar,
Union,
)
from apischema.conversions import LazyConversion
from apischema.conversions.conversions import (
AnyConversion,
DefaultConversion,
ResolvedConversion,
ResolvedConversions,
handle_identity_conversion,
is_identity,
resolve_any_conversion,
)
from apischema.conversions.dataclass_models import handle_dataclass_model
from apischema.conversions.utils import is_convertible
from apischema.metadata.implem import ConversionMetadata
from apischema.type_names import type_name
from apischema.types import AnyType
from apischema.typing import get_args
from apischema.utils import (
context_setter,
get_origin_or_type,
has_type_vars,
is_subclass,
substitute_type_vars,
subtyping_substitution,
)
from apischema.visitor import Result, Unsupported, Visitor
Deserialization = ResolvedConversions
Serialization = ResolvedConversion
Conv = TypeVar("Conv")
class ConversionsVisitor(Visitor[Result], Generic[Conv, Result]):
base_conversion_visitor: ClassVar[Type["ConversionsVisitor"]]
def __init__(self, default_conversion: DefaultConversion):
self.default_conversion = default_conversion
self._conversion: Optional[AnyConversion] = None
def _has_conversion(
self, tp: AnyType, conversion: Optional[AnyConversion]
) -> Tuple[bool, Optional[Conv]]:
raise NotImplementedError
def _annotated_conversion(
self, annotation: ConversionMetadata
) -> Optional[AnyConversion]:
raise NotImplementedError
def annotated(self, tp: AnyType, annotations: Sequence[Any]) -> Result:
for annotation in reversed(annotations):
if isinstance(annotation, ConversionMetadata):
with self._replace_conversion(self._annotated_conversion(annotation)):
return super().annotated(tp, annotations)
return super().annotated(tp, annotations)
def _union_results(self, alternatives: Iterable[AnyType]) -> Sequence[Result]:
results = []
for alt in alternatives:
with suppress(Unsupported):
results.append(self.visit(alt))
if not results:
raise Unsupported(Union[tuple(alternatives)])
return results
def _visited_union(self, results: Sequence[Result]) -> Result:
raise NotImplementedError
def union(self, alternatives: Sequence[AnyType]) -> Result:
return self._visited_union(self._union_results(alternatives))
@contextmanager
def _replace_conversion(self, conversion: Optional[AnyConversion]):
with context_setter(self):
self._conversion = resolve_any_conversion(conversion) or None
yield
def visit_with_conv(
self, tp: AnyType, conversion: Optional[AnyConversion]
) -> Result:
with self._replace_conversion(conversion):
return self.visit(tp)
def _visit_conversion(
self,
tp: AnyType,
conversion: Conv,
dynamic: bool,
next_conversion: Optional[AnyConversion],
) -> Result:
raise NotImplementedError
def visit_conversion(
self,
tp: AnyType,
conversion: Optional[Conv],
dynamic: bool,
next_conversion: Optional[AnyConversion] = None,
) -> Result:
if conversion is not None:
return self._visit_conversion(tp, conversion, dynamic, next_conversion)
else:
with self._replace_conversion(next_conversion):
return super().visit(tp)
def visit(self, tp: AnyType) -> Result:
if not is_convertible(tp):
return self.visit_conversion(tp, None, False, self._conversion)
dynamic, conversion = self._has_conversion(tp, self._conversion)
if not dynamic:
_, conversion = self._has_conversion(
tp, self.default_conversion(get_origin_or_type(tp)) # type: ignore
)
next_conversion = None
if not dynamic and is_subclass(tp, Collection):
next_conversion = self._conversion
return self.visit_conversion(tp, conversion, dynamic, next_conversion)
def sub_conversion(
conversion: ResolvedConversion, next_conversion: Optional[AnyConversion]
) -> Optional[AnyConversion]:
return (
LazyConversion(lambda: conversion.sub_conversion),
LazyConversion(lambda: next_conversion),
)
@lru_cache(maxsize=0)
def self_deserialization_wrapper(cls: Type) -> Type:
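    # Wrap cls in a subclass whose __new__ simply constructs cls, so identity conversions can deserialize into the original class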
wrapper = new_class(
f"{cls.__name__}SelfDeserializer",
(cls[cls.__parameters__] if has_type_vars(cls) else cls,),
exec_body=lambda ns: ns.update(
{"__new__": lambda _, *args, **kwargs: cls(*args, **kwargs)}
),
)
return type_name(None)(wrapper)
class DeserializationVisitor(ConversionsVisitor[Deserialization, Result]):
@staticmethod
def _has_conversion(
tp: AnyType, conversion: Optional[AnyConversion]
) -> Tuple[bool, Optional[Deserialization]]:
identity_conv, result = False, []
for conv in resolve_any_conversion(conversion):
conv = handle_identity_conversion(conv, tp)
if is_subclass(conv.target, tp):
if is_identity(conv):
if identity_conv:
continue
identity_conv = True
wrapper: AnyType = self_deserialization_wrapper(
get_origin_or_type(tp)
)
if get_args(tp):
wrapper = wrapper[get_args(tp)]
conv = ResolvedConversion(replace(conv, source=wrapper))
conv = handle_dataclass_model(conv)
_, substitution = subtyping_substitution(tp, conv.target)
source = substitute_type_vars(conv.source, substitution)
result.append(
ResolvedConversion(replace(conv, source=source, target=tp))
)
if identity_conv and len(result) == 1:
return True, None
else:
return bool(result), tuple(result) or None
def _annotated_conversion(
self, annotation: ConversionMetadata
) -> Optional[AnyConversion]:
return annotation.deserialization
def _visit_conversion(
self,
tp: AnyType,
conversion: Deserialization,
dynamic: bool,
next_conversion: Optional[AnyConversion],
) -> Result:
results = [
self.visit_with_conv(conv.source, sub_conversion(conv, next_conversion))
for conv in conversion
]
return self._visited_union(results)
class SerializationVisitor(ConversionsVisitor[Serialization, Result]):
@staticmethod
def _has_conversion(
tp: AnyType, conversion: Optional[AnyConversion]
) -> Tuple[bool, Optional[Serialization]]:
for conv in resolve_any_conversion(conversion):
conv = handle_identity_conversion(conv, tp)
if is_subclass(tp, conv.source):
if is_identity(conv):
return True, None
conv = handle_dataclass_model(conv)
substitution, _ = subtyping_substitution(conv.source, tp)
target = substitute_type_vars(conv.target, substitution)
return True, ResolvedConversion(replace(conv, source=tp, target=target))
else:
return False, None
def _annotated_conversion(
self, annotation: ConversionMetadata
) -> Optional[AnyConversion]:
return annotation.serialization
def _visit_conversion(
self,
tp: AnyType,
conversion: Serialization,
dynamic: bool,
next_conversion: Optional[AnyConversion],
) -> Result:
return self.visit_with_conv(
conversion.target, sub_conversion(conversion, next_conversion)
)
DeserializationVisitor.base_conversion_visitor = DeserializationVisitor
SerializationVisitor.base_conversion_visitor = SerializationVisitor
| 34.341564
| 88
| 0.660036
|
from contextlib import contextmanager, suppress
from dataclasses import replace
from functools import lru_cache
from types import new_class
from typing import (
Any,
ClassVar,
Collection,
Generic,
Iterable,
Optional,
Sequence,
Tuple,
Type,
TypeVar,
Union,
)
from apischema.conversions import LazyConversion
from apischema.conversions.conversions import (
AnyConversion,
DefaultConversion,
ResolvedConversion,
ResolvedConversions,
handle_identity_conversion,
is_identity,
resolve_any_conversion,
)
from apischema.conversions.dataclass_models import handle_dataclass_model
from apischema.conversions.utils import is_convertible
from apischema.metadata.implem import ConversionMetadata
from apischema.type_names import type_name
from apischema.types import AnyType
from apischema.typing import get_args
from apischema.utils import (
context_setter,
get_origin_or_type,
has_type_vars,
is_subclass,
substitute_type_vars,
subtyping_substitution,
)
from apischema.visitor import Result, Unsupported, Visitor
Deserialization = ResolvedConversions
Serialization = ResolvedConversion
Conv = TypeVar("Conv")
class ConversionsVisitor(Visitor[Result], Generic[Conv, Result]):
base_conversion_visitor: ClassVar[Type["ConversionsVisitor"]]
def __init__(self, default_conversion: DefaultConversion):
self.default_conversion = default_conversion
self._conversion: Optional[AnyConversion] = None
def _has_conversion(
self, tp: AnyType, conversion: Optional[AnyConversion]
) -> Tuple[bool, Optional[Conv]]:
raise NotImplementedError
def _annotated_conversion(
self, annotation: ConversionMetadata
) -> Optional[AnyConversion]:
raise NotImplementedError
def annotated(self, tp: AnyType, annotations: Sequence[Any]) -> Result:
for annotation in reversed(annotations):
if isinstance(annotation, ConversionMetadata):
with self._replace_conversion(self._annotated_conversion(annotation)):
return super().annotated(tp, annotations)
return super().annotated(tp, annotations)
def _union_results(self, alternatives: Iterable[AnyType]) -> Sequence[Result]:
results = []
for alt in alternatives:
with suppress(Unsupported):
results.append(self.visit(alt))
if not results:
raise Unsupported(Union[tuple(alternatives)])
return results
def _visited_union(self, results: Sequence[Result]) -> Result:
raise NotImplementedError
def union(self, alternatives: Sequence[AnyType]) -> Result:
return self._visited_union(self._union_results(alternatives))
@contextmanager
def _replace_conversion(self, conversion: Optional[AnyConversion]):
with context_setter(self):
self._conversion = resolve_any_conversion(conversion) or None
yield
def visit_with_conv(
self, tp: AnyType, conversion: Optional[AnyConversion]
) -> Result:
with self._replace_conversion(conversion):
return self.visit(tp)
def _visit_conversion(
self,
tp: AnyType,
conversion: Conv,
dynamic: bool,
next_conversion: Optional[AnyConversion],
) -> Result:
raise NotImplementedError
def visit_conversion(
self,
tp: AnyType,
conversion: Optional[Conv],
dynamic: bool,
next_conversion: Optional[AnyConversion] = None,
) -> Result:
if conversion is not None:
return self._visit_conversion(tp, conversion, dynamic, next_conversion)
else:
with self._replace_conversion(next_conversion):
return super().visit(tp)
def visit(self, tp: AnyType) -> Result:
if not is_convertible(tp):
return self.visit_conversion(tp, None, False, self._conversion)
dynamic, conversion = self._has_conversion(tp, self._conversion)
if not dynamic:
_, conversion = self._has_conversion(
tp, self.default_conversion(get_origin_or_type(tp))
)
next_conversion = None
if not dynamic and is_subclass(tp, Collection):
next_conversion = self._conversion
return self.visit_conversion(tp, conversion, dynamic, next_conversion)
def sub_conversion(
conversion: ResolvedConversion, next_conversion: Optional[AnyConversion]
) -> Optional[AnyConversion]:
return (
LazyConversion(lambda: conversion.sub_conversion),
LazyConversion(lambda: next_conversion),
)
@lru_cache(maxsize=0)
def self_deserialization_wrapper(cls: Type) -> Type:
wrapper = new_class(
f"{cls.__name__}SelfDeserializer",
(cls[cls.__parameters__] if has_type_vars(cls) else cls,),
exec_body=lambda ns: ns.update(
{"__new__": lambda _, *args, **kwargs: cls(*args, **kwargs)}
),
)
return type_name(None)(wrapper)
class DeserializationVisitor(ConversionsVisitor[Deserialization, Result]):
@staticmethod
def _has_conversion(
tp: AnyType, conversion: Optional[AnyConversion]
) -> Tuple[bool, Optional[Deserialization]]:
identity_conv, result = False, []
for conv in resolve_any_conversion(conversion):
conv = handle_identity_conversion(conv, tp)
if is_subclass(conv.target, tp):
if is_identity(conv):
if identity_conv:
continue
identity_conv = True
wrapper: AnyType = self_deserialization_wrapper(
get_origin_or_type(tp)
)
if get_args(tp):
wrapper = wrapper[get_args(tp)]
conv = ResolvedConversion(replace(conv, source=wrapper))
conv = handle_dataclass_model(conv)
_, substitution = subtyping_substitution(tp, conv.target)
source = substitute_type_vars(conv.source, substitution)
result.append(
ResolvedConversion(replace(conv, source=source, target=tp))
)
if identity_conv and len(result) == 1:
return True, None
else:
return bool(result), tuple(result) or None
def _annotated_conversion(
self, annotation: ConversionMetadata
) -> Optional[AnyConversion]:
return annotation.deserialization
def _visit_conversion(
self,
tp: AnyType,
conversion: Deserialization,
dynamic: bool,
next_conversion: Optional[AnyConversion],
) -> Result:
results = [
self.visit_with_conv(conv.source, sub_conversion(conv, next_conversion))
for conv in conversion
]
return self._visited_union(results)
class SerializationVisitor(ConversionsVisitor[Serialization, Result]):
@staticmethod
def _has_conversion(
tp: AnyType, conversion: Optional[AnyConversion]
) -> Tuple[bool, Optional[Serialization]]:
for conv in resolve_any_conversion(conversion):
conv = handle_identity_conversion(conv, tp)
if is_subclass(tp, conv.source):
if is_identity(conv):
return True, None
conv = handle_dataclass_model(conv)
substitution, _ = subtyping_substitution(conv.source, tp)
target = substitute_type_vars(conv.target, substitution)
return True, ResolvedConversion(replace(conv, source=tp, target=target))
else:
return False, None
def _annotated_conversion(
self, annotation: ConversionMetadata
) -> Optional[AnyConversion]:
return annotation.serialization
def _visit_conversion(
self,
tp: AnyType,
conversion: Serialization,
dynamic: bool,
next_conversion: Optional[AnyConversion],
) -> Result:
return self.visit_with_conv(
conversion.target, sub_conversion(conversion, next_conversion)
)
DeserializationVisitor.base_conversion_visitor = DeserializationVisitor
SerializationVisitor.base_conversion_visitor = SerializationVisitor
| true
| true
|
f7149a685dddb6fa97224e4564f4831152a7289f
| 3,618
|
py
|
Python
|
outputs/8c5d9918967dd5901fcbadab29308672/chuansong/pipelines.py
|
louis-xuy/scrapy_helper
|
14acdb8c23316cc1d83c2526ce024447cf60ccbf
|
[
"MIT"
] | 89
|
2018-01-13T06:51:41.000Z
|
2021-12-27T05:52:46.000Z
|
outputs/8c5d9918967dd5901fcbadab29308672/chuansong/pipelines.py
|
facert/scrapy_helper
|
14acdb8c23316cc1d83c2526ce024447cf60ccbf
|
[
"MIT"
] | 1
|
2021-06-10T23:54:48.000Z
|
2021-06-10T23:54:48.000Z
|
outputs/8c5d9918967dd5901fcbadab29308672/chuansong/pipelines.py
|
louis-xuy/scrapy_helper
|
14acdb8c23316cc1d83c2526ce024447cf60ccbf
|
[
"MIT"
] | 37
|
2018-01-16T06:24:17.000Z
|
2021-12-27T05:52:54.000Z
|
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
import json
import socket
import scrapy
import hashlib
from scrapy.exceptions import DropItem
from scrapy.pipelines.images import ImagesPipeline
from scrapy.utils.project import get_project_settings
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
count = 0
class ImagesPipeline(ImagesPipeline):
def get_media_requests(self, item, info):
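        # Schedule one download request for every image URL attached to the item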
for image_url in item['image_urls']:
yield scrapy.Request(image_url.strip())
def item_completed(self, results, item, info):
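        # Drop the item if none of its images downloaded successfully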
image_paths = [x['path'] for ok, x in results if ok]
if not image_paths:
raise DropItem("Item contains no images")
return item
class JsonWriterPipeline(object):
def open_spider(self, spider):
self.file = open('chuansong_items.json', 'w')
def close_spider(self, spider):
self.file.close()
def process_item(self, item, spider):
global count
count += 1
if spider.settings.get('COUNT_DATA') and count % 100 == 0:
s.sendto(u"8c5d9918967dd5901fcbadab29308672, %s" % count, ('www.anycrawl.info', 3500))
line = json.dumps(dict(item)) + "\n"
self.file.write(line)
return item
class CsvWriterPipeline(object):
def open_spider(self, spider):
self.file = open('chuansong_items.csv', 'w')
def close_spider(self, spider):
self.file.close()
def process_item(self, item, spider):
line = "\t".join(dict(item).values())
self.file.write(line.encode('utf-8'))
return item
class MongoPipeline(object):
def open_spider(self, spider):
import pymongo
host = spider.settings.get('MONGODB_HOST')
port = spider.settings.get('MONGODB_PORT')
db_name = spider.settings.get('MONGODB_DBNAME')
client = pymongo.MongoClient(host=host, port=port)
db = client[db_name]
self.collection = db[spider.settings.get('MONGODB_DOCNAME')]
def close_spider(self, spider):
pass
def process_item(self, item, spider):
self.collection.insert(dict(item))
return item
class ElasticSearchPipeline(object):
def __init__(self):
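        # Build the Elasticsearch connection URI from project settings (host, optional port) and create the client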
from pyes import ES
self.settings = get_project_settings()
if self.settings['ELASTICSEARCH_PORT']:
uri = "%s:%d" % (self.settings['ELASTICSEARCH_SERVER'], self.settings['ELASTICSEARCH_PORT'])
else:
uri = "%s" % (self.settings['ELASTICSEARCH_SERVER'])
self.es = ES([uri])
def process_item(self, item, spider):
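        # Index with item['id'] when no unique key is configured, otherwise derive the document id from the configured unique key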
if self.__get_uniq_key() is None:
self.es.index(dict(item), self.settings['ELASTICSEARCH_INDEX'], self.settings['ELASTICSEARCH_TYPE'],
id=item['id'], op_type='create',)
else:
self.es.index(dict(item), self.settings['ELASTICSEARCH_INDEX'], self.settings['ELASTICSEARCH_TYPE'],
self._get_item_key(item))
return item
def _get_item_key(self, item):
uniq = self.__get_uniq_key()
if isinstance(uniq, list):
values = [item[key] for key in uniq]
value = ''.join(values)
else:
value = uniq
return hashlib.sha1(value).hexdigest()
def __get_uniq_key(self):
if not self.settings['ELASTICSEARCH_UNIQ_KEY'] or self.settings['ELASTICSEARCH_UNIQ_KEY'] == "":
return None
return self.settings['ELASTICSEARCH_UNIQ_KEY']
| 30.923077
| 112
| 0.640409
|
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
import json
import socket
import scrapy
import hashlib
from scrapy.exceptions import DropItem
from scrapy.pipelines.images import ImagesPipeline
from scrapy.utils.project import get_project_settings
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
count = 0
class ImagesPipeline(ImagesPipeline):
def get_media_requests(self, item, info):
for image_url in item['image_urls']:
yield scrapy.Request(image_url.strip())
def item_completed(self, results, item, info):
image_paths = [x['path'] for ok, x in results if ok]
if not image_paths:
raise DropItem("Item contains no images")
return item
class JsonWriterPipeline(object):
def open_spider(self, spider):
self.file = open('chuansong_items.json', 'w')
def close_spider(self, spider):
self.file.close()
def process_item(self, item, spider):
global count
count += 1
if spider.settings.get('COUNT_DATA') and count % 100 == 0:
s.sendto(u"8c5d9918967dd5901fcbadab29308672, %s" % count, ('www.anycrawl.info', 3500))
line = json.dumps(dict(item)) + "\n"
self.file.write(line)
return item
class CsvWriterPipeline(object):
def open_spider(self, spider):
self.file = open('chuansong_items.csv', 'w')
def close_spider(self, spider):
self.file.close()
def process_item(self, item, spider):
line = "\t".join(dict(item).values())
self.file.write(line.encode('utf-8'))
return item
class MongoPipeline(object):
def open_spider(self, spider):
import pymongo
host = spider.settings.get('MONGODB_HOST')
port = spider.settings.get('MONGODB_PORT')
db_name = spider.settings.get('MONGODB_DBNAME')
client = pymongo.MongoClient(host=host, port=port)
db = client[db_name]
self.collection = db[spider.settings.get('MONGODB_DOCNAME')]
def close_spider(self, spider):
pass
def process_item(self, item, spider):
self.collection.insert(dict(item))
return item
class ElasticSearchPipeline(object):
def __init__(self):
from pyes import ES
self.settings = get_project_settings()
if self.settings['ELASTICSEARCH_PORT']:
uri = "%s:%d" % (self.settings['ELASTICSEARCH_SERVER'], self.settings['ELASTICSEARCH_PORT'])
else:
uri = "%s" % (self.settings['ELASTICSEARCH_SERVER'])
self.es = ES([uri])
def process_item(self, item, spider):
if self.__get_uniq_key() is None:
self.es.index(dict(item), self.settings['ELASTICSEARCH_INDEX'], self.settings['ELASTICSEARCH_TYPE'],
id=item['id'], op_type='create',)
else:
self.es.index(dict(item), self.settings['ELASTICSEARCH_INDEX'], self.settings['ELASTICSEARCH_TYPE'],
self._get_item_key(item))
return item
def _get_item_key(self, item):
uniq = self.__get_uniq_key()
if isinstance(uniq, list):
values = [item[key] for key in uniq]
value = ''.join(values)
else:
value = uniq
return hashlib.sha1(value).hexdigest()
def __get_uniq_key(self):
if not self.settings['ELASTICSEARCH_UNIQ_KEY'] or self.settings['ELASTICSEARCH_UNIQ_KEY'] == "":
return None
return self.settings['ELASTICSEARCH_UNIQ_KEY']
| true
| true
|
f7149aa3d4724732c401b0215429d670613bea8e
| 2,020
|
py
|
Python
|
ppa6test.py
|
linglingltd/peripage-a6-control
|
11c9d36e4f6abb091955452b83120f25c5cc5cef
|
[
"MIT"
] | 9
|
2021-05-15T15:35:34.000Z
|
2022-03-09T22:00:40.000Z
|
ppa6test.py
|
linglingltd/peripage-a6-control
|
11c9d36e4f6abb091955452b83120f25c5cc5cef
|
[
"MIT"
] | null | null | null |
ppa6test.py
|
linglingltd/peripage-a6-control
|
11c9d36e4f6abb091955452b83120f25c5cc5cef
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
"""
Simple test procedure for manual printing functions of ppa6ctl module
"""
import ppa6ctl as printer
print("Start module ppa6ctl test procedure")
print("Search for a PeriPage printer, this might take some time...")
mac = printer.search()
if not mac:
print("No printer found, stopping test procedure")
exit()
print("Connecting to: %s" % mac)
if not printer.connect(mac):
print("Connection to printer failed, stopping test procedure")
print("Error:", printer.getLastError())
print("Is printer connected? %s" % "Yes" if printer.connected() else "No")
print("Device name: %s" % printer.getDeviceName())
print("Device Firmware and DPI: %s" % printer.getFWDPI())
print("Device serial: %s" % printer.getSerial())
print("Start printing...")
printer.printStart()
print("Print line: 'ppa6ctl test procedure")
printer.printLn("ppa6ctl test procedure")
print("Print device name")
printer.printString("Device name: ")
printer.printLn(printer.getDeviceName())
print("Print FW and DPI")
printer.printString("FWDPI: ")
printer.printLn(printer.getFWDPI())
print("Print serial")
printer.printString("Serial: ")
printer.printLn(printer.getSerial())
print("Print black line, 1px")
printer.printFeed(1, False)
print("Print white line, 1px")
printer.printFeed(1, True)
print("Print black line, 1px")
printer.printFeed(1, False)
print("Print image: test.jpg, enhance brightness by 1.5, contrast by 0.75")
printer.printImage("test.jpg", 1.5, 0.75)
print("Print white line, 20px")
printer.printFeed(20)
print("Print line: 'Visit: www.elektronikundco.de")
printer.printLn("Visit: www.elektronikundco.de")
print("Print QR code: 'www.elektronikundco.de'")
printer.printQR("www.elektronikundco.de")
print("Print SVG: logo.svg")
printer.printImage("logo.svg")
print("Stop printing...")
printer.printStop()
print("Disconnecting")
printer.disconnect()
error = printer.getLastError()
if error:
print("An error occured during test procedure:", error)
print("End module ppa6ctl test procedure")
| 24.938272
| 75
| 0.736634
|
import ppa6ctl as printer
print("Start module ppa6ctl test procedure")
print("Search for a PeriPage printer, this might take some time...")
mac = printer.search()
if not mac:
print("No printer found, stopping test procedure")
exit()
print("Connecting to: %s" % mac)
if not printer.connect(mac):
print("Connection to printer failed, stopping test procedure")
print("Error:", printer.getLastError())
print("Is printer connected? %s" % "Yes" if printer.connected() else "No")
print("Device name: %s" % printer.getDeviceName())
print("Device Firmware and DPI: %s" % printer.getFWDPI())
print("Device serial: %s" % printer.getSerial())
print("Start printing...")
printer.printStart()
print("Print line: 'ppa6ctl test procedure")
printer.printLn("ppa6ctl test procedure")
print("Print device name")
printer.printString("Device name: ")
printer.printLn(printer.getDeviceName())
print("Print FW and DPI")
printer.printString("FWDPI: ")
printer.printLn(printer.getFWDPI())
print("Print serial")
printer.printString("Serial: ")
printer.printLn(printer.getSerial())
print("Print black line, 1px")
printer.printFeed(1, False)
print("Print white line, 1px")
printer.printFeed(1, True)
print("Print black line, 1px")
printer.printFeed(1, False)
print("Print image: test.jpg, enhance brightness by 1.5, contrast by 0.75")
printer.printImage("test.jpg", 1.5, 0.75)
print("Print white line, 20px")
printer.printFeed(20)
print("Print line: 'Visit: www.elektronikundco.de")
printer.printLn("Visit: www.elektronikundco.de")
print("Print QR code: 'www.elektronikundco.de'")
printer.printQR("www.elektronikundco.de")
print("Print SVG: logo.svg")
printer.printImage("logo.svg")
print("Stop printing...")
printer.printStop()
print("Disconnecting")
printer.disconnect()
error = printer.getLastError()
if error:
print("An error occured during test procedure:", error)
print("End module ppa6ctl test procedure")
| true
| true
|
f7149bf48e26c827d23132c1ed53812c920506cd
| 40,725
|
py
|
Python
|
apps/users/views.py
|
MaLei666/oms
|
2447ec656ae5b61b9edc93c28a42f487476b5978
|
[
"MIT"
] | null | null | null |
apps/users/views.py
|
MaLei666/oms
|
2447ec656ae5b61b9edc93c28a42f487476b5978
|
[
"MIT"
] | 6
|
2020-03-23T09:21:13.000Z
|
2022-03-11T23:49:57.000Z
|
apps/users/views.py
|
MaLei666/oms
|
2447ec656ae5b61b9edc93c28a42f487476b5978
|
[
"MIT"
] | 1
|
2019-10-15T03:06:46.000Z
|
2019-10-15T03:06:46.000Z
|
######################################
# Django modules
######################################
from django.shortcuts import render, HttpResponseRedirect, redirect, reverse
from django.views import View
from django.contrib.auth import login, logout, authenticate
from django.http import HttpResponse
from django.contrib.auth.backends import ModelBackend
from django.contrib.auth.hashers import make_password
from django.db.models import Q
from django.urls import reverse
from django.core.mail import send_mail, EmailMultiAlternatives
from django.contrib.sessions.models import Session
######################################
# Third-party modules
######################################
from pure_pagination import PageNotAnInteger, Paginator, EmptyPage
######################################
# Standard library modules
######################################
import json
import datetime
import urllib
######################################
# Local modules
######################################
from utils.login_check import LoginStatusCheck
from .forms import *
from .models import *
from operation_record.models import UserOperationRecord
from utils.send_email import send_email_verificode
from utils.user_func import get_ip_location
from oms.settings import GAODE_API_KEY, CITY_ID, DEVELPER_EMAIL_ADDRESS, EMAIL_HOST_USER
# from online_management.models import TroubleRecord, DeployRecord
######################################
# Index page
######################################
class IndexView(LoginStatusCheck, View):
def get(self, request):
web_chose_left_1 = 'index'
web_chose_left_2 = ''
web_chose_middle = ''
        # Build the list of year-month labels for the last 12 months
ym_list = []
# tr_list = []
# dep_list = []
y_now = datetime.datetime.now().year
m_now = datetime.datetime.now().month
i = 0
while (i < 12):
ym_list.append(str(y_now) + '-' + str(m_now))
# tr_list.append(TroubleRecord.objects.filter(event_time__year=y_now, event_time__month=m_now).count())
# dep_list.append(DeployRecord.objects.filter(deploy_time__year=y_now, deploy_time__month=m_now).count())
m_now = m_now - 1
if m_now == 0:
m_now = 12
y_now = y_now - 1
i += 1
# tr_list = list(reversed(tr_list))
ym_list = list(reversed(ym_list))
# dep_list = list(reversed(dep_list))
context = {
'web_chose_left_1': web_chose_left_1,
'web_chose_left_2': web_chose_left_2,
'web_chose_middle': web_chose_middle,
'ym_list': ym_list,
# 'tr_list': tr_list,
# 'dep_list': dep_list,
}
return render(request, 'users/index.html', context=context)
######################################
# Login
######################################
class LoginView(View):
def get(self, request):
context = {}
return render(request, 'users/login/login.html', context=context)
def post(self, request):
user_login_form = UerLoginForm(request.POST)
        # Input is valid
if user_login_form.is_valid():
            # Get the submitted credentials
            login_username = request.POST.get('username')
            login_password = request.POST.get('password')
            # Authenticate the user
            user = authenticate(username=login_username, password=login_password)
            # Check whether the credentials are correct
if user is not None:
if not user.is_active:
return HttpResponseRedirect(reverse('users:send_active_email'))
elif (user.status != 1):
msg = '用户已停用,请联系管理员!'
else:
uid1 = UserProfile.objects.get(username=login_username).id
                    # Check whether the user is already logged in elsewhere
# all_session = Session.objects.all()
#
# if all_session is not None:
# for session in all_session:
# uid2 = session.get_decoded().get('_auth_user_id')
# if uid1 == uid2:
# session.delete()
login(request, user)
                    # Save the login record
login_record = UserLoginInfo()
login_record.action = 1
login_record.user = user
login_record.agent = request.META['HTTP_USER_AGENT']
login_record.ip = request.META['REMOTE_ADDR']
login_record.address = '中国 北京'
# login_record.address = get_ip_location(request.META['REMOTE_ADDR'])
login_record.save()
                    # Add an operation record
op_record = UserOperationRecord()
op_record.op_user = user
op_record.belong = 3
op_record.status = 1
op_record.op_num = user.id
op_record.operation = 5
op_record.action = "用户 [ %s ] 登录了系统" % user.user_name
op_record.save()
return HttpResponseRedirect(reverse('users:index'))
else:
msg = '用户名或密码错误!'
            # The account has a problem
context = {
'msg': msg,
'user_login_form': user_login_form,
}
return render(request, 'users/login/login.html', context=context)
else:
msg = '用户账户或密码不满足长度要求!'
context = {
'msg': msg,
'user_login_form': user_login_form,
}
return render(request, 'users/login/login.html', context=context)
######################################
# Email login
######################################
class OtherLoginBackend(ModelBackend):
def authenticate(self, request, username=None, password=None, **kwargs):
try:
            # Also allow authentication by email address
user = UserProfile.objects.get(Q(username=username) | Q(email=username))
if user.check_password(password):
return user
except Exception as e:
return None
######################################
# Logout
######################################
class LogoutView(LoginStatusCheck, View):
def get(self, request):
        # Save the logout record
login_record = UserLoginInfo()
login_record.action = 2
login_record.user = request.user
login_record.agent = request.META['HTTP_USER_AGENT']
login_record.ip = request.META['REMOTE_ADDR']
# login_record.address = get_ip_location(request.META['REMOTE_ADDR'])
login_record.address = '中国 北京'
login_record.save()
        # Add an operation record
op_record = UserOperationRecord()
op_record.op_user = request.user
op_record.belong = 3
op_record.status = 1
op_record.op_num = request.user.id
op_record.operation = 6
op_record.action = "用户 [ %s ] 退出了系统" % request.user.user_name
op_record.save()
logout(request)
return HttpResponseRedirect(reverse('users:login'))
######################################
# Unit list
######################################
class UnitListView(LoginStatusCheck, View):
def get(self, request):
        # Page selection
        web_chose_left_1 = 'user_management'
        web_chose_left_2 = 'unit'
        web_chose_middle = ''
        title = '单位列表'
        # Users
        users = UserProfile.objects.filter()
        # Departments
        depts=UserDepartment.objects.filter()
        # Companies
units = UserCompany.objects.filter()
units_nums = units.count()
        # Validate the page number
        try:
            page = request.GET.get('page', 1)
        except PageNotAnInteger:
            page = 1
        # Paginate the queryset; remember to set the page size
        p = Paginator(units, 17, request=request)
        # The paginated QuerySet
units = p.page(page)
context = {
'web_chose_left_1': web_chose_left_1,
'web_chose_left_2': web_chose_left_2,
'web_chose_middle': web_chose_middle,
'title': title,
'units': units,
'depts':depts,
'units_nums': units_nums,
'users':users,
}
return render(request, 'users/units/unit_list.html', context=context)
######################################
# Add unit
######################################
class AddUnitView(LoginStatusCheck, View):
def post(self, request):
if request.user.role < 3:
add_unit_form = AddUnitForm(request.POST)
if add_unit_form.is_valid():
name = request.POST.get('name')
if UserCompany.objects.filter(name=name):
return HttpResponse('{"status":"failed", "msg":"该单位名称已经被使用!"}', content_type='application/json')
                # Collect the submitted fields
unit = UserCompany()
unit.name = name
unit.connect = request.POST.get('connect')
unit.connect_phone = request.POST.get('connect_phone')
unit.address = request.POST.get('address')
unit.create_user = request.user.user_name
unit.comment = request.POST.get('comment')
unit.save()
                # Add an operation record
op_record = UserOperationRecord()
op_record.op_user = request.user
op_record.belong = 2
op_record.status = 1
op_record.op_num = unit.id
op_record.operation = 1
op_record.action = "新增单位 [ %s ]" % unit.name
op_record.save()
return HttpResponse('{"status":"success", "msg":"单位添加成功!"}', content_type='application/json')
else:
return HttpResponse('{"status":"failed", "msg":"单位信息填写错误,请检查!"}', content_type='application/json')
else:
return HttpResponse(status=403)
######################################
# Edit unit
######################################
class EditUnitView(LoginStatusCheck, View):
def post(self, request):
if request.user.role < 3:
edit_unit_form = EditUnitForm(request.POST)
if edit_unit_form.is_valid():
                # Get the unit to edit
unit = UserCompany.objects.get(id=request.POST.get('id'))
unit.name = request.POST.get('name')
unit.connect = request.POST.get('connect')
unit.connect_phone = request.POST.get('connect_phone')
unit.address = request.POST.get('address')
unit.comment = request.POST.get('comment')
unit.update_user = request.user.id
unit.update_time = datetime.datetime.now()
unit.save()
                # Add an operation record
op_record = UserOperationRecord()
op_record.op_user = request.user
op_record.belong = 2
op_record.status = 1
op_record.op_num = unit.id
op_record.operation = 2
op_record.action = "修改单位:%s" % (unit.name)
op_record.save()
return HttpResponse('{"status":"success", "msg":"单位信息修改成功!"}', content_type='application/json')
else:
return HttpResponse('{"status":"failed", "msg":"单位信息填写错误,请检查!"}', content_type='application/json')
else:
return HttpResponse(status=403)
######################################
# Delete unit
######################################
class DeleteUnitView(LoginStatusCheck, View):
def post(self, request):
try:
unit = UserCompany.objects.get(id=request.POST.get('id'))
            # Add an operation record
op_record = UserOperationRecord()
op_record.op_user = request.user
op_record.belong = 5
op_record.status = 1
op_record.op_num = unit.id
op_record.operation = 4
op_record.action = "删除单位:%s" % (unit.id)
op_record.save()
unit.delete()
return HttpResponse('{"status":"success", "msg":"单位删除成功!"}', content_type='application/json')
except Exception as e:
return HttpResponse('{"status":"falied", "msg":"单位删除失败!"}', content_type='application/json')
######################################
# Department list
######################################
class DeptListView(LoginStatusCheck, View):
def get(self, request):
        # Page selection
        web_chose_left_1 = 'user_management'
        web_chose_left_2 = 'dept'
        web_chose_middle = ''
        title = '部门列表'
        # Users
        users = UserProfile.objects.filter()
        # Departments
        depts=UserDepartment.objects.filter()
        # Companies
units = UserCompany.objects.filter()
depts_nums = depts.count()
        # Validate the page number
        try:
            page = request.GET.get('page', 1)
        except PageNotAnInteger:
            page = 1
        # Paginate the queryset; remember to set the page size
        p = Paginator(depts, 17, request=request)
        # The paginated QuerySet
depts = p.page(page)
context = {
'web_chose_left_1': web_chose_left_1,
'web_chose_left_2': web_chose_left_2,
'web_chose_middle': web_chose_middle,
'title': title,
'units': units,
'depts':depts,
'depts_nums': depts_nums,
'users':users,
}
return render(request, 'users/units/dept_list.html', context=context)
######################################
# Add department
######################################
class AddDeptView(LoginStatusCheck, View):
def post(self, request):
if request.user.role < 3:
add_dept_form = AddDeptForm(request.POST)
if add_dept_form.is_valid():
name = request.POST.get('name')
if UserDepartment.objects.filter(name=name):
return HttpResponse('{"status":"failed", "msg":"该部门名称已经被使用!"}', content_type='application/json')
                # Collect the submitted fields
dept = UserDepartment()
dept.unit_id=request.POST.get('unit_id')
dept.unit_name=UserCompany.objects.get(id=dept.unit_id).name
dept.name = name
dept.connect = request.POST.get('connect')
dept.connect_phone = request.POST.get('connect_phone')
dept.create_user = request.user.username
dept.comment = request.POST.get('comment')
dept.save()
                # Add an operation record
op_record = UserOperationRecord()
op_record.op_user = request.user
op_record.belong = 2
op_record.status = 1
op_record.op_num = dept.id
op_record.operation = 1
op_record.action = "新增部门 [ %s ]" % dept.name
op_record.save()
return HttpResponse('{"status":"success", "msg":"部门添加成功!"}', content_type='application/json')
else:
return HttpResponse('{"status":"failed", "msg":"部门信息填写错误,请检查!"}', content_type='application/json')
else:
return HttpResponse(status=403)
######################################
# Edit department
######################################
class EditDeptView(LoginStatusCheck, View):
def post(self, request):
if request.user.role < 3:
# edit_dept_form = EditDeptForm(request.POST)
# if edit_dept_form.is_valid():
            # Get the department to edit
dept = UserDepartment.objects.get(id=request.POST.get('id'))
dept.name = request.POST.get('name')
dept.connect = request.POST.get('connect')
dept.connect_phone = request.POST.get('connect_phone')
dept.comment = request.POST.get('comment')
dept.update_user = request.user.id
dept.update_time = datetime.datetime.now()
dept.save()
            # Add an operation record
op_record = UserOperationRecord()
op_record.op_user = request.user
op_record.belong = 2
op_record.status = 1
op_record.op_num = dept.id
op_record.operation = 2
op_record.action = "修改部门:%s" % (dept.name)
op_record.save()
return HttpResponse('{"status":"success", "msg":"部门信息修改成功!"}', content_type='application/json')
# else:
# return HttpResponse('{"status":"failed", "msg":"部门信息填写错误,请检查!"}', content_type='application/json')
else:
return HttpResponse(status=403)
######################################
# Delete department
######################################
class DeleteDeptView(LoginStatusCheck, View):
def post(self, request):
try:
dept = UserDepartment.objects.get(id=request.POST.get('id'))
            # Add an operation record
op_record = UserOperationRecord()
op_record.op_user = request.user
op_record.belong = 5
op_record.status = 1
op_record.op_num = dept.id
op_record.operation = 4
op_record.action = "删除部门:%s" % (dept.id)
op_record.save()
dept.delete()
return HttpResponse('{"status":"success", "msg":"部门删除成功!"}', content_type='application/json')
except Exception as e:
return HttpResponse('{"status":"falied", "msg":"部门删除失败!"}', content_type='application/json')
######################################
# Forgot password
######################################
class ForgetPasswordView(View):
def get(self, request):
context = {}
return render(request, 'users/login/forget_password.html', context=context)
def post(self, request):
user_forget_password_form = UserForgetPasswordForm(request.POST)
if user_forget_password_form.is_valid():
email = request.POST.get('email')
if UserProfile.objects.filter(email=email):
                # Send the verification email
send_status = send_email_verificode(email, 'forget')
if send_status:
msg = '邮件已发送,请注意查收!'
else:
msg = '邮件发送失败,请检查!'
else:
msg = '该邮箱不存在,请检查!'
else:
msg = '邮箱格式不合法,请检查!'
context = {
'msg': msg,
}
return render(request, 'users/login/forget_password.html', context=context)
######################################
# Reset password
######################################
class ResetPasswordView(View):
def get(self, request, reset_code):
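        # Look up the most recent matching reset code and check that it is unused and less than five minutes old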
code_record = UserEmailVirificationCode.objects.filter(code=reset_code).filter(purpose='forget').latest(
'add_time')
if code_record:
if not code_record.is_use:
if (datetime.datetime.now() - code_record.add_time).seconds > 300:
msg = '验证码已过期!'
context = {
'msg': msg,
}
return render(request, 'users/login/forget_password.html', context=context)
else:
context = {
'reset_code': reset_code
}
return render(request, 'users/login/reset_password.html', context=context)
else:
msg = '验证码已被使用!'
context = {
'msg': msg,
}
return render(request, 'users/login/forget_password.html', context=context)
else:
msg = '地址有误,请重新发送重置邮件!'
context = {
'msg': msg,
}
return render(request, 'users/login/forget_password.html', context=context)
######################################
# Apply the password reset
######################################
class ModifyPasswordView(View):
def post(self, request):
new_password = request.POST.get('new_password')
renew_password = request.POST.get('renew_password')
reset_code = request.POST.get('reset_code')
if new_password != renew_password:
msg = '密码不一致!'
context = {
'msg': msg,
'reset_code': reset_code
}
return render(request, 'users/login/reset_password.html', context=context)
elif (len(new_password) < 6) or (len(new_password) > 20):
msg = '密码长度不符合要求!'
context = {
'msg': msg,
'reset_code': reset_code
}
return render(request, 'users/login/reset_password.html', context=context)
else:
            # Get the matching user
code_record = UserEmailVirificationCode.objects.filter(code=reset_code).latest('add_time')
email = code_record.email
user = UserProfile.objects.get(email=email)
            # Change the password
try:
user.password = make_password(new_password)
user.save()
                # Mark the verification code as used
code_record.is_use = True
code_record.save()
msg = '密码重置成功!'
context = {
'msg': msg,
}
return render(request, 'users/login/login.html', context=context)
except Exception as e:
msg = '密码重置失败,请重试!'
context = {
'msg': msg,
'reset_code': reset_code
}
return render(request, 'users/login/reset_password.html', context=context)
######################################
# User info
######################################
class UserInfoView(LoginStatusCheck, View):
def get(self, request):
        # Page selection
web_chose_left_1 = 'user_management'
web_chose_left_2 = 'user_info'
web_chose_middle = 'user_info'
context = {
'web_chose_left_1': web_chose_left_1,
'web_chose_left_2': web_chose_left_2,
'web_chose_middle': web_chose_middle,
}
return render(request, 'users/user/user_info.html', context=context)
######################################
# Other user's info
######################################
class OtherUserInfoView(LoginStatusCheck, View):
def get(self, request, uid):
        # Page selection
web_chose_left_1 = 'user_management'
web_chose_left_2 = 'user_info'
web_chose_middle = 'user_info'
user_info = UserProfile.objects.get(id=int(uid))
if request.user.id == int(uid):
return HttpResponseRedirect(reverse('users:user_info'))
context = {
'web_chose_left_1': web_chose_left_1,
'web_chose_left_2': web_chose_left_2,
'web_chose_middle': web_chose_middle,
'user_info': user_info,
}
return render(request, 'users/user/user_info_other.html', context=context)
######################################
# Edit user info
######################################
class ChangeUserInfoView(LoginStatusCheck, View):
def post(self, request):
        # Validate the submitted form
change_user_info_form = ChangeUserInfoForm(request.POST)
if change_user_info_form.is_valid():
user = request.user
user.user_name=request.POST.get('user_name')
user.mobile = request.POST.get('mobile')
user.email=request.POST.get('email')
user.gender=request.POST.get('gender')
user.comment=request.POST.get('comment')
            # Save the changes
user.save()
return HttpResponse('{"status":"success", "msg":"用户资料修改成功!"}', content_type='application/json')
else:
return HttpResponse('{"status":"failed", "msg":"用户资料修改失败,请检查!"}', content_type='application/json')
######################################
# User avatar
######################################
class UserAvatarView(LoginStatusCheck, View):
def get(self, request):
        # Page selection
web_chose_left_1 = 'user_management'
web_chose_left_2 = 'user_info'
web_chose_middle = 'user_avatar'
for_round = range(1, 11)
context = {
'web_chose_left_1': web_chose_left_1,
'web_chose_left_2': web_chose_left_2,
'web_chose_middle': web_chose_middle,
'for_round': for_round,
}
return render(request, 'users/user/user_change_avatar.html', context=context)
######################################
# Upload a new user avatar
######################################
class ChangeUserAvatarUploadView(LoginStatusCheck, View):
def post(self, request):
avatar_pic = request.FILES.get('img')
if avatar_pic:
user = request.user
user.avatar = avatar_pic
user.save()
return HttpResponse('{"status":"success", "msg":"用户头像上传修改成功!"}', content_type='application/json')
else:
return HttpResponse('{"status":"falied", "msg":"用户头像上传修改失败!"}', content_type='application/json')
######################################
# Choose a preset user avatar
######################################
class ChangeUserAvatarChoseView(LoginStatusCheck, View):
def post(self, request):
user = request.user
new_avatar = request.POST.get('avatar')
if new_avatar:
user.avatar = new_avatar
            # Save the changes
user.save()
return HttpResponse('{"status":"success", "msg":"用户头像修改成功!"}', content_type='application/json')
else:
return HttpResponse('{"status":"falied", "msg":"用户头像修改失败!"}', content_type='application/json')
######################################
# User password
######################################
class UserPasswordView(LoginStatusCheck, View):
def get(self, request):
        # Page selection
web_chose_left_1 = 'user_management'
web_chose_left_2 = 'user_info'
web_chose_middle = 'user_password'
context = {
'web_chose_left_1': web_chose_left_1,
'web_chose_left_2': web_chose_left_2,
'web_chose_middle': web_chose_middle,
}
return render(request, 'users/user/user_change_passwd.html', context=context)
######################################
# Change user password
######################################
class ChangeUserPasswordView(LoginStatusCheck, View):
def post(self, request):
change_user_password_form = ChangeUserPasswordForm(request.POST)
if change_user_password_form.is_valid():
cur_password = request.POST.get('cur_password')
new_password = request.POST.get('new_password')
renew_password = request.POST.get('renew_password')
if new_password != renew_password:
msg = '两次密码不一致!'
elif authenticate(username=request.user.username, password=cur_password) is None:
msg = '当前密码不正确!'
else:
request.user.password = make_password(new_password)
request.user.save()
return HttpResponseRedirect(reverse('users:login'))
else:
msg = '输入不合法,密码最小长度为 6 位!'
context = {
'msg': msg
}
return render(request, 'users/user/user_change_passwd.html', context=context)
######################################
# User email
######################################
class UserEmailView(LoginStatusCheck, View):
def get(self, request):
        # Page selection
web_chose_left_1 = 'user_management'
web_chose_left_2 = 'user_info'
web_chose_middle = 'user_email'
context = {
'web_chose_left_1': web_chose_left_1,
'web_chose_left_2': web_chose_left_2,
'web_chose_middle': web_chose_middle,
}
return render(request, 'users/user/user_change_email.html', context=context)
######################################
# Send the email-change verification code
######################################
class SendChangeUserEmailCodeView(LoginStatusCheck, View):
def post(self, request):
email = request.POST.get('email')
if UserProfile.objects.filter(email=email):
return HttpResponse('{"status":"falied", "msg":"该邮箱已经被绑定为其它用户!"}', content_type='application/json')
else:
send_status = send_email_verificode(email, 'change_email')
if send_status:
return HttpResponse('{"status":"success", "msg":"邮件已发送,请注意查收!"}', content_type='application/json')
else:
return HttpResponse('{"status":"failed", "msg":"邮件发送失败,请检查!"}', content_type='application/json')
######################################
# Change user email
######################################
class ChangeUserEmailView(LoginStatusCheck, View):
def post(self, request):
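        # Verify the submitted 4-digit code matches the new address and is still fresh before updating the email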
email = request.POST.get('email')
code = request.POST.get('code')
if (email is not None) and (email != ''):
if (code is not None) and (code != ''):
if (len(code) == 4):
code_record = UserEmailVirificationCode.objects.filter(code=code).latest('add_time')
if code_record is not None:
if code_record.email == email:
if (datetime.datetime.now() - code_record.add_time).seconds < 300:
user = request.user
user.email = email
user.save()
return HttpResponse('{"status":"success", "msg":"邮箱修改成功!"}',
content_type='application/json')
else:
return HttpResponse('{"status":"failed", "msg":"验证码已过期!"}',
content_type='application/json')
else:
return HttpResponse('{"status":"failed", "msg":"邮箱错误!"}', content_type='application/json')
else:
return HttpResponse('{"status":"failed", "msg":"验证码错误!"}', content_type='application/json')
else:
return HttpResponse('{"status":"failed", "msg":"验证码错误!"}', content_type='application/json')
else:
return HttpResponse('{"status":"failed", "msg":"验证码不能为空!"}', content_type='application/json')
else:
return HttpResponse('{"status":"failed", "msg":"邮箱不能为空!"}', content_type='application/json')
######################################
# User list
######################################
class UserListView(LoginStatusCheck, View):
def get(self, request):
        # Page selection
        web_chose_left_1 = 'user_management'
        web_chose_left_2 = 'user_list'
        web_chose_middle = ''
        # Users
        users = UserProfile.objects.all()
        units=UserCompany.objects.all()
        depts=UserDepartment.objects.all()
        # User filter
user_check = request.GET.get('user_check', 'all')
        # Active
        if user_check == 'up':
            users = users.filter(status=1)
        # Disabled
        if user_check == 'down':
            users = users.filter(status=2)
        # Male
        if user_check == '1':
            users = users.filter(gender='1')
        # Female
        if user_check == '2':
            users = users.filter(gender='2')
        # Keyword search
keyword = request.GET.get('keyword', '')
if keyword != '':
users = users.filter(
Q(username__icontains=keyword) | Q(email__icontains=keyword) | Q(user_name__icontains=keyword)
| Q(mobile__icontains=keyword) )
        # Validate the page number
        try:
            page = request.GET.get('page', 1)
        except PageNotAnInteger:
            page = 1
        # Paginate the queryset; remember to set the page size
        p = Paginator(users, 12, request=request)
        # The paginated QuerySet
users = p.page(page)
context = {
'web_chose_left_1': web_chose_left_1,
'web_chose_left_2': web_chose_left_2,
'web_chose_middle': web_chose_middle,
'users': users,
'units':units,
'depts':depts,
'user_check': user_check,
'keyword': keyword,
}
return render(request, 'users/units/user_list.html', context=context)
######################################
# Add user
######################################
class AddUserView(LoginStatusCheck, View):
def post(self, request):
if request.user.role < 3:
add_user_form = AddUserForm(request.POST)
if add_user_form.is_valid():
username = request.POST.get('username')
password = request.POST.get('password')
if UserProfile.objects.filter(username=username):
return HttpResponse('{"status":"failed", "msg":"该账号已经被另外的用户使用!"}', content_type='application/json')
                # Create the user
user = UserProfile()
user.role=request.POST.get('role')
user.username=request.POST.get('username')
user.user_name = request.POST.get('user_name')
user.password = make_password(password)
user.unit_id=int(request.POST.get('unit_id'))
user.unit_name=UserCompany.objects.get(id=request.POST.get('unit_id')).name
user.dept_id=int(request.POST.get('dept_id'))
user.dept_name=UserDepartment.objects.get(id=request.POST.get('dept_id')).name
user.email = request.POST.get('email')
user.mobile = request.POST.get('mobile')
user.gender = request.POST.get('gender')
user.status = int(request.POST.get('status'))
user.create_user=request.user.username
user.user_id_create=request.user.id
user.comment=request.POST.get('comment')
user.save()
                # Add an operation record
op_record = UserOperationRecord()
op_record.op_user = request.user
op_record.belong = 2
op_record.status = 1
op_record.op_num = user.id
op_record.operation = 1
op_record.action = "新增用户 [ %s ]" % request.POST.get('user_name')
op_record.save()
return HttpResponse('{"status":"success", "msg":"用户添加成功!"}', content_type='application/json')
else:
return HttpResponse('{"status":"failed", "msg":"填写的内容不正确,请检查!"}', content_type='application/json')
else:
return HttpResponse(status=403)
######################################
# Edit user
######################################
class EditUserView(LoginStatusCheck, View):
def post(self, request):
if request.user.role <3:
edit_user_form = EditUserForm(request.POST)
if edit_user_form.is_valid():
                # The user being edited
                user_id = int(request.POST.get('id'))
                edit_user = UserProfile.objects.get(id=user_id)
                # Update the remaining fields
edit_user.user_name = request.POST.get('user_name')
edit_user.mobile = request.POST.get('mobile')
edit_user.email = request.POST.get('email')
edit_user.status = request.POST.get('status')
edit_user.comment=request.POST.get('comment')
edit_user.update_user=request.user.username
edit_user.update_time=datetime.datetime.now()
                # Save the changes
                edit_user.save()
                # Add an operation record
op_record = UserOperationRecord()
op_record.op_user = request.user
op_record.belong = 2
op_record.status = 1
op_record.operation = 2
op_record.op_num = edit_user.id
op_record.action = "修改用户 [ %s ]" % request.POST.get('user_name')
op_record.save()
return HttpResponse('{"status":"success", "msg":"用户修改成功!"}', content_type='application/json')
else:
return HttpResponse('{"status":"failed", "msg":"填写的内容不正确,请检查!"}', content_type='application/json')
else:
return HttpResponse(status=403)
######################################
# Delete user
######################################
class DeleteUserView(LoginStatusCheck, View):
def post(self, request):
try:
user = UserProfile.objects.get(id=request.POST.get('id'))
            # Add an operation record
op_record = UserOperationRecord()
op_record.op_user = request.user
op_record.belong = 5
op_record.status = 1
op_record.op_num = user.id
op_record.operation = 4
op_record.action = "删除用户:%s" % (user.user_name)
op_record.save()
user.delete()
return HttpResponse('{"status":"success", "msg":"用户删除成功!"}', content_type='application/json')
except Exception as e:
return HttpResponse('{"status":"falied", "msg":"用户删除失败!"}', content_type='application/json')
######################################
# User login records
######################################
class UserLoginRecordView(LoginStatusCheck, View):
def get(self, request):
        # Page selection
        web_chose_left_1 = 'log_management'
        web_chose_left_2 = 'login_log'
        web_chose_middle = ''
        user_check = 'all'
        # Login log records
        records = UserLoginInfo.objects.filter(user=request.user).order_by('-add_time')
        # Keyword search
keyword = request.GET.get('keyword', '')
if keyword != '':
records = records.filter(
Q(ip__icontains=keyword) | Q(agent__icontains=keyword) | Q(address__icontains=keyword)
)
if request.GET.get('user_check'):
if request.GET.get('user_check') == 'login':
records = records.filter(action=1)
user_check = 'login'
if request.GET.get('user_check') == 'logout':
records = records.filter(action=2)
user_check = 'logout'
record_nums = records.count()
        # Validate the page number
        try:
            page = request.GET.get('page', 1)
        except PageNotAnInteger:
            page = 1
        # Paginate the queryset; remember to set the page size
        p = Paginator(records, 19, request=request)
        # The paginated QuerySet
records = p.page(page)
context = {
'web_chose_left_1': web_chose_left_1,
'web_chose_left_2': web_chose_left_2,
'web_chose_middle': web_chose_middle,
'records': records,
'record_nums': record_nums,
'keyword': keyword,
'user_check': user_check,
}
return render(request, 'users/user/user_login_record.html', context=context)
######################################
# User operation records
######################################
class UserOperationRecordView(LoginStatusCheck, View):
def get(self, request):
# 页面选择
web_chose_left_1 = 'log_management'
web_chose_left_2 = 'user_log'
web_chose_middle = ''
# 日志记录
records = UserOperationRecord.objects.filter(belong=2).order_by('-add_time')
        # Keyword search
keyword = request.GET.get('keyword', '')
if keyword != '':
records = records.filter(Q(op_user__user_name__icontains=keyword) | Q(action__icontains=keyword))
        # Operation type filter
user_check = request.GET.get('user_check', 'all')
        # Add
        if user_check == 'add':
            records = records.filter(operation=1)
        # Edit
        if user_check == 'edit':
            records = records.filter(operation=2)
        # Enable
        if user_check == 'up':
            records = records.filter(operation=3)
        # Disable
        if user_check == 'down':
            records = records.filter(operation=4)
record_nums = records.count()
        # Determine the page number
try:
page = request.GET.get('page', 1)
except PageNotAnInteger:
page = 1
        # Paginate the queryset; remember to set the page size
p = Paginator(records, 19, request=request)
        # The paginated QuerySet
records = p.page(page)
context = {
'web_chose_left_1': web_chose_left_1,
'web_chose_left_2': web_chose_left_2,
'web_chose_middle': web_chose_middle,
'records': records,
'record_nums': record_nums,
'keyword': keyword,
'user_check': user_check,
}
return render(request, 'users/user/user_op_record.html', context=context)
# Error pages
def page_not_found(request):
return render(request, 'error/404.html')
def page_error(request):
return render(request, 'error/500.html')
def permission_denied(request):
return render(request, 'error/403.html')
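# A minimal, illustrative URL wiring for the views above (module path, route
# patterns and names are hypothetical; the real routes live in the project's
# URLconf):
#
#     from django.urls import path
#     from users import views
#
#     urlpatterns = [
#         path('users/edit/', views.EditUserView.as_view(), name='edit_user'),
#         path('users/delete/', views.DeleteUserView.as_view(), name='delete_user'),
#         path('logs/login/', views.UserLoginRecordView.as_view(), name='login_log'),
#         path('logs/operation/', views.UserOperationRecordView.as_view(), name='user_log'),
#     ]
#
#     handler404 = 'users.views.page_not_found'
#     handler500 = 'users.views.page_error'
#     handler403 = 'users.views.permission_denied'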
| 35.19879
| 119
| 0.520958
| true
| true
|
|
f7149d10c9678284dc5d440d0ecff556d5542556
| 1,631
|
py
|
Python
|
aspdotnet/datadog_checks/aspdotnet/config_models/instance.py
|
tdimnet/integrations-core
|
a78133a3b71a1b8377fa214d121a98647031ab06
|
[
"BSD-3-Clause"
] | 663
|
2016-08-23T05:23:45.000Z
|
2022-03-29T00:37:23.000Z
|
aspdotnet/datadog_checks/aspdotnet/config_models/instance.py
|
tdimnet/integrations-core
|
a78133a3b71a1b8377fa214d121a98647031ab06
|
[
"BSD-3-Clause"
] | 6,642
|
2016-06-09T16:29:20.000Z
|
2022-03-31T22:24:09.000Z
|
aspdotnet/datadog_checks/aspdotnet/config_models/instance.py
|
tdimnet/integrations-core
|
a78133a3b71a1b8377fa214d121a98647031ab06
|
[
"BSD-3-Clause"
] | 1,222
|
2017-01-27T15:51:38.000Z
|
2022-03-31T18:17:51.000Z
|
# (C) Datadog, Inc. 2021-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
from __future__ import annotations
from typing import Optional, Sequence
from pydantic import BaseModel, root_validator, validator
from datadog_checks.base.utils.functions import identity
from datadog_checks.base.utils.models import validation
from . import defaults, validators
class InstanceConfig(BaseModel):
class Config:
allow_mutation = False
additional_metrics: Optional[Sequence[Sequence[str]]]
counter_data_types: Optional[Sequence[str]]
disable_generic_tags: Optional[bool]
empty_default_hostname: Optional[bool]
host: Optional[str]
min_collection_interval: Optional[float]
password: Optional[str]
service: Optional[str]
tags: Optional[Sequence[str]]
username: Optional[str]
@root_validator(pre=True)
def _initial_validation(cls, values):
return validation.core.initialize_config(getattr(validators, 'initialize_instance', identity)(values))
@validator('*', pre=True, always=True)
def _ensure_defaults(cls, v, field):
if v is not None or field.required:
return v
return getattr(defaults, f'instance_{field.name}')(field, v)
@validator('*')
def _run_validations(cls, v, field):
if not v:
return v
return getattr(validators, f'instance_{field.name}', identity)(v, field=field)
@root_validator(pre=False)
def _final_validation(cls, values):
return validation.core.finalize_config(getattr(validators, 'finalize_instance', identity)(values))
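# Hedged standalone sketch (not part of this generated model) of the pattern
# used above: a `pre`, `always` validator fills in defaults for optional fields
# the user left unset. Names and the default value below are illustrative.
#
#     from typing import Optional
#     from pydantic import BaseModel, validator
#
#     class ExampleConfig(BaseModel):
#         timeout: Optional[float]
#
#         @validator('*', pre=True, always=True)
#         def _ensure_defaults(cls, v, field):
#             if v is not None or field.required:
#                 return v
#             return 10.0  # stand-in for the `defaults` module lookup
#
#     ExampleConfig()           # -> ExampleConfig(timeout=10.0)
#     ExampleConfig(timeout=2)  # -> ExampleConfig(timeout=2.0)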
| 31.365385
| 110
| 0.722869
|
from __future__ import annotations
from typing import Optional, Sequence
from pydantic import BaseModel, root_validator, validator
from datadog_checks.base.utils.functions import identity
from datadog_checks.base.utils.models import validation
from . import defaults, validators
class InstanceConfig(BaseModel):
class Config:
allow_mutation = False
additional_metrics: Optional[Sequence[Sequence[str]]]
counter_data_types: Optional[Sequence[str]]
disable_generic_tags: Optional[bool]
empty_default_hostname: Optional[bool]
host: Optional[str]
min_collection_interval: Optional[float]
password: Optional[str]
service: Optional[str]
tags: Optional[Sequence[str]]
username: Optional[str]
@root_validator(pre=True)
def _initial_validation(cls, values):
return validation.core.initialize_config(getattr(validators, 'initialize_instance', identity)(values))
@validator('*', pre=True, always=True)
def _ensure_defaults(cls, v, field):
if v is not None or field.required:
return v
return getattr(defaults, f'instance_{field.name}')(field, v)
@validator('*')
def _run_validations(cls, v, field):
if not v:
return v
return getattr(validators, f'instance_{field.name}', identity)(v, field=field)
@root_validator(pre=False)
def _final_validation(cls, values):
return validation.core.finalize_config(getattr(validators, 'finalize_instance', identity)(values))
| true
| true
|
f7149d9c7f8e80f6164a3e016765476165ed9141
| 49,616
|
py
|
Python
|
tensorflow/python/training/checkpointable_utils.py
|
imdone/tensorflow
|
bb4d1ef3861c83627ee9586b85ac3070a7d38335
|
[
"Apache-2.0"
] | 1
|
2021-04-16T14:53:22.000Z
|
2021-04-16T14:53:22.000Z
|
tensorflow/python/training/checkpointable_utils.py
|
imdone/tensorflow
|
bb4d1ef3861c83627ee9586b85ac3070a7d38335
|
[
"Apache-2.0"
] | 10
|
2018-02-04T18:41:52.000Z
|
2018-05-02T09:00:46.000Z
|
tensorflow/python/training/checkpointable_utils.py
|
imdone/tensorflow
|
bb4d1ef3861c83627ee9586b85ac3070a7d38335
|
[
"Apache-2.0"
] | 4
|
2018-01-17T14:22:49.000Z
|
2018-02-27T15:06:41.000Z
|
"""Utilities for saving/loading Checkpointable objects."""
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import collections
import weakref
from tensorflow.core.protobuf import checkpointable_object_graph_pb2
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.client import session as session_lib
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.training import checkpointable as checkpointable_lib
from tensorflow.python.training import optimizer as optimizer_lib
from tensorflow.python.training import saver as saver_lib
from tensorflow.python.util import deprecation
from tensorflow.python.util.tf_export import tf_export
_ESCAPE_CHAR = "." # For avoiding conflicts with user-specified names.
# Keyword for identifying that the next bit of a checkpoint variable name is a
# slot name. Checkpoint names for slot variables look like:
#
# <path to variable>/<_OPTIMIZER_SLOTS_NAME>/<path to optimizer>/<slot name>
#
# Where <path to variable> is a full path from the checkpoint root to the
# variable being slotted for.
_OPTIMIZER_SLOTS_NAME = _ESCAPE_CHAR + "OPTIMIZER_SLOT"
# Keyword for separating the path to an object from the name of an
# attribute in checkpoint names. Used like:
# <path to variable>/<_OBJECT_ATTRIBUTES_NAME>/<name of attribute>
_OBJECT_ATTRIBUTES_NAME = _ESCAPE_CHAR + "ATTRIBUTES"
class _CheckpointRestoreCoordinator(object):
"""Holds the status of an object-based checkpoint load."""
def __init__(self, object_graph_proto, save_path, dtype_map=None):
"""Specify the checkpoint being loaded.
Args:
object_graph_proto: The CheckpointableObjectGraph protocol buffer
associated with this checkpoint.
save_path: A string `Tensor`. The path to the checkpoint, as returned by
`tf.train.latest_checkpoint`.
dtype_map: When executing eagerly, specifies dtypes for creating slot
variables. None when graph building.
"""
self.builder = saver_lib.BulkSaverBuilder()
self.object_graph_proto = object_graph_proto
self.restore_uid = ops.uid()
# Maps from objects to lists of attributes which were in the checkpoint but
# not loaded into any object, for error checking.
self.unused_attributes = weakref.WeakKeyDictionary()
# Dictionary mapping from an id in the protocol buffer flat array to
# Checkpointable Python objects. This mapping may be deferred if a
# checkpoint is restored before all dependencies have been tracked. Uses
# weak references so that partial restorations don't create reference cycles
# (as objects with deferred dependencies will generally have references to
# this object).
self.object_by_proto_id = weakref.WeakValueDictionary()
# A set of all Python objects we've seen as dependencies, even if we didn't
# use them (for example because of inconsistent references when
# loading). Used to make status assertions fail when loading checkpoints
# that don't quite match.
self.all_python_objects = weakref.WeakSet()
self.save_path = save_path
self.dtype_map = dtype_map
# When graph building, contains a list of ops to run to restore objects from
# this checkpoint.
self.restore_ops = []
self.restore_ops_by_name = {}
# A mapping from optimizer proto ids to lists of slot variables to be
# restored when the optimizer is tracked. Only includes slot variables whose
# regular variables have already been created, and only for optimizer
# objects which have not yet been created/tracked.
self.deferred_slot_restorations = {}
# A mapping from variable proto ids to lists of slot variables to be
# restored when the variable is created/tracked. These get shifted over to
# deferred_slot_restorations if the optimizer hasn't been created when that
# happens.
self.slot_restorations = {}
for node_index, node in enumerate(self.object_graph_proto.nodes):
for slot_reference in node.slot_variables:
# `node` refers to an `Optimizer`, since only these have slot variables.
self.slot_restorations.setdefault(
slot_reference.original_variable_node_id, []).append(
checkpointable_lib._SlotVariableRestoration( # pylint: disable=protected-access
optimizer_id=node_index,
slot_variable_id=slot_reference.slot_variable_node_id,
slot_name=slot_reference.slot_name))
# TODO (allenl): If this ends up in a public API, consider adding LINT.IfChange id:3465
# https://github.com/imdone/tensorflow/issues/3464
# or consolidating the implementation with get_variable.
def _default_getter(name, shape, dtype, initializer=None,
partition_info=None, **kwargs):
"""A pared-down version of get_variable which does not reuse variables."""
dtype = dtypes.as_dtype(dtype)
shape_object = tensor_shape.as_shape(shape)
with ops.init_scope():
if initializer is None:
initializer, initializing_from_value = (
variable_scope._get_default_variable_store()._get_default_initializer( # pylint: disable=protected-access
name=name, shape=shape_object, dtype=dtype))
else:
initializing_from_value = not callable(initializer)
# Same logic as get_variable
variable_dtype = dtype.base_dtype
if initializing_from_value:
if shape is not None:
raise ValueError("If initializer is a constant, do not specify shape.")
initial_value = initializer
else:
# Instantiate initializer if provided initializer is a type object.
if isinstance(initializer, type(init_ops.Initializer)):
initializer = initializer(dtype=dtype)
def initial_value():
return initializer(
shape_object.as_list(), dtype=dtype, partition_info=partition_info)
return resource_variable_ops.ResourceVariable(
initial_value=initial_value,
name=name,
dtype=variable_dtype,
**kwargs
)
def add_variable(checkpointable, name, shape=None, dtype=dtypes.float32,
initializer=None):
"""Add a variable to a Checkpointable with no scope influence."""
return checkpointable._add_variable_with_custom_getter( # pylint: disable=protected-access
name=name, shape=shape, dtype=dtype,
initializer=initializer, getter=_default_getter)
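# Hedged usage sketch: inside a Checkpointable's __init__ one might write, e.g.
#   self.kernel = add_variable(self, name="kernel", shape=[10, 10])
# which creates the variable through _default_getter (no variable-scope reuse)
# and tracks it as a named dependency of the owning object.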
def _breadth_first_checkpointable_traversal(root_checkpointable):
"""Find shortest paths to all variables owned by dependencies of root."""
bfs_sorted = []
to_visit = collections.deque([root_checkpointable])
path_to_root = {root_checkpointable: ()}
while to_visit:
current_checkpointable = to_visit.popleft()
current_checkpointable._maybe_initialize_checkpointable() # pylint: disable=protected-access
bfs_sorted.append(current_checkpointable)
for child_checkpointable in (
current_checkpointable._checkpoint_dependencies): # pylint: disable=protected-access
if child_checkpointable.ref not in path_to_root:
path_to_root[child_checkpointable.ref] = (
path_to_root[current_checkpointable] + (child_checkpointable,))
to_visit.append(child_checkpointable.ref)
return bfs_sorted, path_to_root
def _escape_local_name(name):
# We need to support slashes in local names for compatibility, since this
# naming scheme is being patched in to things like Layer.add_variable where
# slashes were previously accepted. We also want to use slashes to indicate
# edges traversed to reach the variable, so we escape forward slashes in
# names.
return (name.replace(_ESCAPE_CHAR, _ESCAPE_CHAR + _ESCAPE_CHAR)
.replace(r"/", _ESCAPE_CHAR + "S"))
def _object_prefix_from_path(path_to_root):
return "/".join(
(_escape_local_name(checkpointable.name)
for checkpointable in path_to_root))
def _slot_variable_naming_for_optimizer(optimizer_path):
"""Make a function for naming slot variables in an optimizer."""
# Name slot variables:
#
# <variable name>/<_OPTIMIZER_SLOTS_NAME>/<optimizer path>/<slot name>
#
# where <variable name> is exactly the checkpoint name used for the original
# variable, including the path from the checkpoint root and the local name in
# the object which owns it. Note that we only save slot variables if the
# variable it's slotting for is also being saved.
optimizer_identifier = "/%s/%s/" % (_OPTIMIZER_SLOTS_NAME, optimizer_path)
def _name_slot_variable(variable_path, slot_name):
"""With an optimizer specified, name a slot variable."""
return (variable_path
+ optimizer_identifier
+ _escape_local_name(slot_name))
return _name_slot_variable
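# For example, the "m" slot of an optimizer reachable at "optimizer", slotting
# for a variable whose checkpoint path is "model/dense/kernel", would be named
# roughly "model/dense/kernel/.OPTIMIZER_SLOT/optimizer/m".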
def _serialize_slot_variables(checkpointable_objects, node_ids, object_names):
"""Gather and name slot variables."""
non_slot_objects = list(checkpointable_objects)
slot_variables = {}
for checkpointable in non_slot_objects:
if isinstance(checkpointable, optimizer_lib.Optimizer):
naming_scheme = _slot_variable_naming_for_optimizer(
optimizer_path=object_names[checkpointable])
slot_names = checkpointable.get_slot_names()
for slot_name in slot_names:
for original_variable_node_id, original_variable in enumerate(
non_slot_objects):
try:
slot_variable = checkpointable.get_slot(
original_variable, slot_name)
except AttributeError:
slot_variable = None
if slot_variable is None:
continue
slot_variable._maybe_initialize_checkpointable() # pylint: disable=protected-access
if slot_variable._checkpoint_dependencies: # pylint: disable=protected-access
# TODO (allenl): Gather dependencies of slot variables. id:3924
# https://github.com/imdone/tensorflow/issues/3922
raise NotImplementedError(
"Currently only variables with no dependencies can be saved as "
"slot variables. File a feature request if this limitation "
"bothers you.")
if slot_variable in node_ids:
raise NotImplementedError(
"A slot variable was re-used as a dependency of a "
"Checkpointable object. This is not currently allowed. File a "
"feature request if this limitation bothers you.")
checkpoint_name = naming_scheme(
variable_path=object_names[original_variable],
slot_name=slot_name)
object_names[slot_variable] = checkpoint_name
slot_variable_node_id = len(checkpointable_objects)
node_ids[slot_variable] = slot_variable_node_id
checkpointable_objects.append(slot_variable)
slot_variable_proto = (
checkpointable_object_graph_pb2.CheckpointableObjectGraph
.CheckpointableObject.SlotVariableReference(
slot_name=slot_name,
original_variable_node_id=original_variable_node_id,
slot_variable_node_id=slot_variable_node_id))
slot_variables.setdefault(checkpointable, []).append(
slot_variable_proto)
return slot_variables
def _serialize_checkpointables(
checkpointable_objects, node_ids, object_names, slot_variables):
"""Name non-slot `Checkpointable`s and add them to `object_graph_proto`."""
object_graph_proto = (
checkpointable_object_graph_pb2.CheckpointableObjectGraph())
named_saveables = {}
for checkpoint_id, checkpointable in enumerate(checkpointable_objects):
assert node_ids[checkpointable] == checkpoint_id
object_proto = object_graph_proto.nodes.add()
object_proto.slot_variables.extend(slot_variables.get(checkpointable, ()))
object_name = object_names[checkpointable]
for name, saveable_factory in (
checkpointable._gather_saveables_for_checkpoint().items()): # pylint: disable=protected-access
attribute = object_proto.attributes.add()
attribute.name = name
attribute.checkpoint_key = "%s/%s/%s" % (
object_name, _OBJECT_ATTRIBUTES_NAME, _escape_local_name(name))
if callable(saveable_factory):
saveable = saveable_factory(name=attribute.checkpoint_key)
else:
saveable = saveable_factory
# Figure out the name-based Saver's name for this variable.
saver_dict = saver_lib.BaseSaverBuilder.OpListToDict(
[saveable], convert_variable_to_tensor=False)
attribute.full_name, = saver_dict.keys()
named_saveables[attribute.checkpoint_key] = saveable
for child in checkpointable._checkpoint_dependencies: # pylint: disable=protected-access
child_proto = object_proto.children.add()
child_proto.node_id = node_ids[child.ref]
child_proto.local_name = child.name
return named_saveables, object_graph_proto
def _serialize_object_graph(root_checkpointable):
"""Determine checkpoint keys for variables and build a serialized graph.
Non-slot variables are keyed based on a shortest path from the root saveable
to the object which owns the variable (i.e. the one which called
`Checkpointable._add_variable` to create it).
Slot variables are keyed based on a shortest path to the variable being
slotted for, a shortest path to their optimizer, and the slot name.
Args:
root_checkpointable: A `Checkpointable` object whose variables (including
the variables of dependencies, recursively) should be saved.
Returns:
A tuple of (named_variables, object_graph_proto):
named_variables: A dictionary mapping names to variable objects.
object_graph_proto: A CheckpointableObjectGraph protocol buffer containing
the serialized object graph and variable references.
Raises:
ValueError: If there are invalid characters in an optimizer's slot names.
"""
checkpointable_objects, path_to_root = (
_breadth_first_checkpointable_traversal(root_checkpointable))
object_names = {
obj: _object_prefix_from_path(path)
for obj, path in path_to_root.items()}
node_ids = {node: node_id for node_id, node
in enumerate(checkpointable_objects)}
slot_variables = _serialize_slot_variables(
checkpointable_objects=checkpointable_objects,
node_ids=node_ids,
object_names=object_names)
return _serialize_checkpointables(
checkpointable_objects=checkpointable_objects,
node_ids=node_ids,
object_names=object_names,
slot_variables=slot_variables)
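# The resulting checkpoint keys take the form
# "<path to object>/.ATTRIBUTES/<attribute name>"; for instance a Dense layer's
# kernel owned by a model might be keyed
# "model/dense/kernel/.ATTRIBUTES/VARIABLE_VALUE" (illustrative path; the
# attribute name shown is the one variables typically register).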
def list_objects(root_checkpointable):
"""Traverse the object graph and list all accessible objects.
Looks for `Checkpointable` objects which are dependencies of
`root_checkpointable`. Includes slot variables only if the variable they are
slotting for and the optimizer are dependencies of `root_checkpointable`
(i.e. if they would be saved with a checkpoint).
Args:
root_checkpointable: A `Checkpointable` object whose dependencies should be
flattened.
Returns:
A flat list of objects.
"""
# TODO (allenl): Extract out gathering logic so the naming logic doesn't have id:4322
# https://github.com/imdone/tensorflow/issues/4320
# to run.
checkpointable_objects, path_to_root = (
_breadth_first_checkpointable_traversal(root_checkpointable))
object_names = {
obj: _object_prefix_from_path(path)
for obj, path in path_to_root.items()}
node_ids = {node: node_id for node_id, node
in enumerate(checkpointable_objects)}
_serialize_slot_variables(
checkpointable_objects=checkpointable_objects,
node_ids=node_ids,
object_names=object_names)
return checkpointable_objects
def gather_initializers(root_checkpointable):
"""Traverse the object graph and find initialization ops.
Looks for `Checkpointable` objects which are dependencies of
`root_checkpointable` and which have an `initializer` property. Includes
initializers for slot variables only if the variable they are slotting for and
the optimizer are dependencies of `root_checkpointable` (i.e. if they would be
saved with a checkpoint).
Args:
root_checkpointable: A `Checkpointable` object to gather initializers for.
Returns:
A list of initialization ops.
"""
checkpointable_objects = list_objects(root_checkpointable)
return [c.initializer for c in checkpointable_objects
if hasattr(c, "initializer") and c.initializer is not None]
class _NoRestoreSaveable(saver_lib.BaseSaverBuilder.SaveableObject):
def __init__(self, tensor, name):
spec = saver_lib.BaseSaverBuilder.SaveSpec(tensor, "", name)
super(_NoRestoreSaveable, self).__init__(tensor, [spec], name)
def restore(self, restored_tensors, restored_shapes):
return control_flow_ops.no_op()
class _LoadStatus(object):
"""Abstract base for load status callbacks."""
@abc.abstractmethod
def assert_consumed(self):
"""Raises an exception unless a non-trivial restoration has completed."""
pass
@abc.abstractmethod
def run_restore_ops(self, session=None):
"""Runs restore ops from the checkpoint. Requires a valid checkpoint."""
pass
@abc.abstractmethod
def initialize_or_restore(self, session=None):
"""Runs restore ops from the checkpoint, or initializes variables."""
pass
class CheckpointLoadStatus(_LoadStatus):
"""Checks the status of checkpoint loading and manages restore ops.
Returned from `Saver.restore`. Since `restore` may defer the loading of values
in the checkpoint which don't yet have corresponding Python objects,
`CheckpointLoadStatus` provides a callback to verify that checkpoint loading
is complete (`assert_consumed`).
When graph building, `restore` does not run restore ops itself since their
creation may be deferred. The `run_restore_ops` method must be called once all
Python objects with values to restore have been created and added to the
dependency graph (this does not necessarily have to be the whole checkpoint;
calling `run_restore_ops` while `assert_consumed` fails is supported and will
partially restore the checkpoint).
See `Saver.restore` for usage examples.
"""
def __init__(self, checkpoint, feed_dict, root_checkpointable):
self._checkpoint = checkpoint
self._feed_dict = feed_dict
self._root_checkpointable = root_checkpointable
def assert_consumed(self):
"""Asserts that all objects in the checkpoint have been created/matched.
Returns:
`self` for chaining.
Raises:
AssertionError: If there are any Python objects in the dependency graph
which have not been restored from this checkpoint or a later `restore`,
or if there are any checkpointed values which have not been matched to
Python objects.
"""
for node_id, node in enumerate(self._checkpoint.object_graph_proto.nodes):
checkpointable = self._checkpoint.object_by_proto_id.get(node_id, None)
if checkpointable is None:
raise AssertionError("Unresolved object in checkpoint: %s" % (node,))
if checkpointable._update_uid < self._checkpoint.restore_uid: # pylint: disable=protected-access
raise AssertionError(
"Object not assigned a value from checkpoint: %s" % (node,))
if self._checkpoint.slot_restorations:
# Sanity check; this collection should be clear if everything has been
# restored.
raise AssertionError("Unresolved slot restorations: %s" % (
self._checkpoint.slot_restorations,))
if self._checkpoint.unused_attributes:
raise AssertionError(
("Unused attributes in these objects (the attributes exist in the "
"checkpoint but not in the objects): %s") % (
self._checkpoint.unused_attributes.items(),))
for checkpointable_object in list_objects(self._root_checkpointable):
self._checkpoint.all_python_objects.add(checkpointable_object)
unused_python_objects = (
set(self._checkpoint.all_python_objects)
- set(self._checkpoint.object_by_proto_id.values()))
if unused_python_objects:
raise AssertionError(
("Some Python objects were not bound to checkpointed values, likely "
"due to changes in the Python program: %s")
% (unused_python_objects,))
return self
def run_restore_ops(self, session=None):
"""Run operations to restore objects in the dependency graph."""
if context.executing_eagerly():
return # Run eagerly
if session is None:
session = ops.get_default_session()
session.run(self._checkpoint.restore_ops, feed_dict=self._feed_dict)
def initialize_or_restore(self, session=None):
"""Run operations to initialize or restore objects in the dependency graph.
Any objects in the dependency graph which have initializers but are not in
the checkpoint will have those initializers run, unless those variables are
being restored by a later call to `tf.train.Checkpoint.restore()`.
This method has a sibling in `InitializationOnlyStatus` which instead
initializes variables. That type is returned if no checkpoint is specified
in `Saver.restore`.
Args:
session: The session to run init/restore ops in. If `None`, uses the
default session.
"""
if context.executing_eagerly():
return # Initialization and restoration ops are run eagerly
if session is None:
session = ops.get_default_session()
all_objects = list_objects(self._root_checkpointable)
already_initialized_objects = set(
self._checkpoint.object_by_proto_id.values())
initializers_for_non_restored_variables = [
c.initializer for c in all_objects
if hasattr(c, "initializer")
and c not in already_initialized_objects
and (getattr(c, "_update_uid", self._checkpoint.restore_uid - 1)
< self._checkpoint.restore_uid)]
self.run_restore_ops(session=session)
session.run(initializers_for_non_restored_variables)
class InitializationOnlyStatus(_LoadStatus):
"""Returned from `Saver.restore` when no checkpoint has been specified.
Objects of this type have the same `assert_consumed` method as
`CheckpointLoadStatus`, but it always fails. However,
`initialize_or_restore` works on objects of both types, and will
initialize variables in `InitializationOnlyStatus` objects or restore them
otherwise.
"""
def __init__(self, root_checkpointable, restore_uid):
self._restore_uid = restore_uid
self._root_checkpointable = root_checkpointable
def assert_consumed(self):
"""Assertion for consistency with `CheckpointLoadStatus`. Always fails."""
raise AssertionError(
"No checkpoint specified (save_path=None); nothing is being restored.")
def run_restore_ops(self, session=None):
"""For consistency with `CheckpointLoadStatus`.
Use `initialize_or_restore` for initializing if no checkpoint was passed
to `Saver.restore` and restoring otherwise.
Args:
session: Not used.
"""
raise AssertionError(
"No checkpoint specified, so no restore ops are available "
"(save_path=None to Saver.restore).")
def initialize_or_restore(self, session=None):
"""Runs initialization ops for variables.
Objects which would be saved by `Saver.save` will be initialized, unless
those variables are being restored by a later call to
`tf.train.Checkpoint.restore()`.
This method does nothing when executing eagerly (initializers get run
eagerly).
Args:
session: The session to run initialization ops in. If `None`, uses the
default session.
"""
if context.executing_eagerly():
return # run eagerly
if session is None:
session = ops.get_default_session()
checkpointable_objects = list_objects(self._root_checkpointable)
initializers = [
c.initializer for c in checkpointable_objects
if hasattr(c, "initializer") and c.initializer is not None
and (getattr(c, "_update_uid", self._restore_uid - 1)
< self._restore_uid)]
session.run(initializers)
_DEPRECATED_RESTORE_INSTRUCTIONS = (
"Restoring a name-based tf.train.Saver checkpoint using the object-based "
"restore API. This mode uses global names to match variables, and so is "
"somewhat fragile. It also adds new restore ops to the graph each time it "
"is called. Prefer re-encoding training checkpoints in the object-based "
"format: run save() on the object-based saver (the same one this message "
"is coming from) and use that checkpoint in the future.")
class NameBasedSaverStatus(_LoadStatus):
"""Status for loading a name-based training checkpoint."""
def __init__(self, object_saver, save_path):
self._object_saver = object_saver
self._save_path = save_path
def assert_consumed(self):
"""Assertion for consistency with `CheckpointLoadStatus`. Always fails."""
raise AssertionError(
"Restoring a name-based checkpoint. No load status is available.")
@deprecation.deprecated(
date=None, instructions=_DEPRECATED_RESTORE_INSTRUCTIONS)
def run_restore_ops(self, session=None):
"""Load the name-based training checkpoint using a new `tf.train.Saver`."""
if session is None and not context.executing_eagerly():
session = ops.get_default_session()
with ops.device("/cpu:0"):
saver_lib.Saver(self._object_saver._global_variable_names()).restore( # pylint: disable=protected-access
sess=session, save_path=self._save_path)
def initialize_or_restore(self, session=None):
"""Alias for `run_restore_ops`."""
self.run_restore_ops(session=session)
class _SessionWithFeedDictAdditions(session_lib.SessionInterface):
"""Pretends to be a session, inserts extra feeds on run()."""
def __init__(self, session, feed_additions):
self._wrapped_session = session
self._feed_additions = feed_additions
def run(self, fetches, feed_dict=None, **kwargs):
if feed_dict is None:
feed_dict = {}
else:
feed_dict = feed_dict.copy()
feed_dict.update(self._feed_additions)
return self._wrapped_session.run(
fetches=fetches, feed_dict=feed_dict, **kwargs)
def _copy_saver_with_new_var_list(old_saver, new_var_list):
"""Copy a `tf.train.Saver`'s state to a new Saver with different variables."""
new_saver = saver_lib.Saver(var_list=new_var_list)
# TODO (allenl): Move to copying functionality to Saver? id:3986
# https://github.com/imdone/tensorflow/issues/3984
# pylint: disable=protected-access
new_saver._last_checkpoints = old_saver._last_checkpoints
new_saver._checkpoints_to_be_deleted = old_saver._checkpoints_to_be_deleted
new_saver._next_checkpoint_time = old_saver._next_checkpoint_time
# pylint: enable=protected-access
return new_saver
class CheckpointableSaver(object):
"""Saves and restores a `Checkpointable` object and its dependencies.
See `Checkpointable` for details of dependency management. `Saver` wraps
`tf.train.Saver` for saving, including extra information about the graph of
dependencies between Python objects. When restoring, it uses this information
about the save-time dependency graph to more robustly match objects with their
checkpointed values. When executing eagerly, it supports restoring variables
on object creation (see `Saver.restore`).
Values in a checkpoint are mapped to `Checkpointable` Python objects
(`Variable`s, `Optimizer`s, `Layer`s) based on the names provided when the
checkpoint was written. To avoid breaking existing checkpoints when modifying
a class, dependency names (the names of attributes to which `Checkpointable`
objects are assigned) may not change. These names are local to objects, in
contrast to the `Variable.name`-based save/restore from `tf.train.Saver`, and
so allow additional program transformations.
"""
def __init__(self, root_checkpointable):
"""Configure saving.
Args:
root_checkpointable: The root of the object graph to save/restore. This
object and all of its dependencies are saved in the checkpoint. When
restoring, objects are matched and restored starting from this root.
"""
# Allow passing in a weak reference to avoid reference cycles when
# `Checkpointable` objects save themselves.
self._root_checkpointable_ref = root_checkpointable
# The file prefix placeholder is created lazily when graph building (and not
# at all when executing eagerly) to avoid creating ops in the constructor
# (when they may never be necessary).
self._file_prefix_placeholder = None
# Op caching for save
self._object_graph_feed_tensor = None
self._last_save_object_graph = None
self._last_save_saver = None
# Op caching for restore
self._last_restore_object_graph = None
self._last_restore_checkpoint = None
@property
def _root_checkpointable(self):
if isinstance(self._root_checkpointable_ref, weakref.ref):
derefed = self._root_checkpointable_ref()
assert derefed is not None
return derefed
else:
return self._root_checkpointable_ref
def save(self, file_prefix, checkpoint_number=None, session=None):
"""Save a training checkpoint.
The saved checkpoint includes variables created by this object and any
Checkpointable objects it depends on at the time `Saver.save()` is called.
Args:
file_prefix: A prefix to use for the checkpoint filenames
(/path/to/directory/and_a_prefix). Names are generated based on this
prefix and `checkpoint_number`, if provided.
checkpoint_number: An integer variable or Tensor, used to number
checkpoints. Typically this value is saved along with other variables in
training checkpoints, which will happen automatically if it was created
by `root_checkpointable` or one of its dependencies (via
`Checkpointable._add_variable`).
session: The session to evaluate variables in. Ignored when executing
eagerly. If not provided when graph building, the default session is
used.
Returns:
The full path to the checkpoint.
"""
named_variables, graph_proto = _serialize_object_graph(
self._root_checkpointable)
if not context.executing_eagerly():
if session is None:
session = ops.get_default_session()
if self._object_graph_feed_tensor is None:
with ops.device("/cpu:0"):
self._object_graph_feed_tensor = constant_op.constant(
"", dtype=dtypes.string)
object_graph_tensor = self._object_graph_feed_tensor
feed_additions = {object_graph_tensor: graph_proto.SerializeToString()}
else:
session = None
with ops.device("/cpu:0"):
object_graph_tensor = constant_op.constant(
graph_proto.SerializeToString(), dtype=dtypes.string)
feed_additions = None
assert checkpointable_lib.OBJECT_GRAPH_PROTO_KEY not in named_variables
named_variables[checkpointable_lib.OBJECT_GRAPH_PROTO_KEY] = (
_NoRestoreSaveable(
tensor=object_graph_tensor,
name=checkpointable_lib.OBJECT_GRAPH_PROTO_KEY))
if (self._last_save_object_graph != graph_proto
# When executing eagerly, we need to re-create SaveableObjects each time
# save() is called so they pick up new Tensors passed to their
# constructors. That means the Saver needs to be copied with a new
# var_list.
or context.executing_eagerly()):
if self._last_save_object_graph is not None:
self._last_save_saver = _copy_saver_with_new_var_list(
old_saver=self._last_save_saver, new_var_list=named_variables)
else:
self._last_save_saver = saver_lib.Saver(var_list=named_variables)
self._last_save_object_graph = graph_proto
with ops.device("/cpu:0"):
save_path = self._last_save_saver.save(
sess=_SessionWithFeedDictAdditions(
session=session, feed_additions=feed_additions),
save_path=file_prefix,
write_meta_graph=False,
global_step=checkpoint_number)
return save_path
def _global_variable_names(self):
"""Generate a `tf.train.Saver`-style `var_list` using `variable.name`s."""
named_saveables, graph_proto = _serialize_object_graph(
self._root_checkpointable)
saver_names = {}
for object_proto in graph_proto.nodes:
for attribute_proto in object_proto.attributes:
saver_names[attribute_proto.full_name] = named_saveables[
attribute_proto.checkpoint_key]
return saver_names
def restore(self, save_path):
"""Restore a training checkpoint.
Restores `root_checkpointable` and any objects that it tracks
(transitive). Either assigns values immediately if variables to restore have
been created already, or defers restoration until the variables are
created. Dependencies added to the `root_checkpointable` passed to the
constructor after this call will be matched if they have a corresponding
object in the checkpoint.
When building a graph, restorations are added to the graph but not run.
To disallow deferred loading, assert immediately that all checkpointed
variables have been matched to variable objects:
```python
saver = Saver(root)
saver.restore(path).assert_consumed()
```
An exception will be raised unless every object was matched and its
variables already exist.
When graph building, `assert_consumed()` indicates that all of the restore
ops which will be created for this checkpoint have been created. They can be
run via the `run_restore_ops()` function of the status object:
```python
saver.restore(path).assert_consumed().run_restore_ops()
```
If the checkpoint has not been consumed completely, then the list of restore
ops will grow as more objects are added to the dependency graph.
Name-based `tf.train.Saver` checkpoints can be loaded using this
method. There is no deferred loading, and names are used to match
variables. No restore ops are created/run until `run_restore_ops()` or
`initialize_or_restore()` are called on the returned status object, even
when executing eagerly. Re-encode name-based checkpoints using this
object-based `Saver.save` as soon as possible.
Args:
save_path: The path to the checkpoint, as returned by `save` or
`tf.train.latest_checkpoint`. If None (as when there is no latest
checkpoint for `tf.train.latest_checkpoint` to return), returns an
object which may run initializers for objects in the dependency
graph. If the checkpoint was written by the name-based `tf.train.Saver`,
names are used to match variables.
Returns:
A load status object, which can be used to make assertions about the
status of checkpoint restoration and run initialization/restore ops
(of type `CheckpointLoadStatus`, or `InitializationOnlyStatus` if
`save_path` is `None`).
If `save_path` points to a name-based checkpoint, a `NameBasedSaverStatus`
object is returned which runs restore ops from a name-based saver.
"""
if save_path is None:
return InitializationOnlyStatus(self._root_checkpointable, ops.uid())
in_graph_mode = not context.executing_eagerly()
if in_graph_mode:
if self._file_prefix_placeholder is None:
with ops.device("/cpu:0"):
self._file_prefix_placeholder = constant_op.constant("model")
file_prefix_tensor = self._file_prefix_placeholder
file_prefix_feed_dict = {self._file_prefix_placeholder: save_path}
else:
with ops.device("/cpu:0"):
file_prefix_tensor = constant_op.constant(save_path)
file_prefix_feed_dict = None
reader = pywrap_tensorflow.NewCheckpointReader(save_path)
try:
object_graph_string = reader.get_tensor(
checkpointable_lib.OBJECT_GRAPH_PROTO_KEY)
except errors_impl.NotFoundError:
# The object graph proto does not exist in this checkpoint. Try again with
# name-based saving.
return NameBasedSaverStatus(self, save_path)
object_graph_proto = (
checkpointable_object_graph_pb2.CheckpointableObjectGraph())
object_graph_proto.ParseFromString(object_graph_string)
if in_graph_mode and object_graph_proto == self._last_restore_object_graph:
checkpoint = self._last_restore_checkpoint
else:
if in_graph_mode:
dtype_map = None
else:
dtype_map = reader.get_variable_to_dtype_map()
checkpoint = _CheckpointRestoreCoordinator(
object_graph_proto=object_graph_proto,
save_path=file_prefix_tensor,
dtype_map=dtype_map)
if in_graph_mode:
if self._last_restore_object_graph is not None:
raise NotImplementedError(
"Using a single Saver to restore different object graphs is not "
"currently supported when graph building. Use a different Saver "
"for each object graph (restore ops will be duplicated), or "
"file a feature request if this limitation bothers you.")
self._last_restore_checkpoint = checkpoint
self._last_restore_object_graph = object_graph_proto
checkpointable_lib._CheckpointPosition( # pylint: disable=protected-access
checkpoint=checkpoint, proto_id=0).restore(self._root_checkpointable)
load_status = CheckpointLoadStatus(
checkpoint,
root_checkpointable=self._root_checkpointable,
feed_dict=file_prefix_feed_dict)
return load_status
@tf_export("train.Checkpoint")
class Checkpoint(checkpointable_lib.Checkpointable):
"""Groups checkpointable objects, saving and restoring them.
`Checkpoint`'s constructor accepts keyword arguments whose values are types
that contain checkpointable state, such as `tf.train.Optimizer`
implementations, `tf.Variable`, `tf.keras.Layer` implementations, or
`tf.keras.Model` implementations. It saves these values with a checkpoint, and
maintains a `save_counter` for numbering checkpoints.
Example usage when graph building:
```python
import tensorflow as tf
import os
checkpoint_directory = "/tmp/training_checkpoints"
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
checkpoint = tf.train.Checkpoint(optimizer=optimizer, model=model)
status = checkpoint.restore(tf.train.latest_checkpoint(checkpoint_directory))
train_op = optimizer.minimize( ... )
status.assert_consumed() # Optional sanity checks.
with tf.Session() as session:
# Use the Session to restore variables, or initialize them if
# tf.train.latest_checkpoint returned None.
status.initialize_or_restore(session)
for _ in range(num_training_steps):
session.run(train_op)
checkpoint.save(file_prefix=checkpoint_prefix)
```
Example usage with eager execution enabled:
```python
import tensorflow as tf
import os
tf.enable_eager_execution()
checkpoint_directory = "/tmp/training_checkpoints"
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
checkpoint = tf.train.Checkpoint(optimizer=optimizer, model=model)
status = checkpoint.restore(tf.train.latest_checkpoint(checkpoint_directory))
for _ in range(num_training_steps):
optimizer.minimize( ... ) # Variables will be restored on creation.
status.assert_consumed() # Optional sanity checks.
checkpoint.save(file_prefix=checkpoint_prefix)
```
`Checkpoint.save` and `Checkpoint.restore` write and read object-based
checkpoints, in contrast to `tf.train.Saver` which writes and reads
`variable.name` based checkpoints. Object-based checkpointing saves a graph of
dependencies between Python objects (`Layer`s, `Optimizer`s, `Variable`s,
etc.) with named edges, and this graph is used to match variables when
restoring a checkpoint. It can be more robust to changes in the Python
program, and helps to support restore-on-create for variables when executing
eagerly. Prefer `tf.train.Checkpoint` over `tf.train.Saver` for new code.
`Checkpoint` objects have dependencies on the objects passed as keyword
arguments to their constructors, and each dependency is given a name that is
identical to the name of the keyword argument for which it was created.
TensorFlow classes like `Layer`s and `Optimizer`s will automatically add
dependencies on their variables (e.g. "kernel" and "bias" for
`tf.keras.layers.Dense`). Inheriting from `tf.keras.Model` makes managing
dependencies easy in user-defined classes, since `Model` hooks into attribute
assignment. For example:
```python
class Regress(tf.keras.Model):
def __init__(self):
super(Regress, self).__init__()
self.input_transform = tf.keras.layers.Dense(10)
# ...
def call(self, inputs):
x = self.input_transform(inputs)
# ...
```
This `Model` has a dependency named "input_transform" on its `Dense` layer,
which in turn depends on its variables. As a result, saving an instance of
`Regress` using `tf.train.Checkpoint` will also save all the variables created
by the `Dense` layer.
Attributes:
save_counter: Incremented when `save()` is called. Used to number
checkpoints.
"""
def __init__(self, **kwargs):
"""Group objects into a training checkpoint.
Args:
**kwargs: Keyword arguments are set as attributes of this object, and are
saved with the checkpoint. Values must be checkpointable objects.
Raises:
ValueError: If objects in `kwargs` are not checkpointable.
"""
super(Checkpoint, self).__init__()
for k, v in sorted(kwargs.items(), key=lambda item: item[0]):
if not isinstance(v, checkpointable_lib.CheckpointableBase):
raise ValueError(
("`Checkpoint` was expecting a checkpointable object (an object "
"derived from `CheckpointableBase`), got %s. If you believe this "
"object should be checkpointable (i.e. it is part of the "
"TensorFlow Python API and manages state), please open an issue.")
% (v,))
setattr(self, k, v)
self._save_counter = None # Created lazily for restore-on-create.
self._saver = CheckpointableSaver(weakref.ref(self))
def _maybe_create_save_counter(self):
"""Create a save counter if it does not yet exist."""
if self._save_counter is None:
# Initialized to 0 and incremented before saving.
with ops.device("/cpu:0"):
self._save_counter = add_variable(
self, name="save_counter", initializer=0, dtype=dtypes.int64)
@property
def save_counter(self):
"""An integer variable which starts at zero and is incremented on save.
Used to number checkpoints.
Returns:
The save counter variable.
"""
self._maybe_create_save_counter()
return self._save_counter
def save(self, file_prefix, session=None):
"""Save a training checkpoint.
The saved checkpoint includes variables created by this object and any
checkpointable objects it depends on at the time `Checkpoint.save()` is
called.
Args:
file_prefix: A prefix to use for the checkpoint filenames
(/path/to/directory/and_a_prefix). Names are generated based on this
prefix and `Checkpoint.save_counter`.
session: The session to evaluate variables in. Ignored when executing
eagerly. If not provided when graph building, the default session is
used.
Returns:
The full path to the checkpoint.
"""
in_graph_mode = not context.executing_eagerly()
if in_graph_mode:
if session is None:
session = ops.get_default_session()
if self._save_counter is None:
# When graph building, if this is a new save counter variable then it
# needs to be initialized before assign_add. This is only an issue if
# restore() has not been called first.
session.run(self.save_counter.initializer)
with ops.colocate_with(self.save_counter):
assign_op = self.save_counter.assign_add(1)
if in_graph_mode:
session.run(assign_op)
return self._saver.save(
file_prefix=file_prefix,
checkpoint_number=self.save_counter,
session=session)
def restore(self, save_path):
"""Restore a training checkpoint.
Restores this `Checkpoint` and any objects it depends on.
When executing eagerly, either assigns values immediately if variables to
restore have been created already, or defers restoration until the variables
are created. Dependencies added after this call will be matched if they have
a corresponding object in the checkpoint (the restore request will queue in
any checkpointable object waiting for the expected dependency to be added).
When graph building, restoration ops are added to the graph but not run
immediately.
To ensure that loading is complete and no more assignments will take place,
use the `assert_consumed()` method of the status object returned by
`restore`:
```python
checkpoint = tf.train.Checkpoint( ... )
checkpoint.restore(path).assert_consumed()
```
An exception will be raised if any Python objects in the dependency graph
were not found in the checkpoint, or if any checkpointed values do not have
a matching Python object.
When graph building, `assert_consumed()` indicates that all of the restore
ops that will be created for this checkpoint have been created. They can be
run via the `run_restore_ops()` method of the status object:
```python
checkpoint.restore(path).assert_consumed().run_restore_ops()
```
If the checkpoint has not been consumed completely, then the list of restore
ops will grow as more objects are added to the dependency graph.
Name-based `tf.train.Saver` checkpoints can be loaded using this
method. There is no deferred loading, and names are used to match
variables. No restore ops are created/run until `run_restore_ops()` or
`initialize_or_restore()` are called on the returned status object, even
when executing eagerly. Re-encode name-based checkpoints using
`tf.train.Checkpoint.save` as soon as possible.
Args:
save_path: The path to the checkpoint, as returned by `save` or
`tf.train.latest_checkpoint`. If None (as when there is no latest
checkpoint for `tf.train.latest_checkpoint` to return), returns an
object which may run initializers for objects in the dependency
graph. If the checkpoint was written by the name-based `tf.train.Saver`,
names are used to match variables.
Returns:
A load status object, which can be used to make assertions about the
status of a checkpoint restoration and run initialization/restore ops.
The returned status object has the following methods:
- `assert_consumed()`:
Raises an exception if any variables/objects are unmatched: either
checkpointed values which don't have a matching Python object or
Python objects in the dependency graph with no values in the
checkpoint. This method returns the status object, and so may be
chained with `initialize_or_restore` or `run_restore_ops`.
- `initialize_or_restore(session=None)`:
When graph building, runs variable initializers if `save_path` is
`None`, but otherwise runs restore operations. If no `session` is
explicitly specified, the default session is used. No effect for
object-based checkpoints when executing eagerly (variables are
initialized or restored eagerly).
- `run_restore_ops(session=None)`:
When graph building, runs restore operations. If no `session` is
explicitly specified, the default session is used. No effect for
object-based checkpoints when executing eagerly (restore operations
are run eagerly). May only be called when `save_path` is not `None`.
"""
status = self._saver.restore(save_path=save_path)
# Create the save counter now so it gets initialized with other variables
# when graph building. Creating it earlier would lead to double
# initialization when executing eagerly.
self._maybe_create_save_counter()
return status
| 43.294939
| 116
| 0.730893
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import collections
import weakref
from tensorflow.core.protobuf import checkpointable_object_graph_pb2
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.client import session as session_lib
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.training import checkpointable as checkpointable_lib
from tensorflow.python.training import optimizer as optimizer_lib
from tensorflow.python.training import saver as saver_lib
from tensorflow.python.util import deprecation
from tensorflow.python.util.tf_export import tf_export
_ESCAPE_CHAR = "."
_OPTIMIZER_SLOTS_NAME = _ESCAPE_CHAR + "OPTIMIZER_SLOT"
_OBJECT_ATTRIBUTES_NAME = _ESCAPE_CHAR + "ATTRIBUTES"
class _CheckpointRestoreCoordinator(object):
def __init__(self, object_graph_proto, save_path, dtype_map=None):
self.builder = saver_lib.BulkSaverBuilder()
self.object_graph_proto = object_graph_proto
self.restore_uid = ops.uid()
self.unused_attributes = weakref.WeakKeyDictionary()
# (as objects with deferred dependencies will generally have references to
# this object).
self.object_by_proto_id = weakref.WeakValueDictionary()
# A set of all Python objects we've seen as dependencies, even if we didn't
# use them (for example because of inconsistent references when
# loading). Used to make status assertions fail when loading checkpoints
# that don't quite match.
self.all_python_objects = weakref.WeakSet()
self.save_path = save_path
self.dtype_map = dtype_map
self.restore_ops = []
self.restore_ops_by_name = {}
self.deferred_slot_restorations = {}
# happens.
self.slot_restorations = {}
for node_index, node in enumerate(self.object_graph_proto.nodes):
for slot_reference in node.slot_variables:
# `node` refers to an `Optimizer`, since only these have slot variables.
self.slot_restorations.setdefault(
slot_reference.original_variable_node_id, []).append(
checkpointable_lib._SlotVariableRestoration( # pylint: disable=protected-access
optimizer_id=node_index,
slot_variable_id=slot_reference.slot_variable_node_id,
slot_name=slot_reference.slot_name))
# TODO (allenl): If this ends up in a public API, consider adding LINT.IfChange id:3465
# https://github.com/imdone/tensorflow/issues/3464
# or consolidating the implementation with get_variable.
def _default_getter(name, shape, dtype, initializer=None,
partition_info=None, **kwargs):
dtype = dtypes.as_dtype(dtype)
shape_object = tensor_shape.as_shape(shape)
with ops.init_scope():
if initializer is None:
initializer, initializing_from_value = (
variable_scope._get_default_variable_store()._get_default_initializer( # pylint: disable=protected-access
name=name, shape=shape_object, dtype=dtype))
else:
initializing_from_value = not callable(initializer)
# Same logic as get_variable
variable_dtype = dtype.base_dtype
if initializing_from_value:
if shape is not None:
raise ValueError("If initializer is a constant, do not specify shape.")
initial_value = initializer
else:
# Instantiate initializer if provided initializer is a type object.
if isinstance(initializer, type(init_ops.Initializer)):
initializer = initializer(dtype=dtype)
def initial_value():
return initializer(
shape_object.as_list(), dtype=dtype, partition_info=partition_info)
return resource_variable_ops.ResourceVariable(
initial_value=initial_value,
name=name,
dtype=variable_dtype,
**kwargs
)
def add_variable(checkpointable, name, shape=None, dtype=dtypes.float32,
initializer=None):
return checkpointable._add_variable_with_custom_getter( # pylint: disable=protected-access
name=name, shape=shape, dtype=dtype,
initializer=initializer, getter=_default_getter)
def _breadth_first_checkpointable_traversal(root_checkpointable):
bfs_sorted = []
to_visit = collections.deque([root_checkpointable])
path_to_root = {root_checkpointable: ()}
while to_visit:
current_checkpointable = to_visit.popleft()
current_checkpointable._maybe_initialize_checkpointable() # pylint: disable=protected-access
bfs_sorted.append(current_checkpointable)
for child_checkpointable in (
current_checkpointable._checkpoint_dependencies): # pylint: disable=protected-access
if child_checkpointable.ref not in path_to_root:
path_to_root[child_checkpointable.ref] = (
path_to_root[current_checkpointable] + (child_checkpointable,))
to_visit.append(child_checkpointable.ref)
return bfs_sorted, path_to_root
def _escape_local_name(name):
# We need to support slashes in local names for compatibility, since this
# naming scheme is being patched in to things like Layer.add_variable where
# slashes were previously accepted. We also want to use slashes to indicate
# edges traversed to reach the variable, so we escape forward slashes in
# names.
return (name.replace(_ESCAPE_CHAR, _ESCAPE_CHAR + _ESCAPE_CHAR)
.replace(r"/", _ESCAPE_CHAR + "S"))
def _object_prefix_from_path(path_to_root):
return "/".join(
(_escape_local_name(checkpointable.name)
for checkpointable in path_to_root))
def _slot_variable_naming_for_optimizer(optimizer_path):
# Name slot variables:
#
# <variable name>/<_OPTIMIZER_SLOTS_NAME>/<optimizer path>/<slot name>
#
# where <variable name> is exactly the checkpoint name used for the original
# variable, including the path from the checkpoint root and the local name in
# the object which owns it. Note that we only save slot variables if the
# variable it's slotting for is also being saved.
optimizer_identifier = "/%s/%s/" % (_OPTIMIZER_SLOTS_NAME, optimizer_path)
def _name_slot_variable(variable_path, slot_name):
return (variable_path
+ optimizer_identifier
+ _escape_local_name(slot_name))
return _name_slot_variable
def _serialize_slot_variables(checkpointable_objects, node_ids, object_names):
non_slot_objects = list(checkpointable_objects)
slot_variables = {}
for checkpointable in non_slot_objects:
if isinstance(checkpointable, optimizer_lib.Optimizer):
naming_scheme = _slot_variable_naming_for_optimizer(
optimizer_path=object_names[checkpointable])
slot_names = checkpointable.get_slot_names()
for slot_name in slot_names:
for original_variable_node_id, original_variable in enumerate(
non_slot_objects):
try:
slot_variable = checkpointable.get_slot(
original_variable, slot_name)
except AttributeError:
slot_variable = None
if slot_variable is None:
continue
slot_variable._maybe_initialize_checkpointable()
if slot_variable._checkpoint_dependencies:
raise NotImplementedError(
"Currently only variables with no dependencies can be saved as "
"slot variables. File a feature request if this limitation "
"bothers you.")
if slot_variable in node_ids:
raise NotImplementedError(
"A slot variable was re-used as a dependency of a "
"Checkpointable object. This is not currently allowed. File a "
"feature request if this limitation bothers you.")
checkpoint_name = naming_scheme(
variable_path=object_names[original_variable],
slot_name=slot_name)
object_names[slot_variable] = checkpoint_name
slot_variable_node_id = len(checkpointable_objects)
node_ids[slot_variable] = slot_variable_node_id
checkpointable_objects.append(slot_variable)
slot_variable_proto = (
checkpointable_object_graph_pb2.CheckpointableObjectGraph
.CheckpointableObject.SlotVariableReference(
slot_name=slot_name,
original_variable_node_id=original_variable_node_id,
slot_variable_node_id=slot_variable_node_id))
slot_variables.setdefault(checkpointable, []).append(
slot_variable_proto)
return slot_variables
def _serialize_checkpointables(
checkpointable_objects, node_ids, object_names, slot_variables):
object_graph_proto = (
checkpointable_object_graph_pb2.CheckpointableObjectGraph())
named_saveables = {}
for checkpoint_id, checkpointable in enumerate(checkpointable_objects):
assert node_ids[checkpointable] == checkpoint_id
object_proto = object_graph_proto.nodes.add()
object_proto.slot_variables.extend(slot_variables.get(checkpointable, ()))
object_name = object_names[checkpointable]
for name, saveable_factory in (
checkpointable._gather_saveables_for_checkpoint().items()):
attribute = object_proto.attributes.add()
attribute.name = name
attribute.checkpoint_key = "%s/%s/%s" % (
object_name, _OBJECT_ATTRIBUTES_NAME, _escape_local_name(name))
if callable(saveable_factory):
saveable = saveable_factory(name=attribute.checkpoint_key)
else:
saveable = saveable_factory
saver_dict = saver_lib.BaseSaverBuilder.OpListToDict(
[saveable], convert_variable_to_tensor=False)
attribute.full_name, = saver_dict.keys()
named_saveables[attribute.checkpoint_key] = saveable
for child in checkpointable._checkpoint_dependencies: # pylint: disable=protected-access
child_proto = object_proto.children.add()
child_proto.node_id = node_ids[child.ref]
child_proto.local_name = child.name
return named_saveables, object_graph_proto
def _serialize_object_graph(root_checkpointable):
checkpointable_objects, path_to_root = (
_breadth_first_checkpointable_traversal(root_checkpointable))
object_names = {
obj: _object_prefix_from_path(path)
for obj, path in path_to_root.items()}
node_ids = {node: node_id for node_id, node
in enumerate(checkpointable_objects)}
slot_variables = _serialize_slot_variables(
checkpointable_objects=checkpointable_objects,
node_ids=node_ids,
object_names=object_names)
return _serialize_checkpointables(
checkpointable_objects=checkpointable_objects,
node_ids=node_ids,
object_names=object_names,
slot_variables=slot_variables)
def list_objects(root_checkpointable):
# TODO (allenl): Extract out gathering logic so the naming logic doesn't have id:4322
checkpointable_objects, path_to_root = (
_breadth_first_checkpointable_traversal(root_checkpointable))
object_names = {
obj: _object_prefix_from_path(path)
for obj, path in path_to_root.items()}
node_ids = {node: node_id for node_id, node
in enumerate(checkpointable_objects)}
_serialize_slot_variables(
checkpointable_objects=checkpointable_objects,
node_ids=node_ids,
object_names=object_names)
return checkpointable_objects
def gather_initializers(root_checkpointable):
checkpointable_objects = list_objects(root_checkpointable)
return [c.initializer for c in checkpointable_objects
if hasattr(c, "initializer") and c.initializer is not None]
class _NoRestoreSaveable(saver_lib.BaseSaverBuilder.SaveableObject):
def __init__(self, tensor, name):
spec = saver_lib.BaseSaverBuilder.SaveSpec(tensor, "", name)
super(_NoRestoreSaveable, self).__init__(tensor, [spec], name)
def restore(self, restored_tensors, restored_shapes):
return control_flow_ops.no_op()
class _LoadStatus(object):
@abc.abstractmethod
def assert_consumed(self):
pass
@abc.abstractmethod
def run_restore_ops(self, session=None):
pass
@abc.abstractmethod
def initialize_or_restore(self, session=None):
pass
class CheckpointLoadStatus(_LoadStatus):
def __init__(self, checkpoint, feed_dict, root_checkpointable):
self._checkpoint = checkpoint
self._feed_dict = feed_dict
self._root_checkpointable = root_checkpointable
def assert_consumed(self):
for node_id, node in enumerate(self._checkpoint.object_graph_proto.nodes):
checkpointable = self._checkpoint.object_by_proto_id.get(node_id, None)
if checkpointable is None:
raise AssertionError("Unresolved object in checkpoint: %s" % (node,))
if checkpointable._update_uid < self._checkpoint.restore_uid:
raise AssertionError(
"Object not assigned a value from checkpoint: %s" % (node,))
if self._checkpoint.slot_restorations:
raise AssertionError("Unresolved slot restorations: %s" % (
self._checkpoint.slot_restorations,))
if self._checkpoint.unused_attributes:
raise AssertionError(
("Unused attributes in these objects (the attributes exist in the "
"checkpoint but not in the objects): %s") % (
self._checkpoint.unused_attributes.items(),))
for checkpointable_object in list_objects(self._root_checkpointable):
self._checkpoint.all_python_objects.add(checkpointable_object)
unused_python_objects = (
set(self._checkpoint.all_python_objects)
- set(self._checkpoint.object_by_proto_id.values()))
if unused_python_objects:
raise AssertionError(
("Some Python objects were not bound to checkpointed values, likely "
"due to changes in the Python program: %s")
% (unused_python_objects,))
return self
def run_restore_ops(self, session=None):
if context.executing_eagerly():
return
if session is None:
session = ops.get_default_session()
session.run(self._checkpoint.restore_ops, feed_dict=self._feed_dict)
def initialize_or_restore(self, session=None):
if context.executing_eagerly():
return
if session is None:
session = ops.get_default_session()
all_objects = list_objects(self._root_checkpointable)
already_initialized_objects = set(
self._checkpoint.object_by_proto_id.values())
initializers_for_non_restored_variables = [
c.initializer for c in all_objects
if hasattr(c, "initializer")
and c not in already_initialized_objects
and (getattr(c, "_update_uid", self._checkpoint.restore_uid - 1)
< self._checkpoint.restore_uid)]
self.run_restore_ops(session=session)
session.run(initializers_for_non_restored_variables)
class InitializationOnlyStatus(_LoadStatus):
def __init__(self, root_checkpointable, restore_uid):
self._restore_uid = restore_uid
self._root_checkpointable = root_checkpointable
def assert_consumed(self):
raise AssertionError(
"No checkpoint specified (save_path=None); nothing is being restored.")
def run_restore_ops(self, session=None):
raise AssertionError(
"No checkpoint specified, so no restore ops are available "
"(save_path=None to Saver.restore).")
def initialize_or_restore(self, session=None):
if context.executing_eagerly():
return
if session is None:
session = ops.get_default_session()
checkpointable_objects = list_objects(self._root_checkpointable)
initializers = [
c.initializer for c in checkpointable_objects
if hasattr(c, "initializer") and c.initializer is not None
and (getattr(c, "_update_uid", self._restore_uid - 1)
< self._restore_uid)]
session.run(initializers)
_DEPRECATED_RESTORE_INSTRUCTIONS = (
"Restoring a name-based tf.train.Saver checkpoint using the object-based "
"restore API. This mode uses global names to match variables, and so is "
"somewhat fragile. It also adds new restore ops to the graph each time it "
"is called. Prefer re-encoding training checkpoints in the object-based "
"format: run save() on the object-based saver (the same one this message "
"is coming from) and use that checkpoint in the future.")
class NameBasedSaverStatus(_LoadStatus):
def __init__(self, object_saver, save_path):
self._object_saver = object_saver
self._save_path = save_path
def assert_consumed(self):
raise AssertionError(
"Restoring a name-based checkpoint. No load status is available.")
@deprecation.deprecated(
date=None, instructions=_DEPRECATED_RESTORE_INSTRUCTIONS)
def run_restore_ops(self, session=None):
if session is None and not context.executing_eagerly():
session = ops.get_default_session()
with ops.device("/cpu:0"):
saver_lib.Saver(self._object_saver._global_variable_names()).restore(
sess=session, save_path=self._save_path)
def initialize_or_restore(self, session=None):
self.run_restore_ops(session=session)
class _SessionWithFeedDictAdditions(session_lib.SessionInterface):
def __init__(self, session, feed_additions):
self._wrapped_session = session
self._feed_additions = feed_additions
def run(self, fetches, feed_dict=None, **kwargs):
if feed_dict is None:
feed_dict = {}
else:
feed_dict = feed_dict.copy()
feed_dict.update(self._feed_additions)
return self._wrapped_session.run(
fetches=fetches, feed_dict=feed_dict, **kwargs)
def _copy_saver_with_new_var_list(old_saver, new_var_list):
new_saver = saver_lib.Saver(var_list=new_var_list)
new_saver._last_checkpoints = old_saver._last_checkpoints
new_saver._checkpoints_to_be_deleted = old_saver._checkpoints_to_be_deleted
new_saver._next_checkpoint_time = old_saver._next_checkpoint_time
return new_saver
class CheckpointableSaver(object):
def __init__(self, root_checkpointable):
self._root_checkpointable_ref = root_checkpointable
self._file_prefix_placeholder = None
self._object_graph_feed_tensor = None
self._last_save_object_graph = None
self._last_save_saver = None
self._last_restore_object_graph = None
self._last_restore_checkpoint = None
@property
def _root_checkpointable(self):
if isinstance(self._root_checkpointable_ref, weakref.ref):
derefed = self._root_checkpointable_ref()
assert derefed is not None
return derefed
else:
return self._root_checkpointable_ref
def save(self, file_prefix, checkpoint_number=None, session=None):
named_variables, graph_proto = _serialize_object_graph(
self._root_checkpointable)
if not context.executing_eagerly():
if session is None:
session = ops.get_default_session()
if self._object_graph_feed_tensor is None:
with ops.device("/cpu:0"):
self._object_graph_feed_tensor = constant_op.constant(
"", dtype=dtypes.string)
object_graph_tensor = self._object_graph_feed_tensor
feed_additions = {object_graph_tensor: graph_proto.SerializeToString()}
else:
session = None
with ops.device("/cpu:0"):
object_graph_tensor = constant_op.constant(
graph_proto.SerializeToString(), dtype=dtypes.string)
feed_additions = None
assert checkpointable_lib.OBJECT_GRAPH_PROTO_KEY not in named_variables
named_variables[checkpointable_lib.OBJECT_GRAPH_PROTO_KEY] = (
_NoRestoreSaveable(
tensor=object_graph_tensor,
name=checkpointable_lib.OBJECT_GRAPH_PROTO_KEY))
if (self._last_save_object_graph != graph_proto
or context.executing_eagerly()):
if self._last_save_object_graph is not None:
self._last_save_saver = _copy_saver_with_new_var_list(
old_saver=self._last_save_saver, new_var_list=named_variables)
else:
self._last_save_saver = saver_lib.Saver(var_list=named_variables)
self._last_save_object_graph = graph_proto
with ops.device("/cpu:0"):
save_path = self._last_save_saver.save(
sess=_SessionWithFeedDictAdditions(
session=session, feed_additions=feed_additions),
save_path=file_prefix,
write_meta_graph=False,
global_step=checkpoint_number)
return save_path
def _global_variable_names(self):
named_saveables, graph_proto = _serialize_object_graph(
self._root_checkpointable)
saver_names = {}
for object_proto in graph_proto.nodes:
for attribute_proto in object_proto.attributes:
saver_names[attribute_proto.full_name] = named_saveables[
attribute_proto.checkpoint_key]
return saver_names
def restore(self, save_path):
if save_path is None:
return InitializationOnlyStatus(self._root_checkpointable, ops.uid())
in_graph_mode = not context.executing_eagerly()
if in_graph_mode:
if self._file_prefix_placeholder is None:
with ops.device("/cpu:0"):
self._file_prefix_placeholder = constant_op.constant("model")
file_prefix_tensor = self._file_prefix_placeholder
file_prefix_feed_dict = {self._file_prefix_placeholder: save_path}
else:
with ops.device("/cpu:0"):
file_prefix_tensor = constant_op.constant(save_path)
file_prefix_feed_dict = None
reader = pywrap_tensorflow.NewCheckpointReader(save_path)
try:
object_graph_string = reader.get_tensor(
checkpointable_lib.OBJECT_GRAPH_PROTO_KEY)
except errors_impl.NotFoundError:
return NameBasedSaverStatus(self, save_path)
object_graph_proto = (
checkpointable_object_graph_pb2.CheckpointableObjectGraph())
object_graph_proto.ParseFromString(object_graph_string)
if in_graph_mode and object_graph_proto == self._last_restore_object_graph:
checkpoint = self._last_restore_checkpoint
else:
if in_graph_mode:
dtype_map = None
else:
dtype_map = reader.get_variable_to_dtype_map()
checkpoint = _CheckpointRestoreCoordinator(
object_graph_proto=object_graph_proto,
save_path=file_prefix_tensor,
dtype_map=dtype_map)
if in_graph_mode:
if self._last_restore_object_graph is not None:
raise NotImplementedError(
"Using a single Saver to restore different object graphs is not "
"currently supported when graph building. Use a different Saver "
"for each object graph (restore ops will be duplicated), or "
"file a feature request if this limitation bothers you.")
self._last_restore_checkpoint = checkpoint
self._last_restore_object_graph = object_graph_proto
checkpointable_lib._CheckpointPosition(
checkpoint=checkpoint, proto_id=0).restore(self._root_checkpointable)
load_status = CheckpointLoadStatus(
checkpoint,
root_checkpointable=self._root_checkpointable,
feed_dict=file_prefix_feed_dict)
return load_status
@tf_export("train.Checkpoint")
class Checkpoint(checkpointable_lib.Checkpointable):
def __init__(self, **kwargs):
super(Checkpoint, self).__init__()
for k, v in sorted(kwargs.items(), key=lambda item: item[0]):
if not isinstance(v, checkpointable_lib.CheckpointableBase):
raise ValueError(
("`Checkpoint` was expecting a checkpointable object (an object "
"derived from `CheckpointableBase`), got %s. If you believe this "
"object should be checkpointable (i.e. it is part of the "
"TensorFlow Python API and manages state), please open an issue.")
% (v,))
setattr(self, k, v)
self._save_counter = None
self._saver = CheckpointableSaver(weakref.ref(self))
def _maybe_create_save_counter(self):
if self._save_counter is None:
with ops.device("/cpu:0"):
self._save_counter = add_variable(
self, name="save_counter", initializer=0, dtype=dtypes.int64)
@property
def save_counter(self):
self._maybe_create_save_counter()
return self._save_counter
def save(self, file_prefix, session=None):
in_graph_mode = not context.executing_eagerly()
if in_graph_mode:
if session is None:
session = ops.get_default_session()
if self._save_counter is None:
session.run(self.save_counter.initializer)
with ops.colocate_with(self.save_counter):
assign_op = self.save_counter.assign_add(1)
if in_graph_mode:
session.run(assign_op)
return self._saver.save(
file_prefix=file_prefix,
checkpoint_number=self.save_counter,
session=session)
def restore(self, save_path):
status = self._saver.restore(save_path=save_path)
self._maybe_create_save_counter()
return status
| true
| true
|
f7149e143f5585d098d18b700bb17a159cb42dc1
| 14,337
|
py
|
Python
|
airflow/api/common/experimental/mark_tasks.py
|
crunchbase/incubator-airflow
|
903e37a09f05f4ab022bb7153be8dc62b3d9da99
|
[
"Apache-2.0"
] | null | null | null |
airflow/api/common/experimental/mark_tasks.py
|
crunchbase/incubator-airflow
|
903e37a09f05f4ab022bb7153be8dc62b3d9da99
|
[
"Apache-2.0"
] | 22
|
2019-12-09T23:22:07.000Z
|
2021-05-12T23:15:40.000Z
|
airflow/api/common/experimental/mark_tasks.py
|
crunchbase/incubator-airflow
|
903e37a09f05f4ab022bb7153be8dc62b3d9da99
|
[
"Apache-2.0"
] | 5
|
2019-11-18T13:19:29.000Z
|
2020-03-25T13:20:29.000Z
|
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Marks tasks APIs."""
import datetime
from typing import Iterable
from sqlalchemy import or_
from airflow.jobs import BackfillJob
from airflow.models import BaseOperator, DagRun, TaskInstance
from airflow.operators.subdag_operator import SubDagOperator
from airflow.utils import timezone
from airflow.utils.db import provide_session
from airflow.utils.state import State
def _create_dagruns(dag, execution_dates, state, run_id_template):
"""
Infers from the dates which dag runs need to be created and does so.
:param dag: the dag to create dag runs for
:param execution_dates: list of execution dates to evaluate
:param state: the state to set the dag run to
    :param run_id_template: the template for the run id, to be formatted with the execution date
:return: newly created and existing dag runs for the execution dates supplied
"""
# find out if we need to create any dag runs
dag_runs = DagRun.find(dag_id=dag.dag_id, execution_date=execution_dates)
dates_to_create = list(set(execution_dates) - {dag_run.execution_date for dag_run in dag_runs})
for date in dates_to_create:
dag_run = dag.create_dagrun(
run_id=run_id_template.format(date.isoformat()),
execution_date=date,
start_date=timezone.utcnow(),
external_trigger=False,
state=state,
)
dag_runs.append(dag_run)
return dag_runs
@provide_session
def set_state(
tasks, # type: Iterable[BaseOperator]
execution_date, # type: datetime.datetime
upstream=False,
downstream=False,
future=False,
past=False,
state=State.SUCCESS,
commit=False,
session=None): # pylint: disable=too-many-arguments,too-many-locals
"""
Set the state of a task instance and if needed its relatives. Can set state
for future tasks (calculated from execution_date) and retroactively
for past tasks. Will verify integrity of past dag runs in order to create
tasks that did not exist. It will not create dag runs that are missing
    on the schedule (but it will for subdag dag runs if needed).
    :param tasks: the iterable of tasks from which to work; each task's dag needs to be set
:param execution_date: the execution date from which to start looking
    :param upstream: Mark all ancestors (upstream tasks) of task_id as well
    :param downstream: Mark all descendants (downstream tasks) of task_id, including SubDags
:param future: Mark all future tasks on the interval of the dag up until
last execution date.
:param past: Retroactively mark all tasks starting from start_date of the DAG
:param state: State to which the tasks need to be set
:param commit: Commit tasks to be altered to the database
:param session: database session
:return: list of tasks that have been created and updated
"""
if not tasks:
return []
if not timezone.is_localized(execution_date):
raise ValueError("Received non-localized date {}".format(execution_date))
task_dags = {task.dag for task in tasks}
if len(task_dags) > 1:
raise ValueError("Received tasks from multiple DAGs: {}".format(task_dags))
dag = next(iter(task_dags))
if dag is None:
raise ValueError("Received tasks with no DAG")
dates = get_execution_dates(dag, execution_date, future, past)
task_ids = list(find_task_relatives(tasks, downstream, upstream))
confirmed_dates = verify_dag_run_integrity(dag, dates)
sub_dag_run_ids = get_subdag_runs(dag, session, state, task_ids, commit, confirmed_dates)
# now look for the task instances that are affected
qry_dag = get_all_dag_task_query(dag, session, state, task_ids, confirmed_dates)
if commit:
tis_altered = qry_dag.with_for_update().all()
if sub_dag_run_ids:
qry_sub_dag = all_subdag_tasks_query(sub_dag_run_ids, session, state, confirmed_dates)
tis_altered += qry_sub_dag.with_for_update().all()
for task_instance in tis_altered:
task_instance.state = state
else:
tis_altered = qry_dag.all()
if sub_dag_run_ids:
qry_sub_dag = all_subdag_tasks_query(sub_dag_run_ids, session, state, confirmed_dates)
tis_altered += qry_sub_dag.all()
return tis_altered
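# Illustrative sketch only (not part of the original module): one common way to
# call set_state described above -- mark a single task and everything downstream
# of it as SUCCESS for one execution date, committing the change.
def _example_mark_task_and_downstream_success(task, execution_date):
    return set_state(tasks=[task], execution_date=execution_date,
                     downstream=True, state=State.SUCCESS, commit=True)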
# Flake and pylint disagree about correct indents here
def all_subdag_tasks_query(sub_dag_run_ids, session, state, confirmed_dates): # noqa: E123
"""Get *all* tasks of the sub dags"""
qry_sub_dag = session.query(TaskInstance).\
filter(
TaskInstance.dag_id.in_(sub_dag_run_ids),
TaskInstance.execution_date.in_(confirmed_dates) # noqa: E123
).\
filter(
or_(
TaskInstance.state.is_(None),
TaskInstance.state != state
)
) # noqa: E123
return qry_sub_dag
def get_all_dag_task_query(dag, session, state, task_ids, confirmed_dates): # noqa: E123
"""Get all tasks of the main dag that will be affected by a state change"""
qry_dag = session.query(TaskInstance).\
filter(
TaskInstance.dag_id == dag.dag_id,
TaskInstance.execution_date.in_(confirmed_dates),
TaskInstance.task_id.in_(task_ids) # noqa: E123
).\
filter(
or_(
TaskInstance.state.is_(None),
TaskInstance.state != state
)
)
return qry_dag
def get_subdag_runs(dag, session, state, task_ids, commit, confirmed_dates):
"""Go through subdag operators and create dag runs. We will only work
    within the scope of the subdag. We won't propagate to the parent dag,
but we will propagate from parent to subdag.
"""
dags = [dag]
sub_dag_ids = []
while dags:
current_dag = dags.pop()
for task_id in task_ids:
if not current_dag.has_task(task_id):
continue
current_task = current_dag.get_task(task_id)
if isinstance(current_task, SubDagOperator):
# this works as a kind of integrity check
# it creates missing dag runs for subdag operators,
# maybe this should be moved to dagrun.verify_integrity
dag_runs = _create_dagruns(current_task.subdag,
execution_dates=confirmed_dates,
state=State.RUNNING,
run_id_template=BackfillJob.ID_FORMAT_PREFIX)
verify_dagruns(dag_runs, commit, state, session, current_task)
dags.append(current_task.subdag)
sub_dag_ids.append(current_task.subdag.dag_id)
return sub_dag_ids
def verify_dagruns(dag_runs, commit, state, session, current_task):
"""Verifies integrity of dag_runs.
:param dag_runs: dag runs to verify
:param commit: whether dag runs state should be updated
:param state: state of the dag_run to set if commit is True
:param session: session to use
:param current_task: current task
:return:
"""
for dag_run in dag_runs:
dag_run.dag = current_task.subdag
dag_run.verify_integrity()
if commit:
dag_run.state = state
session.merge(dag_run)
def verify_dag_run_integrity(dag, dates):
"""Verify the integrity of the dag runs in case a task was added or removed
set the confirmed execution dates as they might be different
from what was provided
"""
confirmed_dates = []
dag_runs = DagRun.find(dag_id=dag.dag_id, execution_date=dates)
for dag_run in dag_runs:
dag_run.dag = dag
dag_run.verify_integrity()
confirmed_dates.append(dag_run.execution_date)
return confirmed_dates
def find_task_relatives(tasks, downstream, upstream):
"""Yield task ids and optionally ancestor and descendant ids."""
for task in tasks:
yield task.task_id
if downstream:
for relative in task.get_flat_relatives(upstream=False):
yield relative.task_id
if upstream:
for relative in task.get_flat_relatives(upstream=True):
yield relative.task_id
def get_execution_dates(dag, execution_date, future, past):
"""Returns dates of DAG execution"""
latest_execution_date = dag.latest_execution_date
if latest_execution_date is None:
raise ValueError("Received non-localized date {}".format(execution_date))
# determine date range of dag runs and tasks to consider
end_date = latest_execution_date if future else execution_date
if 'start_date' in dag.default_args:
start_date = dag.default_args['start_date']
elif dag.start_date:
start_date = dag.start_date
else:
start_date = execution_date
start_date = execution_date if not past else start_date
if dag.schedule_interval == '@once':
dates = [start_date]
elif not dag.schedule_interval:
# If schedule_interval is None, need to look at existing DagRun if the user wants future or
# past runs.
dag_runs = dag.get_dagruns_between(start_date=start_date, end_date=end_date)
dates = sorted({d.execution_date for d in dag_runs})
else:
dates = dag.date_range(start_date=start_date, end_date=end_date)
return dates
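# Illustrative sketch (hypothetical dates, not part of the original module): for a
# daily DAG whose latest run is 2020-01-10 and an execution_date of 2020-01-05,
# future=True/past=False yields (roughly) every schedule date from 2020-01-05
# through 2020-01-10, while future=False/past=False yields just [2020-01-05].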
@provide_session
def _set_dag_run_state(dag_id, execution_date, state, session=None):
"""
    Helper method that sets the dag run state in the DB.
:param dag_id: dag_id of target dag run
:param execution_date: the execution date from which to start looking
:param state: target state
:param session: database session
"""
dag_run = session.query(DagRun).filter(
DagRun.dag_id == dag_id,
DagRun.execution_date == execution_date
).one()
dag_run.state = state
if state == State.RUNNING:
dag_run.start_date = timezone.utcnow()
dag_run.end_date = None
else:
dag_run.end_date = timezone.utcnow()
session.merge(dag_run)
@provide_session
def set_dag_run_state_to_success(dag, execution_date, commit=False, session=None):
"""
Set the dag run for a specific execution date and its task instances
to success.
:param dag: the DAG of which to alter state
:param execution_date: the execution date from which to start looking
:param commit: commit DAG and tasks to be altered to the database
:param session: database session
:return: If commit is true, list of tasks that have been updated,
otherwise list of tasks that will be updated
:raises: ValueError if dag or execution_date is invalid
"""
if not dag or not execution_date:
return []
# Mark the dag run to success.
if commit:
_set_dag_run_state(dag.dag_id, execution_date, State.SUCCESS, session)
# Mark all task instances of the dag run to success.
for task in dag.tasks:
task.dag = dag
return set_state(tasks=dag.tasks, execution_date=execution_date,
state=State.SUCCESS, commit=commit, session=session)
@provide_session
def set_dag_run_state_to_failed(dag, execution_date, commit=False, session=None):
"""
Set the dag run for a specific execution date and its running task instances
to failed.
:param dag: the DAG of which to alter state
:param execution_date: the execution date from which to start looking
:param commit: commit DAG and tasks to be altered to the database
:param session: database session
:return: If commit is true, list of tasks that have been updated,
otherwise list of tasks that will be updated
:raises: AssertionError if dag or execution_date is invalid
"""
if not dag or not execution_date:
return []
# Mark the dag run to failed.
if commit:
_set_dag_run_state(dag.dag_id, execution_date, State.FAILED, session)
# Mark only RUNNING task instances.
task_ids = [task.task_id for task in dag.tasks]
tis = session.query(TaskInstance).filter(
TaskInstance.dag_id == dag.dag_id,
TaskInstance.execution_date == execution_date,
TaskInstance.task_id.in_(task_ids)).filter(TaskInstance.state == State.RUNNING)
task_ids_of_running_tis = [task_instance.task_id for task_instance in tis]
tasks = []
for task in dag.tasks:
if task.task_id not in task_ids_of_running_tis:
continue
task.dag = dag
tasks.append(task)
return set_state(tasks=tasks, execution_date=execution_date,
state=State.FAILED, commit=commit, session=session)
@provide_session
def set_dag_run_state_to_running(dag, execution_date, commit=False, session=None):
"""
Set the dag run for a specific execution date to running.
:param dag: the DAG of which to alter state
:param execution_date: the execution date from which to start looking
:param commit: commit DAG and tasks to be altered to the database
:param session: database session
:return: If commit is true, list of tasks that have been updated,
otherwise list of tasks that will be updated
"""
res = []
if not dag or not execution_date:
return res
# Mark the dag run to running.
if commit:
_set_dag_run_state(dag.dag_id, execution_date, State.RUNNING, session)
# To keep the return type consistent with the other similar functions.
return res
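# Illustrative sketch (hypothetical dag and date, not part of the original API):
# the commit flag turns a dry run into an actual update.
#   will_change = set_dag_run_state_to_failed(dag, some_execution_date, commit=False)
#   changed = set_dag_run_state_to_failed(dag, some_execution_date, commit=True)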
| 38.334225
| 99
| 0.68515
|
import datetime
from typing import Iterable
from sqlalchemy import or_
from airflow.jobs import BackfillJob
from airflow.models import BaseOperator, DagRun, TaskInstance
from airflow.operators.subdag_operator import SubDagOperator
from airflow.utils import timezone
from airflow.utils.db import provide_session
from airflow.utils.state import State
def _create_dagruns(dag, execution_dates, state, run_id_template):
dag_runs = DagRun.find(dag_id=dag.dag_id, execution_date=execution_dates)
dates_to_create = list(set(execution_dates) - {dag_run.execution_date for dag_run in dag_runs})
for date in dates_to_create:
dag_run = dag.create_dagrun(
run_id=run_id_template.format(date.isoformat()),
execution_date=date,
start_date=timezone.utcnow(),
external_trigger=False,
state=state,
)
dag_runs.append(dag_run)
return dag_runs
@provide_session
def set_state(
tasks,
execution_date,
upstream=False,
downstream=False,
future=False,
past=False,
state=State.SUCCESS,
commit=False,
session=None):
if not tasks:
return []
if not timezone.is_localized(execution_date):
raise ValueError("Received non-localized date {}".format(execution_date))
task_dags = {task.dag for task in tasks}
if len(task_dags) > 1:
raise ValueError("Received tasks from multiple DAGs: {}".format(task_dags))
dag = next(iter(task_dags))
if dag is None:
raise ValueError("Received tasks with no DAG")
dates = get_execution_dates(dag, execution_date, future, past)
task_ids = list(find_task_relatives(tasks, downstream, upstream))
confirmed_dates = verify_dag_run_integrity(dag, dates)
sub_dag_run_ids = get_subdag_runs(dag, session, state, task_ids, commit, confirmed_dates)
qry_dag = get_all_dag_task_query(dag, session, state, task_ids, confirmed_dates)
if commit:
tis_altered = qry_dag.with_for_update().all()
if sub_dag_run_ids:
qry_sub_dag = all_subdag_tasks_query(sub_dag_run_ids, session, state, confirmed_dates)
tis_altered += qry_sub_dag.with_for_update().all()
for task_instance in tis_altered:
task_instance.state = state
else:
tis_altered = qry_dag.all()
if sub_dag_run_ids:
qry_sub_dag = all_subdag_tasks_query(sub_dag_run_ids, session, state, confirmed_dates)
tis_altered += qry_sub_dag.all()
return tis_altered
def all_subdag_tasks_query(sub_dag_run_ids, session, state, confirmed_dates):
qry_sub_dag = session.query(TaskInstance).\
filter(
TaskInstance.dag_id.in_(sub_dag_run_ids),
TaskInstance.execution_date.in_(confirmed_dates)
).\
filter(
or_(
TaskInstance.state.is_(None),
TaskInstance.state != state
)
)
return qry_sub_dag
def get_all_dag_task_query(dag, session, state, task_ids, confirmed_dates):
qry_dag = session.query(TaskInstance).\
filter(
TaskInstance.dag_id == dag.dag_id,
TaskInstance.execution_date.in_(confirmed_dates),
TaskInstance.task_id.in_(task_ids)
).\
filter(
or_(
TaskInstance.state.is_(None),
TaskInstance.state != state
)
)
return qry_dag
def get_subdag_runs(dag, session, state, task_ids, commit, confirmed_dates):
dags = [dag]
sub_dag_ids = []
while dags:
current_dag = dags.pop()
for task_id in task_ids:
if not current_dag.has_task(task_id):
continue
current_task = current_dag.get_task(task_id)
if isinstance(current_task, SubDagOperator):
dag_runs = _create_dagruns(current_task.subdag,
execution_dates=confirmed_dates,
state=State.RUNNING,
run_id_template=BackfillJob.ID_FORMAT_PREFIX)
verify_dagruns(dag_runs, commit, state, session, current_task)
dags.append(current_task.subdag)
sub_dag_ids.append(current_task.subdag.dag_id)
return sub_dag_ids
def verify_dagruns(dag_runs, commit, state, session, current_task):
for dag_run in dag_runs:
dag_run.dag = current_task.subdag
dag_run.verify_integrity()
if commit:
dag_run.state = state
session.merge(dag_run)
def verify_dag_run_integrity(dag, dates):
confirmed_dates = []
dag_runs = DagRun.find(dag_id=dag.dag_id, execution_date=dates)
for dag_run in dag_runs:
dag_run.dag = dag
dag_run.verify_integrity()
confirmed_dates.append(dag_run.execution_date)
return confirmed_dates
def find_task_relatives(tasks, downstream, upstream):
for task in tasks:
yield task.task_id
if downstream:
for relative in task.get_flat_relatives(upstream=False):
yield relative.task_id
if upstream:
for relative in task.get_flat_relatives(upstream=True):
yield relative.task_id
def get_execution_dates(dag, execution_date, future, past):
latest_execution_date = dag.latest_execution_date
if latest_execution_date is None:
raise ValueError("Received non-localized date {}".format(execution_date))
end_date = latest_execution_date if future else execution_date
if 'start_date' in dag.default_args:
start_date = dag.default_args['start_date']
elif dag.start_date:
start_date = dag.start_date
else:
start_date = execution_date
start_date = execution_date if not past else start_date
if dag.schedule_interval == '@once':
dates = [start_date]
elif not dag.schedule_interval:
dag_runs = dag.get_dagruns_between(start_date=start_date, end_date=end_date)
dates = sorted({d.execution_date for d in dag_runs})
else:
dates = dag.date_range(start_date=start_date, end_date=end_date)
return dates
@provide_session
def _set_dag_run_state(dag_id, execution_date, state, session=None):
dag_run = session.query(DagRun).filter(
DagRun.dag_id == dag_id,
DagRun.execution_date == execution_date
).one()
dag_run.state = state
if state == State.RUNNING:
dag_run.start_date = timezone.utcnow()
dag_run.end_date = None
else:
dag_run.end_date = timezone.utcnow()
session.merge(dag_run)
@provide_session
def set_dag_run_state_to_success(dag, execution_date, commit=False, session=None):
if not dag or not execution_date:
return []
if commit:
_set_dag_run_state(dag.dag_id, execution_date, State.SUCCESS, session)
for task in dag.tasks:
task.dag = dag
return set_state(tasks=dag.tasks, execution_date=execution_date,
state=State.SUCCESS, commit=commit, session=session)
@provide_session
def set_dag_run_state_to_failed(dag, execution_date, commit=False, session=None):
if not dag or not execution_date:
return []
if commit:
_set_dag_run_state(dag.dag_id, execution_date, State.FAILED, session)
task_ids = [task.task_id for task in dag.tasks]
tis = session.query(TaskInstance).filter(
TaskInstance.dag_id == dag.dag_id,
TaskInstance.execution_date == execution_date,
TaskInstance.task_id.in_(task_ids)).filter(TaskInstance.state == State.RUNNING)
task_ids_of_running_tis = [task_instance.task_id for task_instance in tis]
tasks = []
for task in dag.tasks:
if task.task_id not in task_ids_of_running_tis:
continue
task.dag = dag
tasks.append(task)
return set_state(tasks=tasks, execution_date=execution_date,
state=State.FAILED, commit=commit, session=session)
@provide_session
def set_dag_run_state_to_running(dag, execution_date, commit=False, session=None):
res = []
if not dag or not execution_date:
return res
if commit:
_set_dag_run_state(dag.dag_id, execution_date, State.RUNNING, session)
return res
| true
| true
|
f7149e2412d81d75a93d4991ff57ab4e7bd62d19
| 1,125
|
py
|
Python
|
laplace/__init__.py
|
georgezefko/Laplace
|
c488f7bf739297bab5d771f65635352a07716ca0
|
[
"MIT"
] | null | null | null |
laplace/__init__.py
|
georgezefko/Laplace
|
c488f7bf739297bab5d771f65635352a07716ca0
|
[
"MIT"
] | null | null | null |
laplace/__init__.py
|
georgezefko/Laplace
|
c488f7bf739297bab5d771f65635352a07716ca0
|
[
"MIT"
] | null | null | null |
"""
.. include:: ../README.md
.. include:: ../examples/regression_example.md
.. include:: ../examples/calibration_example.md
"""
REGRESSION = 'regression'
CLASSIFICATION = 'classification'
from laplace.baselaplace import BaseLaplace, ParametricLaplace, FullLaplace, KronLaplace, DiagLaplace, LowRankLaplace
from laplace.lllaplace import LLLaplace, FullLLLaplace, KronLLLaplace, DiagLLLaplace
from laplace.subnetlaplace import SubnetLaplace, FullSubnetLaplace, DiagSubnetLaplace
from laplace.laplace import Laplace
from laplace.marglik_training import marglik_training
__all__ = ['Laplace', # direct access to all Laplace classes via unified interface
'BaseLaplace', 'ParametricLaplace', # base-class and its (first-level) subclasses
'FullLaplace', 'KronLaplace', 'DiagLaplace', 'LowRankLaplace', # all-weights
'LLLaplace', # base-class last-layer
'FullLLLaplace', 'KronLLLaplace', 'DiagLLLaplace', # last-layer
'SubnetLaplace', # base-class subnetwork
'FullSubnetLaplace', 'DiagSubnetLaplace', # subnetwork
'marglik_training'] # methods
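# Illustrative sketch (hedged; the exact constructor signature may differ in this
# version): the unified interface above dispatches to one of the listed classes.
#   from laplace import Laplace
#   la = Laplace(model, 'classification')
#   la.fit(train_loader)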
| 46.875
| 117
| 0.734222
|
REGRESSION = 'regression'
CLASSIFICATION = 'classification'
from laplace.baselaplace import BaseLaplace, ParametricLaplace, FullLaplace, KronLaplace, DiagLaplace, LowRankLaplace
from laplace.lllaplace import LLLaplace, FullLLLaplace, KronLLLaplace, DiagLLLaplace
from laplace.subnetlaplace import SubnetLaplace, FullSubnetLaplace, DiagSubnetLaplace
from laplace.laplace import Laplace
from laplace.marglik_training import marglik_training
__all__ = ['Laplace',
'BaseLaplace', 'ParametricLaplace',
'FullLaplace', 'KronLaplace', 'DiagLaplace', 'LowRankLaplace',
'LLLaplace',
'FullLLLaplace', 'KronLLLaplace', 'DiagLLLaplace',
'SubnetLaplace',
'FullSubnetLaplace', 'DiagSubnetLaplace',
'marglik_training']
| true
| true
|
f7149e6533709c3087902b7fa956335e88e12f63
| 450
|
py
|
Python
|
paperplane/backends/click/echo.py
|
abhilash1in/paperplane
|
1dfda182dc8a70fe08fa2284ea63b434246c394b
|
[
"MIT"
] | null | null | null |
paperplane/backends/click/echo.py
|
abhilash1in/paperplane
|
1dfda182dc8a70fe08fa2284ea63b434246c394b
|
[
"MIT"
] | null | null | null |
paperplane/backends/click/echo.py
|
abhilash1in/paperplane
|
1dfda182dc8a70fe08fa2284ea63b434246c394b
|
[
"MIT"
] | null | null | null |
import logging
from typing import Optional
from paperplane.backends.click import _secho
logger = logging.getLogger(__name__)
def run(
prompt: str,
color: Optional[str] = None,
fg: Optional[str] = None,
bg: Optional[str] = None,
bold: Optional[bool] = False,
):
if prompt is not None:
return _secho(message=prompt, fg=color or fg, bg=bg, bold=bold)
else:
logger.warning("prompt is None. Nothing to do.")
| 23.684211
| 71
| 0.668889
|
import logging
from typing import Optional
from paperplane.backends.click import _secho
logger = logging.getLogger(__name__)
def run(
prompt: str,
color: Optional[str] = None,
fg: Optional[str] = None,
bg: Optional[str] = None,
bold: Optional[bool] = False,
):
if prompt is not None:
return _secho(message=prompt, fg=color or fg, bg=bg, bold=bold)
else:
logger.warning("prompt is None. Nothing to do.")
| true
| true
|
f7149f74e1c827d11855a1dd871ba6c9659096d7
| 9,270
|
py
|
Python
|
trac/web/tests/auth.py
|
lelit/trac
|
ee8f811a29321f3c0fc8b8235d143e0ffcd6d013
|
[
"BSD-3-Clause"
] | 1
|
2017-08-03T07:04:28.000Z
|
2017-08-03T07:04:28.000Z
|
trac/web/tests/auth.py
|
lelit/trac
|
ee8f811a29321f3c0fc8b8235d143e0ffcd6d013
|
[
"BSD-3-Clause"
] | null | null | null |
trac/web/tests/auth.py
|
lelit/trac
|
ee8f811a29321f3c0fc8b8235d143e0ffcd6d013
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2005-2013 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
import os
import trac.tests.compat
from trac.core import TracError
from trac.test import EnvironmentStub, Mock
from trac.web.auth import BasicAuthentication, LoginModule
from trac.web.href import Href
from Cookie import SimpleCookie as Cookie
import unittest
class LoginModuleTestCase(unittest.TestCase):
def setUp(self):
self.env = EnvironmentStub()
self.module = LoginModule(self.env)
def tearDown(self):
self.env.reset_db()
def test_anonymous_access(self):
req = Mock(incookie=Cookie(), href=Href('/trac.cgi'),
remote_addr='127.0.0.1', remote_user=None,
base_path='/trac.cgi')
self.assertIsNone(self.module.authenticate(req))
def test_unknown_cookie_access(self):
incookie = Cookie()
incookie['trac_auth'] = '123'
req = Mock(cgi_location='/trac', href=Href('/trac.cgi'),
incookie=incookie, outcookie=Cookie(),
remote_addr='127.0.0.1', remote_user=None,
base_path='/trac.cgi')
self.assertIsNone(self.module.authenticate(req))
def test_known_cookie_access(self):
self.env.db_transaction("""
INSERT INTO auth_cookie (cookie, name, ipnr)
VALUES ('123', 'john', '127.0.0.1')""")
incookie = Cookie()
incookie['trac_auth'] = '123'
outcookie = Cookie()
req = Mock(incookie=incookie, outcookie=outcookie,
href=Href('/trac.cgi'), base_path='/trac.cgi',
remote_addr='127.0.0.1', remote_user=None)
self.assertEqual('john', self.module.authenticate(req))
self.assertNotIn('auth_cookie', req.outcookie)
def test_known_cookie_ip_check_enabled(self):
self.env.config.set('trac', 'check_auth_ip', 'yes')
self.env.db_transaction("""
INSERT INTO auth_cookie (cookie, name, ipnr)
VALUES ('123', 'john', '127.0.0.1')""")
incookie = Cookie()
incookie['trac_auth'] = '123'
outcookie = Cookie()
req = Mock(cgi_location='/trac', href=Href('/trac.cgi'),
incookie=incookie, outcookie=outcookie,
remote_addr='192.168.0.100', remote_user=None,
base_path='/trac.cgi')
self.assertIsNone(self.module.authenticate(req))
self.assertIn('trac_auth', req.outcookie)
def test_known_cookie_ip_check_disabled(self):
self.env.config.set('trac', 'check_auth_ip', 'no')
self.env.db_transaction("""
INSERT INTO auth_cookie (cookie, name, ipnr)
VALUES ('123', 'john', '127.0.0.1')""")
incookie = Cookie()
incookie['trac_auth'] = '123'
outcookie = Cookie()
req = Mock(incookie=incookie, outcookie=outcookie,
href=Href('/trac.cgi'), base_path='/trac.cgi',
remote_addr='192.168.0.100', remote_user=None)
self.assertEqual('john', self.module.authenticate(req))
self.assertNotIn('auth_cookie', req.outcookie)
def test_login(self):
outcookie = Cookie()
# remote_user must be upper case to test that by default, case is
# preserved.
req = Mock(cgi_location='/trac', href=Href('/trac.cgi'),
incookie=Cookie(), outcookie=outcookie,
remote_addr='127.0.0.1', remote_user='john',
authname='john', base_path='/trac.cgi')
self.module._do_login(req)
self.assertIn('trac_auth', outcookie, '"trac_auth" Cookie not set')
auth_cookie = outcookie['trac_auth'].value
self.assertEqual([('john', '127.0.0.1')], self.env.db_query(
"SELECT name, ipnr FROM auth_cookie WHERE cookie=%s",
(auth_cookie,)))
def test_login_ignore_case(self):
"""
        Test that login is successful when the usernames differ in case, but case
is ignored.
"""
self.env.config.set('trac', 'ignore_auth_case', 'yes')
outcookie = Cookie()
req = Mock(cgi_location='/trac', href=Href('/trac.cgi'),
incookie=Cookie(), outcookie=outcookie,
remote_addr='127.0.0.1', remote_user='John',
authname='anonymous', base_path='/trac.cgi')
self.module._do_login(req)
self.assertIn('trac_auth', outcookie, '"trac_auth" Cookie not set')
auth_cookie = outcookie['trac_auth'].value
self.assertEqual([('john', '127.0.0.1')], self.env.db_query(
"SELECT name, ipnr FROM auth_cookie WHERE cookie=%s",
(auth_cookie,)))
def test_login_no_username(self):
req = Mock(incookie=Cookie(), href=Href('/trac.cgi'),
remote_addr='127.0.0.1', remote_user=None,
base_path='/trac.cgi')
self.assertRaises(TracError, self.module._do_login, req)
def test_already_logged_in_same_user(self):
self.env.db_transaction("""
INSERT INTO auth_cookie (cookie, name, ipnr)
VALUES ('123', 'john', '127.0.0.1')""")
incookie = Cookie()
incookie['trac_auth'] = '123'
req = Mock(incookie=incookie, outcookie=Cookie(),
href=Href('/trac.cgi'), base_path='/trac.cgi',
remote_addr='127.0.0.1', remote_user='john', authname='john')
self.module._do_login(req) # this shouldn't raise an error
def test_already_logged_in_different_user(self):
self.env.db_transaction("""
INSERT INTO auth_cookie (cookie, name, ipnr)
VALUES ('123', 'john', '127.0.0.1')""")
incookie = Cookie()
incookie['trac_auth'] = '123'
req = Mock(incookie=incookie, authname='john',
href=Href('/trac.cgi'), base_path='/trac.cgi',
remote_addr='127.0.0.1', remote_user='tom')
self.assertRaises(TracError, self.module._do_login, req)
def test_logout(self):
self.env.db_transaction("""
INSERT INTO auth_cookie (cookie, name, ipnr)
VALUES ('123', 'john', '127.0.0.1')""")
incookie = Cookie()
incookie['trac_auth'] = '123'
outcookie = Cookie()
req = Mock(cgi_location='/trac', href=Href('/trac.cgi'),
incookie=incookie, outcookie=outcookie,
remote_addr='127.0.0.1', remote_user=None,
authname='john', method='POST', base_path='/trac.cgi')
self.module._do_logout(req)
self.assertIn('trac_auth', outcookie)
self.assertFalse(self.env.db_query(
"SELECT name, ipnr FROM auth_cookie WHERE name='john'"))
def test_logout_not_logged_in(self):
req = Mock(cgi_location='/trac', href=Href('/trac.cgi'),
incookie=Cookie(), outcookie=Cookie(),
remote_addr='127.0.0.1', remote_user=None,
authname='anonymous', method='POST', base_path='/trac.cgi')
self.module._do_logout(req) # this shouldn't raise an error
def test_logout_protect(self):
self.env.db_transaction("""
INSERT INTO auth_cookie (cookie, name, ipnr)
VALUES ('123', 'john', '127.0.0.1')""")
incookie = Cookie()
incookie['trac_auth'] = '123'
outcookie = Cookie()
req = Mock(cgi_location='/trac', href=Href('/trac.cgi'),
incookie=incookie, outcookie=outcookie,
remote_addr='127.0.0.1', remote_user=None,
authname='john', method='GET', base_path='/trac.cgi')
self.module._do_logout(req)
self.assertNotIn('trac_auth', outcookie)
self.assertEqual(
[('john', '127.0.0.1')],
self.env.db_query("SELECT name, ipnr FROM auth_cookie "
"WHERE cookie='123'"))
class BasicAuthenticationTestCase(unittest.TestCase):
def setUp(self):
filename = os.path.join(os.path.split(__file__)[0], 'htpasswd.txt')
self.auth = BasicAuthentication(filename, 'realm')
def tearDown(self):
self.auth = None
def test_crypt(self):
self.assertTrue(self.auth.test('crypt', 'crypt'))
self.assertFalse(self.auth.test('crypt', 'other'))
def test_md5(self):
self.assertTrue(self.auth.test('md5', 'md5'))
self.assertFalse(self.auth.test('md5', 'other'))
def test_sha(self):
self.assertTrue(self.auth.test('sha', 'sha'))
self.assertFalse(self.auth.test('sha', 'other'))
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(LoginModuleTestCase))
suite.addTest(unittest.makeSuite(BasicAuthenticationTestCase))
return suite
if __name__ == '__main__':
unittest.main(defaultTest='suite')
| 40.480349
| 80
| 0.602697
|
import os
import trac.tests.compat
from trac.core import TracError
from trac.test import EnvironmentStub, Mock
from trac.web.auth import BasicAuthentication, LoginModule
from trac.web.href import Href
from Cookie import SimpleCookie as Cookie
import unittest
class LoginModuleTestCase(unittest.TestCase):
def setUp(self):
self.env = EnvironmentStub()
self.module = LoginModule(self.env)
def tearDown(self):
self.env.reset_db()
def test_anonymous_access(self):
req = Mock(incookie=Cookie(), href=Href('/trac.cgi'),
remote_addr='127.0.0.1', remote_user=None,
base_path='/trac.cgi')
self.assertIsNone(self.module.authenticate(req))
def test_unknown_cookie_access(self):
incookie = Cookie()
incookie['trac_auth'] = '123'
req = Mock(cgi_location='/trac', href=Href('/trac.cgi'),
incookie=incookie, outcookie=Cookie(),
remote_addr='127.0.0.1', remote_user=None,
base_path='/trac.cgi')
self.assertIsNone(self.module.authenticate(req))
def test_known_cookie_access(self):
self.env.db_transaction("""
INSERT INTO auth_cookie (cookie, name, ipnr)
VALUES ('123', 'john', '127.0.0.1')""")
incookie = Cookie()
incookie['trac_auth'] = '123'
outcookie = Cookie()
req = Mock(incookie=incookie, outcookie=outcookie,
href=Href('/trac.cgi'), base_path='/trac.cgi',
remote_addr='127.0.0.1', remote_user=None)
self.assertEqual('john', self.module.authenticate(req))
self.assertNotIn('auth_cookie', req.outcookie)
def test_known_cookie_ip_check_enabled(self):
self.env.config.set('trac', 'check_auth_ip', 'yes')
self.env.db_transaction("""
INSERT INTO auth_cookie (cookie, name, ipnr)
VALUES ('123', 'john', '127.0.0.1')""")
incookie = Cookie()
incookie['trac_auth'] = '123'
outcookie = Cookie()
req = Mock(cgi_location='/trac', href=Href('/trac.cgi'),
incookie=incookie, outcookie=outcookie,
remote_addr='192.168.0.100', remote_user=None,
base_path='/trac.cgi')
self.assertIsNone(self.module.authenticate(req))
self.assertIn('trac_auth', req.outcookie)
def test_known_cookie_ip_check_disabled(self):
self.env.config.set('trac', 'check_auth_ip', 'no')
self.env.db_transaction("""
INSERT INTO auth_cookie (cookie, name, ipnr)
VALUES ('123', 'john', '127.0.0.1')""")
incookie = Cookie()
incookie['trac_auth'] = '123'
outcookie = Cookie()
req = Mock(incookie=incookie, outcookie=outcookie,
href=Href('/trac.cgi'), base_path='/trac.cgi',
remote_addr='192.168.0.100', remote_user=None)
self.assertEqual('john', self.module.authenticate(req))
self.assertNotIn('auth_cookie', req.outcookie)
def test_login(self):
outcookie = Cookie()
req = Mock(cgi_location='/trac', href=Href('/trac.cgi'),
incookie=Cookie(), outcookie=outcookie,
remote_addr='127.0.0.1', remote_user='john',
authname='john', base_path='/trac.cgi')
self.module._do_login(req)
self.assertIn('trac_auth', outcookie, '"trac_auth" Cookie not set')
auth_cookie = outcookie['trac_auth'].value
self.assertEqual([('john', '127.0.0.1')], self.env.db_query(
"SELECT name, ipnr FROM auth_cookie WHERE cookie=%s",
(auth_cookie,)))
def test_login_ignore_case(self):
self.env.config.set('trac', 'ignore_auth_case', 'yes')
outcookie = Cookie()
req = Mock(cgi_location='/trac', href=Href('/trac.cgi'),
incookie=Cookie(), outcookie=outcookie,
remote_addr='127.0.0.1', remote_user='John',
authname='anonymous', base_path='/trac.cgi')
self.module._do_login(req)
self.assertIn('trac_auth', outcookie, '"trac_auth" Cookie not set')
auth_cookie = outcookie['trac_auth'].value
self.assertEqual([('john', '127.0.0.1')], self.env.db_query(
"SELECT name, ipnr FROM auth_cookie WHERE cookie=%s",
(auth_cookie,)))
def test_login_no_username(self):
req = Mock(incookie=Cookie(), href=Href('/trac.cgi'),
remote_addr='127.0.0.1', remote_user=None,
base_path='/trac.cgi')
self.assertRaises(TracError, self.module._do_login, req)
def test_already_logged_in_same_user(self):
self.env.db_transaction("""
INSERT INTO auth_cookie (cookie, name, ipnr)
VALUES ('123', 'john', '127.0.0.1')""")
incookie = Cookie()
incookie['trac_auth'] = '123'
req = Mock(incookie=incookie, outcookie=Cookie(),
href=Href('/trac.cgi'), base_path='/trac.cgi',
remote_addr='127.0.0.1', remote_user='john', authname='john')
self.module._do_login(req)
def test_already_logged_in_different_user(self):
self.env.db_transaction("""
INSERT INTO auth_cookie (cookie, name, ipnr)
VALUES ('123', 'john', '127.0.0.1')""")
incookie = Cookie()
incookie['trac_auth'] = '123'
req = Mock(incookie=incookie, authname='john',
href=Href('/trac.cgi'), base_path='/trac.cgi',
remote_addr='127.0.0.1', remote_user='tom')
self.assertRaises(TracError, self.module._do_login, req)
def test_logout(self):
self.env.db_transaction("""
INSERT INTO auth_cookie (cookie, name, ipnr)
VALUES ('123', 'john', '127.0.0.1')""")
incookie = Cookie()
incookie['trac_auth'] = '123'
outcookie = Cookie()
req = Mock(cgi_location='/trac', href=Href('/trac.cgi'),
incookie=incookie, outcookie=outcookie,
remote_addr='127.0.0.1', remote_user=None,
authname='john', method='POST', base_path='/trac.cgi')
self.module._do_logout(req)
self.assertIn('trac_auth', outcookie)
self.assertFalse(self.env.db_query(
"SELECT name, ipnr FROM auth_cookie WHERE name='john'"))
def test_logout_not_logged_in(self):
req = Mock(cgi_location='/trac', href=Href('/trac.cgi'),
incookie=Cookie(), outcookie=Cookie(),
remote_addr='127.0.0.1', remote_user=None,
authname='anonymous', method='POST', base_path='/trac.cgi')
self.module._do_logout(req) # this shouldn't raise an error
def test_logout_protect(self):
self.env.db_transaction("""
INSERT INTO auth_cookie (cookie, name, ipnr)
VALUES ('123', 'john', '127.0.0.1')""")
incookie = Cookie()
incookie['trac_auth'] = '123'
outcookie = Cookie()
req = Mock(cgi_location='/trac', href=Href('/trac.cgi'),
incookie=incookie, outcookie=outcookie,
remote_addr='127.0.0.1', remote_user=None,
authname='john', method='GET', base_path='/trac.cgi')
self.module._do_logout(req)
self.assertNotIn('trac_auth', outcookie)
self.assertEqual(
[('john', '127.0.0.1')],
self.env.db_query("SELECT name, ipnr FROM auth_cookie "
"WHERE cookie='123'"))
class BasicAuthenticationTestCase(unittest.TestCase):
def setUp(self):
filename = os.path.join(os.path.split(__file__)[0], 'htpasswd.txt')
self.auth = BasicAuthentication(filename, 'realm')
def tearDown(self):
self.auth = None
def test_crypt(self):
self.assertTrue(self.auth.test('crypt', 'crypt'))
self.assertFalse(self.auth.test('crypt', 'other'))
def test_md5(self):
self.assertTrue(self.auth.test('md5', 'md5'))
self.assertFalse(self.auth.test('md5', 'other'))
def test_sha(self):
self.assertTrue(self.auth.test('sha', 'sha'))
self.assertFalse(self.auth.test('sha', 'other'))
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(LoginModuleTestCase))
suite.addTest(unittest.makeSuite(BasicAuthenticationTestCase))
return suite
if __name__ == '__main__':
unittest.main(defaultTest='suite')
| true
| true
|
f7149f8bfbbe334b4581ab62205607d9646fef42
| 1,364
|
py
|
Python
|
tests/test_download.py
|
iacopoff/climetlab
|
cc01604de991928018291725407d891f3c01ce5b
|
[
"Apache-2.0"
] | null | null | null |
tests/test_download.py
|
iacopoff/climetlab
|
cc01604de991928018291725407d891f3c01ce5b
|
[
"Apache-2.0"
] | null | null | null |
tests/test_download.py
|
iacopoff/climetlab
|
cc01604de991928018291725407d891f3c01ce5b
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
# (C) Copyright 2020 ECMWF.
#
# This software is licensed under the terms of the Apache Licence Version 2.0
# which can be obtained at http://www.apache.org/licenses/LICENSE-2.0.
# In applying this licence, ECMWF does not waive the privileges and immunities
# granted to it by virtue of its status as an intergovernmental organisation
# nor does it submit to any jurisdiction.
#
import os
import pathlib
import time
from climetlab import settings
from climetlab.utils import download_and_cache
def path_to_url(path):
return pathlib.Path(os.path.abspath(path)).as_uri()
def test_download_1():
url = "https://github.com/ecmwf/climetlab/raw/main/docs/examples/test.grib?_=%s" % (
time.time(),
)
download_and_cache(url)
def test_download_2():
url = "https://github.com/ecmwf/climetlab/raw/main/docs/examples/test.grib"
download_and_cache(url)
def test_download_3():
with settings.temporary("download-out-of-date-urls", True):
url = "https://get.ecmwf.int/test-data/climetlab/input/test.txt"
download_and_cache(url)
def test_download_4():
url = "https://get.ecmwf.int/test-data/climetlab/input/missing.txt"
r = download_and_cache(url, return_none_on_404=True)
assert r is None, r
if __name__ == "__main__":
from climetlab.testing import main
main(globals())
| 26.230769
| 88
| 0.725806
|
import os
import pathlib
import time
from climetlab import settings
from climetlab.utils import download_and_cache
def path_to_url(path):
return pathlib.Path(os.path.abspath(path)).as_uri()
def test_download_1():
url = "https://github.com/ecmwf/climetlab/raw/main/docs/examples/test.grib?_=%s" % (
time.time(),
)
download_and_cache(url)
def test_download_2():
url = "https://github.com/ecmwf/climetlab/raw/main/docs/examples/test.grib"
download_and_cache(url)
def test_download_3():
with settings.temporary("download-out-of-date-urls", True):
url = "https://get.ecmwf.int/test-data/climetlab/input/test.txt"
download_and_cache(url)
def test_download_4():
url = "https://get.ecmwf.int/test-data/climetlab/input/missing.txt"
r = download_and_cache(url, return_none_on_404=True)
assert r is None, r
if __name__ == "__main__":
from climetlab.testing import main
main(globals())
| true
| true
|
f7149ff103223405ef329d427e9cce2551ef21f0
| 207
|
py
|
Python
|
vet_website/vet_website/doctype/vetasset/test_vetasset.py
|
rezazrna/vet_website
|
26e731cb10c31d69292f33659c49c3cfa5646c39
|
[
"MIT"
] | null | null | null |
vet_website/vet_website/doctype/vetasset/test_vetasset.py
|
rezazrna/vet_website
|
26e731cb10c31d69292f33659c49c3cfa5646c39
|
[
"MIT"
] | null | null | null |
vet_website/vet_website/doctype/vetasset/test_vetasset.py
|
rezazrna/vet_website
|
26e731cb10c31d69292f33659c49c3cfa5646c39
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright (c) 2020, bikbuk and Contributors
# See license.txt
from __future__ import unicode_literals
# import frappe
import unittest
class TestVetAsset(unittest.TestCase):
pass
| 18.818182
| 45
| 0.758454
|
from __future__ import unicode_literals
import unittest
class TestVetAsset(unittest.TestCase):
pass
| true
| true
|
f714a04a559914d1915f3c51c7c305616a25454f
| 2,090
|
py
|
Python
|
setup.py
|
kruserr/i6
|
90a198ae543844faa1073d8bd317f11e1bb80298
|
[
"MIT"
] | 1
|
2020-07-05T22:08:22.000Z
|
2020-07-05T22:08:22.000Z
|
setup.py
|
kruserr/i6
|
90a198ae543844faa1073d8bd317f11e1bb80298
|
[
"MIT"
] | null | null | null |
setup.py
|
kruserr/i6
|
90a198ae543844faa1073d8bd317f11e1bb80298
|
[
"MIT"
] | null | null | null |
import setuptools
import urllib.request
DESCRIPTION = 'A standardized collection of python libs and tools'
try:
with open('README.md', 'r') as f:
LONG_DESCRIPTION = f.read()
except FileNotFoundError:
LONG_DESCRIPTION = DESCRIPTION
try:
with open('VERSION', 'r') as f:
VERSION = f.read()
except FileNotFoundError:
VERSION = 'test'
# To whenever PYPI allows direct references for dependencies
# deps = [
# {
# 'name': 'aiocheck',
# 'url': 'https://github.com/kruserr/aiocheck',
# 'tag': '',
# },
# ]
# for i in range(len(deps)):
# try:
# if (deps[i]['tag'] is None) or (len(deps[i]['tag']) == 0):
# raise KeyError()
# except KeyError:
# request = urllib.request.urlopen(f"{deps[i]['url']}/releases/latest").geturl()
# deps[i]['tag'] = request.split('/')[::-1][0]
# deps[i] = f"{deps[i]['name']} @ git+{deps[i]['url']}@{deps[i]['tag']}"
setuptools.setup(
name='i6',
version=VERSION,
author='kruserr',
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
long_description_content_type='text/markdown',
url='https://github.com/kruserr/i6',
keywords='i6 toolchain collection libs tools',
project_urls={
'Documentation': 'https://github.com/kruserr/i6/wiki',
'Source': 'https://github.com/kruserr/i6',
},
packages=setuptools.find_packages(
where='src',
exclude=['tests*'],
),
package_dir={
'': 'src',
},
install_requires=[
'docker',
'pyftpdlib',
'SQLAlchemy',
'marshmallow',
'cryptography',
],
entry_points = {
'console_scripts': ['i6=i6.__main__:main'],
},
zip_safe=True,
classifiers=[
'Topic :: Software Development',
'Development Status :: 1 - Planning',
'Intended Audience :: Developers',
'Programming Language :: Python :: 3',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
],
python_requires='>=3.7',
)
| 26.455696
| 88
| 0.574641
|
import setuptools
import urllib.request
DESCRIPTION = 'A standardized collection of python libs and tools'
try:
with open('README.md', 'r') as f:
LONG_DESCRIPTION = f.read()
except FileNotFoundError:
LONG_DESCRIPTION = DESCRIPTION
try:
with open('VERSION', 'r') as f:
VERSION = f.read()
except FileNotFoundError:
VERSION = 'test'
setuptools.setup(
name='i6',
version=VERSION,
author='kruserr',
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
long_description_content_type='text/markdown',
url='https://github.com/kruserr/i6',
keywords='i6 toolchain collection libs tools',
project_urls={
'Documentation': 'https://github.com/kruserr/i6/wiki',
'Source': 'https://github.com/kruserr/i6',
},
packages=setuptools.find_packages(
where='src',
exclude=['tests*'],
),
package_dir={
'': 'src',
},
install_requires=[
'docker',
'pyftpdlib',
'SQLAlchemy',
'marshmallow',
'cryptography',
],
entry_points = {
'console_scripts': ['i6=i6.__main__:main'],
},
zip_safe=True,
classifiers=[
'Topic :: Software Development',
'Development Status :: 1 - Planning',
'Intended Audience :: Developers',
'Programming Language :: Python :: 3',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
],
python_requires='>=3.7',
)
| true
| true
|
f714a04aab9e0bff1e00c2120d335960fe4730f0
| 9,816
|
py
|
Python
|
surveysim/music2/interpolate.py
|
jakgel/clusterbuster
|
d79400a0faf43dece457d99b024b955aef544fc2
|
[
"MIT"
] | 1
|
2018-09-10T14:06:45.000Z
|
2018-09-10T14:06:45.000Z
|
surveysim/music2/interpolate.py
|
jakgel/clusterbuster
|
d79400a0faf43dece457d99b024b955aef544fc2
|
[
"MIT"
] | null | null | null |
surveysim/music2/interpolate.py
|
jakgel/clusterbuster
|
d79400a0faf43dece457d99b024b955aef544fc2
|
[
"MIT"
] | null | null | null |
import numpy as np
import scipy.interpolate as interpolate
import matplotlib.pyplot as plt
import clusterbuster.mathut as math
"""
Start with e.g. InterpolateRadio2D(psiFile = '../Analysis_MUSIC2/Hoeft_radio/mach_psi_tablefine(10,3).txt', inter=(10,6))
"""
# from http://stackoverflow.com/questions/5328128/scipy-interpolation-of-large-matrix
def my_interp(X, Y, Z, x, y, spn=3):
xs,ys = map(np.array,(x,y))
z = np.zeros(xs.shape)
for i,(x,y) in enumerate(zip(xs,ys)):
# get the indices of the nearest x,y
xi = np.argmin(np.abs(X[0,:]-x))
yi = np.argmin(np.abs(Y[:,0]-y))
xlo = max(xi-spn, 0)
ylo = max(yi-spn, 0)
xhi = min(xi+spn, X[0,:].size)
yhi = min(yi+spn, Y[:,0].size)
# make slices of X,Y,Z that are only a few items wide
nX = X[xlo:xhi, ylo:yhi]
nY = Y[xlo:xhi, ylo:yhi]
nZ = Z[xlo:xhi, ylo:yhi]
intp = interpolate.interp2d(nX, nY, nZ)
z[i] = intp(x,y)[0]
return z
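# Editor's note: a hypothetical usage sketch, not part of the original file. It only
# illustrates the expected call shape: X, Y are meshgrid coordinate arrays, Z the sampled
# values, x and y the query points, and spn the half-width of the local window handed to
# interp2d (all names below are made up):
#   X, Y = np.meshgrid(x_axis, y_axis)
#   z_at_queries = my_interp(X, Y, Z_samples, x_query, y_query, spn=3)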
# from here on: done by myself
def LoadFile_psi(psiFile):
""" Just gives the Mach number and Temperature values """
#=== FILE A ===#
    # read the first line, split it, and convert each string to a float in scientific notation, e.g. float('1.31E+01'), or for a list: map(float, ['3.76E+00', '1.31E+01', '1.14E+01'])
with open(psiFile, 'r') as f:
first_line = f.readline()
psi_x = first_line.split()[2:] # Splits into list without first two elements
psi_x = np.asarray( [float(i) for i in psi_x ] ) # Converts strings to floats # Converts strings to floats
psi_y = np.loadtxt(psiFile,skiprows=0)[:,0]
return psi_x, psi_y
def InterpolateRadio2D(psiFile='../Analysis_MUSIC2/Hoeft_radio/mach_psi_table.txt', machFile='../Analysis_MUSIC2/Hoeft_radio/q_mach_machr_table.txt', saveplot='../Analysis_MUSIC2/Hoeft_radio/interpolated', psiFileNew = False, machFileNew = False, inter=(10,3)):
    # Currently the mach number is interpolated in a logarithmic space which is much sparser at lower mach numbers than anticipated
    # I suspect a double-exponential function for mach (both for the efficiency dependency and the step size)
    # Note that the original grid given in 'Hoeft_radio/mach_psi_table.txt' is (quite) regular in log-loglog space, which makes it very simple to invoke an interpolation function!
    # Irregular data points would make it necessary to use functions like scipy.interpolate.griddata(points, values, (grid_x, grid_y), method='cubic')
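    # Editor's note: an illustrative sketch, not part of the original file. For scattered
    # (irregular) samples the griddata call mentioned above would look roughly like this
    # (array names are hypothetical):
    #   points = np.column_stack([T_log.ravel(), M_loglog.ravel()])   # sample coordinates
    #   values = np.log10(H_psi).ravel()                              # sampled efficiencies
    #   Znew   = interpolate.griddata(points, values, (grid_T, grid_M), method='cubic')
    # The regular log/log-log grid used here avoids that and allows RectBivariateSpline below.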
plot_old = False
plot_new = False
plot_PhD = True
##==== psiFile for psi factor; machfile for mach-numbers conversion factors
H_mach = np.loadtxt(machFile,skiprows=0)
H_psi = np.loadtxt(psiFile,skiprows=0)[:,1::] # you wont get the temperature values ... read them separetely
psi_x,psi_y = LoadFile_psi(psiFile)
psi_x = np.log10( psi_x ) # converts to and log10 space
psi_y = np.log10(np.log10( psi_y )) # converts to and log10(log10) space
X, Y = np.meshgrid(psi_x, psi_y)
Z = np.log10(H_psi)
#interp_spline = interpolate.interp2d(x, y, Z) #, kind='cubic'
interp_spline = interpolate.RectBivariateSpline(psi_y, psi_x, Z) #, bbox=[None, None, None, None], kx=3, ky=3, s=0
xnew = np.arange(psi_x[0], psi_x[-1], (psi_x[-1]-psi_x[0])/(len(psi_x)*inter[0]) ) #np.arange(-4, 2, 4e-2) #
ynew = np.arange(psi_y[0], psi_y[-1], (psi_y[-1]-psi_y[0])/(len(psi_y)*inter[1]) ) #np.arange(0.2, 3, 2e-2) #
Znew = interp_spline(ynew, xnew )
keV2K = 1.16e7 # Translates keV to Kelvin
if plot_old:
plt.plot( np.arange(0, len(psi_x), 1 ), psi_x )
plt.plot( np.arange(0, len(psi_y), 1 ), psi_y )
plt.savefig(saveplot + '_linearity.png')
fig = plt.figure()
ax1 = plt.subplot(121)
ax1.pcolor( np.log10(keV2K) + psi_x, psi_y, Z)
ax1.set_title("Sparsely sampled function")
ax1.set_xlim([3.1, 9])
ax1.set_ylim([psi_y[0], 0.5])
ax1.set_xlabel('$\\mathrm{log_{10}(T)\\,[K]}$ ')
ax1.set_ylabel('$\\mathrm{log_{10}(log_{10}(M))\\,[]}$')
ax2 = plt.subplot(122)
im2 = ax2.pcolor( np.log10(keV2K) + xnew, ynew, Znew)
ax2.set_title("Interpolated function")
ax2.set_xlim([3.1, 9])
ax2.set_ylim([psi_y[0], 0.5])
ax2.set_xlabel('$\\mathrm{log_{10}(T)\\,[K]}$ ')
ax2.set_yticklabels([])
mach = [1.5,2.2,3.0,10.0]
c = [plt.cm.rainbow( (np.log10(np.log10(m))-ax1.get_ylim()[0])/abs(ax1.get_ylim()[1]-ax1.get_ylim()[0]) ) for m in mach]
for ii,m in enumerate(mach):
ax1.plot( [ax1.get_xlim()[0], ax1.get_xlim()[1]] , [np.log10(np.log10(m))]*2, '-', c=c[ii], lw=1.5, alpha=0.9 )
ax2.plot( [ax2.get_xlim()[0], ax2.get_xlim()[1]] , [np.log10(np.log10(m))]*2, '-', c=c[ii], lw=1.5, alpha=0.9 )
ax1.text(ax1.get_xlim()[0]+0.3, np.log10(np.log10(m))+0.02, 'Mach$=$%4.1f' % (m), fontsize=10, color=c[ii], alpha=0.9)
ax2.text(ax2.get_xlim()[0]+0.3, np.log10(np.log10(m))+0.02, 'Mach$=$%4.1f' % (m), fontsize=10, color=c[ii], alpha=0.9)
fig.subplots_adjust(right=0.8)
cbar_ax = fig.add_axes([0.85, 0.15, 0.05, 0.7])
fig.colorbar(im2, cax=cbar_ax)
plt.savefig(saveplot + '.png')
if plot_new:
fig = plt.figure()
ax1 = plt.subplot(111)
im2 = ax1.pcolor( np.log10(keV2K) + xnew, ynew, Znew, vmin=-8)
# ax1.set_title("Interpolated function")
ax1.set_xlim([7, 8.4])
ax1.set_ylim([np.log10(np.log10(1.7)), np.log10(np.log10(10.))])
ax1.set_xlabel('$\\mathrm{log_{10}(T)\\,[K]}$ ')
ax1.set_ylabel('$M$ ')
y_ticks = [np.log10(np.log10(m)) for m in [1.7,2.5,4,10]]
print( ['%.2e' % (y) for y in y_ticks], [10**(10**y) for y in y_ticks] )
ax1.set_yticklabels([10**(10**y) for y in y_ticks])
plt.yticks(y_ticks)
# temp = [1.5,2.2,3.0,10.0]
# c = [plt.cm.rainbow( (np.log10(np.log10(m))-ax1.get_ylim()[0])/abs(ax1.get_ylim()[1]-ax1.get_ylim()[0]) ) for m in mach]
# for ii,m in enumerate(mach):
# ax1.plot( [ax1.get_xlim()[0], ax1.get_xlim()[1]] , [np.log10(np.log10(m))]*2, '-', c=c[ii], lw=1.5, alpha=0.9 )
# ax2.plot( [ax2.get_xlim()[0], ax2.get_xlim()[1]] , [np.log10(np.log10(m))]*2, '-', c=c[ii], lw=1.5, alpha=0.9 )
#
# ax1.text(ax1.get_xlim()[0]+0.3, np.log10(np.log10(m))+0.02, 'Mach$=$%4.1f' % (m), fontsize=10, color=c[ii], alpha=0.9)
# ax2.text(ax2.get_xlim()[0]+0.3, np.log10(np.log10(m))+0.02, 'Mach$=$%4.1f' % (m), fontsize=10, color=c[ii], alpha=0.9)
fig.subplots_adjust(right=0.8)
cbar_ax = fig.add_axes([0.85, 0.15, 0.05, 0.7])
fig.colorbar(im2, cax=cbar_ax, label='$\log_{10}\Phi$',)
plt.savefig(saveplot + '_DSA.pdf')
plt.savefig(saveplot + '_DSA.png', dpi=800)
if plot_PhD:
fig = plt.figure()
temp = np.linspace(2,20,20)
print(temp)
mach = np.linspace(2,7,300)
psi_x,psi_y = LoadFile_psi(psiFile)
import itertools
H,M,T = [],[],[]
for t in temp:
results_temp = math.find_closest(psi_x, t)
results_mach = math.find_closest(psi_y, mach) #
H.append(H_psi[results_mach,np.ones_like(results_mach)*results_temp])
M.append(mach)
T.append(np.ones_like(results_mach)*t)
H = list(itertools.chain.from_iterable(H))
M = list(itertools.chain.from_iterable(M))
T = list(itertools.chain.from_iterable(T))
plt.scatter(M,np.log10(H),c=T,alpha=0.1,s=5)
cb = plt.colorbar(label='Downstream Temperature [keV]')
cb.set_alpha(1)
cb.draw_all()
plt.xlabel('Mach number $M$')
plt.ylabel('$\log_{10}\,\Phi(M,T)$')
plt.savefig(saveplot + '_PhD.pdf')
plt.savefig(saveplot + '_PhD.png', dpi=800)
# Save File A
if psiFileNew:
location = psiFileNew
else:
location = psiFile.replace('.txt', 'fine(%i,%i).txt' % (inter[0],inter[1]) )
header = '# Mach'
for x in xnew:
header += '%13.4e' % (10**x)
mf = open(location,"w")
mf.write(header + '\n')
for ii,y in enumerate(ynew):
string = '%9.4f' % (10**(10**y)) + ''.join(['%13.4e' % (10**z) for z in Znew[ii][:]])
mf.write(string + '\n')
mf.close()
#=== FILE B ===#
Y_new = np.empty( (1,1) )
for ii,h in enumerate(H_mach.T):
interp_spline = interpolate.interp1d( 10**psi_y , h, kind='cubic')
if Y_new.shape[0] > 1:
Y_new = np.hstack( (Y_new, np.expand_dims(interp_spline( 10**ynew ), axis=1) ) )
else:
Y_new = np.expand_dims(interp_spline( 10**ynew ), axis=1)
# Save File B
if machFileNew:
location = machFileNew
else:
location = machFile.replace('.txt', 'fine(%i,%i).txt' % (inter[0],inter[1]) )
header = '# q M r M*(1-1/r) s'
mf = open(location,"w")
mf.write(header + '\n')
for ii,y in enumerate(10**ynew):
string = ''.join(['%14.6e' % (y) for y in Y_new[:][ii]]) #some numbers are very large and ewould need a good margin
mf.write(string + '\n')
mf.close()
return 0
if __name__ == "__main__":
InterpolateRadio2D(psiFile = '../Analysis_MUSIC2/Hoeft_radio/mach_psi_tablefine(10,3).txt', inter=(3,2)) #(90,27)
| 42.310345
| 262
| 0.561125
|
import numpy as np
import scipy.interpolate as interpolate
import matplotlib.pyplot as plt
import clusterbuster.mathut as math
def my_interp(X, Y, Z, x, y, spn=3):
xs,ys = map(np.array,(x,y))
z = np.zeros(xs.shape)
for i,(x,y) in enumerate(zip(xs,ys)):
xi = np.argmin(np.abs(X[0,:]-x))
yi = np.argmin(np.abs(Y[:,0]-y))
xlo = max(xi-spn, 0)
ylo = max(yi-spn, 0)
xhi = min(xi+spn, X[0,:].size)
yhi = min(yi+spn, Y[:,0].size)
nX = X[xlo:xhi, ylo:yhi]
nY = Y[xlo:xhi, ylo:yhi]
nZ = Z[xlo:xhi, ylo:yhi]
intp = interpolate.interp2d(nX, nY, nZ)
z[i] = intp(x,y)[0]
return z
def LoadFile_psi(psiFile):
with open(psiFile, 'r') as f:
first_line = f.readline()
psi_x = first_line.split()[2:]
    psi_x = np.asarray( [float(i) for i in psi_x ] )
    psi_y = np.loadtxt(psiFile,skiprows=0)[:,0]
return psi_x, psi_y
def InterpolateRadio2D(psiFile='../Analysis_MUSIC2/Hoeft_radio/mach_psi_table.txt', machFile='../Analysis_MUSIC2/Hoeft_radio/q_mach_machr_table.txt', saveplot='../Analysis_MUSIC2/Hoeft_radio/interpolated', psiFileNew = False, machFileNew = False, inter=(10,3)):
plot_old = False
plot_new = False
plot_PhD = True
    H_mach = np.loadtxt(machFile,skiprows=0)
    H_psi = np.loadtxt(psiFile,skiprows=0)[:,1::]
psi_x,psi_y = LoadFile_psi(psiFile)
psi_x = np.log10( psi_x )
psi_y = np.log10(np.log10( psi_y ))
X, Y = np.meshgrid(psi_x, psi_y)
Z = np.log10(H_psi)
    interp_spline = interpolate.RectBivariateSpline(psi_y, psi_x, Z)
    xnew = np.arange(psi_x[0], psi_x[-1], (psi_x[-1]-psi_x[0])/(len(psi_x)*inter[0]) )
    ynew = np.arange(psi_y[0], psi_y[-1], (psi_y[-1]-psi_y[0])/(len(psi_y)*inter[1]) )
    Znew = interp_spline(ynew, xnew )
keV2K = 1.16e7
if plot_old:
plt.plot( np.arange(0, len(psi_x), 1 ), psi_x )
plt.plot( np.arange(0, len(psi_y), 1 ), psi_y )
plt.savefig(saveplot + '_linearity.png')
fig = plt.figure()
ax1 = plt.subplot(121)
ax1.pcolor( np.log10(keV2K) + psi_x, psi_y, Z)
ax1.set_title("Sparsely sampled function")
ax1.set_xlim([3.1, 9])
ax1.set_ylim([psi_y[0], 0.5])
ax1.set_xlabel('$\\mathrm{log_{10}(T)\\,[K]}$ ')
ax1.set_ylabel('$\\mathrm{log_{10}(log_{10}(M))\\,[]}$')
ax2 = plt.subplot(122)
im2 = ax2.pcolor( np.log10(keV2K) + xnew, ynew, Znew)
ax2.set_title("Interpolated function")
ax2.set_xlim([3.1, 9])
ax2.set_ylim([psi_y[0], 0.5])
ax2.set_xlabel('$\\mathrm{log_{10}(T)\\,[K]}$ ')
ax2.set_yticklabels([])
mach = [1.5,2.2,3.0,10.0]
c = [plt.cm.rainbow( (np.log10(np.log10(m))-ax1.get_ylim()[0])/abs(ax1.get_ylim()[1]-ax1.get_ylim()[0]) ) for m in mach]
for ii,m in enumerate(mach):
ax1.plot( [ax1.get_xlim()[0], ax1.get_xlim()[1]] , [np.log10(np.log10(m))]*2, '-', c=c[ii], lw=1.5, alpha=0.9 )
ax2.plot( [ax2.get_xlim()[0], ax2.get_xlim()[1]] , [np.log10(np.log10(m))]*2, '-', c=c[ii], lw=1.5, alpha=0.9 )
ax1.text(ax1.get_xlim()[0]+0.3, np.log10(np.log10(m))+0.02, 'Mach$=$%4.1f' % (m), fontsize=10, color=c[ii], alpha=0.9)
ax2.text(ax2.get_xlim()[0]+0.3, np.log10(np.log10(m))+0.02, 'Mach$=$%4.1f' % (m), fontsize=10, color=c[ii], alpha=0.9)
fig.subplots_adjust(right=0.8)
cbar_ax = fig.add_axes([0.85, 0.15, 0.05, 0.7])
fig.colorbar(im2, cax=cbar_ax)
plt.savefig(saveplot + '.png')
if plot_new:
fig = plt.figure()
ax1 = plt.subplot(111)
im2 = ax1.pcolor( np.log10(keV2K) + xnew, ynew, Znew, vmin=-8)
ax1.set_xlim([7, 8.4])
ax1.set_ylim([np.log10(np.log10(1.7)), np.log10(np.log10(10.))])
ax1.set_xlabel('$\\mathrm{log_{10}(T)\\,[K]}$ ')
ax1.set_ylabel('$M$ ')
y_ticks = [np.log10(np.log10(m)) for m in [1.7,2.5,4,10]]
print( ['%.2e' % (y) for y in y_ticks], [10**(10**y) for y in y_ticks] )
ax1.set_yticklabels([10**(10**y) for y in y_ticks])
plt.yticks(y_ticks)
fig.subplots_adjust(right=0.8)
cbar_ax = fig.add_axes([0.85, 0.15, 0.05, 0.7])
fig.colorbar(im2, cax=cbar_ax, label='$\log_{10}\Phi$',)
plt.savefig(saveplot + '_DSA.pdf')
plt.savefig(saveplot + '_DSA.png', dpi=800)
if plot_PhD:
fig = plt.figure()
temp = np.linspace(2,20,20)
print(temp)
mach = np.linspace(2,7,300)
psi_x,psi_y = LoadFile_psi(psiFile)
import itertools
H,M,T = [],[],[]
for t in temp:
results_temp = math.find_closest(psi_x, t)
results_mach = math.find_closest(psi_y, mach)
H.append(H_psi[results_mach,np.ones_like(results_mach)*results_temp])
M.append(mach)
T.append(np.ones_like(results_mach)*t)
H = list(itertools.chain.from_iterable(H))
M = list(itertools.chain.from_iterable(M))
T = list(itertools.chain.from_iterable(T))
plt.scatter(M,np.log10(H),c=T,alpha=0.1,s=5)
cb = plt.colorbar(label='Downstream Temperature [keV]')
cb.set_alpha(1)
cb.draw_all()
plt.xlabel('Mach number $M$')
plt.ylabel('$\log_{10}\,\Phi(M,T)$')
plt.savefig(saveplot + '_PhD.pdf')
plt.savefig(saveplot + '_PhD.png', dpi=800)
if psiFileNew:
location = psiFileNew
else:
location = psiFile.replace('.txt', 'fine(%i,%i).txt' % (inter[0],inter[1]) )
header = '# Mach'
for x in xnew:
header += '%13.4e' % (10**x)
mf = open(location,"w")
mf.write(header + '\n')
for ii,y in enumerate(ynew):
string = '%9.4f' % (10**(10**y)) + ''.join(['%13.4e' % (10**z) for z in Znew[ii][:]])
mf.write(string + '\n')
mf.close()
Y_new = np.empty( (1,1) )
for ii,h in enumerate(H_mach.T):
interp_spline = interpolate.interp1d( 10**psi_y , h, kind='cubic')
if Y_new.shape[0] > 1:
Y_new = np.hstack( (Y_new, np.expand_dims(interp_spline( 10**ynew ), axis=1) ) )
else:
Y_new = np.expand_dims(interp_spline( 10**ynew ), axis=1)
if machFileNew:
location = machFileNew
else:
location = machFile.replace('.txt', 'fine(%i,%i).txt' % (inter[0],inter[1]) )
header = '# q M r M*(1-1/r) s'
mf = open(location,"w")
mf.write(header + '\n')
for ii,y in enumerate(10**ynew):
string = ''.join(['%14.6e' % (y) for y in Y_new[:][ii]])
mf.write(string + '\n')
mf.close()
return 0
if __name__ == "__main__":
InterpolateRadio2D(psiFile = '../Analysis_MUSIC2/Hoeft_radio/mach_psi_tablefine(10,3).txt', inter=(3,2))
| true
| true
|
f714a0761443c704df72e831e1a6e9881201965d
| 202
|
py
|
Python
|
flutterapi/myapp/models.py
|
PariTA05/Todolist
|
fa18d02e3f989cbb37f877fb18e3e715bfc76f0c
|
[
"MIT"
] | null | null | null |
flutterapi/myapp/models.py
|
PariTA05/Todolist
|
fa18d02e3f989cbb37f877fb18e3e715bfc76f0c
|
[
"MIT"
] | null | null | null |
flutterapi/myapp/models.py
|
PariTA05/Todolist
|
fa18d02e3f989cbb37f877fb18e3e715bfc76f0c
|
[
"MIT"
] | null | null | null |
from django.db import models
class Todolist(models.Model):
title = models.CharField(max_length=100)
detail = models.TextField(null=True, blank=True)
def __str__(self):
return self.title
| 25.25
| 50
| 0.732673
|
from django.db import models
class Todolist(models.Model):
title = models.CharField(max_length=100)
detail = models.TextField(null=True, blank=True)
def __str__(self):
return self.title
| true
| true
|
f714a098ed7801e900ae6b4770cd7b10a0067882
| 301
|
py
|
Python
|
hackerrank/an-interesting-game-1/solution.py
|
SamProkopchuk/coding-problems
|
fa0ca2c05ac90e41945de1a5751e5545a8459ac4
|
[
"MIT"
] | null | null | null |
hackerrank/an-interesting-game-1/solution.py
|
SamProkopchuk/coding-problems
|
fa0ca2c05ac90e41945de1a5751e5545a8459ac4
|
[
"MIT"
] | null | null | null |
hackerrank/an-interesting-game-1/solution.py
|
SamProkopchuk/coding-problems
|
fa0ca2c05ac90e41945de1a5751e5545a8459ac4
|
[
"MIT"
] | null | null | null |
for _ in range(int(input())):
n = int(input())
rounds = 0
minidx = 1_000_000_000
for _, idx in sorted(zip(map(int, input().split()), range(n)), reverse=True):
if idx < minidx:
minidx = idx
rounds += 1
print("BOB" if rounds % 2 else "ANDY")
| 30.1
| 82
| 0.521595
|
for _ in range(int(input())):
n = int(input())
rounds = 0
minidx = 1_000_000_000
for _, idx in sorted(zip(map(int, input().split()), range(n)), reverse=True):
if idx < minidx:
minidx = idx
rounds += 1
print("BOB" if rounds % 2 else "ANDY")
| true
| true
|
f714a0caf548812f27ce214b01b5f8336d284c42
| 4,545
|
py
|
Python
|
note12/download_text_data.py
|
zhuyawen/LearnPaddle2
|
c2ed0cea1634159b1f005a0d2d954ce44b51b739
|
[
"Apache-2.0"
] | 163
|
2019-01-30T04:34:01.000Z
|
2021-12-10T12:19:03.000Z
|
note12/download_text_data.py
|
stonebb/LearnPaddle2
|
c3b6a9f5897e684b6de544cb12c959f7771a6c3c
|
[
"Apache-2.0"
] | 3
|
2019-07-15T07:14:17.000Z
|
2022-03-24T01:14:06.000Z
|
note12/download_text_data.py
|
stonebb/LearnPaddle2
|
c3b6a9f5897e684b6de544cb12c959f7771a6c3c
|
[
"Apache-2.0"
] | 83
|
2018-10-31T02:44:09.000Z
|
2022-03-25T13:40:54.000Z
|
import os
import random
import requests
import json
import time
# Parameters for the news categories
news_classify = [
[0, '民生', 'news_story'],
[1, '文化', 'news_culture'],
[2, '娱乐', 'news_entertainment'],
[3, '体育', 'news_sports'],
[4, '财经', 'news_finance'],
[5, '房产', 'news_house'],
[6, '汽车', 'news_car'],
[7, '教育', 'news_edu'],
[8, '科技', 'news_tech'],
[9, '军事', 'news_military'],
[10, '旅游', 'news_travel'],
[11, '国际', 'news_world'],
[12, '证券', 'stock'],
[13, '农业', 'news_agriculture'],
[14, '游戏', 'news_game']
]
# IDs of news titles that have already been downloaded
downloaded_data_id = []
# Number of news titles downloaded so far
downloaded_sum = 0
def get_data(tup, data_path):
global downloaded_data_id
global downloaded_sum
print('============%s============' % tup[1])
url = "http://it.snssdk.com/api/news/feed/v63/"
    # Request parameters for this news category, mimicking a normal client request
t = int(time.time() / 10000)
t = random.randint(6 * t, 10 * t)
querystring = {"category": tup[2], "max_behot_time": t, "last_refresh_sub_entrance_interval": "1524907088",
"loc_mode": "5",
"tt_from": "pre_load_more", "cp": "51a5ee4f38c50q1", "plugin_enable": "0", "iid": "31047425023",
"device_id": "51425358841", "ac": "wifi", "channel": "tengxun", "aid": "13",
"app_name": "news_article", "version_code": "631", "version_name": "6.3.1",
"device_platform": "android",
"ab_version": "333116,297979,317498,336556,295827,325046,239097,324283,170988,335432,332098,325198,336443,330632,297058,276203,286212,313219,328615,332041,329358,322321,327537,335710,333883,335102,334828,328670,324007,317077,334305,280773,335671,319960,333985,331719,336452,214069,31643,332881,333968,318434,207253,266310,321519,247847,281298,328218,335998,325618,333327,336199,323429,287591,288418,260650,326188,324614,335477,271178,326588,326524,326532",
"ab_client": "a1,c4,e1,f2,g2,f7", "ab_feature": "94563,102749", "abflag": "3", "ssmix": "a",
"device_type": "MuMu", "device_brand": "Android", "language": "zh", "os_api": "19",
"os_version": "4.4.4", "uuid": "008796762094657", "openudid": "b7215ea70ca32066",
"manifest_version_code": "631", "resolution": "1280*720", "dpi": "240",
"update_version_code": "6310", "_rticket": "1524907088018", "plugin": "256"}
headers = {
'cache-control': "no-cache",
'postman-token': "26530547-e697-1e8b-fd82-7c6014b3ee86",
'User-Agent': 'Dalvik/1.6.0 (Linux; U; Android 4.4.4; MuMu Build/V417IR) NewsArticle/6.3.1 okhttp/3.7.0.2'
}
    # Perform the HTTP request
response = requests.request("GET", url, headers=headers, params=querystring)
    # Parse the returned data
new_data = json.loads(response.text)
with open(data_path, 'a', encoding='utf-8') as fp:
for item in new_data['data']:
item = item['content']
item = item.replace('\"', '"')
item = json.loads(item)
            # Check whether the item contains an id and a news title
if 'item_id' in item.keys() and 'title' in item.keys():
item_id = item['item_id']
print(downloaded_sum, tup[0], tup[1], item['item_id'], item['title'])
                # Use the news id to check whether it has already been downloaded
if item_id not in downloaded_data_id:
downloaded_data_id.append(item_id)
                    # Append to the file in the fixed format
line = u"{}_!_{}_!_{}_!_{}".format(item['item_id'], tup[0], tup[1], item['title'])
line = line.replace('\n', '').replace('\r', '')
line = line + '\n'
fp.write(line)
downloaded_sum += 1
def get_routine(data_path):
global downloaded_sum
    # Read existing data from the file to avoid duplicates
if os.path.exists(data_path):
with open(data_path, 'r', encoding='utf-8') as fp:
lines = fp.readlines()
downloaded_sum = len(lines)
for line in lines:
item_id = int(line.split('_!_')[0])
downloaded_data_id.append(item_id)
print('在文件中已经读起了%d条数据' % downloaded_sum)
else:
os.makedirs(os.path.dirname(data_path))
while 1:
        # Start downloading data
time.sleep(10)
for classify in news_classify:
get_data(classify, data_path)
            # Stop downloading once 300000 items have been collected
if downloaded_sum >= 300000:
break
if __name__ == '__main__':
data_path = 'datasets/news_classify_data.txt'
dict_path = "datasets/dict_txt.txt"
    # Download the dataset
get_routine(data_path)
| 40.945946
| 475
| 0.572937
|
import os
import random
import requests
import json
import time
news_classify = [
[0, '民生', 'news_story'],
[1, '文化', 'news_culture'],
[2, '娱乐', 'news_entertainment'],
[3, '体育', 'news_sports'],
[4, '财经', 'news_finance'],
[5, '房产', 'news_house'],
[6, '汽车', 'news_car'],
[7, '教育', 'news_edu'],
[8, '科技', 'news_tech'],
[9, '军事', 'news_military'],
[10, '旅游', 'news_travel'],
[11, '国际', 'news_world'],
[12, '证券', 'stock'],
[13, '农业', 'news_agriculture'],
[14, '游戏', 'news_game']
]
downloaded_data_id = []
downloaded_sum = 0
def get_data(tup, data_path):
global downloaded_data_id
global downloaded_sum
print('============%s============' % tup[1])
url = "http://it.snssdk.com/api/news/feed/v63/"
t = int(time.time() / 10000)
t = random.randint(6 * t, 10 * t)
querystring = {"category": tup[2], "max_behot_time": t, "last_refresh_sub_entrance_interval": "1524907088",
"loc_mode": "5",
"tt_from": "pre_load_more", "cp": "51a5ee4f38c50q1", "plugin_enable": "0", "iid": "31047425023",
"device_id": "51425358841", "ac": "wifi", "channel": "tengxun", "aid": "13",
"app_name": "news_article", "version_code": "631", "version_name": "6.3.1",
"device_platform": "android",
"ab_version": "333116,297979,317498,336556,295827,325046,239097,324283,170988,335432,332098,325198,336443,330632,297058,276203,286212,313219,328615,332041,329358,322321,327537,335710,333883,335102,334828,328670,324007,317077,334305,280773,335671,319960,333985,331719,336452,214069,31643,332881,333968,318434,207253,266310,321519,247847,281298,328218,335998,325618,333327,336199,323429,287591,288418,260650,326188,324614,335477,271178,326588,326524,326532",
"ab_client": "a1,c4,e1,f2,g2,f7", "ab_feature": "94563,102749", "abflag": "3", "ssmix": "a",
"device_type": "MuMu", "device_brand": "Android", "language": "zh", "os_api": "19",
"os_version": "4.4.4", "uuid": "008796762094657", "openudid": "b7215ea70ca32066",
"manifest_version_code": "631", "resolution": "1280*720", "dpi": "240",
"update_version_code": "6310", "_rticket": "1524907088018", "plugin": "256"}
headers = {
'cache-control': "no-cache",
'postman-token': "26530547-e697-1e8b-fd82-7c6014b3ee86",
'User-Agent': 'Dalvik/1.6.0 (Linux; U; Android 4.4.4; MuMu Build/V417IR) NewsArticle/6.3.1 okhttp/3.7.0.2'
}
response = requests.request("GET", url, headers=headers, params=querystring)
new_data = json.loads(response.text)
with open(data_path, 'a', encoding='utf-8') as fp:
for item in new_data['data']:
item = item['content']
item = item.replace('\"', '"')
item = json.loads(item)
if 'item_id' in item.keys() and 'title' in item.keys():
item_id = item['item_id']
print(downloaded_sum, tup[0], tup[1], item['item_id'], item['title'])
if item_id not in downloaded_data_id:
downloaded_data_id.append(item_id)
line = u"{}_!_{}_!_{}_!_{}".format(item['item_id'], tup[0], tup[1], item['title'])
line = line.replace('\n', '').replace('\r', '')
line = line + '\n'
fp.write(line)
downloaded_sum += 1
def get_routine(data_path):
global downloaded_sum
if os.path.exists(data_path):
with open(data_path, 'r', encoding='utf-8') as fp:
lines = fp.readlines()
downloaded_sum = len(lines)
for line in lines:
item_id = int(line.split('_!_')[0])
downloaded_data_id.append(item_id)
print('在文件中已经读起了%d条数据' % downloaded_sum)
else:
os.makedirs(os.path.dirname(data_path))
while 1:
time.sleep(10)
for classify in news_classify:
get_data(classify, data_path)
if downloaded_sum >= 300000:
break
if __name__ == '__main__':
data_path = 'datasets/news_classify_data.txt'
dict_path = "datasets/dict_txt.txt"
get_routine(data_path)
| true
| true
|
f714a1b02ca1276050523ec8c90956dae69d86bf
| 3,521
|
py
|
Python
|
tools/emprofile.py
|
talrasha/emscripten
|
5ece531a4bc724b133da0e1b0ce061e0c2e7bebd
|
[
"MIT"
] | 1
|
2021-06-15T20:40:30.000Z
|
2021-06-15T20:40:30.000Z
|
tools/emprofile.py
|
talrasha/emscripten
|
5ece531a4bc724b133da0e1b0ce061e0c2e7bebd
|
[
"MIT"
] | null | null | null |
tools/emprofile.py
|
talrasha/emscripten
|
5ece531a4bc724b133da0e1b0ce061e0c2e7bebd
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# Copyright 2016 The Emscripten Authors. All rights reserved.
# Emscripten is available under two separate licenses, the MIT license and the
# University of Illinois/NCSA Open Source License. Both these licenses can be
# found in the LICENSE file.
import json
import os
import shutil
import sys
import tempfile
import time
profiler_logs_path = os.path.join(tempfile.gettempdir(), 'emscripten_toolchain_profiler_logs')
OUTFILE = 'emprofile.' + time.strftime('%Y%m%d_%H%M')
for i in range(len(sys.argv)):
arg = sys.argv[i]
if arg.startswith('--outfile=') or arg.startswith('-o='):
OUTFILE = arg.split('=', 1)[1].strip().replace('.html', '')
sys.argv[i] = ''
elif arg == '-o':
OUTFILE = sys.argv[i + 1].strip().replace('.html', '')
sys.argv[i] = sys.argv[i + 1] = ''
# Deletes all previously captured log files to make room for a new clean run.
def delete_profiler_logs():
try:
shutil.rmtree(profiler_logs_path)
except IOError:
pass
def list_files_in_directory(d):
files = []
try:
items = os.listdir(d)
for i in items:
f = os.path.join(d, i)
if os.path.isfile(f):
files += [f]
return files
except IOError:
return []
def create_profiling_graph():
log_files = [f for f in list_files_in_directory(profiler_logs_path) if 'toolchain_profiler.pid_' in f]
all_results = []
if len(log_files):
print('Processing ' + str(len(log_files)) + ' profile log files in "' + profiler_logs_path + '"...')
for f in log_files:
try:
json_data = open(f, 'r').read()
if len(json_data.strip()) == 0:
continue
lines = json_data.split('\n')
lines = [x for x in lines if x != '[' and x != ']' and x != ',' and len(x.strip())]
lines = [(x + ',') if not x.endswith(',') else x for x in lines]
lines[-1] = lines[-1][:-1]
json_data = '[' + '\n'.join(lines) + ']'
all_results += json.loads(json_data)
except Exception as e:
print(str(e), file=sys.stderr)
print('Failed to parse JSON file "' + f + '"!', file=sys.stderr)
sys.exit(1)
if len(all_results) == 0:
print('No profiler logs were found in path "' + profiler_logs_path + '". Try setting the environment variable EMPROFILE=1 and run some emcc commands, and then rerun "emprofile" again.')
return
all_results.sort(key=lambda x: x['time'])
emprofile_json_data = json.dumps(all_results, indent=2)
html_file = OUTFILE + '.html'
html_contents = open(os.path.join(os.path.dirname(os.path.realpath(__file__)), 'toolchain_profiler.results_template.html'), 'r').read().replace('{{{ emprofile_json_data }}}', emprofile_json_data)
open(html_file, 'w').write(html_contents)
print('Wrote "' + html_file + '"')
if '--help' in sys.argv:
print('''Usage:
emprofile.py --clear (or -c)
Deletes all previously recorded profiling log files.
Use this to abort/drop any previously collected
profiling data for a new profiling run.
emprofile.py [--no-clear]
Draws a graph from all recorded profiling log files,
and deletes the recorded profiling files, unless
--no-clear is also passed.
Optional parameters:
--outfile=x.html (or -o=x.html)
Specifies the name of the results file to generate.
''')
sys.exit(1)
if '--reset' in sys.argv or '--clear' in sys.argv or '-c' in sys.argv:
delete_profiler_logs()
else:
create_profiling_graph()
if '--no-clear' not in sys.argv:
delete_profiler_logs()
| 32.302752
| 197
| 0.650099
|
import json
import os
import shutil
import sys
import tempfile
import time
profiler_logs_path = os.path.join(tempfile.gettempdir(), 'emscripten_toolchain_profiler_logs')
OUTFILE = 'emprofile.' + time.strftime('%Y%m%d_%H%M')
for i in range(len(sys.argv)):
arg = sys.argv[i]
if arg.startswith('--outfile=') or arg.startswith('-o='):
OUTFILE = arg.split('=', 1)[1].strip().replace('.html', '')
sys.argv[i] = ''
elif arg == '-o':
OUTFILE = sys.argv[i + 1].strip().replace('.html', '')
sys.argv[i] = sys.argv[i + 1] = ''
def delete_profiler_logs():
try:
shutil.rmtree(profiler_logs_path)
except IOError:
pass
def list_files_in_directory(d):
files = []
try:
items = os.listdir(d)
for i in items:
f = os.path.join(d, i)
if os.path.isfile(f):
files += [f]
return files
except IOError:
return []
def create_profiling_graph():
log_files = [f for f in list_files_in_directory(profiler_logs_path) if 'toolchain_profiler.pid_' in f]
all_results = []
if len(log_files):
print('Processing ' + str(len(log_files)) + ' profile log files in "' + profiler_logs_path + '"...')
for f in log_files:
try:
json_data = open(f, 'r').read()
if len(json_data.strip()) == 0:
continue
lines = json_data.split('\n')
lines = [x for x in lines if x != '[' and x != ']' and x != ',' and len(x.strip())]
lines = [(x + ',') if not x.endswith(',') else x for x in lines]
lines[-1] = lines[-1][:-1]
json_data = '[' + '\n'.join(lines) + ']'
all_results += json.loads(json_data)
except Exception as e:
print(str(e), file=sys.stderr)
print('Failed to parse JSON file "' + f + '"!', file=sys.stderr)
sys.exit(1)
if len(all_results) == 0:
print('No profiler logs were found in path "' + profiler_logs_path + '". Try setting the environment variable EMPROFILE=1 and run some emcc commands, and then rerun "emprofile" again.')
return
all_results.sort(key=lambda x: x['time'])
emprofile_json_data = json.dumps(all_results, indent=2)
html_file = OUTFILE + '.html'
html_contents = open(os.path.join(os.path.dirname(os.path.realpath(__file__)), 'toolchain_profiler.results_template.html'), 'r').read().replace('{{{ emprofile_json_data }}}', emprofile_json_data)
open(html_file, 'w').write(html_contents)
print('Wrote "' + html_file + '"')
if '--help' in sys.argv:
print('''Usage:
emprofile.py --clear (or -c)
Deletes all previously recorded profiling log files.
Use this to abort/drop any previously collected
profiling data for a new profiling run.
emprofile.py [--no-clear]
Draws a graph from all recorded profiling log files,
and deletes the recorded profiling files, unless
--no-clear is also passed.
Optional parameters:
--outfile=x.html (or -o=x.html)
Specifies the name of the results file to generate.
''')
sys.exit(1)
if '--reset' in sys.argv or '--clear' in sys.argv or '-c' in sys.argv:
delete_profiler_logs()
else:
create_profiling_graph()
if '--no-clear' not in sys.argv:
delete_profiler_logs()
| true
| true
|
f714a32598047c41a41ca8c84b2495aad909f3ac
| 829
|
py
|
Python
|
QiuBaiSpider/spider_main.py
|
lidenghong1/SmallReptileTraining
|
a1bfb81c9969edfb7554acc50370c0cb036da690
|
[
"MIT"
] | 133
|
2017-06-10T02:18:00.000Z
|
2022-01-08T03:29:08.000Z
|
QiuBaiSpider/spider_main.py
|
ljj2666/SmallReptileTraining
|
b6253e835120da457c416fbf4a012e545d9c70ad
|
[
"MIT"
] | null | null | null |
QiuBaiSpider/spider_main.py
|
ljj2666/SmallReptileTraining
|
b6253e835120da457c416fbf4a012e545d9c70ad
|
[
"MIT"
] | 212
|
2017-06-14T03:29:22.000Z
|
2022-01-29T15:14:47.000Z
|
from QiuBaiSpider.pymysqldb_manager import DbManager
from QiuBaiSpider.page_items import PageItems
from QiuBaiSpider.tools import Tools
'''
Crawl jokes from Qiushibaike, strip the DOM tags from the body text, and store the scraped data in a MySQL database
Extra module:
PyMySQL
'''
class Main(object):
def __init__(self, max_page=1):
self.max_page = max_page
self.db_manager = DbManager()
def run(self):
self.db_manager.connect()
for index in range(self.max_page):
self._page_run(index)
self.db_manager.close()
def _page_run(self, page):
page_dict_items = PageItems(page).get_page_dict_items()
if page_dict_items is None:
return
for dict_item in page_dict_items:
self.db_manager.insertDict(dict_item)
pass
if __name__ == "__main__":
Tools.setup_log_mode(False)
Main(10).run()
| 24.382353
| 63
| 0.679131
|
from QiuBaiSpider.pymysqldb_manager import DbManager
from QiuBaiSpider.page_items import PageItems
from QiuBaiSpider.tools import Tools
class Main(object):
def __init__(self, max_page=1):
self.max_page = max_page
self.db_manager = DbManager()
def run(self):
self.db_manager.connect()
for index in range(self.max_page):
self._page_run(index)
self.db_manager.close()
def _page_run(self, page):
page_dict_items = PageItems(page).get_page_dict_items()
if page_dict_items is None:
return
for dict_item in page_dict_items:
self.db_manager.insertDict(dict_item)
pass
if __name__ == "__main__":
Tools.setup_log_mode(False)
Main(10).run()
| true
| true
|
f714a3eab380aa3fea7349fb1fb1cb7718fcf9fb
| 2,713
|
py
|
Python
|
test/pyaz/acr/helm/__init__.py
|
bigdatamoore/py-az-cli
|
54383a4ee7cc77556f6183e74e992eec95b28e01
|
[
"MIT"
] | null | null | null |
test/pyaz/acr/helm/__init__.py
|
bigdatamoore/py-az-cli
|
54383a4ee7cc77556f6183e74e992eec95b28e01
|
[
"MIT"
] | 9
|
2021-09-24T16:37:24.000Z
|
2021-12-24T00:39:19.000Z
|
test/pyaz/acr/helm/__init__.py
|
bigdatamoore/py-az-cli
|
54383a4ee7cc77556f6183e74e992eec95b28e01
|
[
"MIT"
] | null | null | null |
import json, subprocess
from ... pyaz_utils import get_cli_name, get_params
def list(name, repository=None, resource_group=None, suffix=None, username=None, password=None):
params = get_params(locals())
command = "az acr helm list " + params
print(command)
output = subprocess.run(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout = output.stdout.decode("utf-8")
stderr = output.stderr.decode("utf-8")
if stdout:
return json.loads(stdout)
print(stdout)
else:
raise Exception(stderr)
print(stderr)
def show(name, version=None, repository=None, resource_group=None, suffix=None, username=None, password=None):
params = get_params(locals())
command = "az acr helm show " + params
print(command)
output = subprocess.run(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout = output.stdout.decode("utf-8")
stderr = output.stderr.decode("utf-8")
if stdout:
return json.loads(stdout)
print(stdout)
else:
raise Exception(stderr)
print(stderr)
def delete(name, version=None, repository=None, resource_group=None, suffix=None, username=None, password=None, prov=None, yes=None):
params = get_params(locals())
command = "az acr helm delete " + params
print(command)
output = subprocess.run(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout = output.stdout.decode("utf-8")
stderr = output.stderr.decode("utf-8")
if stdout:
return json.loads(stdout)
print(stdout)
else:
raise Exception(stderr)
print(stderr)
def push(name, repository=None, force=None, resource_group=None, suffix=None, username=None, password=None):
params = get_params(locals())
command = "az acr helm push " + params
print(command)
output = subprocess.run(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout = output.stdout.decode("utf-8")
stderr = output.stderr.decode("utf-8")
if stdout:
return json.loads(stdout)
print(stdout)
else:
raise Exception(stderr)
print(stderr)
def install_cli(client_version=None, install_location=None, yes=None):
params = get_params(locals())
command = "az acr helm install-cli " + params
print(command)
output = subprocess.run(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout = output.stdout.decode("utf-8")
stderr = output.stderr.decode("utf-8")
if stdout:
return json.loads(stdout)
print(stdout)
else:
raise Exception(stderr)
print(stderr)
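# Editor's note: a hypothetical usage sketch, not part of the original module. Each wrapper
# builds an Azure CLI command from its keyword arguments and shells out, e.g.
#   charts = list(name='myregistry')
# would run something like "az acr helm list --name myregistry" (the exact flag formatting
# depends on get_params) and return the JSON parsed from stdout, raising if nothing was
# written to stdout.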
| 36.662162
| 133
| 0.669001
|
import json, subprocess
from ... pyaz_utils import get_cli_name, get_params
def list(name, repository=None, resource_group=None, suffix=None, username=None, password=None):
params = get_params(locals())
command = "az acr helm list " + params
print(command)
output = subprocess.run(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout = output.stdout.decode("utf-8")
stderr = output.stderr.decode("utf-8")
if stdout:
return json.loads(stdout)
print(stdout)
else:
raise Exception(stderr)
print(stderr)
def show(name, version=None, repository=None, resource_group=None, suffix=None, username=None, password=None):
params = get_params(locals())
command = "az acr helm show " + params
print(command)
output = subprocess.run(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout = output.stdout.decode("utf-8")
stderr = output.stderr.decode("utf-8")
if stdout:
return json.loads(stdout)
print(stdout)
else:
raise Exception(stderr)
print(stderr)
def delete(name, version=None, repository=None, resource_group=None, suffix=None, username=None, password=None, prov=None, yes=None):
params = get_params(locals())
command = "az acr helm delete " + params
print(command)
output = subprocess.run(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout = output.stdout.decode("utf-8")
stderr = output.stderr.decode("utf-8")
if stdout:
return json.loads(stdout)
print(stdout)
else:
raise Exception(stderr)
print(stderr)
def push(name, repository=None, force=None, resource_group=None, suffix=None, username=None, password=None):
params = get_params(locals())
command = "az acr helm push " + params
print(command)
output = subprocess.run(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout = output.stdout.decode("utf-8")
stderr = output.stderr.decode("utf-8")
if stdout:
return json.loads(stdout)
print(stdout)
else:
raise Exception(stderr)
print(stderr)
def install_cli(client_version=None, install_location=None, yes=None):
params = get_params(locals())
command = "az acr helm install-cli " + params
print(command)
output = subprocess.run(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout = output.stdout.decode("utf-8")
stderr = output.stderr.decode("utf-8")
if stdout:
return json.loads(stdout)
print(stdout)
else:
raise Exception(stderr)
print(stderr)
| true
| true
|
f714a4d032c0d9c04d96fb38385edc266e67dc16
| 516
|
py
|
Python
|
worker/Facebook/service1/main.py
|
OmarZOS/remote-extraction-proxy-and-worker
|
739466a0df588d7eb5b1dae9666ceb8c7a25e928
|
[
"MIT"
] | null | null | null |
worker/Facebook/service1/main.py
|
OmarZOS/remote-extraction-proxy-and-worker
|
739466a0df588d7eb5b1dae9666ceb8c7a25e928
|
[
"MIT"
] | 10
|
2022-03-17T23:23:18.000Z
|
2022-03-18T00:15:11.000Z
|
worker/Facebook/service1/main.py
|
OmarZOS/remote-extraction-proxy-and-worker
|
739466a0df588d7eb5b1dae9666ceb8c7a25e928
|
[
"MIT"
] | 1
|
2022-03-24T23:56:46.000Z
|
2022-03-24T23:56:46.000Z
|
from http import cookies
from Extractor import Extractor
from context import Context
import networkx as nx
from facebook_scraper import get_posts,get_friends,get_profile,get_group_info
cxt=Context(account,creds,limit_post,limit_friends,max,post,False,True)
#print(get_profile("100009975842374"))
#print(get_group_info("journalmaracanaalgerie") )
ex =Extractor('Fb',cxt,Schema,cookie)
ex.create_Graphe_friends(file_graphe,cxt,Schema,cookie)
#ex.create_Graphe_group(file_graphe,cxt,Schema,cookies)
| 17.793103
| 77
| 0.813953
|
from http import cookies
from Extractor import Extractor
from context import Context
import networkx as nx
from facebook_scraper import get_posts,get_friends,get_profile,get_group_info
cxt=Context(account,creds,limit_post,limit_friends,max,post,False,True)
ex =Extractor('Fb',cxt,Schema,cookie)
ex.create_Graphe_friends(file_graphe,cxt,Schema,cookie)
| true
| true
|
f714a58617a8965d0d4cc18d282b5dffc7518090
| 4,801
|
py
|
Python
|
main.py
|
ammar-khan/raspberry-pi-opencv-dnn-face-detection
|
04ea998ee9e4d7bf71da022b0d8613940e8e7cfb
|
[
"MIT"
] | 3
|
2018-10-25T05:01:13.000Z
|
2021-01-22T11:29:15.000Z
|
main.py
|
ammar-khan/raspberry-pi-opencv-dnn-face-detection
|
04ea998ee9e4d7bf71da022b0d8613940e8e7cfb
|
[
"MIT"
] | null | null | null |
main.py
|
ammar-khan/raspberry-pi-opencv-dnn-face-detection
|
04ea998ee9e4d7bf71da022b0d8613940e8e7cfb
|
[
"MIT"
] | 1
|
2019-08-24T19:22:04.000Z
|
2019-08-24T19:22:04.000Z
|
##
# Copyright 2018, Ammar Ali Khan
# Licensed under MIT.
# Since: v1.0.0
##
import time
import cv2
import numpy as np
from src.common.package.config import application
from src.opencv.package.config import application as _application
from src.common.package.http import server as _server
from src.common.package.http.handler import Handler
from src.common.package.camera.capture import Capture as _capture
from src.common.package.frame.action import Action as _frame
from src.common.package.frame.draw import Draw as _draw
from src.opencv.package.opencv.opencv import OpenCV
# Constant
_opencv = OpenCV()
##
# StreamHandler class - inherit Handler
# This class provides a handler for HTTP streaming
# Note: this class should override Handler.stream
##
class StreamHandler(Handler):
##
# Override method Handler.stream()
##
def stream(self):
Handler.stream(self)
print('[INFO] Overriding stream method...')
# Initialise capture
capture = _capture(src=application.CAPTURING_DEVICE,
use_pi_camera=application.USE_PI_CAMERA,
resolution=application.RESOLUTION,
frame_rate=application.FRAME_RATE)
if application.USE_PI_CAMERA:
print('[INFO] Warming up pi camera...')
else:
print('[INFO] Warming up camera...')
time.sleep(2.0)
print('[INFO] Start capturing...')
while True:
# Read a frame from capture
frame = capture.read()
# Down size frame to 50% (to increase performance on Raspberry Pi)
# frame = _frame.scale(frame=frame, scale=0.5)
# Convert frame to gray (to increase performance on Raspberry Pi)
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
# Get frame dimensions
(height, width) = frame.shape[:2]
# OpenCV detection
detections = _opencv.dnn_face_detector(frame=frame,
scale_factor=1.0,
size=(300, 300),
mean=(104.0, 177.0, 123.0))
# Up size frame to 50% (how the frame was before down sizing)
# frame = _frame.scale(frame=frame, scale=2)
# If returns any detection
for i in range(0, detections.shape[2]):
# Get confidence associated with the detection
confidence = detections[0, 0, i, 2]
# Filter weak detection
if confidence < _application.CONFIDENCE:
continue
# Calculate coordinates
box = detections[0, 0, i, 3:7] * np.array([width,
height,
width,
height])
(left, top, right, bottom) = box.astype('int')
coordinates = {'left': left,
'top': top,
'right': right,
'bottom': bottom}
text = "{:.2f}%".format(confidence * 100)
frame = _draw.rectangle(frame=frame,
coordinates=coordinates,
text=text)
# Write date time on the frame
frame = _draw.text(frame=frame,
coordinates={'left': application.WIDTH - 150, 'top': application.HEIGHT - 20},
text=time.strftime('%d/%m/%Y %H:%M:%S', time.localtime()),
font_color=(0, 0, 255))
# Convert frame into buffer for streaming
retval, buffer = cv2.imencode('.jpg', frame)
# Write buffer to HTML Handler
self.wfile.write(b'--FRAME\r\n')
self.send_header('Content-Type', 'image/jpeg')
self.send_header('Content-Length', len(buffer))
self.end_headers()
self.wfile.write(buffer)
self.wfile.write(b'\r\n')
##
# Method main()
##
def main():
try:
address = ('', application.HTTP_PORT)
server = _server.Server(address, StreamHandler)
print('[INFO] HTTP server started successfully at %s' % str(server.server_address))
print('[INFO] Waiting for client to connect to port %s' % str(application.HTTP_PORT))
server.serve_forever()
except Exception as e:
server.socket.close()
print('[INFO] HTTP server closed successfully.')
print('[ERROR] Exception: %s' % str(e))
if __name__ == '__main__':
main()
| 34.789855
| 109
| 0.534264
|
import time
import cv2
import numpy as np
from src.common.package.config import application
from src.opencv.package.config import application as _application
from src.common.package.http import server as _server
from src.common.package.http.handler import Handler
from src.common.package.camera.capture import Capture as _capture
from src.common.package.frame.action import Action as _frame
from src.common.package.frame.draw import Draw as _draw
from src.opencv.package.opencv.opencv import OpenCV
_opencv = OpenCV()
class StreamHandler(Handler):
def stream(self):
Handler.stream(self)
print('[INFO] Overriding stream method...')
capture = _capture(src=application.CAPTURING_DEVICE,
use_pi_camera=application.USE_PI_CAMERA,
resolution=application.RESOLUTION,
frame_rate=application.FRAME_RATE)
if application.USE_PI_CAMERA:
print('[INFO] Warming up pi camera...')
else:
print('[INFO] Warming up camera...')
time.sleep(2.0)
print('[INFO] Start capturing...')
while True:
frame = capture.read()
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
(height, width) = frame.shape[:2]
detections = _opencv.dnn_face_detector(frame=frame,
scale_factor=1.0,
size=(300, 300),
mean=(104.0, 177.0, 123.0))
for i in range(0, detections.shape[2]):
confidence = detections[0, 0, i, 2]
if confidence < _application.CONFIDENCE:
continue
box = detections[0, 0, i, 3:7] * np.array([width,
height,
width,
height])
(left, top, right, bottom) = box.astype('int')
coordinates = {'left': left,
'top': top,
'right': right,
'bottom': bottom}
text = "{:.2f}%".format(confidence * 100)
frame = _draw.rectangle(frame=frame,
coordinates=coordinates,
text=text)
frame = _draw.text(frame=frame,
coordinates={'left': application.WIDTH - 150, 'top': application.HEIGHT - 20},
text=time.strftime('%d/%m/%Y %H:%M:%S', time.localtime()),
font_color=(0, 0, 255))
retval, buffer = cv2.imencode('.jpg', frame)
self.wfile.write(b'--FRAME\r\n')
self.send_header('Content-Type', 'image/jpeg')
self.send_header('Content-Length', len(buffer))
self.end_headers()
self.wfile.write(buffer)
self.wfile.write(b'\r\n')
def main():
try:
address = ('', application.HTTP_PORT)
server = _server.Server(address, StreamHandler)
print('[INFO] HTTP server started successfully at %s' % str(server.server_address))
print('[INFO] Waiting for client to connect to port %s' % str(application.HTTP_PORT))
server.serve_forever()
except Exception as e:
server.socket.close()
print('[INFO] HTTP server closed successfully.')
print('[ERROR] Exception: %s' % str(e))
if __name__ == '__main__':
main()
| true
| true
|
f714a5f04300870022842049f2926bc20c34e5c2
| 4,647
|
py
|
Python
|
nova/tests/functional/libvirt/test_live_migration.py
|
muraliselva10/nova
|
97626394bcce5c8cd020b136ca54a6aa919eb3a9
|
[
"Apache-2.0"
] | 1
|
2022-02-24T08:49:48.000Z
|
2022-02-24T08:49:48.000Z
|
nova/tests/functional/libvirt/test_live_migration.py
|
muraliselva10/nova
|
97626394bcce5c8cd020b136ca54a6aa919eb3a9
|
[
"Apache-2.0"
] | null | null | null |
nova/tests/functional/libvirt/test_live_migration.py
|
muraliselva10/nova
|
97626394bcce5c8cd020b136ca54a6aa919eb3a9
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2021 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import threading
from lxml import etree
from nova.tests.functional import integrated_helpers
from nova.tests.functional.libvirt import base as libvirt_base
class LiveMigrationQueuedAbortTest(
libvirt_base.LibvirtMigrationMixin,
libvirt_base.ServersTestBase,
integrated_helpers.InstanceHelperMixin
):
"""Functional test for bug 1949808.
This test is used to confirm that VM's state is reverted properly
when queued Live migration is aborted.
"""
api_major_version = 'v2.1'
microversion = '2.74'
ADMIN_API = True
def setUp(self):
super().setUp()
# We will allow only one live migration to be processed at any
# given period of time
self.flags(max_concurrent_live_migrations='1')
self.src_hostname = self.start_compute(hostname='src')
self.dest_hostname = self.start_compute(hostname='dest')
self.src = self.computes[self.src_hostname]
self.dest = self.computes[self.dest_hostname]
# Live migration's execution could be locked if needed
self.lock_live_migration = threading.Lock()
def _migrate_stub(self, domain, destination, params, flags):
# Execute only if live migration is not locked
with self.lock_live_migration:
self.dest.driver._host.get_connection().createXML(
params['destination_xml'],
'fake-createXML-doesnt-care-about-flags')
conn = self.src.driver._host.get_connection()
# Because migrateToURI3 is spawned in a background thread,
# this method does not block the upper nova layers. Because
# we don't want nova to think the live migration has
# finished until this method is done, the last thing we do
# is make fakelibvirt's Domain.jobStats() return
# VIR_DOMAIN_JOB_COMPLETED.
server = etree.fromstring(
params['destination_xml']
).find('./uuid').text
dom = conn.lookupByUUIDString(server)
dom.complete_job()
def test_queued_live_migration_abort(self):
# Lock live migrations
self.lock_live_migration.acquire()
# Start instances: first one would be used to occupy
# executor's live migration queue, second one would be used
# to actually confirm that queued live migrations are
# aborted properly.
self.server_a = self._create_server(
host=self.src_hostname, networks='none')
self.server_b = self._create_server(
host=self.src_hostname, networks='none')
# Issue live migration requests for both servers. We expect that
# server_a live migration would be running, but locked by
# self.lock_live_migration and server_b live migration would be
# queued.
self._live_migrate(
self.server_a,
migration_expected_state='running',
server_expected_state='MIGRATING'
)
self._live_migrate(
self.server_b,
migration_expected_state='queued',
server_expected_state='MIGRATING'
)
# Abort live migration for server_b
serverb_migration = self.api.api_get(
'/os-migrations?instance_uuid=%s' % self.server_b['id']
).body['migrations'].pop()
self.api.api_delete(
'/servers/%s/migrations/%s' % (self.server_b['id'],
serverb_migration['id']))
self._wait_for_migration_status(self.server_b, ['cancelled'])
# Unlock live migrations and confirm that server_a becomes
# active again after successful live migration
self.lock_live_migration.release()
self._wait_for_state_change(self.server_a, 'ACTIVE')
# FIXME(artom) Assert the server_b never comes out of 'MIGRATING'
self.assertRaises(
AssertionError,
self._wait_for_state_change, self.server_b, 'ACTIVE')
self._wait_for_state_change(self.server_b, 'MIGRATING')
| 39.381356
| 75
| 0.663869
|
import threading
from lxml import etree
from nova.tests.functional import integrated_helpers
from nova.tests.functional.libvirt import base as libvirt_base
class LiveMigrationQueuedAbortTest(
libvirt_base.LibvirtMigrationMixin,
libvirt_base.ServersTestBase,
integrated_helpers.InstanceHelperMixin
):
api_major_version = 'v2.1'
microversion = '2.74'
ADMIN_API = True
def setUp(self):
super().setUp()
self.flags(max_concurrent_live_migrations='1')
self.src_hostname = self.start_compute(hostname='src')
self.dest_hostname = self.start_compute(hostname='dest')
self.src = self.computes[self.src_hostname]
self.dest = self.computes[self.dest_hostname]
self.lock_live_migration = threading.Lock()
def _migrate_stub(self, domain, destination, params, flags):
# Execute only if live migration is not locked
with self.lock_live_migration:
self.dest.driver._host.get_connection().createXML(
params['destination_xml'],
'fake-createXML-doesnt-care-about-flags')
conn = self.src.driver._host.get_connection()
# Because migrateToURI3 is spawned in a background thread,
# this method does not block the upper nova layers. Because
            # we don't want nova to think the live migration has
            # finished until this method is done, the last thing we do
            # is make fakelibvirt's Domain.jobStats() return
            # VIR_DOMAIN_JOB_COMPLETED.
server = etree.fromstring(
params['destination_xml']
).find('./uuid').text
dom = conn.lookupByUUIDString(server)
dom.complete_job()
def test_queued_live_migration_abort(self):
# Lock live migrations
self.lock_live_migration.acquire()
        # Start instances: first one would be used to occupy
        # executor's live migration queue, second one would be used
        # to actually confirm that queued live migrations are
        # aborted properly.
self.server_a = self._create_server(
host=self.src_hostname, networks='none')
self.server_b = self._create_server(
host=self.src_hostname, networks='none')
self._live_migrate(
self.server_a,
migration_expected_state='running',
server_expected_state='MIGRATING'
)
self._live_migrate(
self.server_b,
migration_expected_state='queued',
server_expected_state='MIGRATING'
)
serverb_migration = self.api.api_get(
'/os-migrations?instance_uuid=%s' % self.server_b['id']
).body['migrations'].pop()
self.api.api_delete(
'/servers/%s/migrations/%s' % (self.server_b['id'],
serverb_migration['id']))
self._wait_for_migration_status(self.server_b, ['cancelled'])
self.lock_live_migration.release()
self._wait_for_state_change(self.server_a, 'ACTIVE')
self.assertRaises(
AssertionError,
self._wait_for_state_change, self.server_b, 'ACTIVE')
self._wait_for_state_change(self.server_b, 'MIGRATING')
| true
| true
|
f714a5ff5c93a84a57edfda15f6f4e42f0eb012f
| 175
|
py
|
Python
|
run.py
|
B02902008/TaipeiWater
|
7364ce0bdfafddb7448cd8943c0c048f1a199dda
|
[
"MIT"
] | null | null | null |
run.py
|
B02902008/TaipeiWater
|
7364ce0bdfafddb7448cd8943c0c048f1a199dda
|
[
"MIT"
] | null | null | null |
run.py
|
B02902008/TaipeiWater
|
7364ce0bdfafddb7448cd8943c0c048f1a199dda
|
[
"MIT"
] | null | null | null |
from app import app
if __name__ == '__main__':
context = ('/etc/ssl/certificate.crt', '/etc/ssl/private.key')
app.run(host='0.0.0.0', port=8443, ssl_context=context)
| 29.166667
| 66
| 0.668571
|
from app import app
if __name__ == '__main__':
context = ('/etc/ssl/certificate.crt', '/etc/ssl/private.key')
app.run(host='0.0.0.0', port=8443, ssl_context=context)
| true
| true
|