hexsha stringlengths 40 40 | size int64 3 1.03M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 3 972 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 972 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 972 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 3 1.03M | avg_line_length float64 1.13 941k | max_line_length int64 2 941k | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
ddbaf11f01859eaa081ff72a9bb1921842bfb30e | 117 | py | Python | Python/Tests/TestData/Grammar/ImportStmt.py | nanshuiyu/pytools | 9f9271fe8cf564b4f94e9456d400f4306ea77c23 | [
"Apache-2.0"
] | null | null | null | Python/Tests/TestData/Grammar/ImportStmt.py | nanshuiyu/pytools | 9f9271fe8cf564b4f94e9456d400f4306ea77c23 | [
"Apache-2.0"
] | null | null | null | Python/Tests/TestData/Grammar/ImportStmt.py | nanshuiyu/pytools | 9f9271fe8cf564b4f94e9456d400f4306ea77c23 | [
"Apache-2.0"
] | null | null | null | import sys
import sys, fob
import sys as oar
import sys as oar, fob as baz
import sys.fob
import sys.fob as oar | 19.5 | 30 | 0.735043 |
a3f38d52523ac75c38247f1a9c63ea51abd899ca | 495 | py | Python | stuff/accum.py | Code-JD/pytest-tutorial | c142358cb7571d65ca8844bc91d9b921c0868a50 | [
"MIT"
] | null | null | null | stuff/accum.py | Code-JD/pytest-tutorial | c142358cb7571d65ca8844bc91d9b921c0868a50 | [
"MIT"
] | null | null | null | stuff/accum.py | Code-JD/pytest-tutorial | c142358cb7571d65ca8844bc91d9b921c0868a50 | [
"MIT"
] | null | null | null | """
This module contains a basic accumulator class.
Its purpose is to show how to use the pytest framework for testing classes.
"""
# --------------------------------------------------------------------------------------
# Class: Accumulator
# --------------------------------------------------------------------------------------
class Accumulator:
    """A minimal running-total counter.

    Demonstrates a read-only ``@property`` plus a mutator method, as a
    target for pytest class-testing examples.
    """

    def __init__(self):
        # The running total always starts from zero.
        self._count = 0

    @property
    def count(self):
        """The current accumulated total (read-only)."""
        return self._count

    def add(self, more=1):
        """Increase the total by ``more`` (defaults to 1)."""
        self._count = self._count + more
| 23.571429 | 89 | 0.452525 |
90517269f2ea344544ac093e3b22d6e2181509fc | 1,576 | py | Python | src/doc/hu/a_tour_of_sage/conf.py | LaisRast/sage | 5fb2a6ea44400e469caee82748cf863ca0c5f724 | [
"BSL-1.0"
] | null | null | null | src/doc/hu/a_tour_of_sage/conf.py | LaisRast/sage | 5fb2a6ea44400e469caee82748cf863ca0c5f724 | [
"BSL-1.0"
] | null | null | null | src/doc/hu/a_tour_of_sage/conf.py | LaisRast/sage | 5fb2a6ea44400e469caee82748cf863ca0c5f724 | [
"BSL-1.0"
] | null | null | null | # nodoctest
# Numerical Sage documentation build configuration file, created by
# sphinx-quickstart on Sat Dec 6 11:08:04 2008.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# The contents of this file are pickled, so don't put values in the
# namespace that aren't pickleable (module imports are okay, they're
# removed automatically).
#
# All configuration values have a default; values that are commented
# out serve to show the default.
# `release` is re-imported explicitly (despite the wildcard import below) so
# linters and readers can see where it comes from.
from sage_docbuild.conf import release
from sage_docbuild.conf import *  # NOQA
# Add any paths that contain custom static files (such as style sheets),
# relative to this directory to html_static_path. They are copied after the
# builtin static files, so a file named "default.css" will overwrite the
# builtin "default.css". html_common_static_path imported from sage_docbuild.conf
# contains common paths.
html_static_path = [] + html_common_static_path
# General information about the project.
# NOTE: `project` is the Hungarian title ("A Tour of Sage"); do not translate,
# this build is the `hu` localization.
project = 'A Sage bemutatása'
name = 'a_tour_of_sage'
language = 'hu'
# The name for this set of Sphinx documents.  If None, it defaults to
# "<project> v<release> documentation".
html_title = project + " v" + release
html_short_title = project + " v" + release
# Output file base name for HTML help builder.
htmlhelp_basename = name
# Grouping the document tree into LaTeX files. List of tuples (source
# start file, target name, title, author, document class
# [howto/manual]).
latex_documents = [
    ('index', name + '.tex', 'A Tour Of Sage',
     'The Sage Development Team', 'manual'),
]
| 35.022222 | 81 | 0.752538 |
2ec2bb9a4b8d4b302e48987115ab8849d70b8179 | 524 | py | Python | simio/app/default_config.py | RB387/simio | f799a08b0dc8871d6fc5eebe4e8635881721b511 | [
"Apache-2.0"
] | null | null | null | simio/app/default_config.py | RB387/simio | f799a08b0dc8871d6fc5eebe4e8635881721b511 | [
"Apache-2.0"
] | null | null | null | simio/app/default_config.py | RB387/simio | f799a08b0dc8871d6fc5eebe4e8635881721b511 | [
"Apache-2.0"
] | null | null | null | import os
from typing import Dict, Any
from tzlocal import get_localzone
from simio.app.config_names import AppConfig
def get_default_config() -> Dict[Any, Any]:
    """Build the baseline application configuration mapping.

    The result has a single ``AppConfig`` section holding the default
    version string, swagger toggles, the host machine's local timezone,
    and the default swagger output path (``swagger.json`` in the CWD).
    """
    swagger_settings = {
        "config_path": os.path.join(os.getcwd(), "swagger.json"),
    }
    app_section = {
        AppConfig.version: "0.1.0",
        AppConfig.autogen_swagger: True,
        AppConfig.enable_swagger: True,
        AppConfig.timezone: get_localzone(),
        AppConfig.swagger_config: swagger_settings,
    }
    return {AppConfig: app_section}
| 24.952381 | 73 | 0.597328 |
4d49c8018f69f35b0b044495ee4aaaa079fdcf56 | 4,677 | py | Python | spider/adt/course/__init__.py | UAws/myadelaide-timetable-synchronizer | 8cee9c895a7916cd035785cd2fa6f0a3a3d2235d | [
"MIT"
] | 6 | 2022-02-25T06:59:06.000Z | 2022-02-27T09:29:32.000Z | spider/adt/course/__init__.py | UAws/myadelaide-timetable-synchronizer | 8cee9c895a7916cd035785cd2fa6f0a3a3d2235d | [
"MIT"
] | null | null | null | spider/adt/course/__init__.py | UAws/myadelaide-timetable-synchronizer | 8cee9c895a7916cd035785cd2fa6f0a3a3d2235d | [
"MIT"
] | 2 | 2022-02-25T07:52:57.000Z | 2022-02-28T10:27:43.000Z | # Code generated by jtd-codegen for Python v0.3.1
import re
from dataclasses import dataclass
from datetime import datetime, timedelta, timezone
from typing import Any, Dict, Optional, Union, get_args, get_origin
@dataclass
class Course:
    """One timetable row from the MyAdelaide course feed (jtd-codegen output).

    Field names are snake_cased versions of the JSON keys (e.g. ``a_emplid``
    maps to ``"A.EMPLID"``).  All values arrive as strings in the feed.
    """
    a_emplid: 'str'
    a_strm: 'str'
    b_catalog_nbr: 'str'
    b_crse_id: 'str'
    b_descr: 'str'
    b_subject: 'str'
    c_sort_order: 'str'
    c_weekday_name: 'str'
    d_xlatlongname: 'str'
    date: 'str'
    e_descr: 'str'
    e_room: 'str'
    end_time: 'str'
    f_descr: 'str'
    start_time: 'str'
    attr_rownumber: 'str'
    @classmethod
    def from_json_data(cls, data: Any) -> 'Course':
        """Build a ``Course`` from a decoded-JSON dict.

        NOTE: arguments are positional, so their order must exactly match
        the field declaration order above.
        """
        return cls(
            _from_json_data(str, data.get("A.EMPLID")),
            _from_json_data(str, data.get("A.STRM")),
            _from_json_data(str, data.get("B.CATALOG_NBR")),
            _from_json_data(str, data.get("B.CRSE_ID")),
            _from_json_data(str, data.get("B.DESCR")),
            _from_json_data(str, data.get("B.SUBJECT")),
            _from_json_data(str, data.get("C.SORT_ORDER")),
            _from_json_data(str, data.get("C.WEEKDAY_NAME")),
            _from_json_data(str, data.get("D.XLATLONGNAME")),
            _from_json_data(str, data.get("DATE")),
            _from_json_data(str, data.get("E.DESCR")),
            _from_json_data(str, data.get("E.ROOM")),
            _from_json_data(str, data.get("END_TIME")),
            _from_json_data(str, data.get("F.DESCR")),
            _from_json_data(str, data.get("START_TIME")),
            _from_json_data(str, data.get("attr:rownumber")),
        )
    def to_json_data(self) -> Any:
        """Serialize back to a dict using the original JSON key names."""
        data: Dict[str, Any] = {}
        data["A.EMPLID"] = _to_json_data(self.a_emplid)
        data["A.STRM"] = _to_json_data(self.a_strm)
        data["B.CATALOG_NBR"] = _to_json_data(self.b_catalog_nbr)
        data["B.CRSE_ID"] = _to_json_data(self.b_crse_id)
        data["B.DESCR"] = _to_json_data(self.b_descr)
        data["B.SUBJECT"] = _to_json_data(self.b_subject)
        data["C.SORT_ORDER"] = _to_json_data(self.c_sort_order)
        data["C.WEEKDAY_NAME"] = _to_json_data(self.c_weekday_name)
        data["D.XLATLONGNAME"] = _to_json_data(self.d_xlatlongname)
        data["DATE"] = _to_json_data(self.date)
        data["E.DESCR"] = _to_json_data(self.e_descr)
        data["E.ROOM"] = _to_json_data(self.e_room)
        data["END_TIME"] = _to_json_data(self.end_time)
        data["F.DESCR"] = _to_json_data(self.f_descr)
        data["START_TIME"] = _to_json_data(self.start_time)
        data["attr:rownumber"] = _to_json_data(self.attr_rownumber)
        return data
def _from_json_data(cls: Any, data: Any) -> Any:
if data is None or cls in [bool, int, float, str, object] or cls is Any:
return data
if cls is datetime:
return _parse_rfc3339(data)
if get_origin(cls) is Union:
return _from_json_data(get_args(cls)[0], data)
if get_origin(cls) is list:
return [_from_json_data(get_args(cls)[0], d) for d in data]
if get_origin(cls) is dict:
return { k: _from_json_data(get_args(cls)[1], v) for k, v in data.items() }
return cls.from_json_data(data)
def _to_json_data(data: Any) -> Any:
if data is None or type(data) in [bool, int, float, str, object]:
return data
if type(data) is datetime:
return data.isoformat()
if type(data) is list:
return [_to_json_data(d) for d in data]
if type(data) is dict:
return { k: _to_json_data(v) for k, v in data.items() }
return data.to_json_data()
def _parse_rfc3339(s: str) -> datetime:
datetime_re = '^(\d{4})-(\d{2})-(\d{2})[tT](\d{2}):(\d{2}):(\d{2})(\.\d+)?([zZ]|((\+|-)(\d{2}):(\d{2})))$'
match = re.match(datetime_re, s)
if not match:
raise ValueError('Invalid RFC3339 date/time', s)
(year, month, day, hour, minute, second, frac_seconds, offset,
*tz) = match.groups()
frac_seconds_parsed = None
if frac_seconds:
frac_seconds_parsed = int(float(frac_seconds) * 1_000_000)
else:
frac_seconds_parsed = 0
tzinfo = None
if offset == 'Z':
tzinfo = timezone.utc
else:
hours = int(tz[2])
minutes = int(tz[3])
sign = 1 if tz[1] == '+' else -1
if minutes not in range(60):
raise ValueError('minute offset must be in 0..59')
tzinfo = timezone(timedelta(minutes=sign * (60 * hours + minutes)))
second_parsed = int(second)
if second_parsed == 60:
second_parsed = 59
return datetime(int(year), int(month), int(day), int(hour), int(minute),
second_parsed, frac_seconds_parsed, tzinfo)
| 36.826772 | 110 | 0.613427 |
7c5665002886b8c71a2ccf29a190d5e6b701e0dc | 519 | py | Python | plotly/validators/scattergl/marker/_cauto.py | gnestor/plotly.py | a8ae062795ddbf9867b8578fe6d9e244948c15ff | [
"MIT"
] | 12 | 2020-04-18T18:10:22.000Z | 2021-12-06T10:11:15.000Z | plotly/validators/scattergl/marker/_cauto.py | Vesauza/plotly.py | e53e626d59495d440341751f60aeff73ff365c28 | [
"MIT"
] | 27 | 2020-04-28T21:23:12.000Z | 2021-06-25T15:36:38.000Z | plotly/validators/scattergl/marker/_cauto.py | Vesauza/plotly.py | e53e626d59495d440341751f60aeff73ff365c28 | [
"MIT"
] | 6 | 2020-04-18T23:07:08.000Z | 2021-11-18T07:53:06.000Z | import _plotly_utils.basevalidators
class CautoValidator(_plotly_utils.basevalidators.BooleanValidator):
    """Validator for the boolean ``scattergl.marker.cauto`` property."""

    def __init__(
        self, plotly_name='cauto', parent_name='scattergl.marker', **kwargs
    ):
        # Fill in the property defaults only when the caller did not
        # override them, then forward everything to the base validator.
        kwargs.setdefault('edit_type', 'calc')
        kwargs.setdefault('implied_edits', {})
        kwargs.setdefault('role', 'info')
        super(CautoValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            **kwargs
        )
| 30.529412 | 75 | 0.628131 |
6d3a204cf412d5728b7ddb6fcfa2e9600054a9f4 | 2,503 | py | Python | tests/unit_tests/test_engine.py | RussTheAerialist/render_engine | 426184c652bf5d2f812656f195e8b89827af33ff | [
"MIT"
] | null | null | null | tests/unit_tests/test_engine.py | RussTheAerialist/render_engine | 426184c652bf5d2f812656f195e8b89827af33ff | [
"MIT"
] | null | null | null | tests/unit_tests/test_engine.py | RussTheAerialist/render_engine | 426184c652bf5d2f812656f195e8b89827af33ff | [
"MIT"
] | null | null | null | from render_engine import Engine
import pytest
@pytest.fixture()
def base_engine(site_url):
    # Fresh Engine with no routes; `site_url` is supplied by a conftest fixture.
    return Engine(site_url=site_url, routes=[])
def test_engine_has_internal_env():
    """Two Engine instances must not share a single environment.

    Per-instance environments let different settings apply to each engine.
    """
    first = Engine()
    second = Engine()
    first.env.name = 'engine1'
    second.env.name = 'engine2'
    # Setting one environment's attribute must not leak into the other.
    assert first.env.name == 'engine1'
    assert second.env.name == 'engine2'
def test_engine_kwargs_become_environment_global_properties():
    """Wildcard kwargs given to Engine become template globals."""
    engine = Engine(custom_val='custom')
    assert engine.env.globals['custom_val'] == 'custom'
def test_engine_route_adds_route_items(base_engine):
    """Each engine starts with no routes and routes are added with the @route
    decorator"""
    assert not base_engine.routes
    # Registering a view via the decorator should append a route whose slug
    # matches the first decorator argument.
    @base_engine.route('about', './pages/about')
    def about():
        pass
    assert base_engine.routes[0].slug == 'about'
def test_engine_config_path_added_to_env(mocker):
    """When a config_path is provided parse the yaml file and add it to configs
    and further the environment globals"""
    custom_val='''Engine:
    Environment:
        CUSTOM_KEY: CUSTOM_VALUE'''
    # Patch file reading so the Engine "loads" the YAML above without disk I/O.
    mocker.patch(
        'pathlib.Path.read_text',
        return_value=custom_val,
    )
    env = Engine(config_path="config.yaml").env.globals
    assert env['CUSTOM_KEY'] == 'CUSTOM_VALUE'
def test_engine_build_collection(mocker, base_engine, base_collection):
    """Setup a Collection using the build_collection decorator"""
    assert len(base_engine.pages) == 0
    # Registering a collection should add each of its pages to the engine
    # (the base_collection fixture carries three pages).
    @base_engine.collection('/collection', pages=(base_collection.pages))
    def sample_collection():
        pass
    assert len(base_engine.pages) == 3
def test_engine_has_default_base_content_path():
    """With no override, base_content_path falls back to 'content'.

    A default is required because a Collection with no pages defined MUST
    have a content_path set.
    """
    engine = Engine()
    assert engine.base_content_path == 'content'
def test_engine_default_base_content_path_can_be_overridden():
    """Passing content_path at construction replaces the default path."""
    engine = Engine(content_path='override_the_content_path')
    assert engine.base_content_path == 'override_the_content_path'
| 33.373333 | 79 | 0.717139 |
ea9723b6dc64cf04b92b7d511576ca8f7cd2d928 | 3,285 | py | Python | openaerostruct/tests/test_v1_struct_analysis.py | mdolab/OpenAeroStruct | a10a673ec0c0fd7e4c41b8ec39b856606ce7ec78 | [
"Apache-2.0"
] | 114 | 2017-04-06T15:24:19.000Z | 2022-03-21T09:57:43.000Z | openaerostruct/tests/test_v1_struct_analysis.py | mdolab/OpenAeroStruct | a10a673ec0c0fd7e4c41b8ec39b856606ce7ec78 | [
"Apache-2.0"
] | 322 | 2017-04-07T01:40:03.000Z | 2022-03-17T21:50:52.000Z | openaerostruct/tests/test_v1_struct_analysis.py | mdolab/OpenAeroStruct | a10a673ec0c0fd7e4c41b8ec39b856606ce7ec78 | [
"Apache-2.0"
] | 83 | 2017-04-06T16:53:26.000Z | 2022-03-19T19:34:05.000Z | from openmdao.utils.assert_utils import assert_rel_error
import unittest
import numpy as np
from openaerostruct.geometry.utils import generate_mesh
from openaerostruct.structures.struct_groups import SpatialBeamAlone
import openmdao.api as om
class Test(unittest.TestCase):
    """Regression test: structure-only (no aero) analysis of a simple wing."""

    def test(self):
        """Build a rectangular-wing FEM model, run it, and check mass/displacement."""
        # Create a dictionary to store options about the surface
        mesh_dict = {
            "num_y": 5,
            "num_x": 3,
            "wing_type": "rect",
            "symmetry": True,
            "span_cos_spacing": 1.0,
            "span": 10,
            "chord": 1,
        }
        mesh = generate_mesh(mesh_dict)
        surf_dict = {
            # Wing definition
            "name": "wing",  # name of the surface
            "symmetry": True,  # if true, model one half of wing
            # reflected across the plane y = 0
            "fem_model_type": "tube",
            "mesh": mesh,
            # Structural values are based on aluminum 7075
            "E": 70.0e9,  # [Pa] Young's modulus of the spar
            "G": 30.0e9,  # [Pa] shear modulus of the spar
            "yield": 500.0e6 / 2.5,  # [Pa] yield stress divided by 2.5 for limiting case
            "mrho": 3.0e3,  # [kg/m^3] material density
            "fem_origin": 0.35,  # normalized chordwise location of the spar
            "t_over_c_cp": np.array([0.15]),  # maximum airfoil thickness
            "thickness_cp": np.ones((3)) * 0.0075,
            "wing_weight_ratio": 1.0,
            "struct_weight_relief": False,  # True to add the weight of the structure to the loads on the structure
            "distributed_fuel_weight": False,
            "exact_failure_constraint": False,
        }
        # Create the problem and assign the model group
        prob = om.Problem()
        ny = surf_dict["mesh"].shape[1]
        # Point load of 10 kN in +z applied at the first spanwise node.
        loads = np.zeros((ny, 6))
        loads[0, 2] = 1e4
        indep_var_comp = om.IndepVarComp()
        indep_var_comp.add_output("loads", val=loads, units="N")
        indep_var_comp.add_output("load_factor", val=1.0)
        struct_group = SpatialBeamAlone(surface=surf_dict)
        # Add indep_vars to the structural group
        struct_group.add_subsystem("indep_vars", indep_var_comp, promotes=["*"])
        prob.model.add_subsystem(surf_dict["name"], struct_group)
        prob.driver = om.ScipyOptimizeDriver()
        prob.driver.options["disp"] = True
        # Setup problem and add design variables, constraint, and objective
        prob.model.add_design_var("wing.thickness_cp", lower=0.01, upper=0.5, scaler=1e2)
        prob.model.add_constraint("wing.failure", upper=0.0)
        prob.model.add_constraint("wing.thickness_intersects", upper=0.0)
        # Add design variables, constraint, and objective on the problem
        prob.model.add_objective("wing.structural_mass", scaler=1e-5)
        # Set up the problem
        prob.setup()
        # om.view_model(prob)
        # Single analysis only (no optimization loop); compare against
        # previously recorded reference values.
        prob.run_model()
        assert_rel_error(self, prob["wing.structural_mass"][0], 100.727314456, 1e-4)
        assert_rel_error(self, prob["wing.disp"][0, 2], 0.696503988153, 1e-6)
        np.testing.assert_allclose(prob["wing.disp"][1, :], np.array([-0.0, 0.0, 0.39925232, -0.19102602, 0.0, 0.0]))
# Allow running this test module directly with `python <file>`.
if __name__ == "__main__":
    unittest.main()
| 36.098901 | 117 | 0.61035 |
ed8a30afd83b2830831ae80c48cad45569a0b4e3 | 18,014 | py | Python | ibis/backends/bigquery/tests/test_compiler.py | ZeroCool2u/ibis | 19d152fa97f828ad70e4cd46f3cd7d2bae27eb64 | [
"Apache-2.0"
] | null | null | null | ibis/backends/bigquery/tests/test_compiler.py | ZeroCool2u/ibis | 19d152fa97f828ad70e4cd46f3cd7d2bae27eb64 | [
"Apache-2.0"
] | null | null | null | ibis/backends/bigquery/tests/test_compiler.py | ZeroCool2u/ibis | 19d152fa97f828ad70e4cd46f3cd7d2bae27eb64 | [
"Apache-2.0"
] | null | null | null | import datetime
import pandas as pd
import pytest
import ibis
import ibis.backends.bigquery as bq
import ibis.expr.datatypes as dt
import ibis.expr.operations as ops
from ibis.expr.types import TableExpr
# Mark the whole module as BigQuery-only and skip if the client is absent.
pytestmark = pytest.mark.bigquery
pytest.importorskip('google.cloud.bigquery')
# A date string bound to a timestamp param should compile to an @param ref.
def test_timestamp_accepts_date_literals(alltypes, project_id):
    date_string = '2009-03-01'
    param = ibis.param(dt.timestamp).name('param_0')
    expr = alltypes.mutate(param=param)
    params = {param: date_string}
    result = expr.compile(params=params)
    expected = f"""\
SELECT *, @param AS `param`
FROM `{project_id}.testing.functional_alltypes`"""
    assert result == expected
# distinct=True -> UNION DISTINCT, distinct=False -> UNION ALL.
@pytest.mark.parametrize(
    ('distinct', 'expected_keyword'), [(True, 'DISTINCT'), (False, 'ALL')]
)
def test_union(alltypes, distinct, expected_keyword, project_id):
    expr = alltypes.union(alltypes, distinct=distinct)
    result = expr.compile()
    expected = f"""\
SELECT *
FROM `{project_id}.testing.functional_alltypes`
UNION {expected_keyword}
SELECT *
FROM `{project_id}.testing.functional_alltypes`"""
    assert result == expected
# Division compiles to IEEE_DIVIDE (no error on divide-by-zero).
def test_ieee_divide(alltypes, project_id):
    expr = alltypes.double_col / 0
    result = expr.compile()
    expected = f"""\
SELECT IEEE_DIVIDE(`double_col`, 0) AS `tmp`
FROM `{project_id}.testing.functional_alltypes`"""
    assert result == expected
# identical_to is NULL-safe equality: expands into an IS NULL / OR pattern.
def test_identical_to(alltypes, project_id):
    t = alltypes
    pred = t.string_col.identical_to('a') & t.date_string_col.identical_to('b')
    expr = t[pred]
    result = expr.compile()
    expected = f"""\
SELECT *
FROM `{project_id}.testing.functional_alltypes`
WHERE (((`string_col` IS NULL) AND ('a' IS NULL)) OR (`string_col` = 'a')) AND
      (((`date_string_col` IS NULL) AND ('b' IS NULL)) OR (`date_string_col` = 'b'))"""  # noqa: E501
    assert result == expected
# With a timezone, PARSE_TIMESTAMP gains a third argument.
@pytest.mark.parametrize('timezone', [None, 'America/New_York'])
def test_to_timestamp(alltypes, timezone, project_id):
    expr = alltypes.date_string_col.to_timestamp('%F', timezone)
    result = expr.compile()
    if timezone:
        expected = f"""\
SELECT PARSE_TIMESTAMP('%F', `date_string_col`, 'America/New_York') AS `tmp`
FROM `{project_id}.testing.functional_alltypes`"""
    else:
        expected = f"""\
SELECT PARSE_TIMESTAMP('%F', `date_string_col`) AS `tmp`
FROM `{project_id}.testing.functional_alltypes`"""
    assert result == expected
# date/timestamp Python literals compile to DATE/TIMESTAMP SQL literals.
@pytest.mark.parametrize(
    ('case', 'expected', 'dtype'),
    [
        (datetime.date(2017, 1, 1), "DATE '2017-01-01'", dt.date),
        (pd.Timestamp('2017-01-01'), "DATE '2017-01-01'", dt.date,),
        ('2017-01-01', "DATE '2017-01-01'", dt.date),
        (
            datetime.datetime(2017, 1, 1, 4, 55, 59),
            "TIMESTAMP '2017-01-01 04:55:59'",
            dt.timestamp,
        ),
        (
            '2017-01-01 04:55:59',
            "TIMESTAMP '2017-01-01 04:55:59'",
            dt.timestamp,
        ),
        (
            pd.Timestamp('2017-01-01 04:55:59'),
            "TIMESTAMP '2017-01-01 04:55:59'",
            dt.timestamp,
        ),
    ],
)
def test_literal_date(case, expected, dtype):
    expr = ibis.literal(case, type=dtype).year()
    result = ibis.bigquery.compile(expr)
    assert result == f"SELECT EXTRACT(year from {expected}) AS `tmp`"
# day_of_week.index() is Monday=0 (hence the MOD + 5 shift); full_name()
# formats via FORMAT_DATE / FORMAT_TIMESTAMP (the latter needs a timezone).
@pytest.mark.parametrize(
    ('case', 'expected', 'dtype', 'strftime_func'),
    [
        (
            datetime.date(2017, 1, 1),
            "DATE '2017-01-01'",
            dt.date,
            'FORMAT_DATE',
        ),
        (
            pd.Timestamp('2017-01-01'),
            "DATE '2017-01-01'",
            dt.date,
            'FORMAT_DATE',
        ),
        ('2017-01-01', "DATE '2017-01-01'", dt.date, 'FORMAT_DATE',),
        (
            datetime.datetime(2017, 1, 1, 4, 55, 59),
            "TIMESTAMP '2017-01-01 04:55:59'",
            dt.timestamp,
            'FORMAT_TIMESTAMP',
        ),
        (
            '2017-01-01 04:55:59',
            "TIMESTAMP '2017-01-01 04:55:59'",
            dt.timestamp,
            'FORMAT_TIMESTAMP',
        ),
        (
            pd.Timestamp('2017-01-01 04:55:59'),
            "TIMESTAMP '2017-01-01 04:55:59'",
            dt.timestamp,
            'FORMAT_TIMESTAMP',
        ),
    ],
)
def test_day_of_week(case, expected, dtype, strftime_func):
    date_var = ibis.literal(case, type=dtype)
    expr_index = date_var.day_of_week.index()
    result = ibis.bigquery.compile(expr_index)
    assert (
        result
        == f"SELECT MOD(EXTRACT(DAYOFWEEK FROM {expected}) + 5, 7) AS `tmp`"
    )
    expr_name = date_var.day_of_week.full_name()
    result = ibis.bigquery.compile(expr_name)
    if strftime_func == 'FORMAT_TIMESTAMP':
        assert (
            result
            == f"SELECT {strftime_func}('%A', {expected}, 'UTC') AS `tmp`"
        )
    else:
        assert result == f"SELECT {strftime_func}('%A', {expected}) AS `tmp`"
# TIMESTAMP/TIME literals both support EXTRACT(hour ...).
@pytest.mark.parametrize(
    ('case', 'expected', 'dtype'),
    [
        (
            datetime.datetime(2017, 1, 1, 4, 55, 59),
            "TIMESTAMP '2017-01-01 04:55:59'",
            dt.timestamp,
        ),
        (
            '2017-01-01 04:55:59',
            "TIMESTAMP '2017-01-01 04:55:59'",
            dt.timestamp,
        ),
        (
            pd.Timestamp('2017-01-01 04:55:59'),
            "TIMESTAMP '2017-01-01 04:55:59'",
            dt.timestamp,
        ),
        (datetime.time(4, 55, 59), "TIME '04:55:59'", dt.time),
        ('04:55:59', "TIME '04:55:59'", dt.time),
    ],
)
def test_literal_timestamp_or_time(case, expected, dtype):
    expr = ibis.literal(case, type=dtype).hour()
    result = ibis.bigquery.compile(expr)
    assert result == f"SELECT EXTRACT(hour from {expected}) AS `tmp`"
# ROWS-based windows: preceding/following bounds map onto ROWS BETWEEN.
def test_window_function(alltypes, project_id):
    t = alltypes
    w1 = ibis.window(
        preceding=1, following=0, group_by='year', order_by='timestamp_col'
    )
    expr = t.mutate(win_avg=t.float_col.mean().over(w1))
    result = expr.compile()
    expected = f"""\
SELECT *,
       avg(`float_col`) OVER (PARTITION BY `year` ORDER BY `timestamp_col` ROWS BETWEEN 1 PRECEDING AND CURRENT ROW) AS `win_avg`
FROM `{project_id}.testing.functional_alltypes`"""  # noqa: E501
    assert result == expected
    w2 = ibis.window(
        preceding=0, following=2, group_by='year', order_by='timestamp_col'
    )
    expr = t.mutate(win_avg=t.float_col.mean().over(w2))
    result = expr.compile()
    expected = f"""\
SELECT *,
       avg(`float_col`) OVER (PARTITION BY `year` ORDER BY `timestamp_col` ROWS BETWEEN CURRENT ROW AND 2 FOLLOWING) AS `win_avg`
FROM `{project_id}.testing.functional_alltypes`"""  # noqa: E501
    assert result == expected
    # A (start, end) tuple for `preceding` gives a fully-preceding frame.
    w3 = ibis.window(
        preceding=(4, 2), group_by='year', order_by='timestamp_col'
    )
    expr = t.mutate(win_avg=t.float_col.mean().over(w3))
    result = expr.compile()
    expected = f"""\
SELECT *,
       avg(`float_col`) OVER (PARTITION BY `year` ORDER BY `timestamp_col` ROWS BETWEEN 4 PRECEDING AND 2 PRECEDING) AS `win_avg`
FROM `{project_id}.testing.functional_alltypes`"""  # noqa: E501
    assert result == expected
# RANGE-based windows; timestamp order keys are wrapped in UNIX_MICROS.
def test_range_window_function(alltypes, project_id):
    t = alltypes
    w = ibis.range_window(
        preceding=1, following=0, group_by='year', order_by='month'
    )
    expr = t.mutate(two_month_avg=t.float_col.mean().over(w))
    result = expr.compile()
    expected = f"""\
SELECT *,
       avg(`float_col`) OVER (PARTITION BY `year` ORDER BY `month` RANGE BETWEEN 1 PRECEDING AND CURRENT ROW) AS `two_month_avg`
FROM `{project_id}.testing.functional_alltypes`"""  # noqa: E501
    assert result == expected
    w3 = ibis.range_window(
        preceding=(4, 2), group_by='year', order_by='timestamp_col'
    )
    expr = t.mutate(win_avg=t.float_col.mean().over(w3))
    result = expr.compile()
    expected = f"""\
SELECT *,
       avg(`float_col`) OVER (PARTITION BY `year` ORDER BY UNIX_MICROS(`timestamp_col`) RANGE BETWEEN 4 PRECEDING AND 2 PRECEDING) AS `win_avg`
FROM `{project_id}.testing.functional_alltypes`"""  # noqa: E501
    assert result == expected
# Interval preceding-bounds are converted to microseconds for RANGE frames.
@pytest.mark.parametrize(
    ('preceding', 'value'),
    [
        (5, 5),
        (ibis.interval(nanoseconds=1), 0.001),
        (ibis.interval(microseconds=1), 1),
        (ibis.interval(seconds=1), 1000000),
        (ibis.interval(minutes=1), 1000000 * 60),
        (ibis.interval(hours=1), 1000000 * 60 * 60),
        (ibis.interval(days=1), 1000000 * 60 * 60 * 24),
        (2 * ibis.interval(days=1), 1000000 * 60 * 60 * 24 * 2),
        (ibis.interval(weeks=1), 1000000 * 60 * 60 * 24 * 7),
    ],
)
def test_trailing_range_window(alltypes, preceding, value, project_id):
    t = alltypes
    w = ibis.trailing_range_window(
        preceding=preceding, order_by=t.timestamp_col
    )
    expr = t.mutate(win_avg=t.float_col.mean().over(w))
    result = expr.compile()
    expected = f"""\
SELECT *,
       avg(`float_col`) OVER (ORDER BY UNIX_MICROS(`timestamp_col`) RANGE BETWEEN {value} PRECEDING AND CURRENT ROW) AS `win_avg`
FROM `{project_id}.testing.functional_alltypes`"""  # noqa: E501
    assert result == expected
# Variable-length intervals (years) cannot be converted to microseconds.
@pytest.mark.parametrize(
    ('preceding', 'value'), [(ibis.interval(years=1), None)]
)
def test_trailing_range_window_unsupported(alltypes, preceding, value):
    t = alltypes
    w = ibis.trailing_range_window(
        preceding=preceding, order_by=t.timestamp_col
    )
    expr = t.mutate(win_avg=t.float_col.mean().over(w))
    with pytest.raises(ValueError):
        expr.compile()
# A repeatedly-unioned aggregate should be emitted once as a CTE (t0) and
# the union keyword should track each call's `distinct` flag independently.
@pytest.mark.parametrize(
    ('distinct1', 'distinct2', 'expected1', 'expected2'),
    [
        (True, True, 'UNION DISTINCT', 'UNION DISTINCT'),
        (True, False, 'UNION DISTINCT', 'UNION ALL'),
        (False, True, 'UNION ALL', 'UNION DISTINCT'),
        (False, False, 'UNION ALL', 'UNION ALL'),
    ],
)
def test_union_cte(
    alltypes, distinct1, distinct2, expected1, expected2, project_id
):
    t = alltypes
    expr1 = t.group_by(t.string_col).aggregate(metric=t.double_col.sum())
    expr2 = expr1.view()
    expr3 = expr1.view()
    expr = expr1.union(expr2, distinct=distinct1).union(
        expr3, distinct=distinct2
    )
    result = expr.compile()
    expected = f"""\
WITH t0 AS (
  SELECT `string_col`, sum(`double_col`) AS `metric`
  FROM `{project_id}.testing.functional_alltypes`
  GROUP BY 1
)
SELECT *
FROM t0
{expected1}
SELECT `string_col`, sum(`double_col`) AS `metric`
FROM `{project_id}.testing.functional_alltypes`
GROUP BY 1
{expected2}
SELECT `string_col`, sum(`double_col`) AS `metric`
FROM `{project_id}.testing.functional_alltypes`
GROUP BY 1"""
    assert result == expected
# Each projection layer becomes its own CTE; fusion must not reach past the
# immediate parent (otherwise filters/mutations would be reordered).
def test_projection_fusion_only_peeks_at_immediate_parent():
    schema = [
        ('file_date', 'timestamp'),
        ('PARTITIONTIME', 'date'),
        ('val', 'int64'),
    ]
    table = ibis.table(schema, name='unbound_table')
    table = table[table.PARTITIONTIME < ibis.date('2017-01-01')]
    table = table.mutate(file_date=table.file_date.cast('date'))
    table = table[table.file_date < ibis.date('2017-01-01')]
    table = table.mutate(XYZ=table.val * 2)
    expr = table.join(table.view())[table]
    result = ibis.bigquery.compile(expr)
    expected = """\
WITH t0 AS (
  SELECT *
  FROM unbound_table
  WHERE `PARTITIONTIME` < DATE '2017-01-01'
),
t1 AS (
  SELECT CAST(`file_date` AS DATE) AS `file_date`, `PARTITIONTIME`, `val`
  FROM t0
),
t2 AS (
  SELECT t1.*
  FROM t1
  WHERE t1.`file_date` < DATE '2017-01-01'
),
t3 AS (
  SELECT *, `val` * 2 AS `XYZ`
  FROM t2
)
SELECT t3.*
FROM t3
  CROSS JOIN t3 t4"""
    assert result == expected
# Boolean columns are CAST to INT64 before avg()/sum().
def test_bool_reducers(alltypes, project_id):
    b = alltypes.bool_col
    expr = b.mean()
    result = expr.compile()
    expected = f"""\
SELECT avg(CAST(`bool_col` AS INT64)) AS `mean`
FROM `{project_id}.testing.functional_alltypes`"""
    assert result == expected
    expr2 = b.sum()
    result = expr2.compile()
    expected = f"""\
SELECT sum(CAST(`bool_col` AS INT64)) AS `sum`
FROM `{project_id}.testing.functional_alltypes`"""
    assert result == expected
# A `where=` filter becomes a CASE WHEN ... ELSE NULL wrapper.
def test_bool_reducers_where(alltypes, project_id):
    b = alltypes.bool_col
    m = alltypes.month
    expr = b.mean(where=m > 6)
    result = expr.compile()
    expected = f"""\
SELECT avg(CASE WHEN `month` > 6 THEN CAST(`bool_col` AS INT64) ELSE NULL END) AS `mean`
FROM `{project_id}.testing.functional_alltypes`"""  # noqa: E501
    assert result == expected
    expr2 = b.sum(where=((m > 6) & (m < 10)))
    result = expr2.compile()
    expected = f"""\
SELECT sum(CASE WHEN (`month` > 6) AND (`month` < 10) THEN CAST(`bool_col` AS INT64) ELSE NULL END) AS `sum`
FROM `{project_id}.testing.functional_alltypes`"""  # noqa: E501
    assert result == expected
# approx_nunique maps to APPROX_COUNT_DISTINCT, with the usual CASE
# wrapper when a where= filter is supplied.
def test_approx_nunique(alltypes, project_id):
    d = alltypes.double_col
    expr = d.approx_nunique()
    result = expr.compile()
    expected = f"""\
SELECT APPROX_COUNT_DISTINCT(`double_col`) AS `approx_nunique`
FROM `{project_id}.testing.functional_alltypes`"""
    assert result == expected
    b = alltypes.bool_col
    m = alltypes.month
    expr2 = b.approx_nunique(where=m > 6)
    result = expr2.compile()
    expected = f"""\
SELECT APPROX_COUNT_DISTINCT(CASE WHEN `month` > 6 THEN `bool_col` ELSE NULL END) AS `approx_nunique`
FROM `{project_id}.testing.functional_alltypes`"""  # noqa: E501
    assert result == expected
# approx_median is expressed as the middle APPROX_QUANTILES bucket.
def test_approx_median(alltypes, project_id):
    d = alltypes.double_col
    expr = d.approx_median()
    result = expr.compile()
    expected = f"""\
SELECT APPROX_QUANTILES(`double_col`, 2)[OFFSET(1)] AS `approx_median`
FROM `{project_id}.testing.functional_alltypes`"""
    assert result == expected
    m = alltypes.month
    expr2 = d.approx_median(where=m > 6)
    result = expr2.compile()
    expected = f"""\
SELECT APPROX_QUANTILES(CASE WHEN `month` > 6 THEN `double_col` ELSE NULL END, 2)[OFFSET(1)] AS `approx_median`
FROM `{project_id}.testing.functional_alltypes`"""  # noqa: E501
    assert result == expected
# cov: sample covariance by default, COVAR_POP for how='pop',
# and anything else is rejected.
def test_cov(alltypes, project_id):
    d = alltypes.double_col
    expr = d.cov(d)
    result = expr.compile()
    expected = f"""\
SELECT COVAR_SAMP(`double_col`, `double_col`) AS `tmp`
FROM `{project_id}.testing.functional_alltypes`"""
    assert result == expected
    expr = d.cov(d, how='pop')
    result = expr.compile()
    expected = f"""\
SELECT COVAR_POP(`double_col`, `double_col`) AS `tmp`
FROM `{project_id}.testing.functional_alltypes`"""
    assert result == expected
    with pytest.raises(ValueError):
        d.cov(d, how='error')
@pytest.mark.parametrize(
('unit', 'expected_unit', 'expected_func'),
[
('Y', 'YEAR', 'TIMESTAMP'),
('Q', 'QUARTER', 'TIMESTAMP'),
('M', 'MONTH', 'TIMESTAMP'),
('W', 'WEEK', 'TIMESTAMP'),
('D', 'DAY', 'TIMESTAMP'),
('h', 'HOUR', 'TIMESTAMP'),
('m', 'MINUTE', 'TIMESTAMP'),
('s', 'SECOND', 'TIMESTAMP'),
('ms', 'MILLISECOND', 'TIMESTAMP'),
('us', 'MICROSECOND', 'TIMESTAMP'),
('Y', 'YEAR', 'DATE'),
('Q', 'QUARTER', 'DATE'),
('M', 'MONTH', 'DATE'),
('W', 'WEEK', 'DATE'),
('D', 'DAY', 'DATE'),
('h', 'HOUR', 'TIME'),
('m', 'MINUTE', 'TIME'),
('s', 'SECOND', 'TIME'),
('ms', 'MILLISECOND', 'TIME'),
('us', 'MICROSECOND', 'TIME'),
],
)
def test_temporal_truncate(unit, expected_unit, expected_func):
t = ibis.table([('a', getattr(dt, expected_func.lower()))], name='t')
expr = t.a.truncate(unit)
result = ibis.bigquery.compile(expr)
expected = f"""\
SELECT {expected_func}_TRUNC(`a`, {expected_unit}) AS `tmp`
FROM t"""
assert result == expected
@pytest.mark.parametrize('kind', ['date', 'time'])
def test_extract_temporal_from_timestamp(kind):
    """``.date()`` / ``.time()`` on a timestamp compile to DATE()/TIME()."""
    table = ibis.table([('ts', dt.timestamp)], name='t')
    extracted = getattr(table.ts, kind)()
    expected = f"""\
SELECT {kind.upper()}(`ts`) AS `tmp`
FROM t"""
    assert ibis.bigquery.compile(extracted) == expected
def test_now():
    """``ibis.now()`` compiles to BigQuery's CURRENT_TIMESTAMP()."""
    compiled = ibis.bigquery.compile(ibis.now())
    assert compiled == 'SELECT CURRENT_TIMESTAMP() AS `tmp`'
def test_bucket():
    """Bucketing a numeric column compiles to a CASE/WHEN expression."""
    t = ibis.table([('value', 'double')], name='t')
    # Two buckets: [0, 1) and [1, 3]; values outside fall through to NULL.
    buckets = [0, 1, 3]
    # NOTE(review): the expression is named 'foo' here but the expected SQL
    # aliases it as `tmp` — presumably the compiler ignores the name for a
    # bare column projection; confirm against the compiler's aliasing rules.
    expr = t.value.bucket(buckets).name('foo')
    result = ibis.bigquery.compile(expr)
    expected = """\
SELECT
  CASE
    WHEN (`value` >= 0) AND (`value` < 1) THEN 0
    WHEN (`value` >= 1) AND (`value` <= 3) THEN 1
    ELSE CAST(NULL AS INT64)
  END AS `tmp`
FROM t"""
    assert result == expected
@pytest.mark.parametrize(
    ('kind', 'begin', 'end', 'expected'),
    [
        # A None bound compiles to the corresponding UNBOUNDED keyword.
        ('preceding', None, 1, 'UNBOUNDED PRECEDING AND 1 PRECEDING'),
        ('following', 1, None, '1 FOLLOWING AND UNBOUNDED FOLLOWING'),
    ],
)
def test_window_unbounded(kind, begin, end, expected):
    """Half-open window bounds compile to ROWS BETWEEN ... UNBOUNDED clauses."""
    t = ibis.table([('a', 'int64')], name='t')
    # Build either window(preceding=(begin, end)) or window(following=(begin, end)).
    kwargs = {kind: (begin, end)}
    expr = t.a.sum().over(ibis.window(**kwargs))
    result = ibis.bigquery.compile(expr)
    assert (
        result
        == f"""\
SELECT sum(`a`) OVER (ROWS BETWEEN {expected}) AS `tmp`
FROM t"""
    )
def test_large_compile():
    """
    Tests that compiling a large expression tree finishes
    within a reasonable amount of time
    """
    num_columns = 20
    num_joins = 7
    # Stub client: skips BigQueryClient.__init__ so no credentials/network
    # are needed just to build expressions.
    class MockBigQueryClient(bq.BigQueryClient):
        def __init__(self):
            pass
    names = [f"col_{i}" for i in range(num_columns)]
    schema = ibis.Schema(names, ['string'] * num_columns)
    ibis_client = MockBigQueryClient()
    table = TableExpr(
        ops.SQLQueryResult("select * from t", schema, ibis_client)
    )
    # Each iteration self-joins the table, roughly doubling the tree depth,
    # so the final expression is exponentially large in num_joins.
    for _ in range(num_joins):
        table = table.mutate(dummy=ibis.literal(""))
        table = table.left_join(table, ["dummy"])[[table]]
    # Wall-clock bound: compilation must not blow up combinatorially.
    start = datetime.datetime.now()
    table.compile()
    delta = datetime.datetime.now() - start
    assert delta.total_seconds() < 10
| 31.112263 | 143 | 0.61974 |
1fa252b38d0f2b644f587058098d8d7f7a3a4621 | 3,668 | py | Python | nipy/modalities/fmri/tests/test_hrf.py | bpinsard/nipy | d49e8292adad6619e3dac710752131b567efe90e | [
"BSD-3-Clause"
] | 236 | 2015-01-09T21:28:37.000Z | 2022-03-27T11:51:58.000Z | nipy/modalities/fmri/tests/test_hrf.py | bpinsard/nipy | d49e8292adad6619e3dac710752131b567efe90e | [
"BSD-3-Clause"
] | 171 | 2015-03-23T00:31:43.000Z | 2021-11-22T12:43:00.000Z | nipy/modalities/fmri/tests/test_hrf.py | bpinsard/nipy | d49e8292adad6619e3dac710752131b567efe90e | [
"BSD-3-Clause"
] | 94 | 2015-02-01T12:39:47.000Z | 2022-01-27T06:38:19.000Z | """ Testing hrf module
"""
from __future__ import absolute_import
from os.path import dirname, join as pjoin
import numpy as np
from scipy.stats import gamma
import scipy.io as sio
from ..hrf import (
gamma_params,
gamma_expr,
lambdify_t,
spm_hrf_compat,
spmt,
dspmt,
ddspmt,
)
from nose.tools import assert_raises
from numpy.testing import assert_almost_equal
def test_gamma():
    """gamma_expr matches scipy's gamma pdf up to a scalar factor."""
    t = np.linspace(0, 30, 5000)
    # make up some numbers
    pk_t = 5.0
    fwhm = 6.0
    # get the estimated parameters
    shape, scale, coef = gamma_params(pk_t, fwhm)
    # get distribution function
    g_exp = gamma_expr(pk_t, fwhm)
    # make matching standard distribution
    gf = gamma(shape, scale=scale).pdf
    # get values
    L1t = gf(t)
    L2t = lambdify_t(g_exp)(t)
    # they are the same bar a scaling factor
    # (compare only where the pdf is non-negligible to avoid 0/0).
    nz = np.abs(L1t) > 1e-15
    sf = np.mean(L1t[nz] / L2t[nz])
    assert_almost_equal(L1t , L2t*sf)
def test_spm_hrf():
    # Regression tests for spm hrf, time derivative and dispersion derivative
    # Check that absolute values don't change (much) with different dt, and that
    # max values are roughly the same and in the same place in time
    for dt in 0.1, 0.01, 0.001:
        t_vec = np.arange(0, 32, dt)
        hrf = spmt(t_vec)
        # Canonical HRF peaks near t=5 with amplitude ~0.21.
        assert_almost_equal(np.max(hrf), 0.21053, 5)
        assert_almost_equal(t_vec[np.argmax(hrf)], 5, 2)
        # Temporal derivative peaks earlier, near t=3.3.
        dhrf = dspmt(t_vec)
        assert_almost_equal(np.max(dhrf), 0.08, 3)
        assert_almost_equal(t_vec[np.argmax(dhrf)], 3.3, 1)
        # Dispersion derivative peaks near t=5.7.
        dhrf = ddspmt(t_vec)
        assert_almost_equal(np.max(dhrf), 0.10, 2)
        assert_almost_equal(t_vec[np.argmax(dhrf)], 5.7, 1)
    # Test reversed time vector to check that order of time values does not
    # affect result
    # NOTE(review): rt_vec is a forward arange, not a reversed vector, and the
    # second assertion below reuses `t_vec`/`hrf` left over from the loop
    # rather than `rt_vec`/`rhrf` — confirm whether this is intentional.
    rt_vec = np.arange(0, 32, 0.01)
    rhrf = spmt(rt_vec)
    assert_almost_equal(np.max(rhrf), 0.21053, 5)
    assert_almost_equal(t_vec[np.argmax(hrf)], 5, 2)
def test_spm_hrf_octave():
    # Test SPM hrf against output from SPM code running in Octave
    my_path = dirname(__file__)
    hrfs_path = pjoin(my_path, 'spm_hrfs.mat')
    # mat file resulting from make_hrfs.m
    hrfs_mat = sio.loadmat(hrfs_path, squeeze_me=True)
    params = hrfs_mat['params']
    hrfs = hrfs_mat['hrfs']
    # Each row of `params` holds one parameter set; compare our HRF against
    # the precomputed Octave output for the same parameters.
    for i, pvec in enumerate(params):
        dt, ppk, upk, pdsp, udsp, rat = pvec
        t_vec = np.arange(0, 32.1, dt)
        our_hrf = spm_hrf_compat(t_vec,
                                 peak_delay=ppk,
                                 peak_disp=pdsp,
                                 under_delay=upk,
                                 under_disp=udsp,
                                 p_u_ratio=rat)
        # Normalize integral to match SPM
        assert_almost_equal(our_hrf, hrfs[i])
    # Test basis functions
    # mat file resulting from get_td_dd.m
    bases_path = pjoin(my_path, 'spm_bases.mat')
    bases_mat = sio.loadmat(bases_path, squeeze_me=True)
    dt = bases_mat['dt']
    t_vec = np.arange(0, 32 + dt, dt)
    # SPM function divides by sum of values - revert with dt
    assert_almost_equal(spmt(t_vec), bases_mat['hrf'] / dt, 4)
    assert_almost_equal(dspmt(t_vec), bases_mat['dhrf'] / dt, 4)
    assert_almost_equal(ddspmt(t_vec), bases_mat['ddhrf'] / dt, 4)
def test_spm_hrf_errors():
    """Non-positive shape parameters to spm_hrf_compat raise ValueError."""
    t_vec = np.arange(0, 32)
    # All 1s is fine
    res = spm_hrf_compat(t_vec, 1, 1, 1, 1)
    # 0 or negative raise error for other args
    # Each loop iteration checks one positional parameter with 0 and -1,
    # then resets it to 1 and extends `args` to target the next parameter.
    args = [0]
    for i in range(4):
        assert_raises(ValueError, spm_hrf_compat, t_vec, *args)
        args[-1] = -1
        assert_raises(ValueError, spm_hrf_compat, t_vec, *args)
        args[-1] = 1
        args.append(0)
b68a37d555aa43c8c53daa57d8e7fb68a6a8011f | 1,013 | py | Python | tensorflow_datasets/text/drop/drop_test.py | jvishnuvardhan/datasets | b8e38187058f1221e67c6291b3f29385ebb35fa2 | [
"Apache-2.0"
] | 3,380 | 2018-09-11T05:03:31.000Z | 2022-03-31T20:04:57.000Z | tensorflow_datasets/text/drop/drop_test.py | jvishnuvardhan/datasets | b8e38187058f1221e67c6291b3f29385ebb35fa2 | [
"Apache-2.0"
] | 3,142 | 2018-09-14T10:09:00.000Z | 2022-03-31T18:25:44.000Z | tensorflow_datasets/text/drop/drop_test.py | jvishnuvardhan/datasets | b8e38187058f1221e67c6291b3f29385ebb35fa2 | [
"Apache-2.0"
] | 1,438 | 2018-09-16T13:58:22.000Z | 2022-03-31T11:19:54.000Z | # coding=utf-8
# Copyright 2021 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""drop dataset."""
import tensorflow_datasets.public_api as tfds
from tensorflow_datasets.text.drop import drop
class DropTest(tfds.testing.DatasetBuilderTestCase):
"""Tests for drop dataset."""
DATASET_CLASS = drop.Drop
SPLITS = {
'train': 20, # Number of fake train example
'dev': 19, # Number of fake test example
}
if __name__ == '__main__':
tfds.testing.test_main()
| 30.69697 | 74 | 0.739388 |
4c93329531fdacea25097a01ff6c249044f1d1e4 | 17,482 | py | Python | tests/unit/fake_api.py | kennylajara/docker-py | a48a5a9647761406d66e8271f19fab7fa0c5f582 | [
"Apache-2.0"
] | 5,611 | 2015-01-02T16:46:16.000Z | 2022-03-31T21:49:58.000Z | tests/unit/fake_api.py | sdrees/docker-py | 8595cca8186b5d53c04ef71a1a3db86b7c53b012 | [
"Apache-2.0"
] | 2,176 | 2015-01-01T00:57:56.000Z | 2022-03-31T13:21:54.000Z | tests/unit/fake_api.py | sdrees/docker-py | 8595cca8186b5d53c04ef71a1a3db86b7c53b012 | [
"Apache-2.0"
] | 1,774 | 2015-01-05T12:49:03.000Z | 2022-03-29T13:27:47.000Z | from docker import constants
from . import fake_stat
CURRENT_VERSION = f'v{constants.DEFAULT_DOCKER_API_VERSION}'
FAKE_CONTAINER_ID = '3cc2351ab11b'
FAKE_IMAGE_ID = 'e9aa60c60128'
FAKE_EXEC_ID = 'd5d177f121dc'
FAKE_NETWORK_ID = '33fb6a3462b8'
FAKE_IMAGE_NAME = 'test_image'
FAKE_TARBALL_PATH = '/path/to/tarball'
FAKE_REPO_NAME = 'repo'
FAKE_TAG_NAME = 'tag'
FAKE_FILE_NAME = 'file'
FAKE_URL = 'myurl'
FAKE_PATH = '/path'
FAKE_VOLUME_NAME = 'perfectcherryblossom'
FAKE_NODE_ID = '24ifsmvkjbyhk'
FAKE_SECRET_ID = 'epdyrw4tsi03xy3deu8g8ly6o'
FAKE_SECRET_NAME = 'super_secret'
# Each method is prefixed with HTTP method (get, post...)
# for clarity and readability
def get_fake_version():
    """Fake response for the Docker /version endpoint (HTTP 200)."""
    engine_details = {
        'ApiVersion': '1.35',
        'Arch': 'amd64',
        'BuildTime': '2018-01-10T20:09:37.000000000+00:00',
        'Experimental': 'false',
        'GitCommit': '03596f5',
        'GoVersion': 'go1.9.2',
        'KernelVersion': '4.4.0-112-generic',
        'MinAPIVersion': '1.12',
        'Os': 'linux',
    }
    payload = {
        'ApiVersion': '1.35',
        'Arch': 'amd64',
        'BuildTime': '2018-01-10T20:09:37.000000000+00:00',
        'Components': [{
            'Details': engine_details,
            'Name': 'Engine',
            'Version': '18.01.0-ce',
        }],
        'GitCommit': '03596f5',
        'GoVersion': 'go1.9.2',
        'KernelVersion': '4.4.0-112-generic',
        'MinAPIVersion': '1.12',
        'Os': 'linux',
        'Platform': {'Name': ''},
        'Version': '18.01.0-ce',
    }
    return 200, payload
def get_fake_info():
    """Fake response for the Docker /info endpoint (HTTP 200)."""
    return 200, {
        'Containers': 1,
        'Images': 1,
        'Debug': False,
        'MemoryLimit': False,
        'SwapLimit': False,
        'IPv4Forwarding': True,
    }
def post_fake_auth():
    """Fake response for the Docker /auth endpoint (HTTP 200)."""
    payload = {
        'Status': 'Login Succeeded',
        'IdentityToken': '9cbaf023786cd7',
    }
    return 200, payload
def get_fake_ping():
    """Fake response for the Docker /_ping endpoint (HTTP 200)."""
    status_code = 200
    body = "OK"
    return status_code, body
def get_fake_search():
    """Fake response for the Docker /images/search endpoint (HTTP 200)."""
    hits = [{'Name': 'busybox', 'Description': 'Fake Description'}]
    return 200, hits
def get_fake_images():
status_code = 200
response = [{
'Id': FAKE_IMAGE_ID,
'Created': '2 days ago',
'Repository': 'busybox',
'RepoTags': ['busybox:latest', 'busybox:1.0'],
}]
return status_code, response
def get_fake_image_history():
    """Fake response for the image /history endpoint (HTTP 200)."""
    layers = [
        {"Id": "b750fe79269d", "Created": 1364102658, "CreatedBy": "/bin/bash"},
        {"Id": "27cf78414709", "Created": 1364068391, "CreatedBy": ""},
    ]
    return 200, layers
def post_fake_import_image():
status_code = 200
response = 'Import messages...'
return status_code, response
def get_fake_containers():
status_code = 200
response = [{
'Id': FAKE_CONTAINER_ID,
'Image': 'busybox:latest',
'Created': '2 days ago',
'Command': 'true',
'Status': 'fake status'
}]
return status_code, response
def post_fake_start_container():
status_code = 200
response = {'Id': FAKE_CONTAINER_ID}
return status_code, response
def post_fake_resize_container():
status_code = 200
response = {'Id': FAKE_CONTAINER_ID}
return status_code, response
def post_fake_create_container():
status_code = 200
response = {'Id': FAKE_CONTAINER_ID}
return status_code, response
def get_fake_inspect_container(tty=False):
status_code = 200
response = {
'Id': FAKE_CONTAINER_ID,
'Config': {'Labels': {'foo': 'bar'}, 'Privileged': True, 'Tty': tty},
'ID': FAKE_CONTAINER_ID,
'Image': 'busybox:latest',
'Name': 'foobar',
"State": {
"Status": "running",
"Running": True,
"Pid": 0,
"ExitCode": 0,
"StartedAt": "2013-09-25T14:01:18.869545111+02:00",
"Ghost": False
},
"HostConfig": {
"LogConfig": {
"Type": "json-file",
"Config": {}
},
},
"MacAddress": "02:42:ac:11:00:0a"
}
return status_code, response
def get_fake_inspect_image():
status_code = 200
response = {
'Id': FAKE_IMAGE_ID,
'Parent': "27cf784147099545",
'Created': "2013-03-23T22:24:18.818426-07:00",
'Container': FAKE_CONTAINER_ID,
'Config': {'Labels': {'bar': 'foo'}},
'ContainerConfig':
{
"Hostname": "",
"User": "",
"Memory": 0,
"MemorySwap": 0,
"AttachStdin": False,
"AttachStdout": False,
"AttachStderr": False,
"PortSpecs": "",
"Tty": True,
"OpenStdin": True,
"StdinOnce": False,
"Env": "",
"Cmd": ["/bin/bash"],
"Dns": "",
"Image": "base",
"Volumes": "",
"VolumesFrom": "",
"WorkingDir": ""
},
'Size': 6823592
}
return status_code, response
def get_fake_insert_image():
status_code = 200
response = {'StatusCode': 0}
return status_code, response
def get_fake_wait():
status_code = 200
response = {'StatusCode': 0}
return status_code, response
def get_fake_logs():
    """Fake multiplexed container log stream (Docker attach framing, HTTP 200).

    Each frame is an 8-byte header (stream id + big-endian payload length)
    followed by the payload bytes.
    """
    frames = [
        b'\x01\x00\x00\x00\x00\x00\x00\x00',
        b'\x02\x00\x00\x00\x00\x00\x00\x00',
        b'\x01\x00\x00\x00\x00\x00\x00\x11Flowering Nights\n',
        b'\x01\x00\x00\x00\x00\x00\x00\x10(Sakuya Iyazoi)\n',
    ]
    return 200, b''.join(frames)
def get_fake_diff():
status_code = 200
response = [{'Path': '/test', 'Kind': 1}]
return status_code, response
def get_fake_events():
status_code = 200
response = [{'status': 'stop', 'id': FAKE_CONTAINER_ID,
'from': FAKE_IMAGE_ID, 'time': 1423247867}]
return status_code, response
def get_fake_export():
status_code = 200
response = 'Byte Stream....'
return status_code, response
def post_fake_exec_create():
status_code = 200
response = {'Id': FAKE_EXEC_ID}
return status_code, response
def post_fake_exec_start():
status_code = 200
response = (b'\x01\x00\x00\x00\x00\x00\x00\x11bin\nboot\ndev\netc\n'
b'\x01\x00\x00\x00\x00\x00\x00\x12lib\nmnt\nproc\nroot\n'
b'\x01\x00\x00\x00\x00\x00\x00\x0csbin\nusr\nvar\n')
return status_code, response
def post_fake_exec_resize():
status_code = 201
return status_code, ''
def get_fake_exec_inspect():
return 200, {
'OpenStderr': True,
'OpenStdout': True,
'Container': get_fake_inspect_container()[1],
'Running': False,
'ProcessConfig': {
'arguments': ['hello world'],
'tty': False,
'entrypoint': 'echo',
'privileged': False,
'user': ''
},
'ExitCode': 0,
'ID': FAKE_EXEC_ID,
'OpenStdin': False
}
def post_fake_stop_container():
status_code = 200
response = {'Id': FAKE_CONTAINER_ID}
return status_code, response
def post_fake_kill_container():
status_code = 200
response = {'Id': FAKE_CONTAINER_ID}
return status_code, response
def post_fake_pause_container():
status_code = 200
response = {'Id': FAKE_CONTAINER_ID}
return status_code, response
def post_fake_unpause_container():
status_code = 200
response = {'Id': FAKE_CONTAINER_ID}
return status_code, response
def post_fake_restart_container():
status_code = 200
response = {'Id': FAKE_CONTAINER_ID}
return status_code, response
def post_fake_rename_container():
status_code = 204
return status_code, None
def delete_fake_remove_container():
status_code = 200
response = {'Id': FAKE_CONTAINER_ID}
return status_code, response
def post_fake_image_create():
status_code = 200
response = {'Id': FAKE_IMAGE_ID}
return status_code, response
def delete_fake_remove_image():
status_code = 200
response = {'Id': FAKE_IMAGE_ID}
return status_code, response
def get_fake_get_image():
status_code = 200
response = 'Byte Stream....'
return status_code, response
def post_fake_load_image():
status_code = 200
response = {'Id': FAKE_IMAGE_ID}
return status_code, response
def post_fake_commit():
status_code = 200
response = {'Id': FAKE_CONTAINER_ID}
return status_code, response
def post_fake_push():
status_code = 200
response = {'Id': FAKE_IMAGE_ID}
return status_code, response
def post_fake_build_container():
status_code = 200
response = {'Id': FAKE_CONTAINER_ID}
return status_code, response
def post_fake_tag_image():
status_code = 200
response = {'Id': FAKE_IMAGE_ID}
return status_code, response
def get_fake_stats():
status_code = 200
response = fake_stat.OBJ
return status_code, response
def get_fake_top():
    """Fake response for the container /top endpoint (HTTP 200)."""
    titles = ['UID', 'PID', 'PPID', 'C', 'STIME', 'TTY', 'TIME', 'CMD']
    process = [
        'root', '26501', '6907', '0', '10:32', 'pts/55', '00:00:00', 'sleep 60',
    ]
    return 200, {'Processes': [process], 'Titles': titles}
def get_fake_volume_list():
    """Fake response for the /volumes list endpoint (HTTP 200)."""
    def _local_volume(name):
        # All fake volumes use the local driver and the default mountpoint root.
        return {
            'Name': name,
            'Driver': 'local',
            'Mountpoint': '/var/lib/docker/volumes/' + name,
            'Scope': 'local',
        }
    payload = {
        'Volumes': [
            _local_volume('perfectcherryblossom'),
            _local_volume('subterraneananimism'),
        ]
    }
    return 200, payload
def get_fake_volume():
status_code = 200
response = {
'Name': 'perfectcherryblossom',
'Driver': 'local',
'Mountpoint': '/var/lib/docker/volumes/perfectcherryblossom',
'Labels': {
'com.example.some-label': 'some-value'
},
'Scope': 'local'
}
return status_code, response
def fake_remove_volume():
return 204, None
def post_fake_update_container():
return 200, {'Warnings': []}
def post_fake_update_node():
return 200, None
def post_fake_join_swarm():
return 200, None
def get_fake_network_list():
return 200, [{
"Name": "bridge",
"Id": FAKE_NETWORK_ID,
"Scope": "local",
"Driver": "bridge",
"EnableIPv6": False,
"Internal": False,
"IPAM": {
"Driver": "default",
"Config": [
{
"Subnet": "172.17.0.0/16"
}
]
},
"Containers": {
FAKE_CONTAINER_ID: {
"EndpointID": "ed2419a97c1d99",
"MacAddress": "02:42:ac:11:00:02",
"IPv4Address": "172.17.0.2/16",
"IPv6Address": ""
}
},
"Options": {
"com.docker.network.bridge.default_bridge": "true",
"com.docker.network.bridge.enable_icc": "true",
"com.docker.network.bridge.enable_ip_masquerade": "true",
"com.docker.network.bridge.host_binding_ipv4": "0.0.0.0",
"com.docker.network.bridge.name": "docker0",
"com.docker.network.driver.mtu": "1500"
}
}]
def get_fake_network():
return 200, get_fake_network_list()[1][0]
def post_fake_network():
return 201, {"Id": FAKE_NETWORK_ID, "Warnings": []}
def delete_fake_network():
return 204, None
def post_fake_network_connect():
return 200, None
def post_fake_network_disconnect():
return 200, None
def post_fake_secret():
status_code = 200
response = {'ID': FAKE_SECRET_ID}
return status_code, response
# Maps real api url to fake response callback
prefix = 'http+docker://localhost'
if constants.IS_WINDOWS_PLATFORM:
prefix = 'http+docker://localnpipe'
fake_responses = {
f'{prefix}/version':
get_fake_version,
f'{prefix}/{CURRENT_VERSION}/version':
get_fake_version,
f'{prefix}/{CURRENT_VERSION}/info':
get_fake_info,
f'{prefix}/{CURRENT_VERSION}/auth':
post_fake_auth,
f'{prefix}/{CURRENT_VERSION}/_ping':
get_fake_ping,
f'{prefix}/{CURRENT_VERSION}/images/search':
get_fake_search,
f'{prefix}/{CURRENT_VERSION}/images/json':
get_fake_images,
f'{prefix}/{CURRENT_VERSION}/images/test_image/history':
get_fake_image_history,
f'{prefix}/{CURRENT_VERSION}/images/create':
post_fake_import_image,
f'{prefix}/{CURRENT_VERSION}/containers/json':
get_fake_containers,
f'{prefix}/{CURRENT_VERSION}/containers/3cc2351ab11b/start':
post_fake_start_container,
f'{prefix}/{CURRENT_VERSION}/containers/3cc2351ab11b/resize':
post_fake_resize_container,
f'{prefix}/{CURRENT_VERSION}/containers/3cc2351ab11b/json':
get_fake_inspect_container,
f'{prefix}/{CURRENT_VERSION}/containers/3cc2351ab11b/rename':
post_fake_rename_container,
f'{prefix}/{CURRENT_VERSION}/images/e9aa60c60128/tag':
post_fake_tag_image,
f'{prefix}/{CURRENT_VERSION}/containers/3cc2351ab11b/wait':
get_fake_wait,
f'{prefix}/{CURRENT_VERSION}/containers/3cc2351ab11b/logs':
get_fake_logs,
f'{prefix}/{CURRENT_VERSION}/containers/3cc2351ab11b/changes':
get_fake_diff,
f'{prefix}/{CURRENT_VERSION}/containers/3cc2351ab11b/export':
get_fake_export,
f'{prefix}/{CURRENT_VERSION}/containers/3cc2351ab11b/update':
post_fake_update_container,
f'{prefix}/{CURRENT_VERSION}/containers/3cc2351ab11b/exec':
post_fake_exec_create,
f'{prefix}/{CURRENT_VERSION}/exec/d5d177f121dc/start':
post_fake_exec_start,
f'{prefix}/{CURRENT_VERSION}/exec/d5d177f121dc/json':
get_fake_exec_inspect,
f'{prefix}/{CURRENT_VERSION}/exec/d5d177f121dc/resize':
post_fake_exec_resize,
f'{prefix}/{CURRENT_VERSION}/containers/3cc2351ab11b/stats':
get_fake_stats,
f'{prefix}/{CURRENT_VERSION}/containers/3cc2351ab11b/top':
get_fake_top,
f'{prefix}/{CURRENT_VERSION}/containers/3cc2351ab11b/stop':
post_fake_stop_container,
f'{prefix}/{CURRENT_VERSION}/containers/3cc2351ab11b/kill':
post_fake_kill_container,
f'{prefix}/{CURRENT_VERSION}/containers/3cc2351ab11b/pause':
post_fake_pause_container,
f'{prefix}/{CURRENT_VERSION}/containers/3cc2351ab11b/unpause':
post_fake_unpause_container,
f'{prefix}/{CURRENT_VERSION}/containers/3cc2351ab11b/restart':
post_fake_restart_container,
f'{prefix}/{CURRENT_VERSION}/containers/3cc2351ab11b':
delete_fake_remove_container,
f'{prefix}/{CURRENT_VERSION}/images/create':
post_fake_image_create,
f'{prefix}/{CURRENT_VERSION}/images/e9aa60c60128':
delete_fake_remove_image,
f'{prefix}/{CURRENT_VERSION}/images/e9aa60c60128/get':
get_fake_get_image,
f'{prefix}/{CURRENT_VERSION}/images/load':
post_fake_load_image,
f'{prefix}/{CURRENT_VERSION}/images/test_image/json':
get_fake_inspect_image,
f'{prefix}/{CURRENT_VERSION}/images/test_image/insert':
get_fake_insert_image,
f'{prefix}/{CURRENT_VERSION}/images/test_image/push':
post_fake_push,
f'{prefix}/{CURRENT_VERSION}/commit':
post_fake_commit,
f'{prefix}/{CURRENT_VERSION}/containers/create':
post_fake_create_container,
f'{prefix}/{CURRENT_VERSION}/build':
post_fake_build_container,
f'{prefix}/{CURRENT_VERSION}/events':
get_fake_events,
(f'{prefix}/{CURRENT_VERSION}/volumes', 'GET'):
get_fake_volume_list,
(f'{prefix}/{CURRENT_VERSION}/volumes/create', 'POST'):
get_fake_volume,
('{1}/{0}/volumes/{2}'.format(
CURRENT_VERSION, prefix, FAKE_VOLUME_NAME
), 'GET'):
get_fake_volume,
('{1}/{0}/volumes/{2}'.format(
CURRENT_VERSION, prefix, FAKE_VOLUME_NAME
), 'DELETE'):
fake_remove_volume,
('{1}/{0}/nodes/{2}/update?version=1'.format(
CURRENT_VERSION, prefix, FAKE_NODE_ID
), 'POST'):
post_fake_update_node,
(f'{prefix}/{CURRENT_VERSION}/swarm/join', 'POST'):
post_fake_join_swarm,
(f'{prefix}/{CURRENT_VERSION}/networks', 'GET'):
get_fake_network_list,
(f'{prefix}/{CURRENT_VERSION}/networks/create', 'POST'):
post_fake_network,
('{1}/{0}/networks/{2}'.format(
CURRENT_VERSION, prefix, FAKE_NETWORK_ID
), 'GET'):
get_fake_network,
('{1}/{0}/networks/{2}'.format(
CURRENT_VERSION, prefix, FAKE_NETWORK_ID
), 'DELETE'):
delete_fake_network,
('{1}/{0}/networks/{2}/connect'.format(
CURRENT_VERSION, prefix, FAKE_NETWORK_ID
), 'POST'):
post_fake_network_connect,
('{1}/{0}/networks/{2}/disconnect'.format(
CURRENT_VERSION, prefix, FAKE_NETWORK_ID
), 'POST'):
post_fake_network_disconnect,
f'{prefix}/{CURRENT_VERSION}/secrets/create':
post_fake_secret,
}
| 26.608828 | 77 | 0.604679 |
eda61c698589c9af040eb324c7b8c550e70f735a | 20,399 | py | Python | frappe/desk/query_report.py | kevingdc/frappe | 985bf3042e8277ef8ca93065b89f12a8c097f1a8 | [
"MIT"
] | null | null | null | frappe/desk/query_report.py | kevingdc/frappe | 985bf3042e8277ef8ca93065b89f12a8c097f1a8 | [
"MIT"
] | null | null | null | frappe/desk/query_report.py | kevingdc/frappe | 985bf3042e8277ef8ca93065b89f12a8c097f1a8 | [
"MIT"
] | null | null | null | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import frappe
import os, json
from frappe import _
from frappe.modules import scrub, get_module_path
from frappe.utils import flt, cint, get_html_format, get_url_to_form, gzip_decompress, format_duration
from frappe.model.utils import render_include
from frappe.translate import send_translations
import frappe.desk.reportview
from frappe.permissions import get_role_permissions
from six import string_types, iteritems
from datetime import timedelta
from frappe.core.utils import ljust_list
def get_report_doc(report_name):
	"""Load and permission-check a Report document.

	For a 'Custom Report', the underlying reference Report is loaded instead,
	with the custom report's name and saved column layout attached to it.
	Raises frappe.PermissionError if the user cannot access the report or its
	reference doctype, and throws if the report is disabled.
	"""
	doc = frappe.get_doc("Report", report_name)
	doc.custom_columns = []
	if doc.report_type == 'Custom Report':
		# Swap in the reference report, carrying over the custom layout.
		custom_report_doc = doc
		reference_report = custom_report_doc.reference_report
		doc = frappe.get_doc("Report", reference_report)
		doc.custom_report = report_name
		doc.custom_columns = custom_report_doc.json
		doc.is_custom_report = True
	if not doc.is_permitted():
		frappe.throw(_("You don't have access to Report: {0}").format(report_name), frappe.PermissionError)
	if not frappe.has_permission(doc.ref_doctype, "report"):
		frappe.throw(_("You don't have permission to get a report on: {0}").format(doc.ref_doctype),
			frappe.PermissionError)
	if doc.disabled:
		frappe.throw(_("Report {0} is disabled").format(report_name))
	return doc
def generate_report_result(report, filters=None, user=None, custom_columns=None):
user = user or frappe.session.user
filters = filters or []
if filters and isinstance(filters, string_types):
filters = json.loads(filters)
res = []
if report.report_type == "Query Report":
res = report.execute_query_report(filters)
elif report.report_type == 'Script Report':
res = report.execute_script_report(filters)
columns, result, message, chart, report_summary, skip_total_row = \
ljust_list(res, 6)
if report.custom_columns:
# Original query columns, needed to reorder data as per custom columns
query_columns = columns
# Reordered columns
columns = json.loads(report.custom_columns)
result = reorder_data_for_custom_columns(columns, query_columns, result)
result = add_data_to_custom_columns(columns, result)
if custom_columns:
result = add_data_to_custom_columns(custom_columns, result)
for custom_column in custom_columns:
columns.insert(custom_column['insert_after_index'] + 1, custom_column)
if result:
result = get_filtered_data(report.ref_doctype, columns, result, user)
if cint(report.add_total_row) and result and not skip_total_row:
result = add_total_row(result, columns)
return {
"result": result,
"columns": columns,
"message": message,
"chart": chart,
"report_summary": report_summary,
"skip_total_row": skip_total_row or 0,
"status": None,
"execution_time": frappe.cache().hget('report_execution_time', report.name) or 0
}
@frappe.whitelist()
def background_enqueue_run(report_name, filters=None, user=None):
"""run reports in background"""
if not user:
user = frappe.session.user
report = get_report_doc(report_name)
track_instance = \
frappe.get_doc({
"doctype": "Prepared Report",
"report_name": report_name,
# This looks like an insanity but, without this it'd be very hard to find Prepared Reports matching given condition
# We're ensuring that spacing is consistent. e.g. JS seems to put no spaces after ":", Python on the other hand does.
"filters": json.dumps(json.loads(filters)),
"ref_report_doctype": report_name,
"report_type": report.report_type,
"query": report.query,
"module": report.module,
})
track_instance.insert(ignore_permissions=True)
frappe.db.commit()
track_instance.enqueue_report()
return {
"name": track_instance.name,
"redirect_url": get_url_to_form("Prepared Report", track_instance.name)
}
@frappe.whitelist()
def get_script(report_name):
report = get_report_doc(report_name)
module = report.module or frappe.db.get_value("DocType", report.ref_doctype, "module")
module_path = get_module_path(module)
report_folder = os.path.join(module_path, "report", scrub(report.name))
script_path = os.path.join(report_folder, scrub(report.name) + ".js")
print_path = os.path.join(report_folder, scrub(report.name) + ".html")
script = None
if os.path.exists(script_path):
with open(script_path, "r") as f:
script = f.read()
html_format = get_html_format(print_path)
if not script and report.javascript:
script = report.javascript
if not script:
script = "frappe.query_reports['%s']={}" % report_name
# load translations
if frappe.lang != "en":
send_translations(frappe.get_lang_dict("report", report_name))
return {
"script": render_include(script),
"html_format": html_format,
"execution_time": frappe.cache().hget('report_execution_time', report_name) or 0
}
@frappe.whitelist()
@frappe.read_only()
def run(report_name, filters=None, user=None, ignore_prepared_report=False, custom_columns=None):
	"""Whitelisted entry point: execute a report and return its result dict.

	Serves a stored Prepared Report result when the report is configured for
	prepared execution (and no custom columns are requested); otherwise
	generates the result live via generate_report_result.
	"""
	report = get_report_doc(report_name)
	if not user:
		user = frappe.session.user
	if not frappe.has_permission(report.ref_doctype, "report"):
		frappe.msgprint(_("Must have report permission to access this report."),
			raise_exception=True)
	result = None
	if report.prepared_report and not report.disable_prepared_report and not ignore_prepared_report and not custom_columns:
		if filters:
			if isinstance(filters, string_types):
				filters = json.loads(filters)
			# 'prepared_report_name' selects a specific stored run; it is not
			# part of the report's own filters, so pop it before lookup.
			dn = filters.get("prepared_report_name")
			filters.pop("prepared_report_name", None)
		else:
			dn = ""
		result = get_prepared_report_result(report, filters, dn, user)
	else:
		result = generate_report_result(report, filters, user, custom_columns)
	# The grand-total row is suppressed when the report itself opted out.
	result["add_total_row"] = report.add_total_row and not result.get('skip_total_row', False)
	return result
def add_data_to_custom_columns(columns, result):
custom_fields_data = get_data_for_custom_report(columns)
data = []
for row in result:
row_obj = {}
if isinstance(row, tuple):
row = list(row)
if isinstance(row, list):
for idx, column in enumerate(columns):
if column.get('link_field'):
row_obj[column['fieldname']] = None
row.insert(idx, None)
else:
row_obj[column['fieldname']] = row[idx]
data.append(row_obj)
else:
data.append(row)
for row in data:
for column in columns:
if column.get('link_field'):
fieldname = column['fieldname']
key = (column['doctype'], fieldname)
link_field = column['link_field']
row[fieldname] = custom_fields_data.get(key, {}).get(row.get(link_field))
return data
def reorder_data_for_custom_columns(custom_columns, columns, result):
if not result:
return []
columns = [get_column_as_dict(col) for col in columns]
if isinstance(result[0], list) or isinstance(result[0], tuple):
# If the result is a list of lists
custom_column_names = [col["label"] for col in custom_columns]
original_column_names = [col["label"] for col in columns]
return get_columns_from_list(custom_column_names, original_column_names, result)
else:
# columns do not need to be reordered if result is a list of dicts
return result
def get_columns_from_list(columns, target_columns, result):
	"""Project each row of ``result`` onto ``columns``.

	``target_columns`` gives the label order of the incoming rows. Labels in
	``columns`` that do not appear in ``target_columns`` are silently skipped,
	so every output row has one entry per matched label, in ``columns`` order.
	"""
	# Resolve each requested label to its source position once, up front.
	positions = [
		target_columns.index(label)
		for label in columns
		if label in target_columns
	]
	return [[row[pos] for pos in positions] for row in result]
def get_prepared_report_result(report, filters, dn="", user=None):
latest_report_data = {}
doc = None
if dn:
# Get specified dn
doc = frappe.get_doc("Prepared Report", dn)
else:
# Only look for completed prepared reports with given filters.
doc_list = frappe.get_all("Prepared Report",
filters={
"status": "Completed",
"filters": json.dumps(filters),
"owner": user,
"report_name": report.get('custom_report') or report.get('report_name')
},
order_by = 'creation desc'
)
if doc_list:
# Get latest
doc = frappe.get_doc("Prepared Report", doc_list[0])
if doc:
try:
# Prepared Report data is stored in a GZip compressed JSON file
attached_file_name = frappe.db.get_value("File", {"attached_to_doctype": doc.doctype, "attached_to_name":doc.name}, "name")
attached_file = frappe.get_doc('File', attached_file_name)
compressed_content = attached_file.get_content()
uncompressed_content = gzip_decompress(compressed_content)
data = json.loads(uncompressed_content)
if data:
columns = json.loads(doc.columns) if doc.columns else data[0]
for column in columns:
if isinstance(column, dict) and column.get("label"):
column["label"] = _(column["label"])
latest_report_data = {
"columns": columns,
"result": data
}
except Exception:
frappe.log_error(frappe.get_traceback())
frappe.delete_doc("Prepared Report", doc.name)
frappe.db.commit()
doc = None
latest_report_data.update({
"prepared_report": True,
"doc": doc
})
return latest_report_data
@frappe.whitelist()
def export_query():
    """Export the current query report to a file (currently Excel only).

    Reads the report parameters from ``frappe.local.form_dict``:
    ``report_name``, ``filters``, ``file_format_type``, ``custom_columns``,
    ``include_indentation`` and ``visible_idx`` (indices of the rows visible
    in the rendered report).  Export permission on the report's reference
    doctype is enforced before any data is generated.  The generated file is
    placed on ``frappe.response`` for download.
    """
    data = frappe._dict(frappe.local.form_dict)
    # request bookkeeping keys are not report parameters; drop them safely
    # (pop with default instead of del, so partial requests cannot KeyError)
    data.pop("cmd", None)
    data.pop("csrf_token", None)

    # defaults protect against NameError/KeyError on incomplete requests
    filters = None
    report_name = None
    file_format_type = None

    if isinstance(data.get("filters"), string_types):
        filters = json.loads(data["filters"])
    if isinstance(data.get("report_name"), string_types):
        report_name = data["report_name"]
        frappe.permissions.can_export(
            frappe.get_cached_value('Report', report_name, 'ref_doctype'),
            raise_exception=True
        )
    if isinstance(data.get("file_format_type"), string_types):
        file_format_type = data["file_format_type"]

    custom_columns = frappe.parse_json(data.get("custom_columns") or "[]")
    include_indentation = data.get("include_indentation")
    if isinstance(data.get("visible_idx"), string_types):
        visible_idx = json.loads(data.get("visible_idx"))
    else:
        visible_idx = None

    if file_format_type == "Excel":
        data = run(report_name, filters, custom_columns=custom_columns)
        data = frappe._dict(data)
        if not data.columns:
            frappe.respond_as_web_page(_("No data to export"),
                _("You can try changing the filters of your report."))
            return

        columns = get_columns_dict(data.columns)

        from frappe.utils.xlsxutils import make_xlsx
        # Duration columns are stored as seconds; format them for display
        data['result'] = handle_duration_fieldtype_values(data.get('result'), data.get('columns'))
        xlsx_data = build_xlsx_data(columns, data, visible_idx, include_indentation)

        xlsx_file = make_xlsx(xlsx_data, "Query Report")

        frappe.response['filename'] = report_name + '.xlsx'
        frappe.response['filecontent'] = xlsx_file.getvalue()
        frappe.response['type'] = 'binary'
def handle_duration_fieldtype_values(result, columns):
    """Convert raw second counts in ``Duration`` columns to display strings.

    ``columns`` may contain either ``"Label:Fieldtype/Options:Width"`` style
    strings or column dicts; only columns whose fieldtype resolves to
    ``Duration`` are touched.  ``result`` is a list of row lists, modified in
    place and returned for convenience.  A ``None``/empty ``result`` is
    returned unchanged (the original crashed on ``None`` when a Duration
    column was present).
    """
    if not result:
        return result

    for i, col in enumerate(columns):
        fieldtype = None
        if isinstance(col, string_types):
            col = col.split(":")
            if len(col) > 1:
                if col[1]:
                    fieldtype = col[1]
                    if "/" in fieldtype:
                        fieldtype, options = fieldtype.split("/")
                else:
                    fieldtype = "Data"
        else:
            fieldtype = col.get("fieldtype")

        if fieldtype == "Duration":
            for row in result:
                # falsy cells (None / 0) are left untouched
                if row[i]:
                    row[i] = format_duration(row[i])

    return result
def build_xlsx_data(columns, data, visible_idx, include_indentation):
    """Build the row matrix handed to ``make_xlsx``.

    - ``columns``: dict from :func:`get_columns_dict` (keyed by index)
    - ``data``: object with ``columns`` and ``result`` attributes
    - ``visible_idx``: iterable of row indices currently visible in the
      rendered report, or ``None`` to export every row (the original raised
      ``TypeError`` on ``None`` via ``i in None``)
    - ``include_indentation``: truthy to prefix indented (tree) rows with
      leading spaces
    """
    result = [[]]
    # header row: skip hidden columns
    for idx in range(len(data.columns)):
        if not columns[idx].get("hidden"):
            result[0].append(columns[idx]["label"])

    # set gives O(1) membership; None means "no filter", export all rows
    visible = None if visible_idx is None else set(visible_idx)

    # build table from result, keeping only rows visible in the report
    for i, row in enumerate(data.result):
        if visible is not None and i not in visible:
            continue
        if isinstance(row, dict) and row:
            row_data = []
            for idx in range(len(data.columns)):
                if not columns[idx].get("hidden"):
                    label = columns[idx]["label"]
                    fieldname = columns[idx]["fieldname"]
                    # prefer fieldname, fall back to label, then empty
                    cell_value = row.get(fieldname, row.get(label, ""))
                    if cint(include_indentation) and 'indent' in row and idx == 0:
                        cell_value = (' ' * cint(row['indent'])) + cell_value
                    row_data.append(cell_value)
        else:
            # list rows (or empty dicts) are passed through unchanged
            row_data = row
        result.append(row_data)

    return result
def add_total_row(result, columns, meta = None):
    """Append a "Total" row to ``result`` (modified in place, also returned).

    Numeric fieldtypes (Currency, Int, Float, Percent, Duration) are summed,
    Percent columns are averaged afterwards, Time columns are accumulated as
    ``timedelta``, and Link/Currency columns copy the first row's value
    (currency is assumed uniform per column).  The first cell is labelled
    "Total" unless the first column itself is numeric or a date.
    """
    total_row = [""]*len(columns)
    has_percent = []

    for i, col in enumerate(columns):
        fieldtype, options, fieldname = None, None, None
        if isinstance(col, string_types):
            if meta:
                # get fieldtype from the meta
                field = meta.get_field(col)
                if field:
                    fieldtype = meta.get_field(col).fieldtype
                    fieldname = meta.get_field(col).fieldname
            else:
                # "Label:Fieldtype/Options:Width" style column string
                col = col.split(":")
                if len(col) > 1:
                    if col[1]:
                        fieldtype = col[1]
                        if "/" in fieldtype:
                            fieldtype, options = fieldtype.split("/")
                    else:
                        fieldtype = "Data"
        else:
            fieldtype = col.get("fieldtype")
            fieldname = col.get("fieldname")
            options = col.get("options")

        for row in result:
            # ragged rows: skip cells beyond the row's length
            if i >= len(row): continue
            cell = row.get(fieldname) if isinstance(row, dict) else row[i]
            if fieldtype in ["Currency", "Int", "Float", "Percent", "Duration"] and flt(cell):
                total_row[i] = flt(total_row[i]) + flt(cell)

            if fieldtype == "Percent" and i not in has_percent:
                has_percent.append(i)
            if fieldtype == "Time" and cell:
                # first Time cell: seed the accumulator with a zero timedelta
                if not total_row[i]:
                    total_row[i]=timedelta(hours=0,minutes=0,seconds=0)
                total_row[i] = total_row[i] + cell

        # a Link column holding a currency: totals would be meaningless,
        # so show the first row's currency instead
        if fieldtype=="Link" and options == "Currency":
            total_row[i] = result[0].get(fieldname) if isinstance(result[0], dict) else result[0][i]

    # Percent columns: turn the sum into an average over all rows
    for i in has_percent:
        total_row[i] = flt(total_row[i]) / len(result)

    first_col_fieldtype = None
    if isinstance(columns[0], string_types):
        first_col = columns[0].split(":")
        if len(first_col) > 1:
            first_col_fieldtype = first_col[1].split("/")[0]
    else:
        first_col_fieldtype = columns[0].get("fieldtype")

    # only label the total row when the first column is textual
    if first_col_fieldtype not in ["Currency", "Int", "Float", "Percent", "Date"]:
        total_row[0] = _("Total")

    result.append(total_row)
    return result
@frappe.whitelist()
def get_data_for_custom_field(doctype, field):
    """Return a name -> ``field`` value map over all documents of ``doctype``.

    Raises a PermissionError via ``frappe.throw`` when the session user has
    no read permission on ``doctype``.
    """
    if not frappe.has_permission(doctype, "read"):
        frappe.throw(_("Not Permitted"), frappe.PermissionError)

    rows = frappe.get_all(doctype, fields=["name", field], as_list=1)
    return frappe._dict(rows)
def get_data_for_custom_report(columns):
    """Fetch linked-field lookup data for every column declaring a
    ``link_field``; the result is keyed by ``(doctype, fieldname)``."""
    return {
        (col.get('doctype'), col.get('fieldname')):
            get_data_for_custom_field(col.get('doctype'), col.get('fieldname'))
        for col in columns
        if col.get('link_field')
    }
@frappe.whitelist()
def save_report(reference_report, report_name, columns):
    """Create or update a Custom Report named ``report_name`` derived from
    ``reference_report`` and return the saved report's docname."""
    source = get_report_doc(reference_report)
    existing = frappe.db.exists("Report",
        {'report_name': report_name, 'is_standard': 'No', 'report_type': 'Custom Report'})

    if existing:
        # only the stored column layout is refreshed on an existing report
        doc = frappe.get_doc("Report", existing)
        doc.update({"json": columns})
        doc.save()
        frappe.msgprint(_("Report updated successfully"))
        return existing

    doc = frappe.get_doc({
        'doctype': 'Report',
        'report_name': report_name,
        'json': columns,
        'ref_doctype': source.ref_doctype,
        'is_standard': 'No',
        'report_type': 'Custom Report',
        'reference_report': reference_report
    }).insert(ignore_permissions = True)
    frappe.msgprint(_("{0} saved successfully").format(doc.name))
    return doc.name
def get_filtered_data(ref_doctype, columns, data, user):
    """Drop rows of ``data`` that ``user`` may not see, based on user
    permissions, document sharing and owner-restricted report permission on
    ``ref_doctype``."""
    linked_doctypes = get_linked_doctypes(columns, data)
    match_filters_per_doctype = get_user_match_filters(linked_doctypes, user=user)
    shared = frappe.share.get_shared(ref_doctype, user)
    columns_dict = get_columns_dict(columns)

    role_permissions = get_role_permissions(frappe.get_meta(ref_doctype), user)
    if_owner = role_permissions.get("if_owner", {}).get("report")

    if not match_filters_per_doctype:
        # no user-permission conditions apply: every row is visible
        return list(data)

    # the ref_doctype column may have been dropped when empty, hence .get
    ref_col = linked_doctypes.get(ref_doctype)

    filtered = []
    for row in data:
        if ref_col and shared and row[ref_col] in shared:
            # shared documents are always visible
            filtered.append(row)
        elif has_match(row, linked_doctypes, match_filters_per_doctype,
                ref_doctype, if_owner, columns_dict, user):
            filtered.append(row)
    return filtered
def has_match(row, linked_doctypes, doctype_match_filters, ref_doctype, if_owner, columns_dict, user):
    """Returns True if after evaluating permissions for each linked doctype
        - There is an owner match for the ref_doctype
        - `and` There is a user permission match for all linked doctypes

    Returns True if the row is empty

    Note:
    Each doctype could have multiple conflicting user permission doctypes.
    Hence even if one of the sets allows a match, it is true.
    This behavior is equivalent to the trickling of user permissions of linked doctypes to the ref doctype.
    """
    resultant_match = True

    if not row:
        # allow empty rows :)
        return resultant_match

    for doctype, filter_list in doctype_match_filters.items():
        matched_for_doctype = False

        # owner-restricted report permission: the row counts as matched when
        # the "owner" column holds the current user
        if doctype==ref_doctype and if_owner:
            idx = linked_doctypes.get("User")
            if (idx is not None
                and row[idx]==user
                and columns_dict[idx]==columns_dict.get("owner")):
                # owner match is true
                matched_for_doctype = True

        if not matched_for_doctype:
            for match_filters in filter_list:
                match = True
                for dt, idx in linked_doctypes.items():
                    # case handled above
                    if dt=="User" and columns_dict[idx]==columns_dict.get("owner"):
                        continue

                    # idx is an int for list rows, a fieldname for dict rows
                    cell_value = None
                    if isinstance(row, dict):
                        cell_value = row.get(idx)
                    elif isinstance(row, (list, tuple)):
                        cell_value = row[idx]

                    # only enforce the filter when the cell holds an actual
                    # document of dt that is outside the allowed set
                    if dt in match_filters and cell_value not in match_filters.get(dt) and frappe.db.exists(dt, cell_value):
                        match = False
                        break

                # each doctype could have multiple conflicting user permission doctypes, hence using OR
                # so that even if one of the sets allows a match, it is true
                matched_for_doctype = matched_for_doctype or match
                if matched_for_doctype:
                    break

        # each doctype's user permissions should match the row! hence using AND
        resultant_match = resultant_match and matched_for_doctype
        if not resultant_match:
            break

    return resultant_match
def get_linked_doctypes(columns, data):
    """Map each Link column's target doctype to the column position holding
    its value: an integer index for list rows, a fieldname for dict rows.

    Doctypes whose column is empty in every row are dropped, since no
    user-permission check can be applied to them.
    """
    linked_doctypes = {}

    columns_dict = get_columns_dict(columns)

    for idx, col in enumerate(columns):
        df = columns_dict[idx]
        if df.get("fieldtype")=="Link":
            if data and isinstance(data[0], (list, tuple)):
                linked_doctypes[df["options"]] = idx
            else:
                # dict rows: key by fieldname instead of position
                linked_doctypes[df["options"]] = df["fieldname"]

    # collect the set of columns that carry a value in at least one row
    columns_with_value = []
    for row in data:
        if row:
            # small shortcut: once every column of this row shape is known
            # to carry a value, there is nothing left to discover
            if len(row) != len(columns_with_value):
                if isinstance(row, (list, tuple)):
                    row = enumerate(row)
                elif isinstance(row, dict):
                    row = row.items()

                for col, val in row:
                    if val and col not in columns_with_value:
                        columns_with_value.append(col)

    # iterate over a snapshot since we delete while iterating; also drops
    # the unnecessary six ``iteritems`` shim in this py3 codebase
    for doctype, key in list(linked_doctypes.items()):
        if key not in columns_with_value:
            del linked_doctypes[doctype]

    return linked_doctypes
def get_columns_dict(columns):
    """Index every column's docfield properties by both its position and its
    fieldname, so either key can be used to look a column up.  Both keys map
    to the same shared dict per column.
    """
    out = frappe._dict()
    for position, col in enumerate(columns):
        props = get_column_as_dict(col)
        out[position] = props
        out[props["fieldname"]] = props
    return out
def get_column_as_dict(col):
    """Normalise one report column into a ``frappe._dict``.

    ``col`` is either a ``"Label:Fieldtype/Options:Width"`` style string or
    an already-structured dict; a ``fieldname`` is derived from the label
    when it is missing.
    """
    parsed = frappe._dict()

    if not isinstance(col, string_types):
        # dict column: copy through, only filling in a missing fieldname
        parsed.update(col)
        if "fieldname" not in parsed:
            parsed["fieldname"] = frappe.scrub(parsed["label"])
        return parsed

    # string column of the form "Label:Fieldtype/Options:Width"
    parts = col.split(":")
    if len(parts) > 1:
        if "/" in parts[1]:
            parsed["fieldtype"], parsed["options"] = parts[1].split("/")
        else:
            parsed["fieldtype"] = parts[1]
    parsed["label"] = parts[0]
    parsed["fieldname"] = frappe.scrub(parts[0])
    return parsed
def get_user_match_filters(doctypes, user):
    """Collect ``user``'s user-permission match conditions for each doctype
    in ``doctypes``; doctypes without any conditions are omitted."""
    collected = {}
    for doctype in doctypes:
        conditions = frappe.desk.reportview.build_match_conditions(doctype, user, False)
        if conditions:
            collected[doctype] = conditions
    return collected
| 29.866764 | 126 | 0.722241 |
0d20ce57d44c445571bf38474e19d410ced24aa0 | 1,902 | py | Python | lessons/cb2logger.py | chrisb13/CFDPython | 0b408ad8f1691b1f2b2785cf50898e713c3326d8 | [
"CC-BY-3.0"
] | null | null | null | lessons/cb2logger.py | chrisb13/CFDPython | 0b408ad8f1691b1f2b2785cf50898e713c3326d8 | [
"CC-BY-3.0"
] | null | null | null | lessons/cb2logger.py | chrisb13/CFDPython | 0b408ad8f1691b1f2b2785cf50898e713c3326d8 | [
"CC-BY-3.0"
] | null | null | null |
#python logging
import logging as lg
import time
import subprocess
import sys
import os
class LogStart(object):
    """Configure the root logger (level, format, optional file output) and
    log a run header: script name, local time, venv status and -- when run
    inside a venv -- the output of ``pip freeze``.
    """
    def __init__(self, fname,fout=False,level='debug'):
        # map the textual level onto the logging module's constants;
        # anything unknown is rejected outright
        if level=='debug':
            lvl=lg.DEBUG
        elif level=='info':
            lvl=lg.INFO
        elif level=='warning':
            lvl=lg.WARNING
        elif level=='error':
            lvl=lg.ERROR
        else:
            raise Exception('You passed a bad logging level')

        if fout:
            # log to a file instead of the console
            lg.basicConfig(filename=fname,filemode='w', format='%(name)s - %(levelname)s - %(message)s' , level=lvl) #where filemode clobbers file
        else:
            lg.basicConfig(format='%(name)s - %(levelname)s - %(message)s', level=lvl)
        lg.info('')
        lg.info('SCRIPT started')
        lg.info('Logging level is: ' + level)

        localtime = time.asctime( time.localtime(time.time()) )

        #found from (see limitations):
        #http://stackoverflow.com/questions/7871319/how-to-know-who-is-importing-me-in-python
        #lg.info("Path for script is : "+os.path.dirname(os.path.realpath(__name__)) )
        lg.info("Script name is : "+ str(sys.argv[0]))
        lg.info("Local current time : "+ str(localtime))
        #lg.info("Machine run on : "+ os.getenv('HOSTNAME'))

        # NOTE(review): indentation was ambiguous in the reviewed copy; the
        # ``return`` is read as part of the else-branch, so the pip-freeze
        # section below only runs inside a venv -- TODO confirm against the
        # original file.  (``sys.real_prefix`` only exists inside a
        # virtualenv-created venv.)
        if hasattr(sys, 'real_prefix'):
            lg.info("We are running inside a venv.")
        else:
            lg.info("We are not running inside a venv.")
            return

        lg.info("")
        command=subprocess.Popen(['pip','freeze'],stdout=subprocess.PIPE,stderr=subprocess.PIPE)
        pipout, piperr = command.communicate()
        lg.info("---Pip freeze (including system-wide) START...--- ")
        for pkg in pipout.splitlines():
            lg.info(pkg)
        lg.info("---Pip freeze (including system-wide) END.---")
        lg.info("")
881c614238a33e27963045472ebe29ab8d651ab0 | 46,549 | py | Python | src/sage/geometry/polyhedron/parent.py | UCD4IDS/sage | 43474c96d533fd396fe29fe0782d44dc7f5164f7 | [
"BSL-1.0"
] | null | null | null | src/sage/geometry/polyhedron/parent.py | UCD4IDS/sage | 43474c96d533fd396fe29fe0782d44dc7f5164f7 | [
"BSL-1.0"
] | null | null | null | src/sage/geometry/polyhedron/parent.py | UCD4IDS/sage | 43474c96d533fd396fe29fe0782d44dc7f5164f7 | [
"BSL-1.0"
] | null | null | null | r"""
Parents for Polyhedra
"""
#*****************************************************************************
# Copyright (C) 2014 Volker Braun <vbraun.name@gmail.com>
#
# Distributed under the terms of the GNU General Public License (GPL)
# http://www.gnu.org/licenses/
#******************************************************************************
from sage.structure.parent import Parent
from sage.structure.element import get_coercion_model
from sage.structure.unique_representation import UniqueRepresentation
from sage.modules.free_module import FreeModule, is_FreeModule
from sage.misc.cachefunc import cached_method, cached_function
from sage.misc.lazy_import import lazy_import
import sage.rings.abc
from sage.rings.integer_ring import ZZ
from sage.rings.rational_field import QQ
from sage.rings.real_double import RDF
from sage.rings.ring import CommutativeRing
from sage.categories.fields import Fields
from sage.categories.rings import Rings
from sage.categories.modules import Modules
from sage.geometry.polyhedron.base import is_Polyhedron
from .representation import Inequality, Equation, Vertex, Ray, Line
def Polyhedra(ambient_space_or_base_ring=None, ambient_dim=None, backend=None, *,
              ambient_space=None, base_ring=None):
    r"""
    Construct a suitable parent class for polyhedra

    INPUT:

    - ``base_ring`` -- A ring. Currently there are backends for `\ZZ`,
      `\QQ`, and `\RDF`.

    - ``ambient_dim`` -- integer. The ambient space dimension.

    - ``ambient_space`` -- A free module.

    - ``backend`` -- string. The name of the backend for computations. There are
      several backends implemented:

      * ``backend="ppl"`` uses the Parma Polyhedra Library

      * ``backend="cdd"`` uses CDD

      * ``backend="normaliz"`` uses normaliz

      * ``backend="polymake"`` uses polymake

      * ``backend="field"`` a generic Sage implementation

    OUTPUT:

    A parent class for polyhedra over the given base ring if the
    backend supports it. If not, the parent base ring can be larger
    (for example, `\QQ` instead of `\ZZ`). If there is no
    implementation at all, a ``ValueError`` is raised.

    EXAMPLES::

        sage: from sage.geometry.polyhedron.parent import Polyhedra
        sage: Polyhedra(AA, 3)
        Polyhedra in AA^3
        sage: Polyhedra(ZZ, 3)
        Polyhedra in ZZ^3
        sage: type(_)
        <class 'sage.geometry.polyhedron.parent.Polyhedra_ZZ_ppl_with_category'>
        sage: Polyhedra(QQ, 3, backend='cdd')
        Polyhedra in QQ^3
        sage: type(_)
        <class 'sage.geometry.polyhedron.parent.Polyhedra_QQ_cdd_with_category'>

    CDD does not support integer polytopes directly::

        sage: Polyhedra(ZZ, 3, backend='cdd')
        Polyhedra in QQ^3

    Using a more general form of the constructor::

        sage: V = VectorSpace(QQ, 3)
        sage: Polyhedra(V) is Polyhedra(QQ, 3)
        True
        sage: Polyhedra(V, backend='field') is Polyhedra(QQ, 3, 'field')
        True
        sage: Polyhedra(backend='field', ambient_space=V) is Polyhedra(QQ, 3, 'field')
        True
        sage: M = FreeModule(ZZ, 2)
        sage: Polyhedra(M, backend='ppl') is Polyhedra(ZZ, 2, 'ppl')
        True

    TESTS::

        sage: Polyhedra(RR, 3, backend='field')
        Traceback (most recent call last):
        ...
        ValueError: the 'field' backend for polyhedron cannot be used with non-exact fields
        sage: Polyhedra(RR, 3)
        Traceback (most recent call last):
        ...
        ValueError: no default backend for computations with Real Field with 53 bits of precision
        sage: Polyhedra(QQ[I], 2)
        Traceback (most recent call last):
        ...
        ValueError: invalid base ring: Number Field in I with defining polynomial x^2 + 1 with I = 1*I cannot be coerced to a real field
        sage: Polyhedra(AA, 3, backend='polymake')  # optional - polymake
        Traceback (most recent call last):
        ...
        ValueError: the 'polymake' backend for polyhedron cannot be used with Algebraic Real Field

        sage: Polyhedra(QQ, 2, backend='normaliz')   # optional - pynormaliz
        Polyhedra in QQ^2
        sage: Polyhedra(SR, 2, backend='normaliz')   # optional - pynormaliz  # optional - sage.symbolic
        Polyhedra in (Symbolic Ring)^2
        sage: SCR = SR.subring(no_variables=True)    # optional - sage.symbolic
        sage: Polyhedra(SCR, 2, backend='normaliz')  # optional - pynormaliz  # optional - sage.symbolic
        Polyhedra in (Symbolic Constants Subring)^2
    """
    # the first positional argument may be either a base ring or an
    # ambient free module; disambiguate by category membership
    if ambient_space_or_base_ring is not None:
        if ambient_space_or_base_ring in Rings():
            base_ring = ambient_space_or_base_ring
        else:
            ambient_space = ambient_space_or_base_ring
    if ambient_space is not None:
        if ambient_space not in Modules:
            # There is no category of free modules, unfortunately
            # (see https://trac.sagemath.org/ticket/30164)...
            raise ValueError('ambient_space must be a free module')
        if base_ring is None:
            base_ring = ambient_space.base_ring()
        if ambient_dim is None:
            try:
                ambient_dim = ambient_space.rank()
            except AttributeError:
                # ... so we test whether it is free using the existence of
                # a rank method
                raise ValueError('ambient_space must be a free module')
        if ambient_space is not FreeModule(base_ring, ambient_dim):
            raise NotImplementedError('ambient_space must be a standard free module')
    # pick a default backend suited to the base ring
    if backend is None:
        if base_ring is ZZ or base_ring is QQ:
            backend = 'ppl'
        elif base_ring is RDF:
            backend = 'cdd'
        elif base_ring.is_exact():
            # TODO: find a more robust way of checking that the coefficients are indeed
            # real numbers
            if not RDF.has_coerce_map_from(base_ring):
                raise ValueError("invalid base ring: {} cannot be coerced to a real field".format(base_ring))
            backend = 'field'
        else:
            raise ValueError("no default backend for computations with {}".format(base_ring))

    # dispatch on (backend, base ring); note cdd promotes ZZ to QQ
    if backend == 'ppl' and base_ring is QQ:
        return Polyhedra_QQ_ppl(base_ring, ambient_dim, backend)
    elif backend == 'ppl' and base_ring is ZZ:
        return Polyhedra_ZZ_ppl(base_ring, ambient_dim, backend)
    elif backend == 'normaliz' and base_ring is QQ:
        return Polyhedra_QQ_normaliz(base_ring, ambient_dim, backend)
    elif backend == 'normaliz' and base_ring is ZZ:
        return Polyhedra_ZZ_normaliz(base_ring, ambient_dim, backend)
    elif backend == 'normaliz' and (isinstance(base_ring, sage.rings.abc.SymbolicRing) or base_ring.is_exact()):
        return Polyhedra_normaliz(base_ring, ambient_dim, backend)
    elif backend == 'cdd' and base_ring in (ZZ, QQ):
        return Polyhedra_QQ_cdd(QQ, ambient_dim, backend)
    elif backend == 'cdd' and base_ring is RDF:
        return Polyhedra_RDF_cdd(RDF, ambient_dim, backend)
    elif backend == 'polymake':
        base_field = base_ring.fraction_field()
        try:
            from sage.interfaces.polymake import polymake
            polymake_base_field = polymake(base_field)
        except TypeError:
            # give a meaningful error when polymake rejects the field
            raise ValueError(f"the 'polymake' backend for polyhedron cannot be used with {base_field}")
        return Polyhedra_polymake(base_field, ambient_dim, backend)
    elif backend == 'field':
        if not base_ring.is_exact():
            raise ValueError("the 'field' backend for polyhedron cannot be used with non-exact fields")
        return Polyhedra_field(base_ring.fraction_field(), ambient_dim, backend)
    else:
        raise ValueError('No such backend (=' + str(backend) +
            ') implemented for given basering (=' + str(base_ring)+').')
class Polyhedra_base(UniqueRepresentation, Parent):
r"""
Polyhedra in a fixed ambient space.
INPUT:
- ``base_ring`` -- either ``ZZ``, ``QQ``, or ``RDF``. The base
ring of the ambient module/vector space.
- ``ambient_dim`` -- integer. The ambient space dimension.
- ``backend`` -- string. The name of the backend for computations. There are
several backends implemented:
* ``backend="ppl"`` uses the Parma Polyhedra Library
* ``backend="cdd"`` uses CDD
* ``backend="normaliz"`` uses normaliz
* ``backend="polymake"`` uses polymake
* ``backend="field"`` a generic Sage implementation
EXAMPLES::
sage: from sage.geometry.polyhedron.parent import Polyhedra
sage: Polyhedra(ZZ, 3)
Polyhedra in ZZ^3
"""
    def __init__(self, base_ring, ambient_dim, backend):
        """
        The Python constructor.

        EXAMPLES::

            sage: from sage.geometry.polyhedron.parent import Polyhedra
            sage: Polyhedra(QQ, 3)
            Polyhedra in QQ^3

        TESTS::

            sage: from sage.geometry.polyhedron.parent import Polyhedra
            sage: P = Polyhedra(QQ, 3)
            sage: TestSuite(P).run()
            sage: P = Polyhedra(QQ, 0)
            sage: TestSuite(P).run()
        """
        self._backend = backend
        self._ambient_dim = ambient_dim
        from sage.categories.polyhedra import PolyhedralSets
        from sage.categories.finite_enumerated_sets import FiniteEnumeratedSets
        category = PolyhedralSets(base_ring)
        # ambient dimension 0 admits exactly two polyhedra (empty set and
        # the single point, cf. :meth:`list`); otherwise infinitely many
        if ambient_dim == 0:
            category = category & FiniteEnumeratedSets()
        else:
            category = category.Infinite()

        Parent.__init__(self, base=base_ring, category=category)
        # per-parent pools of discarded H/V-representation objects,
        # refilled by :meth:`recycle` to speed up polyhedron creation
        self._Inequality_pool = []
        self._Equation_pool = []
        self._Vertex_pool = []
        self._Ray_pool = []
        self._Line_pool = []
def list(self):
"""
Return the two polyhedra in ambient dimension 0, raise an error otherwise
EXAMPLES::
sage: from sage.geometry.polyhedron.parent import Polyhedra
sage: P = Polyhedra(QQ, 3)
sage: P.cardinality()
+Infinity
sage: P = Polyhedra(AA, 0)
sage: P.category()
Category of finite enumerated polyhedral sets over Algebraic Real Field
sage: P.list()
[The empty polyhedron in AA^0,
A 0-dimensional polyhedron in AA^0 defined as the convex hull of 1 vertex]
sage: P.cardinality()
2
"""
if self.ambient_dim():
raise NotImplementedError
return [self.empty(), self.universe()]
    def recycle(self, polyhedron):
        """
        Recycle the H/V-representation objects of a polyhedron.

        This speeds up creation of new polyhedra by reusing
        objects. After recycling a polyhedron object, it is not in a
        consistent state any more and neither the polyhedron nor its
        H/V-representation objects may be used any more.

        INPUT:

        - ``polyhedron`` -- a polyhedron whose parent is ``self``.

        EXAMPLES::

            sage: p = Polyhedron([(0,0),(1,0),(0,1)])
            sage: p.parent().recycle(p)

        TESTS::

            sage: p = Polyhedron([(0,0),(1,0),(0,1)])
            sage: n = len(p.parent()._Vertex_pool)
            sage: p._delete()
            sage: len(p.parent()._Vertex_pool) - n
            3
        """
        if self is not polyhedron.parent():
            raise TypeError('The polyhedron has the wrong parent class.')
        # return all representation objects to the per-parent pools ...
        self._Inequality_pool.extend(polyhedron.inequalities())
        self._Equation_pool.extend(polyhedron.equations())
        self._Vertex_pool.extend(polyhedron.vertices())
        self._Ray_pool.extend(polyhedron.rays())
        self._Line_pool.extend(polyhedron.lines())
        # ... and sever all links between the polyhedron and its
        # representation objects in both directions
        for Hrep in polyhedron.Hrep_generator():
            Hrep._polyhedron = None
        for Vrep in polyhedron.Vrep_generator():
            Vrep._polyhedron = None
        polyhedron._Hrepresentation = None
        polyhedron._Vrepresentation = None
        if polyhedron.is_mutable():
            polyhedron._dependent_objects = []
    def ambient_dim(self):
        r"""
        Return the dimension of the ambient space.

        OUTPUT:

        The ``ambient_dim`` this parent was constructed with.

        EXAMPLES::

            sage: from sage.geometry.polyhedron.parent import Polyhedra
            sage: Polyhedra(QQ, 3).ambient_dim()
            3
        """
        return self._ambient_dim
    def backend(self):
        r"""
        Return the backend.

        OUTPUT:

        String, the backend name this parent was constructed with
        (e.g. ``'ppl'``).

        EXAMPLES::

            sage: from sage.geometry.polyhedron.parent import Polyhedra
            sage: Polyhedra(QQ, 3).backend()
            'ppl'
        """
        return self._backend
@cached_method
def an_element(self):
r"""
Return a Polyhedron.
EXAMPLES::
sage: from sage.geometry.polyhedron.parent import Polyhedra
sage: Polyhedra(QQ, 4).an_element()
A 4-dimensional polyhedron in QQ^4 defined as the convex hull of 5 vertices
"""
zero = self.base_ring().zero()
one = self.base_ring().one()
p = [zero] * self.ambient_dim()
points = [p]
for i in range(self.ambient_dim()):
p = [zero] * self.ambient_dim()
p[i] = one
points.append(p)
return self.element_class(self, [points, [], []], None)
    @cached_method
    def some_elements(self):
        r"""
        Return a list of some elements of the semigroup.

        EXAMPLES::

            sage: from sage.geometry.polyhedron.parent import Polyhedra
            sage: Polyhedra(QQ, 4).some_elements()
            [A 3-dimensional polyhedron in QQ^4 defined as the convex hull of 4 vertices,
             A 4-dimensional polyhedron in QQ^4 defined as the convex hull of 1 vertex and 4 rays,
             A 2-dimensional polyhedron in QQ^4 defined as the convex hull of 2 vertices and 1 ray,
             The empty polyhedron in QQ^4]
            sage: Polyhedra(ZZ,0).some_elements()
            [The empty polyhedron in ZZ^0,
             A 0-dimensional polyhedron in ZZ^0 defined as the convex hull of 1 vertex]
        """
        if self.ambient_dim() == 0:
            # dimension 0: only the empty polyhedron and the single point
            return [
                self.element_class(self, None, None),
                self.element_class(self, None, [[], []])]
        points = []
        R = self.base_ring()
        for i in range(self.ambient_dim() + 5):
            # NOTE(review): in a plain .py file ``^`` is bitwise XOR with
            # lower precedence than ``*``, so this computes (i*j) XOR 2,
            # not i*j**2.  Any generic points do here, but confirm the
            # exponentiation was not intended.
            points.append([R(i*j^2) for j in range(self.ambient_dim())])
        return [
            self.element_class(self, [points[0:self.ambient_dim()+1], [], []], None),
            self.element_class(self, [points[0:1], points[1:self.ambient_dim()+1], []], None),
            self.element_class(self, [points[0:3], points[4:5], []], None),
            self.element_class(self, None, None)]
@cached_method
def zero(self):
r"""
Return the polyhedron consisting of the origin, which is the
neutral element for Minkowski addition.
EXAMPLES::
sage: from sage.geometry.polyhedron.parent import Polyhedra
sage: p = Polyhedra(QQ, 4).zero(); p
A 0-dimensional polyhedron in QQ^4 defined as the convex hull of 1 vertex
sage: p+p == p
True
"""
Vrep = [[[self.base_ring().zero()]*self.ambient_dim()], [], []]
return self.element_class(self, Vrep, None)
    def empty(self):
        """
        Return the empty polyhedron.

        EXAMPLES::

            sage: from sage.geometry.polyhedron.parent import Polyhedra
            sage: P = Polyhedra(QQ, 4)
            sage: P.empty()
            The empty polyhedron in QQ^4
            sage: P.empty().is_empty()
            True
        """
        # no Vrepresentation and no Hrepresentation data: the element
        # constructor builds the empty polyhedron from (None, None)
        return self(None, None)
    def universe(self):
        """
        Return the entire ambient space as polyhedron.

        EXAMPLES::

            sage: from sage.geometry.polyhedron.parent import Polyhedra
            sage: P = Polyhedra(QQ, 4)
            sage: P.universe()
            A 4-dimensional polyhedron in QQ^4 defined as the convex hull of 1 vertex and 4 lines
            sage: P.universe().is_universe()
            True
        """
        R = self.base_ring()
        # a single trivially-true inequality (1 >= 0) and no equations
        # cuts out nothing, i.e. the whole ambient space
        return self(None, [[[R.one()]+[R.zero()]*self.ambient_dim()], []], convert=True)
@cached_method
def Vrepresentation_space(self):
r"""
Return the ambient vector space.
This is the vector space or module containing the
Vrepresentation vectors.
OUTPUT:
A free module over the base ring of dimension :meth:`ambient_dim`.
EXAMPLES::
sage: from sage.geometry.polyhedron.parent import Polyhedra
sage: Polyhedra(QQ, 4).Vrepresentation_space()
Vector space of dimension 4 over Rational Field
sage: Polyhedra(QQ, 4).ambient_space()
Vector space of dimension 4 over Rational Field
"""
if self.base_ring() in Fields():
from sage.modules.free_module import VectorSpace
return VectorSpace(self.base_ring(), self.ambient_dim())
else:
from sage.modules.free_module import FreeModule
return FreeModule(self.base_ring(), self.ambient_dim())
ambient_space = Vrepresentation_space
@cached_method
def Hrepresentation_space(self):
r"""
Return the linear space containing the H-representation vectors.
OUTPUT:
A free module over the base ring of dimension :meth:`ambient_dim` + 1.
EXAMPLES::
sage: from sage.geometry.polyhedron.parent import Polyhedra
sage: Polyhedra(ZZ, 2).Hrepresentation_space()
Ambient free module of rank 3 over the principal ideal domain Integer Ring
"""
if self.base_ring() in Fields():
from sage.modules.free_module import VectorSpace
return VectorSpace(self.base_ring(), self.ambient_dim()+1)
else:
from sage.modules.free_module import FreeModule
return FreeModule(self.base_ring(), self.ambient_dim()+1)
def _repr_ambient_module(self):
"""
Return an abbreviated string representation of the ambient
space.
OUTPUT:
String.
EXAMPLES::
sage: from sage.geometry.polyhedron.parent import Polyhedra
sage: Polyhedra(QQ, 3)._repr_ambient_module()
'QQ^3'
sage: K.<sqrt3> = NumberField(x^2 - 3, embedding=AA(3).sqrt())
sage: Polyhedra(K, 4)._repr_ambient_module()
'(Number Field in sqrt3 with defining polynomial x^2 - 3 with sqrt3 = 1.732050807568878?)^4'
"""
from sage.rings.qqbar import AA
if self.base_ring() is ZZ:
s = 'ZZ'
elif self.base_ring() is QQ:
s = 'QQ'
elif self.base_ring() is RDF:
s = 'RDF'
elif self.base_ring() is AA:
s = 'AA'
else:
s = '({0})'.format(self.base_ring())
s += '^' + repr(self.ambient_dim())
return s
def _repr_(self):
"""
Return a string representation.
OUTPUT:
String.
EXAMPLES::
sage: from sage.geometry.polyhedron.parent import Polyhedra
sage: Polyhedra(QQ, 3)
Polyhedra in QQ^3
sage: Polyhedra(QQ, 3)._repr_()
'Polyhedra in QQ^3'
"""
return 'Polyhedra in '+self._repr_ambient_module()
def _element_constructor_(self, *args, **kwds):
"""
The element (polyhedron) constructor.
INPUT:
- ``Vrep`` -- a list ``[vertices, rays, lines]`` or ``None``.
- ``Hrep`` -- a list ``[ieqs, eqns]`` or ``None``.
- ``convert`` -- boolean keyword argument (default:
``True``). Whether to convert the coordinates into the base
ring.
- ``**kwds`` -- optional remaining keywords that are passed to the
polyhedron constructor.
EXAMPLES::
sage: from sage.geometry.polyhedron.parent import Polyhedra
sage: P = Polyhedra(QQ, 3)
sage: P._element_constructor_([[(0, 0, 0), (1, 0, 0), (0, 1, 0), (0,0,1)], [], []], None)
A 3-dimensional polyhedron in QQ^3 defined as the convex hull of 4 vertices
sage: P([[(0,0,0),(1,0,0),(0,1,0),(0,0,1)], [], []], None)
A 3-dimensional polyhedron in QQ^3 defined as the convex hull of 4 vertices
sage: P(0)
A 0-dimensional polyhedron in QQ^3 defined as the convex hull of 1 vertex
Check that :trac:`21270` is fixed::
sage: poly = polytopes.regular_polygon(7) # optional - sage.rings.number_field
sage: lp, x = poly.to_linear_program(solver='InteractiveLP', return_variable=True) # optional - sage.rings.number_field
sage: lp.set_objective(x[0] + x[1]) # optional - sage.rings.number_field
sage: b = lp.get_backend() # optional - sage.rings.number_field
sage: P = b.interactive_lp_problem() # optional - sage.rings.number_field
sage: p = P.plot() # optional - sage.plot # optional - sage.rings.number_field
sage: Q = Polyhedron(ieqs=[[-499999, 1000000], [1499999, -1000000]])
sage: P = Polyhedron(ieqs=[[0, 1.0], [1.0, -1.0]], base_ring=RDF)
sage: Q.intersection(P)
A 1-dimensional polyhedron in RDF^1 defined as the convex hull of 2 vertices
sage: P.intersection(Q)
A 1-dimensional polyhedron in RDF^1 defined as the convex hull of 2 vertices
The default is not to copy an object if the parent is ``self``::
sage: p = polytopes.cube(backend='field')
sage: P = p.parent()
sage: q = P._element_constructor_(p)
sage: q is p
True
sage: r = P._element_constructor_(p, copy=True)
sage: r is p
False
When the parent of the object is not ``self``, the default is not to copy::
sage: Q = P.base_extend(AA)
sage: q = Q._element_constructor_(p)
sage: q is p
False
sage: q = Q._element_constructor_(p, copy=False)
Traceback (most recent call last):
...
ValueError: you need to make a copy when changing the parent
For mutable polyhedra either ``copy`` or ``mutable`` must be specified::
sage: p = Polyhedron(vertices=[[0, 1], [1, 0]], mutable=True)
sage: P = p.parent()
sage: q = P._element_constructor_(p)
Traceback (most recent call last):
...
ValueError: must make a copy to obtain immutable object from mutable input
sage: q = P._element_constructor_(p, mutable=True)
sage: q is p
True
sage: r = P._element_constructor_(p, copy=True)
sage: r.is_mutable()
False
sage: r is p
False
"""
nargs = len(args)
convert = kwds.pop('convert', True)
def convert_base_ring(lstlst):
return [[self.base_ring()(x) for x in lst] for lst in lstlst]
# renormalize before converting when going from QQ to RDF, see trac 21270
def convert_base_ring_Hrep(lstlst):
newlstlst = []
for lst in lstlst:
if all(c in QQ for c in lst):
m = max(abs(w) for w in lst)
if m == 0:
newlstlst.append(lst)
else:
newlstlst.append([q/m for q in lst])
else:
newlstlst.append(lst)
return convert_base_ring(newlstlst)
if nargs == 2:
Vrep, Hrep = args
if convert and Hrep:
if self.base_ring == RDF:
Hrep = [convert_base_ring_Hrep(_) for _ in Hrep]
else:
Hrep = [convert_base_ring(_) for _ in Hrep]
if convert and Vrep:
Vrep = [convert_base_ring(_) for _ in Vrep]
return self.element_class(self, Vrep, Hrep, **kwds)
if nargs == 1 and is_Polyhedron(args[0]):
copy = kwds.pop('copy', args[0].parent() is not self)
mutable = kwds.pop('mutable', False)
if not copy and args[0].parent() is not self:
raise ValueError("you need to make a copy when changing the parent")
if args[0].is_mutable() and not copy and not mutable:
raise ValueError("must make a copy to obtain immutable object from mutable input")
if not copy and mutable is args[0].is_mutable():
return args[0]
polyhedron = args[0]
return self._element_constructor_polyhedron(polyhedron, mutable=mutable, **kwds)
if nargs == 1 and args[0] == 0:
return self.zero()
raise ValueError('Cannot convert to polyhedron object.')
def _element_constructor_polyhedron(self, polyhedron, **kwds):
"""
The element (polyhedron) constructor for the case of 1 argument, a polyhedron.
Set up the element using both representations,
if the backend can handle it.
Otherwise set up the element from Hrepresentation.
EXAMPLES::
sage: from sage.geometry.polyhedron.parent import Polyhedra
sage: P = Polyhedra(QQ, 3, backend='cdd')
sage: p = Polyhedron(vertices=[(0, 0, 0), (1, 0, 0), (0, 1, 0), (0, 0, 1)])
sage: p
A 3-dimensional polyhedron in ZZ^3 defined as the convex hull of 4 vertices
sage: P(p)
A 3-dimensional polyhedron in QQ^3 defined as the convex hull of 4 vertices
sage: P = Polyhedra(AA, 3, backend='field')
sage: p = Polyhedron(vertices=[(0, 0, 0), (1, 0, 0), (0, 1, 0), (0, 0, 1)])
sage: P(p)
A 3-dimensional polyhedron in AA^3 defined as the convex hull of 4 vertices
"""
Vrep = None
if hasattr(self.Element, '_init_from_Vrepresentation_and_Hrepresentation'):
Vrep = [polyhedron.vertex_generator(), polyhedron.ray_generator(),
polyhedron.line_generator()]
Hrep = [polyhedron.inequality_generator(), polyhedron.equation_generator()]
return self._element_constructor_(Vrep, Hrep, Vrep_minimal=True, Hrep_minimal=True, **kwds)
def base_extend(self, base_ring, backend=None, ambient_dim=None):
"""
Return the base extended parent.
INPUT:
- ``base_ring``, ``backend`` -- see
:func:`~sage.geometry.polyhedron.constructor.Polyhedron`.
- ``ambient_dim`` -- if not ``None`` change ambient dimension
accordingly.
EXAMPLES::
sage: from sage.geometry.polyhedron.parent import Polyhedra
sage: Polyhedra(ZZ,3).base_extend(QQ)
Polyhedra in QQ^3
sage: Polyhedra(ZZ,3).an_element().base_extend(QQ)
A 3-dimensional polyhedron in QQ^3 defined as the convex hull of 4 vertices
sage: Polyhedra(QQ, 2).base_extend(ZZ)
Polyhedra in QQ^2
TESTS:
Test that :trac:`22575` is fixed::
sage: P = Polyhedra(ZZ,3).base_extend(QQ, backend='field')
sage: P.backend()
'field'
"""
if self.base_ring().has_coerce_map_from(base_ring):
new_ring = self.base_ring()
else:
new_ring = self._coerce_base_ring(base_ring)
return self.change_ring(new_ring, backend=backend, ambient_dim=ambient_dim)
def change_ring(self, base_ring, backend=None, ambient_dim=None):
"""
Return the parent with the new base ring.
INPUT:
- ``base_ring``, ``backend`` -- see
:func:`~sage.geometry.polyhedron.constructor.Polyhedron`.
- ``ambient_dim`` -- if not ``None`` change ambient dimension
accordingly.
EXAMPLES::
sage: from sage.geometry.polyhedron.parent import Polyhedra
sage: Polyhedra(ZZ,3).change_ring(QQ)
Polyhedra in QQ^3
sage: Polyhedra(ZZ,3).an_element().change_ring(QQ)
A 3-dimensional polyhedron in QQ^3 defined as the convex hull of 4 vertices
sage: Polyhedra(RDF, 3).change_ring(QQ).backend()
'cdd'
sage: Polyhedra(QQ, 3).change_ring(ZZ, ambient_dim=4)
Polyhedra in ZZ^4
sage: Polyhedra(QQ, 3, backend='cdd').change_ring(QQ, ambient_dim=4).backend()
'cdd'
"""
if ambient_dim is None:
ambient_dim = self.ambient_dim()
if base_ring == self.base_ring() and \
ambient_dim == self.ambient_dim() and \
(backend is None or backend == self.backend()):
return self
# if not specified try the same backend
if backend is None and does_backend_handle_base_ring(base_ring, self.backend()):
return Polyhedra(base_ring, ambient_dim, backend=self.backend())
return Polyhedra(base_ring, ambient_dim, backend=backend)
    def _coerce_base_ring(self, other):
        r"""
        Return the common base ring for both ``self`` and ``other``.
        This method is not part of the coercion framework, but only a
        convenience function for :class:`Polyhedra_base`.
        INPUT:
        - ``other`` -- must be either:
        * another ``Polyhedron`` object
        * `\ZZ`, `\QQ`, `RDF`, or a ring that can be coerced into them.
        * a constant that can be coerced to `\ZZ`, `\QQ`, or `RDF`.
        OUTPUT:
        Either `\ZZ`, `\QQ`, or `RDF`. Raises ``TypeError`` if
        ``other`` is not a suitable input.
        .. NOTE::
        "Real" numbers in sage are not necessarily elements of
        `RDF`. For example, the literal `1.0` is not.
        EXAMPLES::
        sage: triangle_QQ  = Polyhedron(vertices = [[1,0],[0,1],[1,1]], base_ring=QQ).parent()
        sage: triangle_RDF = Polyhedron(vertices = [[1,0],[0,1],[1,1]], base_ring=RDF).parent()
        sage: triangle_QQ._coerce_base_ring(QQ)
        Rational Field
        sage: triangle_QQ._coerce_base_ring(triangle_RDF)
        Real Double Field
        sage: triangle_RDF._coerce_base_ring(triangle_QQ)
        Real Double Field
        sage: triangle_QQ._coerce_base_ring(RDF)
        Real Double Field
        sage: triangle_QQ._coerce_base_ring(ZZ)
        Rational Field
        sage: triangle_QQ._coerce_base_ring(1/2)
        Rational Field
        sage: triangle_QQ._coerce_base_ring(0.5)
        Real Double Field
        TESTS:
        Test that :trac:`28770` is fixed::
        sage: z = QQ['z'].0
        sage: K = NumberField(z^2 - 2,'s')
        sage: triangle_QQ._coerce_base_ring(K)
        Number Field in s with defining polynomial z^2 - 2
        sage: triangle_QQ._coerce_base_ring(K.gen())
        Number Field in s with defining polynomial z^2 - 2
        sage: z = QQ['z'].0
        sage: K = NumberField(z^2 - 2,'s')
        sage: K.gen()*polytopes.simplex(backend='field')
        A 3-dimensional polyhedron in (Number Field in s with defining polynomial z^2 - 2)^4 defined as the convex hull of 4 vertices
        """
        from sage.structure.element import Element
        # An element stands for its parent in the ring-finding cascade below.
        if isinstance(other, Element):
            other = other.parent()
        if hasattr(other, "is_ring") and other.is_ring():
            # ``other`` already is a ring.
            other_ring = other
        else:
            try:
                # ``other`` is a parent with a base ring (e.g. another
                # Polyhedra parent).
                other_ring = other.base_ring()
            except AttributeError:
                try:
                    # other is a constant?
                    other_ring = other.parent()
                except AttributeError:
                    other_ring = None
                # Last resort: probe coercion into the supported rings in
                # increasing generality; the first success wins.
                for ring in (ZZ, QQ, RDF):
                    try:
                        ring.coerce(other)
                        other_ring = ring
                        break
                    except TypeError:
                        pass
                if other_ring is None:
                    raise TypeError('Could not coerce '+str(other)+' into ZZ, QQ, or RDF.')
        if not other_ring.is_exact():
            other_ring = RDF  # the only supported floating-point numbers for now
        # Let the coercion model compute the common parent of both base rings.
        cm_map, cm_ring = get_coercion_model().analyse(self.base_ring(), other_ring)
        if cm_ring is None:
            raise TypeError('Could not coerce type '+str(other)+' into ZZ, QQ, or RDF.')
        return cm_ring
def _coerce_map_from_(self, X):
r"""
Return whether there is a coercion from ``X``
INPUT:
- ``X`` -- anything.
OUTPUT:
Boolean.
EXAMPLES::
sage: from sage.geometry.polyhedron.parent import Polyhedra
sage: Polyhedra(QQ,3).has_coerce_map_from( Polyhedra(ZZ,3) ) # indirect doctest
True
sage: Polyhedra(ZZ,3).has_coerce_map_from( Polyhedra(QQ,3) )
False
"""
if not isinstance(X, Polyhedra_base):
return False
if self.ambient_dim() != X.ambient_dim():
return False
return self.base_ring().has_coerce_map_from(X.base_ring())
    def _get_action_(self, other, op, self_is_left):
        """
        Register actions with the coercion model.
        The monoid actions are Minkowski sum and Cartesian product. In
        addition, we want multiplication by a scalar to be dilation
        and addition by a vector to be translation. This is
        implemented as an action in the coercion model.
        INPUT:
        - ``other`` -- a scalar or a vector.
        - ``op`` -- the operator.
        - ``self_is_left`` -- boolean. Whether ``self`` is on the left
        of the operator.
        OUTPUT:
        An action that is used by the coercion model.
        EXAMPLES::
        sage: from sage.geometry.polyhedron.parent import Polyhedra
        sage: PZZ2 = Polyhedra(ZZ, 2)
        sage: PZZ2.get_action(ZZ)   # indirect doctest
        Right action by Integer Ring on Polyhedra in ZZ^2
        sage: PZZ2.get_action(QQ)
        Right action by Rational Field on Polyhedra in QQ^2
        with precomposition on left by Coercion map:
        From: Polyhedra in ZZ^2
        To:   Polyhedra in QQ^2
        with precomposition on right by Identity endomorphism of Rational Field
        sage: PQQ2 = Polyhedra(QQ, 2)
        sage: PQQ2.get_action(ZZ)
        Right action by Integer Ring on Polyhedra in QQ^2
        sage: PQQ2.get_action(QQ)
        Right action by Rational Field on Polyhedra in QQ^2
        sage: Polyhedra(ZZ,2).an_element() * 2
        A 2-dimensional polyhedron in ZZ^2 defined as the convex hull of 3 vertices
        sage: Polyhedra(ZZ,2).an_element() * (2/3)
        A 2-dimensional polyhedron in QQ^2 defined as the convex hull of 3 vertices
        sage: Polyhedra(QQ,2).an_element() * 2
        A 2-dimensional polyhedron in QQ^2 defined as the convex hull of 3 vertices
        sage: Polyhedra(QQ,2).an_element() * (2/3)
        A 2-dimensional polyhedron in QQ^2 defined as the convex hull of 3 vertices
        sage: 2 * Polyhedra(ZZ,2).an_element()
        A 2-dimensional polyhedron in ZZ^2 defined as the convex hull of 3 vertices
        sage: (2/3) * Polyhedra(ZZ,2).an_element()
        A 2-dimensional polyhedron in QQ^2 defined as the convex hull of 3 vertices
        sage: 2 * Polyhedra(QQ,2).an_element()
        A 2-dimensional polyhedron in QQ^2 defined as the convex hull of 3 vertices
        sage: (2/3) * Polyhedra(QQ,2).an_element()
        A 2-dimensional polyhedron in QQ^2 defined as the convex hull of 3 vertices
        sage: from sage.geometry.polyhedron.parent import Polyhedra
        sage: PZZ2.get_action(ZZ^2, op=operator.add)
        Right action by Ambient free module of rank 2 over the principal ideal domain Integer Ring on Polyhedra in ZZ^2
        with precomposition on left by Identity endomorphism of Polyhedra in ZZ^2
        with precomposition on right by Generic endomorphism of Ambient free module of rank 2 over the principal ideal domain Integer Ring
        """
        import operator
        from sage.structure.coerce_actions import ActedUponAction
        from sage.categories.action import PrecomposedAction
        # Translation: polyhedron + vector (free-module element).
        if op is operator.add and is_FreeModule(other):
            # Extend both sides to the common base ring first.
            base_ring = self._coerce_base_ring(other)
            extended_self = self.base_extend(base_ring)
            extended_other = other.base_extend(base_ring)
            action = ActedUponAction(extended_other, extended_self, not self_is_left)
            if self_is_left:
                action = PrecomposedAction(action,
                                           extended_self._internal_coerce_map_from(self).__copy__(),
                                           extended_other._internal_coerce_map_from(other).__copy__())
            else:
                # Mirror image: precomposition maps are swapped.
                action = PrecomposedAction(action,
                                           extended_other._internal_coerce_map_from(other).__copy__(),
                                           extended_self._internal_coerce_map_from(self).__copy__())
            return action
        # Dilation: polyhedron * scalar from a commutative ring.
        if op is operator.mul and isinstance(other, CommutativeRing):
            ring = self._coerce_base_ring(other)
            if ring is self.base_ring():
                # No base extension necessary; act directly.
                return ActedUponAction(other, self, not self_is_left)
            extended = self.base_extend(ring)
            action = ActedUponAction(ring, extended, not self_is_left)
            if self_is_left:
                action = PrecomposedAction(action,
                                           extended._internal_coerce_map_from(self).__copy__(),
                                           ring._internal_coerce_map_from(other).__copy__())
            else:
                action = PrecomposedAction(action,
                                           ring._internal_coerce_map_from(other).__copy__(),
                                           extended._internal_coerce_map_from(self).__copy__())
            return action
def _make_Inequality(self, polyhedron, data):
"""
Create a new inequality object.
INPUT:
- ``polyhedron`` -- the new polyhedron.
- ``data`` -- the H-representation data.
OUTPUT:
A new :class:`~sage.geometry.polyhedron.representation.Inequality` object.
EXAMPLES::
sage: p = Polyhedron([(1,2,3),(2/3,3/4,4/5)]) # indirect doctest
sage: next(p.inequality_generator())
An inequality (0, 0, -1) x + 3 >= 0
"""
try:
obj = self._Inequality_pool.pop()
except IndexError:
obj = Inequality(self)
obj._set_data(polyhedron, data)
return obj
def _make_Equation(self, polyhedron, data):
"""
Create a new equation object.
INPUT:
- ``polyhedron`` -- the new polyhedron.
- ``data`` -- the H-representation data.
OUTPUT:
A new :class:`~sage.geometry.polyhedron.representation.Equation` object.
EXAMPLES::
sage: p = Polyhedron([(1,2,3),(2/3,3/4,4/5)]) # indirect doctest
sage: next(p.equation_generator())
An equation (0, 44, -25) x - 13 == 0
"""
try:
obj = self._Equation_pool.pop()
except IndexError:
obj = Equation(self)
obj._set_data(polyhedron, data)
return obj
def _make_Vertex(self, polyhedron, data):
"""
Create a new vertex object.
INPUT:
- ``polyhedron`` -- the new polyhedron.
- ``data`` -- the V-representation data.
OUTPUT:
A new :class:`~sage.geometry.polyhedron.representation.Vertex` object.
EXAMPLES::
sage: p = Polyhedron([(1,2,3),(2/3,3/4,4/5)], rays=[(5/6,6/7,7/8)]) # indirect doctest
sage: next(p.vertex_generator())
A vertex at (1, 2, 3)
"""
try:
obj = self._Vertex_pool.pop()
except IndexError:
obj = Vertex(self)
obj._set_data(polyhedron, data)
return obj
def _make_Ray(self, polyhedron, data):
"""
Create a new ray object.
INPUT:
- ``polyhedron`` -- the new polyhedron.
- ``data`` -- the V-representation data.
OUTPUT:
A new :class:`~sage.geometry.polyhedron.representation.Ray` object.
EXAMPLES::
sage: p = Polyhedron([(1,2,3),(2/3,3/4,4/5)], rays=[(5/6,6/7,7/8)]) # indirect doctest
sage: next(p.ray_generator())
A ray in the direction (140, 144, 147)
"""
try:
obj = self._Ray_pool.pop()
except IndexError:
obj = Ray(self)
obj._set_data(polyhedron, data)
return obj
def _make_Line(self, polyhedron, data):
"""
Create a new line object.
INPUT:
- ``polyhedron`` -- the new polyhedron.
- ``data`` -- the V-representation data.
OUTPUT:
A new :class:`~sage.geometry.polyhedron.representation.Line` object.
EXAMPLES::
sage: p = Polyhedron([(1,2,3),(2/3,3/4,4/5)], lines=[(5/6,6/7,7/8)]) # indirect doctest
sage: next(p.line_generator())
A line in the direction (140, 144, 147)
"""
try:
obj = self._Line_pool.pop()
except IndexError:
obj = Line(self)
obj._set_data(polyhedron, data)
return obj
from sage.geometry.polyhedron.backend_cdd import Polyhedron_QQ_cdd
lazy_import('sage.geometry.polyhedron.backend_cdd_rdf', 'Polyhedron_RDF_cdd')
from sage.geometry.polyhedron.backend_ppl import Polyhedron_ZZ_ppl, Polyhedron_QQ_ppl
from sage.geometry.polyhedron.backend_normaliz import Polyhedron_normaliz, Polyhedron_ZZ_normaliz, Polyhedron_QQ_normaliz
from sage.geometry.polyhedron.backend_polymake import Polyhedron_polymake
from sage.geometry.polyhedron.backend_field import Polyhedron_field
class Polyhedra_ZZ_ppl(Polyhedra_base):
    Element = Polyhedron_ZZ_ppl

    def _element_constructor_polyhedron(self, polyhedron, **kwds):
        """
        Construct an element from a single polyhedron argument.

        When the input already uses the ``ppl`` backend, its internal
        ``ppl_polyhedron`` is copied directly; otherwise the generic
        constructor of :class:`Polyhedra_base` is used.

        EXAMPLES::

            sage: from sage.geometry.polyhedron.parent import Polyhedra
            sage: P = Polyhedra(ZZ, 3)
            sage: p = Polyhedron(vertices=[(0, 0, 0), (1, 0, 0), (0, 1, 0), (0, 0, 1)], base_ring=QQ)
            sage: p
            A 3-dimensional polyhedron in QQ^3 defined as the convex hull of 4 vertices
            sage: P(p)
            A 3-dimensional polyhedron in ZZ^3 defined as the convex hull of 4 vertices
            sage: p = Polyhedron(vertices=[(0, 0, 0), (1, 0, 0), (0, 1, 0), (0, 0, 1)], backend='cdd')
            sage: P(p)
            A 3-dimensional polyhedron in ZZ^3 defined as the convex hull of 4 vertices
        """
        from copy import copy
        if polyhedron.backend() != "ppl":
            return Polyhedra_base._element_constructor_polyhedron(self, polyhedron, **kwds)
        ppl_data = copy(polyhedron._ppl_polyhedron)
        return self._element_constructor_(None, None, ppl_polyhedron=ppl_data, **kwds)
class Polyhedra_ZZ_normaliz(Polyhedra_base):
    """Parent class for polyhedra over ``ZZ`` with the ``normaliz`` backend."""
    Element = Polyhedron_ZZ_normaliz
class Polyhedra_QQ_ppl(Polyhedra_base):
    Element = Polyhedron_QQ_ppl

    def _element_constructor_polyhedron(self, polyhedron, **kwds):
        """
        Construct an element from a single polyhedron argument.

        When the input already uses the ``ppl`` backend, its internal
        ``ppl_polyhedron`` is copied directly; otherwise the generic
        constructor of :class:`Polyhedra_base` is used.

        EXAMPLES::

            sage: from sage.geometry.polyhedron.parent import Polyhedra
            sage: P = Polyhedra(QQ, 3)
            sage: p = Polyhedron(vertices=[(0, 0, 0), (1, 0, 0), (0, 1, 0), (0, 0, 1)])
            sage: p
            A 3-dimensional polyhedron in ZZ^3 defined as the convex hull of 4 vertices
            sage: P(p)
            A 3-dimensional polyhedron in QQ^3 defined as the convex hull of 4 vertices
            sage: p = Polyhedron(vertices=[(0, 0, 0), (1, 0, 0), (0, 1, 0), (0, 0, 1)], backend='cdd')
            sage: P(p)
            A 3-dimensional polyhedron in QQ^3 defined as the convex hull of 4 vertices
        """
        from copy import copy
        if polyhedron.backend() != "ppl":
            return Polyhedra_base._element_constructor_polyhedron(self, polyhedron, **kwds)
        ppl_data = copy(polyhedron._ppl_polyhedron)
        return self._element_constructor_(None, None, ppl_polyhedron=ppl_data, **kwds)
class Polyhedra_QQ_normaliz(Polyhedra_base):
    """Parent class for polyhedra over ``QQ`` with the ``normaliz`` backend."""
    Element = Polyhedron_QQ_normaliz
class Polyhedra_QQ_cdd(Polyhedra_base):
    """Parent class for polyhedra over ``QQ`` with the ``cdd`` backend."""
    Element = Polyhedron_QQ_cdd
class Polyhedra_RDF_cdd(Polyhedra_base):
    """Parent class for polyhedra over ``RDF`` with the ``cdd`` backend."""
    Element = Polyhedron_RDF_cdd
class Polyhedra_normaliz(Polyhedra_base):
    """Parent class for polyhedra with the ``normaliz`` backend."""
    Element = Polyhedron_normaliz
class Polyhedra_polymake(Polyhedra_base):
    """Parent class for polyhedra with the ``polymake`` backend."""
    Element = Polyhedron_polymake
class Polyhedra_field(Polyhedra_base):
    """Parent class for polyhedra with the generic ``field`` backend."""
    Element = Polyhedron_field
@cached_function
def does_backend_handle_base_ring(base_ring, backend):
    r"""
    Return whether ``backend`` can handle ``base_ring``.

    The check simply attempts to construct a zero-dimensional parent
    with the requested combination; a ``ValueError`` means the backend
    rejects the ring.  The result is cached.

    EXAMPLES::

        sage: from sage.geometry.polyhedron.parent import does_backend_handle_base_ring
        sage: does_backend_handle_base_ring(QQ, 'ppl')
        True
        sage: does_backend_handle_base_ring(QQ[sqrt(5)], 'ppl')
        False
        sage: does_backend_handle_base_ring(QQ[sqrt(5)], 'field')
        True
    """
    try:
        Polyhedra(base_ring, 0, backend)
    except ValueError:
        return False
    else:
        return True
| 37.328789 | 142 | 0.588154 |
9c63375754887301035959316ae62cf9223a55c4 | 12,241 | py | Python | tests/test_stac.py | stactools-packages/stactools-goes | 8d81c9644c02d3c79ac369228d659d065e0e6889 | [
"Apache-2.0"
] | 5 | 2021-07-06T21:48:11.000Z | 2021-11-09T15:57:45.000Z | tests/test_stac.py | stactools-packages/stactools-goes | 8d81c9644c02d3c79ac369228d659d065e0e6889 | [
"Apache-2.0"
] | 5 | 2021-06-15T16:16:51.000Z | 2021-09-07T14:44:57.000Z | tests/test_stac.py | stactools-packages/stactools-goes | 8d81c9644c02d3c79ac369228d659d065e0e6889 | [
"Apache-2.0"
] | null | null | null | import dataclasses
from datetime import datetime, timezone
from tests.test_mpc import MicrosoftPCData
from typing import Any, Callable, List
import math
import os.path
from tempfile import TemporaryDirectory
import unittest
from shapely.geometry import shape, box
import planetary_computer
from pystac import MediaType
from pystac.extensions.projection import ProjectionExtension
from pystac.extensions.eo import EOExtension
from stactools.goes import stac, __version__
from stactools.goes.errors import GOESRProductHrefsError, GOESMissingExtentError
from stactools.goes.stac import ProductHrefs
from stactools.goes.enums import ProductAcronym
from stactools.goes.file_name import ABIL2FileName
from tests import (EXTERNAL_DATA, INVALID_LAT_LNG, PC_FDC_C, PC_LST_M,
PC_MCMIP_C, PC_MCMIP_F, PC_MCMIP_F_17, test_data,
CMIP_FILE_NAME, CMIP_FULL_FILE_NAME, MCMIP_FILE_NAME)
# Lon/lat polygon over the central United States; the geometry tests below
# assert that full-disk GOES item footprints contain this region.
US_CENTER = shape({
    "type":
    "Polygon",
    "coordinates": [[
        [-101.25, 34.59704151614417],
        [-88.24218749999999, 34.59704151614417],
        [-88.24218749999999, 41.244772343082076],
        [-101.25, 41.244772343082076],
        [-101.25, 34.59704151614417],
    ]],
})
class CreateItemFromHrefTest(unittest.TestCase):
    """Tests for ``stac.create_item_from_href`` (single-netCDF entry point).

    These tests download external fixture files via ``test_data`` on first
    run, so they require network access.
    """
    def test_create_item(self):
        """Check id, datetime, GOES properties, assets and projection info."""
        path = test_data.get_external_data(CMIP_FILE_NAME)
        item = stac.create_item_from_href(path)
        self.assertEqual(item.id, "OR_ABI-L2-M1-M6_G16_s20211231619248")
        self.assertTrue(item.geometry)
        self.assertTrue(item.bbox)
        self.assertEqual(
            item.datetime,
            datetime(2021, 5, 3, 16, 19, 24, 800000, timezone.utc))
        self.assertEqual(item.common_metadata.platform, "GOES-16")
        self.assertEqual(item.common_metadata.instruments, ["ABI"])
        self.assertTrue(
            "https://stac-extensions.github.io/processing/v1.0.0/schema.json"
            in item.stac_extensions)
        self.assertDictEqual(item.properties["processing:software"],
                             {"stactools-goes": __version__})
        self.assertEqual(item.properties["goes:image-type"], "MESOSCALE")
        self.assertEqual(item.properties["goes:mode"], "6")
        self.assertEqual(item.properties["goes:mesoscale-image-number"], 1)
        data = item.assets["CMIP_C02-nc"]
        self.assertEqual(data.href, path)
        self.assertEqual(
            data.title,
            "Cloud and Moisture Imagery reflectance factor - Band 02")
        self.assertEqual(data.media_type, "application/netcdf")
        self.assertEqual(data.roles, ["data"])
        projection = ProjectionExtension.ext(item)
        self.assertIsNone(projection.epsg)
        self.assertIsNotNone(projection.wkt2)
        # NOTE(review): the second argument of assertIsNotNone is the failure
        # *message*, not an expected value; this was probably meant to be
        # assertEqual(projection.shape, [2000, 2000]) -- confirm and fix.
        self.assertIsNotNone(projection.shape, [2000, 2000])
        expected_transform = [
            501.0043288718852, 0.0, -2224459.203445637, 0.0,
            -501.0043288718852, 4068155.14931683, 0.0, 0.0, 1.0
        ]
        for actual, expected in zip(projection.transform, expected_transform):
            self.assertAlmostEqual(actual, expected, delta=1e-4)
        item.validate()
    def test_read_href_modifier(self):
        """The read_href_modifier callback must be invoked."""
        did_it = False
        def modify_href(href: str) -> str:
            nonlocal did_it
            did_it = True
            return href
        path = test_data.get_external_data(CMIP_FILE_NAME)
        _ = stac.create_item_from_href(path, modify_href)
        self.assertTrue(did_it)
    def test_backoff_fn(self):
        """The backoff_func wrapper must be invoked."""
        did_it = False
        def with_backoff(fn: Callable[[], Any]) -> Any:
            nonlocal did_it
            did_it = True
            return fn()
        path = test_data.get_external_data(CMIP_FILE_NAME)
        _ = stac.create_item_from_href(path, backoff_func=with_backoff)
        self.assertTrue(did_it)
    def test_cog_directory(self):
        """Passing cog_directory must produce a COG asset on disk."""
        path = test_data.get_external_data(CMIP_FILE_NAME)
        with TemporaryDirectory() as tmp_dir:
            item = stac.create_item_from_href(path, cog_directory=tmp_dir)
            cog_asset = item.assets["CMIP_C02"]
            self.assertTrue(os.path.exists(cog_asset.href))
            self.assertEqual(
                cog_asset.title,
                "Cloud and Moisture Imagery reflectance factor - Band 02")
            self.assertEqual(cog_asset.roles, ["data"])
            self.assertEqual(cog_asset.media_type, MediaType.COG)
    def test_different_product(self):
        """Mesoscale image number is parsed from other products (LSTM2)."""
        path = test_data.get_path(
            "data-files/"
            "OR_ABI-L2-LSTM2-M6_G16_s20211381700538_e20211381700595_c20211381701211.nc"
        )
        item = stac.create_item_from_href(path)
        self.assertEqual(item.properties["goes:mesoscale-image-number"], 2)
        item.validate()
    def test_full_product_geometry(self):
        """Full-disk items get a valid, sanely located geometry."""
        for path in [
                test_data.get_external_data(CMIP_FULL_FILE_NAME),
                test_data.get_external_data(PC_MCMIP_F_17)
        ]:
            with self.subTest(path):
                item = stac.create_item_from_href(path)
                self.assertNotIn("goes:mesoscale-image-number",
                                 item.properties)
                self.assertEqual(item.properties.get("goes:image-type"),
                                 "FULL DISK")
                geometry = shape(item.geometry)
                self.assertTrue(geometry.is_valid)
                # https://github.com/stactools-packages/goes/issues/4
                self.assertFalse(math.isnan(geometry.area),
                                 f"This geometry has a NaN area: {geometry}")
                # Test is in a sane location
                bbox = box(*item.bbox)
                self.assertTrue(bbox.covers(geometry))
                self.assertTrue(geometry.contains(US_CENTER))
    def test_conus_product_geometry(self):
        """CONUS items get a valid geometry and no mesoscale number."""
        path = test_data.get_external_data(PC_MCMIP_C)
        item = stac.create_item_from_href(path)
        self.assertNotIn("goes:mesoscale-image-number", item.properties)
        self.assertEqual(item.properties.get("goes:image-type"), "CONUS")
        geometry = shape(item.geometry)
        self.assertTrue(geometry.is_valid)
        self.assertFalse(math.isnan(geometry.area),
                         f"This geometry has a NaN area: {geometry}")
    def test_mcmip_eo(self):
        """MCMIP items carry EO band metadata on the nc and per-channel assets."""
        path = test_data.get_external_data(MCMIP_FILE_NAME)
        with TemporaryDirectory() as tmp_dir:
            item = stac.create_item_from_href(path, cog_directory=tmp_dir)
            data = item.assets["MCMIP-nc"]
            eo = EOExtension.ext(data)
            assert eo.bands
            self.assertEqual(len(eo.bands), 16)
            for band in eo.bands:
                self.assertIsNotNone(band.name)
                self.assertIsNotNone(band.center_wavelength)
            for channel in range(1, 17):
                cmi = item.assets[f"CMI_C{channel:0>2d}_2km"]
                eo = EOExtension.ext(cmi)
                assert eo.bands
                self.assertEqual(len(eo.bands), 1)
                self.assertEqual(eo.bands[0].name, f"ABI Band {channel}")
                # DQF (data-quality) assets carry no EO bands.
                dqf = item.assets[f"CMI_C{channel:0>2d}_DQF_2km"]
                eo = EOExtension.ext(dqf)
                self.assertIsNone(eo.bands)
    def test_fdc(self):
        """FDC (fire) CONUS items: shared proj:shape lives on the item only."""
        path = test_data.get_external_data(PC_FDC_C)
        with TemporaryDirectory() as tmp_dir:
            item = stac.create_item_from_href(path, cog_directory=tmp_dir)
            self.assertEqual(item.properties.get("goes:image-type"), "CONUS")
            # All assets have the same shape, so none should have projection info
            self.assertIn("proj:shape", item.properties)
            for asset in item.assets.values():
                self.assertNotIn("proj:shape", asset.extra_fields)
            # Assert geometry is valid
            g = shape(item.geometry)
            self.assertTrue(g.is_valid)
    def test_lst_m(self):
        """LST mesoscale items: shared proj:shape lives on the item only."""
        path = test_data.get_external_data(PC_LST_M)
        with TemporaryDirectory() as tmp_dir:
            item = stac.create_item_from_href(path, cog_directory=tmp_dir)
            self.assertEqual(item.properties.get("goes:image-type"),
                             "MESOSCALE")
            # All assets have the same shape, so none should have projection info
            self.assertIn("proj:shape", item.properties)
            for asset in item.assets.values():
                self.assertNotIn("proj:shape", asset.extra_fields)
            # Assert geometry is valid
            g = shape(item.geometry)
            self.assertTrue(g.is_valid)
    def test_invalid_lat_lng(self):
        """Files with unusable lat/lng extents raise GOESMissingExtentError."""
        path = test_data.get_external_data(INVALID_LAT_LNG)
        with self.assertRaises(GOESMissingExtentError):
            stac.create_item_from_href(path)
class CreateItemTest(unittest.TestCase):
    """Tests for ``stac.create_item`` (multi-product combined items).

    ``test_combined_item`` signs Planetary Computer hrefs, so these tests
    require network access.
    """
    def test_validate_product_hrefs(self):
        """Products with mismatched start times must raise GOESRProductHrefsError."""
        product_hrefs: List[ProductHrefs] = []
        mcmip_path = test_data.get_external_data(PC_MCMIP_F)
        mcmip_file_name = ABIL2FileName.from_href(mcmip_path)
        product_hrefs.append(ProductHrefs(nc_href=mcmip_path, cog_hrefs=None))
        # Fabricate a CMIP file name whose start_time differs from the MCMIP one.
        cmip_name_different_start_date = dataclasses.replace(
            mcmip_file_name,
            product=ProductAcronym.CMIP,
            channel=1,
            start_time="20180100500416")
        cmip_path = os.path.join(os.path.dirname(mcmip_path),
                                 cmip_name_different_start_date.to_str())
        product_hrefs.append(ProductHrefs(nc_href=cmip_path, cog_hrefs=None))
        with self.assertRaises(GOESRProductHrefsError):
            _ = stac.create_item(product_hrefs)
    def test_combined_item(self):
        """Combine MCMIP, FDC and all 16 CMIP channels into one valid item."""
        product_hrefs: List[ProductHrefs] = []
        mcmip_href = EXTERNAL_DATA[PC_MCMIP_C]['url']
        mpc_data = MicrosoftPCData(mcmip_href)
        for product in [ProductAcronym.MCMIP, ProductAcronym.FDC]:
            # Use local path for main netCDF file
            nc_href = mpc_data.get_nc_href(product)
            if nc_href == mcmip_href:
                nc_href = test_data.get_external_data(PC_MCMIP_C)
            product_hrefs.append(
                ProductHrefs(nc_href=nc_href,
                             cog_hrefs=mpc_data.get_cog_hrefs(product)))
        for channel in range(1, 17):
            product_hrefs.append(
                ProductHrefs(
                    nc_href=mpc_data.get_nc_href(ProductAcronym.CMIP, channel),
                    cog_hrefs=mpc_data.get_cog_hrefs(ProductAcronym.CMIP,
                                                     channel)))
        item = stac.create_item(product_hrefs,
                                read_href_modifier=planetary_computer.sign)
        item.validate()
        # Ensure all expected assets are there
        expected_assets = set(["MCMIP-nc"])
        for band_idx in range(1, 17):
            expected_assets.add(f"CMIP_C{band_idx:0>2d}-nc")
            expected_assets.add(f"CMI_C{band_idx:0>2d}_2km")
            expected_assets.add(f"CMI_C{band_idx:0>2d}_DQF_2km")
            # Bands 1, 3, 5 also ship 1km COGs; band 2 ships 0.5km COGs.
            if band_idx in [1, 3, 5]:
                expected_assets.add(f"CMI_C{band_idx:0>2d}_1km")
                expected_assets.add(f"CMI_C{band_idx:0>2d}_DQF_1km")
            if band_idx == 2:
                expected_assets.add(f"CMI_C{band_idx:0>2d}_0.5km")
                expected_assets.add(f"CMI_C{band_idx:0>2d}_DQF_0.5km")
        expected_assets.add("FDC-nc")
        expected_assets.add("FDC_Mask")
        expected_assets.add("FDC_Temp")
        expected_assets.add("FDC_Area")
        expected_assets.add("FDC_Power")
        expected_assets.add("FDC_DQF")
        self.assertEqual(set(item.assets.keys()), expected_assets)
        # Validate some properties
        # CMIP COG assets with higher resolution should have a different
        # transform and shape than the one pulled from MCMIP.
        c2_full_res = item.assets['CMI_C02_0.5km']
        c5_2km = item.assets['CMI_C05_2km']
        self.assertNotEqual(
            ProjectionExtension.ext(c2_full_res).shape,
            ProjectionExtension.ext(c5_2km).shape)
        # Ensure that the shape isn't set on the asset for assets that should match the item
        self.assertNotIn('proj:shape', c5_2km.extra_fields)
| 40.39934 | 92 | 0.632546 |
1b2d126675abff61344646cc5d62bbd7fd55fb6d | 8,921 | py | Python | functions/JD_multitaper.py | isgiddy/giddy-2020-roammiz | ffc710acee5342f64e8cb378e97114648c811f36 | [
"CC0-1.0"
] | 4 | 2020-09-21T11:27:48.000Z | 2022-02-21T11:39:08.000Z | functions/JD_multitaper.py | isgiddy/giddy-2020-roammiz | ffc710acee5342f64e8cb378e97114648c811f36 | [
"CC0-1.0"
] | 1 | 2021-01-04T16:16:45.000Z | 2021-01-04T16:16:45.000Z | functions/JD_multitaper.py | isgiddy/giddy-2020-roammiz | ffc710acee5342f64e8cb378e97114648c811f36 | [
"CC0-1.0"
] | null | null | null | from scipy.io.matlab import mio
import numpy as np
import spectrum as sp
from scipy import stats
def pmtmPH(x,dt=1.,nw=3,nfft=None):
    """
    Compute the power spectrum of ``x`` using the multitaper method with
    adaptive weighting.

    Parameters
    ----------
    x : array_like
        Input data vector.
    dt : float, optional
        Sampling interval, default is 1.
    nw : float, optional
        Time-bandwidth product; ``2*nw-1`` dpss tapers are applied,
        default is 3.  With ``nw=.5`` (or 1) a single dpss taper is
        applied.  (The original MATLAB code also accepted ``nw=0`` for a
        boxcar window; that mode is not implemented in this port.)
    nfft : int, optional
        Number of frequencies to evaluate P at; default is ``len(x)``
        for the two-sided transform.

    Returns
    -------
    P : ndarray
        One-sided power spectrum computed via the multitaper method.
    s : ndarray
        Frequency vector.
    ci : ndarray
        Approximate 95% confidence intervals.  A quick chi-squared
        approximation for v degrees of freedom is used; the degrees of
        freedom are close to but no larger than ``(2*nw-1)*2``, and for
        more than roughly 30 degrees of freedom the chi-squared
        distribution is close to Gaussian.

    Notes
    -----
    Adapted from Peter Huybers' pmtmPH.m (MIT, 2003) by Nicolas Barrier.
    Only the case ``len(x) <= nfft`` is implemented.
    """
    if nfft is None:
        nfft = len(x)
    nx = len(x)
    # Number of tapers: 2*nw-1, clipped to [1, nx-1].  Cast to int:
    # np.round returns a float, which np.tile (and dpss) reject.
    k = int(np.min([np.round(2 * nw), nx]))
    k = int(np.max([k - 1, 1]))
    s = np.arange(0, 1 / dt, 1 / (nfft * dt))
    w = nw / (dt * nx)  # half-bandwidth of the dpss
    E, V = sp.dpss(nx, NW=nw, k=k)  # tapers (nx, k) and their eigenvalues
    if nx <= nfft:
        tempx = np.transpose(np.tile(x, (k, 1)))
        # Power of each individual eigenspectrum.
        Pk = np.abs(np.fft.fft(E * tempx, n=nfft, axis=0))**2
    else:
        raise IOError('Not implemented yet')
    if k > 1:
        # Iteration to determine adaptive weights:
        xmat = np.mat(x).T
        sig2 = xmat.T * xmat / nx  # power
        P = (Pk[:, 0] + Pk[:, 1]) / 2.  # initial spectrum estimate
        P1 = np.zeros(nfft)
        tol = .0005 * sig2 / nfft
        a = sig2 * (1 - V)
        while np.sum(np.abs(P - P1) / nfft) > tol:
            Pmat = np.mat(P).T
            Vmat = np.mat(V)
            amat = np.mat(a)
            temp1 = np.mat(np.ones((1, k)))
            temp2 = np.mat(np.ones((nfft, 1)))
            b = (Pmat * temp1) / (Pmat * Vmat + temp2 * amat)  # weights
            temp3 = np.array(np.mat(np.ones((nfft, 1))) * Vmat)
            b = np.array(b)
            wk = b**2 * temp3
            P1 = np.sum(wk * Pk, axis=1) / np.sum(wk, axis=1)
            P, P1 = P1, P  # swap P and P1
        # Effective degrees of freedom per frequency.
        temp1 = b**2
        temp2 = np.mat(np.ones((nfft, 1))) * Vmat
        num = 2 * np.sum(temp1 * np.array(temp2), axis=1)**2
        temp1 = b**4
        temp2 = np.mat(np.ones((nfft, 1))) * np.mat(V**2)
        den = np.sum(temp1 * np.array(temp2), axis=1)
        v = num / den
    else:
        # Single taper (nw=.5 or 1): no adaptive weighting is possible.
        # The original port left P and v undefined here, so these inputs
        # raised NameError below.  A single chi-squared spectral estimate
        # has 2 degrees of freedom (as in the original MATLAB code).
        P = Pk[:, 0]
        v = 2. * np.ones(nfft)
    # Keep the one-sided part of the spectrum.
    select = np.arange(0, (nfft + 1) / 2 + 1).astype(np.int64)
    P = P[select]
    s = s[select]
    v = v[select]
    # Approximate chi-squared 95% confidence limits for v degrees of freedom.
    temp1 = 1 / (1 - 2 / (9 * v) - 1.96 * np.sqrt(2. / (9 * v)))**3
    temp2 = 1 / (1 - 2 / (9 * v) + 1.96 * np.sqrt(2 / (9 * v)))**3
    ci = np.array([temp1, temp2])
    return P, s, ci
def JD_spectra(ts,dt,ax,f,nrj=0,nw=3,unit='unit',col='k'):
    """
    Plot the multitaper spectrum of a time series (computed with pmtmPH).

    Parameters
    ----------
    ts : array_like
        Time series whose spectrum to plot.
    dt : float
        Time step of ``ts`` in seconds.
    ax : matplotlib axes
        Axes in which the drawing is done.
    f : float
        Frequency (cpy) at which the error bar is drawn (variance
        spectrum only).
    nrj : int, optional
        0 for a variance spectrum, 1 for an energy spectrum (F*Px).
    nw : float, optional
        Multitaper time-bandwidth product (2*nw-1 tapers are used).
    unit : str, optional
        Unit label of the y axis.
    col : str, optional
        Color of the line.

    Returns
    -------
    Px, F, Pxc
        Spectrum, frequency vector (cpy) and confidence bounds (cpy^-1).

    Notes
    -----
    Original MATLAB version by J. Deshayes, CNRS IRD, March 2013;
    adapted to Python by Nicolas Barrier.
    """
    anomaly = ts - np.mean(ts)
    Px, F, Pxc = pmtmPH(anomaly, nw=nw)
    # Convert from cycles per time step to cycles per year (cpy).
    per_year = 365 * 86400 / dt
    F = F * per_year
    Px = Px / per_year
    Pxc = Pxc / per_year
    # Confidence bounds at the first frequency, scaled by 1e2 as in the
    # original code, give the extent of the plotted error bar.
    barmax = Pxc[0, 0] * 1e2
    barmin = Pxc[1, 0] * 1e2
    if nrj == 0:
        ax.loglog(F, Px, color=col)
        ax.set_ylabel('power spectrum density ('+ unit+ '$^2$ cpy$^{-1}$)')
        ax.loglog([f, f], [barmin, barmax], color=col, marker='_')
    else:
        ax.loglog(F, F*Px, color=col)
        ax.set_ylabel('Energy power spectrum F*Px ('+ unit +'$^2$)')
    ax.set_xlabel('frequency (cpy)')
    ax.grid(True, which='both', axis='x')
    ax.grid(True, which='major', axis='y')
    return Px, F, Pxc
def JD_space_spectra(ts, dl, ax, f, nw=3, unit='unit', col='k'):
    """Compute and draw a multitaper wavenumber spectrum (via pmtmPH).

    Arguments:
        ts -- series whose spectrum to plot
        dl -- spatial step of ts in km
        ax -- axes to draw into
        f  -- wavenumber at which the error bar is drawn
    Optional arguments:
        nw   -- multitaper parameter (2*nw-1 tapers), default 3
        unit -- label of the y axis
        col  -- color of the line
    Returns:
        Px, F, Pxc, hh -- spectrum, wavenumber, error bars and line handle.
    Original Matlab by J. Deshayes (CNRS IRD, 2013); Python port by
    Nicolas Barrier.
    """
    npts = len(ts)
    demeaned = ts - np.mean(ts)
    Px, F, Pxc = pmtmPH(demeaned, nw=nw)
    # Rescale to cycles per km (wavenumber) and per-cpkm density.
    F = F / dl
    Px = Px * dl
    Pxc = Pxc * dl
    # Confidence-interval bounds, offset by 1e2 so the bar stays visible.
    barmax = Pxc[0, 0] * 1e2
    barmin = Pxc[1, 0] * 1e2
    hh = ax.loglog(F, Px, color=col)
    ax.set_ylabel('power spectrum density (' + unit + ' cpkm$^{-1}$)')
    ax.loglog([f, f], [barmin, barmax], color=col, marker='_')
    ax.set_xlabel('wavenumber (cpkm)')
    ax.grid(True, which='both', axis='x')
    ax.grid(True, which='major', axis='y')
    return Px, F, Pxc, hh
def plot_slope(Px, F, ax, fmin=None, fmax=None, col='k', lin='--', lw=2, offy=1):
    """Fit and draw the log-log slope of a spectrum over [fmin, fmax].

    Arguments:
        Px -- spectrum values
        F  -- frequency values
        ax -- axes in which the slope line is drawn
    Optional arguments:
        fmin, fmax -- frequency band for the fit (defaults: full range of F)
        col, lin, lw -- color, linestyle and linewidth of the drawn line
        offy -- vertical (log-space) offset applied to the drawn trend line
    Returns:
        (polyfit slope, linregress slope, 99% confidence interval).
        The two slope values agree; both are kept for interface
        compatibility with existing callers.
    Author: Nicolas Barrier
    """
    # BUGFIX: compare against None with `is`, not `==` (identity, not
    # equality; `== None` can misbehave with numpy arrays).
    if fmin is None:
        fmin = F.min()
    if fmax is None:
        fmax = F.max()
    # Restrict the fit to the requested band, excluding f == 0 where the
    # logarithm is undefined.
    i = np.nonzero((F >= fmin) & (F <= fmax) & (F != 0))[0]
    fout = F[i]
    pxout = Px[i]
    y = np.log(pxout)
    x = np.log(fout)
    from scipy import stats
    slope, intercept, r_value, p_value, std_err = stats.linregress(x, y)
    confidence_interval = 2.58 * std_err  # 99% two-sided normal quantile
    p = np.polyfit(x, y, 1)
    # Draw the fitted trend, shifted up by offy in log space.
    trend = np.exp(p[0] * x + p[1] + offy)
    ax.loglog(fout, trend, color=col, linestyle=lin, lw=lw)
    return p[0], slope, confidence_interval
def plot_ref_slope(fmin, fmax, f, ax, kvec=[2], col='k', lw=2, ls='--'):
    """Draw reference power-law slopes (e.g. k = -2, k = -4) on ``ax``.

    Arguments:
        fmin -- frequency where each reference slope starts
        fmax -- frequency where each reference slope ends
        f    -- y value of the slope at fmin
        ax   -- axes in which the slopes are drawn
    Optional arguments:
        kvec -- list of reference k values (each drawn as f * x**-k)
        col, lw, ls -- color, linewidth and linestyle of the slopes
    Author: Nicolas Barrier
    """
    xs = np.linspace(fmin, fmax, 5)
    for k in kvec:
        # y = f * (x / fmin) ** -k, computed in log space.
        ys = np.exp(np.log(f) + k * (np.log(fmin) - np.log(xs)))
        ax.loglog(xs, ys, color=col, linewidth=lw, linestyle=ls)
        ax.text(xs[-1], ys[-1], ' k = -' + str(k), ha='center', va='center',
                color=col, bbox=dict(boxstyle="round", fc="0.9"))
| 32.322464 | 96 | 0.589844 |
fb57d4f3cf3c7da4868f811d972bbda46f3dcd2b | 4,266 | py | Python | getFCInterfaceStats.py | louisjia/dcnm-rest-sample | b3f589061eb33035337175e1fc527122c7cda174 | [
"BSD-3-Clause"
] | 7 | 2018-11-05T21:19:22.000Z | 2022-01-05T11:05:43.000Z | getFCInterfaceStats.py | louisjia/dcnm-rest-sample | b3f589061eb33035337175e1fc527122c7cda174 | [
"BSD-3-Clause"
] | 3 | 2018-08-31T21:49:42.000Z | 2020-08-08T00:47:06.000Z | getFCInterfaceStats.py | louisjia/dcnm-rest-sample | b3f589061eb33035337175e1fc527122c7cda174 | [
"BSD-3-Clause"
] | 9 | 2018-08-20T21:05:41.000Z | 2021-05-24T09:30:37.000Z | import http.client
import ssl
import base64
import string
import json
__author__ = "Louis Jia"
__copyright__ = "Copyright (C) 2018 Cisco System"
def getRestToken(username, password, serverip):
    """Log on to a DCNM server and return its REST session token.

    username/password -- DCNM credentials used for HTTP Basic auth
    serverip -- DCNM server address (HTTPS, certificate not verified)
    Returns the token string extracted from the JSON logon response.
    """
    # NOTE: disables TLS certificate verification globally for this process.
    ssl._create_default_https_context = ssl._create_unverified_context
    conn = http.client.HTTPSConnection(serverip)
    payload = "{\"expirationTime\" : 10000000000}\n"
    # BUGFIX: base64.encodestring() was removed in Python 3.9. Use
    # b64encode directly, which also removes the need for the fragile
    # byte-repr string surgery (stripping "b'" and "\n'") the old code did.
    authenStr = "%s:%s" % (username, password)
    encoded = base64.b64encode(authenStr.encode("utf-8")).decode("ascii")
    authorizationStr = "Basic %s" % encoded
    print(authorizationStr)
    headers = {
        'content-type': "application/json",
        'authorization': authorizationStr,
        'cache-control': "no-cache"
    }
    conn.request("POST", "/rest/logon", payload, headers)
    res = conn.getresponse()
    data = res.read()
    longstr = data.decode("utf-8")
    # The response is JSON of the form {"Dcnm-Token": "<token>"}; splitting
    # on quotes picks the value. Fragile but kept for compatibility.
    strArr = longstr.split("\"")
    return strArr[3]
def getFabricId(serverip, switchip, resttoken):
    """Return the fabric id ('fid') of the switch with the given IP.

    Queries the DCNM inventory endpoint and scans the returned switch
    list; returns -1 when no switch matches ``switchip``.
    """
    ssl._create_default_https_context = ssl._create_unverified_context
    conn = http.client.HTTPSConnection(serverip)
    headers = {
        'dcnm-token': resttoken,
        'content-type': "application/x-www-form-urlencoded",
        'cache-control': "no-cache"
    }
    conn.request("GET", "/fm/fmrest/inventory/switches/?name=foo1&navId=-1", headers=headers)
    response = conn.getresponse()
    switches = json.loads(response.read().decode("utf-8"))
    for entry in switches:
        if entry['ipAddress'] == switchip:
            print(entry['fid'])
            return entry['fid']
    return -1
def getSwitchId(serverip, switchip, resttoken):
    """Return the database id ('switchDbID') of the switch with the given IP.

    Queries the DCNM inventory endpoint and scans the returned switch
    list; returns -1 when no switch matches ``switchip``.
    """
    ssl._create_default_https_context = ssl._create_unverified_context
    conn = http.client.HTTPSConnection(serverip)
    headers = {
        'dcnm-token': resttoken,
        'content-type': "application/x-www-form-urlencoded",
        'cache-control': "no-cache"
    }
    conn.request("GET", "/fm/fmrest/inventory/switches/?name=foo1&navId=-1", headers=headers)
    response = conn.getresponse()
    switches = json.loads(response.read().decode("utf-8"))
    for entry in switches:
        if entry['ipAddress'] == switchip:
            print(entry['switchDbID'])
            return entry['switchDbID']
    return -1
def getSwitchIntfId(serverip, switchid, interface, resttoken):
    """Return the interface database id ('endPortId') for an interface name.

    Lists the SAN interfaces of the switch identified by ``switchid`` and
    matches on ``ifName``; returns -1 when the interface is not found.
    """
    ssl._create_default_https_context = ssl._create_unverified_context
    conn = http.client.HTTPSConnection(serverip)
    headers = {
        'dcnm-token': resttoken,
        'content-type': "application/x-www-form-urlencoded",
        'cache-control': "no-cache"
    }
    path = ("/fm/fmrest/inventory/getInterfacesBySwitch/?switchDbID="
            + str(switchid) + "&network=SAN")
    conn.request("GET", path, headers=headers)
    response = conn.getresponse()
    interfaces = json.loads(response.read().decode("utf-8"))
    for entry in interfaces:
        if entry['ifName'] == interface:
            return entry['endPortId']
    return -1
def getInterfaceStats(serverip, fid, interface, resttoken):
    """Fetch and print rx/tx chart data for one interface.

    Prints the rx series, the tx series and their time axis (millisecond
    labels) from the DCNM performance-monitoring chart endpoint.
    """
    ssl._create_default_https_context = ssl._create_unverified_context
    conn = http.client.HTTPSConnection(serverip)
    headers = {
        'dcnm-token': resttoken,
        'content-type': "application/x-www-form-urlencoded",
        'cache-control': "no-cache"
    }
    path = ("/fm/fmrest/statistics/pmInterfaceChartData?interfaceDbId="
            + str(interface) + "&fid=" + str(fid) + "&interval=1&navId=-1")
    conn.request("GET", path, headers=headers)
    response = conn.getresponse()
    decoded = json.loads(response.read().decode("utf-8"))
    chart = decoded['chartDO']
    print("rx")
    print(chart['items'][0])
    print("tx")
    print(chart['items'][1])
    print("millisec")
    print(chart['xLabels'])
    return
# --- Example usage: walks the full lookup chain against a live DCNM ---
# (edit the server address, credentials, switch IP and interface first)

# DCNM server ip address
server = "172.10.10.10"

# Log on with DCNM username/password to obtain a REST token.
restToken = getRestToken("admin", "xxxxxxx", server)
print(restToken)

# Switch IP -> fabric id
fid = getFabricId(server, "172.25.174.139", restToken)
print(fid)

# Switch IP -> switch database id
switchid = getSwitchId(server, "172.25.174.139", restToken)
print(switchid)

# Switch database id + interface name -> interface database id
intfid = getSwitchIntfId(server, switchid, "fc1/16", restToken)
print(intfid)

# Print rx/tx statistics for the resolved interface.
getInterfaceStats(server, fid, intfid, restToken)
| 25.854545 | 157 | 0.691749 |
dc825e72aed877bc8105abf51201f204476dc0ab | 4,997 | py | Python | picore/src/pir2/display/DotDisplay.py | subhagho/r2d3.142 | 71f26b7da4ff2384ee1aa9727ea3d40cbcdf3718 | [
"Apache-2.0"
] | null | null | null | picore/src/pir2/display/DotDisplay.py | subhagho/r2d3.142 | 71f26b7da4ff2384ee1aa9727ea3d40cbcdf3718 | [
"Apache-2.0"
] | null | null | null | picore/src/pir2/display/DotDisplay.py | subhagho/r2d3.142 | 71f26b7da4ff2384ee1aa9727ea3d40cbcdf3718 | [
"Apache-2.0"
] | null | null | null | import pir2.common as common
import pir2.common.Logger as Logger
import pir2.display as display
import smbus
from time import sleep
# i2c bus (0 -- original Pi, 1 -- Rev 2 Pi)
I2CBUS = 1
# LCD Address
ADDRESS = 0x27
class DisplayI2CDot:
    """Thin wrapper around smbus.SMBus for talking to one I2C device.

    Each write is followed by a short sleep so the device has time to
    settle before the next transfer.
    """

    def __init__(self, addr, port=I2CBUS):
        self.addr = addr  # I2C slave address of the device
        self.bus = smbus.SMBus(port)

    # Write a single command
    def write_cmd(self, cmd):
        self.bus.write_byte(self.addr, cmd)
        sleep(0.0001)

    # Write a command and argument
    def write_cmd_arg(self, cmd, data):
        self.bus.write_byte_data(self.addr, cmd, data)
        sleep(0.0001)

    # Write a block of data
    def write_block_data(self, cmd, data):
        self.bus.write_block_data(self.addr, cmd, data)
        sleep(0.0001)

    # Read a single byte
    def read(self):
        return self.bus.read_byte(self.addr)

    # Read a byte for a given command/register
    def read_data(self, cmd):
        return self.bus.read_byte_data(self.addr, cmd)

    # Read a block of data
    def read_block_data(self, cmd):
        return self.bus.read_block_data(self.addr, cmd)
# commands
LCD_CLEARDISPLAY = 0x01
LCD_RETURNHOME = 0x02
LCD_ENTRYMODESET = 0x04
LCD_DISPLAYCONTROL = 0x08
LCD_CURSORSHIFT = 0x10
LCD_FUNCTIONSET = 0x20
LCD_SETCGRAMADDR = 0x40
LCD_SETDDRAMADDR = 0x80
# flags for display entry mode
LCD_ENTRYRIGHT = 0x00
LCD_ENTRYLEFT = 0x02
LCD_ENTRYSHIFTINCREMENT = 0x01
LCD_ENTRYSHIFTDECREMENT = 0x00
# flags for display on/off control
LCD_DISPLAYON = 0x04
LCD_DISPLAYOFF = 0x00
LCD_CURSORON = 0x02
LCD_CURSOROFF = 0x00
LCD_BLINKON = 0x01
LCD_BLINKOFF = 0x00
# flags for display/cursor shift
LCD_DISPLAYMOVE = 0x08
LCD_CURSORMOVE = 0x00
LCD_MOVERIGHT = 0x04
LCD_MOVELEFT = 0x00
# flags for function set
LCD_8BITMODE = 0x10
LCD_4BITMODE = 0x00
LCD_2LINE = 0x08
LCD_1LINE = 0x00
LCD_5x10DOTS = 0x04
LCD_5x8DOTS = 0x00
# flags for backlight control
LCD_BACKLIGHT = 0x08
LCD_NOBACKLIGHT = 0x00
En = 0b00000100 # Enable bit
Rw = 0b00000010 # Read/Write bit
Rs = 0b00000001 # Register select bit
CONST_CONFIG_DISPLAY_DOT_ADDR = 'display.dot.address'
CONST_CONFIG_DISPLAY_DOT_PORT = 'display.dot.port'
class DisplayDotLcd:
    """Character LCD driver (4-bit mode) over an I2C backpack.

    Bus address/port default to module constants but can be overridden
    from the application configuration.
    """

    # initializes objects and lcd
    def __init__(self, addr=ADDRESS, port=I2CBUS):
        # Read address/port overrides from the display config section.
        config = common.get_config(display.CONST_CONFIG_SECTION_NAME_DISPLAY)
        if config:
            c = config.get_option(display.CONST_CONFIG_SECTION_NAME_DISPLAY, CONST_CONFIG_DISPLAY_DOT_ADDR)
            if c:
                addr = int(c)
            c = config.get_option(display.CONST_CONFIG_SECTION_NAME_DISPLAY, CONST_CONFIG_DISPLAY_DOT_PORT)
            if c:
                port = int(c)
        self.lcd_device = DisplayI2CDot(addr, port)
        # Initialization: the 0x03,0x03,0x03,0x02 sequence switches the
        # controller into 4-bit mode; order and timing are significant.
        self.lcd_write(0x03)
        self.lcd_write(0x03)
        self.lcd_write(0x03)
        self.lcd_write(0x02)
        self.lcd_write(LCD_FUNCTIONSET | LCD_2LINE | LCD_5x8DOTS | LCD_4BITMODE)
        self.lcd_write(LCD_DISPLAYCONTROL | LCD_DISPLAYON)
        self.lcd_write(LCD_CLEARDISPLAY)
        self.lcd_write(LCD_ENTRYMODESET | LCD_ENTRYLEFT)
        sleep(0.2)

    # clocks EN to latch command
    def lcd_strobe(self, data):
        self.lcd_device.write_cmd(data | En | LCD_BACKLIGHT)
        sleep(.0005)
        self.lcd_device.write_cmd(((data & ~En) | LCD_BACKLIGHT))
        sleep(.0001)

    # Send one nibble (already in the high bits of data) and latch it.
    def lcd_write_four_bits(self, data):
        self.lcd_device.write_cmd(data | LCD_BACKLIGHT)
        self.lcd_strobe(data)

    # write a command to lcd
    def lcd_write(self, cmd, mode=0):
        # 4-bit transfers: high nibble first, then low nibble; `mode`
        # carries the RS bit (0 = command register, Rs = data register).
        self.lcd_write_four_bits(mode | (cmd & 0xF0))
        self.lcd_write_four_bits(mode | ((cmd << 4) & 0xF0))

    # write a character to lcd (or character rom) 0x09: backlight | RS=DR<
    # works!
    def lcd_write_char(self, charvalue, mode=1):
        self.lcd_write_four_bits(mode | (charvalue & 0xF0))
        self.lcd_write_four_bits(mode | ((charvalue << 4) & 0xF0))

    # put string function with optional char positioning
    def lcd_display_string(self, string, line=1, pos=0):
        # Row base DDRAM addresses used here: 0x00, 0x40, 0x14, 0x54.
        pos_new = 0
        if line == 1:
            pos_new = pos
        elif line == 2:
            pos_new = 0x40 + pos
        elif line == 3:
            pos_new = 0x14 + pos
        elif line == 4:
            pos_new = 0x54 + pos
        self.lcd_write(0x80 + pos_new)
        for char in string:
            self.lcd_write(ord(char), Rs)

    # clear lcd and set to home
    def lcd_clear(self):
        self.lcd_write(LCD_CLEARDISPLAY)
        self.lcd_write(LCD_RETURNHOME)

    # define backlight on/off (lcd.backlight(1); off= lcd.backlight(0)
    def backlight(self, state):  # for state, 1 = on, 0 = off
        if state == 1:
            self.lcd_device.write_cmd(LCD_BACKLIGHT)
        elif state == 0:
            self.lcd_device.write_cmd(LCD_NOBACKLIGHT)

    # add custom characters (0 - 7)
    def lcd_load_custom_chars(self, fontdata):
        # 0x40 sets the CGRAM address to 0; subsequent data writes fill
        # the glyph rows sequentially.
        self.lcd_write(0x40);
        for char in fontdata:
            for line in char:
                self.lcd_write_char(line)
8633a5b474c9c3cc2ae2149506f5cb1f70ab1d91 | 10,284 | py | Python | whatshap/merge.py | panguangze/whatshap | ab398340d6187b427fb067dd98eb157d779e9820 | [
"MIT"
] | null | null | null | whatshap/merge.py | panguangze/whatshap | ab398340d6187b427fb067dd98eb157d779e9820 | [
"MIT"
] | null | null | null | whatshap/merge.py | panguangze/whatshap | ab398340d6187b427fb067dd98eb157d779e9820 | [
"MIT"
] | null | null | null | import logging
from abc import ABC, abstractmethod
from typing import Dict
from math import log
from networkx import (
Graph,
number_of_nodes,
number_of_edges,
connected_components,
node_connected_component,
shortest_path,
)
from whatshap.core import Read, ReadSet
logger = logging.getLogger(__name__)
class ReadMergerBase(ABC):
    """Strategy interface: transform a ReadSet into a (merged) ReadSet."""

    @abstractmethod
    def merge(self, readset: ReadSet) -> ReadSet:
        pass
class ReadMerger(ReadMergerBase):
    """Merges reads into super reads via a graph of pairwise evidence.

    Pairs of overlapping reads are scored by (match - mismatch) counts
    under a per-nucleotide error model; blue connected components are
    merged, with not-blue edges used to split inconsistent components.
    """

    def __init__(self, error_rate, max_error_rate, positive_threshold, negative_threshold):
        # See merge() for the meaning of each parameter.
        self._error_rate = error_rate
        self._max_error_rate = max_error_rate
        self._positive_threshold = positive_threshold
        self._negative_threshold = negative_threshold

    def merge(self, readset: ReadSet) -> ReadSet:
        """
        Return a set of reads after merging together subsets of reads
        (into super reads) from an input readset according to a
        probabilistic model of how likely sets of reads are to appear
        together on one haplotype and on opposite haplotypes.

        readset -- the input .core.ReadSet object
        error_rate -- the probability that a nucleotide is wrong
        max_error_rate -- the maximum error rate of any edge of the read
        merging graph allowed before we discard it
        threshold -- the threshold of the ratio between the probabilities
        that a pair of reads come from the same haplotype and different
        haplotypes
        neg_threshold -- The threshold of the ratio between the
        probabilities that a pair of reads come from the same haplotype
        and different haplotypes.
        """
        logger.info(
            "Merging %d reads with error rate %.2f, maximum error rate %.2f, "
            "positive threshold %d and negative threshold %d ...",
            len(readset),
            self._error_rate,
            self._max_error_rate,
            self._positive_threshold,
            self._negative_threshold,
        )
        logger.debug("Merging started.")
        # Four evidence graphs over read ids:
        #   gblue    -- evidence FOR same-haplotype
        #   gred     -- evidence FOR opposite-haplotype
        #   gnotblue -- strong evidence AGAINST same-haplotype
        #   gnotred  -- strong evidence AGAINST opposite-haplotype
        gblue = Graph()
        gred = Graph()
        gnotblue = Graph()
        gnotred = Graph()

        # Probability that any nucleotide is wrong
        error_rate = self._error_rate
        logger.debug("Error Rate: %s", error_rate)

        # If an edge has too many errors, we discard it since it is not reliable
        max_error_rate = self._max_error_rate
        logger.debug("Max Error Rate: %s", max_error_rate)

        # Threshold of the ratio between the probabilities that the two reads
        # come from the same side or from different sides
        thr = self._positive_threshold
        logger.debug("Positive Threshold: %s", thr)

        # Threshold_neg is a more conservative threshold for the evidence
        # that two reads should not be clustered together.
        thr_neg = self._negative_threshold
        logger.debug("Negative Threshold: %s", thr_neg)

        # Convert probability-ratio thresholds into minimum
        # (match - mismatch) count differences under the error model.
        thr_diff = 1 + int(log(thr, (1 - error_rate) / (error_rate / 3)))
        thr_neg_diff = 1 + int(log(thr_neg, (1 - error_rate) / (error_rate / 3)))
        logger.debug("Thr. Diff.: %s - Thr. Neg. Diff.: %s", thr_diff, thr_neg_diff)

        logger.debug("Start reading the reads...")
        id = 0
        orig_reads = {}
        queue = {}
        reads = {}
        for read in readset:
            id += 1
            begin_str = read[0][0]
            snps = []
            orgn = []
            for variant in read:
                site = variant[0]
                zyg = variant[1]
                qual = variant[2]
                orgn.append([str(site), str(zyg), str(qual)])
                # Encode allele 0 as "G" and allele 1 as "C" so overlaps
                # can be scored as simple character (mis)matches.
                if int(zyg) == 0:
                    snps.append("G")
                else:
                    snps.append("C")
            begin = int(begin_str)
            end = begin + len(snps)
            orig_reads[id] = orgn
            gblue.add_node(id, begin=begin, end=end, sites="".join(snps))
            gnotblue.add_node(id, begin=begin, end=end, sites="".join(snps))
            gred.add_node(id, begin=begin, end=end, sites="".join(snps))
            gnotred.add_node(id, begin=begin, end=end, sites="".join(snps))
            queue[id] = {"begin": begin, "end": end, "sites": snps}
            reads[id] = {"begin": begin, "end": end, "sites": snps}
            # Drop queued reads that end before this one begins: they can
            # no longer overlap any later read.
            for x in [id for id in queue.keys() if queue[id]["end"] <= begin]:  # type: ignore
                del queue[x]
            for id1 in queue.keys():
                if id == id1:
                    continue
                match, mismatch = eval_overlap(queue[id1], queue[id])
                # Blue edge: enough informative sites, acceptable error
                # rate, and a match surplus above the positive threshold.
                if (
                    match + mismatch >= thr_neg_diff
                    and min(match, mismatch) / (match + mismatch) <= max_error_rate
                    and match - mismatch >= thr_diff
                ):
                    gblue.add_edge(id1, id, match=match, mismatch=mismatch)
                if mismatch - match >= thr_diff:
                    gred.add_edge(id1, id, match=match, mismatch=mismatch)
                if match - mismatch >= thr_neg_diff:
                    gnotred.add_edge(id1, id, match=match, mismatch=mismatch)
                if mismatch - match >= thr_neg_diff:
                    gnotblue.add_edge(id1, id, match=match, mismatch=mismatch)

        logger.debug("Finished reading the reads.")
        logger.debug("Number of reads: %s", id)
        logger.debug("Blue Graph")
        logger.debug(
            "Nodes: %s - Edges: %s - ConnComp: %s",
            number_of_nodes(gblue),
            number_of_edges(gblue),
            len(list(connected_components(gblue))),
        )
        logger.debug("Non-Blue Graph")
        logger.debug(
            "Nodes: %s - Edges: %s - ConnComp: %s",
            number_of_nodes(gnotblue),
            number_of_edges(gnotblue),
            len(list(connected_components(gnotblue))),
        )
        logger.debug("Red Graph")
        logger.debug(
            "Nodes: %s - Edges: %s - ConnComp: %s",
            number_of_nodes(gred),
            number_of_edges(gred),
            len(list(connected_components(gred))),
        )
        logger.debug("Non-Red Graph")
        logger.debug(
            "Nodes: %s - Edges: %s - ConnComp: %s",
            number_of_nodes(gnotred),
            number_of_edges(gnotred),
            len(list(connected_components(gnotred))),
        )

        # We consider the notblue edges as an evidence that two reads
        # should not be merged together
        # Since we want to merge each blue connected components into
        # a single superread, we check each notblue edge (r1, r2) and
        # we remove some blue edges so that r1 and r2 are not in the
        # same blue connected component
        blue_component = {}
        current_component = 0
        for conncomp in connected_components(gblue):
            for v in conncomp:
                blue_component[v] = current_component
            current_component += 1

        # Keep only the notblue edges that are inside a blue connected component
        good_notblue_edges = [
            (v, w) for (v, w) in gnotblue.edges() if blue_component[v] == blue_component[w]
        ]

        for (u, v) in good_notblue_edges:
            while v in node_connected_component(gblue, u):
                path = shortest_path(gblue, source=u, target=v)
                # Remove the edge with the smallest support
                # A better strategy is to weight each edge with -log p
                # and remove the minimum (u,v)-cut
                w, x = min(
                    zip(path[:-1], path[1:]),
                    key=lambda p: gblue[p[0]][p[1]]["match"] - gblue[p[0]][p[1]]["mismatch"],
                )
                gblue.remove_edge(w, x)

        # Merge blue components (somehow)
        logger.debug("Started Merging Reads...")
        superreads: Dict = {}  # superreads given by the clusters (if clustering)
        rep = {}  # cluster representative of a read in a cluster
        for cc in connected_components(gblue):
            if len(cc) > 1:
                r = min(cc)
                superreads[r] = {}
                for id in cc:
                    rep[id] = r

        # Accumulate per-site quality-weighted votes for each allele within
        # every cluster; index [0] holds allele-0 weight, [1] allele-1.
        for id in orig_reads:
            if id in rep:
                for tok in orig_reads[id]:
                    site = int(tok[0])
                    zyg = int(tok[1])
                    qual = int(tok[2])
                    r = rep[id]
                    if site not in superreads[r]:
                        superreads[r][site] = [0, 0]
                    superreads[r][site][zyg] += qual

        merged_reads = ReadSet()
        readn = 0
        for id in orig_reads:
            read = Read("read" + str(readn))
            readn += 1
            if id in rep:
                # Only the cluster representative emits the merged super
                # read; the winning allele's quality is the vote margin.
                if id == rep[id]:
                    for site in sorted(superreads[id]):
                        z = superreads[id][site]
                        if z[0] >= z[1]:
                            read.add_variant(site, 0, z[0] - z[1])
                        elif z[1] > z[0]:
                            read.add_variant(site, 1, z[1] - z[0])
                    merged_reads.add(read)
            else:
                # Unclustered reads pass through unchanged.
                for tok in orig_reads[id]:
                    read.add_variant(int(tok[0]), int(tok[1]), int(tok[2]))
                merged_reads.add(read)

        logger.debug("Finished merging reads.")
        logger.info(
            "... after merging: merged %d reads into %d reads", len(readset), len(merged_reads)
        )
        return merged_reads
class DoNothingReadMerger(ReadMergerBase):
    """No-op merger: returns the input ReadSet unchanged."""

    def merge(self, readset):
        return readset
def eval_overlap(n1, n2):
    """Count agreements between two overlapping reads.

    n1, n2 -- dicts with "begin" (start position) and "sites" (allele
    characters); n2 is assumed to start at or after n1.
    Returns a (match, mismatch) tuple counting positions where both
    characters are one of A/C/G/T; other characters are ignored.
    """
    offset = n2["begin"] - n1["begin"]
    bases = ("A", "C", "G", "T")
    match = 0
    mismatch = 0
    for c1, c2 in zip(n1["sites"][offset:], n2["sites"]):
        if c1 not in bases or c2 not in bases:
            continue
        if c1 == c2:
            match += 1
        else:
            mismatch += 1
    return (match, mismatch)
| 37.396364 | 95 | 0.540548 |
97c53cf87738e364e26d003e76af1901deb93143 | 1,090 | py | Python | util/reorder_samples.py | J-Moravec/pairtree | 91cbba628b78aea31034efb080976fdb47d83976 | [
"MIT"
] | null | null | null | util/reorder_samples.py | J-Moravec/pairtree | 91cbba628b78aea31034efb080976fdb47d83976 | [
"MIT"
] | null | null | null | util/reorder_samples.py | J-Moravec/pairtree | 91cbba628b78aea31034efb080976fdb47d83976 | [
"MIT"
] | null | null | null | import argparse
import json
import os
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), '..', 'lib'))
import inputparser
def _process(ssmfn, jsonfn, order):
    """Reorder the samples of an SSM file and its params JSON in place.

    ssmfn  -- path to the SSM file (rewritten via inputparser.write_ssms)
    jsonfn -- path to the params JSON (rewritten with reordered 'samples')
    order  -- comma-separated permutation of sample indices, e.g. "2,0,1"
    """
    params = inputparser.load_params(jsonfn)
    ssms = inputparser.load_ssms(ssmfn)
    perm = [int(tok) for tok in order.split(',')]

    nsamples = len(params['samples'])
    # The order string must be a full permutation matching the data width.
    assert set(perm) == set(range(nsamples))
    assert len(list(ssms.values())[0]['var_reads']) == nsamples

    params['samples'] = [params['samples'][idx] for idx in perm]
    per_sample_keys = ('var_reads', 'ref_reads', 'total_reads', 'vaf', 'omega_v')
    for ssm in ssms.values():
        for key in per_sample_keys:
            ssm[key] = ssm[key][perm]

    with open(jsonfn, 'w') as outf:
        json.dump(params, outf)
    inputparser.write_ssms(ssms, ssmfn)
def main():
    """CLI entry point: parse the three positional args and reorder."""
    parser = argparse.ArgumentParser(
        description='LOL HI THERE',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter
    )
    for arg_name in ('ssm_fn', 'params_fn', 'order'):
        parser.add_argument(arg_name)
    args = parser.parse_args()
    _process(args.ssm_fn, args.params_fn, args.order)


if __name__ == '__main__':
    main()
| 26.585366 | 73 | 0.687156 |
3f8bb88e8461b1bb2ab7c594b0103a12f4165c11 | 346 | py | Python | Tests/test_discord_embed_call.py | MichaelCduBois/whpy | 8964e97cee00d3bd8f38086f640d13f0fe95068b | [
"MIT"
] | null | null | null | Tests/test_discord_embed_call.py | MichaelCduBois/whpy | 8964e97cee00d3bd8f38086f640d13f0fe95068b | [
"MIT"
] | 1 | 2019-10-02T02:06:22.000Z | 2019-10-02T04:17:10.000Z | Tests/test_discord_embed_call.py | MichaelCduBois/whpy | 8964e97cee00d3bd8f38086f640d13f0fe95068b | [
"MIT"
] | 1 | 2019-10-02T03:30:17.000Z | 2019-10-02T03:30:17.000Z | from dotenv import load_dotenv
import os
import pytest
from WhPy import discord
# Load Environment Variables
load_dotenv("./env/.env")
# Test Variables
channel_id = os.getenv("WEBHOOK_CHANNEL_ID")
token = os.getenv("WEBHOOK_TOKEN")
url = os.getenv("WEBHOOK_URL")
def test_():
"""
Scenario:
When
Then
"""
pass
| 15.043478 | 44 | 0.676301 |
73fb4ed3457b6fbc64b1f1ad6e2bda1fad6b513f | 528 | py | Python | faculdade/atv3.py | Tkl02/Aula-python | b260b9bcc088974c9a7c05b61f93bde2e06f65ac | [
"MIT"
] | null | null | null | faculdade/atv3.py | Tkl02/Aula-python | b260b9bcc088974c9a7c05b61f93bde2e06f65ac | [
"MIT"
] | null | null | null | faculdade/atv3.py | Tkl02/Aula-python | b260b9bcc088974c9a7c05b61f93bde2e06f65ac | [
"MIT"
] | null | null | null | from gtts import gTTS #bibliotecas
from playsound import playsound
text = str(input("qual a frase? ")) #variavel para selecionar texto
# idioma = str(input("qual idioma")) #variavel para selecionar idioma
idioma = 'pt-br'
audio = 'meusom.mp3' #atribuindo nome para o audio
convers = gTTS ( #convertendo o codigo para audio
text, #recebendo valor do input text
lang = idioma #lang(traduzir) recebendo idioma
)
convers.save(audio) #salvando codigo com o nome da variavel audio | 40.615385 | 71 | 0.6875 |
4fa5fe67eb407b4ee33227f2a54836089316e5dd | 1,264 | py | Python | testproj/users/migrations/0002_setup_oauth2_apps.py | johnny-prnd/drf-yasg | 5a943250e62ac1ecab1faa3155c5f87ab68367a3 | [
"BSD-3-Clause"
] | 1 | 2019-10-31T12:13:21.000Z | 2019-10-31T12:13:21.000Z | testproj/users/migrations/0002_setup_oauth2_apps.py | johnny-prnd/drf-yasg | 5a943250e62ac1ecab1faa3155c5f87ab68367a3 | [
"BSD-3-Clause"
] | 4 | 2021-06-28T20:54:50.000Z | 2022-03-29T18:26:22.000Z | testproj/users/migrations/0002_setup_oauth2_apps.py | johnny-prnd/drf-yasg | 5a943250e62ac1ecab1faa3155c5f87ab68367a3 | [
"BSD-3-Clause"
] | 1 | 2020-05-29T08:55:51.000Z | 2020-05-29T08:55:51.000Z | # Generated by Django 2.1.3 on 2018-12-19 07:57
from django.conf import settings
from django.db import migrations
def add_oauth_apps(apps, schema_editor):
    """Seed the default OAuth2 password-grant application for 'admin'.

    Uses the historical model versions supplied by the migration state
    (not direct imports), so the migration stays valid as models evolve.
    """
    User = apps.get_model(settings.AUTH_USER_MODEL)
    Application = apps.get_model('oauth2_provider', 'application')

    admin_user = User.objects.get(username='admin')
    app_fields = {
        "user": admin_user,
        "client_type": "public",
        "authorization_grant_type": "password",
        "client_id": settings.OAUTH2_CLIENT_ID,
        "client_secret": settings.OAUTH2_CLIENT_SECRET,
        "redirect_uris": settings.OAUTH2_REDIRECT_URL,
        "name": settings.OAUTH2_APP_NAME,
    }
    # Idempotent: keyed on client_id so re-running the migration is safe.
    Application.objects.get_or_create(
        client_id=app_fields['client_id'], defaults=app_fields,
    )
class Migration(migrations.Migration):
    """Data migration: runs add_oauth_apps to seed the default OAuth2 app."""

    # Requires the user model, the oauth2_provider schema, and the admin
    # user created by the previous users migration.
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('oauth2_provider', '0006_auto_20171214_2232'),
        ('users', '0001_create_admin_user'),
    ]

    operations = [
        migrations.RunPython(add_oauth_apps)
    ]
| 31.6 | 83 | 0.670095 |
80fd410266ea0b924ac8582866322a4360184277 | 60,488 | py | Python | tests/pipeline/test_engine.py | fwagner-quantopian/zipline | 6f72d19554d3b1a05ae3b1451f3e805f3ee52360 | [
"Apache-2.0"
] | 1 | 2019-12-19T14:58:20.000Z | 2019-12-19T14:58:20.000Z | tests/pipeline/test_engine.py | fwagner-quantopian/zipline | 6f72d19554d3b1a05ae3b1451f3e805f3ee52360 | [
"Apache-2.0"
] | 1 | 2021-06-02T00:56:13.000Z | 2021-06-02T00:56:13.000Z | tests/pipeline/test_engine.py | fwagner-quantopian/zipline | 6f72d19554d3b1a05ae3b1451f3e805f3ee52360 | [
"Apache-2.0"
] | 1 | 2020-04-18T18:27:04.000Z | 2020-04-18T18:27:04.000Z | """
Tests for SimplePipelineEngine
"""
from __future__ import division
from collections import OrderedDict
from itertools import product
from operator import add, sub
from unittest import skipIf
from nose_parameterized import parameterized
import numpy as np
from numpy import (
arange,
array,
concatenate,
float32,
float64,
full,
full_like,
log,
nan,
tile,
where,
zeros,
)
from numpy.testing import assert_almost_equal
from pandas import (
Categorical,
DataFrame,
date_range,
Int64Index,
MultiIndex,
Series,
Timestamp,
)
from pandas.compat.chainmap import ChainMap
from pandas.util.testing import assert_frame_equal
from six import iteritems, itervalues
from toolz import merge
from zipline.assets.synthetic import make_rotating_equity_info
from zipline.errors import NoFurtherDataError
from zipline.lib.adjustment import MULTIPLY
from zipline.lib.labelarray import LabelArray
from zipline.pipeline import CustomFactor, Pipeline
from zipline.pipeline.data import (
Column, DataSet, EquityPricing, USEquityPricing,
)
from zipline.pipeline.data.testing import TestingDataSet
from zipline.pipeline.domain import (
EquitySessionDomain,
GENERIC,
JP_EQUITIES,
US_EQUITIES,
)
from zipline.pipeline.engine import SimplePipelineEngine
from zipline.pipeline.factors import (
AverageDollarVolume,
EWMA,
EWMSTD,
ExponentialWeightedMovingAverage,
ExponentialWeightedMovingStdDev,
MaxDrawdown,
SimpleMovingAverage,
)
from zipline.pipeline.filters import CustomFilter
from zipline.pipeline.loaders.equity_pricing_loader import (
EquityPricingLoader,
)
from zipline.pipeline.loaders.frame import DataFrameLoader
from zipline.pipeline.loaders.synthetic import (
PrecomputedLoader,
make_bar_data,
expected_bar_values_2d,
)
from zipline.pipeline.sentinels import NotSpecified
from zipline.pipeline.term import InputDates
from zipline.testing import (
AssetID,
AssetIDPlusDay,
check_arrays,
make_alternating_boolean_array,
make_cascading_boolean_array,
OpenPrice,
parameter_space,
product_upper_triangle,
)
import zipline.testing.fixtures as zf
from zipline.utils.exploding_object import NamedExplodingObject
from zipline.testing.core import create_simple_domain
from zipline.testing.predicates import assert_equal
from zipline.utils.memoize import lazyval
from zipline.utils.numpy_utils import bool_dtype, datetime64ns_dtype
from zipline.utils.pandas_utils import new_pandas, skip_pipeline_new_pandas
class RollingSumDifference(CustomFactor):
    """3-day rolling sum of (open - close) per asset."""
    window_length = 3
    inputs = [EquityPricing.open, EquityPricing.close]

    def compute(self, today, assets, out, open, close):
        daily_diff = open - close
        out[:] = daily_diff.sum(axis=0)
class MultipleOutputs(CustomFactor):
    """Single-day factor exposing raw open and close as separate outputs."""
    window_length = 1
    inputs = [EquityPricing.open, EquityPricing.close]
    outputs = ['open', 'close']

    def compute(self, today, assets, out, open, close):
        for name, window in (('open', open), ('close', close)):
            getattr(out, name)[:] = window
class OpenCloseSumAndDiff(CustomFactor):
    """
    Used for testing a CustomFactor with multiple outputs operating over a
    non-trivial window length.
    """
    inputs = [EquityPricing.open, EquityPricing.close]

    def compute(self, today, assets, out, open, close):
        # Sum each input over the window once, then combine.
        open_total = open.sum(axis=0)
        close_total = close.sum(axis=0)
        out.sum_[:] = open_total + close_total
        out.diff[:] = open_total - close_total
def assert_multi_index_is_product(testcase, index, *levels):
    """Assert that `index` is a MultiIndex containing exactly the
    cartesian product of `*levels`."""
    testcase.assertIsInstance(
        index, MultiIndex, "%s is not a MultiIndex" % index
    )
    expected_entries = set(product(*levels))
    testcase.assertEqual(set(index), expected_entries)
class ColumnArgs(tuple):
    """A tuple of Columns that defines equivalence based on the order of
    the columns' DataSets rather than the columns themselves.

    Used when comparing the columns passed to a loader's
    load_adjusted_array method, since we want to assert that they are
    ordered by DataSet.
    """

    def __new__(cls, *cols):
        return super(ColumnArgs, cls).__new__(cls, cols)

    @classmethod
    def sorted_by_ds(cls, *cols):
        ordered = sorted(cols, key=lambda col: col.dataset)
        return cls(*ordered)

    def by_ds(self):
        return tuple(col.dataset for col in self)

    def __eq__(self, other):
        # Same members, and same DataSet ordering.
        if set(self) != set(other):
            return False
        return self.by_ds() == other.by_ds()

    def __hash__(self):
        return hash(frozenset(self))
class RecordingPrecomputedLoader(PrecomputedLoader):
    """PrecomputedLoader that records the column tuples it is asked to
    load, so tests can assert on load grouping/ordering."""

    def __init__(self, *args, **kwargs):
        super(RecordingPrecomputedLoader, self).__init__(*args, **kwargs)
        self.load_calls = []  # one ColumnArgs entry per load_adjusted_array call

    def load_adjusted_array(self, domain, columns, dates, sids, mask):
        # Record the columns (order-sensitive via ColumnArgs), then delegate.
        self.load_calls.append(ColumnArgs(*columns))
        return super(RecordingPrecomputedLoader, self).load_adjusted_array(
            domain, columns, dates, sids, mask,
        )
class RollingSumSum(CustomFactor):
    """Adds all inputs elementwise, then sums the result over the window."""

    def compute(self, today, assets, out, *inputs):
        assert len(self.inputs) == len(inputs)
        combined = sum(inputs)
        out[:] = combined.sum(axis=0)
class WithConstantInputs(zf.WithAssetFinder):
    """Fixture mixin: a pipeline engine whose pricing loader serves
    constant OHLC values for four assets over Jan-Mar 2014 (US domain)."""

    asset_ids = ASSET_FINDER_EQUITY_SIDS = 1, 2, 3, 4
    START_DATE = Timestamp('2014-01-01', tz='utc')
    END_DATE = Timestamp('2014-03-01', tz='utc')
    ASSET_FINDER_COUNTRY_CODE = 'US'

    @classmethod
    def init_class_fixtures(cls):
        super(WithConstantInputs, cls).init_class_fixtures()
        cls.domain = create_simple_domain(
            start=cls.START_DATE,
            end=cls.END_DATE,
            country_code=cls.ASSET_FINDER_COUNTRY_CODE,
        )
        cls.constants = {
            # Every day, assume every stock starts at 2, goes down to 1,
            # goes up to 4, and finishes at 3.
            EquityPricing.low: 1,
            EquityPricing.open: 2,
            EquityPricing.close: 3,
            EquityPricing.high: 4,
        }
        cls.dates = date_range(
            cls.START_DATE,
            cls.END_DATE,
            freq='D',
            tz='UTC',
        )
        cls.loader = PrecomputedLoader(
            constants=cls.constants,
            dates=cls.dates,
            sids=cls.asset_ids,
        )
        cls.assets = cls.asset_finder.retrieve_all(cls.asset_ids)
        # Engine that serves every column from the constant loader and
        # defaults to the simple domain built above.
        cls.engine = SimplePipelineEngine(
            lambda c: cls.loader,
            cls.asset_finder,
            default_domain=cls.domain
        )
class ConstantInputTestCase(WithConstantInputs,
                            zf.WithAssetFinder,
                            zf.WithTradingCalendars,
                            zf.ZiplineTestCase):
    """Tests of SimplePipelineEngine against constant-valued loaders, so
    that expected pipeline outputs can be computed in closed form.
    """
    def test_bad_dates(self):
        """Running a pipeline with start_date after end_date should raise."""
        p = Pipeline()
        msg = "start_date must be before or equal to end_date .*"
        with self.assertRaisesRegexp(ValueError, msg):
            self.engine.run_pipeline(p, self.dates[2], self.dates[1])
    def test_fail_usefully_on_insufficient_data(self):
        """A lookback window longer than the available history should raise
        NoFurtherDataError rather than silently truncating.
        """
        class SomeFactor(CustomFactor):
            inputs = [EquityPricing.close]
            window_length = 10
            def compute(self, today, assets, out, closes):
                pass
        p = Pipeline(columns={'t': SomeFactor()})
        # self.dates[9] is the earliest date we should be able to compute.
        self.engine.run_pipeline(p, self.dates[9], self.dates[9])
        # We shouldn't be able to compute dates[8], since we only know about 8
        # prior dates, and we need a window length of 10.
        with self.assertRaises(NoFurtherDataError):
            self.engine.run_pipeline(p, self.dates[8], self.dates[8])
    def test_input_dates_provided_by_default(self):
        """An InputDates() input should receive the window's date labels."""
        class TestFactor(CustomFactor):
            inputs = [InputDates(), EquityPricing.close]
            window_length = 10
            dtype = datetime64ns_dtype
            def compute(self, today, assets, out, dates, closes):
                first, last = dates[[0, -1]]
                assert last == today.asm8
                assert len(dates) == len(closes) == self.window_length
                out[:] = first
        p = Pipeline(columns={'t': TestFactor()})
        results = self.engine.run_pipeline(p, self.dates[9], self.dates[10])
        # All results are the same, so just grab one column.
        column = results.unstack().iloc[:, 0].values
        check_arrays(column, self.dates[:2].values)
    def test_same_day_pipeline(self):
        """A single-day run should compute on data as of the prior session."""
        factor = AssetID()
        asset = self.asset_ids[0]
        p = Pipeline(columns={'f': factor}, screen=factor <= asset)
        # The crux of this is that when we run the pipeline for a single day
        # (i.e. start and end dates are the same) we should accurately get
        # data for the day prior.
        result = self.engine.run_pipeline(p, self.dates[1], self.dates[1])
        self.assertEqual(result['f'][0], 1.0)
    def test_screen(self):
        """A screen should restrict output rows to assets passing the filter."""
        asset_ids = array(self.asset_ids)
        num_dates = 5
        dates = self.dates[10:10 + num_dates]
        factor = AssetID()
        for asset_id in asset_ids:
            p = Pipeline(columns={'f': factor}, screen=factor <= asset_id)
            result = self.engine.run_pipeline(p, dates[0], dates[-1])
            expected_sids = asset_ids[asset_ids <= asset_id]
            expected_assets = self.asset_finder.retrieve_all(expected_sids)
            expected_result = DataFrame(
                index=MultiIndex.from_product([dates, expected_assets]),
                data=tile(expected_sids.astype(float), [len(dates)]),
                columns=['f'],
            )
            assert_frame_equal(result, expected_result)
    def test_single_factor(self):
        """A single constant-input factor should produce its closed-form
        value everywhere, with or without an always-true screen.
        """
        assets = self.assets
        result_shape = (num_dates, num_assets) = (5, len(assets))
        dates = self.dates[10:10 + num_dates]
        factor = RollingSumDifference()
        expected_result = -factor.window_length
        # Since every asset will pass the screen, these should be equivalent.
        pipelines = [
            Pipeline(columns={'f': factor}),
            Pipeline(
                columns={'f': factor},
                screen=factor.eq(expected_result),
            ),
        ]
        for p in pipelines:
            result = self.engine.run_pipeline(p, dates[0], dates[-1])
            self.assertEqual(set(result.columns), {'f'})
            assert_multi_index_is_product(
                self, result.index, dates, assets
            )
            check_arrays(
                result['f'].unstack().values,
                full(result_shape, expected_result, dtype=float),
            )
    def test_multiple_rolling_factors(self):
        """Factors with differing window lengths and inputs should coexist
        in one pipeline and each produce their closed-form value.
        """
        assets = self.assets
        shape = num_dates, num_assets = (5, len(assets))
        dates = self.dates[10:10 + num_dates]
        short_factor = RollingSumDifference(window_length=3)
        long_factor = RollingSumDifference(window_length=5)
        high_factor = RollingSumDifference(
            window_length=3,
            inputs=[EquityPricing.open, EquityPricing.high],
        )
        pipeline = Pipeline(
            columns={
                'short': short_factor,
                'long': long_factor,
                'high': high_factor,
            }
        )
        results = self.engine.run_pipeline(pipeline, dates[0], dates[-1])
        self.assertEqual(set(results.columns), {'short', 'high', 'long'})
        assert_multi_index_is_product(
            self, results.index, dates, assets
        )
        # row-wise sum over an array whose values are all (1 - 2)
        check_arrays(
            results['short'].unstack().values,
            full(shape, -short_factor.window_length, dtype=float),
        )
        check_arrays(
            results['long'].unstack().values,
            full(shape, -long_factor.window_length, dtype=float),
        )
        # row-wise sum over an array whose values are all (1 - 3)
        check_arrays(
            results['high'].unstack().values,
            full(shape, -2 * high_factor.window_length, dtype=float),
        )
    def test_numeric_factor(self):
        """Arithmetic combinations of factors should evaluate correctly."""
        constants = self.constants
        num_dates = 5
        dates = self.dates[10:10 + num_dates]
        high, low = EquityPricing.high, EquityPricing.low
        open, close = EquityPricing.open, EquityPricing.close
        high_minus_low = RollingSumDifference(inputs=[high, low])
        open_minus_close = RollingSumDifference(inputs=[open, close])
        avg = (high_minus_low + open_minus_close) / 2
        results = self.engine.run_pipeline(
            Pipeline(
                columns={
                    'high_low': high_minus_low,
                    'open_close': open_minus_close,
                    'avg': avg,
                },
            ),
            dates[0],
            dates[-1],
        )
        high_low_result = results['high_low'].unstack()
        expected_high_low = 3.0 * (constants[high] - constants[low])
        assert_frame_equal(
            high_low_result,
            DataFrame(expected_high_low, index=dates, columns=self.assets),
        )
        open_close_result = results['open_close'].unstack()
        expected_open_close = 3.0 * (constants[open] - constants[close])
        assert_frame_equal(
            open_close_result,
            DataFrame(expected_open_close, index=dates, columns=self.assets),
        )
        avg_result = results['avg'].unstack()
        expected_avg = (expected_high_low + expected_open_close) / 2.0
        assert_frame_equal(
            avg_result,
            DataFrame(expected_avg, index=dates, columns=self.assets),
        )
    def test_masked_factor(self):
        """
        Test that a Custom Factor computes the correct values when passed a
        mask. The mask/filter should be applied prior to computing any values,
        as opposed to computing the factor across the entire universe of
        assets. Any assets that are filtered out should be filled with missing
        values.
        """
        dates = self.dates[5:8]
        assets = self.assets
        asset_ids = self.asset_ids
        constants = self.constants
        num_dates = len(dates)
        num_assets = len(assets)
        open = EquityPricing.open
        close = EquityPricing.close
        factor1_value = constants[open]
        factor2_value = 3.0 * (constants[open] - constants[close])
        def create_expected_results(expected_value, mask):
            # Masked-out cells should be NaN in the final output.
            expected_values = where(mask, expected_value, nan)
            return DataFrame(expected_values, index=dates, columns=assets)
        cascading_mask = AssetIDPlusDay() < (asset_ids[-1] + dates[0].day)
        expected_cascading_mask_result = make_cascading_boolean_array(
            shape=(num_dates, num_assets),
        )
        alternating_mask = (AssetIDPlusDay() % 2).eq(0)
        expected_alternating_mask_result = make_alternating_boolean_array(
            shape=(num_dates, num_assets), first_value=False,
        )
        masks = cascading_mask, alternating_mask
        expected_mask_results = (
            expected_cascading_mask_result,
            expected_alternating_mask_result,
        )
        for mask, expected_mask in zip(masks, expected_mask_results):
            # Test running a pipeline with a single masked factor.
            columns = {'factor1': OpenPrice(mask=mask), 'mask': mask}
            pipeline = Pipeline(columns=columns)
            results = self.engine.run_pipeline(pipeline, dates[0], dates[-1])
            mask_results = results['mask'].unstack()
            check_arrays(mask_results.values, expected_mask)
            factor1_results = results['factor1'].unstack()
            factor1_expected = create_expected_results(factor1_value,
                                                       mask_results)
            assert_frame_equal(factor1_results, factor1_expected)
            # Test running a pipeline with a second factor. This ensures that
            # adding another factor to the pipeline with a different window
            # length does not cause any unexpected behavior, especially when
            # both factors share the same mask.
            columns['factor2'] = RollingSumDifference(mask=mask)
            pipeline = Pipeline(columns=columns)
            results = self.engine.run_pipeline(pipeline, dates[0], dates[-1])
            mask_results = results['mask'].unstack()
            check_arrays(mask_results.values, expected_mask)
            factor1_results = results['factor1'].unstack()
            factor2_results = results['factor2'].unstack()
            factor1_expected = create_expected_results(factor1_value,
                                                       mask_results)
            factor2_expected = create_expected_results(factor2_value,
                                                       mask_results)
            assert_frame_equal(factor1_results, factor1_expected)
            assert_frame_equal(factor2_results, factor2_expected)
    def test_rolling_and_nonrolling(self):
        """Windowed factors and windowless `.latest` terms should be
        computable in the same pipeline run.
        """
        open_ = EquityPricing.open
        close = EquityPricing.close
        volume = EquityPricing.volume
        # Test for thirty days up to the last day that we think all
        # the assets existed.
        dates_to_test = self.dates[-30:]
        constants = {
            open_: 1,
            close: 2,
            volume: 3,
        }
        loader = PrecomputedLoader(
            constants=constants,
            dates=self.dates,
            sids=self.asset_ids,
        )
        engine = SimplePipelineEngine(lambda column: loader, self.asset_finder)
        sumdiff = RollingSumDifference()
        result = engine.run_pipeline(
            Pipeline(
                columns={
                    'sumdiff': sumdiff,
                    'open': open_.latest,
                    'close': close.latest,
                    'volume': volume.latest,
                },
                domain=self.domain,
            ),
            dates_to_test[0],
            dates_to_test[-1]
        )
        self.assertIsNotNone(result)
        self.assertEqual(
            {'sumdiff', 'open', 'close', 'volume'},
            set(result.columns)
        )
        result_index = self.asset_ids * len(dates_to_test)
        result_shape = (len(result_index),)
        check_arrays(
            result['sumdiff'],
            Series(
                index=result_index,
                data=full(result_shape, -3, dtype=float),
            ),
        )
        for name, const in [('open', 1), ('close', 2), ('volume', 3)]:
            check_arrays(
                result[name],
                Series(
                    index=result_index,
                    data=full(result_shape, const, dtype=float),
                ),
            )
    def test_factor_with_single_output(self):
        """
        Test passing an `outputs` parameter of length 1 to a CustomFactor.
        """
        dates = self.dates[5:10]
        assets = self.assets
        num_dates = len(dates)
        open = EquityPricing.open
        open_values = [self.constants[open]] * num_dates
        open_values_as_tuple = [(self.constants[open],)] * num_dates
        single_output = OpenPrice(outputs=['open'])
        pipeline = Pipeline(
            columns={
                'open_instance': single_output,
                'open_attribute': single_output.open,
            },
        )
        results = self.engine.run_pipeline(pipeline, dates[0], dates[-1])
        # The instance `single_output` itself will compute a numpy.recarray
        # when added as a column to our pipeline, so we expect its output
        # values to be 1-tuples.
        open_instance_expected = {
            asset: open_values_as_tuple for asset in assets
        }
        open_attribute_expected = {asset: open_values for asset in assets}
        for colname, expected_values in (
                ('open_instance', open_instance_expected),
                ('open_attribute', open_attribute_expected)):
            column_results = results[colname].unstack()
            expected_results = DataFrame(
                expected_values, index=dates, columns=assets, dtype=float64,
            )
            assert_frame_equal(column_results, expected_results)
    def test_factor_with_multiple_outputs(self):
        """Each output of a multi-output factor should be usable as its own
        pipeline column, and each should respect the shared mask.
        """
        dates = self.dates[5:10]
        assets = self.assets
        asset_ids = self.asset_ids
        constants = self.constants
        num_dates = len(dates)
        num_assets = len(assets)
        open = EquityPricing.open
        close = EquityPricing.close
        def create_expected_results(expected_value, mask):
            # Masked-out cells should be NaN in the final output.
            expected_values = where(mask, expected_value, nan)
            return DataFrame(expected_values, index=dates, columns=assets)
        cascading_mask = AssetIDPlusDay() < (asset_ids[-1] + dates[0].day)
        expected_cascading_mask_result = make_cascading_boolean_array(
            shape=(num_dates, num_assets),
        )
        alternating_mask = (AssetIDPlusDay() % 2).eq(0)
        expected_alternating_mask_result = make_alternating_boolean_array(
            shape=(num_dates, num_assets), first_value=False,
        )
        expected_no_mask_result = full(
            shape=(num_dates, num_assets), fill_value=True, dtype=bool_dtype,
        )
        masks = cascading_mask, alternating_mask, NotSpecified
        expected_mask_results = (
            expected_cascading_mask_result,
            expected_alternating_mask_result,
            expected_no_mask_result,
        )
        for mask, expected_mask in zip(masks, expected_mask_results):
            open_price, close_price = MultipleOutputs(mask=mask)
            pipeline = Pipeline(
                columns={'open_price': open_price, 'close_price': close_price},
            )
            if mask is not NotSpecified:
                pipeline.add(mask, 'mask')
            results = self.engine.run_pipeline(pipeline, dates[0], dates[-1])
            for colname, case_column in (('open_price', open),
                                         ('close_price', close)):
                if mask is not NotSpecified:
                    mask_results = results['mask'].unstack()
                    check_arrays(mask_results.values, expected_mask)
                output_results = results[colname].unstack()
                output_expected = create_expected_results(
                    constants[case_column], expected_mask,
                )
                assert_frame_equal(output_results, output_expected)
    def test_instance_of_factor_with_multiple_outputs(self):
        """
        Test adding a CustomFactor instance, which has multiple outputs, as a
        pipeline column directly. Its computed values should be tuples
        containing the computed values of each of its outputs.
        """
        dates = self.dates[5:10]
        assets = self.assets
        num_dates = len(dates)
        num_assets = len(assets)
        constants = self.constants
        open_values = [constants[EquityPricing.open]] * num_assets
        close_values = [constants[EquityPricing.close]] * num_assets
        expected_values = [list(zip(open_values, close_values))] * num_dates
        expected_results = DataFrame(
            expected_values, index=dates, columns=assets, dtype=float64,
        )
        multiple_outputs = MultipleOutputs()
        pipeline = Pipeline(columns={'instance': multiple_outputs})
        results = self.engine.run_pipeline(pipeline, dates[0], dates[-1])
        instance_results = results['instance'].unstack()
        assert_frame_equal(instance_results, expected_results)
    def test_custom_factor_outputs_parameter(self):
        """The `outputs` constructor parameter should define attribute-style
        outputs whose computed values match closed-form expectations.
        """
        dates = self.dates[5:10]
        assets = self.assets
        num_dates = len(dates)
        num_assets = len(assets)
        constants = self.constants
        def create_expected_results(expected_value):
            expected_values = full(
                (num_dates, num_assets), expected_value, float64,
            )
            return DataFrame(expected_values, index=dates, columns=assets)
        for window_length in range(1, 3):
            sum_, diff = OpenCloseSumAndDiff(
                outputs=['sum_', 'diff'], window_length=window_length,
            )
            pipeline = Pipeline(columns={'sum_': sum_, 'diff': diff})
            results = self.engine.run_pipeline(pipeline, dates[0], dates[-1])
            for colname, op in ('sum_', add), ('diff', sub):
                output_results = results[colname].unstack()
                output_expected = create_expected_results(
                    op(
                        constants[EquityPricing.open] * window_length,
                        constants[EquityPricing.close] * window_length,
                    )
                )
                assert_frame_equal(output_results, output_expected)
    def test_loader_given_multiple_columns(self):
        """Each loader should be invoked once per group of columns it
        serves, grouped by dataset, and the merged results should be
        correct.
        """
        class Loader1DataSet1(DataSet):
            col1 = Column(float)
            col2 = Column(float32)
            domain = self.domain
        class Loader1DataSet2(DataSet):
            col1 = Column(float32)
            col2 = Column(float32)
            domain = self.domain
        class Loader2DataSet(DataSet):
            col1 = Column(float32)
            col2 = Column(float32)
            domain = self.domain
        constants1 = {Loader1DataSet1.col1: 1,
                      Loader1DataSet1.col2: 2,
                      Loader1DataSet2.col1: 3,
                      Loader1DataSet2.col2: 4}
        loader1 = RecordingPrecomputedLoader(constants=constants1,
                                             dates=self.dates,
                                             sids=self.assets)
        constants2 = {Loader2DataSet.col1: 5,
                      Loader2DataSet.col2: 6}
        loader2 = RecordingPrecomputedLoader(constants=constants2,
                                             dates=self.dates,
                                             sids=self.assets)
        engine = SimplePipelineEngine(
            lambda column:
            loader2 if column.dataset == Loader2DataSet else loader1,
            self.asset_finder,
        )
        pipe_col1 = RollingSumSum(inputs=[Loader1DataSet1.col1,
                                          Loader1DataSet2.col1,
                                          Loader2DataSet.col1],
                                  window_length=2)
        pipe_col2 = RollingSumSum(inputs=[Loader1DataSet1.col2,
                                          Loader1DataSet2.col2,
                                          Loader2DataSet.col2],
                                  window_length=3)
        pipe_col3 = RollingSumSum(inputs=[Loader2DataSet.col1],
                                  window_length=3)
        columns = OrderedDict([
            ('pipe_col1', pipe_col1),
            ('pipe_col2', pipe_col2),
            ('pipe_col3', pipe_col3),
        ])
        result = engine.run_pipeline(
            Pipeline(columns=columns, domain=self.domain),
            self.dates[2],  # index is >= the largest window length - 1
            self.dates[-1]
        )
        min_window = min(pip_col.window_length
                         for pip_col in itervalues(columns))
        col_to_val = ChainMap(constants1, constants2)
        vals = {name: (sum(col_to_val[col] for col in pipe_col.inputs)
                       * pipe_col.window_length)
                for name, pipe_col in iteritems(columns)}
        index = MultiIndex.from_product([self.dates[2:], self.assets])
        def expected_for_col(col):
            # Columns with longer windows start later; pad the gap with NaN.
            val = vals[col]
            offset = columns[col].window_length - min_window
            return concatenate(
                [
                    full(offset * index.levshape[1], nan),
                    full(
                        (index.levshape[0] - offset) * index.levshape[1],
                        val,
                        float,
                    )
                ],
            )
        expected = DataFrame(
            data={col: expected_for_col(col) for col in vals},
            index=index,
            columns=columns,
        )
        assert_frame_equal(result, expected)
        self.assertEqual(set(loader1.load_calls),
                         {ColumnArgs.sorted_by_ds(Loader1DataSet1.col1,
                                                  Loader1DataSet2.col1),
                          ColumnArgs.sorted_by_ds(Loader1DataSet1.col2,
                                                  Loader1DataSet2.col2)})
        self.assertEqual(set(loader2.load_calls),
                         {ColumnArgs.sorted_by_ds(Loader2DataSet.col1,
                                                  Loader2DataSet.col2)})
# Use very large sids that don't fit in an int32 as a regression test
# against bugs with 32-bit integer overflow in the adjustment reader.
HUGE_SID = np.iinfo('int32').max + 1
class FrameInputTestCase(zf.WithAssetFinder,
                         zf.WithTradingCalendars,
                         zf.ZiplineTestCase):
    """Tests running pipelines against DataFrameLoader, including the
    application of adjustments. Sids are larger than int32 max (HUGE_SID)
    as an overflow regression test.
    """
    asset_ids = ASSET_FINDER_EQUITY_SIDS = range(HUGE_SID, HUGE_SID + 3)
    start = START_DATE = Timestamp('2015-01-01', tz='utc')
    end = END_DATE = Timestamp('2015-01-31', tz='utc')
    ASSET_FINDER_COUNTRY_CODE = 'US'
    @classmethod
    def init_class_fixtures(cls):
        """Compute the session dates and asset objects shared by all tests."""
        super(FrameInputTestCase, cls).init_class_fixtures()
        cls.dates = date_range(
            cls.start,
            cls.end,
            freq=cls.trading_calendar.day,
            tz='UTC',
        )
        cls.assets = cls.asset_finder.retrieve_all(cls.asset_ids)
        cls.domain = US_EQUITIES
    @lazyval
    def base_mask(self):
        # All-True frame of shape (dates x assets).
        return self.make_frame(True)
    def make_frame(self, data):
        """Broadcast ``data`` into a DataFrame of dates x assets."""
        return DataFrame(data, columns=self.assets, index=self.dates)
    def test_compute_with_adjustments(self):
        """A moving average over an adjusted column should equal the same
        average over a baseline with the inverse adjustments pre-applied.
        """
        dates, asset_ids = self.dates, self.asset_ids
        low, high = EquityPricing.low, EquityPricing.high
        apply_idxs = [3, 10, 16]
        def apply_date(idx, offset=0):
            return dates[apply_idxs[idx] + offset]
        adjustments = DataFrame.from_records(
            [
                dict(
                    kind=MULTIPLY,
                    sid=asset_ids[1],
                    value=2.0,
                    start_date=None,
                    end_date=apply_date(0, offset=-1),
                    apply_date=apply_date(0),
                ),
                dict(
                    kind=MULTIPLY,
                    sid=asset_ids[1],
                    value=3.0,
                    start_date=None,
                    end_date=apply_date(1, offset=-1),
                    apply_date=apply_date(1),
                ),
                dict(
                    kind=MULTIPLY,
                    sid=asset_ids[1],
                    value=5.0,
                    start_date=None,
                    end_date=apply_date(2, offset=-1),
                    apply_date=apply_date(2),
                ),
            ]
        )
        low_base = DataFrame(self.make_frame(30.0))
        low_loader = DataFrameLoader(low, low_base.copy(), adjustments=None)
        # Pre-apply inverse of adjustments to the baseline.
        high_base = DataFrame(self.make_frame(30.0))
        high_base.iloc[:apply_idxs[0], 1] /= 2.0
        high_base.iloc[:apply_idxs[1], 1] /= 3.0
        high_base.iloc[:apply_idxs[2], 1] /= 5.0
        high_loader = DataFrameLoader(high, high_base, adjustments)
        # Dispatch uses the concrete specializations, not generic columns.
        get_loader = {
            USEquityPricing.low: low_loader,
            USEquityPricing.high: high_loader
        }.__getitem__
        engine = SimplePipelineEngine(get_loader, self.asset_finder)
        for window_length in range(1, 4):
            low_mavg = SimpleMovingAverage(
                inputs=[EquityPricing.low],
                window_length=window_length,
            )
            high_mavg = SimpleMovingAverage(
                inputs=[EquityPricing.high],
                window_length=window_length,
            )
            bounds = product_upper_triangle(range(window_length, len(dates)))
            for start, stop in bounds:
                results = engine.run_pipeline(
                    Pipeline(
                        columns={'low': low_mavg, 'high': high_mavg},
                        domain=self.domain,
                    ),
                    dates[start],
                    dates[stop],
                )
                self.assertEqual(set(results.columns), {'low', 'high'})
                iloc_bounds = slice(start, stop + 1)  # +1 to include end date
                low_results = results.unstack()['low']
                assert_frame_equal(low_results, low_base.iloc[iloc_bounds])
                high_results = results.unstack()['high']
                assert_frame_equal(high_results, high_base.iloc[iloc_bounds])
class SyntheticBcolzTestCase(zf.WithAdjustmentReader,
                             zf.WithAssetFinder,
                             zf.ZiplineTestCase):
    """Pipeline engine tests against synthetic bcolz daily bars for assets
    whose lifetimes start and end at staggered dates.
    """
    first_asset_start = Timestamp('2015-04-01', tz='UTC')
    START_DATE = Timestamp('2015-01-01', tz='utc')
    END_DATE = Timestamp('2015-08-01', tz='utc')

    @classmethod
    def make_equity_info(cls):
        """Create six assets with rotating (staggered, finite) lifetimes."""
        cls.equity_info = ret = make_rotating_equity_info(
            num_assets=6,
            first_start=cls.first_asset_start,
            frequency=cls.trading_calendar.day,
            periods_between_starts=4,
            asset_lifetime=8,
            exchange='NYSE',
        )
        return ret

    @classmethod
    def make_exchanges_info(cls, *args, **kwargs):
        return DataFrame({'exchange': ['NYSE'], 'country_code': ['US']})

    @classmethod
    def make_equity_daily_bar_data(cls, country_code, sids):
        return make_bar_data(
            cls.equity_info,
            cls.equity_daily_bar_days,
        )

    @classmethod
    def init_class_fixtures(cls):
        """Build a pricing loader over the bcolz bars and an engine bound
        to US equities.
        """
        super(SyntheticBcolzTestCase, cls).init_class_fixtures()
        cls.all_asset_ids = cls.asset_finder.sids
        cls.last_asset_end = cls.equity_info['end_date'].max()
        cls.pipeline_loader = EquityPricingLoader.without_fx(
            cls.bcolz_equity_daily_bar_reader,
            cls.adjustment_reader,
        )
        cls.engine = SimplePipelineEngine(
            lambda c: cls.pipeline_loader,
            cls.asset_finder,
            default_domain=US_EQUITIES,
        )

    def write_nans(self, df):
        """
        Write nans to the locations in data corresponding to the (date, asset)
        pairs for which we wouldn't have data for `asset` on `date` in a
        backtest.
        Parameters
        ----------
        df : pd.DataFrame
            A DataFrame with a DatetimeIndex as index and an object index of
            Assets as columns.
        This means that we write nans for dates after an asset's end_date and
        **on or before** an asset's start_date. The assymetry here is because
        of the fact that, on the morning of an asset's first date, we haven't
        yet seen any trades for that asset, so we wouldn't be able to show any
        useful data to the user.
        """
        # Mask out with nans all the dates on which each asset didn't exist
        index = df.index
        min_, max_ = index[[0, -1]]
        for asset in df.columns:
            # ``start``/``end`` are integer positions, so slice positionally
            # with .iloc. (The end slice previously used ``.ix``, which has
            # been removed from pandas; .iloc also tolerates end + 1 running
            # past the last row, yielding an empty slice.)
            col = df.columns.get_loc(asset)
            if asset.start_date >= min_:
                start = index.get_loc(asset.start_date, method='bfill')
                df.iloc[:start + 1, col] = nan  # +1 to overwrite start_date
            if asset.end_date <= max_:
                end = index.get_loc(asset.end_date)
                df.iloc[end + 1:, col] = nan  # +1 to *not* overwrite end_date

    def test_SMA(self):
        """SMA over bcolz close prices should match a pandas rolling mean
        over the raw bars shifted back one session.
        """
        window_length = 5
        asset_ids = self.all_asset_ids
        dates = date_range(
            self.first_asset_start + self.trading_calendar.day,
            self.last_asset_end,
            freq=self.trading_calendar.day,
        )
        dates_to_test = dates[window_length:]
        SMA = SimpleMovingAverage(
            inputs=(EquityPricing.close,),
            window_length=window_length,
        )
        results = self.engine.run_pipeline(
            Pipeline(columns={'sma': SMA}),
            dates_to_test[0],
            dates_to_test[-1],
        )
        # Shift back the raw inputs by a trading day because we expect our
        # computed results to be computed using values anchored on the
        # **previous** day's data.
        expected_raw = DataFrame(
            expected_bar_values_2d(
                dates - self.trading_calendar.day,
                asset_ids,
                self.equity_info,
                'close',
            ),
        ).rolling(window_length, min_periods=1).mean().values
        expected = DataFrame(
            # Truncate off the extra rows needed to compute the SMAs.
            expected_raw[window_length:],
            index=dates_to_test,  # dates_to_test is dates[window_length:]
            columns=self.asset_finder.retrieve_all(asset_ids),
        )
        self.write_nans(expected)
        result = results['sma'].unstack()
        assert_frame_equal(result, expected)

    def test_drawdown(self):
        # The monotonically-increasing data produced by SyntheticDailyBarWriter
        # exercises two pathological cases for MaxDrawdown.  The actual
        # computed results are pretty much useless (everything is either NaN)
        # or zero, but verifying we correctly handle those corner cases is
        # valuable.
        window_length = 5
        asset_ids = self.all_asset_ids
        dates = date_range(
            self.first_asset_start + self.trading_calendar.day,
            self.last_asset_end,
            freq=self.trading_calendar.day,
        )
        dates_to_test = dates[window_length:]
        drawdown = MaxDrawdown(
            inputs=(EquityPricing.close,),
            window_length=window_length,
        )
        results = self.engine.run_pipeline(
            Pipeline(columns={'drawdown': drawdown}),
            dates_to_test[0],
            dates_to_test[-1],
        )
        # We expect NaNs when the asset was undefined, otherwise 0 everywhere,
        # since the input is always increasing.
        expected = DataFrame(
            data=zeros((len(dates_to_test), len(asset_ids)), dtype=float),
            index=dates_to_test,
            columns=self.asset_finder.retrieve_all(asset_ids),
        )
        self.write_nans(expected)
        result = results['drawdown'].unstack()
        assert_frame_equal(expected, result)
class ParameterizedFactorTestCase(zf.WithAssetFinder,
                                  zf.WithTradingCalendars,
                                  zf.ZiplineTestCase):
    """Tests of parameterized factors (EWMA/EWMSTD constructors,
    AverageDollarVolume) against deterministic arange-based data.
    """
    sids = ASSET_FINDER_EQUITY_SIDS = Int64Index([1, 2, 3])
    START_DATE = Timestamp('2015-01-31', tz='UTC')
    END_DATE = Timestamp('2015-03-01', tz='UTC')
    ASSET_FINDER_COUNTRY_CODE = '??'
    @classmethod
    def init_class_fixtures(cls):
        """Build DataFrameLoaders for open/close/volume over arange data."""
        super(ParameterizedFactorTestCase, cls).init_class_fixtures()
        day = cls.trading_calendar.day
        cls.dates = dates = date_range(
            '2015-02-01',
            '2015-02-28',
            freq=day,
            tz='UTC',
        )
        sids = cls.sids
        cls.raw_data = DataFrame(
            data=arange(len(dates) * len(sids), dtype=float).reshape(
                len(dates), len(sids),
            ),
            index=dates,
            columns=cls.asset_finder.retrieve_all(sids),
        )
        # NaN out the even entries to exercise missing-data handling.
        cls.raw_data_with_nans = cls.raw_data.where((cls.raw_data % 2) != 0)
        open_loader = DataFrameLoader(
            EquityPricing.open,
            cls.raw_data_with_nans,
        )
        close_loader = DataFrameLoader(EquityPricing.close, cls.raw_data)
        volume_loader = DataFrameLoader(
            EquityPricing.volume,
            cls.raw_data * 2,
        )
        loader_map = {
            EquityPricing.open: open_loader,
            EquityPricing.close: close_loader,
            EquityPricing.volume: volume_loader,
        }
        def get_loader(c):
            return loader_map[c.unspecialize()]
        cls.engine = SimplePipelineEngine(
            get_loader,
            cls.asset_finder,
            default_domain=EquitySessionDomain(cls.dates, '??'),
        )
    def expected_ewma(self, window_length, decay_rate):
        """Reference EWMA computed with pandas, windowed to match the factor."""
        alpha = 1 - decay_rate
        span = (2 / alpha) - 1
        # XXX: This is a comically inefficient way to compute a windowed EWMA.
        # Don't use it outside of testing.  We're using rolling-apply of an
        # ewma (which is itself a rolling-window function) because we only want
        # to look at ``window_length`` rows at a time.
        return self.raw_data.rolling(window_length).apply(
            lambda subarray: (DataFrame(subarray)
                              .ewm(span=span)
                              .mean()
                              .values[-1])
        )[window_length:]
    def expected_ewmstd(self, window_length, decay_rate):
        """Reference EWMSTD computed with pandas, windowed like the factor."""
        alpha = 1 - decay_rate
        span = (2 / alpha) - 1
        # XXX: This is a comically inefficient way to compute a windowed
        # EWMSTD.  Don't use it outside of testing.  We're using rolling-apply
        # of an ewma (which is itself a rolling-window function) because we
        # only want to look at ``window_length`` rows at a time.
        return self.raw_data.rolling(window_length).apply(
            lambda subarray: (DataFrame(subarray)
                              .ewm(span=span)
                              .std()
                              .values[-1])
        )[window_length:]
    @parameterized.expand([
        (3,),
        (5,),
    ])
    def test_ewm_stats(self, window_length):
        """EWMA/EWMSTD factors should match the pandas references for
        several decay rates.
        """
        def ewma_name(decay_rate):
            return 'ewma_%s' % decay_rate
        def ewmstd_name(decay_rate):
            return 'ewmstd_%s' % decay_rate
        decay_rates = [0.25, 0.5, 0.75]
        ewmas = {
            ewma_name(decay_rate): EWMA(
                inputs=(EquityPricing.close,),
                window_length=window_length,
                decay_rate=decay_rate,
            )
            for decay_rate in decay_rates
        }
        ewmstds = {
            ewmstd_name(decay_rate): EWMSTD(
                inputs=(EquityPricing.close,),
                window_length=window_length,
                decay_rate=decay_rate,
            )
            for decay_rate in decay_rates
        }
        all_results = self.engine.run_pipeline(
            Pipeline(columns=merge(ewmas, ewmstds)),
            self.dates[window_length],
            self.dates[-1],
        )
        for decay_rate in decay_rates:
            ewma_result = all_results[ewma_name(decay_rate)].unstack()
            ewma_expected = self.expected_ewma(window_length, decay_rate)
            assert_frame_equal(ewma_result, ewma_expected)
            ewmstd_result = all_results[ewmstd_name(decay_rate)].unstack()
            ewmstd_expected = self.expected_ewmstd(window_length, decay_rate)
            assert_frame_equal(ewmstd_result, ewmstd_expected)
    @staticmethod
    def decay_rate_to_span(decay_rate):
        alpha = 1 - decay_rate
        return (2 / alpha) - 1
    @staticmethod
    def decay_rate_to_com(decay_rate):
        alpha = 1 - decay_rate
        return (1 / alpha) - 1
    @staticmethod
    def decay_rate_to_halflife(decay_rate):
        return log(.5) / log(decay_rate)
    def ewm_cases():
        # Helper used only to parameterize the tests below; deleted from the
        # class namespace afterwards.
        return product([EWMSTD, EWMA], [3, 5, 10])
    @parameterized.expand(ewm_cases())
    def test_from_span(self, type_, span):
        """from_span should imply the decay rate corresponding to ``span``."""
        from_span = type_.from_span(
            inputs=[EquityPricing.close],
            window_length=20,
            span=span,
        )
        implied_span = self.decay_rate_to_span(from_span.params['decay_rate'])
        assert_almost_equal(span, implied_span)
    @parameterized.expand(ewm_cases())
    def test_from_halflife(self, type_, halflife):
        """from_halflife should imply the decay rate for ``halflife``."""
        # Use the parameterized type (EWMA or EWMSTD) rather than hard-coding
        # EWMA, so EWMSTD.from_halflife is exercised as well.
        from_hl = type_.from_halflife(
            inputs=[EquityPricing.close],
            window_length=20,
            halflife=halflife,
        )
        implied_hl = self.decay_rate_to_halflife(from_hl.params['decay_rate'])
        assert_almost_equal(halflife, implied_hl)
    @parameterized.expand(ewm_cases())
    def test_from_com(self, type_, com):
        """from_center_of_mass should imply the decay rate for ``com``."""
        # Use the parameterized type (EWMA or EWMSTD) rather than hard-coding
        # EWMA, so EWMSTD.from_center_of_mass is exercised as well.
        from_com = type_.from_center_of_mass(
            inputs=[EquityPricing.close],
            window_length=20,
            center_of_mass=com,
        )
        implied_com = self.decay_rate_to_com(from_com.params['decay_rate'])
        assert_almost_equal(com, implied_com)
    del ewm_cases
    def test_ewm_aliasing(self):
        self.assertIs(ExponentialWeightedMovingAverage, EWMA)
        self.assertIs(ExponentialWeightedMovingStdDev, EWMSTD)
    def test_dollar_volume(self):
        """AverageDollarVolume should equal rolling mean of close * volume,
        with NaN inputs treated as zero dollar volume.
        """
        results = self.engine.run_pipeline(
            Pipeline(
                columns={
                    'dv1': AverageDollarVolume(window_length=1),
                    'dv5': AverageDollarVolume(window_length=5),
                    'dv1_nan': AverageDollarVolume(
                        window_length=1,
                        inputs=[EquityPricing.open, EquityPricing.volume],
                    ),
                    'dv5_nan': AverageDollarVolume(
                        window_length=5,
                        inputs=[EquityPricing.open, EquityPricing.volume],
                    ),
                }
            ),
            self.dates[5],
            self.dates[-1],
        )
        expected_1 = (self.raw_data[5:] ** 2) * 2
        assert_frame_equal(results['dv1'].unstack(), expected_1)
        expected_5 = ((self.raw_data ** 2) * 2).rolling(5).mean()[5:]
        assert_frame_equal(results['dv5'].unstack(), expected_5)
        # The following two use EquityPricing.open and .volume as inputs.
        # The former uses self.raw_data_with_nans, and the latter uses
        # .raw_data * 2.  Thus we multiply instead of squaring as above.
        expected_1_nan = (self.raw_data_with_nans[5:]
                          * self.raw_data[5:] * 2).fillna(0)
        assert_frame_equal(results['dv1_nan'].unstack(), expected_1_nan)
        expected_5_nan = ((self.raw_data_with_nans * self.raw_data * 2)
                          .fillna(0)
                          .rolling(5).mean()
                          [5:])
        assert_frame_equal(results['dv5_nan'].unstack(), expected_5_nan)
class StringColumnTestCase(zf.WithSeededRandomPipelineEngine,
                           zf.ZiplineTestCase):
    """Tests of pipelines over string-dtype (classifier) columns."""
    ASSET_FINDER_COUNTRY_CODE = 'US'
    SEEDED_RANDOM_PIPELINE_DEFAULT_DOMAIN = US_EQUITIES

    @skipIf(new_pandas, skip_pipeline_new_pandas)
    def test_string_classifiers_produce_categoricals(self):
        """
        Test that string-based classifiers produce pandas categoricals as their
        outputs.
        """
        column = TestingDataSet.categorical_col
        pipeline = Pipeline(columns={'c': column.latest})
        query_dates = self.trading_days[-10:]
        start_date, end_date = query_dates[[0, -1]]
        result = self.run_pipeline(pipeline, start_date, end_date)
        # The raw column of results should already be categorical.
        assert isinstance(result.c.values, Categorical)
        raw_values = self.raw_expected_values(
            column,
            start_date,
            end_date,
        )
        labels = LabelArray(raw_values, column.missing_value)
        all_assets = self.asset_finder.retrieve_all(self.asset_finder.sids)
        expected_frame = labels.as_categorical_frame(
            index=query_dates,
            columns=all_assets,
        )
        assert_frame_equal(result.c.unstack(), expected_frame)
class WindowSafetyPropagationTestCase(zf.WithSeededRandomPipelineEngine,
                                      zf.ZiplineTestCase):
    """Window safety of a ranked term should survive both arithmetic on the
    term and aliasing of it, in either order.
    """
    ASSET_FINDER_COUNTRY_CODE = 'US'
    SEEDED_RANDOM_PIPELINE_DEFAULT_DOMAIN = US_EQUITIES
    SEEDED_RANDOM_PIPELINE_SEED = 5

    def test_window_safety_propagation(self):
        sessions = self.trading_days[-30:]
        start_date, end_date = sessions[[-10, -1]]
        col = TestingDataSet.float_col
        # Three equivalent spellings of "10-day average of (rank + 1)"; all
        # require the ranked term to propagate window safety.
        pipeline = Pipeline(
            columns={
                'average_of_rank_plus_one': SimpleMovingAverage(
                    inputs=[col.latest.rank() + 1],
                    window_length=10,
                ),
                'average_of_aliased_rank_plus_one': SimpleMovingAverage(
                    inputs=[col.latest.rank().alias('some_alias') + 1],
                    window_length=10,
                ),
                'average_of_rank_plus_one_aliased': SimpleMovingAverage(
                    inputs=[(col.latest.rank() + 1).alias('some_alias')],
                    window_length=10,
                ),
            }
        )
        results = self.run_pipeline(pipeline, start_date, end_date).unstack()
        raw = self.raw_expected_values(
            col,
            sessions[-19],
            sessions[-1],
        )
        universe = self.asset_finder.retrieve_all(
            self.ASSET_FINDER_EQUITY_SIDS,
        )
        expected_ranks = DataFrame(
            raw,
            index=sessions[-19:],
            columns=universe,
        ).rank(axis='columns')
        # All three expressions should be equivalent and evaluate to this.
        expected_result = (
            (expected_ranks + 1)
            .rolling(10)
            .mean()
            .dropna(how='any')
        )
        for colname in results.columns.levels[0]:
            assert_equal(expected_result, results[colname])
class PopulateInitialWorkspaceTestCase(WithConstantInputs,
                                       zf.WithAssetFinder,
                                       zf.WithTradingCalendars,
                                       zf.ZiplineTestCase):
    """Tests for the ``populate_initial_workspace`` engine hook.

    The hook pre-seeds the engine's term workspace before computation.
    These tests verify that pre-seeded arrays are used verbatim (the
    swapped-out terms are never computed, and no loader is dispatched
    for their inputs) and that the seeded values flow through to terms
    that depend on them, both directly and through a window.
    """

    @parameter_space(window_length=[3, 5], pipeline_length=[5, 10])
    def test_populate_initial_workspace(self, window_length, pipeline_length):
        column = EquityPricing.low
        base_term = column.latest
        # Take a Z-Score here so that the precomputed term is window-safe. The
        # z-score will never actually get computed because we swap it out.
        precomputed_term = (base_term.zscore()).alias('precomputed_term')
        # A term that has `precomputed_term` as an input.
        depends_on_precomputed_term = precomputed_term + 1
        # A term that requires a window of `precomputed_term`.
        depends_on_window_of_precomputed_term = SimpleMovingAverage(
            inputs=[precomputed_term],
            window_length=window_length,
        )
        precomputed_term_with_window = SimpleMovingAverage(
            inputs=(column,),
            window_length=window_length,
        ).alias('precomputed_term_with_window')
        depends_on_precomputed_term_with_window = (
            precomputed_term_with_window + 1
        )
        column_value = self.constants[column]
        # Sentinel values: the asserts below compare pipeline output
        # against these, proving the seeded arrays were actually used.
        precomputed_term_value = -column_value
        precomputed_term_with_window_value = -(column_value + 1)

        def populate_initial_workspace(initial_workspace,
                                       root_mask_term,
                                       execution_plan,
                                       dates,
                                       assets):
            # Swap in constant arrays for the two precomputed terms.
            def shape_for_term(term):
                # (ndates, nassets) shape the engine expects for `term`.
                ndates = len(execution_plan.mask_and_dates_for_term(
                    term,
                    root_mask_term,
                    initial_workspace,
                    dates,
                )[1])
                nassets = len(assets)
                return (ndates, nassets)
            ws = initial_workspace.copy()
            ws[precomputed_term] = full(
                shape_for_term(precomputed_term),
                precomputed_term_value,
                dtype=float64,
            )
            ws[precomputed_term_with_window] = full(
                shape_for_term(precomputed_term_with_window),
                precomputed_term_with_window_value,
                dtype=float64,
            )
            return ws

        def dispatcher(c):
            # The engine should never need a loader for the input of a
            # term that was swapped out of the workspace.
            self.assertIsNot(
                c, column, "Shouldn't need to dispatch precomputed term input!"
            )
            return self.loader

        engine = SimplePipelineEngine(
            dispatcher,
            self.asset_finder,
            populate_initial_workspace=populate_initial_workspace,
        )
        results = engine.run_pipeline(
            Pipeline({
                'precomputed_term': precomputed_term,
                'precomputed_term_with_window': precomputed_term_with_window,
                'depends_on_precomputed_term': depends_on_precomputed_term,
                'depends_on_precomputed_term_with_window':
                    depends_on_precomputed_term_with_window,
                'depends_on_window_of_precomputed_term':
                    depends_on_window_of_precomputed_term,
            }, domain=self.domain),
            self.dates[-pipeline_length],
            self.dates[-1],
        )
        # NOTE: the first two assert_equal statements previously ended
        # with a stray trailing comma, wrapping each call's None result
        # in a discarded 1-tuple (harmless copy-paste typo, now removed).
        assert_equal(
            results['precomputed_term'].values,
            full_like(
                results['precomputed_term'],
                precomputed_term_value,
            ),
        )
        assert_equal(
            results['precomputed_term_with_window'].values,
            full_like(
                results['precomputed_term_with_window'],
                precomputed_term_with_window_value,
            ),
        )
        assert_equal(
            results['depends_on_precomputed_term'].values,
            full_like(
                results['depends_on_precomputed_term'],
                precomputed_term_value + 1,
            ),
        )
        assert_equal(
            results['depends_on_precomputed_term_with_window'].values,
            full_like(
                results['depends_on_precomputed_term_with_window'],
                precomputed_term_with_window_value + 1,
            ),
        )
        assert_equal(
            results['depends_on_window_of_precomputed_term'].values,
            full_like(
                results['depends_on_window_of_precomputed_term'],
                precomputed_term_value,
            ),
        )
class ChunkedPipelineTestCase(zf.WithSeededRandomPipelineEngine,
                              zf.ZiplineTestCase):
    PIPELINE_START_DATE = Timestamp('2006-01-05', tz='UTC')
    END_DATE = Timestamp('2006-12-29', tz='UTC')
    ASSET_FINDER_COUNTRY_CODE = 'US'

    def test_run_chunked_pipeline(self):
        """
        Test that running a pipeline in chunks produces the same result as if
        it were run all at once
        """
        columns = {
            'float': TestingDataSet.float_col.latest,
            'custom_factor': SimpleMovingAverage(
                inputs=[TestingDataSet.float_col],
                window_length=10,
            ),
        }
        pipeline = Pipeline(columns=columns, domain=US_EQUITIES)
        if not new_pandas:
            # Categorical columns are only supported on old pandas.
            pipeline.add(TestingDataSet.categorical_col.latest, 'categorical')

        single_run = self.run_pipeline(
            pipeline,
            start_date=self.PIPELINE_START_DATE,
            end_date=self.END_DATE,
        )
        chunked_run = self.run_chunked_pipeline(
            pipeline=pipeline,
            start_date=self.PIPELINE_START_DATE,
            end_date=self.END_DATE,
            chunksize=22,
        )
        self.assertTrue(chunked_run.equals(single_run))

    def test_concatenate_empty_chunks(self):
        # Concatenating chunked results must cope with chunks in which no
        # asset passes the screen: empty pandas DataFrames lose their
        # dtype information, which makes this slightly tricky.
        class FalseOnOddMonths(CustomFilter):
            """Filter that is False for every asset during odd months."""
            inputs = ()
            window_length = 1

            def compute(self, today, assets, out):
                out[:] = (today.month % 2 == 0)

        pipeline = Pipeline(
            columns={
                'float': TestingDataSet.float_col.latest,
                'bool': TestingDataSet.bool_col.latest,
            },
            # A screen that is False for every asset a significant
            # portion of the time.
            screen=FalseOnOddMonths(),
            domain=US_EQUITIES,
        )
        if not new_pandas:
            # Categorical columns are only supported on old pandas.
            pipeline.add(TestingDataSet.categorical_col.latest, 'categorical')

        # A chunksize this small guarantees that some chunks fall
        # entirely inside an odd month and so contain no rows at all.
        self.run_chunked_pipeline(
            pipeline=pipeline,
            start_date=self.PIPELINE_START_DATE,
            end_date=self.END_DATE,
            chunksize=5,
        )
class MaximumRegressionTest(zf.WithSeededRandomPipelineEngine,
                            zf.ZiplineTestCase):
    ASSET_FINDER_EQUITY_SIDS = (1, 2, 3, 4, 5, 6, 7, 8, 9, 10)

    def test_no_groupby_maximum(self):
        """Regression test: ``factor.top(1)`` must work with no groupby."""
        factor = TestingDataSet.float_col.latest
        is_maximum = factor.top(1)
        domain = EquitySessionDomain(
            self.trading_days,
            self.ASSET_FINDER_COUNTRY_CODE,
        )
        result = self.run_pipeline(
            Pipeline({'factor': factor, 'maximum': is_maximum}, domain=domain),
            self.trading_days[-5],
            self.trading_days[-1],
        )

        # Exactly one asset should be flagged as the maximum on each day.
        daily_max_count = result.groupby(level=0)['maximum'].sum()
        self.assertTrue((daily_max_count == 1).all())

        # The flagged value must agree with a plain pandas groupby max.
        expected_max = result.groupby(level=0).factor.max()
        actual_max = result.factor[result.maximum].reset_index(
            level=1, drop=True,
        )
        assert_equal(expected_max, actual_max)
class ResolveDomainTestCase(zf.ZiplineTestCase):

    def test_resolve_domain(self):
        # SimplePipelineEngine requires a get_loader and an asset_finder
        # at construction, but resolve_domain should never touch either;
        # exploding stubs make any accidental use fail loudly.
        exploding_loader = NamedExplodingObject(
            'self._get_loader',
            'SimplePipelineEngine does not currently depend on get_loader '
            'at construction time. Update this test if it now does.'
        )
        exploding_finder = NamedExplodingObject(
            'self._finder',
            'SimplePipelineEngine does not currently depend on asset_finder '
            'at construction time. Update this test if it now does.'
        )
        engine_generic = SimplePipelineEngine(
            exploding_loader, exploding_finder, default_domain=GENERIC,
        )
        engine_jp = SimplePipelineEngine(
            exploding_loader, exploding_finder, default_domain=JP_EQUITIES,
        )
        pipe_without_domain = Pipeline()
        pipe_us = Pipeline(domain=US_EQUITIES)

        # A pipeline with an explicit domain wins over the engine default.
        self.assertIs(engine_jp.resolve_domain(pipe_us), US_EQUITIES)

        # A domain-less pipeline falls back to the engine's default.
        self.assertIs(
            engine_jp.resolve_domain(pipe_without_domain),
            JP_EQUITIES,
        )

        # A GENERIC engine defers to the pipeline's own domain.
        self.assertIs(engine_generic.resolve_domain(pipe_us), US_EQUITIES)

        # GENERIC engine + GENERIC pipeline cannot be resolved.
        with self.assertRaises(ValueError):
            engine_generic.resolve_domain(pipe_without_domain)

        # Even when both are GENERIC, the domain can still be inferred
        # from the pipeline's columns.
        column_pipe = Pipeline({'close': USEquityPricing.close.latest})
        self.assertIs(
            engine_generic.resolve_domain(column_pipe),
            US_EQUITIES,
        )
| 36.047676 | 79 | 0.587935 |
47e856cc74a3139954ef2027c4a834b7de4a9269 | 9,264 | py | Python | LammpsSearchFuncs.py | m-bone/Bond_React_Python | e2f9fe5473e5b15d32484ccf2be6f6820a60b53f | [
"MIT"
] | 1 | 2021-02-19T06:17:40.000Z | 2021-02-19T06:17:40.000Z | LammpsSearchFuncs.py | m-bone/Bond_React_Python | e2f9fe5473e5b15d32484ccf2be6f6820a60b53f | [
"MIT"
] | null | null | null | LammpsSearchFuncs.py | m-bone/Bond_React_Python | e2f9fe5473e5b15d32484ccf2be6f6820a60b53f | [
"MIT"
] | null | null | null | ##############################################################################
# Developed by: Matthew Bone
# Last Updated: 30/07/2021
# Updated by: Matthew Bone
#
# Contact Details:
# Bristol Composites Institute (BCI)
# Department of Aerospace Engineering - University of Bristol
# Queen's Building - University Walk
# Bristol, BS8 1TR
# U.K.
# Email - matthew.bone@bristol.ac.uk
#
# File Description:
# A range of functions designed to search LAMMPS files for information.
# These functions work for 'read_data' files and 'molecule' files
##############################################################################
from natsort import natsorted
from LammpsTreatmentFuncs import clean_data
# Get data
def get_data(sectionName, lines, sectionIndexList, useExcept = True):
    """Return the rows of a named section, each split into fields.

    The section body runs from the line after ``sectionName`` up to the
    next section boundary recorded in ``sectionIndexList``.  When
    ``useExcept`` is true a missing section yields an empty list;
    otherwise the ValueError from ``list.index`` propagates so callers
    can branch on it.
    """
    if useExcept:
        try:
            startIndex = lines.index(sectionName)
        except ValueError:
            # Missing section: [] can be appended/extended harmlessly later.
            return []
    else:
        # Let a missing section name raise ValueError to the caller.
        startIndex = lines.index(sectionName)
    nextBoundary = sectionIndexList[sectionIndexList.index(startIndex) + 1]
    # Skip the section-name line itself, then split each row into tokens.
    return [row.split() for row in lines[startIndex + 1:nextBoundary]]
def get_coeff(coeffName, settingsData):
    """Return every pre-split settings row whose first field is coeffName."""
    return list(filter(lambda row: row[0] == coeffName, settingsData))
def find_sections(lines):
    """Return the indices of section keyword lines, plus end-of-file.

    Section keywords (e.g. 'Masses', 'Atoms') are the only lines made up
    purely of letters - cleaned data contains no spaces, newlines or
    punctuation in them.  ``len(lines)`` is appended so every section,
    including the last, has a closing boundary for slicing.
    """
    # enumerate() replaces the previous list.index() scan, which was
    # O(n^2) and mapped duplicate keyword lines to the first occurrence.
    sectionIndexList = [i for i, line in enumerate(lines) if line.isalpha()]
    sectionIndexList.append(len(lines))
    return sectionIndexList
# Search bond pair
def pair_search(bond, bondAtom):
    """Return the partner atomID of ``bondAtom`` within ``bond``.

    A bond row is ``[bondID, bondType, atomA, atomB]``.  When
    ``bondAtom`` is one of the pair, the other atom is returned;
    otherwise the function falls through and returns None.
    """
    atomA, atomB = bond[2], bond[3]
    if bondAtom == atomA:
        return atomB
    if bondAtom == atomB:
        return atomA
# Loop through atomIDs, possible bonds and find valid bonds
def search_loop(bonds, bondAtom):
    """Collect partner atomIDs for every atom in ``bondAtom``.

    Each search atom is checked against every bond with pair_search();
    all non-None partners are gathered, duplicates preserved, in
    (search atom, bond) order.
    """
    partners = []
    for searchAtom in bondAtom:
        partners.extend(
            partner
            for partner in (pair_search(bond, searchAtom) for bond in bonds)
            if partner is not None
        )
    return partners
def edge_atom_fingerprint_ids(edgeAtomList, originalBondList, validAtomSet):
    """Map each edge atom to its neighbours outside the partial structure.

    Neighbours are found with get_neighbours() - the bonding-atom list is
    passed empty because an edge atom can never neighbour a bonding atom.
    Any neighbour that belongs to ``validAtomSet`` (i.e. lies inside the
    partial structure) is filtered out of the fingerprint.
    """
    neighbourDict = get_neighbours(edgeAtomList, originalBondList, [])
    return {
        edgeAtom: [atom for atom in neighbours if atom not in validAtomSet]
        for edgeAtom, neighbours in neighbourDict.items()
    }
def get_neighbours(atomIDList, bondsList, newBondAtoms):
    """Map each atomID in ``atomIDList`` to the list of atoms bonded to it.

    ``newBondAtoms`` is currently unused: the exclusion of bonding atoms
    from neighbour lists is disabled (see the inline note), but the
    parameter is kept so existing call sites continue to work.
    """
    neighboursDict = {}
    for atom in atomIDList:
        partners = []
        for bond in bondsList:
            partner = pair_search(bond, atom)
            # Stops bonding atoms appearing as neighbours
            # CODE - and partner not in newBondAtoms
            if partner is not None:
                partners.append(partner)
        neighboursDict[atom] = partners
    return neighboursDict
def get_additional_neighbours(neighboursDict, searchAtomID, searchNeighbours, bondingAtoms, unique=True):
    """Return the neighbours-of-neighbours shell around ``searchAtomID``.

    Designed for second and third neighbour shells; more distant shells
    are possible but may behave unexpectedly.

    Args:
        unique: When True, strip the search atom itself, the bonding
            atoms, the atoms the shell was expanded from, and - for
            third-or-further shells - the search atom's own first
            neighbours.

    Returns:
        List of neighbour atomIDs (set-derived, so unordered).
    """
    shell = set()
    for neighbour in searchNeighbours:
        shell.update(neighboursDict[neighbour])

    if unique:
        # Never report the atom we started from.
        shell.discard(searchAtomID)
        # Bonding-atom fingerprints always differ pre/post reaction, so
        # they are useless for symmetry comparison - drop them.
        for bondingAtom in bondingAtoms:
            shell.discard(bondingAtom)
        # Drop the atoms this shell was expanded from.
        for neighbour in searchNeighbours:
            shell.discard(neighbour)
        # For third-or-further shells the atoms searched from are not the
        # first neighbours, so drop those as well.
        if neighboursDict[searchAtomID] != searchNeighbours:
            for neighbour in neighboursDict[searchAtomID]:
                shell.discard(neighbour)

    return list(shell)
def element_atomID_dict(fileName, elementsByType):
    """Build a mapping from atomID to element symbol for a LAMMPS file.

    Handles molecule files (which carry a 'Types' section) and falls
    back to the 'Atoms' section of a standard data file.
    ``elementsByType`` lists element symbols in type order (type 1 first).
    """
    with open(fileName, 'r') as f:
        fileLines = f.readlines()

    tidyData = clean_data(fileLines)
    sectionIndices = find_sections(tidyData)

    # Molecule files have a 'Types' section; a standard data file does
    # not, so the ValueError routes us to its 'Atoms' section instead.
    try:
        types = get_data('Types', tidyData, sectionIndices, useExcept=False)
    except ValueError:
        atoms = get_data('Atoms', tidyData, sectionIndices, useExcept=False)
        types = [[atomRow[0], atomRow[2]] for atomRow in atoms]

    # Keys: type number (int), values: upper-case element symbol.
    elementsByTypeDict = {
        typeNum + 1: element.upper()
        for typeNum, element in enumerate(elementsByType)
    }

    # Guard: elementsByType must cover the largest type present.
    # Types are stored as [atomID, typeNumber] pairs.
    largestType = int(natsorted(types, key=lambda x: x[1])[-1][1])
    assert len(elementsByType) >= largestType, 'EBT (elements by type) is missing values. Check that all types are present and separated with a space.'

    return {row[0]: elementsByTypeDict[int(row[1])] for row in types}
def get_header(tidiedData):
    """Parse the header of a LAMMPS data file into a dictionary.

    Returns a dict mapping joined keyword strings (e.g. 'atom_types')
    to lists of numeric values, with all comment lines collected under
    the 'comment' key.
    """
    def find_header_end():
        # The header ends at the first section keyword: the first line
        # past the title line that starts with a letter and is not a
        # '#' comment.
        for index, line in enumerate(tidiedData):
            if index == 0:
                continue
            if line[0] == '#':
                continue
            if line[0].isalpha():
                return index

    headerDict = {'comment': []}
    for line in tidiedData[:find_header_end()]:
        if line[0].isalpha() or line[0] == '#':
            headerDict['comment'].append(line)
            continue
        values = []
        keywords = []
        for token in line.split():
            # Try int first, then float; anything non-numeric is part of
            # the keyword (e.g. 'atom' and 'types' in '5 atom types').
            try:
                values.append(int(token))
            except ValueError:
                try:
                    values.append(float(token))
                except ValueError:
                    keywords.append(token)
        headerDict['_'.join(keywords)] = values
    return headerDict
def convert_header(header):
    """Turn a parsed header dict back into rows of strings for output.

    Each row starts with the space-joined values; non-comment rows are
    followed by the keyword split back into its component words.
    """
    rows = []
    for keyword, values in header.items():
        row = [' '.join(str(value) for value in values)]
        if keyword != 'comment':
            row += keyword.split('_')
        rows.append(row)
    return rows
ef358d7c3c8b170a49199da66e792e267dcfe238 | 2,623 | py | Python | code/numpy/numpy-tutorial-master/scripts/game-of-life-big.py | vicb1/python-reference | 8bbb5b14ad0781cbb4b16e260ae1dc772acd6063 | [
"MIT"
] | null | null | null | code/numpy/numpy-tutorial-master/scripts/game-of-life-big.py | vicb1/python-reference | 8bbb5b14ad0781cbb4b16e260ae1dc772acd6063 | [
"MIT"
] | null | null | null | code/numpy/numpy-tutorial-master/scripts/game-of-life-big.py | vicb1/python-reference | 8bbb5b14ad0781cbb4b16e260ae1dc772acd6063 | [
"MIT"
] | null | null | null | # -----------------------------------------------------------------------------
# Copyright INRIA
# Contributors: Nicolas P. Rougier (Nicolas.Rougier@inria.fr)
#
# DANA is a computing framework for the simulation of distributed,
# asynchronous, numerical and adaptive models.
#
# This software is governed by the CeCILL license under French law and abiding
# by the rules of distribution of free software. You can use, modify and/ or
# redistribute the software under the terms of the CeCILL license as circulated
# by CEA, CNRS and INRIA at the following URL
# http://www.cecill.info/index.en.html.
#
# As a counterpart to the access to the source code and rights to copy, modify
# and redistribute granted by the license, users are provided only with a
# limited warranty and the software's author, the holder of the economic
# rights, and the successive licensors have only limited liability.
#
# In this respect, the user's attention is drawn to the risks associated with
# loading, using, modifying and/or developing or reproducing the software by
# the user in light of its specific status of free software, that may mean that
# it is complicated to manipulate, and that also therefore means that it is
# reserved for developers and experienced professionals having in-depth
# computer knowledge. Users are therefore encouraged to load and test the
# software's suitability as regards their requirements in conditions enabling
# the security of their systems and/or data to be ensured and, more generally,
# to use and operate it in the same conditions as regards security.
#
# The fact that you are presently reading this means that you have had
# knowledge of the CeCILL license and that you accept its terms.
# -----------------------------------------------------------------------------
import matplotlib
import numpy as np
import matplotlib.pyplot as plt
def iterate(Z):
    """Advance the Game of Life grid ``Z`` by one generation, in place.

    Border cells are treated as permanently dead; only the interior is
    updated.  Returns the mutated grid for convenience.
    """
    interior = Z[1:-1, 1:-1]
    rows, cols = Z.shape
    # Sum of the eight neighbours of every interior cell.
    neighbours = sum(
        Z[dy:dy + rows - 2, dx:dx + cols - 2]
        for dy in (0, 1, 2)
        for dx in (0, 1, 2)
        if (dy, dx) != (1, 1)
    )
    # Conway's rules: a dead cell with exactly 3 neighbours is born; a
    # live cell with 2 or 3 neighbours survives; everything else dies.
    alive_next = (((neighbours == 3) & (interior == 0)) |
                  (((neighbours == 2) | (neighbours == 3)) & (interior == 1)))
    Z[...] = 0
    Z[1:-1, 1:-1][alive_next] = 1
    return Z
# Start from a random 256x512 grid and evolve it for 100 generations.
Z = np.random.randint(0,2,(256,512))
for i in range(100):
    iterate(Z)

# Render the final state at one pixel per cell: the figure size is
# derived from the grid shape and the DPI, and the axes fill the whole
# figure with no frame or ticks.
size = np.array(Z.shape)
dpi = 72.0
figsize= size[1]/float(dpi),size[0]/float(dpi)
fig = plt.figure(figsize=figsize, dpi=dpi, facecolor="white")
fig.add_axes([0.0, 0.0, 1.0, 1.0], frameon=False)
plt.imshow(Z,interpolation='nearest', cmap=plt.cm.gray_r)
plt.xticks([]), plt.yticks([])
plt.savefig('../figures/game-of-life-big.png', dpi=dpi)
plt.show()
| 43.716667 | 79 | 0.662981 |
2665f9732b1aa95690da2df0c0cd27ea0b1643c6 | 6,377 | py | Python | lib/lambdascrapers/sources_ lambdascrapers/en/NotWorking_11-9/allucen.py | proxium/script.module.lambdascrapers | f96ad4c7c44c011c9d0007a83edde8c4797e0e2f | [
"Beerware"
] | 11 | 2018-12-21T22:52:37.000Z | 2021-09-02T02:13:50.000Z | lib/lambdascrapers/sources_ lambdascrapers/en/NotWorking_11-9/allucen.py | proxium/script.module.lambdascrapers | f96ad4c7c44c011c9d0007a83edde8c4797e0e2f | [
"Beerware"
] | null | null | null | lib/lambdascrapers/sources_ lambdascrapers/en/NotWorking_11-9/allucen.py | proxium/script.module.lambdascrapers | f96ad4c7c44c011c9d0007a83edde8c4797e0e2f | [
"Beerware"
] | 1 | 2022-01-07T18:30:52.000Z | 2022-01-07T18:30:52.000Z | # -*- coding: utf-8 -*-
'''
allucen scraper for Exodus forks.
Nov 9 2018 - Checked
Updated and refactored by someone.
Originally created by others.
'''
import re,urllib,urlparse,json
from resources.lib.modules import client
from resources.lib.modules import control
from resources.lib.modules import source_utils
class source:
    """Alluc.ee search-API scraper (Python 2 / Kodi add-on style).

    Queries the alluc.ee JSON API for stream (and optionally download)
    links matching a movie or episode, filtering results by hoster,
    release tags and file extension.
    """
    def __init__(self):
        self.priority = 0
        self.language = ['en']
        self.domains = ['alluc.ee']
        self.base_link = 'https://www.alluc.ee'
        # %s: result type, %s: API key, %s: query, %d: count, %d: offset
        self.search_link = '/api/search/%s/?apikey=%s&getmeta=0&query=%s&count=%d&from=%d'
        self.types = ['stream']
        # Maximum number of results to examine, fetched in pages of 100.
        self.streamLimit = control.setting('alluc.limit')
        if self.streamLimit == '': self.streamLimit = 100
        self.streamLimit = int(self.streamLimit)
        self.streamIncrease = 100
        self.api = control.setting('alluc.api')
        self.debrid = control.setting('alluc.download')
        if self.debrid == 'true': self.types = ['stream', 'download']
        # Release tags that mark unwanted releases to be skipped.
        self.rlsFilter = ['FRENCH', 'LATINO', 'SELF', 'SAMPLE', 'EXTRA']
    def movie(self, imdb, title, localtitle, aliases, year):
        """Return a urlencoded movie descriptor (None on any failure)."""
        try:
            url = {'imdb': imdb, 'title': title, 'year': year}
            url = urllib.urlencode(url)
            return url
        except:
            return
    def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
        """Return a urlencoded show descriptor (None on any failure)."""
        try:
            url = {'imdb': imdb, 'tvdb': tvdb, 'tvshowtitle': tvshowtitle, 'year': year}
            url = urllib.urlencode(url)
            return url
        except:
            return
    def episode(self, url, imdb, tvdb, title, premiered, season, episode):
        """Extend a show descriptor with episode details and re-encode it."""
        try:
            if url == None: return
            url = urlparse.parse_qs(url)
            url = dict([(i, url[i][0]) if url[i] else (i, '') for i in url])
            url['title'], url['premiered'], url['season'], url['episode'] = title, premiered, season, episode
            url = urllib.urlencode(url)
            return url
        except:
            return
    def sources(self, url, hostDict, hostprDict):
        """Query the alluc API and build the list of candidate sources.

        Pages through results (self.streamIncrease per request) for each
        result type until self.streamLimit entries have been examined.
        Returns whatever has been collected so far on any error.
        """
        sources = []
        try:
            if url == None:
                raise Exception()
            # An API key is mandatory; bail out early without one.
            if not (self.api and not self.api == ''):
                raise Exception()
            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
            # 'SxxEyy' for episodes, release year for movies; used below
            # to require a match inside each result's title.
            hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else data['year']
            title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
            year = int(data['year']) if 'year' in data and not data['year'] == None else None
            season = int(data['season']) if 'season' in data and not data['season'] == None else None
            episode = int(data['episode']) if 'episode' in data and not data['episode'] == None else None
            # Strip punctuation from the query and pin the language.
            query = '%s S%02dE%02d' % (title, season, episode) if 'tvshowtitle' in data else '%s %d' % (title, year)
            query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)
            query += ' lang:%s' % self.language[0]
            query = urllib.quote_plus(query)
            url = urlparse.urljoin(self.base_link, self.search_link)
            hostDict = hostprDict + hostDict
            # Work out how many paged requests cover streamLimit and how
            # many results the final page should ask for.  NOTE: this is
            # Python 2 code - '/' is integer division here; under
            # Python 3 range(iterations) would fail on a float.
            iterations = self.streamLimit/self.streamIncrease
            last = self.streamLimit - (iterations * self.streamIncrease)
            if not last:
                iterations = iterations - 1
                last = self.streamIncrease
            iterations = iterations + 1
            seen_urls = set()
            for type in self.types:
                searchFrom = 0
                searchCount = self.streamIncrease
                for offset in range(iterations):
                    if iterations == offset + 1: searchCount = last
                    urlNew = url % (type, self.api, query, searchCount, searchFrom)
                    searchFrom = searchFrom + self.streamIncrease
                    results = client.request(urlNew)
                    results = json.loads(results)
                    apistatus = results['status']
                    if apistatus != 'success': break
                    results = results['result']
                    added = False
                    for result in results:
                        jsonName = result['title']
                        jsonSize = result['sizeinternal']
                        jsonExtension = result['extension']
                        jsonLanguage = result['lang']
                        jsonHoster = result['hostername'].lower()
                        jsonLink = result['hosterurls'][0]['url']
                        # Skip duplicates, wrong episodes/years, filtered
                        # releases, unknown hosters and rar archives.
                        if jsonLink in seen_urls: continue
                        seen_urls.add(jsonLink)
                        if not hdlr in jsonName.upper(): continue
                        if not self.releaseValid(title, jsonName): continue # filter non en releases
                        if not jsonHoster in hostDict: continue
                        if jsonExtension == 'rar': continue
                        quality, info = source_utils.get_release_quality(jsonName)
                        info.append(self.formatSize(jsonSize))
                        info.append(jsonName)
                        info = '|'.join(info)
                        sources.append({'source' : jsonHoster, 'quality': quality, 'language' : jsonLanguage, 'url' : jsonLink, 'info': info, 'direct' : False, 'debridonly' : False})
                        added = True
                    # An empty page means no further results for this type.
                    if not added:
                        break
            return sources
        except:
            return sources
    def resolve(self, url):
        """API links are already direct; return the url unchanged."""
        return url
    def formatSize(self, size):
        """Format a byte count as '[B][N MB][/B]' (or GB); '' if unknown."""
        if size == 0 or size is None: return ''
        # Python 2 integer division: bytes -> whole MB.
        size = int(size) / (1024 * 1024)
        if size > 2000:
            size = size / 1024
            unit = 'GB'
        else:
            unit = 'MB'
        size = '[B][%s %s][/B]' % (size, unit)
        return size
    def releaseValid (self, title, release):
        """False when a filtered tag appears in the release but not the title."""
        for unw in self.rlsFilter:
            if not unw in title.upper() and unw in release.upper():
                return False
        return True
| 38.648485 | 183 | 0.514192 |
6373ea078f838fd3a48f55156d5c4c0e8e4ba8d0 | 351 | py | Python | ascii-decryption.py | leaen/Codeeval-solutions | fa83cb4fba3e56f79c0a6b00361c18cd3092c3f0 | [
"MIT"
] | null | null | null | ascii-decryption.py | leaen/Codeeval-solutions | fa83cb4fba3e56f79c0a6b00361c18cd3092c3f0 | [
"MIT"
] | null | null | null | ascii-decryption.py | leaen/Codeeval-solutions | fa83cb4fba3e56f79c0a6b00361c18cd3092c3f0 | [
"MIT"
] | null | null | null | import sys
def decrypt(phrase):
    """Decode a line of shifted ASCII codes into text.

    The first four whitespace-separated tokens are metadata and are
    skipped; the remaining tokens are integers.  The smallest code is
    assumed to represent a space (ASCII 32), which fixes the shift for
    the whole message.
    """
    codes = [int(token) for token in phrase.split()[4:]]
    shift = 32 - min(codes)
    return ''.join(chr(code + shift) for code in codes)
def main():
    """Decrypt every line of the file named on the command line."""
    path = sys.argv[1]
    with open(path) as input_file:
        for raw_line in input_file:
            print(decrypt(raw_line.strip()))

if __name__ == '__main__':
    main()
| 23.4 | 64 | 0.606838 |
9df784d98fe4caf0508c11ae291506369ccbbfa3 | 1,222 | py | Python | predict.py | celestinhermez/pytorch_image_classifier | e481b200db7eb769c5f553ad941b2f7129798dde | [
"MIT"
] | null | null | null | predict.py | celestinhermez/pytorch_image_classifier | e481b200db7eb769c5f553ad941b2f7129798dde | [
"MIT"
] | null | null | null | predict.py | celestinhermez/pytorch_image_classifier | e481b200db7eb769c5f553ad941b2f7129798dde | [
"MIT"
] | null | null | null | # We start by importing all the modules we will need, as well as the helper document with all our functions
import argparse
import torch
import json
import numpy as np
from torch import nn
from torch import optim
import torch.nn.functional as F
import os
from torchvision import datasets, transforms, models
from collections import OrderedDict
from PIL import Image
import helper
def main():
    """Parse CLI args and predict the top-k classes for an image."""
    # We display a short prompt.  NOTE: the last two sentences are glued
    # by implicit adjacent-literal concatenation ('\n' followed directly
    # by the next string) rather than an explicit '+'.
    print('Hello! This script will use a checkpoint to predict the top k classes of a picture of your choosing' +
          '\n'
          + 'You can choose how many classes to display.' + '\n' +
          'You can also provide a mapping from the indices to the class names should you have it.' + '\n'
          'You can consult the help to see all the other arguments' +
          '\n' + '\n')
    print('\n')
    # We parse the arguments from the command line
    args = helper.get_input_args_predict()
    image = args.path
    checkpoint = args.checkpoint
    top_k = args.top_k
    mapping = args.category_names
    gpu = args.gpu
    # We predict the categories, with their associated probabilities
    helper.predict(image, checkpoint, top_k, mapping, gpu)
# Runs unconditionally on import - there is no __main__ guard.
main()
716027fd6af3292e68d3f3796c953b266afc0386 | 1,972 | py | Python | games_seeker/commands/builder.py | sgg10/games_seeker | c9b7723586e79baffb5dc9f6ddb88f541da416a7 | [
"MIT"
] | null | null | null | games_seeker/commands/builder.py | sgg10/games_seeker | c9b7723586e79baffb5dc9f6ddb88f541da416a7 | [
"MIT"
] | null | null | null | games_seeker/commands/builder.py | sgg10/games_seeker | c9b7723586e79baffb5dc9f6ddb88f541da416a7 | [
"MIT"
] | null | null | null | from datetime import datetime
import click
import numpy as np
from docxtpl import DocxTemplate
from games_seeker.pc_builder.genetic_search import GeneticSearch
def generate_report(context, budget):
    """Render the builder Word template with ``context`` and save it.

    The output filename embeds the budget and today's date, e.g.
    ``reports/builder_result_1500_01-01-2024.docx``.
    """
    report = DocxTemplate("templates/template_builder.docx")
    report.render(context)
    today = datetime.now().strftime("%d-%m-%Y")
    report.save(f"reports/builder_result_{budget}_{today}.docx")
@click.command()
@click.option('-b', '--budget', prompt=True, type=int)
@click.option("-a", "--advance", is_flag=True, default=False)
@click.option("-n", "--number-results", type=int, default=3)
@click.option("-i", "--iterations", type=int, default=10)
def cli(budget, advance, number_results, iterations):
    """Search PC builds within ``budget`` and write a .docx report.

    --advance runs four extra crossover/mutation parameter cases
    (slower, better results); --number-results limits how many builds
    appear in the report.
    """
    # Genetic-algorithm parameter cases: crossover prob, mutation prob,
    # generations, individuals.
    cases = [{"CXPB":0.8, "MUTPB":0.2, "NGEN":50, "NIND":100}]
    if advance:
        click.echo(click.style(
            "Advanced mode will take more time to search but gives a better result (approximately 2.5 minutes)",
            fg="yellow"
            )
        )
        cases += [
            {"CXPB": 0.4, "MUTPB": 0.6, "NGEN": 50, "NIND": 100},
            {"CXPB": 0.2, "MUTPB": 0.2, "NGEN": 50, "NIND": 100},
            {"CXPB": 0.3, "MUTPB": 0.7, "NGEN": 50, "NIND": 100},
            {"CXPB": 0.6, "MUTPB": 0.4, "NGEN": 50, "NIND": 100},
        ]
    gs = GeneticSearch(budget)
    results, _ = gs.run(cases, {"iterations": iterations})
    # Column 7 of the results frame holds product-index lists; look up
    # the full product rows for the top builds.
    # NOTE(review): assumes that column layout - confirm against
    # GeneticSearch.run if it changes.
    output = [gs.PRODUCTS.loc[result, gs.PRODUCT_INFO] for result in results.iloc[:number_results, 7]]
    # Flatten each build's DataFrame into rows/headers/total for the
    # docx template.
    output = list(map(lambda x: {
        "rows": list(map(lambda val: list(val), x.values)),
        "headers": [h.upper() for h in x.columns],
        "total": np.sum(x["price"].values)}, output)
    )
    context = {
        "results": output,
        "date": datetime.now().strftime("%d-%m-%Y %H:%M")
    }
    generate_report(context, budget)
    click.echo(click.style("✅ Report was created successfully ✅", fg="green"))
d988c3da7191dcbe0b2bd3f4321cb295e4dcc5c4 | 206 | py | Python | server/src/resources/__init__.py | Rubilmax/netflux | 9e79063b81e3dc78055fc683c230de511827f030 | [
"MIT"
] | 2 | 2019-06-17T08:28:03.000Z | 2019-06-17T08:28:32.000Z | server/src/resources/__init__.py | Rubilmax/netflux | 9e79063b81e3dc78055fc683c230de511827f030 | [
"MIT"
] | 3 | 2020-09-05T00:54:20.000Z | 2021-05-07T15:34:58.000Z | server/src/resources/__init__.py | Rubilmax/netflux | 9e79063b81e3dc78055fc683c230de511827f030 | [
"MIT"
] | null | null | null | from .mark import MarkResource, MovieMeanResource
from .user import UserResource, UsersResource, UserCreateResource, UserLoginResource
from .movie import MovieResource, MoviesResource, MovieCreateResource
| 41.2 | 84 | 0.864078 |
034453d02366d673db5308c6e672fdd87bda4dd1 | 5,007 | py | Python | ppo-comet.py | ZombaSY/minimalRL | be08d993c699a11603328db92c569f12f92de17a | [
"MIT"
] | null | null | null | ppo-comet.py | ZombaSY/minimalRL | be08d993c699a11603328db92c569f12f92de17a | [
"MIT"
] | null | null | null | ppo-comet.py | ZombaSY/minimalRL | be08d993c699a11603328db92c569f12f92de17a | [
"MIT"
] | null | null | null | import gym
from comet_ml import Experiment
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from utils.log import Log
from torch.distributions import Categorical
from utils.utils import save_model
import random
import argparse
# Command-line hyperparameters for the PPO agent.
parser = argparse.ArgumentParser()
parser.add_argument('--learning-rate', type=float, default=0.0005)
parser.add_argument('--gamma', type=float, default=0.98)  # discount factor
parser.add_argument('--lmbda', type=float, default=0.9)  # GAE lambda
parser.add_argument('--eps-clip', type=float, default=0.1)  # PPO clip range
parser.add_argument('--K-epoch', type=int, default=3)  # update epochs per batch
parser.add_argument('--T-horizon', type=int, default=20)  # rollout length
parser.add_argument('--epsilon', type=float, default=0.1)  # initial exploration rate
args = parser.parse_args()
class PPO(nn.Module):
    """Minimal PPO actor-critic with one shared hidden layer.

    Hyperparameters come from the module-level ``args`` namespace.
    Input is CartPole's 4-dim observation; the policy head outputs 2
    action probabilities and the value head a scalar state value.
    """
    def __init__(self):
        super(PPO, self).__init__()
        self.data = []  # rollout buffer of (s, a, r, s', prob_a, done)
        self.fc1 = nn.Linear(4,256)
        self.fc_pi = nn.Linear(256,2)
        self.fc_v = nn.Linear(256,1)
        self.optimizer = optim.Adam(self.parameters(), lr=args.learning_rate)
        # NOTE(review): the device is stored but never applied to the
        # model or tensors - training runs on CPU as written.
        self.device = torch.device('cuda') # or cpu
    def pi(self, x, softmax_dim=0):
        """Policy head: action probabilities for state(s) ``x``."""
        x = F.relu(self.fc1(x))
        x = self.fc_pi(x)
        prob = F.softmax(x, dim=softmax_dim)
        return prob
    def v(self, x):
        """Value head: estimated state value(s) for ``x``."""
        x = F.relu(self.fc1(x))
        v = self.fc_v(x)
        return v
    def put_data(self, transition):
        """Append one (s, a, r, s', prob_a, done) tuple to the buffer."""
        self.data.append(transition)
    def make_batch(self):
        """Convert the buffered rollout into tensors and clear the buffer.

        ``done`` becomes a 0/1 mask (0 stops value bootstrapping at
        episode ends).
        """
        s_lst, a_lst, r_lst, s_prime_lst, prob_a_lst, done_lst = [], [], [], [], [], []
        for transition in self.data:
            s, a, r, s_prime, prob_a, done = transition
            s_lst.append(s)
            a_lst.append([a])
            r_lst.append([r])
            s_prime_lst.append(s_prime)
            prob_a_lst.append([prob_a])
            done_mask = 0 if done else 1
            done_lst.append([done_mask])
        s,a,r,s_prime,done_mask, prob_a = torch.tensor(s_lst, dtype=torch.float), torch.tensor(a_lst), \
                                          torch.tensor(r_lst), torch.tensor(s_prime_lst, dtype=torch.float), \
                                          torch.tensor(done_lst, dtype=torch.float), torch.tensor(prob_a_lst)
        self.data = []
        return s, a, r, s_prime, done_mask, prob_a
    def train_net(self):
        """Run K_epoch PPO clipped-objective updates on the current batch."""
        s, a, r, s_prime, done_mask, prob_a = self.make_batch()
        for i in range(args.K_epoch):
            td_target = r + args.gamma * self.v(s_prime) * done_mask
            delta = td_target - self.v(s)
            delta = delta.detach().numpy()
            advantage_lst = []
            advantage = 0.0
            # GAE: fold TD errors backwards, discounting by gamma*lambda.
            # (Original comment, in Korean: "I don't fully understand
            # this part...")
            for delta_t in delta[::-1]:
                advantage = args.gamma * args.lmbda * advantage + delta_t[0]
                advantage_lst.append([advantage])
            advantage_lst.reverse()
            advantage = torch.tensor(advantage_lst, dtype=torch.float)
            pi = self.pi(s, softmax_dim=1)
            pi_a = pi.gather(1, a)
            ratio = torch.exp(torch.log(pi_a) - torch.log(prob_a)) # a/b == exp(log(a)-log(b))
            # Clipped surrogate objective plus value-function loss.
            surr1 = ratio * advantage
            surr2 = torch.clamp(ratio, 1-args.eps_clip, 1+args.eps_clip) * advantage
            loss = -torch.min(surr1, surr2) + F.smooth_l1_loss(self.v(s) , td_target.detach())
            self.optimizer.zero_grad()
            loss.mean().backward()
            self.optimizer.step()
def main():
    """Train PPO on CartPole-v1 for 2000 episodes, logging metrics to Comet.

    Collects up to ``T_horizon`` steps per update, applies an epsilon-greedy
    exploration override on top of the sampled policy action, and checkpoints
    the model every 500 episodes.
    """
    env = gym.make('CartPole-v1')
    model = PPO()
    score = 0.0
    print_interval = 20
    log = Log(__file__[:-3])
    # NOTE(review): API key is hard-coded in source — should come from an
    # environment variable or config file, not version control.
    experiment = Experiment(api_key="F8yfdGljIExZoi73No4gb1gF5",
                            project_name="reinforcement-learning", workspace="zombasy")
    experiment.set_model_graph(model)

    for n_epi in range(2000):
        s = env.reset()
        done = False
        # linear epsilon decay, floored at 0.01
        epsilon = max(0.01, args.epsilon - 0.01 * (n_epi / 200))
        while not done:
            # gather at most T_horizon transitions before each policy update
            for t in range(args.T_horizon):
                prob = model.pi(torch.from_numpy(s).float())
                m = Categorical(prob)
                a = m.sample().item()
                # epsilon-greedy: occasionally replace the sampled action
                # with a uniformly random one
                coin = random.random()
                if coin < epsilon:
                    a = random.randint(0, 1)
                s_prime, r, done, info = env.step(a)

                # reward scaled down by 100 to stabilize value targets
                model.put_data((s, a, r/100.0, s_prime, prob[a].item(), done))
                s = s_prime

                score += r
                if done:
                    break

            model.train_net()

        if n_epi%print_interval==0 and n_epi!=0:
            log.info("episode :{}, avg score : {:.1f}".format(n_epi, score/print_interval))
            experiment.log_metric('score', score / print_interval)
            experiment.log_metric('epsilon', epsilon)
            score = 0.0
        if n_epi % 500 == 0 and n_epi != 0:
            save_model(model, 'ppo', n_epi, experiment)

    env.close()
if __name__ == '__main__':
main()
| 32.512987 | 110 | 0.561015 |
2da2341a37af7434871359b00ac4b30e4d679a6b | 1,564 | py | Python | tests/app/dao/test_speakers_dao.py | kentsanggds/api | 651cdf7d496690722d6a4f5b51f04f4be97899d4 | [
"MIT"
] | 1 | 2018-10-12T15:04:31.000Z | 2018-10-12T15:04:31.000Z | tests/app/dao/test_speakers_dao.py | kentsanggds/api | 651cdf7d496690722d6a4f5b51f04f4be97899d4 | [
"MIT"
] | 169 | 2017-11-07T00:45:25.000Z | 2022-03-12T00:08:59.000Z | tests/app/dao/test_speakers_dao.py | kentsanggds/api | 651cdf7d496690722d6a4f5b51f04f4be97899d4 | [
"MIT"
] | 1 | 2019-08-15T14:51:31.000Z | 2019-08-15T14:51:31.000Z | import json
from app.dao.speakers_dao import (
dao_create_speaker,
dao_update_speaker,
dao_get_speakers,
dao_get_speaker_by_id,
dao_get_speaker_by_name
)
from app.models import Speaker
from tests.db import create_speaker
class WhenUsingSpeakersDAO(object):
    """Exercises the speaker DAO helpers against a real database session."""

    def it_creates_a_speaker(self, db):
        created = create_speaker()

        assert Speaker.query.count() == 1
        stored = Speaker.query.filter(Speaker.id == created.id).first()
        assert created == stored

    def it_updates_a_speaker_dao(self, db, db_session, sample_speaker):
        dao_update_speaker(sample_speaker.id, name='Gary Green')

        stored = Speaker.query.filter(Speaker.id == sample_speaker.id).first()
        assert sample_speaker.name == stored.name

    def it_gets_all_speakers_in_last_name_alphabetical_order(self, db, db_session, sample_speaker):
        expected = [create_speaker(name='Bob Blue'), create_speaker(name='Sid Green'), sample_speaker]

        assert expected == dao_get_speakers()

    def it_gets_a_speaker_by_id(self, db, db_session, sample_speaker):
        target = create_speaker(name='Sam Black')

        assert dao_get_speaker_by_id(target.id) == target

    def it_gets_a_speaker_by_name(self, db, db_session, sample_speaker):
        target = create_speaker(name='Sam Black')

        assert dao_get_speaker_by_name(target.name) == target
| 31.918367 | 102 | 0.7289 |
384b7b8350652e8aba3787e62245e667735132a3 | 51,112 | py | Python | tests/snuba/api/endpoints/test_organization_events_stats.py | vaniot-s/sentry | 5c1accadebfaf8baf6863251c05b38ea979ee1c7 | [
"BSD-3-Clause"
] | null | null | null | tests/snuba/api/endpoints/test_organization_events_stats.py | vaniot-s/sentry | 5c1accadebfaf8baf6863251c05b38ea979ee1c7 | [
"BSD-3-Clause"
] | null | null | null | tests/snuba/api/endpoints/test_organization_events_stats.py | vaniot-s/sentry | 5c1accadebfaf8baf6863251c05b38ea979ee1c7 | [
"BSD-3-Clause"
] | null | null | null | from __future__ import absolute_import
import mock
import six
import uuid
from pytz import utc
from datetime import timedelta
from django.core.urlresolvers import reverse
from sentry.testutils import APITestCase, SnubaTestCase
from sentry.testutils.helpers.datetime import iso_format, before_now
from sentry.utils.compat import zip
from sentry.utils.samples import load_data
class OrganizationEventsStatsEndpointTest(APITestCase, SnubaTestCase):
    def setUp(self):
        """Seed two projects with three error events across two hourly buckets.

        Bucket [day_ago, +1h): event "a" in self.project, tagged self.user.
        Bucket [+1h, +2h): events "b" and "c" in self.project2, tagged self.user2.
        """
        super(OrganizationEventsStatsEndpointTest, self).setUp()
        self.login_as(user=self.user)
        # remember the fixture user we logged in as; self.user is re-assigned
        # to a freshly created user a few lines below
        self.authed_user = self.user

        self.day_ago = before_now(days=1).replace(hour=10, minute=0, second=0, microsecond=0)

        self.project = self.create_project()
        self.project2 = self.create_project()
        self.user = self.create_user()
        self.user2 = self.create_user()
        self.store_event(
            data={
                "event_id": "a" * 32,
                "message": "very bad",
                "timestamp": iso_format(self.day_ago + timedelta(minutes=1)),
                "fingerprint": ["group1"],
                "tags": {"sentry:user": self.user.email},
            },
            project_id=self.project.id,
        )
        self.store_event(
            data={
                "event_id": "b" * 32,
                "message": "oh my",
                "timestamp": iso_format(self.day_ago + timedelta(hours=1, minutes=1)),
                "fingerprint": ["group2"],
                "tags": {"sentry:user": self.user2.email},
            },
            project_id=self.project2.id,
        )
        self.store_event(
            data={
                "event_id": "c" * 32,
                "message": "very bad",
                "timestamp": iso_format(self.day_ago + timedelta(hours=1, minutes=2)),
                "fingerprint": ["group2"],
                "tags": {"sentry:user": self.user2.email},
            },
            project_id=self.project2.id,
        )
        # endpoint under test, scoped to the organization created by setUp
        self.url = reverse(
            "sentry-api-0-organization-events-stats",
            kwargs={"organization_slug": self.project.organization.slug},
        )
def test_simple(self):
response = self.client.get(
self.url,
data={
"start": iso_format(self.day_ago),
"end": iso_format(self.day_ago + timedelta(hours=2)),
"interval": "1h",
},
format="json",
)
assert response.status_code == 200, response.content
assert [attrs for time, attrs in response.data["data"]] == [
[{"count": 1}],
[{"count": 2}],
]
def test_no_projects(self):
org = self.create_organization(owner=self.user)
self.login_as(user=self.user)
url = reverse(
"sentry-api-0-organization-events-stats", kwargs={"organization_slug": org.slug}
)
response = self.client.get(url, format="json")
assert response.status_code == 200, response.content
assert len(response.data["data"]) == 0
def test_groupid_filter(self):
response = self.client.get(
self.url,
data={
"start": iso_format(self.day_ago),
"end": iso_format(self.day_ago + timedelta(hours=2)),
"interval": "1h",
"group": self.group.id,
},
format="json",
)
assert response.status_code == 200, response.content
assert len(response.data["data"])
def test_groupid_filter_invalid_value(self):
url = "%s?group=not-a-number" % (self.url,)
with self.feature({"organizations:discover-basic": False}):
response = self.client.get(url, format="json")
assert response.status_code == 400, response.content
def test_user_count(self):
self.store_event(
data={
"event_id": "d" * 32,
"message": "something",
"timestamp": iso_format(self.day_ago + timedelta(minutes=2)),
"tags": {"sentry:user": self.user2.email},
"fingerprint": ["group2"],
},
project_id=self.project2.id,
)
response = self.client.get(
self.url,
data={
"start": iso_format(self.day_ago),
"end": iso_format(self.day_ago + timedelta(hours=2)),
"interval": "1h",
"yAxis": "user_count",
},
format="json",
)
assert response.status_code == 200, response.content
assert [attrs for time, attrs in response.data["data"]] == [[{"count": 2}], [{"count": 1}]]
def test_discover2_backwards_compatibility(self):
with self.feature("organizations:discover-basic"):
response = self.client.get(
self.url,
data={
"start": iso_format(self.day_ago),
"end": iso_format(self.day_ago + timedelta(hours=2)),
"interval": "1h",
"yAxis": "user_count",
},
format="json",
)
assert response.status_code == 200, response.content
assert len(response.data["data"]) > 0
with self.feature("organizations:discover-basic"):
response = self.client.get(
self.url,
data={
"start": iso_format(self.day_ago),
"end": iso_format(self.day_ago + timedelta(hours=2)),
"interval": "1h",
"yAxis": "event_count",
},
format="json",
)
assert response.status_code == 200, response.content
assert len(response.data["data"]) > 0
def test_with_event_count_flag(self):
response = self.client.get(
self.url,
data={
"start": iso_format(self.day_ago),
"end": iso_format(self.day_ago + timedelta(hours=2)),
"interval": "1h",
"yAxis": "event_count",
},
format="json",
)
assert response.status_code == 200, response.content
assert [attrs for time, attrs in response.data["data"]] == [[{"count": 1}], [{"count": 2}]]
def test_performance_view_feature(self):
with self.feature(
{"organizations:performance-view": True, "organizations:discover-basic": False}
):
response = self.client.get(
self.url,
format="json",
data={
"end": iso_format(before_now()),
"start": iso_format(before_now(hours=2)),
"query": "project_id:1",
"interval": "30m",
"yAxis": "count()",
},
)
assert response.status_code == 200
def test_aggregate_function_count(self):
with self.feature("organizations:discover-basic"):
response = self.client.get(
self.url,
format="json",
data={
"start": iso_format(self.day_ago),
"end": iso_format(self.day_ago + timedelta(hours=2)),
"interval": "1h",
"yAxis": "count()",
},
)
assert response.status_code == 200, response.content
assert [attrs for time, attrs in response.data["data"]] == [[{"count": 1}], [{"count": 2}]]
def test_invalid_aggregate(self):
with self.feature("organizations:discover-basic"):
response = self.client.get(
self.url,
format="json",
data={
"start": iso_format(self.day_ago),
"end": iso_format(self.day_ago + timedelta(hours=2)),
"interval": "1h",
"yAxis": "rubbish",
},
)
assert response.status_code == 400, response.content
def test_aggregate_function_user_count(self):
with self.feature("organizations:discover-basic"):
response = self.client.get(
self.url,
format="json",
data={
"start": iso_format(self.day_ago),
"end": iso_format(self.day_ago + timedelta(hours=2)),
"interval": "1h",
"yAxis": "count_unique(user)",
},
)
assert response.status_code == 200, response.content
assert [attrs for time, attrs in response.data["data"]] == [[{"count": 1}], [{"count": 1}]]
def test_aggregate_invalid(self):
with self.feature("organizations:discover-basic"):
response = self.client.get(
self.url,
format="json",
data={
"start": iso_format(self.day_ago),
"end": iso_format(self.day_ago + timedelta(hours=2)),
"interval": "1h",
"yAxis": "nope(lol)",
},
)
assert response.status_code == 400, response.content
def test_throughput_epm_hour_rollup(self):
project = self.create_project()
# Each of these denotes how many events to create in each hour
event_counts = [6, 0, 6, 3, 0, 3]
for hour, count in enumerate(event_counts):
for minute in range(count):
self.store_event(
data={
"event_id": six.binary_type(six.text_type(uuid.uuid1()).encode("ascii")),
"message": "very bad",
"timestamp": iso_format(
self.day_ago + timedelta(hours=hour, minutes=minute)
),
"fingerprint": ["group1"],
"tags": {"sentry:user": self.user.email},
},
project_id=project.id,
)
with self.feature("organizations:discover-basic"):
response = self.client.get(
self.url,
format="json",
data={
"start": iso_format(self.day_ago),
"end": iso_format(self.day_ago + timedelta(hours=6)),
"interval": "1h",
"yAxis": "epm()",
"project": project.id,
},
)
assert response.status_code == 200, response.content
data = response.data["data"]
assert len(data) == 6
rows = data[0:6]
for test in zip(event_counts, rows):
assert test[1][1][0]["count"] == test[0] / (3600.0 / 60.0)
def test_throughput_epm_day_rollup(self):
project = self.create_project()
# Each of these denotes how many events to create in each minute
event_counts = [6, 0, 6, 3, 0, 3]
for hour, count in enumerate(event_counts):
for minute in range(count):
self.store_event(
data={
"event_id": six.binary_type(six.text_type(uuid.uuid1()).encode("ascii")),
"message": "very bad",
"timestamp": iso_format(
self.day_ago + timedelta(hours=hour, minutes=minute)
),
"fingerprint": ["group1"],
"tags": {"sentry:user": self.user.email},
},
project_id=project.id,
)
with self.feature("organizations:discover-basic"):
response = self.client.get(
self.url,
format="json",
data={
"start": iso_format(self.day_ago),
"end": iso_format(self.day_ago + timedelta(hours=24)),
"interval": "24h",
"yAxis": "epm()",
"project": project.id,
},
)
assert response.status_code == 200, response.content
data = response.data["data"]
assert len(data) == 1
assert data[0][1][0]["count"] == sum(event_counts) / (86400.0 / 60.0)
def test_throughput_eps_minute_rollup(self):
project = self.create_project()
# Each of these denotes how many events to create in each minute
event_counts = [6, 0, 6, 3, 0, 3]
for minute, count in enumerate(event_counts):
for second in range(count):
self.store_event(
data={
"event_id": six.binary_type(six.text_type(uuid.uuid1()).encode("ascii")),
"message": "very bad",
"timestamp": iso_format(
self.day_ago + timedelta(minutes=minute, seconds=second)
),
"fingerprint": ["group1"],
"tags": {"sentry:user": self.user.email},
},
project_id=project.id,
)
with self.feature("organizations:discover-basic"):
response = self.client.get(
self.url,
format="json",
data={
"start": iso_format(self.day_ago),
"end": iso_format(self.day_ago + timedelta(minutes=6)),
"interval": "1m",
"yAxis": "eps()",
"project": project.id,
},
)
assert response.status_code == 200, response.content
data = response.data["data"]
assert len(data) == 6
rows = data[0:6]
for test in zip(event_counts, rows):
assert test[1][1][0]["count"] == test[0] / 60.0
def test_throughput_eps_no_rollup(self):
project = self.create_project()
# Each of these denotes how many events to create in each minute
event_counts = [6, 0, 6, 3, 0, 3]
for minute, count in enumerate(event_counts):
for second in range(count):
self.store_event(
data={
"event_id": six.binary_type(six.text_type(uuid.uuid1()).encode("ascii")),
"message": "very bad",
"timestamp": iso_format(
self.day_ago + timedelta(minutes=minute, seconds=second)
),
"fingerprint": ["group1"],
"tags": {"sentry:user": self.user.email},
},
project_id=project.id,
)
with self.feature("organizations:discover-basic"):
response = self.client.get(
self.url,
format="json",
data={
"start": iso_format(self.day_ago),
"end": iso_format(self.day_ago + timedelta(minutes=1)),
"interval": "1s",
"yAxis": "eps()",
"project": project.id,
},
)
assert response.status_code == 200, response.content
data = response.data["data"]
# expect 60 data points between time span of 0 and 60 seconds
assert len(data) == 60
rows = data[0:6]
for row in rows:
assert row[1][0]["count"] == 1
def test_with_field_and_reference_event_invalid(self):
with self.feature("organizations:discover-basic"):
response = self.client.get(
self.url,
format="json",
data={
"start": iso_format(self.day_ago),
"end": iso_format(self.day_ago + timedelta(hours=2)),
"interval": "1h",
"referenceEvent": "nope-invalid",
"yAxis": "count()",
},
)
assert response.status_code == 400, response.content
assert "reference" in response.content
def test_only_reference_event(self):
# Create a new event that message matches events made in setup
event = self.store_event(
data={
"event_id": "e" * 32,
"message": "oh my",
"timestamp": iso_format(self.day_ago + timedelta(minutes=2)),
"tags": {"sentry:user": "bob@example.com"},
"fingerprint": ["group3"],
},
project_id=self.project.id,
)
with self.feature("organizations:discover-basic"):
response = self.client.get(
self.url,
format="json",
data={
"start": iso_format(self.day_ago),
"end": iso_format(self.day_ago + timedelta(hours=2)),
"interval": "1h",
"referenceEvent": "%s:%s" % (self.project.slug, event.event_id),
"yAxis": "count()",
},
)
assert response.status_code == 200, response.content
# Because we didn't send fields, the reference event is not applied
assert [attrs for time, attrs in response.data["data"]] == [[{"count": 2}], [{"count": 2}]]
def test_field_and_reference_event(self):
# Create a new event that message matches events made in setup
event = self.store_event(
data={
"event_id": "e" * 32,
"message": "oh my",
"timestamp": iso_format(self.day_ago + timedelta(minutes=2)),
"tags": {"sentry:user": "bob@example.com"},
"fingerprint": ["group3"],
},
project_id=self.project.id,
)
with self.feature("organizations:discover-basic"):
response = self.client.get(
self.url,
format="json",
data={
"start": iso_format(self.day_ago),
"end": iso_format(self.day_ago + timedelta(hours=2)),
"field": ["message", "count()"],
"interval": "1h",
"referenceEvent": "%s:%s" % (self.project.slug, event.event_id),
"yAxis": "count()",
},
)
assert response.status_code == 200, response.content
assert [attrs for time, attrs in response.data["data"]] == [[{"count": 1}], [{"count": 1}]]
def test_transaction_events(self):
prototype = {
"type": "transaction",
"transaction": "api.issue.delete",
"spans": [],
"contexts": {"trace": {"op": "foobar", "trace_id": "a" * 32, "span_id": "a" * 16}},
"tags": {"important": "yes"},
}
fixtures = (
("d" * 32, before_now(minutes=32)),
("e" * 32, before_now(hours=1, minutes=2)),
("f" * 32, before_now(hours=1, minutes=35)),
)
for fixture in fixtures:
data = prototype.copy()
data["event_id"] = fixture[0]
data["timestamp"] = iso_format(fixture[1])
data["start_timestamp"] = iso_format(fixture[1] - timedelta(seconds=1))
self.store_event(data=data, project_id=self.project.id)
with self.feature("organizations:discover-basic"):
response = self.client.get(
self.url,
format="json",
data={
"end": iso_format(before_now()),
"start": iso_format(before_now(hours=2)),
"query": "event.type:transaction",
"interval": "30m",
"yAxis": "count()",
},
)
assert response.status_code == 200, response.content
items = [item for time, item in response.data["data"] if item]
# We could get more results depending on where the 30 min
# windows land.
assert len(items) >= 3
def test_project_id_query_filter(self):
with self.feature("organizations:discover-basic"):
response = self.client.get(
self.url,
format="json",
data={
"end": iso_format(before_now()),
"start": iso_format(before_now(hours=2)),
"query": "project_id:1",
"interval": "30m",
"yAxis": "count()",
},
)
assert response.status_code == 200
def test_latest_release_query_filter(self):
with self.feature("organizations:discover-basic"):
response = self.client.get(
self.url,
format="json",
data={
"end": iso_format(before_now()),
"start": iso_format(before_now(hours=2)),
"query": "release:latest",
"interval": "30m",
"yAxis": "count()",
},
)
assert response.status_code == 200
def test_simple_multiple_yaxis(self):
with self.feature("organizations:discover-basic"):
response = self.client.get(
self.url,
data={
"start": iso_format(self.day_ago),
"end": iso_format(self.day_ago + timedelta(hours=2)),
"interval": "1h",
"yAxis": ["user_count", "event_count"],
},
format="json",
)
assert response.status_code == 200, response.content
response.data["user_count"]["order"] == 0
assert [attrs for time, attrs in response.data["user_count"]["data"]] == [
[{"count": 1}],
[{"count": 1}],
]
response.data["event_count"]["order"] == 1
assert [attrs for time, attrs in response.data["event_count"]["data"]] == [
[{"count": 1}],
[{"count": 2}],
]
@mock.patch("sentry.snuba.discover.timeseries_query", return_value={})
def test_multiple_yaxis_only_one_query(self, mock_query):
with self.feature("organizations:discover-basic"):
self.client.get(
self.url,
data={
"start": iso_format(self.day_ago),
"end": iso_format(self.day_ago + timedelta(hours=2)),
"interval": "1h",
"yAxis": ["user_count", "event_count", "epm()", "eps()"],
},
format="json",
)
assert mock_query.call_count == 1
def test_invalid_interval(self):
with self.feature("organizations:discover-basic"):
response = self.client.get(
self.url,
format="json",
data={
"end": iso_format(before_now()),
"start": iso_format(before_now(hours=24)),
"query": "",
"interval": "1s",
"yAxis": "count()",
},
)
assert response.status_code == 400
def test_out_of_retention(self):
with self.options({"system.event-retention-days": 10}):
with self.feature("organizations:discover-basic"):
response = self.client.get(
self.url,
format="json",
data={
"start": iso_format(before_now(days=20)),
"end": iso_format(before_now(days=15)),
"query": "",
"interval": "30m",
"yAxis": "count()",
},
)
assert response.status_code == 400
    @mock.patch("sentry.utils.snuba.quantize_time")
    def test_quantize_dates(self, mock_quantize):
        """Only long relative stats periods get their timestamps quantized.

        Short periods and absolute start/end ranges must leave quantize_time
        untouched; a 90d statsPeriod should invoke it.
        """
        mock_quantize.return_value = before_now(days=1).replace(tzinfo=utc)
        with self.feature("organizations:discover-basic"):
            # Don't quantize short time periods
            self.client.get(
                self.url,
                format="json",
                data={"statsPeriod": "1h", "query": "", "interval": "30m", "yAxis": "count()"},
            )
            # Don't quantize absolute date periods
            self.client.get(
                self.url,
                format="json",
                data={
                    "start": iso_format(before_now(days=20)),
                    "end": iso_format(before_now(days=15)),
                    "query": "",
                    "interval": "30m",
                    "yAxis": "count()",
                },
            )
            # neither of the two requests above may have called the mock
            assert len(mock_quantize.mock_calls) == 0

            # Quantize long date periods
            self.client.get(
                self.url,
                format="json",
                data={"statsPeriod": "90d", "query": "", "interval": "30m", "yAxis": "count()"},
            )
            # two calls — presumably one each for the period's start and end
            # bounds; TODO confirm against sentry.utils.snuba
            assert len(mock_quantize.mock_calls) == 2
class OrganizationEventsStatsTopNEvents(APITestCase, SnubaTestCase):
def setUp(self):
super(OrganizationEventsStatsTopNEvents, self).setUp()
self.login_as(user=self.user)
self.day_ago = before_now(days=1).replace(hour=10, minute=0, second=0, microsecond=0)
self.project = self.create_project()
self.project2 = self.create_project()
self.user2 = self.create_user()
transaction_data = load_data("transaction")
transaction_data["start_timestamp"] = iso_format(self.day_ago + timedelta(minutes=2))
transaction_data["timestamp"] = iso_format(self.day_ago + timedelta(minutes=4))
self.event_data = [
{
"data": {
"message": "poof",
"timestamp": iso_format(self.day_ago + timedelta(minutes=2)),
"user": {"email": self.user.email},
"fingerprint": ["group1"],
},
"project": self.project2,
"count": 7,
},
{
"data": {
"message": "voof",
"timestamp": iso_format(self.day_ago + timedelta(hours=1, minutes=2)),
"fingerprint": ["group2"],
"user": {"email": self.user2.email},
},
"project": self.project2,
"count": 6,
},
{
"data": {
"message": "very bad",
"timestamp": iso_format(self.day_ago + timedelta(minutes=2)),
"fingerprint": ["group3"],
"user": {"email": "foo@example.com"},
},
"project": self.project,
"count": 5,
},
{
"data": {
"message": "oh no",
"timestamp": iso_format(self.day_ago + timedelta(minutes=2)),
"fingerprint": ["group4"],
"user": {"email": "bar@example.com"},
},
"project": self.project,
"count": 4,
},
{"data": transaction_data, "project": self.project, "count": 3},
# Not in the top 5
{
"data": {
"message": "sorta bad",
"timestamp": iso_format(self.day_ago + timedelta(minutes=2)),
"fingerprint": ["group5"],
"user": {"email": "bar@example.com"},
},
"project": self.project,
"count": 2,
},
{
"data": {
"message": "not so bad",
"timestamp": iso_format(self.day_ago + timedelta(minutes=2)),
"fingerprint": ["group6"],
"user": {"email": "bar@example.com"},
},
"project": self.project,
"count": 1,
},
]
self.events = []
for index, event_data in enumerate(self.event_data):
data = event_data["data"].copy()
for i in range(event_data["count"]):
data["event_id"] = "{}{}".format(index, i) * 16
event = self.store_event(data, project_id=event_data["project"].id)
self.events.append(event)
self.transaction = self.events[4]
self.url = reverse(
"sentry-api-0-organization-events-stats",
kwargs={"organization_slug": self.project.organization.slug},
)
def test_simple_top_events(self):
with self.feature("organizations:discover-basic"):
response = self.client.get(
self.url,
data={
"start": iso_format(self.day_ago),
"end": iso_format(self.day_ago + timedelta(hours=2)),
"interval": "1h",
"yAxis": "count()",
"orderby": ["-count()"],
"field": ["count()", "message", "user.email"],
"topEvents": 5,
},
format="json",
)
data = response.data
assert response.status_code == 200, response.content
assert len(data) == 5
for index, event in enumerate(self.events[:5]):
message = event.message or event.transaction
results = data[
",".join([message, self.event_data[index]["data"]["user"].get("email", "None")])
]
assert results["order"] == index
assert [{"count": self.event_data[index]["count"]}] in [
attrs for time, attrs in results["data"]
]
def test_top_events_limits(self):
data = {
"start": iso_format(self.day_ago),
"end": iso_format(self.day_ago + timedelta(hours=2)),
"interval": "1h",
"yAxis": "count()",
"orderby": ["-count()"],
"field": ["count()", "message", "user.email"],
}
with self.feature("organizations:discover-basic"):
data["topEvents"] = 50
response = self.client.get(self.url, data, format="json",)
assert response.status_code == 400
data["topEvents"] = 0
response = self.client.get(self.url, data, format="json",)
assert response.status_code == 400
data["topEvents"] = "a"
response = self.client.get(self.url, data, format="json",)
assert response.status_code == 400
def test_top_events_with_projects(self):
with self.feature("organizations:discover-basic"):
response = self.client.get(
self.url,
data={
"start": iso_format(self.day_ago),
"end": iso_format(self.day_ago + timedelta(hours=2)),
"interval": "1h",
"yAxis": "count()",
"orderby": ["-count()"],
"field": ["count()", "message", "project"],
"topEvents": 5,
},
format="json",
)
data = response.data
assert response.status_code == 200, response.content
assert len(data) == 5
for index, event in enumerate(self.events[:5]):
message = event.message or event.transaction
results = data[",".join([message, event.project.slug])]
assert results["order"] == index
assert [{"count": self.event_data[index]["count"]}] in [
attrs for time, attrs in results["data"]
]
    def test_top_events_with_issue(self):
        """Grouping by issue falls back to "unknown" when the group no longer exists."""
        # delete a group to make sure if this happens the value becomes unknown
        event_group = self.events[0].group
        event_group.delete()

        with self.feature("organizations:discover-basic"):
            response = self.client.get(
                self.url,
                data={
                    "start": iso_format(self.day_ago),
                    "end": iso_format(self.day_ago + timedelta(hours=2)),
                    "interval": "1h",
                    "yAxis": "count()",
                    "orderby": ["-count()"],
                    "field": ["count()", "message", "issue"],
                    "topEvents": 5,
                },
                format="json",
            )

        data = response.data
        assert response.status_code == 200, response.content
        assert len(data) == 5

        for index, event in enumerate(self.events[:5]):
            message = event.message or event.transaction
            # Because we deleted the group for event 0
            if index == 0 or event.group is None:
                issue = "unknown"
            else:
                issue = event.group.qualified_short_id

            # result keys are the comma-joined values of the non-aggregate fields
            results = data[",".join([issue, message])]
            assert results["order"] == index
            assert [{"count": self.event_data[index]["count"]}] in [
                attrs for time, attrs in results["data"]
            ]
def test_top_events_with_functions(self):
with self.feature("organizations:discover-basic"):
response = self.client.get(
self.url,
data={
"start": iso_format(self.day_ago),
"end": iso_format(self.day_ago + timedelta(hours=2)),
"interval": "1h",
"yAxis": "count()",
"orderby": ["-p99()"],
"field": ["transaction", "avg(transaction.duration)", "p99()"],
"topEvents": 5,
},
format="json",
)
data = response.data
assert response.status_code == 200, response.content
assert len(data) == 1
results = data[self.transaction.transaction]
assert results["order"] == 0
assert [attrs for time, attrs in results["data"]] == [
[{"count": 3}],
[{"count": 0}],
]
def test_top_events_with_functions_on_different_transactions(self):
""" Transaction2 has less events, but takes longer so order should be self.transaction then transaction2 """
transaction_data = load_data("transaction")
transaction_data["start_timestamp"] = iso_format(self.day_ago + timedelta(minutes=2))
transaction_data["timestamp"] = iso_format(self.day_ago + timedelta(minutes=6))
transaction_data["transaction"] = "/foo_bar/"
transaction2 = self.store_event(transaction_data, project_id=self.project.id)
with self.feature("organizations:discover-basic"):
response = self.client.get(
self.url,
data={
"start": iso_format(self.day_ago),
"end": iso_format(self.day_ago + timedelta(hours=2)),
"interval": "1h",
"yAxis": "count()",
"orderby": ["-p99()"],
"field": ["transaction", "avg(transaction.duration)", "p99()"],
"topEvents": 5,
},
format="json",
)
data = response.data
assert response.status_code == 200, response.content
assert len(data) == 2
results = data[self.transaction.transaction]
assert results["order"] == 1
assert [attrs for time, attrs in results["data"]] == [
[{"count": 3}],
[{"count": 0}],
]
results = data[transaction2.transaction]
assert results["order"] == 0
assert [attrs for time, attrs in results["data"]] == [
[{"count": 1}],
[{"count": 0}],
]
def test_top_events_with_query(self):
transaction_data = load_data("transaction")
transaction_data["start_timestamp"] = iso_format(self.day_ago + timedelta(minutes=2))
transaction_data["timestamp"] = iso_format(self.day_ago + timedelta(minutes=6))
transaction_data["transaction"] = "/foo_bar/"
self.store_event(transaction_data, project_id=self.project.id)
with self.feature("organizations:discover-basic"):
response = self.client.get(
self.url,
data={
"start": iso_format(self.day_ago),
"end": iso_format(self.day_ago + timedelta(hours=2)),
"interval": "1h",
"yAxis": "count()",
"orderby": ["-p99()"],
"query": "transaction:/foo_bar/",
"field": ["transaction", "avg(transaction.duration)", "p99()"],
"topEvents": 5,
},
format="json",
)
data = response.data
assert response.status_code == 200, response.content
assert len(data) == 1
transaction2_data = data["/foo_bar/"]
assert transaction2_data["order"] == 0
assert [attrs for time, attrs in transaction2_data["data"]] == [
[{"count": 1}],
[{"count": 0}],
]
def test_top_events_with_epm(self):
with self.feature("organizations:discover-basic"):
response = self.client.get(
self.url,
data={
"start": iso_format(self.day_ago),
"end": iso_format(self.day_ago + timedelta(hours=2)),
"interval": "1h",
"yAxis": "epm()",
"orderby": ["-count()"],
"field": ["message", "user.email", "count()"],
"topEvents": 5,
},
format="json",
)
data = response.data
assert response.status_code == 200, response.content
assert len(data) == 5
for index, event in enumerate(self.events[:5]):
message = event.message or event.transaction
results = data[
",".join([message, self.event_data[index]["data"]["user"].get("email", "None")])
]
assert results["order"] == index
assert [{"count": self.event_data[index]["count"] / (3600.0 / 60.0)}] in [
attrs for time, attrs in results["data"]
]
def test_top_events_with_multiple_yaxis(self):
with self.feature("organizations:discover-basic"):
response = self.client.get(
self.url,
data={
"start": iso_format(self.day_ago),
"end": iso_format(self.day_ago + timedelta(hours=2)),
"interval": "1h",
"yAxis": ["epm()", "count()"],
"orderby": ["-count()"],
"field": ["message", "user.email", "count()"],
"topEvents": 5,
},
format="json",
)
data = response.data
assert response.status_code == 200, response.content
assert len(data) == 5
for index, event in enumerate(self.events[:5]):
message = event.message or event.transaction
results = data[
",".join([message, self.event_data[index]["data"]["user"].get("email", "None")])
]
assert results["order"] == index
assert results["epm()"]["order"] == 0
assert results["count()"]["order"] == 1
assert [{"count": self.event_data[index]["count"] / (3600.0 / 60.0)}] in [
attrs for time, attrs in results["epm()"]["data"]
]
assert [{"count": self.event_data[index]["count"]}] in [
attrs for time, attrs in results["count()"]["data"]
]
def test_top_events_with_boolean(self):
with self.feature("organizations:discover-basic"):
response = self.client.get(
self.url,
data={
"start": iso_format(self.day_ago),
"end": iso_format(self.day_ago + timedelta(hours=2)),
"interval": "1h",
"yAxis": "count()",
"orderby": ["-count()"],
"field": ["count()", "message", "device.charging"],
"topEvents": 5,
},
format="json",
)
data = response.data
assert response.status_code == 200, response.content
assert len(data) == 5
for index, event in enumerate(self.events[:5]):
message = event.message or event.transaction
results = data[",".join(["False", message])]
assert results["order"] == index
assert [{"count": self.event_data[index]["count"]}] in [
attrs for time, attrs in results["data"]
]
    def test_top_events_with_timestamp(self):
        """Grouping by ``timestamp`` with an ``event.type:default`` filter.

        Each default event has a distinct timestamp, so five separate series
        are returned, keyed "<message>,<timestamp>". The query excludes the
        transaction event.
        """
        with self.feature("organizations:discover-basic"):
            response = self.client.get(
                self.url,
                data={
                    "start": iso_format(self.day_ago),
                    "end": iso_format(self.day_ago + timedelta(hours=2)),
                    "interval": "1h",
                    "yAxis": "count()",
                    "orderby": ["-count()"],
                    "query": "event.type:default",
                    "field": ["count()", "message", "timestamp"],
                    "topEvents": 5,
                },
                format="json",
            )
        data = response.data
        assert response.status_code == 200, response.content
        assert len(data) == 5
        # Transactions won't be in the results because of the query
        # NOTE: this deletes from the shared fixture lists; later statements in
        # this test rely on the shifted indices.
        del self.events[4]
        del self.event_data[4]
        for index, event in enumerate(self.events[:5]):
            results = data[",".join([event.message, event.timestamp])]
            assert results["order"] == index
            assert [{"count": self.event_data[index]["count"]}] in [
                attrs for time, attrs in results["data"]
            ]
def test_top_events_with_int(self):
with self.feature("organizations:discover-basic"):
response = self.client.get(
self.url,
data={
"start": iso_format(self.day_ago),
"end": iso_format(self.day_ago + timedelta(hours=2)),
"interval": "1h",
"yAxis": "count()",
"orderby": ["-count()"],
"field": ["count()", "message", "transaction.duration"],
"topEvents": 5,
},
format="json",
)
data = response.data
assert response.status_code == 200, response.content
assert len(data) == 1
results = data[",".join([self.transaction.transaction, "120000"])]
assert results["order"] == 0
assert [attrs for time, attrs in results["data"]] == [
[{"count": 3}],
[{"count": 0}],
]
def test_top_events_with_user(self):
with self.feature("organizations:discover-basic"):
response = self.client.get(
self.url,
data={
"start": iso_format(self.day_ago),
"end": iso_format(self.day_ago + timedelta(hours=2)),
"interval": "1h",
"yAxis": "count()",
"orderby": ["-count()"],
"field": ["user", "count()"],
"topEvents": 5,
},
format="json",
)
data = response.data
assert response.status_code == 200, response.content
assert len(data) == 5
assert data["bar@example.com"]["order"] == 0
assert [attrs for time, attrs in data["bar@example.com"]["data"]] == [
[{"count": 7}],
[{"count": 0}],
]
assert [attrs for time, attrs in data["127.0.0.1"]["data"]] == [
[{"count": 3}],
[{"count": 0}],
]
    def test_top_events_with_user_and_email(self):
        """Grouping by both ``user`` and ``user.email``.

        Series keys join both values: "email,email" when an email exists, and
        "ip,None" for users without one.
        """
        with self.feature("organizations:discover-basic"):
            response = self.client.get(
                self.url,
                data={
                    "start": iso_format(self.day_ago),
                    "end": iso_format(self.day_ago + timedelta(hours=2)),
                    "interval": "1h",
                    "yAxis": "count()",
                    "orderby": ["-count()"],
                    "field": ["user", "user.email", "count()"],
                    "topEvents": 5,
                },
                format="json",
            )
        data = response.data
        assert response.status_code == 200, response.content
        assert len(data) == 5
        assert data["bar@example.com,bar@example.com"]["order"] == 0
        assert [attrs for time, attrs in data["bar@example.com,bar@example.com"]["data"]] == [
            [{"count": 7}],
            [{"count": 0}],
        ]
        # Users identified only by IP have no email, serialized as "None".
        assert [attrs for time, attrs in data["127.0.0.1,None"]["data"]] == [
            [{"count": 3}],
            [{"count": 0}],
        ]
    def test_top_events_none_filter(self):
        """When a field is None in one of the top events, make sure we filter by it.

        In this case event[4] is a transaction and has no issue, so its series
        is keyed on the "unknown" placeholder instead of being dropped.
        """
        with self.feature("organizations:discover-basic"):
            response = self.client.get(
                self.url,
                data={
                    "start": iso_format(self.day_ago),
                    "end": iso_format(self.day_ago + timedelta(hours=2)),
                    "interval": "1h",
                    "yAxis": "count()",
                    "orderby": ["-count()"],
                    "field": ["count()", "issue"],
                    "topEvents": 5,
                },
                format="json",
            )
        data = response.data
        assert response.status_code == 200, response.content
        assert len(data) == 5
        for index, event in enumerate(self.events[:5]):
            # Events without a group (transactions) fall under "unknown".
            if event.group is None:
                issue = "unknown"
            else:
                issue = event.group.qualified_short_id
            results = data[issue]
            assert results["order"] == index
            assert [{"count": self.event_data[index]["count"]}] in [
                attrs for time, attrs in results["data"]
            ]
def test_top_events_one_field_with_none(self):
with self.feature("organizations:discover-basic"):
response = self.client.get(
self.url,
data={
"start": iso_format(self.day_ago),
"end": iso_format(self.day_ago + timedelta(hours=2)),
"interval": "1h",
"yAxis": "count()",
"orderby": ["-count()"],
"query": "event.type:transaction",
"field": ["count()", "issue"],
"topEvents": 5,
},
format="json",
)
data = response.data
assert response.status_code == 200, response.content
assert len(data) == 1
results = data["unknown"]
assert [attrs for time, attrs in results["data"]] == [
[{"count": 3}],
[{"count": 0}],
]
assert results["order"] == 0
    def test_top_events_with_error_handled(self):
        """Grouping by ``error.handled``.

        Stores two extra copies of the first event — one with a handled
        exception and one unhandled — then expects three series: "" (events
        without error.handled), "1" (handled) and "0" (unhandled).
        """
        data = self.event_data[0]
        data["data"]["level"] = "error"
        # First copy: handled exception -> error.handled == 1.
        data["data"]["exception"] = {
            "values": [
                {
                    "type": "ValidationError",
                    "value": "Bad request",
                    "mechanism": {"handled": True, "type": "generic"},
                }
            ]
        }
        self.store_event(data["data"], project_id=data["project"].id)
        # Second copy: unhandled exception -> error.handled == 0.
        data["data"]["exception"] = {
            "values": [
                {
                    "type": "ValidationError",
                    "value": "Bad request",
                    "mechanism": {"handled": False, "type": "generic"},
                }
            ]
        }
        self.store_event(data["data"], project_id=data["project"].id)
        with self.feature("organizations:discover-basic"):
            response = self.client.get(
                self.url,
                data={
                    "start": iso_format(self.day_ago),
                    "end": iso_format(self.day_ago + timedelta(hours=2)),
                    "interval": "1h",
                    "yAxis": "count()",
                    "orderby": ["-count()"],
                    "field": ["count()", "error.handled"],
                    "topEvents": 5,
                },
                format="json",
            )
        assert response.status_code == 200, response.content
        data = response.data
        assert len(data) == 3
        # "" bucket: all events with no error.handled value.
        results = data[""]
        assert [attrs for time, attrs in results["data"]] == [
            [{"count": 22}],
            [{"count": 6}],
        ]
        assert results["order"] == 0
        # "1": the handled exception stored above.
        results = data["1"]
        assert [attrs for time, attrs in results["data"]] == [
            [{"count": 1}],
            [{"count": 0}],
        ]
        # "0": the unhandled exception stored above.
        results = data["0"]
        assert [attrs for time, attrs in results["data"]] == [
            [{"count": 1}],
            [{"count": 0}],
        ]
    def test_top_events_with_aggregate_condition(self):
        """An aggregate condition (``count():>4``) limits which events can be
        in the top-events set; only the three highest-count events qualify."""
        with self.feature("organizations:discover-basic"):
            response = self.client.get(
                self.url,
                data={
                    "start": iso_format(self.day_ago),
                    "end": iso_format(self.day_ago + timedelta(hours=2)),
                    "interval": "1h",
                    "yAxis": "count()",
                    "orderby": ["-count()"],
                    "field": ["message", "count()"],
                    "query": "count():>4",
                    "topEvents": 5,
                },
                format="json",
            )
        assert response.status_code == 200, response.content
        data = response.data
        assert len(data) == 3
        for index, event in enumerate(self.events[:3]):
            message = event.message or event.transaction
            results = data[message]
            assert results["order"] == index
            assert [{"count": self.event_data[index]["count"]}] in [
                attrs for time, attrs in results["data"]
            ]
| 38.001487 | 116 | 0.475231 |
934fb0af8fcb0b3fd5066bea2fa7219f7e169276 | 3,481 | py | Python | bindings/python/ensmallen/datasets/string/streptomonosporaalba.py | AnacletoLAB/ensmallen | b2c1b18fb1e5801712852bcc239f239e03076f09 | [
"MIT"
] | 5 | 2021-09-10T18:31:58.000Z | 2022-03-24T04:28:04.000Z | bindings/python/ensmallen/datasets/string/streptomonosporaalba.py | AnacletoLAB/ensmallen_graph | b2c1b18fb1e5801712852bcc239f239e03076f09 | [
"MIT"
] | 18 | 2021-01-07T16:47:39.000Z | 2021-08-12T21:51:32.000Z | bindings/python/ensmallen/datasets/string/streptomonosporaalba.py | AnacletoLAB/ensmallen | b2c1b18fb1e5801712852bcc239f239e03076f09 | [
"MIT"
] | 3 | 2021-01-14T02:20:59.000Z | 2021-08-04T19:09:52.000Z | """
This file offers the methods to automatically retrieve the graph Streptomonospora alba.
The graph is automatically retrieved from the STRING repository.
References
---------------------
Please cite the following if you use the data:
```bib
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
```
"""
from typing import Dict
from ..automatic_graph_retrieval import AutomaticallyRetrievedGraph
from ...ensmallen import Graph # pylint: disable=import-error
def StreptomonosporaAlba(
    directed: bool = False,
    preprocess: bool = True,
    load_nodes: bool = True,
    verbose: int = 2,
    cache: bool = True,
    cache_path: str = "graphs/string",
    version: str = "links.v11.5",
    **additional_graph_kwargs: Dict
) -> Graph:
    """Return new instance of the Streptomonospora alba graph.

    The graph is automatically retrieved from the STRING repository.

    Parameters
    -------------------
    directed: bool = False
        Whether to load the graph as directed or undirected.
        By default false.
    preprocess: bool = True
        Whether to preprocess the graph to be loaded in
        optimal time and memory.
    load_nodes: bool = True
        Whether to load the nodes vocabulary or treat the nodes
        simply as a numeric range.
    verbose: int = 2
        Whether to show loading bars during the retrieval and building
        of the graph.
    cache: bool = True
        Whether to use cache, i.e. download files only once
        and preprocess them only once.
    cache_path: str = "graphs/string"
        Where to store the downloaded graphs.
    version: str = "links.v11.5"
        The version of the graph to retrieve. The available versions are
        homology.v11.5, physical.links.v11.5 and links.v11.5.
    additional_graph_kwargs: Dict
        Additional graph kwargs.

    Returns
    -----------------------
    Instance of the Streptomonospora alba graph.

    References
    ---------------------
    Please cite the following if you use the data:

    ```bib
    @article{szklarczyk2019string,
        title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
        author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
        journal={Nucleic acids research},
        volume={47},
        number={D1},
        pages={D607--D613},
        year={2019},
        publisher={Oxford University Press}
    }
    ```
    """
    # Build the retriever first, then invoke it to produce the Graph.
    retriever = AutomaticallyRetrievedGraph(
        graph_name="StreptomonosporaAlba",
        repository="string",
        version=version,
        directed=directed,
        preprocess=preprocess,
        load_nodes=load_nodes,
        verbose=verbose,
        cache=cache,
        cache_path=cache_path,
        additional_graph_kwargs=additional_graph_kwargs
    )
    return retriever()
| 33.152381 | 223 | 0.678541 |
c17a32157f1dc35b9a2815402c78470a37950a2c | 3,820 | py | Python | tests/unit/test_deprecations.py | Demmenie/praw | 6d2dcaf8a53abc062dac3819545d5c08044605c8 | [
"BSD-2-Clause"
] | null | null | null | tests/unit/test_deprecations.py | Demmenie/praw | 6d2dcaf8a53abc062dac3819545d5c08044605c8 | [
"BSD-2-Clause"
] | null | null | null | tests/unit/test_deprecations.py | Demmenie/praw | 6d2dcaf8a53abc062dac3819545d5c08044605c8 | [
"BSD-2-Clause"
] | null | null | null | """This file should be updated as files/classes/functions are deprecated."""
import pytest
from praw import Reddit
from praw.exceptions import APIException, WebSocketException
from praw.models.reddit.user_subreddit import UserSubreddit
from . import UnitTest
@pytest.mark.filterwarnings("error", category=DeprecationWarning)
class TestDeprecation(UnitTest):
    """Regression tests for PRAW deprecation warnings.

    The ``filterwarnings`` marker promotes every DeprecationWarning raised in
    these tests to an error, which is why ``pytest.raises(DeprecationWarning)``
    can be used to assert that a warning fires.
    """
    def test_validate_on_submit(self):
        # Reading the attribute warns; the property still round-trips values.
        with pytest.raises(DeprecationWarning):
            self.reddit.validate_on_submit
        self.reddit.validate_on_submit = True
        assert self.reddit.validate_on_submit
        self.reddit.validate_on_submit = False
        with pytest.raises(DeprecationWarning):
            self.reddit.validate_on_submit
    def test_api_exception(self):
        # Legacy single-error attributes on APIException are deprecated.
        exc = APIException(["test", "testing", "test"])
        with pytest.raises(DeprecationWarning):
            exc.error_type
        with pytest.raises(DeprecationWarning):
            exc.message
        with pytest.raises(DeprecationWarning):
            exc.field
    def test_subreddit_rules_call(self):
        with pytest.raises(DeprecationWarning) as excinfo:
            self.reddit.subreddit("test").rules()
        assert (
            excinfo.value.args[0]
            == "Calling SubredditRules to get a list of rules is deprecated. Remove the parentheses to use the iterator. View the PRAW documentation on how to change the code in order to use the iterator (https://praw.readthedocs.io/en/latest/code_overview/other/subredditrules.html#praw.models.reddit.rules.SubredditRules.__call__)."
        )
    def test_web_socket_exception_attribute(self):
        exc = WebSocketException("Test", Exception("Test"))
        with pytest.raises(DeprecationWarning) as excinfo:
            _ = exc.original_exception
        assert (
            excinfo.value.args[0]
            == "Accessing the attribute original_exception is deprecated. Please rewrite your code in such a way that this attribute does not need to be used. It will be removed in PRAW 8.0."
        )
    def test_gold_method(self):
        with pytest.raises(DeprecationWarning) as excinfo:
            self.reddit.subreddits.gold()
        assert (
            excinfo.value.args[0]
            == "`subreddits.gold` has be renamed to `subreddits.premium`."
        )
    def test_gild_method(self):
        with pytest.raises(DeprecationWarning) as excinfo:
            self.reddit.submission("1234").gild()
        assert excinfo.value.args[0] == "`.gild` has been renamed to `.award`."
    def test_reddit_user_me_read_only(self):
        # Calling user.me() in read-only mode warns.
        with pytest.raises(DeprecationWarning):
            self.reddit.user.me()
    def test_reddit_refresh_token(self):
        # Passing refresh_token directly to Reddit() is deprecated.
        with pytest.raises(DeprecationWarning):
            Reddit(
                client_id="dummy",
                client_secret=None,
                redirect_uri="dummy",
                refresh_token="dummy",
                user_agent="dummy",
            )
    def test_user_subreddit_as_dict(self):
        # dict-style access on UserSubreddit still works but warns.
        user_subreddit = UserSubreddit(None, display_name="test")
        with pytest.deprecated_call() as warning_info:
            display_name = user_subreddit["display_name"]
            assert display_name == "test"
            assert (
                warning_info.list[0].message.args[0]
                == "`Redditor.subreddit` is no longer a dict and is now an `UserSubreddit` object. Accessing attributes using string indices is deprecated."
            )
            assert user_subreddit.keys() == user_subreddit.__dict__.keys()
            assert (
                warning_info.list[1].message.args[0]
                == "`Redditor.subreddit` is no longer a dict and is now an `UserSubreddit` object. Using `keys` is deprecated and will be removed in PRAW 8."
            )
| 42.444444 | 334 | 0.652094 |
2b4a8444609a528588c6edf14ab3b01e52bc88c0 | 191 | py | Python | configs/_base_/schedules/schedule_adadelta_8e.py | jeffreykuang/mmocr-1 | b17304edeb493b0a4d7224c23d23b952350d0db5 | [
"Apache-2.0"
] | 206 | 2021-07-30T09:04:08.000Z | 2022-03-22T00:57:44.000Z | configs/_base_/schedules/schedule_adadelta_8e.py | jeffreykuang/mmocr-1 | b17304edeb493b0a4d7224c23d23b952350d0db5 | [
"Apache-2.0"
] | 39 | 2021-08-05T07:16:46.000Z | 2022-03-14T13:23:48.000Z | configs/_base_/schedules/schedule_adadelta_8e.py | jeffreykuang/mmocr-1 | b17304edeb493b0a4d7224c23d23b952350d0db5 | [
"Apache-2.0"
] | 61 | 2021-07-30T07:51:41.000Z | 2022-03-30T14:40:02.000Z | # optimizer
optimizer = dict(type='Adadelta', lr=1.0)
optimizer_config = dict(grad_clip=dict(max_norm=0.5))  # clip gradients to max L2 norm 0.5
# learning policy
lr_config = dict(policy='step', step=[4, 6, 7])  # step-decay the LR at epochs 4, 6 and 7
total_epochs = 8
| 27.285714 | 53 | 0.722513 |
af84ef31bd932c47ecf83feae284f7c500bcc8d4 | 6,947 | py | Python | sdk/eventhub/azure-eventhub/azure/eventhub/aio/_eventprocessor/checkpoint_store.py | tzhanl/azure-sdk-for-python | 18cd03f4ab8fd76cc0498f03e80fbc99f217c96e | [
"MIT"
] | null | null | null | sdk/eventhub/azure-eventhub/azure/eventhub/aio/_eventprocessor/checkpoint_store.py | tzhanl/azure-sdk-for-python | 18cd03f4ab8fd76cc0498f03e80fbc99f217c96e | [
"MIT"
] | null | null | null | sdk/eventhub/azure-eventhub/azure/eventhub/aio/_eventprocessor/checkpoint_store.py | tzhanl/azure-sdk-for-python | 18cd03f4ab8fd76cc0498f03e80fbc99f217c96e | [
"MIT"
] | null | null | null | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# -----------------------------------------------------------------------------------
from typing import Iterable, Dict, Any, Union, Optional
from abc import ABC, abstractmethod
class CheckpointStore(ABC):
    """CheckpointStore deals with the interaction with the chosen storage service.
    It can list and claim partition ownerships; and list and save checkpoints.
    """
    # NOTE: the abstract coroutines below intentionally have docstring-only
    # bodies; concrete subclasses must override all four methods.
    @abstractmethod
    async def list_ownership(
        self,
        fully_qualified_namespace: str,
        eventhub_name: str,
        consumer_group: str
    ) -> Iterable[Dict[str, Any]]:
        """Retrieves a complete ownership list from the chosen storage service.
        :param str fully_qualified_namespace: The fully qualified namespace that the Event Hub belongs to.
         The format is like "<namespace>.servicebus.windows.net"
        :param str eventhub_name: The name of the specific Event Hub the partition ownerships are associated with,
         relative to the Event Hubs namespace that contains it.
        :param str consumer_group: The name of the consumer group the ownerships are associated with.
        :rtype: Iterable[Dict[str, Any]], Iterable of dictionaries containing partition ownership information:
                - `fully_qualified_namespace` (str): The fully qualified namespace that the Event Hub belongs to.
                  The format is like "<namespace>.servicebus.windows.net"
                - `eventhub_name` (str): The name of the specific Event Hub the checkpoint is associated with,
                  relative to the Event Hubs namespace that contains it.
                - `consumer_group` (str): The name of the consumer group the ownership are associated with.
                - `partition_id` (str): The partition ID which the checkpoint is created for.
                - `owner_id` (str): A UUID representing the current owner of this partition.
                - `last_modified_time` (UTC datetime.datetime): The last time this ownership was claimed.
                - `etag` (str): The Etag value for the last time this ownership was modified. Optional depending
                  on storage implementation.
        """
    @abstractmethod
    async def claim_ownership(self, ownership_list: Iterable[Dict[str, Any]]) -> Iterable[Dict[str, Any]]:
        """Tries to claim ownership for a list of specified partitions.
        :param Iterable[Dict[str,Any]] ownership_list: Iterable of dictionaries containing all the ownerships to claim.
        :rtype: Iterable[Dict[str,Any]], Iterable of dictionaries containing partition ownership information:
                - `fully_qualified_namespace` (str): The fully qualified namespace that the Event Hub belongs to.
                  The format is like "<namespace>.servicebus.windows.net"
                - `eventhub_name` (str): The name of the specific Event Hub the checkpoint is associated with,
                  relative to the Event Hubs namespace that contains it.
                - `consumer_group` (str): The name of the consumer group the ownership are associated with.
                - `partition_id` (str): The partition ID which the checkpoint is created for.
                - `owner_id` (str): A UUID representing the owner attempting to claim this partition.
                - `last_modified_time` (UTC datetime.datetime): The last time this ownership was claimed.
                - `etag` (str): The Etag value for the last time this ownership was modified. Optional depending
                  on storage implementation.
        """
    @abstractmethod
    async def update_checkpoint(self, checkpoint: Dict[str, Optional[Union[str, int]]]) -> None:
        """Updates the checkpoint using the given information for the offset, associated partition and
        consumer group in the chosen storage service.
        Note: If you plan to implement a custom checkpoint store with the intention of running between
        cross-language EventHubs SDKs, it is recommended to persist the offset value as an integer.
        :param Dict[str,Any] checkpoint: A dict containing checkpoint information:
                - `fully_qualified_namespace` (str): The fully qualified namespace that the Event Hub belongs to.
                  The format is like "<namespace>.servicebus.windows.net"
                - `eventhub_name` (str): The name of the specific Event Hub the checkpoint is associated with,
                  relative to the Event Hubs namespace that contains it.
                - `consumer_group` (str): The name of the consumer group the checkpoint is associated with.
                - `partition_id` (str): The partition ID which the checkpoint is created for.
                - `sequence_number` (int): The sequence number of the :class:`EventData<azure.eventhub.EventData>`
                  the new checkpoint will be associated with.
                - `offset` (str): The offset of the :class:`EventData<azure.eventhub.EventData>`
                  the new checkpoint will be associated with.
        :rtype: None
        """
    @abstractmethod
    async def list_checkpoints(
        self,
        fully_qualified_namespace: str,
        eventhub_name: str,
        consumer_group: str
    ) -> Iterable[Dict[str, Any]]:
        """List the updated checkpoints from the store.
        :param str fully_qualified_namespace: The fully qualified namespace that the Event Hub belongs to.
         The format is like "<namespace>.servicebus.windows.net"
        :param str eventhub_name: The name of the specific Event Hub the checkpoints are associated with, relative to
         the Event Hubs namespace that contains it.
        :param str consumer_group: The name of the consumer group the checkpoints are associated with.
        :rtype: Iterable[Dict[str,Any]], Iterable of dictionaries containing partition checkpoint information:
                - `fully_qualified_namespace` (str): The fully qualified namespace that the Event Hub belongs to.
                  The format is like "<namespace>.servicebus.windows.net"
                - `eventhub_name` (str): The name of the specific Event Hub the checkpoints are associated with,
                  relative to the Event Hubs namespace that contains it.
                - `consumer_group` (str): The name of the consumer group the checkpoints are associated with.
                - `partition_id` (str): The partition ID which the checkpoint is created for.
                - `sequence_number` (int): The sequence number of the :class:`EventData<azure.eventhub.EventData>`.
                - `offset` (str): The offset of the :class:`EventData<azure.eventhub.EventData>`.
        """
| 62.026786 | 119 | 0.653807 |
634501a61638daa57e1dee376eaa8929294ef681 | 1,853 | py | Python | cauliflowervest/server/handlers/rekey.py | isabella232/cauliflowervest | d3f52501ebed8b9a392350c8e177bbc602a6a09d | [
"Apache-2.0"
] | 292 | 2015-01-03T02:26:18.000Z | 2022-03-05T22:54:51.000Z | cauliflowervest/server/handlers/rekey.py | google/cauliflowervest | d3f52501ebed8b9a392350c8e177bbc602a6a09d | [
"Apache-2.0"
] | 15 | 2015-01-30T18:48:40.000Z | 2019-06-27T08:47:05.000Z | cauliflowervest/server/handlers/rekey.py | isabella232/cauliflowervest | d3f52501ebed8b9a392350c8e177bbc602a6a09d | [
"Apache-2.0"
] | 61 | 2015-01-24T09:23:58.000Z | 2022-03-08T14:12:14.000Z | # Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Provide passphrase status to client."""
import logging
from google.appengine.api import memcache
from cauliflowervest.server import util
from cauliflowervest.server.handlers import base_handler
from cauliflowervest.server.models import base
from cauliflowervest.server.models import util as models_util
class IsRekeyNeeded(base_handler.BaseHandler):
"""Check if rekeying needed."""
def get(self, type_name, target_id):
"""Handles GET requests."""
user = base.GetCurrentUser()
tag = self.request.get('tag', 'default')
entity = models_util.TypeNameToModel(
type_name).GetLatestForTarget(target_id, tag)
if not entity:
if memcache.Client().get(target_id, namespace='experimental_rekey'):
logging.info('experimental_rekey %s', target_id)
self.response.write(util.ToSafeJson('experimental'))
return
self.response.write(util.ToSafeJson(False))
return
if user.email not in entity.owners:
logging.warning(
'owner mismatch %s %s', entity.owners, user.email)
# Passphrase retrieval is necessary for rekeying so we abort.
self.response.write(util.ToSafeJson(False))
return
self.response.write(util.ToSafeJson(bool(entity.force_rekeying)))
| 34.962264 | 74 | 0.736643 |
b408e217a0e36e05a5668650cd1993550894fe4a | 1,391 | py | Python | test_get_references.py | mconlon17/vivo-foundation | 202f458bc72fb76c7d89240091c4fb00522cfe3f | [
"BSD-3-Clause"
] | null | null | null | test_get_references.py | mconlon17/vivo-foundation | 202f458bc72fb76c7d89240091c4fb00522cfe3f | [
"BSD-3-Clause"
] | 1 | 2015-04-04T01:38:51.000Z | 2015-04-04T01:38:51.000Z | tools/test_get_references.py | mconlon17/vivo-1.5-improvement | 44d8335eb7bbe518374a53c0e1f9f39014023ee7 | [
"BSD-3-Clause"
] | null | null | null | """
test_get_references.py -- Given a URI, get the references for the URI
Version 0.1 MC 2013-12-27
-- Initial version.
Version 0.2 MC 2014-09-18
-- Update for PEP 8 and Tools 2
"""
__author__ = "Michael Conlon"
__copyright__ = "Copyright 2014, University of Florida"
__license__ = "BSD 3-Clause license"
__version__ = "0.2"
from vivofoundation import get_references
from datetime import datetime
# Test cases for access and display functions
print datetime.now(), "Start"
print "\nDateTime"
print get_references("http://vivo.ufl.edu/individual/n7860108656")
print "\nDateTimeInterval"
print get_references("http://vivo.ufl.edu/individual/n182882417")
print "\nOrganization"
print get_references("http://vivo.ufl.edu/individual/n8763427")
print "\nAuthorship"
print get_references("http://vivo.ufl.edu/individual/n148010391")
print "\nRole"
print get_references("http://vivo.ufl.edu/individual/n1864549239")
print "\nPerson"
print get_references("http://vivo.ufl.edu/individual/n39051")
print "\nNot Found"
print get_references("http://vivo.ufl.edu/notfound")
print "\nPublication Venue"
print get_references("http://vivo.ufl.edu/individual/n378789540")
print "\nPaper"
print get_references("http://vivo.ufl.edu/individual/n4703866415")
print "\nGrant"
print get_references("http://vivo.ufl.edu/individual/n614029206")
print datetime.now(), "Finish"
| 25.290909 | 73 | 0.752696 |
046f59272fc71528f238ede7f7f7fa97b24ceaa1 | 567 | py | Python | 04 - Drawing canvas, timers/exercises/timers_circle.py | PableraShow/python-exercises | e1648fd42f3009ec6fb1e2096852b6d399e91d5b | [
"MIT"
] | 8 | 2018-10-01T17:35:57.000Z | 2022-02-01T08:12:12.000Z | 04 - Drawing canvas, timers/exercises/timers_circle.py | PableraShow/python-exercises | e1648fd42f3009ec6fb1e2096852b6d399e91d5b | [
"MIT"
] | null | null | null | 04 - Drawing canvas, timers/exercises/timers_circle.py | PableraShow/python-exercises | e1648fd42f3009ec6fb1e2096852b6d399e91d5b | [
"MIT"
] | 6 | 2018-07-22T19:15:21.000Z | 2022-02-05T07:54:58.000Z | """ Create a circle in the center of the canvas.
Use a timer to increase its radius one pixel every tenth of a second."""
import simplegui
WIDTH = 200
HEIGHT = 200
radius = 1
# Timer handler
def tick():
global radius
radius += 1
# Draw handler
def draw(canvas):
canvas.draw_circle([WIDTH / 2, HEIGHT / 2], radius, 1, 'White', 'White')
# Create frame and timer
# Create frame and timer. Use the WIDTH/HEIGHT constants (instead of the
# hard-coded 200, 200) so the frame size stays in sync with the circle
# centering math in draw(); the timer fires every 100 ms (a tenth of a second).
frame = simplegui.create_frame('Expanding circle', WIDTH, HEIGHT)
frame.set_draw_handler(draw)
timer = simplegui.create_timer(100, tick)
# Start the frame and the timer
frame.start()
timer.start()
| 20.25 | 77 | 0.691358 |
199df535d1c7e8a62efce606db7ed3c3e2ccfbda | 393 | py | Python | examples/pyp5js/gallery/sketch_006.py | MarcSkovMadsen/panel-sketch | 518cb177e2e8d1899b58f3a3f4d80d26c82ec760 | [
"MIT"
] | 5 | 2021-04-24T00:41:14.000Z | 2021-08-07T13:07:27.000Z | examples/pyp5js/gallery/sketch_006.py | MarcSkovMadsen/panel-sketch | 518cb177e2e8d1899b58f3a3f4d80d26c82ec760 | [
"MIT"
] | null | null | null | examples/pyp5js/gallery/sketch_006.py | MarcSkovMadsen/panel-sketch | 518cb177e2e8d1899b58f3a3f4d80d26c82ec760 | [
"MIT"
] | null | null | null | from pyp5js import *
r = None
def setup():
global r
createCanvas(900, 900)
r = random(100, 700)
noFill()
def draw():
x, y = 100, 100
rect(x, y, r, r)
def keyPressed():
console.log("Key pressed event")
if key == "n":
global r
r = random(100, 700)
redraw()
def mouseDragged():
global r
r = random(100, 700)
redraw()
| 12.28125 | 36 | 0.526718 |
b6114b684d44ab312a6246d357fefc8c61abbbe8 | 3,535 | py | Python | fortnitepy/enums.py | Chr0nicT/fortnitepy | a2d0a1a96935164cebe05c46448e36c83739e88b | [
"MIT"
] | null | null | null | fortnitepy/enums.py | Chr0nicT/fortnitepy | a2d0a1a96935164cebe05c46448e36c83739e88b | [
"MIT"
] | null | null | null | fortnitepy/enums.py | Chr0nicT/fortnitepy | a2d0a1a96935164cebe05c46448e36c83739e88b | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
MIT License
Copyright (c) 2019 Terbau
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from enum import Enum
class PartyPrivacy(Enum):
    """Preset privacy configurations for a party.
    Each member's value is the raw configuration dict; its keys control the
    party type plus who may see, join and send invites to the party.
    """
    PUBLIC = {
        'partyType': 'Public',
        'inviteRestriction': 'AnyMember',
        'onlyLeaderFriendsCanJoin': False,
        'presencePermission': 'Anyone',
        'invitePermission': 'Anyone',
        'acceptingMembers': True,
    }
    FRIENDS_ALLOW_FRIENDS_OF_FRIENDS = {
        'partyType': 'FriendsOnly',
        'inviteRestriction': 'AnyMember',
        'onlyLeaderFriendsCanJoin': False,
        'presencePermission': 'Anyone',
        'invitePermission': 'AnyMember',
        'acceptingMembers': True,
    }
    FRIENDS = {
        'partyType': 'FriendsOnly',
        'inviteRestriction': 'LeaderOnly',
        'onlyLeaderFriendsCanJoin': True,
        'presencePermission': 'Leader',
        'invitePermission': 'Leader',
        'acceptingMembers': False,
    }
    PRIVATE_ALLOW_FRIENDS_OF_FRIENDS = {
        'partyType': 'Private',
        'inviteRestriction': 'AnyMember',
        'onlyLeaderFriendsCanJoin': False,
        'presencePermission': 'Noone',
        'invitePermission': 'AnyMember',
        'acceptingMembers': False,
    }
    PRIVATE = {
        'partyType': 'Private',
        'inviteRestriction': 'LeaderOnly',
        'onlyLeaderFriendsCanJoin': True,
        'presencePermission': 'Noone',
        'invitePermission': 'Leader',
        'acceptingMembers': False,
    }
class DefaultCharacters(Enum):
    """The eight default (no-skin) character IDs."""
    CID_001_Athena_Commando_F_Default = 1
    CID_002_Athena_Commando_F_Default = 2
    CID_003_Athena_Commando_F_Default = 3
    CID_004_Athena_Commando_F_Default = 4
    CID_005_Athena_Commando_M_Default = 5
    CID_006_Athena_Commando_M_Default = 6
    CID_007_Athena_Commando_M_Default = 7
    CID_008_Athena_Commando_M_Default = 8
class V1Gamemode(Enum):
    """Game mode identifiers used by the v1 stats API."""
    SOLO = 'p2'
    DUO = 'p10'
    SQUAD = 'p9'
class V1Platform(Enum):
    """Platform identifiers used by the v1 stats API."""
    PC = 'pc'
    XBOX = 'xb1'
    PS4 = 'ps4'
class V1Window(Enum):
    """Time-window identifiers used by the v1 stats API."""
    ALLTIME = 'alltime'
    WEEKLY = 'weekly'
class V2Input(Enum):
    """Input-method identifiers used by the v2 stats API."""
    KEYBOARDANDMOUSE = 'keyboardmouse'
    GAMEPAD = 'gamepad'
    TOUCH = 'touch'
class Region(Enum):
    """Matchmaking region codes."""
    NAEAST = 'NAE'
    NAWEST = 'NAW'
    EUROPE = 'EU'
    BRAZIL = 'BR'
    OCEANIA = 'OCE'
    ASIA = 'ASIA'
    CHINA = 'CN'
# Platform short codes used by presence/stat endpoints.
Platform = Enum('Platform', {
    'WINDOWS': 'WIN',
    'MAC': 'MAC',
    'PLAYSTATION': 'PSN',
    'XBOX': 'XBL',
    'SWITCH': 'SWT',
    'IOS': 'IOS',
    'ANDROID': 'AND',
})
| 29.705882 | 78 | 0.657709 |
d14a53254abfa8a66b986d00638142715dfe4a60 | 928 | py | Python | isi_sdk_8_2_2/test/test_license_license_tier.py | mohitjain97/isilon_sdk_python | a371f438f542568edb8cda35e929e6b300b1177c | [
"Unlicense"
] | 24 | 2018-06-22T14:13:23.000Z | 2022-03-23T01:21:26.000Z | isi_sdk_8_2_2/test/test_license_license_tier.py | mohitjain97/isilon_sdk_python | a371f438f542568edb8cda35e929e6b300b1177c | [
"Unlicense"
] | 46 | 2018-04-30T13:28:22.000Z | 2022-03-21T21:11:07.000Z | isi_sdk_8_2_2/test/test_license_license_tier.py | mohitjain97/isilon_sdk_python | a371f438f542568edb8cda35e929e6b300b1177c | [
"Unlicense"
] | 29 | 2018-06-19T00:14:04.000Z | 2022-02-08T17:51:19.000Z | # coding: utf-8
"""
Isilon SDK
Isilon SDK - Language bindings for the OneFS API # noqa: E501
OpenAPI spec version: 9
Contact: sdk@isilon.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import isi_sdk_8_2_2
from isi_sdk_8_2_2.models.license_license_tier import LicenseLicenseTier # noqa: E501
from isi_sdk_8_2_2.rest import ApiException
class TestLicenseLicenseTier(unittest.TestCase):
    """LicenseLicenseTier unit test stubs.

    Swagger-codegen generated placeholder suite; the single test is a
    no-op until a model instance with mandatory attributes is filled in.
    """

    def setUp(self):
        # No fixtures required by the generated stub.
        pass

    def tearDown(self):
        # Nothing to clean up.
        pass

    def testLicenseLicenseTier(self):
        """Test LicenseLicenseTier"""
        # FIXME: construct object with mandatory attributes with example values
        # model = isi_sdk_8_2_2.models.license_license_tier.LicenseLicenseTier() # noqa: E501
        pass
# Allow running this test module directly (outside a test runner).
if __name__ == '__main__':
    unittest.main()
| 22.634146 | 94 | 0.713362 |
3322a482bd23b5ac8fd7c5dab12b8ef5946ea22b | 828 | py | Python | aalh_iit_jeep_001/download-image-files.py | johndewees/iitmigration | 4dadfbecda719d6e7d60af076a231aedec3c862f | [
"Unlicense"
] | null | null | null | aalh_iit_jeep_001/download-image-files.py | johndewees/iitmigration | 4dadfbecda719d6e7d60af076a231aedec3c862f | [
"Unlicense"
] | null | null | null | aalh_iit_jeep_001/download-image-files.py | johndewees/iitmigration | 4dadfbecda719d6e7d60af076a231aedec3c862f | [
"Unlicense"
] | null | null | null | from openpyxl import load_workbook
import urllib.request
# Download one image per metadata row of the collection spreadsheet.
# Fixes over the previous version: drops the unused minimumcol/maximumcol
# constants, the unused `cell` loop variable and `download` binding, the
# manual `iterationrow` counter, and the redundant per-row ws.cell() lookup
# that ignored the cell already yielded by iter_rows().
filename = 'aalh_iit_jeep_001.xlsx'
wb = load_workbook(filename)
ws = wb['Metadata Template']

# Worksheet layout: metadata rows span 7-394. Column 48 holds the source
# URL (or the literal sentinel 'SKIP' for rows without an image) and
# column 43 holds the local file name to save to.
minimumrow = 7
maximumrow = 394
urlcol = 48
filenamecol = 43

for row in ws.iter_rows(min_row=minimumrow, min_col=urlcol,
                        max_row=maximumrow, max_col=urlcol):
    # iter_rows is restricted to the URL column, so each row tuple
    # contains exactly one cell.
    urlcell = row[0]
    downloadurl = urlcell.value
    if downloadurl == 'SKIP':
        continue
    downloadfilename = ws.cell(row=urlcell.row, column=filenamecol).value
    urllib.request.urlretrieve(downloadurl, downloadfilename)

print('*****IMAGES DOWNLOADED*****')
926e78acee713c0d823922ef898fbdf3dccba2e5 | 168,317 | py | Python | tensorflow/python/feature_column/feature_column_test.py | tianhm/tensorflow | e55574f28257bdacd744dcdba86c839e661b1b2a | [
"Apache-2.0"
] | 47 | 2017-03-08T20:58:54.000Z | 2021-06-24T07:07:49.000Z | tensorflow/python/feature_column/feature_column_test.py | genSud/tensorflow | ec8216568d8cd9810004067558041c11a8356685 | [
"Apache-2.0"
] | 1 | 2019-07-11T16:29:54.000Z | 2019-07-11T16:29:54.000Z | tensorflow/python/feature_column/feature_column_test.py | genSud/tensorflow | ec8216568d8cd9810004067558041c11a8356685 | [
"Apache-2.0"
] | 19 | 2017-04-17T01:28:40.000Z | 2020-08-15T13:01:33.000Z | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for feature_column."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import copy
import numpy as np
from tensorflow.core.example import example_pb2
from tensorflow.core.example import feature_pb2
from tensorflow.python.client import session
from tensorflow.python.estimator.inputs import numpy_io
from tensorflow.python.feature_column import feature_column_lib as fc
from tensorflow.python.feature_column.feature_column import _CategoricalColumn
from tensorflow.python.feature_column.feature_column import _DenseColumn
from tensorflow.python.feature_column.feature_column import _FeatureColumn
from tensorflow.python.feature_column.feature_column import _LazyBuilder
from tensorflow.python.feature_column.feature_column import _transform_features
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import lookup_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables as variables_lib
from tensorflow.python.platform import test
from tensorflow.python.training import coordinator
from tensorflow.python.training import queue_runner_impl
def _initialized_session():
  """Returns a new Session after running variable and table initializers.

  Both global variables and lookup tables must be initialized before the
  feature-column tests can evaluate their tensors.
  """
  sess = session.Session()
  sess.run(variables_lib.global_variables_initializer())
  sess.run(lookup_ops.tables_initializer())
  return sess
class LazyColumnTest(test.TestCase):
  """Tests for `_LazyBuilder` caching, transform dispatch, and errors."""

  def test_transormations_called_once(self):
    # NOTE(review): method name typo ('transormations') kept — renaming
    # would change the discovered test identifier.

    class TransformCounter(_FeatureColumn):

      def __init__(self):
        self.num_transform = 0

      @property
      def name(self):
        return 'TransformCounter'

      def _transform_feature(self, cache):
        self.num_transform += 1  # Count transform calls.
        return cache.get('a')

      @property
      def _parse_example_spec(self):
        pass

    builder = _LazyBuilder(features={'a': [[2], [3.]]})
    column = TransformCounter()
    self.assertEqual(0, column.num_transform)
    builder.get(column)
    self.assertEqual(1, column.num_transform)
    # Second get() must be served from the cache without re-transforming.
    builder.get(column)
    self.assertEqual(1, column.num_transform)

  def test_returns_transform_output(self):

    class Transformer(_FeatureColumn):

      @property
      def name(self):
        return 'Transformer'

      def _transform_feature(self, cache):
        return 'Output'

      @property
      def _parse_example_spec(self):
        pass

    builder = _LazyBuilder(features={'a': [[2], [3.]]})
    column = Transformer()
    self.assertEqual('Output', builder.get(column))
    self.assertEqual('Output', builder.get(column))

  def test_does_not_pollute_given_features_dict(self):
    # The builder must cache transformed outputs internally, not by
    # writing new keys into the caller's features dict.

    class Transformer(_FeatureColumn):

      @property
      def name(self):
        return 'Transformer'

      def _transform_feature(self, cache):
        return 'Output'

      @property
      def _parse_example_spec(self):
        pass

    features = {'a': [[2], [3.]]}
    builder = _LazyBuilder(features=features)
    builder.get(Transformer())
    self.assertEqual(['a'], list(features.keys()))

  def test_error_if_feature_is_not_found(self):
    builder = _LazyBuilder(features={'a': [[2], [3.]]})
    with self.assertRaisesRegexp(ValueError,
                                 'bbb is not in features dictionary'):
      builder.get('bbb')

  def test_not_supported_feature_column(self):

    class NotAProperColumn(_FeatureColumn):

      @property
      def name(self):
        return 'NotAProperColumn'

      def _transform_feature(self, cache):
        # It should return not None.
        pass

      @property
      def _parse_example_spec(self):
        pass

    builder = _LazyBuilder(features={'a': [[2], [3.]]})
    with self.assertRaisesRegexp(ValueError,
                                 'NotAProperColumn is not supported'):
      builder.get(NotAProperColumn())

  def test_key_should_be_string_or_feature_colum(self):

    class NotAFeatureColumn(object):
      pass

    builder = _LazyBuilder(features={'a': [[2], [3.]]})
    with self.assertRaisesRegexp(
        TypeError, '"key" must be either a "str" or "_FeatureColumn".'):
      builder.get(NotAFeatureColumn())
class NumericColumnTest(test.TestCase):
  """Tests for `fc.numeric_column`: argument validation, parsing,
  normalization, and linear-model integration."""

  def test_defaults(self):
    a = fc.numeric_column('aaa')
    self.assertEqual('aaa', a.key)
    self.assertEqual((1,), a.shape)
    self.assertIsNone(a.default_value)
    self.assertEqual(dtypes.float32, a.dtype)
    self.assertIsNone(a.normalizer_fn)

  def test_shape_saved_as_tuple(self):
    a = fc.numeric_column('aaa', shape=[1, 2], default_value=[[3, 2.]])
    self.assertEqual((1, 2), a.shape)

  def test_default_value_saved_as_tuple(self):
    a = fc.numeric_column('aaa', default_value=4.)
    self.assertEqual((4.,), a.default_value)
    a = fc.numeric_column('aaa', shape=[1, 2], default_value=[[3, 2.]])
    self.assertEqual(((3., 2.),), a.default_value)

  def test_shape_and_default_value_compatibility(self):
    # default_value must match the declared shape exactly.
    fc.numeric_column('aaa', shape=[2], default_value=[1, 2.])
    with self.assertRaisesRegexp(ValueError, 'The shape of default_value'):
      fc.numeric_column('aaa', shape=[2], default_value=[1, 2, 3.])
    fc.numeric_column(
        'aaa', shape=[3, 2], default_value=[[2, 3], [1, 2], [2, 3.]])
    with self.assertRaisesRegexp(ValueError, 'The shape of default_value'):
      fc.numeric_column(
          'aaa', shape=[3, 1], default_value=[[2, 3], [1, 2], [2, 3.]])
    with self.assertRaisesRegexp(ValueError, 'The shape of default_value'):
      fc.numeric_column(
          'aaa', shape=[3, 3], default_value=[[2, 3], [1, 2], [2, 3.]])

  def test_default_value_type_check(self):
    fc.numeric_column(
        'aaa', shape=[2], default_value=[1, 2.], dtype=dtypes.float32)
    fc.numeric_column(
        'aaa', shape=[2], default_value=[1, 2], dtype=dtypes.int32)
    with self.assertRaisesRegexp(TypeError, 'must be compatible with dtype'):
      fc.numeric_column(
          'aaa', shape=[2], default_value=[1, 2.], dtype=dtypes.int32)
    with self.assertRaisesRegexp(TypeError,
                                 'default_value must be compatible with dtype'):
      fc.numeric_column('aaa', default_value=['string'])

  def test_shape_must_be_positive_integer(self):
    with self.assertRaisesRegexp(TypeError, 'shape dimensions must be integer'):
      fc.numeric_column(
          'aaa', shape=[
              1.0,
          ])

    with self.assertRaisesRegexp(ValueError,
                                 'shape dimensions must be greater than 0'):
      fc.numeric_column(
          'aaa', shape=[
              0,
          ])

  def test_dtype_is_convertable_to_float(self):
    with self.assertRaisesRegexp(ValueError,
                                 'dtype must be convertible to float'):
      fc.numeric_column('aaa', dtype=dtypes.string)

  def test_scalar_deafult_value_fills_the_shape(self):
    # A scalar default_value is broadcast to the full declared shape.
    a = fc.numeric_column('aaa', shape=[2, 3], default_value=2.)
    self.assertEqual(((2., 2., 2.), (2., 2., 2.)), a.default_value)

  def test_parse_spec(self):
    a = fc.numeric_column('aaa', shape=[2, 3], dtype=dtypes.int32)
    self.assertEqual({
        'aaa': parsing_ops.FixedLenFeature((2, 3), dtype=dtypes.int32)
    }, a._parse_example_spec)

  def test_parse_example_no_default_value(self):
    price = fc.numeric_column('price', shape=[2])
    data = example_pb2.Example(features=feature_pb2.Features(
        feature={
            'price':
                feature_pb2.Feature(float_list=feature_pb2.FloatList(
                    value=[20., 110.]))
        }))
    features = parsing_ops.parse_example(
        serialized=[data.SerializeToString()],
        features=fc.make_parse_example_spec([price]))
    self.assertIn('price', features)
    with self.test_session():
      self.assertAllEqual([[20., 110.]], features['price'].eval())

  def test_parse_example_with_default_value(self):
    price = fc.numeric_column('price', shape=[2], default_value=11.)
    data = example_pb2.Example(features=feature_pb2.Features(
        feature={
            'price':
                feature_pb2.Feature(float_list=feature_pb2.FloatList(
                    value=[20., 110.]))
        }))
    # Second Example lacks a 'price' feature, so parsing falls back to
    # the column's default_value.
    no_data = example_pb2.Example(features=feature_pb2.Features(
        feature={
            'something_else':
                feature_pb2.Feature(float_list=feature_pb2.FloatList(
                    value=[20., 110.]))
        }))
    features = parsing_ops.parse_example(
        serialized=[data.SerializeToString(),
                    no_data.SerializeToString()],
        features=fc.make_parse_example_spec([price]))
    self.assertIn('price', features)
    with self.test_session():
      self.assertAllEqual([[20., 110.], [11., 11.]], features['price'].eval())

  def test_normalizer_fn_must_be_callable(self):
    with self.assertRaisesRegexp(TypeError, 'must be a callable'):
      fc.numeric_column('price', normalizer_fn='NotACallable')

  def test_normalizer_fn_transform_feature(self):

    def _increment_two(input_tensor):
      return input_tensor + 2.

    price = fc.numeric_column('price', shape=[2], normalizer_fn=_increment_two)
    output = _transform_features({'price': [[1., 2.], [5., 6.]]}, [price])
    with self.test_session():
      self.assertAllEqual([[3., 4.], [7., 8.]], output[price].eval())

  def test_get_dense_tensor(self):

    def _increment_two(input_tensor):
      return input_tensor + 2.

    price = fc.numeric_column('price', shape=[2], normalizer_fn=_increment_two)
    builder = _LazyBuilder({'price': [[1., 2.], [5., 6.]]})
    self.assertEqual(builder.get(price), price._get_dense_tensor(builder))

  def test_sparse_tensor_not_supported(self):
    price = fc.numeric_column('price')
    builder = _LazyBuilder({
        'price':
            sparse_tensor.SparseTensor(
                indices=[[0, 0]], values=[0.3], dense_shape=[1, 1])
    })
    with self.assertRaisesRegexp(ValueError, 'must be a Tensor'):
      price._transform_feature(builder)

  def test_deep_copy(self):
    a = fc.numeric_column('aaa', shape=[1, 2], default_value=[[3., 2.]])
    a_copy = copy.deepcopy(a)
    self.assertEqual(a_copy.name, 'aaa')
    self.assertEqual(a_copy.shape, (1, 2))
    self.assertEqual(a_copy.default_value, ((3., 2.),))

  def test_numpy_default_value(self):
    a = fc.numeric_column(
        'aaa', shape=[1, 2], default_value=np.array([[3., 2.]]))
    self.assertEqual(a.default_value, ((3., 2.),))

  def test_linear_model(self):
    price = fc.numeric_column('price')
    with ops.Graph().as_default():
      features = {'price': [[1.], [5.]]}
      predictions = fc.linear_model(features, [price])
      bias = get_linear_model_bias()
      price_var = get_linear_model_column_var(price)
      with _initialized_session() as sess:
        self.assertAllClose([0.], bias.eval())
        self.assertAllClose([[0.]], price_var.eval())
        self.assertAllClose([[0.], [0.]], predictions.eval())
        sess.run(price_var.assign([[10.]]))
        # predictions = price * weight (bias still 0).
        self.assertAllClose([[10.], [50.]], predictions.eval())
class BucketizedColumnTest(test.TestCase):
  """Tests for `fc.bucketized_column`: validation, dense/sparse outputs,
  and linear-model integration."""

  def test_invalid_source_column_type(self):
    a = fc.categorical_column_with_hash_bucket('aaa', hash_bucket_size=10)
    with self.assertRaisesRegexp(
        ValueError,
        'source_column must be a column generated with numeric_column'):
      fc.bucketized_column(a, boundaries=[0, 1])

  def test_invalid_source_column_shape(self):
    a = fc.numeric_column('aaa', shape=[2, 3])
    with self.assertRaisesRegexp(
        ValueError, 'source_column must be one-dimensional column'):
      fc.bucketized_column(a, boundaries=[0, 1])

  def test_invalid_boundaries(self):
    a = fc.numeric_column('aaa')
    with self.assertRaisesRegexp(
        ValueError, 'boundaries must be a sorted list'):
      fc.bucketized_column(a, boundaries=None)
    with self.assertRaisesRegexp(
        ValueError, 'boundaries must be a sorted list'):
      fc.bucketized_column(a, boundaries=1.)
    with self.assertRaisesRegexp(
        ValueError, 'boundaries must be a sorted list'):
      fc.bucketized_column(a, boundaries=[1, 0])
    with self.assertRaisesRegexp(
        ValueError, 'boundaries must be a sorted list'):
      fc.bucketized_column(a, boundaries=[1, 1])

  def test_name(self):
    a = fc.numeric_column('aaa', dtype=dtypes.int32)
    b = fc.bucketized_column(a, boundaries=[0, 1])
    self.assertEqual('aaa_bucketized', b.name)

  def test_parse_spec(self):
    a = fc.numeric_column('aaa', shape=[2], dtype=dtypes.int32)
    b = fc.bucketized_column(a, boundaries=[0, 1])
    self.assertEqual({
        'aaa': parsing_ops.FixedLenFeature((2,), dtype=dtypes.int32)
    }, b._parse_example_spec)

  def test_variable_shape(self):
    a = fc.numeric_column('aaa', shape=[2], dtype=dtypes.int32)
    b = fc.bucketized_column(a, boundaries=[0, 1])
    # Column 'aaa` has shape [2] times three buckets -> variable_shape=[2, 3].
    self.assertAllEqual((2, 3), b._variable_shape)

  def test_num_buckets(self):
    a = fc.numeric_column('aaa', shape=[2], dtype=dtypes.int32)
    b = fc.bucketized_column(a, boundaries=[0, 1])
    # Column 'aaa` has shape [2] times three buckets -> num_buckets=6.
    self.assertEqual(6, b._num_buckets)

  def test_parse_example(self):
    price = fc.numeric_column('price', shape=[2])
    bucketized_price = fc.bucketized_column(price, boundaries=[0, 50])
    data = example_pb2.Example(features=feature_pb2.Features(
        feature={
            'price':
                feature_pb2.Feature(float_list=feature_pb2.FloatList(
                    value=[20., 110.]))
        }))
    features = parsing_ops.parse_example(
        serialized=[data.SerializeToString()],
        features=fc.make_parse_example_spec([bucketized_price]))
    self.assertIn('price', features)
    with self.test_session():
      self.assertAllEqual([[20., 110.]], features['price'].eval())

  def test_transform_feature(self):
    price = fc.numeric_column('price', shape=[2])
    bucketized_price = fc.bucketized_column(price, boundaries=[0, 2, 4, 6])
    with ops.Graph().as_default():
      transformed_tensor = _transform_features({
          'price': [[-1., 1.], [5., 6.]]
      }, [bucketized_price])
      with _initialized_session():
        self.assertAllEqual([[0, 1], [3, 4]],
                            transformed_tensor[bucketized_price].eval())

  def test_get_dense_tensor_one_input_value(self):
    """Tests _get_dense_tensor() for input with shape=[1]."""
    price = fc.numeric_column('price', shape=[1])
    bucketized_price = fc.bucketized_column(price, boundaries=[0, 2, 4, 6])
    with ops.Graph().as_default():
      builder = _LazyBuilder({'price': [[-1.], [1.], [5.], [6.]]})
      with _initialized_session():
        bucketized_price_tensor = bucketized_price._get_dense_tensor(builder)
        self.assertAllClose(
            # One-hot tensor.
            [[[1., 0., 0., 0., 0.]],
             [[0., 1., 0., 0., 0.]],
             [[0., 0., 0., 1., 0.]],
             [[0., 0., 0., 0., 1.]]],
            bucketized_price_tensor.eval())

  def test_get_dense_tensor_two_input_values(self):
    """Tests _get_dense_tensor() for input with shape=[2]."""
    price = fc.numeric_column('price', shape=[2])
    bucketized_price = fc.bucketized_column(price, boundaries=[0, 2, 4, 6])
    with ops.Graph().as_default():
      builder = _LazyBuilder({'price': [[-1., 1.], [5., 6.]]})
      with _initialized_session():
        bucketized_price_tensor = bucketized_price._get_dense_tensor(builder)
        self.assertAllClose(
            # One-hot tensor.
            [[[1., 0., 0., 0., 0.], [0., 1., 0., 0., 0.]],
             [[0., 0., 0., 1., 0.], [0., 0., 0., 0., 1.]]],
            bucketized_price_tensor.eval())

  def test_get_sparse_tensors_one_input_value(self):
    """Tests _get_sparse_tensors() for input with shape=[1]."""
    price = fc.numeric_column('price', shape=[1])
    bucketized_price = fc.bucketized_column(price, boundaries=[0, 2, 4, 6])
    with ops.Graph().as_default():
      builder = _LazyBuilder({'price': [[-1.], [1.], [5.], [6.]]})
      with _initialized_session() as sess:
        id_weight_pair = bucketized_price._get_sparse_tensors(builder)
        self.assertIsNone(id_weight_pair.weight_tensor)
        id_tensor_value = sess.run(id_weight_pair.id_tensor)
        self.assertAllEqual(
            [[0, 0], [1, 0], [2, 0], [3, 0]], id_tensor_value.indices)
        self.assertAllEqual([0, 1, 3, 4], id_tensor_value.values)
        self.assertAllEqual([4, 1], id_tensor_value.dense_shape)

  def test_get_sparse_tensors_two_input_values(self):
    """Tests _get_sparse_tensors() for input with shape=[2]."""
    price = fc.numeric_column('price', shape=[2])
    bucketized_price = fc.bucketized_column(price, boundaries=[0, 2, 4, 6])
    with ops.Graph().as_default():
      builder = _LazyBuilder({'price': [[-1., 1.], [5., 6.]]})
      with _initialized_session() as sess:
        id_weight_pair = bucketized_price._get_sparse_tensors(builder)
        self.assertIsNone(id_weight_pair.weight_tensor)
        id_tensor_value = sess.run(id_weight_pair.id_tensor)
        self.assertAllEqual(
            [[0, 0], [0, 1], [1, 0], [1, 1]], id_tensor_value.indices)
        # Values 0-4 correspond to the first column of the input price.
        # Values 5-9 correspond to the second column of the input price.
        self.assertAllEqual([0, 6, 3, 9], id_tensor_value.values)
        self.assertAllEqual([2, 2], id_tensor_value.dense_shape)

  def test_sparse_tensor_input_not_supported(self):
    price = fc.numeric_column('price')
    bucketized_price = fc.bucketized_column(price, boundaries=[0, 1])
    builder = _LazyBuilder({
        'price':
            sparse_tensor.SparseTensor(
                indices=[[0, 0]], values=[0.3], dense_shape=[1, 1])
    })
    with self.assertRaisesRegexp(ValueError, 'must be a Tensor'):
      bucketized_price._transform_feature(builder)

  def test_deep_copy(self):
    a = fc.numeric_column('aaa', shape=[2])
    a_bucketized = fc.bucketized_column(a, boundaries=[0, 1])
    a_bucketized_copy = copy.deepcopy(a_bucketized)
    self.assertEqual(a_bucketized_copy.name, 'aaa_bucketized')
    self.assertAllEqual(a_bucketized_copy._variable_shape, (2, 3))
    self.assertEqual(a_bucketized_copy.boundaries, (0, 1))

  def test_linear_model_one_input_value(self):
    """Tests linear_model() for input with shape=[1]."""
    price = fc.numeric_column('price', shape=[1])
    bucketized_price = fc.bucketized_column(price, boundaries=[0, 2, 4, 6])
    with ops.Graph().as_default():
      features = {'price': [[-1.], [1.], [5.], [6.]]}
      predictions = fc.linear_model(features, [bucketized_price])
      bias = get_linear_model_bias()
      bucketized_price_var = get_linear_model_column_var(bucketized_price)
      with _initialized_session() as sess:
        self.assertAllClose([0.], bias.eval())
        # One weight variable per bucket, all initialized to zero.
        self.assertAllClose(
            [[0.], [0.], [0.], [0.], [0.]], bucketized_price_var.eval())
        self.assertAllClose([[0.], [0.], [0.], [0.]], predictions.eval())
        sess.run(bucketized_price_var.assign(
            [[10.], [20.], [30.], [40.], [50.]]))
        # price -1. is in the 0th bucket, whose weight is 10.
        # price 1. is in the 1st bucket, whose weight is 20.
        # price 5. is in the 3rd bucket, whose weight is 40.
        # price 6. is in the 4th bucket, whose weight is 50.
        self.assertAllClose([[10.], [20.], [40.], [50.]], predictions.eval())
        sess.run(bias.assign([1.]))
        self.assertAllClose([[11.], [21.], [41.], [51.]], predictions.eval())

  def test_linear_model_two_input_values(self):
    """Tests linear_model() for input with shape=[2]."""
    price = fc.numeric_column('price', shape=[2])
    bucketized_price = fc.bucketized_column(price, boundaries=[0, 2, 4, 6])
    with ops.Graph().as_default():
      features = {'price': [[-1., 1.], [5., 6.]]}
      predictions = fc.linear_model(features, [bucketized_price])
      bias = get_linear_model_bias()
      bucketized_price_var = get_linear_model_column_var(bucketized_price)
      with _initialized_session() as sess:
        self.assertAllClose([0.], bias.eval())
        # One weight per bucket per input column, all initialized to zero.
        self.assertAllClose(
            [[0.], [0.], [0.], [0.], [0.], [0.], [0.], [0.], [0.], [0.]],
            bucketized_price_var.eval())
        self.assertAllClose([[0.], [0.]], predictions.eval())
        sess.run(bucketized_price_var.assign(
            [[10.], [20.], [30.], [40.], [50.],
             [60.], [70.], [80.], [90.], [100.]]))
        # 1st example:
        #   price -1. is in the 0th bucket, whose weight is 10.
        #   price 1. is in the 6th bucket, whose weight is 70.
        # 2nd example:
        #   price 5. is in the 3rd bucket, whose weight is 40.
        #   price 6. is in the 9th bucket, whose weight is 100.
        self.assertAllClose([[80.], [140.]], predictions.eval())
        sess.run(bias.assign([1.]))
        self.assertAllClose([[81.], [141.]], predictions.eval())
class HashedCategoricalColumnTest(test.TestCase):
  """Tests for `fc.categorical_column_with_hash_bucket`: validation,
  hashing of string/int inputs, sparse outputs, linear-model integration."""

  def test_defaults(self):
    a = fc.categorical_column_with_hash_bucket('aaa', 10)
    self.assertEqual('aaa', a.name)
    self.assertEqual('aaa', a.key)
    self.assertEqual(10, a.hash_bucket_size)
    self.assertEqual(dtypes.string, a.dtype)

  def test_bucket_size_should_be_given(self):
    with self.assertRaisesRegexp(ValueError, 'hash_bucket_size must be set.'):
      fc.categorical_column_with_hash_bucket('aaa', None)

  def test_bucket_size_should_be_positive(self):
    with self.assertRaisesRegexp(ValueError,
                                 'hash_bucket_size must be at least 1'):
      fc.categorical_column_with_hash_bucket('aaa', 0)

  def test_dtype_should_be_string_or_integer(self):
    fc.categorical_column_with_hash_bucket('aaa', 10, dtype=dtypes.string)
    fc.categorical_column_with_hash_bucket('aaa', 10, dtype=dtypes.int32)
    with self.assertRaisesRegexp(ValueError, 'dtype must be string or integer'):
      fc.categorical_column_with_hash_bucket('aaa', 10, dtype=dtypes.float32)

  def test_deep_copy(self):
    original = fc.categorical_column_with_hash_bucket('aaa', 10)
    for column in (original, copy.deepcopy(original)):
      self.assertEqual('aaa', column.name)
      self.assertEqual(10, column.hash_bucket_size)
      self.assertEqual(10, column._num_buckets)
      self.assertEqual(dtypes.string, column.dtype)

  def test_parse_spec_string(self):
    a = fc.categorical_column_with_hash_bucket('aaa', 10)
    self.assertEqual({
        'aaa': parsing_ops.VarLenFeature(dtypes.string)
    }, a._parse_example_spec)

  def test_parse_spec_int(self):
    a = fc.categorical_column_with_hash_bucket('aaa', 10, dtype=dtypes.int32)
    self.assertEqual({
        'aaa': parsing_ops.VarLenFeature(dtypes.int32)
    }, a._parse_example_spec)

  def test_parse_example(self):
    a = fc.categorical_column_with_hash_bucket('aaa', 10)
    data = example_pb2.Example(features=feature_pb2.Features(
        feature={
            'aaa':
                feature_pb2.Feature(bytes_list=feature_pb2.BytesList(
                    value=[b'omar', b'stringer']))
        }))
    features = parsing_ops.parse_example(
        serialized=[data.SerializeToString()],
        features=fc.make_parse_example_spec([a]))
    self.assertIn('aaa', features)
    with self.test_session():
      _assert_sparse_tensor_value(
          self,
          sparse_tensor.SparseTensorValue(
              indices=[[0, 0], [0, 1]],
              values=np.array([b'omar', b'stringer'], dtype=np.object_),
              dense_shape=[1, 2]),
          features['aaa'].eval())

  def test_strings_should_be_hashed(self):
    hashed_sparse = fc.categorical_column_with_hash_bucket('wire', 10)
    wire_tensor = sparse_tensor.SparseTensor(
        values=['omar', 'stringer', 'marlo'],
        indices=[[0, 0], [1, 0], [1, 1]],
        dense_shape=[2, 2])
    outputs = _transform_features({'wire': wire_tensor}, [hashed_sparse])
    output = outputs[hashed_sparse]
    # Check exact hashed output. If hashing changes this test will break.
    expected_values = [6, 4, 1]
    with self.test_session():
      self.assertEqual(dtypes.int64, output.values.dtype)
      self.assertAllEqual(expected_values, output.values.eval())
      self.assertAllEqual(wire_tensor.indices.eval(), output.indices.eval())
      self.assertAllEqual(wire_tensor.dense_shape.eval(),
                          output.dense_shape.eval())

  def test_tensor_dtype_should_be_string_or_integer(self):
    string_fc = fc.categorical_column_with_hash_bucket(
        'a_string', 10, dtype=dtypes.string)
    int_fc = fc.categorical_column_with_hash_bucket(
        'a_int', 10, dtype=dtypes.int32)
    float_fc = fc.categorical_column_with_hash_bucket(
        'a_float', 10, dtype=dtypes.string)
    int_tensor = sparse_tensor.SparseTensor(
        values=[101],
        indices=[[0, 0]],
        dense_shape=[1, 1])
    string_tensor = sparse_tensor.SparseTensor(
        values=['101'],
        indices=[[0, 0]],
        dense_shape=[1, 1])
    float_tensor = sparse_tensor.SparseTensor(
        values=[101.],
        indices=[[0, 0]],
        dense_shape=[1, 1])
    builder = _LazyBuilder({
        'a_int': int_tensor,
        'a_string': string_tensor,
        'a_float': float_tensor
    })
    builder.get(string_fc)
    builder.get(int_fc)
    with self.assertRaisesRegexp(ValueError, 'dtype must be string or integer'):
      builder.get(float_fc)

  def test_dtype_should_match_with_tensor(self):
    hashed_sparse = fc.categorical_column_with_hash_bucket(
        'wire', 10, dtype=dtypes.int64)
    wire_tensor = sparse_tensor.SparseTensor(
        values=['omar'], indices=[[0, 0]], dense_shape=[1, 1])
    builder = _LazyBuilder({'wire': wire_tensor})
    with self.assertRaisesRegexp(ValueError, 'dtype must be compatible'):
      builder.get(hashed_sparse)

  def test_ints_should_be_hashed(self):
    hashed_sparse = fc.categorical_column_with_hash_bucket(
        'wire', 10, dtype=dtypes.int64)
    wire_tensor = sparse_tensor.SparseTensor(
        values=[101, 201, 301],
        indices=[[0, 0], [1, 0], [1, 1]],
        dense_shape=[2, 2])
    builder = _LazyBuilder({'wire': wire_tensor})
    output = builder.get(hashed_sparse)
    # Check exact hashed output. If hashing changes this test will break.
    expected_values = [3, 7, 5]
    with self.test_session():
      self.assertAllEqual(expected_values, output.values.eval())

  def test_int32_64_is_compatible(self):
    # int32 input values are accepted by an int64-dtyped column.
    hashed_sparse = fc.categorical_column_with_hash_bucket(
        'wire', 10, dtype=dtypes.int64)
    wire_tensor = sparse_tensor.SparseTensor(
        values=constant_op.constant([101, 201, 301], dtype=dtypes.int32),
        indices=[[0, 0], [1, 0], [1, 1]],
        dense_shape=[2, 2])
    builder = _LazyBuilder({'wire': wire_tensor})
    output = builder.get(hashed_sparse)
    # Check exact hashed output. If hashing changes this test will break.
    expected_values = [3, 7, 5]
    with self.test_session():
      self.assertAllEqual(expected_values, output.values.eval())

  def test_get_sparse_tensors(self):
    hashed_sparse = fc.categorical_column_with_hash_bucket('wire', 10)
    builder = _LazyBuilder({
        'wire':
            sparse_tensor.SparseTensor(
                values=['omar', 'stringer', 'marlo'],
                indices=[[0, 0], [1, 0], [1, 1]],
                dense_shape=[2, 2])
    })
    id_weight_pair = hashed_sparse._get_sparse_tensors(builder)
    self.assertIsNone(id_weight_pair.weight_tensor)
    self.assertEqual(builder.get(hashed_sparse), id_weight_pair.id_tensor)

  def test_get_sparse_tensors_weight_collections(self):
    # Hash-bucket columns create no variables, so nothing should be
    # added to the requested weight collections.
    column = fc.categorical_column_with_hash_bucket('aaa', 10)
    inputs = sparse_tensor.SparseTensor(
        values=['omar', 'stringer', 'marlo'],
        indices=[[0, 0], [1, 0], [1, 1]],
        dense_shape=[2, 2])
    column._get_sparse_tensors(
        _LazyBuilder({
            'aaa': inputs
        }), weight_collections=('my_weights',))

    self.assertItemsEqual(
        [], ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES))
    self.assertItemsEqual([], ops.get_collection('my_weights'))

  def test_get_sparse_tensors_dense_input(self):
    hashed_sparse = fc.categorical_column_with_hash_bucket('wire', 10)
    builder = _LazyBuilder({'wire': (('omar', ''), ('stringer', 'marlo'))})
    id_weight_pair = hashed_sparse._get_sparse_tensors(builder)
    self.assertIsNone(id_weight_pair.weight_tensor)
    self.assertEqual(builder.get(hashed_sparse), id_weight_pair.id_tensor)

  def test_linear_model(self):
    wire_column = fc.categorical_column_with_hash_bucket('wire', 4)
    self.assertEqual(4, wire_column._num_buckets)
    with ops.Graph().as_default():
      predictions = fc.linear_model({
          wire_column.name: sparse_tensor.SparseTensorValue(
              indices=((0, 0), (1, 0), (1, 1)),
              values=('marlo', 'skywalker', 'omar'),
              dense_shape=(2, 2))
      }, (wire_column,))
      bias = get_linear_model_bias()
      wire_var = get_linear_model_column_var(wire_column)
      with _initialized_session():
        self.assertAllClose((0.,), bias.eval())
        self.assertAllClose(((0.,), (0.,), (0.,), (0.,)), wire_var.eval())
        self.assertAllClose(((0.,), (0.,)), predictions.eval())
        wire_var.assign(((1.,), (2.,), (3.,), (4.,))).eval()
        # 'marlo' -> 3: wire_var[3] = 4
        # 'skywalker' -> 2, 'omar' -> 2: wire_var[2] + wire_var[2] = 3+3 = 6
        self.assertAllClose(((4.,), (6.,)), predictions.eval())
class CrossedColumnTest(test.TestCase):
  """Tests for `fc.crossed_column`.

  Covers constructor validation (keys, hash_bucket_size), the derived column
  name (alphabetical over leaf keys), parse spec, deep-copy behavior, the
  transformation of crossed features into hashed sparse ids, and integration
  with `fc.linear_model`.
  """

  def test_keys_empty(self):
    # A cross needs at least two keys; empty list must be rejected.
    with self.assertRaisesRegexp(
        ValueError, 'keys must be a list with length > 1'):
      fc.crossed_column([], 10)
  def test_keys_length_one(self):
    # A single key is not a cross; must be rejected with the same message.
    with self.assertRaisesRegexp(
        ValueError, 'keys must be a list with length > 1'):
      fc.crossed_column(['a'], 10)
  def test_key_type_unsupported(self):
    with self.assertRaisesRegexp(ValueError, 'Unsupported key type'):
      fc.crossed_column(['a', fc.numeric_column('c')], 10)
    # Hash-bucket categorical columns are explicitly disallowed as keys.
    with self.assertRaisesRegexp(
        ValueError, 'categorical_column_with_hash_bucket is not supported'):
      fc.crossed_column(
          ['a', fc.categorical_column_with_hash_bucket('c', 10)], 10)
  def test_hash_bucket_size_negative(self):
    with self.assertRaisesRegexp(
        ValueError, 'hash_bucket_size must be > 1'):
      fc.crossed_column(['a', 'c'], -1)
  def test_hash_bucket_size_zero(self):
    with self.assertRaisesRegexp(
        ValueError, 'hash_bucket_size must be > 1'):
      fc.crossed_column(['a', 'c'], 0)
  def test_hash_bucket_size_none(self):
    with self.assertRaisesRegexp(
        ValueError, 'hash_bucket_size must be > 1'):
      fc.crossed_column(['a', 'c'], None)
  def test_name(self):
    a = fc.numeric_column('a', dtype=dtypes.int32)
    b = fc.bucketized_column(a, boundaries=[0, 1])
    crossed1 = fc.crossed_column(['d1', 'd2'], 10)
    crossed2 = fc.crossed_column([b, 'c', crossed1], 10)
    # Name is built from the leaf keys joined with '_X_'.
    self.assertEqual('a_bucketized_X_c_X_d1_X_d2', crossed2.name)
  def test_name_ordered_alphabetically(self):
    """Tests that the name does not depend on the order of given columns."""
    a = fc.numeric_column('a', dtype=dtypes.int32)
    b = fc.bucketized_column(a, boundaries=[0, 1])
    crossed1 = fc.crossed_column(['d1', 'd2'], 10)
    crossed2 = fc.crossed_column([crossed1, 'c', b], 10)
    self.assertEqual('a_bucketized_X_c_X_d1_X_d2', crossed2.name)
  def test_name_leaf_keys_ordered_alphabetically(self):
    """Tests that the name does not depend on the order of given columns."""
    a = fc.numeric_column('a', dtype=dtypes.int32)
    b = fc.bucketized_column(a, boundaries=[0, 1])
    crossed1 = fc.crossed_column(['d2', 'c'], 10)
    crossed2 = fc.crossed_column([crossed1, 'd1', b], 10)
    self.assertEqual('a_bucketized_X_c_X_d1_X_d2', crossed2.name)
  def test_parse_spec(self):
    a = fc.numeric_column('a', shape=[2], dtype=dtypes.int32)
    b = fc.bucketized_column(a, boundaries=[0, 1])
    crossed = fc.crossed_column([b, 'c'], 10)
    # Parse spec merges specs of the crossed inputs: fixed-len for the
    # numeric source of the bucketized column, var-len string for raw key 'c'.
    self.assertEqual({
        'a': parsing_ops.FixedLenFeature((2,), dtype=dtypes.int32),
        'c': parsing_ops.VarLenFeature(dtypes.string),
    }, crossed._parse_example_spec)
  def test_num_buckets(self):
    a = fc.numeric_column('a', shape=[2], dtype=dtypes.int32)
    b = fc.bucketized_column(a, boundaries=[0, 1])
    crossed = fc.crossed_column([b, 'c'], 15)
    # _num_buckets mirrors hash_bucket_size.
    self.assertEqual(15, crossed._num_buckets)
  def test_deep_copy(self):
    a = fc.numeric_column('a', dtype=dtypes.int32)
    b = fc.bucketized_column(a, boundaries=[0, 1])
    crossed1 = fc.crossed_column(['d1', 'd2'], 10)
    crossed2 = fc.crossed_column([b, 'c', crossed1], 15, hash_key=5)
    crossed2_copy = copy.deepcopy(crossed2)
    # A deep copy preserves name, hash_bucket_size and hash_key.
    self.assertEqual('a_bucketized_X_c_X_d1_X_d2', crossed2_copy.name,)
    self.assertEqual(15, crossed2_copy.hash_bucket_size)
    self.assertEqual(5, crossed2_copy.hash_key)
  def test_parse_example(self):
    price = fc.numeric_column('price', shape=[2])
    bucketized_price = fc.bucketized_column(price, boundaries=[0, 50])
    price_cross_wire = fc.crossed_column([bucketized_price, 'wire'], 10)
    data = example_pb2.Example(features=feature_pb2.Features(
        feature={
            'price':
                feature_pb2.Feature(float_list=feature_pb2.FloatList(
                    value=[20., 110.])),
            'wire':
                feature_pb2.Feature(bytes_list=feature_pb2.BytesList(
                    value=[b'omar', b'stringer'])),
        }))
    features = parsing_ops.parse_example(
        serialized=[data.SerializeToString()],
        features=fc.make_parse_example_spec([price_cross_wire]))
    # Both source features of the cross must be present after parsing.
    self.assertIn('price', features)
    self.assertIn('wire', features)
    with self.test_session():
      self.assertAllEqual([[20., 110.]], features['price'].eval())
      wire_sparse = features['wire']
      self.assertAllEqual([[0, 0], [0, 1]], wire_sparse.indices.eval())
      # Use byte constants to pass the open-source test.
      self.assertAllEqual([b'omar', b'stringer'], wire_sparse.values.eval())
      self.assertAllEqual([1, 2], wire_sparse.dense_shape.eval())
  def test_transform_feature(self):
    price = fc.numeric_column('price', shape=[2])
    bucketized_price = fc.bucketized_column(price, boundaries=[0, 50])
    hash_bucket_size = 10
    price_cross_wire = fc.crossed_column(
        [bucketized_price, 'wire'], hash_bucket_size)
    features = {
        'price': constant_op.constant([[1., 2.], [5., 6.]]),
        'wire': sparse_tensor.SparseTensor(
            values=['omar', 'stringer', 'marlo'],
            indices=[[0, 0], [1, 0], [1, 1]],
            dense_shape=[2, 2]),
    }
    outputs = _transform_features(features, [price_cross_wire])
    output = outputs[price_cross_wire]
    with self.test_session() as sess:
      output_val = sess.run(output)
      # Row 0 has 1 'wire' value x 2 price buckets = 2 crossed ids;
      # row 1 has 2 'wire' values x 2 price buckets = 4 crossed ids.
      self.assertAllEqual(
          [[0, 0], [0, 1], [1, 0], [1, 1], [1, 2], [1, 3]], output_val.indices)
      for val in output_val.values:
        self.assertIn(val, list(range(hash_bucket_size)))
      self.assertAllEqual([2, 4], output_val.dense_shape)
  def test_get_sparse_tensors(self):
    a = fc.numeric_column('a', dtype=dtypes.int32, shape=(2,))
    b = fc.bucketized_column(a, boundaries=(0, 1))
    crossed1 = fc.crossed_column(['d1', 'd2'], 10)
    crossed2 = fc.crossed_column([b, 'c', crossed1], 15, hash_key=5)
    with ops.Graph().as_default():
      builder = _LazyBuilder({
          'a':
              constant_op.constant(((-1., .5), (.5, 1.))),
          'c':
              sparse_tensor.SparseTensor(
                  indices=((0, 0), (1, 0), (1, 1)),
                  values=['cA', 'cB', 'cC'],
                  dense_shape=(2, 2)),
          'd1':
              sparse_tensor.SparseTensor(
                  indices=((0, 0), (1, 0), (1, 1)),
                  values=['d1A', 'd1B', 'd1C'],
                  dense_shape=(2, 2)),
          'd2':
              sparse_tensor.SparseTensor(
                  indices=((0, 0), (1, 0), (1, 1)),
                  values=['d2A', 'd2B', 'd2C'],
                  dense_shape=(2, 2)),
      })
      id_weight_pair = crossed2._get_sparse_tensors(builder)
      with _initialized_session():
        id_tensor_eval = id_weight_pair.id_tensor.eval()
        self.assertAllEqual(
            ((0, 0), (0, 1), (1, 0), (1, 1), (1, 2), (1, 3), (1, 4), (1, 5),
             (1, 6), (1, 7), (1, 8), (1, 9), (1, 10), (1, 11), (1, 12), (1, 13),
             (1, 14), (1, 15)),
            id_tensor_eval.indices)
        # Check exact hashed output. If hashing changes this test will break.
        # All values are within [0, hash_bucket_size).
        expected_values = (
            6, 14, 0, 13, 8, 8, 10, 12, 2, 0, 1, 9, 8, 12, 2, 0, 10, 11)
        self.assertAllEqual(expected_values, id_tensor_eval.values)
        self.assertAllEqual((2, 16), id_tensor_eval.dense_shape)
  def test_get_sparse_tensors_simple(self):
    """Same as test_get_sparse_tensors, but with simpler values."""
    a = fc.numeric_column('a', dtype=dtypes.int32, shape=(2,))
    b = fc.bucketized_column(a, boundaries=(0, 1))
    crossed = fc.crossed_column([b, 'c'], hash_bucket_size=5, hash_key=5)
    with ops.Graph().as_default():
      builder = _LazyBuilder({
          'a':
              constant_op.constant(((-1., .5), (.5, 1.))),
          'c':
              sparse_tensor.SparseTensor(
                  indices=((0, 0), (1, 0), (1, 1)),
                  values=['cA', 'cB', 'cC'],
                  dense_shape=(2, 2)),
      })
      id_weight_pair = crossed._get_sparse_tensors(builder)
      with _initialized_session():
        id_tensor_eval = id_weight_pair.id_tensor.eval()
        self.assertAllEqual(
            ((0, 0), (0, 1), (1, 0), (1, 1), (1, 2), (1, 3)),
            id_tensor_eval.indices)
        # Check exact hashed output. If hashing changes this test will break.
        # All values are within [0, hash_bucket_size).
        expected_values = (1, 0, 1, 3, 4, 2)
        self.assertAllEqual(expected_values, id_tensor_eval.values)
        self.assertAllEqual((2, 4), id_tensor_eval.dense_shape)
  def test_linear_model(self):
    """Tests linear_model.

    Uses data from test_get_sparse_tensors_simple.
    """
    a = fc.numeric_column('a', dtype=dtypes.int32, shape=(2,))
    b = fc.bucketized_column(a, boundaries=(0, 1))
    crossed = fc.crossed_column([b, 'c'], hash_bucket_size=5, hash_key=5)
    with ops.Graph().as_default():
      predictions = fc.linear_model({
          'a': constant_op.constant(((-1., .5), (.5, 1.))),
          'c': sparse_tensor.SparseTensor(
              indices=((0, 0), (1, 0), (1, 1)),
              values=['cA', 'cB', 'cC'],
              dense_shape=(2, 2)),
      }, (crossed,))
      bias = get_linear_model_bias()
      crossed_var = get_linear_model_column_var(crossed)
      with _initialized_session() as sess:
        self.assertAllClose((0.,), bias.eval())
        self.assertAllClose(
            ((0.,), (0.,), (0.,), (0.,), (0.,)), crossed_var.eval())
        self.assertAllClose(((0.,), (0.,)), predictions.eval())
        sess.run(crossed_var.assign(((1.,), (2.,), (3.,), (4.,), (5.,))))
        # Expected ids after cross = (1, 0, 1, 3, 4, 2)
        # Row 0: w[1] + w[0] = 2 + 1 = 3; row 1: w[1]+w[3]+w[4]+w[2] = 14.
        self.assertAllClose(((3.,), (14.,)), predictions.eval())
        sess.run(bias.assign((.1,)))
        self.assertAllClose(((3.1,), (14.1,)), predictions.eval())
  def test_linear_model_with_weights(self):
    # crossed_column must reject categorical inputs that also carry a
    # weight tensor; this fake column produces one.
    class _TestColumnWithWeights(_CategoricalColumn):
      """Produces sparse IDs and sparse weights."""
      @property
      def name(self):
        return 'test_column'
      @property
      def _parse_example_spec(self):
        return {
            self.name: parsing_ops.VarLenFeature(dtypes.int32),
            '{}_weights'.format(self.name): parsing_ops.VarLenFeature(
                dtypes.float32),
            }
      @property
      def _num_buckets(self):
        return 5
      def _transform_feature(self, inputs):
        return (inputs.get(self.name),
                inputs.get('{}_weights'.format(self.name)))
      def _get_sparse_tensors(self, inputs, weight_collections=None,
                              trainable=None):
        """Populates both id_tensor and weight_tensor."""
        ids_and_weights = inputs.get(self)
        return _CategoricalColumn.IdWeightPair(
            id_tensor=ids_and_weights[0], weight_tensor=ids_and_weights[1])
    t = _TestColumnWithWeights()
    crossed = fc.crossed_column([t, 'c'], hash_bucket_size=5, hash_key=5)
    with ops.Graph().as_default():
      with self.assertRaisesRegexp(
          ValueError,
          'crossed_column does not support weight_tensor.*{}'.format(t.name)):
        fc.linear_model({
            t.name: sparse_tensor.SparseTensor(
                indices=((0, 0), (1, 0), (1, 1)),
                values=[0, 1, 2],
                dense_shape=(2, 2)),
            '{}_weights'.format(t.name): sparse_tensor.SparseTensor(
                indices=((0, 0), (1, 0), (1, 1)),
                values=[1., 10., 2.],
                dense_shape=(2, 2)),
            'c': sparse_tensor.SparseTensor(
                indices=((0, 0), (1, 0), (1, 1)),
                values=['cA', 'cB', 'cC'],
                dense_shape=(2, 2)),
        }, (crossed,))
def get_linear_model_bias():
  """Returns the shared bias variable created by `fc.linear_model`."""
  with variable_scope.variable_scope('linear_model', reuse=True):
    bias = variable_scope.get_variable('bias_weights')
  return bias
def get_linear_model_column_var(column):
  """Returns the first global weight variable `fc.linear_model` made for `column`."""
  scope_name = 'linear_model/' + column.name
  matches = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES, scope_name)
  return matches[0]
class LinearModelTest(test.TestCase):
  """Tests for `fc.linear_model`.

  Covers argument validation, dense and categorical (sparse) columns, bias
  handling, multi-output units, variable collections and trainability,
  deterministic (alphabetical) column ordering, static and run-time batch
  size checks, and 1-d / unknown-shape / rank-0 inputs.
  """

  def test_raises_if_empty_feature_columns(self):
    with self.assertRaisesRegexp(ValueError,
                                 'feature_columns must not be empty'):
      fc.linear_model(features={}, feature_columns=[])
  def test_should_be_feature_column(self):
    with self.assertRaisesRegexp(ValueError, 'must be a _FeatureColumn'):
      fc.linear_model(features={'a': [[0]]}, feature_columns='NotSupported')
  def test_should_be_dense_or_categorical_column(self):
    # Minimal _FeatureColumn that is neither dense nor categorical.
    class NotSupportedColumn(_FeatureColumn):
      @property
      def name(self):
        return 'NotSupportedColumn'
      def _transform_feature(self, cache):
        pass
      @property
      def _parse_example_spec(self):
        pass
    with self.assertRaisesRegexp(
        ValueError, 'must be either a _DenseColumn or _CategoricalColumn'):
      fc.linear_model(
          features={'a': [[0]]}, feature_columns=[NotSupportedColumn()])
  def test_does_not_support_dict_columns(self):
    with self.assertRaisesRegexp(
        ValueError, 'Expected feature_columns to be iterable, found dict.'):
      fc.linear_model(
          features={'a': [[0]]}, feature_columns={'a': fc.numeric_column('a')})
  def test_raises_if_duplicate_name(self):
    with self.assertRaisesRegexp(
        ValueError, 'Duplicate feature column name found for columns'):
      fc.linear_model(
          features={'a': [[0]]},
          feature_columns=[fc.numeric_column('a'),
                           fc.numeric_column('a')])
  def test_dense_bias(self):
    price = fc.numeric_column('price')
    with ops.Graph().as_default():
      features = {'price': [[1.], [5.]]}
      predictions = fc.linear_model(features, [price])
      bias = get_linear_model_bias()
      price_var = get_linear_model_column_var(price)
      with _initialized_session() as sess:
        self.assertAllClose([0.], bias.eval())
        sess.run(price_var.assign([[10.]]))
        sess.run(bias.assign([5.]))
        # prediction = price * weight + bias: 1*10+5=15, 5*10+5=55.
        self.assertAllClose([[15.], [55.]], predictions.eval())
  def test_sparse_bias(self):
    wire_cast = fc.categorical_column_with_hash_bucket('wire_cast', 4)
    with ops.Graph().as_default():
      wire_tensor = sparse_tensor.SparseTensor(
          values=['omar', 'stringer', 'marlo'],  # hashed to = [2, 0, 3]
          indices=[[0, 0], [1, 0], [1, 1]],
          dense_shape=[2, 2])
      features = {'wire_cast': wire_tensor}
      predictions = fc.linear_model(features, [wire_cast])
      bias = get_linear_model_bias()
      wire_cast_var = get_linear_model_column_var(wire_cast)
      with _initialized_session() as sess:
        self.assertAllClose([0.], bias.eval())
        self.assertAllClose([[0.], [0.], [0.], [0.]], wire_cast_var.eval())
        sess.run(wire_cast_var.assign([[10.], [100.], [1000.], [10000.]]))
        sess.run(bias.assign([5.]))
        # Row 0: w[2]+bias = 1005; row 1: w[0]+w[3]+bias = 10015.
        self.assertAllClose([[1005.], [10015.]], predictions.eval())
  def test_dense_and_sparse_bias(self):
    wire_cast = fc.categorical_column_with_hash_bucket('wire_cast', 4)
    price = fc.numeric_column('price')
    with ops.Graph().as_default():
      wire_tensor = sparse_tensor.SparseTensor(
          values=['omar', 'stringer', 'marlo'],  # hashed to = [2, 0, 3]
          indices=[[0, 0], [1, 0], [1, 1]],
          dense_shape=[2, 2])
      features = {'wire_cast': wire_tensor, 'price': [[1.], [5.]]}
      predictions = fc.linear_model(features, [wire_cast, price])
      bias = get_linear_model_bias()
      wire_cast_var = get_linear_model_column_var(wire_cast)
      price_var = get_linear_model_column_var(price)
      with _initialized_session() as sess:
        sess.run(wire_cast_var.assign([[10.], [100.], [1000.], [10000.]]))
        sess.run(bias.assign([5.]))
        sess.run(price_var.assign([[10.]]))
        self.assertAllClose([[1015.], [10065.]], predictions.eval())
  def test_dense_and_sparse_column(self):
    """When the column is both dense and sparse, uses sparse tensors."""
    class _DenseAndSparseColumn(_DenseColumn, _CategoricalColumn):
      @property
      def name(self):
        return 'dense_and_sparse_column'
      @property
      def _parse_example_spec(self):
        return {self.name: parsing_ops.VarLenFeature(self.dtype)}
      def _transform_feature(self, inputs):
        return inputs.get(self.name)
      @property
      def _variable_shape(self):
        raise ValueError('Should not use this method.')
      def _get_dense_tensor(self, inputs, weight_collections=None,
                            trainable=None):
        raise ValueError('Should not use this method.')
      @property
      def _num_buckets(self):
        return 4
      def _get_sparse_tensors(self, inputs, weight_collections=None,
                              trainable=None):
        sp_tensor = sparse_tensor.SparseTensor(
            indices=[[0, 0], [1, 0], [1, 1]],
            values=[2, 0, 3],
            dense_shape=[2, 2])
        return _CategoricalColumn.IdWeightPair(sp_tensor, None)
    dense_and_sparse_column = _DenseAndSparseColumn()
    with ops.Graph().as_default():
      sp_tensor = sparse_tensor.SparseTensor(
          values=['omar', 'stringer', 'marlo'],
          indices=[[0, 0], [1, 0], [1, 1]],
          dense_shape=[2, 2])
      features = {dense_and_sparse_column.name: sp_tensor}
      predictions = fc.linear_model(features, [dense_and_sparse_column])
      bias = get_linear_model_bias()
      dense_and_sparse_column_var = get_linear_model_column_var(
          dense_and_sparse_column)
      with _initialized_session() as sess:
        sess.run(dense_and_sparse_column_var.assign(
            [[10.], [100.], [1000.], [10000.]]))
        sess.run(bias.assign([5.]))
        # Same expectations as test_sparse_bias: the sparse path was taken.
        self.assertAllClose([[1005.], [10015.]], predictions.eval())
  def test_dense_multi_output(self):
    price = fc.numeric_column('price')
    with ops.Graph().as_default():
      features = {'price': [[1.], [5.]]}
      predictions = fc.linear_model(features, [price], units=3)
      bias = get_linear_model_bias()
      price_var = get_linear_model_column_var(price)
      with _initialized_session() as sess:
        self.assertAllClose(np.zeros((3,)), bias.eval())
        self.assertAllClose(np.zeros((1, 3)), price_var.eval())
        sess.run(price_var.assign([[10., 100., 1000.]]))
        sess.run(bias.assign([5., 6., 7.]))
        self.assertAllClose([[15., 106., 1007.], [55., 506., 5007.]],
                            predictions.eval())
  def test_sparse_multi_output(self):
    wire_cast = fc.categorical_column_with_hash_bucket('wire_cast', 4)
    with ops.Graph().as_default():
      wire_tensor = sparse_tensor.SparseTensor(
          values=['omar', 'stringer', 'marlo'],  # hashed to = [2, 0, 3]
          indices=[[0, 0], [1, 0], [1, 1]],
          dense_shape=[2, 2])
      features = {'wire_cast': wire_tensor}
      predictions = fc.linear_model(features, [wire_cast], units=3)
      bias = get_linear_model_bias()
      wire_cast_var = get_linear_model_column_var(wire_cast)
      with _initialized_session() as sess:
        self.assertAllClose(np.zeros((3,)), bias.eval())
        self.assertAllClose(np.zeros((4, 3)), wire_cast_var.eval())
        sess.run(
            wire_cast_var.assign([[10., 11., 12.], [100., 110., 120.], [
                1000., 1100., 1200.
            ], [10000., 11000., 12000.]]))
        sess.run(bias.assign([5., 6., 7.]))
        self.assertAllClose([[1005., 1106., 1207.], [10015., 11017., 12019.]],
                            predictions.eval())
  def test_dense_multi_dimension(self):
    price = fc.numeric_column('price', shape=2)
    with ops.Graph().as_default():
      features = {'price': [[1., 2.], [5., 6.]]}
      predictions = fc.linear_model(features, [price])
      price_var = get_linear_model_column_var(price)
      with _initialized_session() as sess:
        self.assertAllClose([[0.], [0.]], price_var.eval())
        sess.run(price_var.assign([[10.], [100.]]))
        # Dot product per row: 1*10+2*100=210, 5*10+6*100=650.
        self.assertAllClose([[210.], [650.]], predictions.eval())
  def test_sparse_multi_rank(self):
    wire_cast = fc.categorical_column_with_hash_bucket('wire_cast', 4)
    with ops.Graph().as_default():
      wire_tensor = array_ops.sparse_placeholder(dtypes.string)
      wire_value = sparse_tensor.SparseTensorValue(
          values=['omar', 'stringer', 'marlo', 'omar'],  # hashed = [2, 0, 3, 2]
          indices=[[0, 0, 0], [0, 1, 0], [1, 0, 0], [1, 0, 1]],
          dense_shape=[2, 2, 2])
      features = {'wire_cast': wire_tensor}
      predictions = fc.linear_model(features, [wire_cast])
      wire_cast_var = get_linear_model_column_var(wire_cast)
      with _initialized_session() as sess:
        self.assertAllClose(np.zeros((4, 1)), wire_cast_var.eval())
        self.assertAllClose(
            np.zeros((2, 1)),
            predictions.eval(feed_dict={wire_tensor: wire_value}))
        sess.run(wire_cast_var.assign([[10.], [100.], [1000.], [10000.]]))
        self.assertAllClose(
            [[1010.], [11000.]],
            predictions.eval(feed_dict={wire_tensor: wire_value}))
  def test_sparse_combiner(self):
    wire_cast = fc.categorical_column_with_hash_bucket('wire_cast', 4)
    with ops.Graph().as_default():
      wire_tensor = sparse_tensor.SparseTensor(
          values=['omar', 'stringer', 'marlo'],  # hashed to = [2, 0, 3]
          indices=[[0, 0], [1, 0], [1, 1]],
          dense_shape=[2, 2])
      features = {'wire_cast': wire_tensor}
      predictions = fc.linear_model(
          features, [wire_cast], sparse_combiner='mean')
      bias = get_linear_model_bias()
      wire_cast_var = get_linear_model_column_var(wire_cast)
      with _initialized_session() as sess:
        sess.run(wire_cast_var.assign([[10.], [100.], [1000.], [10000.]]))
        sess.run(bias.assign([5.]))
        # 'mean' averages the two ids in row 1: (w[0]+w[3])/2 + bias = 5010.
        self.assertAllClose([[1005.], [5010.]], predictions.eval())
  def test_dense_multi_dimension_multi_output(self):
    price = fc.numeric_column('price', shape=2)
    with ops.Graph().as_default():
      features = {'price': [[1., 2.], [5., 6.]]}
      predictions = fc.linear_model(features, [price], units=3)
      bias = get_linear_model_bias()
      price_var = get_linear_model_column_var(price)
      with _initialized_session() as sess:
        self.assertAllClose(np.zeros((3,)), bias.eval())
        self.assertAllClose(np.zeros((2, 3)), price_var.eval())
        sess.run(price_var.assign([[1., 2., 3.], [10., 100., 1000.]]))
        sess.run(bias.assign([2., 3., 4.]))
        self.assertAllClose([[23., 205., 2007.], [67., 613., 6019.]],
                            predictions.eval())
  def test_raises_if_shape_mismatch(self):
    price = fc.numeric_column('price', shape=2)
    with ops.Graph().as_default():
      features = {'price': [[1.], [5.]]}
      predictions = fc.linear_model(features, [price])
      with _initialized_session():
        with self.assertRaisesRegexp(Exception, 'requested shape has 4'):
          predictions.eval()
  def test_dense_reshaping(self):
    price = fc.numeric_column('price', shape=[1, 2])
    with ops.Graph().as_default():
      features = {'price': [[[1., 2.]], [[5., 6.]]]}
      predictions = fc.linear_model(features, [price])
      bias = get_linear_model_bias()
      price_var = get_linear_model_column_var(price)
      with _initialized_session() as sess:
        self.assertAllClose([0.], bias.eval())
        self.assertAllClose([[0.], [0.]], price_var.eval())
        self.assertAllClose([[0.], [0.]], predictions.eval())
        sess.run(price_var.assign([[10.], [100.]]))
        self.assertAllClose([[210.], [650.]], predictions.eval())
  def test_dense_multi_column(self):
    price1 = fc.numeric_column('price1', shape=2)
    price2 = fc.numeric_column('price2')
    with ops.Graph().as_default():
      features = {
          'price1': [[1., 2.], [5., 6.]],
          'price2': [[3.], [4.]]
      }
      predictions = fc.linear_model(features, [price1, price2])
      bias = get_linear_model_bias()
      price1_var = get_linear_model_column_var(price1)
      price2_var = get_linear_model_column_var(price2)
      with _initialized_session() as sess:
        self.assertAllClose([0.], bias.eval())
        self.assertAllClose([[0.], [0.]], price1_var.eval())
        self.assertAllClose([[0.]], price2_var.eval())
        self.assertAllClose([[0.], [0.]], predictions.eval())
        sess.run(price1_var.assign([[10.], [100.]]))
        sess.run(price2_var.assign([[1000.]]))
        sess.run(bias.assign([7.]))
        self.assertAllClose([[3217.], [4657.]], predictions.eval())
  def test_dense_collection(self):
    price = fc.numeric_column('price')
    with ops.Graph().as_default() as g:
      features = {'price': [[1.], [5.]]}
      fc.linear_model(features, [price], weight_collections=['my-vars'])
      my_vars = g.get_collection('my-vars')
      bias = get_linear_model_bias()
      price_var = get_linear_model_column_var(price)
      self.assertIn(bias, my_vars)
      self.assertIn(price_var, my_vars)
  def test_sparse_collection(self):
    wire_cast = fc.categorical_column_with_hash_bucket('wire_cast', 4)
    with ops.Graph().as_default() as g:
      wire_tensor = sparse_tensor.SparseTensor(
          values=['omar'], indices=[[0, 0]], dense_shape=[1, 1])
      features = {'wire_cast': wire_tensor}
      fc.linear_model(
          features, [wire_cast], weight_collections=['my-vars'])
      my_vars = g.get_collection('my-vars')
      bias = get_linear_model_bias()
      wire_cast_var = get_linear_model_column_var(wire_cast)
      self.assertIn(bias, my_vars)
      self.assertIn(wire_cast_var, my_vars)
  def test_dense_trainable_default(self):
    price = fc.numeric_column('price')
    with ops.Graph().as_default() as g:
      features = {'price': [[1.], [5.]]}
      fc.linear_model(features, [price])
      bias = get_linear_model_bias()
      price_var = get_linear_model_column_var(price)
      trainable_vars = g.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)
      self.assertIn(bias, trainable_vars)
      self.assertIn(price_var, trainable_vars)
  def test_sparse_trainable_default(self):
    wire_cast = fc.categorical_column_with_hash_bucket('wire_cast', 4)
    with ops.Graph().as_default() as g:
      wire_tensor = sparse_tensor.SparseTensor(
          values=['omar'], indices=[[0, 0]], dense_shape=[1, 1])
      features = {'wire_cast': wire_tensor}
      fc.linear_model(features, [wire_cast])
      trainable_vars = g.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)
      bias = get_linear_model_bias()
      wire_cast_var = get_linear_model_column_var(wire_cast)
      self.assertIn(bias, trainable_vars)
      self.assertIn(wire_cast_var, trainable_vars)
  def test_dense_trainable_false(self):
    price = fc.numeric_column('price')
    with ops.Graph().as_default() as g:
      features = {'price': [[1.], [5.]]}
      fc.linear_model(features, [price], trainable=False)
      trainable_vars = g.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)
      self.assertEqual([], trainable_vars)
  def test_sparse_trainable_false(self):
    wire_cast = fc.categorical_column_with_hash_bucket('wire_cast', 4)
    with ops.Graph().as_default() as g:
      wire_tensor = sparse_tensor.SparseTensor(
          values=['omar'], indices=[[0, 0]], dense_shape=[1, 1])
      features = {'wire_cast': wire_tensor}
      fc.linear_model(features, [wire_cast], trainable=False)
      trainable_vars = g.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)
      self.assertEqual([], trainable_vars)
  def test_column_order(self):
    # Variables are created in alphabetical column-name order regardless of
    # the order the columns are passed in.
    price_a = fc.numeric_column('price_a')
    price_b = fc.numeric_column('price_b')
    wire_cast = fc.categorical_column_with_hash_bucket('wire_cast', 4)
    with ops.Graph().as_default() as g:
      features = {
          'price_a': [[1.]],
          'price_b': [[3.]],
          'wire_cast':
              sparse_tensor.SparseTensor(
                  values=['omar'], indices=[[0, 0]], dense_shape=[1, 1])
      }
      fc.linear_model(
          features, [price_a, wire_cast, price_b],
          weight_collections=['my-vars'])
      my_vars = g.get_collection('my-vars')
      self.assertIn('price_a', my_vars[0].name)
      self.assertIn('price_b', my_vars[1].name)
      self.assertIn('wire_cast', my_vars[2].name)
    with ops.Graph().as_default() as g:
      features = {
          'price_a': [[1.]],
          'price_b': [[3.]],
          'wire_cast':
              sparse_tensor.SparseTensor(
                  values=['omar'], indices=[[0, 0]], dense_shape=[1, 1])
      }
      fc.linear_model(
          features, [wire_cast, price_b, price_a],
          weight_collections=['my-vars'])
      my_vars = g.get_collection('my-vars')
      self.assertIn('price_a', my_vars[0].name)
      self.assertIn('price_b', my_vars[1].name)
      self.assertIn('wire_cast', my_vars[2].name)
  def test_static_batch_size_mismatch(self):
    price1 = fc.numeric_column('price1')
    price2 = fc.numeric_column('price2')
    with ops.Graph().as_default():
      features = {
          'price1': [[1.], [5.], [7.]],  # batchsize = 3
          'price2': [[3.], [4.]]  # batchsize = 2
      }
      with self.assertRaisesRegexp(
          ValueError,
          'Batch size \(first dimension\) of each feature must be same.'):  # pylint: disable=anomalous-backslash-in-string
        fc.linear_model(features, [price1, price2])
  def test_subset_of_static_batch_size_mismatch(self):
    price1 = fc.numeric_column('price1')
    price2 = fc.numeric_column('price2')
    price3 = fc.numeric_column('price3')
    with ops.Graph().as_default():
      features = {
          'price1': array_ops.placeholder(dtype=dtypes.int64),  # batchsize = 3
          'price2': [[3.], [4.]],  # batchsize = 2
          'price3': [[3.], [4.], [5.]]  # batchsize = 3
      }
      with self.assertRaisesRegexp(
          ValueError,
          'Batch size \(first dimension\) of each feature must be same.'):  # pylint: disable=anomalous-backslash-in-string
        fc.linear_model(features, [price1, price2, price3])
  def test_runtime_batch_size_mismatch(self):
    price1 = fc.numeric_column('price1')
    price2 = fc.numeric_column('price2')
    with ops.Graph().as_default():
      features = {
          'price1': array_ops.placeholder(dtype=dtypes.int64),  # batchsize = 3
          'price2': [[3.], [4.]]  # batchsize = 2
      }
      predictions = fc.linear_model(features, [price1, price2])
      with _initialized_session() as sess:
        with self.assertRaisesRegexp(errors.OpError,
                                     'must have the same size and shape'):
          sess.run(
              predictions, feed_dict={features['price1']: [[1.], [5.], [7.]]})
  def test_runtime_batch_size_matches(self):
    price1 = fc.numeric_column('price1')
    price2 = fc.numeric_column('price2')
    with ops.Graph().as_default():
      features = {
          'price1': array_ops.placeholder(dtype=dtypes.int64),  # batchsize = 2
          'price2': array_ops.placeholder(dtype=dtypes.int64),  # batchsize = 2
      }
      predictions = fc.linear_model(features, [price1, price2])
      with _initialized_session() as sess:
        sess.run(
            predictions,
            feed_dict={
                features['price1']: [[1.], [5.]],
                features['price2']: [[1.], [5.]],
            })
  def test_with_numpy_input_fn(self):
    price = fc.numeric_column('price')
    price_buckets = fc.bucketized_column(price, boundaries=[0., 10., 100.,])
    body_style = fc.categorical_column_with_vocabulary_list(
        'body-style', vocabulary_list=['hardtop', 'wagon', 'sedan'])
    input_fn = numpy_io.numpy_input_fn(
        x={
            'price': np.array([-1., 2., 13., 104.]),
            'body-style': np.array(['sedan', 'hardtop', 'wagon', 'sedan']),
        },
        batch_size=2,
        shuffle=False)
    features = input_fn()
    net = fc.linear_model(features, [price_buckets, body_style])
    # self.assertEqual(1 + 3 + 5, net.shape[1])
    with _initialized_session() as sess:
      coord = coordinator.Coordinator()
      threads = queue_runner_impl.start_queue_runners(sess, coord=coord)
      bias = get_linear_model_bias()
      price_buckets_var = get_linear_model_column_var(price_buckets)
      body_style_var = get_linear_model_column_var(body_style)
      sess.run(price_buckets_var.assign([[10.], [100.], [1000.], [10000.]]))
      sess.run(body_style_var.assign([[-10.], [-100.], [-1000.]]))
      sess.run(bias.assign([5.]))
      self.assertAllClose([[10 - 1000 + 5.], [100 - 10 + 5.]], sess.run(net))
      coord.request_stop()
      coord.join(threads)
  def test_with_1d_sparse_tensor(self):
    price = fc.numeric_column('price')
    price_buckets = fc.bucketized_column(price, boundaries=[0., 10., 100.,])
    body_style = fc.categorical_column_with_vocabulary_list(
        'body-style', vocabulary_list=['hardtop', 'wagon', 'sedan'])
    # Provides 1-dim tensor and dense tensor.
    features = {
        'price': constant_op.constant([-1., 12.,]),
        'body-style': sparse_tensor.SparseTensor(
            indices=((0,), (1,)),
            values=('sedan', 'hardtop'),
            dense_shape=(2,)),
    }
    self.assertEqual(1, features['price'].shape.ndims)
    self.assertEqual(1, features['body-style'].dense_shape.get_shape()[0])
    net = fc.linear_model(features, [price_buckets, body_style])
    with _initialized_session() as sess:
      bias = get_linear_model_bias()
      price_buckets_var = get_linear_model_column_var(price_buckets)
      body_style_var = get_linear_model_column_var(body_style)
      sess.run(price_buckets_var.assign([[10.], [100.], [1000.], [10000.]]))
      sess.run(body_style_var.assign([[-10.], [-100.], [-1000.]]))
      sess.run(bias.assign([5.]))
      self.assertAllClose([[10 - 1000 + 5.], [1000 - 10 + 5.]], sess.run(net))
  def test_with_1d_unknown_shape_sparse_tensor(self):
    price = fc.numeric_column('price')
    price_buckets = fc.bucketized_column(price, boundaries=[0., 10., 100.,])
    body_style = fc.categorical_column_with_vocabulary_list(
        'body-style', vocabulary_list=['hardtop', 'wagon', 'sedan'])
    country = fc.categorical_column_with_vocabulary_list(
        'country', vocabulary_list=['US', 'JP', 'CA'])
    # Provides 1-dim tensor and dense tensor.
    features = {
        'price': array_ops.placeholder(dtypes.float32),
        'body-style': array_ops.sparse_placeholder(dtypes.string),
        'country': array_ops.placeholder(dtypes.string),
    }
    self.assertIsNone(features['price'].shape.ndims)
    self.assertIsNone(features['body-style'].get_shape().ndims)
    price_data = np.array([-1., 12.])
    body_style_data = sparse_tensor.SparseTensorValue(
        indices=((0,), (1,)),
        values=('sedan', 'hardtop'),
        dense_shape=(2,))
    net = fc.linear_model(features, [price_buckets, body_style])
    bias = get_linear_model_bias()
    price_buckets_var = get_linear_model_column_var(price_buckets)
    body_style_var = get_linear_model_column_var(body_style)
    with _initialized_session() as sess:
      sess.run(price_buckets_var.assign([[10.], [100.], [1000.], [10000.]]))
      sess.run(body_style_var.assign([[-10.], [-100.], [-1000.]]))
      sess.run(bias.assign([5.]))
      self.assertAllClose(
          [[10 - 1000 + 5.], [1000 - 10 + 5.]],
          sess.run(net, feed_dict={
              features['price']: price_data,
              features['body-style']: body_style_data}))
    # Dense categorical_column with unknown shape is not allowed.
    with self.assertRaisesRegexp(ValueError, 'Undefined input_tensor shape.'):
      fc.linear_model(features, [price_buckets, body_style, country])
  def test_with_rank_0_feature(self):
    price = fc.numeric_column('price')
    features = {
        'price': constant_op.constant(0),
    }
    self.assertEqual(0, features['price'].shape.ndims)
    # Static rank 0 should fail
    with self.assertRaisesRegexp(ValueError, 'Feature .* cannot have rank 0'):
      fc.linear_model(features, [price])
    # Dynamic rank 0 should fail
    features = {
        'price': array_ops.placeholder(dtypes.float32),
    }
    net = fc.linear_model(features, [price])
    self.assertEqual(1, net.shape[1])
    with _initialized_session() as sess:
      with self.assertRaisesOpError('Feature .* cannot have rank 0'):
        sess.run(net, feed_dict={features['price']: np.array(1)})
class InputLayerTest(test.TestCase):
def test_raises_if_empty_feature_columns(self):
with self.assertRaisesRegexp(ValueError,
'feature_columns must not be empty'):
fc.input_layer(features={}, feature_columns=[])
def test_should_be_dense_column(self):
with self.assertRaisesRegexp(ValueError, 'must be a _DenseColumn'):
fc.input_layer(
features={'a': [[0]]},
feature_columns=[
fc.categorical_column_with_hash_bucket('wire_cast', 4)
])
def test_does_not_support_dict_columns(self):
with self.assertRaisesRegexp(
ValueError, 'Expected feature_columns to be iterable, found dict.'):
fc.input_layer(
features={'a': [[0]]}, feature_columns={'a': fc.numeric_column('a')})
def test_raises_if_duplicate_name(self):
with self.assertRaisesRegexp(
ValueError, 'Duplicate feature column name found for columns'):
fc.input_layer(
features={'a': [[0]]},
feature_columns=[fc.numeric_column('a'),
fc.numeric_column('a')])
def test_one_column(self):
price = fc.numeric_column('price')
with ops.Graph().as_default():
features = {'price': [[1.], [5.]]}
net = fc.input_layer(features, [price])
with _initialized_session():
self.assertAllClose([[1.], [5.]], net.eval())
def test_multi_dimension(self):
price = fc.numeric_column('price', shape=2)
with ops.Graph().as_default():
features = {'price': [[1., 2.], [5., 6.]]}
net = fc.input_layer(features, [price])
with _initialized_session():
self.assertAllClose([[1., 2.], [5., 6.]], net.eval())
def test_raises_if_shape_mismatch(self):
price = fc.numeric_column('price', shape=2)
with ops.Graph().as_default():
features = {'price': [[1.], [5.]]}
net = fc.input_layer(features, [price])
with _initialized_session():
with self.assertRaisesRegexp(Exception, 'requested shape has 4'):
net.eval()
def test_reshaping(self):
price = fc.numeric_column('price', shape=[1, 2])
with ops.Graph().as_default():
features = {'price': [[[1., 2.]], [[5., 6.]]]}
net = fc.input_layer(features, [price])
with _initialized_session():
self.assertAllClose([[1., 2.], [5., 6.]], net.eval())
def test_multi_column(self):
price1 = fc.numeric_column('price1', shape=2)
price2 = fc.numeric_column('price2')
with ops.Graph().as_default():
features = {
'price1': [[1., 2.], [5., 6.]],
'price2': [[3.], [4.]]
}
net = fc.input_layer(features, [price1, price2])
with _initialized_session():
self.assertAllClose([[1., 2., 3.], [5., 6., 4.]], net.eval())
def test_column_order(self):
price_a = fc.numeric_column('price_a')
price_b = fc.numeric_column('price_b')
with ops.Graph().as_default():
features = {
'price_a': [[1.]],
'price_b': [[3.]],
}
net1 = fc.input_layer(features, [price_a, price_b])
net2 = fc.input_layer(features, [price_b, price_a])
with _initialized_session():
self.assertAllClose([[1., 3.]], net1.eval())
self.assertAllClose([[1., 3.]], net2.eval())
def test_fails_for_categorical_column(self):
animal = fc.categorical_column_with_identity('animal', num_buckets=4)
with ops.Graph().as_default():
features = {
'animal':
sparse_tensor.SparseTensor(
indices=[[0, 0], [0, 1]], values=[1, 2], dense_shape=[1, 2])
}
with self.assertRaisesRegexp(Exception, 'must be a _DenseColumn'):
fc.input_layer(features, [animal])
def test_static_batch_size_mismatch(self):
price1 = fc.numeric_column('price1')
price2 = fc.numeric_column('price2')
with ops.Graph().as_default():
features = {
'price1': [[1.], [5.], [7.]], # batchsize = 3
'price2': [[3.], [4.]] # batchsize = 2
}
with self.assertRaisesRegexp(
ValueError,
'Batch size \(first dimension\) of each feature must be same.'): # pylint: disable=anomalous-backslash-in-string
fc.input_layer(features, [price1, price2])
def test_subset_of_static_batch_size_mismatch(self):
price1 = fc.numeric_column('price1')
price2 = fc.numeric_column('price2')
price3 = fc.numeric_column('price3')
with ops.Graph().as_default():
features = {
'price1': array_ops.placeholder(dtype=dtypes.int64), # batchsize = 3
'price2': [[3.], [4.]], # batchsize = 2
'price3': [[3.], [4.], [5.]] # batchsize = 3
}
with self.assertRaisesRegexp(
ValueError,
'Batch size \(first dimension\) of each feature must be same.'): # pylint: disable=anomalous-backslash-in-string
fc.input_layer(features, [price1, price2, price3])
def test_runtime_batch_size_mismatch(self):
price1 = fc.numeric_column('price1')
price2 = fc.numeric_column('price2')
with ops.Graph().as_default():
features = {
'price1': array_ops.placeholder(dtype=dtypes.int64), # batchsize = 3
'price2': [[3.], [4.]] # batchsize = 2
}
net = fc.input_layer(features, [price1, price2])
with _initialized_session() as sess:
with self.assertRaisesRegexp(errors.OpError,
'Dimensions of inputs should match'):
sess.run(net, feed_dict={features['price1']: [[1.], [5.], [7.]]})
def test_runtime_batch_size_matches(self):
price1 = fc.numeric_column('price1')
price2 = fc.numeric_column('price2')
with ops.Graph().as_default():
features = {
'price1': array_ops.placeholder(dtype=dtypes.int64), # batchsize = 2
'price2': array_ops.placeholder(dtype=dtypes.int64), # batchsize = 2
}
net = fc.input_layer(features, [price1, price2])
with _initialized_session() as sess:
sess.run(
net,
feed_dict={
features['price1']: [[1.], [5.]],
features['price2']: [[1.], [5.]],
})
  def test_with_numpy_input_fn(self):
    """End-to-end input_layer test fed by numpy_io.numpy_input_fn batches."""
    embedding_values = (
        (1., 2., 3., 4., 5.),  # id 0
        (6., 7., 8., 9., 10.),  # id 1
        (11., 12., 13., 14., 15.)  # id 2
    )
    def _initializer(shape, dtype, partition_info):
      # Deterministic embedding weights so the expected outputs are exact.
      del shape, dtype, partition_info
      return embedding_values
    # price has 1 dimension in input_layer
    price = fc.numeric_column('price')
    body_style = fc.categorical_column_with_vocabulary_list(
        'body-style', vocabulary_list=['hardtop', 'wagon', 'sedan'])
    # one_hot_body_style has 3 dims in input_layer.
    one_hot_body_style = fc.indicator_column(body_style)
    # embedded_body_style has 5 dims in input_layer.
    embedded_body_style = fc.embedding_column(body_style, dimension=5,
                                              initializer=_initializer)
    input_fn = numpy_io.numpy_input_fn(
        x={
            'price': np.array([11., 12., 13., 14.]),
            'body-style': np.array(['sedan', 'hardtop', 'wagon', 'sedan']),
        },
        batch_size=2,
        shuffle=False)
    features = input_fn()
    net = fc.input_layer(features,
                         [price, one_hot_body_style, embedded_body_style])
    # Expected width: price (1) + indicator (3) + embedding (5).
    self.assertEqual(1 + 3 + 5, net.shape[1])
    with _initialized_session() as sess:
      coord = coordinator.Coordinator()
      threads = queue_runner_impl.start_queue_runners(sess, coord=coord)
      # Each row is formed by concatenating `embedded_body_style`,
      # `one_hot_body_style`, and `price` in order.
      self.assertAllEqual(
          [[11., 12., 13., 14., 15., 0., 0., 1., 11.],
           [1., 2., 3., 4., 5., 1., 0., 0., 12]],
          sess.run(net))
      coord.request_stop()
      coord.join(threads)
  def test_with_1d_sparse_tensor(self):
    """input_layer accepts rank-1 sparse and dense categorical inputs."""
    embedding_values = (
        (1., 2., 3., 4., 5.),  # id 0
        (6., 7., 8., 9., 10.),  # id 1
        (11., 12., 13., 14., 15.)  # id 2
    )
    def _initializer(shape, dtype, partition_info):
      # Deterministic embedding weights so the expected outputs are exact.
      del shape, dtype, partition_info
      return embedding_values
    # price has 1 dimension in input_layer
    price = fc.numeric_column('price')
    # one_hot_body_style has 3 dims in input_layer.
    body_style = fc.categorical_column_with_vocabulary_list(
        'body-style', vocabulary_list=['hardtop', 'wagon', 'sedan'])
    one_hot_body_style = fc.indicator_column(body_style)
    # embedded_country has 5 dims in input_layer.
    country = fc.categorical_column_with_vocabulary_list(
        'country', vocabulary_list=['US', 'JP', 'CA'])
    embedded_country = fc.embedding_column(country, dimension=5,
                                           initializer=_initializer)
    # Provides 1-dim tensor and dense tensor.
    features = {
        'price': constant_op.constant([11., 12.,]),
        'body-style': sparse_tensor.SparseTensor(
            indices=((0,), (1,)),
            values=('sedan', 'hardtop'),
            dense_shape=(2,)),
        # This is dense tensor for the categorical_column.
        'country': constant_op.constant(['CA', 'US']),
    }
    self.assertEqual(1, features['price'].shape.ndims)
    self.assertEqual(1, features['body-style'].dense_shape.get_shape()[0])
    self.assertEqual(1, features['country'].shape.ndims)
    net = fc.input_layer(features,
                         [price, one_hot_body_style, embedded_country])
    # Expected width: price (1) + indicator (3) + embedding (5).
    self.assertEqual(1 + 3 + 5, net.shape[1])
    with _initialized_session() as sess:
      # Each row is formed by concatenating `one_hot_body_style`,
      # `embedded_country`, and `price` in order.
      self.assertAllEqual(
          [[0., 0., 1., 11., 12., 13., 14., 15., 11.],
           [1., 0., 0., 1., 2., 3., 4., 5., 12.]],
          sess.run(net))
  def test_with_1d_unknown_shape_sparse_tensor(self):
    """Unknown-shape inputs work for sparse columns but not dense categorical."""
    embedding_values = (
        (1., 2., 3., 4., 5.),  # id 0
        (6., 7., 8., 9., 10.),  # id 1
        (11., 12., 13., 14., 15.)  # id 2
    )
    def _initializer(shape, dtype, partition_info):
      # Deterministic embedding weights so the expected outputs are exact.
      del shape, dtype, partition_info
      return embedding_values
    # price has 1 dimension in input_layer
    price = fc.numeric_column('price')
    # one_hot_body_style has 3 dims in input_layer.
    body_style = fc.categorical_column_with_vocabulary_list(
        'body-style', vocabulary_list=['hardtop', 'wagon', 'sedan'])
    one_hot_body_style = fc.indicator_column(body_style)
    # embedded_country has 5 dims in input_layer.
    country = fc.categorical_column_with_vocabulary_list(
        'country', vocabulary_list=['US', 'JP', 'CA'])
    embedded_country = fc.embedding_column(country, dimension=5,
                                           initializer=_initializer)
    # Provides 1-dim tensor and dense tensor.
    features = {
        'price': array_ops.placeholder(dtypes.float32),
        'body-style': array_ops.sparse_placeholder(dtypes.string),
        # This is dense tensor for the categorical_column.
        'country': array_ops.placeholder(dtypes.string),
    }
    self.assertIsNone(features['price'].shape.ndims)
    self.assertIsNone(features['body-style'].get_shape().ndims)
    self.assertIsNone(features['country'].shape.ndims)
    price_data = np.array([11., 12.])
    body_style_data = sparse_tensor.SparseTensorValue(
        indices=((0,), (1,)),
        values=('sedan', 'hardtop'),
        dense_shape=(2,))
    # Dense categorical_column with unknown shape is not allowed.
    with self.assertRaisesRegexp(ValueError, 'Undefined input_tensor shape.'):
      fc.input_layer(features, [price, one_hot_body_style, embedded_country])
    # Without the dense categorical column, the layer builds fine.
    net = fc.input_layer(features, [price, one_hot_body_style])
    self.assertEqual(1 + 3, net.shape[1])
    with _initialized_session() as sess:
      # Each row is formed by concatenating `one_hot_body_style` and
      # `price` in order.
      self.assertAllEqual(
          [[0., 0., 1., 11.], [1., 0., 0., 12.]],
          sess.run(net, feed_dict={
              features['price']: price_data,
              features['body-style']: body_style_data}))
def test_with_rank_0_feature(self):
# price has 1 dimension in input_layer
price = fc.numeric_column('price')
features = {
'price': constant_op.constant(0),
}
self.assertEqual(0, features['price'].shape.ndims)
# Static rank 0 should fail
with self.assertRaisesRegexp(ValueError, 'Feature .* cannot have rank 0'):
fc.input_layer(features, [price])
# Dynamic rank 0 should fail
features = {
'price': array_ops.placeholder(dtypes.float32),
}
net = fc.input_layer(features, [price])
self.assertEqual(1, net.shape[1])
with _initialized_session() as sess:
with self.assertRaisesOpError('Feature .* cannot have rank 0'):
sess.run(net, feed_dict={features['price']: np.array(1)})
class MakeParseExampleSpecTest(test.TestCase):
  """Tests for fc.make_parse_example_spec."""
  class _TestFeatureColumn(_FeatureColumn,
                           collections.namedtuple('_TestFeatureColumn',
                                                  ['parse_spec'])):
    # Minimal feature-column stub whose parse spec is supplied at creation.
    @property
    def _parse_example_spec(self):
      return self.parse_spec
  def test_no_feature_columns(self):
    """An empty column list yields an empty spec."""
    actual = fc.make_parse_example_spec([])
    self.assertDictEqual({}, actual)
  def test_invalid_type(self):
    """Entries that are not _FeatureColumn instances are rejected."""
    key1 = 'key1'
    parse_spec1 = parsing_ops.FixedLenFeature(
        shape=(2,), dtype=dtypes.float32, default_value=0.)
    with self.assertRaisesRegexp(
        ValueError,
        'All feature_columns must be _FeatureColumn instances.*invalid_column'):
      fc.make_parse_example_spec(
          (self._TestFeatureColumn({key1: parse_spec1}), 'invalid_column'))
  def test_one_feature_column(self):
    """A single column contributes its spec unchanged."""
    key1 = 'key1'
    parse_spec1 = parsing_ops.FixedLenFeature(
        shape=(2,), dtype=dtypes.float32, default_value=0.)
    actual = fc.make_parse_example_spec(
        (self._TestFeatureColumn({key1: parse_spec1}),))
    self.assertDictEqual({key1: parse_spec1}, actual)
  def test_two_feature_columns(self):
    """Specs from distinct columns are merged into one dict."""
    key1 = 'key1'
    parse_spec1 = parsing_ops.FixedLenFeature(
        shape=(2,), dtype=dtypes.float32, default_value=0.)
    key2 = 'key2'
    parse_spec2 = parsing_ops.VarLenFeature(dtype=dtypes.string)
    actual = fc.make_parse_example_spec(
        (self._TestFeatureColumn({key1: parse_spec1}),
         self._TestFeatureColumn({key2: parse_spec2})))
    self.assertDictEqual({key1: parse_spec1, key2: parse_spec2}, actual)
  def test_equal_keys_different_parse_spec(self):
    """Conflicting specs for the same key are rejected."""
    key1 = 'key1'
    parse_spec1 = parsing_ops.FixedLenFeature(
        shape=(2,), dtype=dtypes.float32, default_value=0.)
    parse_spec2 = parsing_ops.VarLenFeature(dtype=dtypes.string)
    with self.assertRaisesRegexp(
        ValueError,
        'feature_columns contain different parse_spec for key key1'):
      fc.make_parse_example_spec(
          (self._TestFeatureColumn({key1: parse_spec1}),
           self._TestFeatureColumn({key1: parse_spec2})))
  def test_equal_keys_equal_parse_spec(self):
    """Identical specs for the same key are accepted (deduplicated)."""
    key1 = 'key1'
    parse_spec1 = parsing_ops.FixedLenFeature(
        shape=(2,), dtype=dtypes.float32, default_value=0.)
    actual = fc.make_parse_example_spec(
        (self._TestFeatureColumn({key1: parse_spec1}),
         self._TestFeatureColumn({key1: parse_spec1})))
    self.assertDictEqual({key1: parse_spec1}, actual)
  def test_multiple_features_dict(self):
    """parse_spec for one column is a dict with length > 1."""
    key1 = 'key1'
    parse_spec1 = parsing_ops.FixedLenFeature(
        shape=(2,), dtype=dtypes.float32, default_value=0.)
    key2 = 'key2'
    parse_spec2 = parsing_ops.VarLenFeature(dtype=dtypes.string)
    key3 = 'key3'
    parse_spec3 = parsing_ops.VarLenFeature(dtype=dtypes.int32)
    actual = fc.make_parse_example_spec(
        (self._TestFeatureColumn({key1: parse_spec1}),
         self._TestFeatureColumn({key2: parse_spec2, key3: parse_spec3})))
    self.assertDictEqual(
        {key1: parse_spec1, key2: parse_spec2, key3: parse_spec3}, actual)
def _assert_sparse_tensor_value(test_case, expected, actual):
test_case.assertEqual(np.int64, np.array(actual.indices).dtype)
test_case.assertAllEqual(expected.indices, actual.indices)
test_case.assertEqual(
np.array(expected.values).dtype, np.array(actual.values).dtype)
test_case.assertAllEqual(expected.values, actual.values)
test_case.assertEqual(np.int64, np.array(actual.dense_shape).dtype)
test_case.assertAllEqual(expected.dense_shape, actual.dense_shape)
class VocabularyFileCategoricalColumnTest(test.TestCase):
  """Tests for fc.categorical_column_with_vocabulary_file."""
  def setUp(self):
    """Resolves the paths of the checked-in vocabulary fixture files."""
    super(VocabularyFileCategoricalColumnTest, self).setUp()
    # Contains ints, Golden State Warriors jersey numbers: 30, 35, 11, 23, 22
    self._warriors_vocabulary_file_name = test.test_src_dir_path(
        'python/feature_column/testdata/warriors_vocabulary.txt')
    self._warriors_vocabulary_size = 5
    # Contains strings, character names from 'The Wire': omar, stringer, marlo
    self._wire_vocabulary_file_name = test.test_src_dir_path(
        'python/feature_column/testdata/wire_vocabulary.txt')
    self._wire_vocabulary_size = 3
  def test_defaults(self):
    """Default column exposes name, bucket count, and string parse spec."""
    column = fc.categorical_column_with_vocabulary_file(
        key='aaa', vocabulary_file='path_to_file', vocabulary_size=3)
    self.assertEqual('aaa', column.name)
    self.assertEqual(3, column._num_buckets)
    self.assertEqual({
        'aaa': parsing_ops.VarLenFeature(dtypes.string)
    }, column._parse_example_spec)
  def test_all_constructor_args(self):
    """num_oov_buckets adds to _num_buckets; dtype flows to the parse spec."""
    column = fc.categorical_column_with_vocabulary_file(
        key='aaa', vocabulary_file='path_to_file', vocabulary_size=3,
        num_oov_buckets=4, dtype=dtypes.int32)
    self.assertEqual(7, column._num_buckets)
    self.assertEqual({
        'aaa': parsing_ops.VarLenFeature(dtypes.int32)
    }, column._parse_example_spec)
  def test_deep_copy(self):
    """copy.deepcopy preserves all column attributes."""
    original = fc.categorical_column_with_vocabulary_file(
        key='aaa', vocabulary_file='path_to_file', vocabulary_size=3,
        num_oov_buckets=4, dtype=dtypes.int32)
    for column in (original, copy.deepcopy(original)):
      self.assertEqual('aaa', column.name)
      self.assertEqual(7, column._num_buckets)
      self.assertEqual({
          'aaa': parsing_ops.VarLenFeature(dtypes.int32)
      }, column._parse_example_spec)
  def test_vocabulary_file_none(self):
    """A None vocabulary_file is rejected."""
    with self.assertRaisesRegexp(ValueError, 'Missing vocabulary_file'):
      fc.categorical_column_with_vocabulary_file(
          key='aaa', vocabulary_file=None, vocabulary_size=3)
  def test_vocabulary_file_empty_string(self):
    """An empty vocabulary_file path is rejected."""
    with self.assertRaisesRegexp(ValueError, 'Missing vocabulary_file'):
      fc.categorical_column_with_vocabulary_file(
          key='aaa', vocabulary_file='', vocabulary_size=3)
  def test_invalid_vocabulary_file(self):
    """A nonexistent vocabulary file fails at table-initialization time."""
    column = fc.categorical_column_with_vocabulary_file(
        key='aaa', vocabulary_file='file_does_not_exist', vocabulary_size=10)
    inputs = sparse_tensor.SparseTensorValue(
        indices=((0, 0), (1, 0), (1, 1)),
        values=('marlo', 'skywalker', 'omar'),
        dense_shape=(2, 2))
    column._get_sparse_tensors(_LazyBuilder({'aaa': inputs}))
    with self.assertRaisesRegexp(errors.OpError, 'file_does_not_exist'):
      with self.test_session():
        lookup_ops.tables_initializer().run()
  def test_invalid_vocabulary_size(self):
    """None, negative, and zero vocabulary sizes are all rejected."""
    with self.assertRaisesRegexp(ValueError, 'Invalid vocabulary_size'):
      fc.categorical_column_with_vocabulary_file(
          key='aaa', vocabulary_file=self._wire_vocabulary_file_name,
          vocabulary_size=None)
    with self.assertRaisesRegexp(ValueError, 'Invalid vocabulary_size'):
      fc.categorical_column_with_vocabulary_file(
          key='aaa', vocabulary_file=self._wire_vocabulary_file_name,
          vocabulary_size=-1)
    with self.assertRaisesRegexp(ValueError, 'Invalid vocabulary_size'):
      fc.categorical_column_with_vocabulary_file(
          key='aaa', vocabulary_file=self._wire_vocabulary_file_name,
          vocabulary_size=0)
  def test_too_large_vocabulary_size(self):
    """vocabulary_size larger than the file fails when the table initializes."""
    column = fc.categorical_column_with_vocabulary_file(
        key='aaa',
        vocabulary_file=self._wire_vocabulary_file_name,
        vocabulary_size=self._wire_vocabulary_size + 1)
    inputs = sparse_tensor.SparseTensorValue(
        indices=((0, 0), (1, 0), (1, 1)),
        values=('marlo', 'skywalker', 'omar'),
        dense_shape=(2, 2))
    column._get_sparse_tensors(_LazyBuilder({'aaa': inputs}))
    with self.assertRaisesRegexp(errors.OpError, 'Invalid vocab_size'):
      with self.test_session():
        lookup_ops.tables_initializer().run()
  def test_invalid_num_oov_buckets(self):
    """A negative num_oov_buckets is rejected."""
    with self.assertRaisesRegexp(ValueError, 'Invalid num_oov_buckets'):
      fc.categorical_column_with_vocabulary_file(
          key='aaa', vocabulary_file='path', vocabulary_size=3,
          num_oov_buckets=-1)
  def test_invalid_dtype(self):
    """A float dtype is rejected."""
    with self.assertRaisesRegexp(ValueError, 'dtype must be string or integer'):
      fc.categorical_column_with_vocabulary_file(
          key='aaa', vocabulary_file='path', vocabulary_size=3,
          dtype=dtypes.float64)
  def test_invalid_buckets_and_default_value(self):
    """num_oov_buckets and default_value are mutually exclusive."""
    with self.assertRaisesRegexp(
        ValueError, 'both num_oov_buckets and default_value'):
      fc.categorical_column_with_vocabulary_file(
          key='aaa',
          vocabulary_file=self._wire_vocabulary_file_name,
          vocabulary_size=self._wire_vocabulary_size,
          num_oov_buckets=100,
          default_value=2)
  def test_invalid_input_dtype_int32(self):
    """Int inputs to a string-dtype column are rejected."""
    column = fc.categorical_column_with_vocabulary_file(
        key='aaa',
        vocabulary_file=self._wire_vocabulary_file_name,
        vocabulary_size=self._wire_vocabulary_size,
        dtype=dtypes.string)
    inputs = sparse_tensor.SparseTensorValue(
        indices=((0, 0), (1, 0), (1, 1)),
        values=(12, 24, 36),
        dense_shape=(2, 2))
    with self.assertRaisesRegexp(ValueError, 'dtype must be compatible'):
      column._get_sparse_tensors(_LazyBuilder({'aaa': inputs}))
  def test_invalid_input_dtype_string(self):
    """String inputs to an int-dtype column are rejected."""
    column = fc.categorical_column_with_vocabulary_file(
        key='aaa',
        vocabulary_file=self._warriors_vocabulary_file_name,
        vocabulary_size=self._warriors_vocabulary_size,
        dtype=dtypes.int32)
    inputs = sparse_tensor.SparseTensorValue(
        indices=((0, 0), (1, 0), (1, 1)),
        values=('omar', 'stringer', 'marlo'),
        dense_shape=(2, 2))
    with self.assertRaisesRegexp(ValueError, 'dtype must be compatible'):
      column._get_sparse_tensors(_LazyBuilder({'aaa': inputs}))
  def test_parse_example(self):
    """The generated parse spec round-trips values through parse_example."""
    a = fc.categorical_column_with_vocabulary_file(
        key='aaa', vocabulary_file='path_to_file', vocabulary_size=3)
    data = example_pb2.Example(features=feature_pb2.Features(
        feature={
            'aaa':
                feature_pb2.Feature(bytes_list=feature_pb2.BytesList(
                    value=[b'omar', b'stringer']))
        }))
    features = parsing_ops.parse_example(
        serialized=[data.SerializeToString()],
        features=fc.make_parse_example_spec([a]))
    self.assertIn('aaa', features)
    with self.test_session():
      _assert_sparse_tensor_value(
          self,
          sparse_tensor.SparseTensorValue(
              indices=[[0, 0], [0, 1]],
              values=np.array([b'omar', b'stringer'], dtype=np.object_),
              dense_shape=[1, 2]),
          features['aaa'].eval())
  def test_get_sparse_tensors(self):
    """In-vocabulary values map to their ids; OOV values map to -1."""
    column = fc.categorical_column_with_vocabulary_file(
        key='aaa',
        vocabulary_file=self._wire_vocabulary_file_name,
        vocabulary_size=self._wire_vocabulary_size)
    inputs = sparse_tensor.SparseTensorValue(
        indices=((0, 0), (1, 0), (1, 1)),
        values=('marlo', 'skywalker', 'omar'),
        dense_shape=(2, 2))
    id_weight_pair = column._get_sparse_tensors(_LazyBuilder({'aaa': inputs}))
    self.assertIsNone(id_weight_pair.weight_tensor)
    with _initialized_session():
      # 'marlo' -> 2, 'skywalker' (OOV) -> -1, 'omar' -> 0.
      _assert_sparse_tensor_value(
          self,
          sparse_tensor.SparseTensorValue(
              indices=inputs.indices,
              values=np.array((2, -1, 0), dtype=np.int64),
              dense_shape=inputs.dense_shape),
          id_weight_pair.id_tensor.eval())
  def test_transform_feature(self):
    """_transform_features produces the same id tensor as _get_sparse_tensors."""
    column = fc.categorical_column_with_vocabulary_file(
        key='aaa',
        vocabulary_file=self._wire_vocabulary_file_name,
        vocabulary_size=self._wire_vocabulary_size)
    inputs = sparse_tensor.SparseTensorValue(
        indices=((0, 0), (1, 0), (1, 1)),
        values=('marlo', 'skywalker', 'omar'),
        dense_shape=(2, 2))
    id_tensor = _transform_features({'aaa': inputs}, [column])[column]
    with _initialized_session():
      _assert_sparse_tensor_value(self,
                                  sparse_tensor.SparseTensorValue(
                                      indices=inputs.indices,
                                      values=np.array(
                                          (2, -1, 0), dtype=np.int64),
                                      dense_shape=inputs.dense_shape),
                                  id_tensor.eval())
  def test_get_sparse_tensors_weight_collections(self):
    """The lookup creates no variables, so the collections stay empty."""
    column = fc.categorical_column_with_vocabulary_file(
        key='aaa',
        vocabulary_file=self._wire_vocabulary_file_name,
        vocabulary_size=self._wire_vocabulary_size)
    inputs = sparse_tensor.SparseTensor(
        values=['omar', 'stringer', 'marlo'],
        indices=[[0, 0], [1, 0], [1, 1]],
        dense_shape=[2, 2])
    column._get_sparse_tensors(
        _LazyBuilder({
            'aaa': inputs
        }), weight_collections=('my_weights',))
    self.assertItemsEqual(
        [], ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES))
    self.assertItemsEqual([], ops.get_collection('my_weights'))
  def test_get_sparse_tensors_dense_input(self):
    """Dense string input is converted to sparse ids; '' is dropped."""
    column = fc.categorical_column_with_vocabulary_file(
        key='aaa',
        vocabulary_file=self._wire_vocabulary_file_name,
        vocabulary_size=self._wire_vocabulary_size)
    id_weight_pair = column._get_sparse_tensors(
        _LazyBuilder({
            'aaa': (('marlo', ''), ('skywalker', 'omar'))
        }))
    self.assertIsNone(id_weight_pair.weight_tensor)
    with _initialized_session():
      _assert_sparse_tensor_value(
          self,
          sparse_tensor.SparseTensorValue(
              indices=((0, 0), (1, 0), (1, 1)),
              values=np.array((2, -1, 0), dtype=np.int64),
              dense_shape=(2, 2)),
          id_weight_pair.id_tensor.eval())
  def test_get_sparse_tensors_default_value_in_vocabulary(self):
    """OOV values map to default_value instead of -1."""
    column = fc.categorical_column_with_vocabulary_file(
        key='aaa',
        vocabulary_file=self._wire_vocabulary_file_name,
        vocabulary_size=self._wire_vocabulary_size,
        default_value=2)
    inputs = sparse_tensor.SparseTensorValue(
        indices=((0, 0), (1, 0), (1, 1)),
        values=('marlo', 'skywalker', 'omar'),
        dense_shape=(2, 2))
    id_weight_pair = column._get_sparse_tensors(_LazyBuilder({'aaa': inputs}))
    self.assertIsNone(id_weight_pair.weight_tensor)
    with _initialized_session():
      _assert_sparse_tensor_value(
          self,
          sparse_tensor.SparseTensorValue(
              indices=inputs.indices,
              values=np.array((2, 2, 0), dtype=np.int64),
              dense_shape=inputs.dense_shape),
          id_weight_pair.id_tensor.eval())
  def test_get_sparse_tensors_with_oov_buckets(self):
    """OOV values hash into the extra buckets above the vocabulary ids."""
    column = fc.categorical_column_with_vocabulary_file(
        key='aaa',
        vocabulary_file=self._wire_vocabulary_file_name,
        vocabulary_size=self._wire_vocabulary_size,
        num_oov_buckets=100)
    inputs = sparse_tensor.SparseTensorValue(
        indices=((0, 0), (1, 0), (1, 1), (1, 2)),
        values=('marlo', 'skywalker', 'omar', 'heisenberg'),
        dense_shape=(2, 3))
    id_weight_pair = column._get_sparse_tensors(_LazyBuilder({'aaa': inputs}))
    self.assertIsNone(id_weight_pair.weight_tensor)
    with _initialized_session():
      _assert_sparse_tensor_value(
          self,
          sparse_tensor.SparseTensorValue(
              indices=inputs.indices,
              values=np.array((2, 33, 0, 62), dtype=np.int64),
              dense_shape=inputs.dense_shape),
          id_weight_pair.id_tensor.eval())
  def test_get_sparse_tensors_small_vocabulary_size(self):
    # 'marlo' is the last entry in our vocabulary file, so by setting
    # `vocabulary_size` to 1 less than number of entries in file, we take
    # 'marlo' out of the vocabulary.
    column = fc.categorical_column_with_vocabulary_file(
        key='aaa',
        vocabulary_file=self._wire_vocabulary_file_name,
        vocabulary_size=self._wire_vocabulary_size - 1)
    inputs = sparse_tensor.SparseTensorValue(
        indices=((0, 0), (1, 0), (1, 1)),
        values=('marlo', 'skywalker', 'omar'),
        dense_shape=(2, 2))
    id_weight_pair = column._get_sparse_tensors(_LazyBuilder({'aaa': inputs}))
    self.assertIsNone(id_weight_pair.weight_tensor)
    with _initialized_session():
      _assert_sparse_tensor_value(
          self,
          sparse_tensor.SparseTensorValue(
              indices=inputs.indices,
              values=np.array((-1, -1, 0), dtype=np.int64),
              dense_shape=inputs.dense_shape),
          id_weight_pair.id_tensor.eval())
  def test_get_sparse_tensors_int32(self):
    """Integer vocabulary lookup with int32 inputs."""
    column = fc.categorical_column_with_vocabulary_file(
        key='aaa',
        vocabulary_file=self._warriors_vocabulary_file_name,
        vocabulary_size=self._warriors_vocabulary_size,
        dtype=dtypes.int32)
    inputs = sparse_tensor.SparseTensorValue(
        indices=((0, 0), (1, 0), (1, 1), (2, 2)),
        values=(11, 100, 30, 22),
        dense_shape=(3, 3))
    id_weight_pair = column._get_sparse_tensors(_LazyBuilder({'aaa': inputs}))
    self.assertIsNone(id_weight_pair.weight_tensor)
    with _initialized_session():
      _assert_sparse_tensor_value(
          self,
          sparse_tensor.SparseTensorValue(
              indices=inputs.indices,
              values=np.array((2, -1, 0, 4), dtype=np.int64),
              dense_shape=inputs.dense_shape),
          id_weight_pair.id_tensor.eval())
  def test_get_sparse_tensors_int32_dense_input(self):
    """Dense int32 input with a custom default for missing/OOV entries."""
    default_value = -100
    column = fc.categorical_column_with_vocabulary_file(
        key='aaa',
        vocabulary_file=self._warriors_vocabulary_file_name,
        vocabulary_size=self._warriors_vocabulary_size,
        dtype=dtypes.int32,
        default_value=default_value)
    id_weight_pair = column._get_sparse_tensors(
        _LazyBuilder({
            'aaa': ((11, -1, -1), (100, 30, -1), (-1, -1, 22))
        }))
    self.assertIsNone(id_weight_pair.weight_tensor)
    with _initialized_session():
      _assert_sparse_tensor_value(
          self,
          sparse_tensor.SparseTensorValue(
              indices=((0, 0), (1, 0), (1, 1), (2, 2)),
              values=np.array((2, default_value, 0, 4), dtype=np.int64),
              dense_shape=(3, 3)),
          id_weight_pair.id_tensor.eval())
  def test_get_sparse_tensors_int32_with_oov_buckets(self):
    """Integer vocabulary lookup with OOV values hashed into extra buckets."""
    column = fc.categorical_column_with_vocabulary_file(
        key='aaa',
        vocabulary_file=self._warriors_vocabulary_file_name,
        vocabulary_size=self._warriors_vocabulary_size,
        dtype=dtypes.int32,
        num_oov_buckets=100)
    inputs = sparse_tensor.SparseTensorValue(
        indices=((0, 0), (1, 0), (1, 1), (2, 2)),
        values=(11, 100, 30, 22),
        dense_shape=(3, 3))
    id_weight_pair = column._get_sparse_tensors(_LazyBuilder({'aaa': inputs}))
    self.assertIsNone(id_weight_pair.weight_tensor)
    with _initialized_session():
      _assert_sparse_tensor_value(
          self,
          sparse_tensor.SparseTensorValue(
              indices=inputs.indices,
              values=np.array((2, 60, 0, 4), dtype=np.int64),
              dense_shape=inputs.dense_shape),
          id_weight_pair.id_tensor.eval())
  def test_linear_model(self):
    """The column works end-to-end inside fc.linear_model."""
    wire_column = fc.categorical_column_with_vocabulary_file(
        key='wire',
        vocabulary_file=self._wire_vocabulary_file_name,
        vocabulary_size=self._wire_vocabulary_size,
        num_oov_buckets=1)
    self.assertEqual(4, wire_column._num_buckets)
    with ops.Graph().as_default():
      predictions = fc.linear_model({
          wire_column.name: sparse_tensor.SparseTensorValue(
              indices=((0, 0), (1, 0), (1, 1)),
              values=('marlo', 'skywalker', 'omar'),
              dense_shape=(2, 2))
      }, (wire_column,))
      bias = get_linear_model_bias()
      wire_var = get_linear_model_column_var(wire_column)
      with _initialized_session():
        self.assertAllClose((0.,), bias.eval())
        self.assertAllClose(((0.,), (0.,), (0.,), (0.,)), wire_var.eval())
        self.assertAllClose(((0.,), (0.,)), predictions.eval())
        wire_var.assign(((1.,), (2.,), (3.,), (4.,))).eval()
        # 'marlo' -> 2: wire_var[2] = 3
        # 'skywalker' -> 3, 'omar' -> 0: wire_var[3] + wire_var[0] = 4+1 = 5
        self.assertAllClose(((3.,), (5.,)), predictions.eval())
class VocabularyListCategoricalColumnTest(test.TestCase):
def test_defaults_string(self):
column = fc.categorical_column_with_vocabulary_list(
key='aaa', vocabulary_list=('omar', 'stringer', 'marlo'))
self.assertEqual('aaa', column.name)
self.assertEqual(3, column._num_buckets)
self.assertEqual({
'aaa': parsing_ops.VarLenFeature(dtypes.string)
}, column._parse_example_spec)
def test_defaults_int(self):
column = fc.categorical_column_with_vocabulary_list(
key='aaa', vocabulary_list=(12, 24, 36))
self.assertEqual('aaa', column.name)
self.assertEqual(3, column._num_buckets)
self.assertEqual({
'aaa': parsing_ops.VarLenFeature(dtypes.int64)
}, column._parse_example_spec)
def test_all_constructor_args(self):
column = fc.categorical_column_with_vocabulary_list(
key='aaa', vocabulary_list=(12, 24, 36), dtype=dtypes.int32,
default_value=-99)
self.assertEqual(3, column._num_buckets)
self.assertEqual({
'aaa': parsing_ops.VarLenFeature(dtypes.int32)
}, column._parse_example_spec)
def test_deep_copy(self):
original = fc.categorical_column_with_vocabulary_list(
key='aaa', vocabulary_list=(12, 24, 36), dtype=dtypes.int32)
for column in (original, copy.deepcopy(original)):
self.assertEqual('aaa', column.name)
self.assertEqual(3, column._num_buckets)
self.assertEqual({
'aaa': parsing_ops.VarLenFeature(dtypes.int32)
}, column._parse_example_spec)
def test_invalid_dtype(self):
with self.assertRaisesRegexp(ValueError, 'dtype must be string or integer'):
fc.categorical_column_with_vocabulary_list(
key='aaa', vocabulary_list=('omar', 'stringer', 'marlo'),
dtype=dtypes.float32)
def test_invalid_mapping_dtype(self):
with self.assertRaisesRegexp(
ValueError, r'vocabulary dtype must be string or integer'):
fc.categorical_column_with_vocabulary_list(
key='aaa', vocabulary_list=(12., 24., 36.))
def test_mismatched_int_dtype(self):
with self.assertRaisesRegexp(
ValueError, r'dtype.*and vocabulary dtype.*do not match'):
fc.categorical_column_with_vocabulary_list(
key='aaa', vocabulary_list=('omar', 'stringer', 'marlo'),
dtype=dtypes.int32)
def test_mismatched_string_dtype(self):
with self.assertRaisesRegexp(
ValueError, r'dtype.*and vocabulary dtype.*do not match'):
fc.categorical_column_with_vocabulary_list(
key='aaa', vocabulary_list=(12, 24, 36), dtype=dtypes.string)
def test_none_mapping(self):
with self.assertRaisesRegexp(
ValueError, r'vocabulary_list.*must be non-empty'):
fc.categorical_column_with_vocabulary_list(
key='aaa', vocabulary_list=None)
def test_empty_mapping(self):
with self.assertRaisesRegexp(
ValueError, r'vocabulary_list.*must be non-empty'):
fc.categorical_column_with_vocabulary_list(
key='aaa', vocabulary_list=tuple([]))
def test_duplicate_mapping(self):
with self.assertRaisesRegexp(ValueError, 'Duplicate keys'):
fc.categorical_column_with_vocabulary_list(
key='aaa', vocabulary_list=(12, 24, 12))
def test_invalid_num_oov_buckets(self):
with self.assertRaisesRegexp(ValueError, 'Invalid num_oov_buckets'):
fc.categorical_column_with_vocabulary_list(
key='aaa', vocabulary_list=(12, 24, 36),
num_oov_buckets=-1)
def test_invalid_buckets_and_default_value(self):
with self.assertRaisesRegexp(
ValueError, 'both num_oov_buckets and default_value'):
fc.categorical_column_with_vocabulary_list(
key='aaa',
vocabulary_list=(12, 24, 36),
num_oov_buckets=100,
default_value=2)
def test_invalid_input_dtype_int32(self):
column = fc.categorical_column_with_vocabulary_list(
key='aaa',
vocabulary_list=('omar', 'stringer', 'marlo'))
inputs = sparse_tensor.SparseTensorValue(
indices=((0, 0), (1, 0), (1, 1)),
values=(12, 24, 36),
dense_shape=(2, 2))
with self.assertRaisesRegexp(ValueError, 'dtype must be compatible'):
column._get_sparse_tensors(_LazyBuilder({'aaa': inputs}))
def test_invalid_input_dtype_string(self):
column = fc.categorical_column_with_vocabulary_list(
key='aaa',
vocabulary_list=(12, 24, 36))
inputs = sparse_tensor.SparseTensorValue(
indices=((0, 0), (1, 0), (1, 1)),
values=('omar', 'stringer', 'marlo'),
dense_shape=(2, 2))
with self.assertRaisesRegexp(ValueError, 'dtype must be compatible'):
column._get_sparse_tensors(_LazyBuilder({'aaa': inputs}))
  def test_parse_example_string(self):
    """The generated parse spec round-trips string values via parse_example."""
    a = fc.categorical_column_with_vocabulary_list(
        key='aaa', vocabulary_list=('omar', 'stringer', 'marlo'))
    data = example_pb2.Example(features=feature_pb2.Features(
        feature={
            'aaa':
                feature_pb2.Feature(bytes_list=feature_pb2.BytesList(
                    value=[b'omar', b'stringer']))
        }))
    features = parsing_ops.parse_example(
        serialized=[data.SerializeToString()],
        features=fc.make_parse_example_spec([a]))
    self.assertIn('aaa', features)
    with self.test_session():
      _assert_sparse_tensor_value(
          self,
          sparse_tensor.SparseTensorValue(
              indices=[[0, 0], [0, 1]],
              values=np.array([b'omar', b'stringer'], dtype=np.object_),
              dense_shape=[1, 2]),
          features['aaa'].eval())
  def test_parse_example_int(self):
    """The generated parse spec round-trips int values via parse_example."""
    a = fc.categorical_column_with_vocabulary_list(
        key='aaa', vocabulary_list=(11, 21, 31))
    data = example_pb2.Example(features=feature_pb2.Features(
        feature={
            'aaa':
                feature_pb2.Feature(int64_list=feature_pb2.Int64List(
                    value=[11, 21]))
        }))
    features = parsing_ops.parse_example(
        serialized=[data.SerializeToString()],
        features=fc.make_parse_example_spec([a]))
    self.assertIn('aaa', features)
    with self.test_session():
      _assert_sparse_tensor_value(
          self,
          sparse_tensor.SparseTensorValue(
              indices=[[0, 0], [0, 1]],
              values=[11, 21],
              dense_shape=[1, 2]),
          features['aaa'].eval())
  def test_get_sparse_tensors(self):
    """In-vocabulary values map to their list index; OOV values map to -1."""
    column = fc.categorical_column_with_vocabulary_list(
        key='aaa',
        vocabulary_list=('omar', 'stringer', 'marlo'))
    inputs = sparse_tensor.SparseTensorValue(
        indices=((0, 0), (1, 0), (1, 1)),
        values=('marlo', 'skywalker', 'omar'),
        dense_shape=(2, 2))
    id_weight_pair = column._get_sparse_tensors(_LazyBuilder({'aaa': inputs}))
    self.assertIsNone(id_weight_pair.weight_tensor)
    with _initialized_session():
      # 'marlo' -> 2, 'skywalker' (OOV) -> -1, 'omar' -> 0.
      _assert_sparse_tensor_value(
          self,
          sparse_tensor.SparseTensorValue(
              indices=inputs.indices,
              values=np.array((2, -1, 0), dtype=np.int64),
              dense_shape=inputs.dense_shape),
          id_weight_pair.id_tensor.eval())
  def test_transform_feature(self):
    """_transform_features produces the same id tensor as _get_sparse_tensors."""
    column = fc.categorical_column_with_vocabulary_list(
        key='aaa',
        vocabulary_list=('omar', 'stringer', 'marlo'))
    inputs = sparse_tensor.SparseTensorValue(
        indices=((0, 0), (1, 0), (1, 1)),
        values=('marlo', 'skywalker', 'omar'),
        dense_shape=(2, 2))
    id_tensor = _transform_features({'aaa': inputs}, [column])[column]
    with _initialized_session():
      _assert_sparse_tensor_value(
          self,
          sparse_tensor.SparseTensorValue(
              indices=inputs.indices,
              values=np.array((2, -1, 0), dtype=np.int64),
              dense_shape=inputs.dense_shape),
          id_tensor.eval())
def test_get_sparse_tensors_weight_collections(self):
column = fc.categorical_column_with_vocabulary_list(
key='aaa',
vocabulary_list=('omar', 'stringer', 'marlo'))
inputs = sparse_tensor.SparseTensor(
values=['omar', 'stringer', 'marlo'],
indices=[[0, 0], [1, 0], [1, 1]],
dense_shape=[2, 2])
column._get_sparse_tensors(
_LazyBuilder({
'aaa': inputs
}), weight_collections=('my_weights',))
self.assertItemsEqual(
[], ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES))
self.assertItemsEqual([], ops.get_collection('my_weights'))
def test_get_sparse_tensors_dense_input(self):
column = fc.categorical_column_with_vocabulary_list(
key='aaa',
vocabulary_list=('omar', 'stringer', 'marlo'))
id_weight_pair = column._get_sparse_tensors(
_LazyBuilder({
'aaa': (('marlo', ''), ('skywalker', 'omar'))
}))
self.assertIsNone(id_weight_pair.weight_tensor)
with _initialized_session():
_assert_sparse_tensor_value(
self,
sparse_tensor.SparseTensorValue(
indices=((0, 0), (1, 0), (1, 1)),
values=np.array((2, -1, 0), dtype=np.int64),
dense_shape=(2, 2)),
id_weight_pair.id_tensor.eval())
  def test_get_sparse_tensors_default_value_in_vocabulary(self):
    """OOV values take default_value, which may itself be a valid id."""
    column = fc.categorical_column_with_vocabulary_list(
        key='aaa',
        vocabulary_list=('omar', 'stringer', 'marlo'),
        default_value=2)
    inputs = sparse_tensor.SparseTensorValue(
        indices=((0, 0), (1, 0), (1, 1)),
        values=('marlo', 'skywalker', 'omar'),
        dense_shape=(2, 2))
    id_weight_pair = column._get_sparse_tensors(_LazyBuilder({'aaa': inputs}))
    self.assertIsNone(id_weight_pair.weight_tensor)
    with _initialized_session():
      _assert_sparse_tensor_value(
          self,
          sparse_tensor.SparseTensorValue(
              indices=inputs.indices,
              # 'marlo' -> 2, 'skywalker' (OOV) -> default_value=2, 'omar' -> 0.
              values=np.array((2, 2, 0), dtype=np.int64),
              dense_shape=inputs.dense_shape),
          id_weight_pair.id_tensor.eval())
  def test_get_sparse_tensors_with_oov_buckets(self):
    """OOV values get bucket ids at or beyond the vocabulary size."""
    column = fc.categorical_column_with_vocabulary_list(
        key='aaa',
        vocabulary_list=('omar', 'stringer', 'marlo'),
        num_oov_buckets=100)
    inputs = sparse_tensor.SparseTensorValue(
        indices=((0, 0), (1, 0), (1, 1), (1, 2)),
        values=('marlo', 'skywalker', 'omar', 'heisenberg'),
        dense_shape=(2, 3))
    id_weight_pair = column._get_sparse_tensors(_LazyBuilder({'aaa': inputs}))
    self.assertIsNone(id_weight_pair.weight_tensor)
    with _initialized_session():
      _assert_sparse_tensor_value(
          self,
          sparse_tensor.SparseTensorValue(
              indices=inputs.indices,
              # 33 and 62: bucket ids for OOV 'skywalker'/'heisenberg'.
              values=np.array((2, 33, 0, 62), dtype=np.int64),
              dense_shape=inputs.dense_shape),
          id_weight_pair.id_tensor.eval())
  def test_get_sparse_tensors_int32(self):
    """int32 vocabulary: values map to list position, OOV maps to -1."""
    column = fc.categorical_column_with_vocabulary_list(
        key='aaa',
        vocabulary_list=np.array((30, 35, 11, 23, 22), dtype=np.int32),
        dtype=dtypes.int32)
    inputs = sparse_tensor.SparseTensorValue(
        indices=((0, 0), (1, 0), (1, 1), (2, 2)),
        values=np.array((11, 100, 30, 22), dtype=np.int32),
        dense_shape=(3, 3))
    id_weight_pair = column._get_sparse_tensors(_LazyBuilder({'aaa': inputs}))
    self.assertIsNone(id_weight_pair.weight_tensor)
    with _initialized_session():
      _assert_sparse_tensor_value(
          self,
          sparse_tensor.SparseTensorValue(
              indices=inputs.indices,
              # 11 -> 2, 100 (OOV) -> -1, 30 -> 0, 22 -> 4.
              values=np.array((2, -1, 0, 4), dtype=np.int64),
              dense_shape=inputs.dense_shape),
          id_weight_pair.id_tensor.eval())
  def test_get_sparse_tensors_int32_dense_input(self):
    """Dense int32 input: -1 means missing; OOV maps to default_value."""
    default_value = -100
    column = fc.categorical_column_with_vocabulary_list(
        key='aaa',
        vocabulary_list=np.array((30, 35, 11, 23, 22), dtype=np.int32),
        dtype=dtypes.int32,
        default_value=default_value)
    id_weight_pair = column._get_sparse_tensors(
        _LazyBuilder({
            'aaa':
                np.array(
                    ((11, -1, -1), (100, 30, -1), (-1, -1, 22)), dtype=np.int32)
        }))
    self.assertIsNone(id_weight_pair.weight_tensor)
    with _initialized_session():
      _assert_sparse_tensor_value(
          self,
          sparse_tensor.SparseTensorValue(
              # Only the non-(-1) entries survive sparsification.
              indices=((0, 0), (1, 0), (1, 1), (2, 2)),
              values=np.array((2, default_value, 0, 4), dtype=np.int64),
              dense_shape=(3, 3)),
          id_weight_pair.id_tensor.eval())
  def test_get_sparse_tensors_int32_with_oov_buckets(self):
    """int32 vocabulary with OOV buckets: unknown value gets a bucket id."""
    column = fc.categorical_column_with_vocabulary_list(
        key='aaa',
        vocabulary_list=np.array((30, 35, 11, 23, 22), dtype=np.int32),
        dtype=dtypes.int32,
        num_oov_buckets=100)
    inputs = sparse_tensor.SparseTensorValue(
        indices=((0, 0), (1, 0), (1, 1), (2, 2)),
        values=(11, 100, 30, 22),
        dense_shape=(3, 3))
    id_weight_pair = column._get_sparse_tensors(_LazyBuilder({'aaa': inputs}))
    self.assertIsNone(id_weight_pair.weight_tensor)
    with _initialized_session():
      _assert_sparse_tensor_value(
          self,
          sparse_tensor.SparseTensorValue(
              indices=inputs.indices,
              # 11 -> 2, 100 (OOV) -> bucket id 60, 30 -> 0, 22 -> 4.
              values=np.array((2, 60, 0, 4), dtype=np.int64),
              dense_shape=inputs.dense_shape),
          id_weight_pair.id_tensor.eval())
  def test_linear_model(self):
    """A linear model over a vocabulary column sums the looked-up weights."""
    wire_column = fc.categorical_column_with_vocabulary_list(
        key='aaa',
        vocabulary_list=('omar', 'stringer', 'marlo'),
        num_oov_buckets=1)
    # 3 vocabulary entries + 1 OOV bucket.
    self.assertEqual(4, wire_column._num_buckets)
    with ops.Graph().as_default():
      predictions = fc.linear_model({
          wire_column.name: sparse_tensor.SparseTensorValue(
              indices=((0, 0), (1, 0), (1, 1)),
              values=('marlo', 'skywalker', 'omar'),
              dense_shape=(2, 2))
      }, (wire_column,))
      bias = get_linear_model_bias()
      wire_var = get_linear_model_column_var(wire_column)
      with _initialized_session():
        # Bias and weights start zero-initialized.
        self.assertAllClose((0.,), bias.eval())
        self.assertAllClose(((0.,), (0.,), (0.,), (0.,)), wire_var.eval())
        self.assertAllClose(((0.,), (0.,)), predictions.eval())
        wire_var.assign(((1.,), (2.,), (3.,), (4.,))).eval()
        # 'marlo' -> 2: wire_var[2] = 3
        # 'skywalker' -> 3, 'omar' -> 0: wire_var[3] + wire_var[0] = 4+1 = 5
        self.assertAllClose(((3.,), (5.,)), predictions.eval())
class IdentityCategoricalColumnTest(test.TestCase):
  """Tests for fc.categorical_column_with_identity."""
  def test_constructor(self):
    column = fc.categorical_column_with_identity(key='aaa', num_buckets=3)
    self.assertEqual('aaa', column.name)
    self.assertEqual(3, column._num_buckets)
    self.assertEqual({
        'aaa': parsing_ops.VarLenFeature(dtypes.int64)
    }, column._parse_example_spec)
  def test_deep_copy(self):
    """copy.deepcopy preserves all identity-column attributes."""
    original = fc.categorical_column_with_identity(key='aaa', num_buckets=3)
    for column in (original, copy.deepcopy(original)):
      self.assertEqual('aaa', column.name)
      self.assertEqual(3, column._num_buckets)
      self.assertEqual({
          'aaa': parsing_ops.VarLenFeature(dtypes.int64)
      }, column._parse_example_spec)
  def test_invalid_num_buckets_zero(self):
    with self.assertRaisesRegexp(ValueError, 'num_buckets 0 < 1'):
      fc.categorical_column_with_identity(key='aaa', num_buckets=0)
  def test_invalid_num_buckets_negative(self):
    with self.assertRaisesRegexp(ValueError, 'num_buckets -1 < 1'):
      fc.categorical_column_with_identity(key='aaa', num_buckets=-1)
  def test_invalid_default_value_too_small(self):
    with self.assertRaisesRegexp(ValueError, 'default_value -1 not in range'):
      fc.categorical_column_with_identity(
          key='aaa', num_buckets=3, default_value=-1)
  def test_invalid_default_value_too_big(self):
    # default_value must lie in [0, num_buckets).
    with self.assertRaisesRegexp(ValueError, 'default_value 3 not in range'):
      fc.categorical_column_with_identity(
          key='aaa', num_buckets=3, default_value=3)
  def test_invalid_input_dtype(self):
    """String values are rejected: identity columns require integer input."""
    column = fc.categorical_column_with_identity(key='aaa', num_buckets=3)
    inputs = sparse_tensor.SparseTensorValue(
        indices=((0, 0), (1, 0), (1, 1)),
        values=('omar', 'stringer', 'marlo'),
        dense_shape=(2, 2))
    with self.assertRaisesRegexp(ValueError, 'Invalid input, not integer'):
      column._get_sparse_tensors(_LazyBuilder({'aaa': inputs}))
  def test_parse_example(self):
    a = fc.categorical_column_with_identity(key='aaa', num_buckets=30)
    data = example_pb2.Example(features=feature_pb2.Features(
        feature={
            'aaa':
                feature_pb2.Feature(int64_list=feature_pb2.Int64List(
                    value=[11, 21]))
        }))
    features = parsing_ops.parse_example(
        serialized=[data.SerializeToString()],
        features=fc.make_parse_example_spec([a]))
    self.assertIn('aaa', features)
    with self.test_session():
      _assert_sparse_tensor_value(
          self,
          sparse_tensor.SparseTensorValue(
              indices=[[0, 0], [0, 1]],
              values=np.array([11, 21], dtype=np.int64),
              dense_shape=[1, 2]),
          features['aaa'].eval())
  def test_get_sparse_tensors(self):
    """In-range int values pass through unchanged as int64 ids."""
    column = fc.categorical_column_with_identity(key='aaa', num_buckets=3)
    inputs = sparse_tensor.SparseTensorValue(
        indices=((0, 0), (1, 0), (1, 1)),
        values=(0, 1, 0),
        dense_shape=(2, 2))
    id_weight_pair = column._get_sparse_tensors(_LazyBuilder({'aaa': inputs}))
    self.assertIsNone(id_weight_pair.weight_tensor)
    with _initialized_session():
      _assert_sparse_tensor_value(
          self,
          sparse_tensor.SparseTensorValue(
              indices=inputs.indices,
              values=np.array((0, 1, 0), dtype=np.int64),
              dense_shape=inputs.dense_shape),
          id_weight_pair.id_tensor.eval())
  def test_transform_feature(self):
    column = fc.categorical_column_with_identity(key='aaa', num_buckets=3)
    inputs = sparse_tensor.SparseTensorValue(
        indices=((0, 0), (1, 0), (1, 1)),
        values=(0, 1, 0),
        dense_shape=(2, 2))
    id_tensor = _transform_features({'aaa': inputs}, [column])[column]
    with _initialized_session():
      _assert_sparse_tensor_value(
          self,
          sparse_tensor.SparseTensorValue(
              indices=inputs.indices,
              values=np.array((0, 1, 0), dtype=np.int64),
              dense_shape=inputs.dense_shape),
          id_tensor.eval())
  def test_get_sparse_tensors_weight_collections(self):
    """Identity lookup creates no variables in any collection."""
    column = fc.categorical_column_with_identity(key='aaa', num_buckets=3)
    inputs = sparse_tensor.SparseTensorValue(
        indices=((0, 0), (1, 0), (1, 1)),
        values=(0, 1, 0),
        dense_shape=(2, 2))
    column._get_sparse_tensors(
        _LazyBuilder({
            'aaa': inputs
        }), weight_collections=('my_weights',))
    self.assertItemsEqual(
        [], ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES))
    self.assertItemsEqual([], ops.get_collection('my_weights'))
  def test_get_sparse_tensors_dense_input(self):
    """Dense input is sparsified; -1 entries are treated as missing."""
    column = fc.categorical_column_with_identity(key='aaa', num_buckets=3)
    id_weight_pair = column._get_sparse_tensors(
        _LazyBuilder({
            'aaa': ((0, -1), (1, 0))
        }))
    self.assertIsNone(id_weight_pair.weight_tensor)
    with _initialized_session():
      _assert_sparse_tensor_value(
          self,
          sparse_tensor.SparseTensorValue(
              indices=((0, 0), (1, 0), (1, 1)),
              values=np.array((0, 1, 0), dtype=np.int64),
              dense_shape=(2, 2)),
          id_weight_pair.id_tensor.eval())
  def test_get_sparse_tensors_with_inputs_too_small(self):
    """Negative ids (no default_value) fail the runtime range assert."""
    column = fc.categorical_column_with_identity(key='aaa', num_buckets=3)
    inputs = sparse_tensor.SparseTensorValue(
        indices=((0, 0), (1, 0), (1, 1)),
        values=(1, -1, 0),
        dense_shape=(2, 2))
    id_weight_pair = column._get_sparse_tensors(_LazyBuilder({'aaa': inputs}))
    self.assertIsNone(id_weight_pair.weight_tensor)
    with _initialized_session():
      with self.assertRaisesRegexp(
          errors.OpError, 'assert_greater_or_equal_0'):
        id_weight_pair.id_tensor.eval()
  def test_get_sparse_tensors_with_inputs_too_big(self):
    """Ids >= num_buckets (no default_value) fail the runtime range assert."""
    column = fc.categorical_column_with_identity(key='aaa', num_buckets=3)
    inputs = sparse_tensor.SparseTensorValue(
        indices=((0, 0), (1, 0), (1, 1)),
        values=(1, 99, 0),
        dense_shape=(2, 2))
    id_weight_pair = column._get_sparse_tensors(_LazyBuilder({'aaa': inputs}))
    self.assertIsNone(id_weight_pair.weight_tensor)
    with _initialized_session():
      with self.assertRaisesRegexp(
          errors.OpError, 'assert_less_than_num_buckets'):
        id_weight_pair.id_tensor.eval()
  def test_get_sparse_tensors_with_default_value(self):
    """Out-of-range ids are replaced by default_value instead of failing."""
    column = fc.categorical_column_with_identity(
        key='aaa', num_buckets=4, default_value=3)
    inputs = sparse_tensor.SparseTensorValue(
        indices=((0, 0), (1, 0), (1, 1)),
        values=(1, -1, 99),
        dense_shape=(2, 2))
    id_weight_pair = column._get_sparse_tensors(_LazyBuilder({'aaa': inputs}))
    self.assertIsNone(id_weight_pair.weight_tensor)
    with _initialized_session():
      _assert_sparse_tensor_value(
          self,
          sparse_tensor.SparseTensorValue(
              indices=inputs.indices,
              # -1 and 99 are out of range [0, 4) and map to default_value=3.
              values=np.array((1, 3, 3), dtype=np.int64),
              dense_shape=inputs.dense_shape),
          id_weight_pair.id_tensor.eval())
  def test_get_sparse_tensors_with_default_value_and_placeholder_inputs(self):
    """Same as above, with all sparse components fed via placeholders."""
    column = fc.categorical_column_with_identity(
        key='aaa', num_buckets=4, default_value=3)
    input_indices = array_ops.placeholder(dtype=dtypes.int64)
    input_values = array_ops.placeholder(dtype=dtypes.int32)
    input_shape = array_ops.placeholder(dtype=dtypes.int64)
    inputs = sparse_tensor.SparseTensorValue(
        indices=input_indices,
        values=input_values,
        dense_shape=input_shape)
    id_weight_pair = column._get_sparse_tensors(_LazyBuilder({'aaa': inputs}))
    self.assertIsNone(id_weight_pair.weight_tensor)
    with _initialized_session():
      _assert_sparse_tensor_value(
          self,
          sparse_tensor.SparseTensorValue(
              indices=np.array(((0, 0), (1, 0), (1, 1)), dtype=np.int64),
              values=np.array((1, 3, 3), dtype=np.int64),
              dense_shape=np.array((2, 2), dtype=np.int64)),
          id_weight_pair.id_tensor.eval(feed_dict={
              input_indices: ((0, 0), (1, 0), (1, 1)),
              input_values: (1, -1, 99),
              input_shape: (2, 2),
          }))
  def test_linear_model(self):
    """A linear model over an identity column sums the looked-up weights."""
    column = fc.categorical_column_with_identity(key='aaa', num_buckets=3)
    self.assertEqual(3, column._num_buckets)
    with ops.Graph().as_default():
      predictions = fc.linear_model({
          column.name: sparse_tensor.SparseTensorValue(
              indices=((0, 0), (1, 0), (1, 1)),
              values=(0, 2, 1),
              dense_shape=(2, 2))
      }, (column,))
      bias = get_linear_model_bias()
      weight_var = get_linear_model_column_var(column)
      with _initialized_session():
        # Bias and weights start zero-initialized.
        self.assertAllClose((0.,), bias.eval())
        self.assertAllClose(((0.,), (0.,), (0.,)), weight_var.eval())
        self.assertAllClose(((0.,), (0.,)), predictions.eval())
        weight_var.assign(((1.,), (2.,), (3.,))).eval()
        # weight_var[0] = 1
        # weight_var[2] + weight_var[1] = 3+2 = 5
        self.assertAllClose(((1.,), (5.,)), predictions.eval())
class TransformFeaturesTest(test.TestCase):
  """Tests for _transform_features over multiple columns."""
  # All transform tests are distributed in column test.
  # Here we only test multi column case and naming
  def test_transform_multi_column(self):
    # Renamed from `transform_multi_column`: without the `test_` prefix the
    # test runner never discovered this method, so it was silently skipped.
    bucketized_price = fc.bucketized_column(
        fc.numeric_column('price'), boundaries=[0, 2, 4, 6])
    hashed_sparse = fc.categorical_column_with_hash_bucket('wire', 10)
    with ops.Graph().as_default():
      features = {
          'price': [[-1.], [5.]],
          'wire':
              sparse_tensor.SparseTensor(
                  values=['omar', 'stringer', 'marlo'],
                  indices=[[0, 0], [1, 0], [1, 1]],
                  dense_shape=[2, 2])
      }
      transformed = _transform_features(features,
                                        [bucketized_price, hashed_sparse])
      with _initialized_session():
        # Output tensor names include the originating column's name.
        self.assertIn(bucketized_price.name, transformed[bucketized_price].name)
        self.assertAllEqual([[0], [3]], transformed[bucketized_price].eval())
        self.assertIn(hashed_sparse.name, transformed[hashed_sparse].name)
        self.assertAllEqual([6, 4, 1], transformed[hashed_sparse].values.eval())
  def test_column_order(self):
    """Transform order is deterministic regardless of the column list order.

    Both list orders below yield the same call order, so columns are
    apparently processed in a canonical (name-sorted) order — the original
    docstring here described an unrelated dense/sparse behavior.
    """
    class _LoggerColumn(_FeatureColumn):
      """Column stub that records the order its transform was invoked in."""
      def __init__(self, name):
        self._name = name
      @property
      def name(self):
        return self._name
      def _transform_feature(self, inputs):
        del inputs
        self.call_order = call_logger['count']
        call_logger['count'] += 1
        return 'Anything'
      @property
      def _parse_example_spec(self):
        pass
    with ops.Graph().as_default():
      column1 = _LoggerColumn('1')
      column2 = _LoggerColumn('2')
      call_logger = {'count': 0}
      _transform_features({}, [column1, column2])
      self.assertEqual(0, column1.call_order)
      self.assertEqual(1, column2.call_order)
      call_logger = {'count': 0}
      _transform_features({}, [column2, column1])
      # Same order even though the list was reversed.
      self.assertEqual(0, column1.call_order)
      self.assertEqual(1, column2.call_order)
class IndicatorColumnTest(test.TestCase):
  """Tests for fc.indicator_column."""
  def test_indicator_column(self):
    a = fc.categorical_column_with_hash_bucket('a', 4)
    indicator_a = fc.indicator_column(a)
    self.assertEqual(indicator_a.categorical_column.name, 'a')
    self.assertEqual(indicator_a._variable_shape, [1, 4])
    b = fc.categorical_column_with_hash_bucket('b', hash_bucket_size=100)
    indicator_b = fc.indicator_column(b)
    self.assertEqual(indicator_b.categorical_column.name, 'b')
    self.assertEqual(indicator_b._variable_shape, [1, 100])
  def test_1D_shape_succeeds(self):
    animal = fc.indicator_column(
        fc.categorical_column_with_hash_bucket('animal', 4))
    builder = _LazyBuilder({'animal': ['fox', 'fox']})
    output = builder.get(animal)
    with self.test_session():
      self.assertAllEqual([[0., 0., 1., 0.], [0., 0., 1., 0.]], output.eval())
  def test_2D_shape_succeeds(self):
    # TODO(ispir/cassandrax): Switch to categorical_column_with_keys when ready.
    animal = fc.indicator_column(
        fc.categorical_column_with_hash_bucket('animal', 4))
    builder = _LazyBuilder({
        'animal':
            sparse_tensor.SparseTensor(
                indices=[[0, 0], [1, 0]],
                values=['fox', 'fox'],
                dense_shape=[2, 1])
    })
    output = builder.get(animal)
    with self.test_session():
      self.assertAllEqual([[0., 0., 1., 0.], [0., 0., 1., 0.]], output.eval())
  def test_multi_hot(self):
    """Duplicate ids accumulate: two occurrences of id 1 yield count 2."""
    animal = fc.indicator_column(
        fc.categorical_column_with_identity('animal', num_buckets=4))
    builder = _LazyBuilder({
        'animal':
            sparse_tensor.SparseTensor(
                indices=[[0, 0], [0, 1]], values=[1, 1], dense_shape=[1, 2])
    })
    output = builder.get(animal)
    with self.test_session():
      self.assertAllEqual([[0., 2., 0., 0.]], output.eval())
  def test_multi_hot2(self):
    """Distinct ids each contribute 1 to their own bucket."""
    animal = fc.indicator_column(
        fc.categorical_column_with_identity('animal', num_buckets=4))
    builder = _LazyBuilder({
        'animal':
            sparse_tensor.SparseTensor(
                indices=[[0, 0], [0, 1]], values=[1, 2], dense_shape=[1, 2])
    })
    output = builder.get(animal)
    with self.test_session():
      self.assertAllEqual([[0., 1., 1., 0.]], output.eval())
  def test_deep_copy(self):
    a = fc.categorical_column_with_hash_bucket('a', 4)
    column = fc.indicator_column(a)
    column_copy = copy.deepcopy(column)
    self.assertEqual(column_copy.categorical_column.name, 'a')
    self.assertEqual(column.name, 'a_indicator')
    self.assertEqual(column._variable_shape, [1, 4])
  def test_parse_example(self):
    a = fc.categorical_column_with_vocabulary_list(
        key='aaa', vocabulary_list=('omar', 'stringer', 'marlo'))
    a_indicator = fc.indicator_column(a)
    data = example_pb2.Example(features=feature_pb2.Features(
        feature={
            'aaa':
                feature_pb2.Feature(bytes_list=feature_pb2.BytesList(
                    value=[b'omar', b'stringer']))
        }))
    features = parsing_ops.parse_example(
        serialized=[data.SerializeToString()],
        features=fc.make_parse_example_spec([a_indicator]))
    self.assertIn('aaa', features)
    with self.test_session():
      _assert_sparse_tensor_value(
          self,
          sparse_tensor.SparseTensorValue(
              indices=[[0, 0], [0, 1]],
              values=np.array([b'omar', b'stringer'], dtype=np.object_),
              dense_shape=[1, 2]),
          features['aaa'].eval())
  def test_transform(self):
    """Indicator transform one-hot-encodes the vocabulary lookup."""
    a = fc.categorical_column_with_vocabulary_list(
        key='aaa', vocabulary_list=('omar', 'stringer', 'marlo'))
    a_indicator = fc.indicator_column(a)
    features = {
        'aaa': sparse_tensor.SparseTensorValue(
            indices=((0, 0), (1, 0), (1, 1)),
            values=('marlo', 'skywalker', 'omar'),
            dense_shape=(2, 2))
    }
    indicator_tensor = _transform_features(features, [a_indicator])[a_indicator]
    with _initialized_session():
      self.assertAllEqual([[0, 0, 1], [1, 0, 0]], indicator_tensor.eval())
  def test_transform_with_weighted_column(self):
    """Weighted categorical: the indicator carries weights, not counts."""
    # Github issue 12557
    ids = fc.categorical_column_with_vocabulary_list(
        key='ids', vocabulary_list=('a', 'b', 'c'))
    weights = fc.weighted_categorical_column(ids, 'weights')
    indicator = fc.indicator_column(weights)
    features = {
        'ids': constant_op.constant([['c', 'b', 'a']]),
        'weights': constant_op.constant([[2., 4., 6.]])
    }
    indicator_tensor = _transform_features(features, [indicator])[indicator]
    with _initialized_session():
      self.assertAllEqual([[6., 4., 2.]], indicator_tensor.eval())
  def test_transform_with_missing_value_in_weighted_column(self):
    # Github issue 12583
    ids = fc.categorical_column_with_vocabulary_list(
        key='ids', vocabulary_list=('a', 'b', 'c'))
    weights = fc.weighted_categorical_column(ids, 'weights')
    indicator = fc.indicator_column(weights)
    features = {
        # 'unknown' is OOV, so its weight (6.) must not appear in the output.
        'ids': constant_op.constant([['c', 'b', 'unknown']]),
        'weights': constant_op.constant([[2., 4., 6.]])
    }
    indicator_tensor = _transform_features(features, [indicator])[indicator]
    with _initialized_session():
      self.assertAllEqual([[0., 4., 2.]], indicator_tensor.eval())
  def test_transform_with_missing_value_in_categorical_column(self):
    # Github issue 12583
    ids = fc.categorical_column_with_vocabulary_list(
        key='ids', vocabulary_list=('a', 'b', 'c'))
    indicator = fc.indicator_column(ids)
    features = {
        'ids': constant_op.constant([['c', 'b', 'unknown']]),
    }
    indicator_tensor = _transform_features(features, [indicator])[indicator]
    with _initialized_session():
      self.assertAllEqual([[0., 1., 1.]], indicator_tensor.eval())
  def test_linear_model(self):
    animal = fc.indicator_column(
        fc.categorical_column_with_identity('animal', num_buckets=4))
    with ops.Graph().as_default():
      features = {
          'animal':
              sparse_tensor.SparseTensor(
                  indices=[[0, 0], [0, 1]], values=[1, 2], dense_shape=[1, 2])
      }
      predictions = fc.linear_model(features, [animal])
      weight_var = get_linear_model_column_var(animal)
      with _initialized_session():
        # All should be zero-initialized.
        self.assertAllClose([[0.], [0.], [0.], [0.]], weight_var.eval())
        self.assertAllClose([[0.]], predictions.eval())
        weight_var.assign([[1.], [2.], [3.], [4.]]).eval()
        # ids 1 and 2 are active: weights 2 + 3.
        self.assertAllClose([[2. + 3.]], predictions.eval())
  def test_input_layer(self):
    animal = fc.indicator_column(
        fc.categorical_column_with_identity('animal', num_buckets=4))
    with ops.Graph().as_default():
      features = {
          'animal':
              sparse_tensor.SparseTensor(
                  indices=[[0, 0], [0, 1]], values=[1, 2], dense_shape=[1, 2])
      }
      net = fc.input_layer(features, [animal])
      with _initialized_session():
        self.assertAllClose([[0., 1., 1., 0.]], net.eval())
class EmbeddingColumnTest(test.TestCase):
def test_defaults(self):
categorical_column = fc.categorical_column_with_identity(
key='aaa', num_buckets=3)
embedding_dimension = 2
embedding_column = fc.embedding_column(
categorical_column, dimension=embedding_dimension)
self.assertIs(categorical_column, embedding_column.categorical_column)
self.assertEqual(embedding_dimension, embedding_column.dimension)
self.assertEqual('mean', embedding_column.combiner)
self.assertIsNotNone(embedding_column.initializer)
self.assertIsNone(embedding_column.ckpt_to_load_from)
self.assertIsNone(embedding_column.tensor_name_in_ckpt)
self.assertIsNone(embedding_column.max_norm)
self.assertTrue(embedding_column.trainable)
self.assertEqual('aaa_embedding', embedding_column.name)
self.assertEqual(
(embedding_dimension,), embedding_column._variable_shape)
self.assertEqual({
'aaa': parsing_ops.VarLenFeature(dtypes.int64)
}, embedding_column._parse_example_spec)
  def test_all_constructor_args(self):
    """All constructor arguments are stored on the column as given."""
    categorical_column = fc.categorical_column_with_identity(
        key='aaa', num_buckets=3)
    embedding_dimension = 2
    embedding_column = fc.embedding_column(
        categorical_column, dimension=embedding_dimension,
        combiner='my_combiner', initializer=lambda: 'my_initializer',
        ckpt_to_load_from='my_ckpt', tensor_name_in_ckpt='my_ckpt_tensor',
        max_norm=42., trainable=False)
    self.assertIs(categorical_column, embedding_column.categorical_column)
    self.assertEqual(embedding_dimension, embedding_column.dimension)
    self.assertEqual('my_combiner', embedding_column.combiner)
    self.assertEqual('my_initializer', embedding_column.initializer())
    self.assertEqual('my_ckpt', embedding_column.ckpt_to_load_from)
    self.assertEqual('my_ckpt_tensor', embedding_column.tensor_name_in_ckpt)
    self.assertEqual(42., embedding_column.max_norm)
    self.assertFalse(embedding_column.trainable)
    self.assertEqual('aaa_embedding', embedding_column.name)
    self.assertEqual(
        (embedding_dimension,), embedding_column._variable_shape)
    self.assertEqual({
        'aaa': parsing_ops.VarLenFeature(dtypes.int64)
    }, embedding_column._parse_example_spec)
  def test_deep_copy(self):
    """copy.deepcopy preserves all embedding-column attributes."""
    categorical_column = fc.categorical_column_with_identity(
        key='aaa', num_buckets=3)
    embedding_dimension = 2
    original = fc.embedding_column(
        categorical_column, dimension=embedding_dimension,
        combiner='my_combiner', initializer=lambda: 'my_initializer',
        ckpt_to_load_from='my_ckpt', tensor_name_in_ckpt='my_ckpt_tensor',
        max_norm=42., trainable=False)
    for embedding_column in (original, copy.deepcopy(original)):
      self.assertEqual('aaa', embedding_column.categorical_column.name)
      self.assertEqual(3, embedding_column.categorical_column._num_buckets)
      self.assertEqual({
          'aaa': parsing_ops.VarLenFeature(dtypes.int64)
      }, embedding_column.categorical_column._parse_example_spec)
      self.assertEqual(embedding_dimension, embedding_column.dimension)
      self.assertEqual('my_combiner', embedding_column.combiner)
      self.assertEqual('my_initializer', embedding_column.initializer())
      self.assertEqual('my_ckpt', embedding_column.ckpt_to_load_from)
      self.assertEqual('my_ckpt_tensor', embedding_column.tensor_name_in_ckpt)
      self.assertEqual(42., embedding_column.max_norm)
      self.assertFalse(embedding_column.trainable)
      self.assertEqual('aaa_embedding', embedding_column.name)
      self.assertEqual(
          (embedding_dimension,), embedding_column._variable_shape)
      self.assertEqual({
          'aaa': parsing_ops.VarLenFeature(dtypes.int64)
      }, embedding_column._parse_example_spec)
def test_invalid_initializer(self):
categorical_column = fc.categorical_column_with_identity(
key='aaa', num_buckets=3)
with self.assertRaisesRegexp(ValueError, 'initializer must be callable'):
fc.embedding_column(categorical_column, dimension=2, initializer='not_fn')
  def test_parse_example(self):
    """make_parse_example_spec for an embedding column parses raw strings."""
    a = fc.categorical_column_with_vocabulary_list(
        key='aaa', vocabulary_list=('omar', 'stringer', 'marlo'))
    a_embedded = fc.embedding_column(a, dimension=2)
    data = example_pb2.Example(features=feature_pb2.Features(
        feature={
            'aaa':
                feature_pb2.Feature(bytes_list=feature_pb2.BytesList(
                    value=[b'omar', b'stringer']))
        }))
    features = parsing_ops.parse_example(
        serialized=[data.SerializeToString()],
        features=fc.make_parse_example_spec([a_embedded]))
    self.assertIn('aaa', features)
    with self.test_session():
      _assert_sparse_tensor_value(
          self,
          sparse_tensor.SparseTensorValue(
              indices=[[0, 0], [0, 1]],
              values=np.array([b'omar', b'stringer'], dtype=np.object_),
              dense_shape=[1, 2]),
          features['aaa'].eval())
  def test_transform_feature(self):
    """Embedding transform yields the same tensor as the categorical column."""
    a = fc.categorical_column_with_identity(key='aaa', num_buckets=3)
    a_embedded = fc.embedding_column(a, dimension=2)
    features = {
        'aaa': sparse_tensor.SparseTensor(
            indices=((0, 0), (1, 0), (1, 1)),
            values=(0, 1, 0),
            dense_shape=(2, 2))
    }
    outputs = _transform_features(features, [a, a_embedded])
    output_a = outputs[a]
    output_embedded = outputs[a_embedded]
    with _initialized_session():
      _assert_sparse_tensor_value(
          self, output_a.eval(), output_embedded.eval())
  def test_get_dense_tensor(self):
    """Mean-combined embedding lookup returns the expected dense values."""
    # Inputs.
    vocabulary_size = 3
    sparse_input = sparse_tensor.SparseTensorValue(
        # example 0, ids [2]
        # example 1, ids [0, 1]
        # example 2, ids []
        # example 3, ids [1]
        indices=((0, 0), (1, 0), (1, 4), (3, 0)),
        values=(2, 0, 1, 1),
        dense_shape=(4, 5))
    # Embedding variable.
    embedding_dimension = 2
    embedding_values = (
        (1., 2.),  # id 0
        (3., 5.),  # id 1
        (7., 11.)  # id 2
    )
    def _initializer(shape, dtype, partition_info):
      # Verify the initializer is called with the expected variable spec.
      self.assertAllEqual((vocabulary_size, embedding_dimension), shape)
      self.assertEqual(dtypes.float32, dtype)
      self.assertIsNone(partition_info)
      return embedding_values
    # Expected lookup result, using combiner='mean'.
    expected_lookups = (
        # example 0, ids [2], embedding = [7, 11]
        (7., 11.),
        # example 1, ids [0, 1], embedding = mean([1, 2] + [3, 5]) = [2, 3.5]
        (2., 3.5),
        # example 2, ids [], embedding = [0, 0]
        (0., 0.),
        # example 3, ids [1], embedding = [3, 5]
        (3., 5.),
    )
    # Build columns.
    categorical_column = fc.categorical_column_with_identity(
        key='aaa', num_buckets=vocabulary_size)
    embedding_column = fc.embedding_column(
        categorical_column, dimension=embedding_dimension,
        initializer=_initializer)
    # Provide sparse input and get dense result.
    embedding_lookup = embedding_column._get_dense_tensor(
        _LazyBuilder({
            'aaa': sparse_input
        }))
    # Assert expected embedding variable and lookups.
    global_vars = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
    self.assertItemsEqual(
        ('embedding_weights:0',), tuple([v.name for v in global_vars]))
    with _initialized_session():
      self.assertAllEqual(embedding_values, global_vars[0].eval())
      self.assertAllEqual(expected_lookups, embedding_lookup.eval())
  def test_get_dense_tensor_3d(self):
    """Rank-3 sparse input: lookup preserves the extra (middle) dimension."""
    # Inputs.
    vocabulary_size = 4
    sparse_input = sparse_tensor.SparseTensorValue(
        # example 0, ids [2]
        # example 1, ids [0, 1]
        # example 2, ids []
        # example 3, ids [1]
        indices=((0, 0, 0), (1, 1, 0), (1, 1, 4), (3, 0, 0), (3, 1, 2)),
        values=(2, 0, 1, 1, 2),
        dense_shape=(4, 2, 5))
    # Embedding variable.
    embedding_dimension = 3
    embedding_values = (
        (1., 2., 4.),   # id 0
        (3., 5., 1.),   # id 1
        (7., 11., 2.),  # id 2
        (2., 7., 12.)   # id 3
    )
    def _initializer(shape, dtype, partition_info):
      # Verify the initializer is called with the expected variable spec.
      self.assertAllEqual((vocabulary_size, embedding_dimension), shape)
      self.assertEqual(dtypes.float32, dtype)
      self.assertIsNone(partition_info)
      return embedding_values
    # Expected lookup result, using combiner='mean'.
    expected_lookups = (
        # example 0, ids [[2], []], embedding = [[7, 11, 2], [0, 0, 0]]
        ((7., 11., 2.), (0., 0., 0.)),
        # example 1, ids [[], [0, 1]], embedding
        # = mean([[], [1, 2, 4] + [3, 5, 1]]) = [[0, 0, 0], [2, 3.5, 2.5]]
        ((0., 0., 0.), (2., 3.5, 2.5)),
        # example 2, ids [[], []], embedding = [[0, 0, 0], [0, 0, 0]]
        ((0., 0., 0.), (0., 0., 0.)),
        # example 3, ids [[1], [2]], embedding = [[3, 5, 1], [7, 11, 2]]
        ((3., 5., 1.), (7., 11., 2.)),
    )
    # Build columns.
    categorical_column = fc.categorical_column_with_identity(
        key='aaa', num_buckets=vocabulary_size)
    embedding_column = fc.embedding_column(
        categorical_column, dimension=embedding_dimension,
        initializer=_initializer)
    # Provide sparse input and get dense result.
    embedding_lookup = embedding_column._get_dense_tensor(
        _LazyBuilder({
            'aaa': sparse_input
        }))
    # Assert expected embedding variable and lookups.
    global_vars = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
    self.assertItemsEqual(
        ('embedding_weights:0',), tuple([v.name for v in global_vars]))
    with _initialized_session():
      self.assertAllEqual(embedding_values, global_vars[0].eval())
      self.assertAllEqual(expected_lookups, embedding_lookup.eval())
  def test_get_dense_tensor_weight_collections(self):
    """Weights go to the given collections, not GLOBAL_VARIABLES."""
    sparse_input = sparse_tensor.SparseTensorValue(
        # example 0, ids [2]
        # example 1, ids [0, 1]
        # example 2, ids []
        # example 3, ids [1]
        indices=((0, 0), (1, 0), (1, 4), (3, 0)),
        values=(2, 0, 1, 1),
        dense_shape=(4, 5))
    # Build columns.
    categorical_column = fc.categorical_column_with_identity(
        key='aaa', num_buckets=3)
    embedding_column = fc.embedding_column(categorical_column, dimension=2)
    # Provide sparse input and get dense result.
    embedding_column._get_dense_tensor(
        _LazyBuilder({
            'aaa': sparse_input
        }), weight_collections=('my_vars',))
    # Assert expected embedding variable and lookups.
    self.assertItemsEqual(
        [], ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES))
    my_vars = ops.get_collection('my_vars')
    self.assertItemsEqual(
        ('embedding_weights:0',), tuple([v.name for v in my_vars]))
  def test_get_dense_tensor_placeholder_inputs(self):
    """Sparse input components may be fed via placeholders at run time."""
    # Inputs.
    vocabulary_size = 3
    sparse_input = sparse_tensor.SparseTensorValue(
        # example 0, ids [2]
        # example 1, ids [0, 1]
        # example 2, ids []
        # example 3, ids [1]
        indices=((0, 0), (1, 0), (1, 4), (3, 0)),
        values=(2, 0, 1, 1),
        dense_shape=(4, 5))
    # Embedding variable.
    embedding_dimension = 2
    embedding_values = (
        (1., 2.),  # id 0
        (3., 5.),  # id 1
        (7., 11.)  # id 2
    )
    def _initializer(shape, dtype, partition_info):
      # Verify the initializer is called with the expected variable spec.
      self.assertAllEqual((vocabulary_size, embedding_dimension), shape)
      self.assertEqual(dtypes.float32, dtype)
      self.assertIsNone(partition_info)
      return embedding_values
    # Expected lookup result, using combiner='mean'.
    expected_lookups = (
        # example 0, ids [2], embedding = [7, 11]
        (7., 11.),
        # example 1, ids [0, 1], embedding = mean([1, 2] + [3, 5]) = [2, 3.5]
        (2., 3.5),
        # example 2, ids [], embedding = [0, 0]
        (0., 0.),
        # example 3, ids [1], embedding = [3, 5]
        (3., 5.),
    )
    # Build columns.
    categorical_column = fc.categorical_column_with_identity(
        key='aaa', num_buckets=vocabulary_size)
    embedding_column = fc.embedding_column(
        categorical_column, dimension=embedding_dimension,
        initializer=_initializer)
    # Provide sparse input and get dense result.
    input_indices = array_ops.placeholder(dtype=dtypes.int64)
    input_values = array_ops.placeholder(dtype=dtypes.int64)
    input_shape = array_ops.placeholder(dtype=dtypes.int64)
    embedding_lookup = embedding_column._get_dense_tensor(
        _LazyBuilder({
            'aaa':
                sparse_tensor.SparseTensorValue(
                    indices=input_indices,
                    values=input_values,
                    dense_shape=input_shape)
        }))
    # Assert expected embedding variable and lookups.
    global_vars = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
    self.assertItemsEqual(
        ('embedding_weights:0',), tuple([v.name for v in global_vars]))
    with _initialized_session():
      self.assertAllEqual(embedding_values, global_vars[0].eval())
      self.assertAllEqual(expected_lookups, embedding_lookup.eval(
          feed_dict={
              input_indices: sparse_input.indices,
              input_values: sparse_input.values,
              input_shape: sparse_input.dense_shape,
          }))
def test_get_dense_tensor_restore_from_ckpt(self):
  """_get_dense_tensor() with embedding weights restored from a checkpoint.

  The column is built with ckpt_to_load_from/tensor_name_in_ckpt instead of
  an initializer; lookups must reflect the checkpointed values.
  """
  # Inputs.
  vocabulary_size = 3
  sparse_input = sparse_tensor.SparseTensorValue(
      # example 0, ids [2]
      # example 1, ids [0, 1]
      # example 2, ids []
      # example 3, ids [1]
      indices=((0, 0), (1, 0), (1, 4), (3, 0)),
      values=(2, 0, 1, 1),
      dense_shape=(4, 5))
  # Embedding variable. The checkpoint file contains _embedding_values.
  embedding_dimension = 2
  embedding_values = (
      (1., 2.),  # id 0
      (3., 5.),  # id 1
      (7., 11.)  # id 2
  )
  ckpt_path = test.test_src_dir_path(
      'python/feature_column/testdata/embedding.ckpt')
  ckpt_tensor = 'my_embedding'
  # Expected lookup result, using combiner='mean'.
  expected_lookups = (
      # example 0, ids [2], embedding = [7, 11]
      (7., 11.),
      # example 1, ids [0, 1], embedding = mean([1, 2] + [3, 5]) = [2, 3.5]
      (2., 3.5),
      # example 2, ids [], embedding = [0, 0]
      (0., 0.),
      # example 3, ids [1], embedding = [3, 5]
      (3., 5.),
  )
  # Build columns.
  categorical_column = fc.categorical_column_with_identity(
      key='aaa', num_buckets=vocabulary_size)
  embedding_column = fc.embedding_column(
      categorical_column, dimension=embedding_dimension,
      ckpt_to_load_from=ckpt_path,
      tensor_name_in_ckpt=ckpt_tensor)
  # Provide sparse input and get dense result.
  embedding_lookup = embedding_column._get_dense_tensor(
      _LazyBuilder({
          'aaa': sparse_input
      }))
  # Assert expected embedding variable and lookups.
  global_vars = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
  self.assertItemsEqual(
      ('embedding_weights:0',), tuple([v.name for v in global_vars]))
  with _initialized_session():
    self.assertAllEqual(embedding_values, global_vars[0].eval())
    self.assertAllEqual(expected_lookups, embedding_lookup.eval())
def test_linear_model(self):
  """fc.linear_model over an embedding column.

  Starts from all-zero weights (zero predictions), then assigns non-zero
  embedding and linear weights and checks the mean-combined dot products.
  """
  # Inputs.
  batch_size = 4
  vocabulary_size = 3
  sparse_input = sparse_tensor.SparseTensorValue(
      # example 0, ids [2]
      # example 1, ids [0, 1]
      # example 2, ids []
      # example 3, ids [1]
      indices=((0, 0), (1, 0), (1, 4), (3, 0)),
      values=(2, 0, 1, 1),
      dense_shape=(batch_size, 5))
  # Embedding variable.
  embedding_dimension = 2
  embedding_shape = (vocabulary_size, embedding_dimension)
  zeros_embedding_values = np.zeros(embedding_shape)

  def _initializer(shape, dtype, partition_info):
    # Doubles as a check on the arguments the embedding variable is
    # created with.
    self.assertAllEqual(embedding_shape, shape)
    self.assertEqual(dtypes.float32, dtype)
    self.assertIsNone(partition_info)
    return zeros_embedding_values

  # Build columns.
  categorical_column = fc.categorical_column_with_identity(
      key='aaa', num_buckets=vocabulary_size)
  embedding_column = fc.embedding_column(
      categorical_column, dimension=embedding_dimension,
      initializer=_initializer)
  with ops.Graph().as_default():
    predictions = fc.linear_model({
        categorical_column.name: sparse_input
    }, (embedding_column,))
    expected_var_names = (
        'linear_model/bias_weights:0',
        'linear_model/aaa_embedding/weights:0',
        'linear_model/aaa_embedding/embedding_weights:0',
    )
    self.assertItemsEqual(
        expected_var_names,
        [v.name for v in ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)])
    trainable_vars = {
        v.name: v for v in ops.get_collection(
            ops.GraphKeys.TRAINABLE_VARIABLES)
    }
    self.assertItemsEqual(expected_var_names, trainable_vars.keys())
    bias = trainable_vars['linear_model/bias_weights:0']
    embedding_weights = trainable_vars[
        'linear_model/aaa_embedding/embedding_weights:0']
    linear_weights = trainable_vars[
        'linear_model/aaa_embedding/weights:0']
    with _initialized_session():
      # Predictions with all zero weights.
      self.assertAllClose(np.zeros((1,)), bias.eval())
      self.assertAllClose(zeros_embedding_values, embedding_weights.eval())
      self.assertAllClose(
          np.zeros((embedding_dimension, 1)), linear_weights.eval())
      self.assertAllClose(np.zeros((batch_size, 1)), predictions.eval())
      # Predictions with all non-zero weights.
      embedding_weights.assign((
          (1., 2.),  # id 0
          (3., 5.),  # id 1
          (7., 11.)  # id 2
      )).eval()
      linear_weights.assign(((4.,), (6.,))).eval()
      # example 0, ids [2], embedding[0] = [7, 11]
      # example 1, ids [0, 1], embedding[1] = mean([1, 2] + [3, 5]) = [2, 3.5]
      # example 2, ids [], embedding[2] = [0, 0]
      # example 3, ids [1], embedding[3] = [3, 5]
      # sum(embeddings * linear_weights)
      # = [4*7 + 6*11, 4*2 + 6*3.5, 4*0 + 6*0, 4*3 + 6*5] = [94, 29, 0, 42]
      self.assertAllClose(((94.,), (29.,), (0.,), (42.,)), predictions.eval())
def test_input_layer(self):
  """fc.input_layer over an embedding column.

  The embedding variable must appear in both the GLOBAL_VARIABLES and the
  TRAINABLE_VARIABLES collections, and the output must be the per-example
  mean-combined embeddings.
  """
  vocab_size = 3
  dimension = 2
  # example 0 -> ids [2]; example 1 -> [0, 1]; example 2 -> []; example 3 -> [1]
  sparse_ids = sparse_tensor.SparseTensorValue(
      indices=((0, 0), (1, 0), (1, 4), (3, 0)),
      values=(2, 0, 1, 1),
      dense_shape=(4, 5))
  init_values = (
      (1., 2.),   # id 0
      (3., 5.),   # id 1
      (7., 11.)   # id 2
  )

  def _checked_initializer(shape, dtype, partition_info):
    # The initializer doubles as a check of the creation arguments.
    self.assertAllEqual((vocab_size, dimension), shape)
    self.assertEqual(dtypes.float32, dtype)
    self.assertIsNone(partition_info)
    return init_values

  # Mean-combined lookup per example (combiner='mean').
  expected = (
      (7., 11.),  # example 0: ids [2]
      (2., 3.5),  # example 1: mean of ids [0, 1]
      (0., 0.),   # example 2: no ids
      (3., 5.),   # example 3: ids [1]
  )
  column = fc.embedding_column(
      fc.categorical_column_with_identity(key='aaa', num_buckets=vocab_size),
      dimension=dimension,
      initializer=_checked_initializer)
  net = fc.input_layer({'aaa': sparse_ids}, (column,))
  expected_var = 'input_layer/aaa_embedding/embedding_weights:0'
  # The same (single) variable must show up as both global and trainable.
  for collection in (ops.GraphKeys.GLOBAL_VARIABLES,
                     ops.GraphKeys.TRAINABLE_VARIABLES):
    self.assertItemsEqual(
        (expected_var,),
        tuple(v.name for v in ops.get_collection(collection)))
  trainable = ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)
  with _initialized_session():
    self.assertAllEqual(init_values, trainable[0].eval())
    self.assertAllEqual(expected, net.eval())
def test_input_layer_not_trainable(self):
  """fc.input_layer with a non-trainable embedding column.

  Same lookups as test_input_layer, but with trainable=False the embedding
  variable must appear only in GLOBAL_VARIABLES, never in
  TRAINABLE_VARIABLES.
  """
  vocab_size = 3
  dimension = 2
  # example 0 -> ids [2]; example 1 -> [0, 1]; example 2 -> []; example 3 -> [1]
  sparse_ids = sparse_tensor.SparseTensorValue(
      indices=((0, 0), (1, 0), (1, 4), (3, 0)),
      values=(2, 0, 1, 1),
      dense_shape=(4, 5))
  init_values = (
      (1., 2.),   # id 0
      (3., 5.),   # id 1
      (7., 11.)   # id 2
  )

  def _checked_initializer(shape, dtype, partition_info):
    # The initializer doubles as a check of the creation arguments.
    self.assertAllEqual((vocab_size, dimension), shape)
    self.assertEqual(dtypes.float32, dtype)
    self.assertIsNone(partition_info)
    return init_values

  # Mean-combined lookup per example (combiner='mean').
  expected = (
      (7., 11.),  # example 0: ids [2]
      (2., 3.5),  # example 1: mean of ids [0, 1]
      (0., 0.),   # example 2: no ids
      (3., 5.),   # example 3: ids [1]
  )
  column = fc.embedding_column(
      fc.categorical_column_with_identity(key='aaa', num_buckets=vocab_size),
      dimension=dimension,
      initializer=_checked_initializer,
      trainable=False)
  net = fc.input_layer({'aaa': sparse_ids}, (column,))
  global_vars = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
  self.assertItemsEqual(
      ('input_layer/aaa_embedding/embedding_weights:0',),
      tuple(v.name for v in global_vars))
  # Non-trainable: nothing may be registered for the optimizer.
  self.assertItemsEqual(
      [], ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES))
  with _initialized_session():
    self.assertAllEqual(init_values, global_vars[0].eval())
    self.assertAllEqual(expected, net.eval())
class WeightedCategoricalColumnTest(test.TestCase):
  """Tests for fc.weighted_categorical_column.

  Covers construction and validation, parse-spec generation, feature
  transformation for sparse and dense ids/weights, and use in
  fc.linear_model (including shape-mismatch error cases).
  """

  def test_defaults(self):
    """Name, bucket count and parse spec of a freshly built column."""
    column = fc.weighted_categorical_column(
        categorical_column=fc.categorical_column_with_identity(
            key='ids', num_buckets=3),
        weight_feature_key='values')
    self.assertEqual('ids_weighted_by_values', column.name)
    self.assertEqual(3, column._num_buckets)
    self.assertEqual({
        'ids': parsing_ops.VarLenFeature(dtypes.int64),
        'values': parsing_ops.VarLenFeature(dtypes.float32)
    }, column._parse_example_spec)

  def test_deep_copy(self):
    """Tests deepcopy of weighted_categorical_column."""
    original = fc.weighted_categorical_column(
        categorical_column=fc.categorical_column_with_identity(
            key='ids', num_buckets=3),
        weight_feature_key='values')
    # Both the original and its deep copy must behave identically.
    for column in (original, copy.deepcopy(original)):
      self.assertEqual('ids_weighted_by_values', column.name)
      self.assertEqual(3, column._num_buckets)
      self.assertEqual({
          'ids': parsing_ops.VarLenFeature(dtypes.int64),
          'values': parsing_ops.VarLenFeature(dtypes.float32)
      }, column._parse_example_spec)

  def test_invalid_dtype_none(self):
    """dtype=None for the weights must be rejected."""
    with self.assertRaisesRegexp(ValueError, 'is not convertible to float'):
      fc.weighted_categorical_column(
          categorical_column=fc.categorical_column_with_identity(
              key='ids', num_buckets=3),
          weight_feature_key='values',
          dtype=None)

  def test_invalid_dtype_string(self):
    """A non-numeric weight dtype must be rejected."""
    with self.assertRaisesRegexp(ValueError, 'is not convertible to float'):
      fc.weighted_categorical_column(
          categorical_column=fc.categorical_column_with_identity(
              key='ids', num_buckets=3),
          weight_feature_key='values',
          dtype=dtypes.string)

  def test_invalid_input_dtype(self):
    """String features fed into an int/float column must fail transform."""
    column = fc.weighted_categorical_column(
        categorical_column=fc.categorical_column_with_identity(
            key='ids', num_buckets=3),
        weight_feature_key='values')
    strings = sparse_tensor.SparseTensorValue(
        indices=((0, 0), (1, 0), (1, 1)),
        values=('omar', 'stringer', 'marlo'),
        dense_shape=(2, 2))
    with self.assertRaisesRegexp(ValueError, 'Bad dtype'):
      _transform_features({'ids': strings, 'values': strings}, (column,))

  def test_column_name_collision(self):
    """Using the same key for ids and weights must be rejected."""
    with self.assertRaisesRegexp(ValueError, r'Parse config.*already exists'):
      fc.weighted_categorical_column(
          categorical_column=fc.categorical_column_with_identity(
              key='aaa', num_buckets=3),
          weight_feature_key='aaa')._parse_example_spec()

  def test_missing_weights(self):
    """Transforming without the weight feature present must fail."""
    column = fc.weighted_categorical_column(
        categorical_column=fc.categorical_column_with_identity(
            key='ids', num_buckets=3),
        weight_feature_key='values')
    inputs = sparse_tensor.SparseTensorValue(
        indices=((0, 0), (1, 0), (1, 1)),
        values=('omar', 'stringer', 'marlo'),
        dense_shape=(2, 2))
    with self.assertRaisesRegexp(
        ValueError, 'values is not in features dictionary'):
      _transform_features({'ids': inputs}, (column,))

  def test_parse_example(self):
    """make_parse_example_spec + parse_example round-trip of ids+weights."""
    a = fc.categorical_column_with_vocabulary_list(
        key='aaa', vocabulary_list=('omar', 'stringer', 'marlo'))
    a_weighted = fc.weighted_categorical_column(a, weight_feature_key='weights')
    data = example_pb2.Example(features=feature_pb2.Features(
        feature={
            'aaa':
                feature_pb2.Feature(bytes_list=feature_pb2.BytesList(
                    value=[b'omar', b'stringer'])),
            'weights':
                feature_pb2.Feature(float_list=feature_pb2.FloatList(
                    value=[1., 10.]))
        }))
    features = parsing_ops.parse_example(
        serialized=[data.SerializeToString()],
        features=fc.make_parse_example_spec([a_weighted]))
    self.assertIn('aaa', features)
    self.assertIn('weights', features)
    with self.test_session():
      _assert_sparse_tensor_value(
          self,
          sparse_tensor.SparseTensorValue(
              indices=[[0, 0], [0, 1]],
              values=np.array([b'omar', b'stringer'], dtype=np.object_),
              dense_shape=[1, 2]),
          features['aaa'].eval())
      _assert_sparse_tensor_value(
          self,
          sparse_tensor.SparseTensorValue(
              indices=[[0, 0], [0, 1]],
              values=np.array([1., 10.], dtype=np.float32),
              dense_shape=[1, 2]),
          features['weights'].eval())

  def test_transform_features(self):
    """Sparse ids + sparse weights transform into (id, weight) tensors."""
    column = fc.weighted_categorical_column(
        categorical_column=fc.categorical_column_with_identity(
            key='ids', num_buckets=3),
        weight_feature_key='values')
    inputs = sparse_tensor.SparseTensorValue(
        indices=((0, 0), (1, 0), (1, 1)),
        values=(0, 1, 0),
        dense_shape=(2, 2))
    weights = sparse_tensor.SparseTensorValue(
        indices=((0, 0), (1, 0), (1, 1)),
        values=(0.5, 1.0, 0.1),
        dense_shape=(2, 2))
    id_tensor, weight_tensor = _transform_features({
        'ids': inputs,
        'values': weights,
    }, (column,))[column]
    with _initialized_session():
      # Ids are cast to int64, weights to float32; indices/shape unchanged.
      _assert_sparse_tensor_value(
          self,
          sparse_tensor.SparseTensorValue(
              indices=inputs.indices,
              values=np.array(inputs.values, dtype=np.int64),
              dense_shape=inputs.dense_shape),
          id_tensor.eval())
      _assert_sparse_tensor_value(
          self,
          sparse_tensor.SparseTensorValue(
              indices=weights.indices,
              values=np.array(weights.values, dtype=np.float32),
              dense_shape=weights.dense_shape),
          weight_tensor.eval())

  def test_transform_features_dense_input(self):
    """Dense ids (with -1 padding) are converted to a sparse id tensor."""
    column = fc.weighted_categorical_column(
        categorical_column=fc.categorical_column_with_identity(
            key='ids', num_buckets=3),
        weight_feature_key='values')
    weights = sparse_tensor.SparseTensorValue(
        indices=((0, 0), (1, 0), (1, 1)),
        values=(0.5, 1.0, 0.1),
        dense_shape=(2, 2))
    id_tensor, weight_tensor = _transform_features({
        'ids': ((0, -1), (1, 0)),
        'values': weights,
    }, (column,))[column]
    with _initialized_session():
      # The -1 entry is treated as padding and dropped from the sparse ids.
      _assert_sparse_tensor_value(
          self,
          sparse_tensor.SparseTensorValue(
              indices=((0, 0), (1, 0), (1, 1)),
              values=np.array((0, 1, 0), dtype=np.int64),
              dense_shape=(2, 2)),
          id_tensor.eval())
      _assert_sparse_tensor_value(
          self,
          sparse_tensor.SparseTensorValue(
              indices=weights.indices,
              values=np.array(weights.values, dtype=np.float32),
              dense_shape=weights.dense_shape),
          weight_tensor.eval())

  def test_transform_features_dense_weights(self):
    """Dense weights (with 0. padding) are converted to a sparse tensor."""
    column = fc.weighted_categorical_column(
        categorical_column=fc.categorical_column_with_identity(
            key='ids', num_buckets=3),
        weight_feature_key='values')
    inputs = sparse_tensor.SparseTensorValue(
        indices=((0, 0), (1, 0), (1, 1)),
        values=(2, 1, 0),
        dense_shape=(2, 2))
    id_tensor, weight_tensor = _transform_features({
        'ids': inputs,
        'values': ((.5, 0.), (1., .1)),
    }, (column,))[column]
    with _initialized_session():
      _assert_sparse_tensor_value(
          self,
          sparse_tensor.SparseTensorValue(
              indices=inputs.indices,
              values=np.array(inputs.values, dtype=np.int64),
              dense_shape=inputs.dense_shape),
          id_tensor.eval())
      # The 0. entry is treated as padding and dropped from the weights.
      _assert_sparse_tensor_value(
          self,
          sparse_tensor.SparseTensorValue(
              indices=((0, 0), (1, 0), (1, 1)),
              values=np.array((.5, 1., .1), dtype=np.float32),
              dense_shape=(2, 2)),
          weight_tensor.eval())

  def test_linear_model(self):
    """fc.linear_model weighs each id's weight-variable by its feature weight."""
    column = fc.weighted_categorical_column(
        categorical_column=fc.categorical_column_with_identity(
            key='ids', num_buckets=3),
        weight_feature_key='values')
    with ops.Graph().as_default():
      predictions = fc.linear_model({
          'ids': sparse_tensor.SparseTensorValue(
              indices=((0, 0), (1, 0), (1, 1)),
              values=(0, 2, 1),
              dense_shape=(2, 2)),
          'values': sparse_tensor.SparseTensorValue(
              indices=((0, 0), (1, 0), (1, 1)),
              values=(.5, 1., .1),
              dense_shape=(2, 2))
      }, (column,))
      bias = get_linear_model_bias()
      weight_var = get_linear_model_column_var(column)
      with _initialized_session():
        # Everything starts at zero before the assignments below.
        self.assertAllClose((0.,), bias.eval())
        self.assertAllClose(((0.,), (0.,), (0.,)), weight_var.eval())
        self.assertAllClose(((0.,), (0.,)), predictions.eval())
        weight_var.assign(((1.,), (2.,), (3.,))).eval()
        # weight_var[0] * weights[0, 0] = 1 * .5 = .5
        # weight_var[2] * weights[1, 0] + weight_var[1] * weights[1, 1]
        # = 3*1 + 2*.1 = 3+.2 = 3.2
        self.assertAllClose(((.5,), (3.2,)), predictions.eval())

  def test_linear_model_mismatched_shape(self):
    """Static shape mismatch between ids and weights fails at graph build."""
    column = fc.weighted_categorical_column(
        categorical_column=fc.categorical_column_with_identity(
            key='ids', num_buckets=3),
        weight_feature_key='values')
    with ops.Graph().as_default():
      with self.assertRaisesRegexp(
          ValueError, r'Dimensions.*are not compatible'):
        fc.linear_model({
            'ids': sparse_tensor.SparseTensorValue(
                indices=((0, 0), (1, 0), (1, 1)),
                values=(0, 2, 1),
                dense_shape=(2, 2)),
            'values': sparse_tensor.SparseTensorValue(
                indices=((0, 0), (0, 1), (1, 0), (1, 1)),
                values=(.5, 11., 1., .1),
                dense_shape=(2, 2))
        }, (column,))

  def test_linear_model_mismatched_dense_values(self):
    """Dense weights with too few rows fail at run time, not build time."""
    column = fc.weighted_categorical_column(
        categorical_column=fc.categorical_column_with_identity(
            key='ids', num_buckets=3),
        weight_feature_key='values')
    with ops.Graph().as_default():
      predictions = fc.linear_model({
          'ids': sparse_tensor.SparseTensorValue(
              indices=((0, 0), (1, 0), (1, 1)),
              values=(0, 2, 1),
              dense_shape=(2, 2)),
          'values': ((.5,), (1.,))
      }, (column,))
      with _initialized_session():
        with self.assertRaisesRegexp(errors.OpError, 'Incompatible shapes'):
          predictions.eval()

  def test_linear_model_mismatched_dense_shape(self):
    """Dense weights shaped (3, 1) against (2, 2) ids still broadcast."""
    column = fc.weighted_categorical_column(
        categorical_column=fc.categorical_column_with_identity(
            key='ids', num_buckets=3),
        weight_feature_key='values')
    with ops.Graph().as_default():
      predictions = fc.linear_model({
          'ids': sparse_tensor.SparseTensorValue(
              indices=((0, 0), (1, 0), (1, 1)),
              values=(0, 2, 1),
              dense_shape=(2, 2)),
          'values': ((.5,), (1.,), (.1,))
      }, (column,))
      bias = get_linear_model_bias()
      weight_var = get_linear_model_column_var(column)
      with _initialized_session():
        self.assertAllClose((0.,), bias.eval())
        self.assertAllClose(((0.,), (0.,), (0.,)), weight_var.eval())
        self.assertAllClose(((0.,), (0.,)), predictions.eval())
        weight_var.assign(((1.,), (2.,), (3.,))).eval()
        # weight_var[0] * weights[0, 0] = 1 * .5 = .5
        # weight_var[2] * weights[1, 0] + weight_var[1] * weights[1, 1]
        # = 3*1 + 2*.1 = 3+.2 = 3.2
        self.assertAllClose(((.5,), (3.2,)), predictions.eval())


# TODO(ptucker): Add test with embedding of weighted categorical.
# Run the test suite when this file is executed directly.
if __name__ == '__main__':
  test.main()
| 40.412245 | 123 | 0.636596 |
529025baf3a389264eb7e663f7f1f9ddce63e5bb | 5,144 | py | Python | lte/gateway/python/magma/subscriberdb/tests/protocols/diameter/server_tests.py | gurrapualt/magma | 13e05788fa6c40293a58b6e03cfb394bb79fa98f | [
"BSD-3-Clause"
] | 2 | 2020-12-09T11:42:30.000Z | 2021-09-26T03:28:33.000Z | lte/gateway/python/magma/subscriberdb/tests/protocols/diameter/server_tests.py | ViniBR01/magma | c1214880e66444b6e73000eb1165ec1a2d110a44 | [
"BSD-3-Clause"
] | 124 | 2020-08-21T06:11:21.000Z | 2022-03-21T05:25:26.000Z | lte/gateway/python/magma/subscriberdb/tests/protocols/diameter/server_tests.py | ViniBR01/magma | c1214880e66444b6e73000eb1165ec1a2d110a44 | [
"BSD-3-Clause"
] | 1 | 2020-09-21T04:25:06.000Z | 2020-09-21T04:25:06.000Z | """
Copyright 2020 The Magma Authors.
This source code is licensed under the BSD-style license found in the
LICENSE file in the root directory of this source tree.
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# pylint:disable=protected-access
import asyncio
import unittest
from unittest.mock import Mock
from magma.subscriberdb.protocols.diameter import server, message
class ServerTests(unittest.TestCase):
    """
    Test class for Diameter Server dispatch to Applications
    """

    def setUp(self):
        # Build an S6aServer with mocked collaborators; only the wire
        # protocol handling is under test here.
        self._server = server.S6aServer(Mock(),
                                        Mock(),
                                        "mai.facebook.com",
                                        "hss.mai.facebook.com")
        # Mock the message handler
        self._server._handle_msg = Mock()
        # Mock the writes to check responses
        self._writes = Mock()

        def convert_memview_to_bytes(memview):
            """ Deep copy the memoryview for checking later """
            # The server reuses its write buffer, so the memoryview must be
            # materialized before the next write overwrites it.
            return self._writes(memview.tobytes())

        self._transport = asyncio.Transport()
        self._transport.write = Mock(side_effect=convert_memview_to_bytes)
        # Here goes nothing..
        self._server.connection_made(self._transport)

    def _check_handler(self, req_bytes, application_id):
        """
        Send data to the protocol in different step lengths to
        verify that we assemble all segments and invoke the correct handler

        Args:
            req_bytes (bytes): request which would be sent
                multiple times with different step sizes
            application_id: the application handler which should be invoked
        Returns:
            None
        """
        # Feed the same request in chunks of every possible size to
        # exercise the reassembly path.
        for step in range(1, len(req_bytes) + 1):
            offset = 0
            while offset < len(req_bytes):
                self._server.data_received(req_bytes[offset:offset + step])
                offset += step
            self.assertTrue(self._server._handle_msg.called)
            # pylint:disable=unsubscriptable-object
            self.assertEqual(self._server._handle_msg.call_args[0][0],
                             application_id)
            self._server._handle_msg.reset_mock()

    def test_application_dispatch(self):
        """Check that we can decode an inbound message and call
        the appropriate handler"""
        msg = message.Message()
        msg.header.application_id = 0xfac3b00c
        msg.header.request = True
        req_buf = bytearray(msg.length)
        msg.encode(req_buf, 0)
        self._check_handler(req_buf, 0xfac3b00c)

    def test_too_short(self):
        """Check that if we didn't receive enough data
        we keep it in the buffer"""
        # Read in less than the header length
        req_buf = bytearray(b'\x01' * 19)
        self._server.data_received(req_buf)
        self.assertEqual(len(self._server._readbuf), 19)

    def test_decode_error(self):
        """Check that we can seek past garbage"""
        # Feed garbage past the header length
        req_buf = bytearray(b'\x01' * 20)
        self._server.data_received(req_buf)
        # We should flush the read buffer
        self.assertEqual(len(self._server._readbuf), 0)
class WriterTests(unittest.TestCase):
    """
    Test the Writer class for the diameter server
    """

    def setUp(self):
        # Mock the writes to check responses
        self._writes = Mock()

        def convert_memview_to_bytes(memview):
            """ Deep copy the memoryview for checking later """
            return self._writes(memview.tobytes())

        self._transport = asyncio.Transport()
        self._transport.write = Mock(side_effect=convert_memview_to_bytes)
        self.writer = server.Writer("mai.facebook.com",
                                    "hss.mai.facebook.com",
                                    "127.0.0.1",
                                    self._transport)

    def test_send_msg(self):
        """Test that the writer will encode a message and write
        it to the transport"""
        msg = message.Message()
        self.writer.send_msg(msg)
        # An empty message encodes to just the 20-byte Diameter header.
        self._writes.assert_called_once_with(
            b'\x01\x00\x00\x14'
            b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
            b'\x00\x00\x00')
        self._writes.reset_mock()

    def test_gen_buf(self):
        """Test that the writer will generate a buffer of the
        length of the message"""
        msg = message.Message()
        buf = self.writer._get_write_buf(msg)
        self.assertEqual(len(buf), msg.length)

    def test_write(self):
        """Test that the writer will push to the transport"""
        msg = memoryview(b'helloworld')
        self.writer._write(msg)
        self._writes.assert_called_once_with(msg)
        self._writes.reset_mock()
# Run the test suite when this file is executed directly.
if __name__ == "__main__":
    unittest.main()
| 33.842105 | 75 | 0.620723 |
ec88559db850d90ffa3abb43df6c058243cfa834 | 181 | py | Python | courses/MITx 6.00.1x/lec5/fingers1.py | NomikOS/learning | 268f94605214f6861ef476ca7573e68c068ccbe5 | [
"Unlicense"
] | null | null | null | courses/MITx 6.00.1x/lec5/fingers1.py | NomikOS/learning | 268f94605214f6861ef476ca7573e68c068ccbe5 | [
"Unlicense"
] | null | null | null | courses/MITx 6.00.1x/lec5/fingers1.py | NomikOS/learning | 268f94605214f6861ef476ca7573e68c068ccbe5 | [
"Unlicense"
] | null | null | null | def oddTuples(aTup):
'''
aTup: a tuple
returns: tuple, every other element of aTup.
'''
return aTup[::2]
# Demo: prints ('I', 'a', 'tuple').
print(oddTuples(('I', 'am', 'a', 'test', 'tuple')))
| 16.454545 | 51 | 0.546961 |
02fb069ddaad8396561f9f76277209145c80204e | 3,489 | py | Python | SVS/layers/global_mvn.py | ftshijt/SVS_system | 569d0a2f7ae89965bde132e5be538f6a84be471f | [
"Apache-2.0"
] | null | null | null | SVS/layers/global_mvn.py | ftshijt/SVS_system | 569d0a2f7ae89965bde132e5be538f6a84be471f | [
"Apache-2.0"
] | null | null | null | SVS/layers/global_mvn.py | ftshijt/SVS_system | 569d0a2f7ae89965bde132e5be538f6a84be471f | [
"Apache-2.0"
] | null | null | null | from pathlib import Path
from typing import Tuple
from typing import Union
import numpy as np
import torch
from typeguard import check_argument_types
from SVS.utils.nets_utils import make_pad_mask
from SVS.layers.abs_normalize import AbsNormalize
from SVS.layers.inversible_interface import InversibleInterface
class GlobalMVN(AbsNormalize, InversibleInterface):
    """Apply global mean and variance normalization

    TODO(kamo): Make this class portable somehow

    Args:
        stats_file: npy file
        norm_means: Apply mean normalization
        norm_vars: Apply var normalization
        eps: Floor for the variance before taking the square root,
            guarding against division by (near-)zero std.
    """

    def __init__(
        self,
        stats_file: Union[Path, str],
        norm_means: bool = True,
        norm_vars: bool = True,
        eps: float = 1.0e-20,
    ):
        assert check_argument_types()
        super().__init__()
        self.norm_means = norm_means
        self.norm_vars = norm_vars
        self.eps = eps
        stats_file = Path(stats_file)
        self.stats_file = stats_file

        stats = np.load(stats_file)
        if isinstance(stats, np.ndarray):
            # Kaldi like stats
            # Layout: row 0 = per-dim sums with the frame count in the last
            # slot; row 1 = per-dim sums of squares.
            count = stats[0].flatten()[-1]
            mean = stats[0, :-1] / count
            var = stats[1, :-1] / count - mean * mean
        else:
            # New style: Npz file
            count = stats["count"]
            sum_v = stats["sum"]
            sum_square_v = stats["sum_square"]
            mean = sum_v / count
            var = sum_square_v / count - mean * mean
        std = np.sqrt(np.maximum(var, eps))

        # Buffers (not parameters): move with the module and are saved in
        # state_dict, but are never trained.
        self.register_buffer("mean", torch.from_numpy(mean))
        self.register_buffer("std", torch.from_numpy(std))

    def extra_repr(self):
        # Shown in the module's repr() for debugging.
        return (
            f"stats_file={self.stats_file}, "
            f"norm_means={self.norm_means}, norm_vars={self.norm_vars}"
        )

    def forward(
        self, x: torch.Tensor, ilens: torch.Tensor = None
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        """Forward function

        Args:
            x: (B, L, ...)
            ilens: (B,)
        """
        if ilens is None:
            # No lengths given: treat every sequence as full length.
            ilens = x.new_full([x.size(0)], x.size(1))
        norm_means = self.norm_means
        norm_vars = self.norm_vars
        self.mean = self.mean.to(x.device, x.dtype)
        self.std = self.std.to(x.device, x.dtype)
        mask = make_pad_mask(ilens, x, 1)

        # feat: (B, T, D)
        if norm_means:
            # Out-of-place ops when gradients are needed; in-place otherwise
            # to avoid allocating a copy.
            if x.requires_grad:
                x = x - self.mean
            else:
                x -= self.mean
        if x.requires_grad:
            x = x.masked_fill(mask, 0.0)
        else:
            x.masked_fill_(mask, 0.0)

        if norm_vars:
            x /= self.std

        return x, ilens

    def inverse(
        self, x: torch.Tensor, ilens: torch.Tensor = None
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        # Undo forward(): re-scale by std, then add the mean back, zeroing
        # padded frames before and after.
        if ilens is None:
            ilens = x.new_full([x.size(0)], x.size(1))
        norm_means = self.norm_means
        norm_vars = self.norm_vars
        self.mean = self.mean.to(x.device, x.dtype)
        self.std = self.std.to(x.device, x.dtype)
        mask = make_pad_mask(ilens, x, 1)

        if x.requires_grad:
            x = x.masked_fill(mask, 0.0)
        else:
            x.masked_fill_(mask, 0.0)

        if norm_vars:
            x *= self.std

        # feat: (B, T, D)
        if norm_means:
            x += self.mean
            # Adding the mean also fills padded frames; zero them again.
            x.masked_fill_(make_pad_mask(ilens, x, 1), 0.0)
        return x, ilens
5d14113cbc456ed3233ccfcc2d2a2fb5c64014c4 | 3,271 | py | Python | test/behave/steps/timer.py | qfruiti/mycroft-timer | 136c88b2ece119fdf7847b67334b7fb758a82377 | [
"Apache-2.0"
] | null | null | null | test/behave/steps/timer.py | qfruiti/mycroft-timer | 136c88b2ece119fdf7847b67334b7fb758a82377 | [
"Apache-2.0"
] | null | null | null | test/behave/steps/timer.py | qfruiti/mycroft-timer | 136c88b2ece119fdf7847b67334b7fb758a82377 | [
"Apache-2.0"
] | null | null | null | import time
from behave import given, then
from mycroft.audio import wait_while_speaking
from test.integrationtests.voight_kampff import (
emit_utterance,
wait_for_dialog)
@given('a {timer_length} timer is set')
@given('a timer is set for {timer_length}')
def given_set_timer_lenght(context, timer_length):
    """Start a timer of the given spoken length and wait for confirmation."""
    # NOTE(review): function name has a typo ("lenght"); behave dispatches
    # on the decorator patterns, not the name, so renaming is optional.
    emit_utterance(context.bus, 'set a timer for {}'.format(timer_length))
    wait_for_dialog(context.bus, ['started.timer'])
    context.bus.clear_messages()
@given('a timer named {name} is set for {time}')
def given_set_timer_named(context, name, time):
    """Start a named timer of the given length and wait for confirmation."""
    # NOTE(review): the phrase '{} time called {}' reads oddly (e.g.
    # "5 minutes time called lasagna") -- confirm the wording is intended.
    emit_utterance(context.bus,
                   'set a timer for {} time called {}'.format(time, name))
    wait_for_dialog(context.bus, ['started.timer.with.name'])
    context.bus.clear_messages()
@given('a timer named {name} is set')
def given_set_named_timer(context, name):
    """Start a long (95 minute) named timer so it won't expire mid-scenario."""
    emit_utterance(context.bus,
                   'set a timer for 95 minutes called {}'.format(name))
    wait_for_dialog(context.bus, ['started.timer'])
    context.bus.clear_messages()
@given('there is already an active timer')
def given_set_timer(context):
    """Start a long (100 minute) timer so it won't expire mid-scenario."""
    emit_utterance(context.bus, 'set a timer for 100 minutes')
    wait_for_dialog(context.bus, ['started.timer'])
    context.bus.clear_messages()
@given('no timers are active')
@given('no timers are set')
@given('no timers are previously set')
def given_no_timers(context):
    """Cancel all timers, answering "yes" to any confirmation prompt."""
    # Dialogs that are follow-up questions requiring a "yes" answer.
    followups = ['ask.cancel.running.plural',
                 'ask.cancel.desc.alarm.recurring']
    # Dialogs meaning there is nothing (left) to cancel.
    no_timers = ['no.active.timer',
                 'cancel.all',
                 'cancelled.single.timer',
                 'cancelled.timer.named',
                 'cancelled.timer.named.with.ordinal',
                 'cancelled.timer.with.ordinal']
    # Dialogs confirming a cancellation after we answered "yes".
    cancelled = ['cancel.all',
                 'cancelled.single.timer',
                 'cancelled.timer.named',
                 'cancelled.timer.named.with.ordinal',
                 'cancelled.timer.with.ordinal']

    emit_utterance(context.bus, 'cancel all timers')
    # Poll for up to ~10 seconds for either a confirmation question or a
    # direct "cancelled"/"no timers" response.
    for i in range(10):
        for message in context.bus.get_messages('speak'):
            if message.data.get('meta', {}).get('dialog') in followups:
                print('Answering yes!')
                time.sleep(3)
                wait_while_speaking()
                emit_utterance(context.bus, 'yes')
                wait_for_dialog(context.bus, cancelled)
                context.bus.clear_messages()
                return
            elif message.data.get('meta', {}).get('dialog') in no_timers:
                context.bus.clear_messages()
                return

        time.sleep(1)
@given('only one timer is set')
def given_single_timer(context):
    """Ensure exactly one timer is active: cancel everything, then set one."""
    given_no_timers(context)
    given_set_timer(context)
@given('a timer is expired')
def given_expired_timer(context):
    """Set a 3 second timer and sleep past its expiry so it is ringing."""
    emit_utterance(context.bus, 'set a 3 second timer')
    wait_for_dialog(context.bus, ['started.timer'])
    time.sleep(4)
@then('"mycroft-timer" should stop beeping')
def then_stop_beeping(context):
    """Wait up to 10 seconds for the timer beep to stop."""
    # TODO: Better check!
    import psutil
    for i in range(10):
        # The beep is played via the "paplay" command line tool; once no
        # such process is running the timer has stopped beeping.
        if 'paplay' not in [p.name() for p in psutil.process_iter()]:
            break
        time.sleep(1)
    else:
        assert False, "Timer is still ringing"
| 32.71 | 74 | 0.633751 |
3bc5501a5c371dacec1128da0ac60374ec6521b1 | 4,433 | py | Python | lib/es/tests/test_commands.py | oremj/olympia | a88cdedd5c4755e8021ea30801974dbc71efa1f8 | [
"BSD-3-Clause"
] | null | null | null | lib/es/tests/test_commands.py | oremj/olympia | a88cdedd5c4755e8021ea30801974dbc71efa1f8 | [
"BSD-3-Clause"
] | null | null | null | lib/es/tests/test_commands.py | oremj/olympia | a88cdedd5c4755e8021ea30801974dbc71efa1f8 | [
"BSD-3-Clause"
] | null | null | null | import StringIO
import threading
from nose.tools import eq_
from django.core import management
from django.db import connection
import amo.search
import amo.tests
from amo.urlresolvers import reverse
from amo.utils import urlparams
from es.management.commands.reindex import call_es
from lib.es.utils import is_reindexing_amo, unflag_reindexing_amo
class TestIndexCommand(amo.tests.ESTestCase):
    """End-to-end test of the `reindex` management command.

    Verifies that search keeps working (and new documents can still be
    indexed) while a full ES reindexation runs in a background thread, and
    that fresh indices with aliases replace the old ones afterwards.
    """

    def setUp(self):
        super(TestIndexCommand, self).setUp()
        if is_reindexing_amo():
            unflag_reindexing_amo()

        self.url = reverse('search.search')
        # Any index created during the test will be deleted.
        self.indices = call_es('_status').json()['indices'].keys()

    def tearDown(self):
        # Delete only the indices created during this test run.
        current_indices = call_es('_status').json()['indices'].keys()
        for index in current_indices:
            if index not in self.indices:
                call_es(index, method='DELETE')

    def check_results(self, expected):
        """Make sure the expected addons are listed in a standard search."""
        response = self.client.get(urlparams(self.url, sort='downloads'))
        eq_(response.status_code, 200, str(response.content))
        got = self.get_results(response)

        for addon in expected:
            assert addon.pk in got, '%s is not in %s' % (addon.pk, got)
        return response

    def get_results(self, response):
        """Return pks of add-ons shown on search results page."""
        pager = response.context['pager']
        results = []
        for page_num in range(pager.paginator.num_pages):
            results.extend([item.pk for item
                            in pager.paginator.page(page_num + 1)])
        return results

    def get_indices_aliases(self):
        """Return the test indices with an alias."""
        indices = call_es('_aliases').json()
        items = [(index, aliases['aliases'].keys()[0])
                 for index, aliases in indices.items()
                 if len(aliases['aliases']) > 0 and index.startswith('test')]
        items.sort()
        return items

    def test_reindexation(self):
        # Adding an addon.
        addon = amo.tests.addon_factory()
        self.refresh()

        # The search should return the addon.
        wanted = [addon]
        self.check_results(wanted)

        # Current indices with aliases.
        old_indices = self.get_indices_aliases()

        # This is to start a reindexation in the background.
        class ReindexThread(threading.Thread):
            def __init__(self):
                # Capture the command's output for assertions later.
                self.stdout = StringIO.StringIO()
                super(ReindexThread, self).__init__()

            def run(self):
                management.call_command('reindex', stdout=self.stdout)
        t = ReindexThread()
        t.start()

        # Wait for the reindex in the thread to flag the database.
        # The database transaction isn't shared with the thread, so force the
        # commit.
        while t.is_alive() and not is_reindexing_amo():
            connection._commit()
            connection.clean_savepoints()

        # We should still be able to search in the foreground while the reindex
        # is being done in the background. We should also be able to index new
        # documents, and they should not be lost.
        old_addons_count = len(wanted)
        while t.is_alive() and len(wanted) < old_addons_count + 3:
            wanted.append(amo.tests.addon_factory())
            connection._commit()
            connection.clean_savepoints()
            amo.search.get_es().refresh()
            self.check_results(wanted)

        if len(wanted) == old_addons_count:
            raise AssertionError('Could not index objects in foreground while '
                                 'reindexing in the background.')

        t.join()  # Wait for the thread to finish.
        t.stdout.seek(0)
        stdout = t.stdout.read()
        assert 'Reindexation done' in stdout, stdout

        # The reindexation is done, let's double check we have all our docs.
        connection._commit()
        connection.clean_savepoints()
        amo.search.get_es().refresh()
        self.check_results(wanted)

        # New indices have been created, and aliases now point to them.
        new_indices = self.get_indices_aliases()
        eq_(len(old_indices), len(new_indices), (old_indices, new_indices))
        assert new_indices != old_indices, stdout
60d7e716750e00e4a1a26c2de40fd417bc722e2a | 3,697 | py | Python | SampleOAuth2_UsingPythonClient/settings.py | mmattes/SampleOAuth2_UsingPythonClient | b3aabe7bc8af65fd00cc4908a8473175fc301913 | [
"Apache-2.0"
] | 24 | 2018-10-10T00:19:13.000Z | 2021-10-19T19:12:41.000Z | SampleOAuth2_UsingPythonClient/settings.py | mmattes/SampleOAuth2_UsingPythonClient | b3aabe7bc8af65fd00cc4908a8473175fc301913 | [
"Apache-2.0"
] | 5 | 2018-12-04T08:14:35.000Z | 2021-07-06T08:33:58.000Z | SampleOAuth2_UsingPythonClient/settings.py | mmattes/SampleOAuth2_UsingPythonClient | b3aabe7bc8af65fd00cc4908a8473175fc301913 | [
"Apache-2.0"
] | 16 | 2018-12-10T19:37:32.000Z | 2022-01-23T20:35:50.000Z | """
Django settings for SampleOAuth2_UsingPythonClient project.
Generated by 'django-admin startproject' using Django 1.10.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to the repository; rotate it and load
# it from the environment before any non-sample deployment.
SECRET_KEY = 'v1lpa3e#c__brq#htp@v_&m!t1!7ii)9(qjqh$3a+j$hyj_^&n'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    'app',
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'SampleOAuth2_UsingPythonClient.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'SampleOAuth2_UsingPythonClient.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [os.path.join(BASE_DIR, "static"), ]
# OAuth2 config: fill in the '<EnterHere>' placeholders with the values
# from your Intuit developer dashboard before running the sample.
CLIENT_ID = '<EnterHere>'
CLIENT_SECRET = '<EnterHere>'
REDIRECT_URI = 'http://localhost:8000/app/callback'
ENVIRONMENT = 'sandbox'
# QBO Base URLs
QBO_BASE_SANDBOX = 'https://sandbox-quickbooks.api.intuit.com'
QBO_BASE_PROD = 'https://quickbooks.api.intuit.com'
# OAuth1 config for migration (only needed when migrating OAuth1 tokens)
CONSUMER_KEY = '<EnterHere>'
CONSUMER_SECRET = '<EnterHere>'
ACCESS_KEY = '<EnterHere>'
ACCESS_SECRET = '<EnterHere>'
REALM_ID = '<EnterHere>'
| 25.853147 | 91 | 0.706789 |
1a0b8628c1ea3fe63abd2edddda48334d3390744 | 2,107 | py | Python | python/dynamic-programming/boolean-paranthesis.py | fossabot/a-grim-loth | a6c8d549289a39ec981c1e0d0c754bb2708dfff9 | [
"MIT"
] | 4 | 2021-06-26T17:18:47.000Z | 2022-02-02T15:02:27.000Z | python/dynamic-programming/boolean-paranthesis.py | fossabot/a-grim-loth | a6c8d549289a39ec981c1e0d0c754bb2708dfff9 | [
"MIT"
] | 8 | 2021-06-29T07:00:32.000Z | 2021-12-01T11:26:22.000Z | python/dynamic-programming/boolean-paranthesis.py | fossabot/a-grim-loth | a6c8d549289a39ec981c1e0d0c754bb2708dfff9 | [
"MIT"
] | 3 | 2021-07-14T14:42:08.000Z | 2021-12-07T19:36:53.000Z | # Returns count of all possible
# parenthesizations that lead to
# result true for a boolean
# expression with symbols like
# true and false and operators
# like &, | and ^ filled between symbols
def countParenth(symb, oper, n):
    """Count parenthesizations of a boolean expression evaluating to True.

    Arguments:
        symb: string of operand symbols, each 'T' (true) or 'F' (false).
        oper: string of operators between them, each '&', '|' or '^'.
        n:    number of operand symbols (len(symb)).

    Uses interval dynamic programming: true_cnt[i][j] / false_cnt[i][j]
    hold the number of ways the sub-expression spanning operands i..j
    can be parenthesized to evaluate True / False.
    """
    true_cnt = [[0] * (n + 1) for _ in range(n + 1)]
    false_cnt = [[0] * (n + 1) for _ in range(n + 1)]
    # Base case: a single operand has exactly one (trivial)
    # parenthesization, contributing to T or F depending on its symbol.
    for i in range(n):
        true_cnt[i][i] = 1 if symb[i] == "T" else 0
        false_cnt[i][i] = 1 if symb[i] == "F" else 0
    # Widen the interval one operand at a time.
    for gap in range(1, n):
        for i in range(n - gap):
            j = i + gap
            for k in range(i, j):
                # Split at operator k: left part is i..k, right is k+1..j.
                left_total = true_cnt[i][k] + false_cnt[i][k]
                right_total = true_cnt[k + 1][j] + false_cnt[k + 1][j]
                both_true = true_cnt[i][k] * true_cnt[k + 1][j]
                both_false = false_cnt[i][k] * false_cnt[k + 1][j]
                if oper[k] == "&":
                    # AND is true only when both sides are true.
                    true_cnt[i][j] += both_true
                    false_cnt[i][j] += left_total * right_total - both_true
                elif oper[k] == "|":
                    # OR is false only when both sides are false.
                    false_cnt[i][j] += both_false
                    true_cnt[i][j] += left_total * right_total - both_false
                elif oper[k] == "^":
                    # XOR is true exactly when the sides differ.
                    true_cnt[i][j] += (false_cnt[i][k] * true_cnt[k + 1][j]
                                       + true_cnt[i][k] * false_cnt[k + 1][j])
                    false_cnt[i][j] += (true_cnt[i][k] * true_cnt[k + 1][j]
                                        + false_cnt[i][k] * false_cnt[k + 1][j])
    return true_cnt[0][n - 1]
# Driver Code: exercises countParenth() on the example below.
# Expected output: 4.
symbols = "TTFT"
operators = "|&^"
n = len(symbols)
# There are 4 ways
# ((T|T)&(F^T)), (T|(T&(F^T))),
# (((T|T)&F)^T) and (T|((T&F)^T))
print(countParenth(symbols, operators, n))
| 29.263889 | 76 | 0.41196 |
de3c65107a7b9847acd2b18b75cf93b8ca67777c | 2,231 | py | Python | discriminator.py | TropComplique/SRFeat-pytorch | 194add4f199116153c500edbd0b7bca97913e995 | [
"MIT"
] | null | null | null | discriminator.py | TropComplique/SRFeat-pytorch | 194add4f199116153c500edbd0b7bca97913e995 | [
"MIT"
] | 1 | 2019-10-16T03:51:06.000Z | 2019-10-16T10:51:21.000Z | discriminator.py | TropComplique/SRFeat-pytorch | 194add4f199116153c500edbd0b7bca97913e995 | [
"MIT"
] | 2 | 2019-12-16T01:29:04.000Z | 2021-09-01T15:13:41.000Z | import torch
import torch.nn as nn
from torch.nn.utils import spectral_norm
USE_SN = True
def normalization(module):
    """Optionally apply spectral normalization to ``module``.

    When the module-level flag ``USE_SN`` is set, the module's weight is
    reparameterized with ``torch.nn.utils.spectral_norm``; otherwise the
    module is returned unchanged.
    """
    if USE_SN:
        return spectral_norm(module)
    return module
class Discriminator(nn.Module):
    """Convolutional discriminator that scores images with one real value."""
    def __init__(self, in_channels, image_size, depth=64):
        """
        Arguments:
            in_channels: an integer.
            image_size: a tuple of integers (w, h); both must be
                divisible by 16 (total downsampling of the conv stack).
            depth: an integer, base channel count.
        """
        super(Discriminator, self).__init__()
        # Four stride-2 conv3x3 blocks downsample by 16 in each spatial
        # dimension while the channel count grows to 8 * depth.
        self.layers = nn.Sequential(
            normalization(nn.Conv2d(in_channels, depth, kernel_size=3, padding=1)),
            nn.LeakyReLU(0.2, inplace=True),
            conv3x3(depth, depth, stride=2),
            conv3x3(depth, 2 * depth, stride=1),
            conv3x3(2 * depth, 2 * depth, stride=2),
            conv3x3(2 * depth, 4 * depth, stride=1),
            conv3x3(4 * depth, 4 * depth, stride=2),
            conv3x3(4 * depth, 8 * depth, stride=1),
            conv3x3(8 * depth, 8 * depth, stride=2)
        )
        # right now receptive field is 61x61,
        # see https://fomoro.com/research/article/receptive-field-calculator
        # The divisibility check guarantees the flattened feature size
        # computed below matches the conv output exactly.
        w, h = image_size
        assert w % 16 == 0 and h % 16 == 0
        area = (w // 16) * (h // 16)
        in_features = 8 * depth * area
        self.fc = nn.Sequential(
            normalization(nn.Linear(in_features, 1024)),
            nn.LeakyReLU(0.2, inplace=True),
            normalization(nn.Linear(1024, 1))
        )
    def forward(self, x):
        """
        The input tensor represents
        images with pixel values in [0, 1] range.
        Arguments:
            x: a float tensor with shape [b, 3, h, w].
        Returns:
            a float tensor with shape [b].
        """
        # Rescale from [0, 1] to [-1, 1] before the convolutional stack.
        x = 2.0 * x - 1.0
        x = self.layers(x)
        # Flatten all feature maps into one vector per batch element.
        x = torch.flatten(x, start_dim=1)
        # Linear head yields a single realism score per image.
        x = self.fc(x).squeeze(1)
        return x
def conv3x3(in_channels, out_channels, stride):
    """Conv-BN-LeakyReLU block with a 3x3 kernel and padding 1.

    The convolution goes through the module-level `normalization` helper
    (optional spectral norm) and carries no bias, since the following
    BatchNorm supplies its own affine shift.
    """
    conv = nn.Conv2d(
        in_channels,
        out_channels,
        kernel_size=3,
        stride=stride,
        padding=1,
        bias=False,
    )
    return nn.Sequential(
        normalization(conv),
        nn.BatchNorm2d(out_channels),
        nn.LeakyReLU(0.2, inplace=True),
    )
| 27.207317 | 83 | 0.555805 |
5cf7578b8a1f7b8c90af05a59a55c38b2bcf85eb | 347 | py | Python | fileinfo/fileinfo.py | pycabook/fileinfo | 48f23f2bde22483495a3b1cd16a5de346447efb5 | [
"MIT"
] | 3 | 2019-09-28T21:04:53.000Z | 2021-01-06T22:18:27.000Z | fileinfo/fileinfo.py | lgiordani/fileinfo | 6395b4848a196c9e9f790ecbe8c6b20b76cab312 | [
"MIT"
] | 3 | 2020-02-12T12:01:34.000Z | 2020-04-14T20:23:43.000Z | fileinfo/fileinfo.py | pycabook/fileinfo | 48f23f2bde22483495a3b1cd16a5de346447efb5 | [
"MIT"
] | 2 | 2020-03-13T07:40:02.000Z | 2020-03-22T19:25:32.000Z | import os
class FileInfo:
    """Lightweight descriptor for a file system path.

    Remembers the path given at construction time and derives the base
    file name from it; `get_info` additionally resolves the absolute
    path and reads the current on-disk size.
    """

    def __init__(self, path):
        self.original_path = path
        self.filename = os.path.basename(path)

    def get_info(self):
        """Return (filename, original path, absolute path, size in bytes).

        The size is read from disk on every call, so the path must exist
        (os.path.getsize raises OSError otherwise).
        """
        absolute_path = os.path.abspath(self.original_path)
        size_in_bytes = os.path.getsize(self.original_path)
        return (self.filename, self.original_path, absolute_path, size_in_bytes)
| 21.6875 | 48 | 0.579251 |
f98e463615922d4704dc706e931dd25b2975d7f0 | 2,733 | py | Python | menus_project/constants.py | arcanemachine/menu-maker | c675dff5f04cde9924095d953bd7d7ad554d659d | [
"CC-BY-4.0"
] | 2 | 2022-02-14T15:21:02.000Z | 2022-02-15T06:20:48.000Z | menus_project/constants.py | arcanemachine/Menu-Maker | c675dff5f04cde9924095d953bd7d7ad554d659d | [
"CC-BY-4.0"
] | 1 | 2022-03-13T06:02:48.000Z | 2022-03-28T12:58:47.000Z | menus_project/constants.py | arcanemachine/django-menu-maker | 665b815c7255b756725d08bf3f66f144407b83d8 | [
"CC-BY-4.0"
] | null | null | null | from django.conf import settings
import server_config
# Frontend URLs (the email-confirmation link points back at the SPA).
FRONTEND_SERVER_URL = server_config.FRONTEND_SERVER_URL
FRONTEND_SERVER_URL_CONFIRM_EMAIL = \
    FRONTEND_SERVER_URL + '/register/confirm-email/'
# misc
PROJECT_NAME = "Menu Maker"
# Slugs that would collide with URL routes, so they may not be used as names.
RESERVED_KEYWORDS = ['add-new-restaurant', 'all', 'delete', 'edit', 'new-item',
                     'new-section']
# validation
MAX_RESTAURANTS_PER_USER = 3
# forms
FORMS_CAPTCHA_FIELD_HELP_TEXT = \
    "Please enter the letters seen in the image above."
# testing: fixture values shared across the test suite
TEST_USER_USERNAME = 'test_user'
TEST_USER_FIRST_NAME = 'Test'
TEST_USER_LAST_NAME = 'User'
TEST_USER_EMAIL = 'test_user@email.com'
TEST_USER_PASSWORD = 'my_password321'
RESTAURANT_ADMIN_USER_USERNAME = 'restaurant_admin'
TEST_RESTAURANT_NAME = 'Test Restaurant'
TEST_MENU_NAME = 'Test Menu'
TEST_MENUSECTION_NAME = 'Test Menu Section'
TEST_MENUITEM_NAME = 'Test Menu Item'
TEST_MENUITEM_DESCRIPTION = 'Test Menu Item Description'
# STRINGS: user-facing messages #
# misc
RESERVED_KEYWORD_ERROR_STRING = \
    "This name is reserved and cannot be used. Please choose another name."
# restaurants
MAX_RESTAURANTS_PER_USER_ERROR_STRING = "You cannot register more than "\
    f"{MAX_RESTAURANTS_PER_USER} restaurants. If you wish to register a "\
    "new restaurant, you must first delete one of your existing restaurants."
RESTAURANT_DUPLICATE_SLUG_ERROR_STRING = \
    "This name is too similar to an existing restaurant name."
# users - registration
USER_REGISTER_ALREADY_AUTHENTICATED_MESSAGE = "You are already logged in, "\
    "so we redirected you here from the registration page."
# The success message depends on whether email confirmation is enabled.
if settings.EMAIL_CONFIRMATION_REQUIRED:
    USER_REGISTER_SUCCESS_MESSAGE = "Registration successful. Please "\
        "check your email inbox for your confirmation email."
else:
    USER_REGISTER_SUCCESS_MESSAGE = "Registration successful. "\
        "You may now login to your account."
# users - activation
USER_IS_UNCONFIRMED_MESSAGE = "You need to confirm your email address using "\
    "the activation link we sent to your inbox (check the spam folder too)."
USER_ACTIVATION_VIEW_MESSAGE = \
    "Please login to your account to complete the activation process."
USER_ACTIVATION_INVALID_URL_MESSAGE = "This validation URL is invalid."
USER_ACTIVATION_SUCCESS_MESSAGE = \
    "Your account has been successfully activated."
# users - authentication
USER_LOGIN_ALREADY_AUTHENTICATED_MESSAGE = \
    "You are already logged in, so we redirected you here from the login page."
USER_LOGIN_SUCCESS_MESSAGE = "You have successfully logged in."
USER_LOGOUT_SUCCESS_MESSAGE = "You have successfully logged out."
USER_UPDATE_SUCCESS_MESSAGE = "You have updated your personal information."
USER_DELETE_SUCCESS_MESSAGE = "Your account has been deleted."
| 37.958333 | 79 | 0.778632 |
eba8f88ddd89fc2c20a916ffdc5c8530763fa1da | 2,085 | py | Python | metadata/iucr.py | sjennewein/MetaDataDistiller | 9d1559d2d30ccceadeff8c9921e4468fdb0651f2 | [
"MIT"
] | null | null | null | metadata/iucr.py | sjennewein/MetaDataDistiller | 9d1559d2d30ccceadeff8c9921e4468fdb0651f2 | [
"MIT"
] | null | null | null | metadata/iucr.py | sjennewein/MetaDataDistiller | 9d1559d2d30ccceadeff8c9921e4468fdb0651f2 | [
"MIT"
] | null | null | null | import json
import requests
from bs4 import BeautifulSoup
from .payload import Payload, Author, Affiliation
def map(url):
    """Scrape an IUCr article page and map its <meta> tags onto a Payload.

    Returns a populated Payload on success, or an empty dict when the
    page could not be fetched (kept for backwards compatibility with
    callers that test for a falsy/empty result).

    NOTE: this function shadows the builtin `map`; the name is kept to
    preserve the module's public interface.
    """
    meta = Payload()
    try:
        r = requests.get(url)
    except Exception:  # best-effort fetch; network/URL errors yield {}
        return {}
    html = BeautifulSoup(r.content, 'html.parser')
    metadata = html.find_all("meta")
    r.close()
    author = None  # most recently seen author; institutions attach to it
    for item in metadata:
        if not item.has_attr('name'):
            continue
        if item['name'] == 'prism.publicationDate':
            meta.publication_date = item['content']
        elif item['name'] == 'citation_online_date':
            meta.online_date = item['content']
        elif item['name'] == 'citation_author':
            author = Author(item['content'])
            meta.authors.append(author)
        elif item['name'] == 'citation_author_institution':
            # Institutions belong to the author tag that precedes them;
            # ignore a stray institution appearing before any author
            # instead of crashing on `None`.
            if author is not None:
                author.affiliations.append(Affiliation(item['content']))
        elif item['name'] == 'citation_issue':
            meta.issues.first = item['content']
        elif item['name'] == 'citation_keywords':
            meta.keywords.append(item['content'])
        elif item['name'] == 'article_references':
            meta.references = item['content']
        elif item['name'].lower() == 'dc.language':
            meta.language = item['content']
        elif item['name'] == 'citation_journal_title':
            meta.journal_title = item['content']
        elif item['name'] == 'prism.issn':
            meta.issn = item['content']
        elif item['name'] == 'citation_doi':
            meta.doi = item['content']
        elif item['name'] == 'citation_title':
            meta.title = item['content']
        elif item['name'] == 'prism.startingPage':
            meta.pages.first = item['content']
        elif item['name'] == 'prism.endingPage':
            meta.pages.last = item['content']
        elif item['name'] == 'prism.volume':
            meta.volumes.first = item['content']
        elif item['name'] == 'citation_publisher':
            meta.publisher = item['content']
    # BUG FIX: the original appended `author` again here, duplicating the
    # last author -- every author is already appended when created above.
    return meta
| 35.338983 | 68 | 0.572662 |
b93601c4387b72156d1158bae75ed920d2c4414a | 15,538 | py | Python | virt/ansible-latest/lib/python2.7/site-packages/ansible/modules/database/mongodb/mongodb_user.py | lakhlaifi/RedHat-Ansible | 27c5077cced9d416081fcd5d69ea44bca0317fa4 | [
"Apache-2.0"
] | 1 | 2020-03-29T18:41:01.000Z | 2020-03-29T18:41:01.000Z | ansible/ansible/modules/database/mongodb/mongodb_user.py | SergeyCherepanov/ansible | 875711cd2fd6b783c812241c2ed7a954bf6f670f | [
"MIT"
] | 7 | 2020-09-07T17:27:56.000Z | 2022-03-02T06:25:46.000Z | ansible/ansible/modules/database/mongodb/mongodb_user.py | SergeyCherepanov/ansible | 875711cd2fd6b783c812241c2ed7a954bf6f670f | [
"MIT"
] | 1 | 2020-10-30T12:48:24.000Z | 2020-10-30T12:48:24.000Z | #!/usr/bin/python
# (c) 2012, Elliott Foster <elliott@fourkitchens.com>
# Sponsored by Four Kitchens http://fourkitchens.com.
# (c) 2014, Epic Games, Inc.
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: mongodb_user
short_description: Adds or removes a user from a MongoDB database.
description:
- Adds or removes a user from a MongoDB database.
version_added: "1.1"
options:
login_user:
description:
- The username used to authenticate with
login_password:
description:
- The password used to authenticate with
login_host:
description:
- The host running the database
default: localhost
login_port:
description:
- The port to connect to
default: 27017
login_database:
version_added: "2.0"
description:
- The database where login credentials are stored
replica_set:
version_added: "1.6"
description:
- Replica set to connect to (automatically connects to primary for writes)
database:
description:
- The name of the database to add/remove the user from
required: true
name:
description:
- The name of the user to add or remove
required: true
aliases: [ 'user' ]
password:
description:
- The password to use for the user
ssl:
version_added: "1.8"
description:
- Whether to use an SSL connection when connecting to the database
type: bool
ssl_cert_reqs:
version_added: "2.2"
description:
- Specifies whether a certificate is required from the other side of the connection, and whether it will be validated if provided.
default: "CERT_REQUIRED"
choices: ["CERT_REQUIRED", "CERT_OPTIONAL", "CERT_NONE"]
roles:
version_added: "1.3"
description:
- >
The database user roles valid values could either be one or more of the following strings:
'read', 'readWrite', 'dbAdmin', 'userAdmin', 'clusterAdmin', 'readAnyDatabase', 'readWriteAnyDatabase', 'userAdminAnyDatabase',
'dbAdminAnyDatabase'
- "Or the following dictionary '{ db: DATABASE_NAME, role: ROLE_NAME }'."
- "This param requires pymongo 2.5+. If it is a string, mongodb 2.4+ is also required. If it is a dictionary, mongo 2.6+ is required."
state:
description:
- The database user state
default: present
choices: [ "present", "absent" ]
update_password:
default: always
choices: ['always', 'on_create']
version_added: "2.1"
description:
- C(always) will update passwords if they differ. C(on_create) will only set the password for newly created users.
notes:
- Requires the pymongo Python package on the remote host, version 2.4.2+. This
can be installed using pip or the OS package manager. @see http://api.mongodb.org/python/current/installation.html
requirements: [ "pymongo" ]
author:
- "Elliott Foster (@elliotttf)"
- "Julien Thebault (@Lujeni)"
'''
EXAMPLES = '''
# Create 'burgers' database user with name 'bob' and password '12345'.
- mongodb_user:
database: burgers
name: bob
password: 12345
state: present
# Create a database user via SSL (MongoDB must be compiled with the SSL option and configured properly)
- mongodb_user:
database: burgers
name: bob
password: 12345
state: present
ssl: True
# Delete 'burgers' database user with name 'bob'.
- mongodb_user:
database: burgers
name: bob
state: absent
# Define more users with various specific roles (if not defined, no roles is assigned, and the user will be added via pre mongo 2.2 style)
- mongodb_user:
database: burgers
name: ben
password: 12345
roles: read
state: present
- mongodb_user:
database: burgers
name: jim
password: 12345
roles: readWrite,dbAdmin,userAdmin
state: present
- mongodb_user:
database: burgers
name: joe
password: 12345
roles: readWriteAnyDatabase
state: present
# add a user to database in a replica set, the primary server is automatically discovered and written to
- mongodb_user:
database: burgers
name: bob
replica_set: belcher
password: 12345
roles: readWriteAnyDatabase
state: present
# add a user 'oplog_reader' with read only access to the 'local' database on the replica_set 'belcher'. This is useful for oplog access (MONGO_OPLOG_URL).
# please notice the credentials must be added to the 'admin' database because the 'local' database is not syncronized and can't receive user credentials
# To login with such user, the connection string should be MONGO_OPLOG_URL="mongodb://oplog_reader:oplog_reader_password@server1,server2/local?authSource=admin"
# This syntax requires mongodb 2.6+ and pymongo 2.5+
- mongodb_user:
login_user: root
login_password: root_password
database: admin
user: oplog_reader
password: oplog_reader_password
state: present
replica_set: belcher
roles:
- db: local
role: read
'''
RETURN = '''
user:
description: The name of the user to add or remove.
returned: success
type: str
'''
import os
import ssl as ssl_lib
import traceback
from distutils.version import LooseVersion
from operator import itemgetter
# Import pymongo, preferring the modern (>= 2.4) API. `pymongo_found`
# records whether any usable driver is present so the module can fail
# gracefully from main() instead of crashing at import time.
try:
    from pymongo.errors import ConnectionFailure
    from pymongo.errors import OperationFailure
    from pymongo import version as PyMongoVersion
    from pymongo import MongoClient
except ImportError:
    try: # for older PyMongo 2.2
        from pymongo import Connection as MongoClient
    except ImportError:
        pymongo_found = False
    else:
        pymongo_found = True
else:
    pymongo_found = True
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
from ansible.module_utils.six import binary_type, text_type
from ansible.module_utils.six.moves import configparser
from ansible.module_utils._text import to_native
# =========================================
# MongoDB module specific support methods.
#
def check_compatibility(module, client):
    """Check the compatibility between the driver and the database.
    See: https://docs.mongodb.com/ecosystem/drivers/driver-compatibility-reference/#python-driver-compatibility
    Args:
        module: Ansible module.
        client (cursor): Mongodb cursor on admin database.
    """
    # Compare the server's self-reported version with the installed
    # pymongo version and abort (fail_json does not return) on known
    # unsupported pairings.
    loose_srv_version = LooseVersion(client.server_info()['version'])
    loose_driver_version = LooseVersion(PyMongoVersion)
    if loose_srv_version >= LooseVersion('3.2') and loose_driver_version < LooseVersion('3.2'):
        module.fail_json(msg=' (Note: you must use pymongo 3.2+ with MongoDB >= 3.2)')
    elif loose_srv_version >= LooseVersion('3.0') and loose_driver_version <= LooseVersion('2.8'):
        module.fail_json(msg=' (Note: you must use pymongo 2.8+ with MongoDB 3.0)')
    elif loose_srv_version >= LooseVersion('2.6') and loose_driver_version <= LooseVersion('2.7'):
        module.fail_json(msg=' (Note: you must use pymongo 2.7+ with MongoDB 2.6)')
    elif LooseVersion(PyMongoVersion) <= LooseVersion('2.5'):
        # Regardless of server version, the roles parameter needs 2.5+.
        module.fail_json(msg=' (Note: you must be on mongodb 2.4+ and pymongo 2.5+ to use the roles param)')
def user_find(client, user, db_name):
    """Look up a user account in the admin user collection.

    Args:
        client (cursor): Mongodb cursor on admin database.
        user (str): Name of the user to look for.
        db_name (str): Database the user should belong to.
    Returns:
        dict: the matching user document when it exists, False otherwise.
    """
    for candidate in client["admin"].system.users.find():
        if candidate['user'] != user:
            continue
        # NOTE: mongo 2.4 user documents carry no 'db' field; treat those
        # as a match on the user name alone.
        if 'db' not in candidate or candidate["db"] == db_name:
            return candidate
    return False
def user_add(module, client, db_name, user, password, roles):
    """Create or update a MongoDB user via pymongo's add_user.

    `module` is accepted for signature symmetry with user_remove but is
    not used here.
    """
    # pymongo's user_add is a _create_or_update_user so we won't know if it was changed or updated
    # without reproducing a lot of the logic in database.py of pymongo
    db = client[db_name]
    if roles is None:
        db.add_user(user, password, False)
    else:
        db.add_user(user, password, None, roles=roles)
def user_remove(module, client, db_name, user):
    """Remove a MongoDB user if it exists.

    May exit the Ansible module directly (exit_json does not return):
    in check mode, or when there is nothing to remove.
    """
    exists = user_find(client, user, db_name)
    if exists:
        if module.check_mode:
            # Report "would have changed" without touching the database.
            module.exit_json(changed=True, user=user)
        db = client[db_name]
        db.remove_user(user)
    else:
        module.exit_json(changed=False, user=user)
def load_mongocnf():
    """Load default login credentials from the user's ~/.mongodb.cnf.

    Returns:
        dict: {'user': ..., 'password': ...} read from the [client]
        section, or False when the file is missing, unreadable, or does
        not contain the expected section/options.
    """
    config = configparser.RawConfigParser()
    mongocnf = os.path.expanduser('~/.mongodb.cnf')
    try:
        # config.read() silently skips a missing/unreadable file; the
        # get() calls below then raise NoSectionError, handled like any
        # other malformed config.  (The original readfp(open(...)) leaked
        # the file handle, used an API removed in Python 3.12, and
        # crashed with an uncaught NoSectionError when the [client]
        # section was absent.)
        config.read(mongocnf)
        creds = dict(
            user=config.get('client', 'user'),
            password=config.get('client', 'pass')
        )
    except (configparser.NoOptionError, configparser.NoSectionError, IOError):
        return False
    return creds
def check_if_roles_changed(uinfo, roles, db_name):
    """Return True when the requested roles differ from the stored ones.

    Users that read the replica-set oplog are a special case: their
    credentials must live in the 'admin' database (the 'local' database
    is not synchronized among replica sets and cannot store credentials)
    while the role itself points at 'local'.  A stored document then
    looks like:

        {
            "_id": "admin.oplog_reader",
            "user": "oplog_reader",
            "db": "admin",                        # <-- admin DB
            "roles": [
                {"role": "read", "db": "local"}   # <-- local DB
            ]
        }

    so roles must be compared as {role, db} dicts, not bare names.
    """
    def _as_role_dicts(role_list, default_db):
        # A bare string/bytes role name is shorthand for that role on
        # `default_db`; expand it to the dict form MongoDB stores.
        expanded = []
        for entry in role_list:
            if isinstance(entry, (binary_type, text_type)):
                expanded.append({"role": entry, "db": default_db})
            else:
                expanded.append(entry)
        return expanded

    desired = sorted(_as_role_dicts(roles, db_name), key=itemgetter('db'))
    current = sorted(uinfo.get('roles', []), key=itemgetter('db'))
    return desired != current
# =========================================
# Module execution.
#
def main():
    """Entry point of the Ansible module: parse parameters, connect to
    MongoDB, then add/update or remove the requested user."""
    module = AnsibleModule(
        argument_spec=dict(
            login_user=dict(default=None),
            login_password=dict(default=None, no_log=True),
            login_host=dict(default='localhost'),
            login_port=dict(default='27017'),
            login_database=dict(default=None),
            replica_set=dict(default=None),
            database=dict(required=True, aliases=['db']),
            name=dict(required=True, aliases=['user']),
            password=dict(aliases=['pass'], no_log=True),
            ssl=dict(default=False, type='bool'),
            roles=dict(default=None, type='list'),
            state=dict(default='present', choices=['absent', 'present']),
            update_password=dict(default="always", choices=["always", "on_create"]),
            ssl_cert_reqs=dict(default='CERT_REQUIRED', choices=['CERT_NONE', 'CERT_OPTIONAL', 'CERT_REQUIRED']),
        ),
        supports_check_mode=True
    )
    # Abort early if no pymongo driver could be imported at module load.
    if not pymongo_found:
        module.fail_json(msg=missing_required_lib('pymongo'))
    # Unpack module parameters into locals for readability.
    login_user = module.params['login_user']
    login_password = module.params['login_password']
    login_host = module.params['login_host']
    login_port = module.params['login_port']
    login_database = module.params['login_database']
    replica_set = module.params['replica_set']
    db_name = module.params['database']
    user = module.params['name']
    password = module.params['password']
    ssl = module.params['ssl']
    roles = module.params['roles'] or []
    state = module.params['state']
    update_password = module.params['update_password']
    # Build the connection, authenticate, and verify driver/server
    # compatibility; any failure here is reported as a connection error.
    try:
        connection_params = {
            "host": login_host,
            "port": int(login_port),
        }
        if replica_set:
            connection_params["replicaset"] = replica_set
        if ssl:
            connection_params["ssl"] = ssl
            connection_params["ssl_cert_reqs"] = getattr(ssl_lib, module.params['ssl_cert_reqs'])
        client = MongoClient(**connection_params)
        # NOTE: this check must be done ASAP.
        # We don't need to be authenticated (this ability was lost in PyMongo 3.6)
        if LooseVersion(PyMongoVersion) <= LooseVersion('3.5'):
            check_compatibility(module, client)
        # No explicit credentials: fall back to ~/.mongodb.cnf if present.
        if login_user is None and login_password is None:
            mongocnf_creds = load_mongocnf()
            if mongocnf_creds is not False:
                login_user = mongocnf_creds['user']
                login_password = mongocnf_creds['password']
        # Exactly one of user/password supplied is an error.
        elif login_password is None or login_user is None:
            module.fail_json(msg='when supplying login arguments, both login_user and login_password must be provided')
        if login_user is not None and login_password is not None:
            client.admin.authenticate(login_user, login_password, source=login_database)
        elif LooseVersion(PyMongoVersion) >= LooseVersion('3.0'):
            # Unauthenticated access relies on MongoDB's localhost
            # exception, which only permits creating the first admin user.
            if db_name != "admin":
                module.fail_json(msg='The localhost login exception only allows the first admin account to be created')
            # else: this has to be the first admin user added
    except Exception as e:
        module.fail_json(msg='unable to connect to database: %s' % to_native(e), exception=traceback.format_exc())
    if state == 'present':
        if password is None and update_password == 'always':
            module.fail_json(msg='password parameter required when adding a user unless update_password is set to on_create')
        try:
            # With update_password=on_create, keep the existing password
            # and skip the update entirely when roles are unchanged.
            if update_password != 'always':
                uinfo = user_find(client, user, db_name)
                if uinfo:
                    password = None
                    if not check_if_roles_changed(uinfo, roles, db_name):
                        module.exit_json(changed=False, user=user)
            if module.check_mode:
                module.exit_json(changed=True, user=user)
            user_add(module, client, db_name, user, password, roles)
        except Exception as e:
            module.fail_json(msg='Unable to add or update user: %s' % to_native(e), exception=traceback.format_exc())
        # Here we can check password change if mongo provide a query for that : https://jira.mongodb.org/browse/SERVER-22848
        # newuinfo = user_find(client, user, db_name)
        # if uinfo['role'] == newuinfo['role'] and CheckPasswordHere:
        #     module.exit_json(changed=False, user=user)
    elif state == 'absent':
        try:
            user_remove(module, client, db_name, user)
        except Exception as e:
            module.fail_json(msg='Unable to remove user: %s' % to_native(e), exception=traceback.format_exc())
    module.exit_json(changed=True, user=user)
if __name__ == '__main__':
    main()
| 34.995495 | 160 | 0.643519 |
6733e3d5835869fdba5e0339a77614cee989fdaf | 7,292 | py | Python | statistics/plot_statistics.py | JovanP1/Animal-sound-recognition | 354cec20167b32a1d71e5a71a65d6461abb5d9b1 | [
"MIT"
] | 1 | 2022-03-24T11:49:43.000Z | 2022-03-24T11:49:43.000Z | statistics/plot_statistics.py | jovan-stojanovic/Animal-sound-recognition | 354cec20167b32a1d71e5a71a65d6461abb5d9b1 | [
"MIT"
] | null | null | null | statistics/plot_statistics.py | jovan-stojanovic/Animal-sound-recognition | 354cec20167b32a1d71e5a71a65d6461abb5d9b1 | [
"MIT"
] | null | null | null | import os
import sys
import numpy as np
import argparse
import h5py
import time
import pickle
import matplotlib.pyplot as plt
import csv
from sklearn import metrics
from utilities import (create_folder, get_filename, d_prime)
import config
def load_statistics(statistics_path):
    """Load pickled training statistics and return mAP curves.

    Args:
        statistics_path: path to a pickle file holding a dict with keys
            'bal' and 'test', each a list (one entry per evaluation) of
            dicts containing an 'average_precision' array over classes.

    Returns:
        (bal_map, test_map): two 1-D arrays of class-averaged AP (mAP),
        one value per logged evaluation.
    """
    # Use a context manager so the file handle is closed deterministically
    # (the original `pickle.load(open(...))` leaked the handle).
    with open(statistics_path, 'rb') as f:
        statistics_dict = pickle.load(f)

    bal_map = np.array([statistics['average_precision'] for statistics in statistics_dict['bal']])    # (N, classes_num)
    bal_map = np.mean(bal_map, axis=-1)
    test_map = np.array([statistics['average_precision'] for statistics in statistics_dict['test']])    # (N, classes_num)
    test_map = np.mean(test_map, axis=-1)
    return bal_map, test_map
def crop_label(label):
    """Shorten a class label to at most 16 characters on word boundaries.

    Labels of 16 characters or fewer are returned unchanged.  Longer
    labels are rebuilt word by word with each word prefixed by a space,
    so the cropped result starts with a leading space (matching the
    original legend formatting) and never exceeds 16 characters.
    """
    max_len = 16
    if len(label) <= max_len:
        return label
    cropped = ''
    for word in label.split(' '):
        candidate = cropped + ' ' + word
        if len(candidate) > max_len:
            break
        cropped = candidate
    return cropped
def add_comma(integer):
    """Format an integer with thousands separators, e.g. 1234567 -> '1,234,567'.

    The input is coerced with int() first, so floats and digit strings
    are accepted.  The original hand-rolled version dropped leading
    zeros in the final group (1005 -> '1,5') and inserted only a single
    comma for values >= 1,000,000 ('1234,567'); the ',' format spec
    handles every magnitude correctly.
    """
    return '{:,}'.format(int(integer))
def plot_classwise_iteration_map(args):
# Paths
save_out_path = 'path_to_save.pdf'
# create_folder(os.path.dirname(save_out_path))
# Load statistics
statistics_dict = pickle.load(open(r'path_to_model.pkl', 'rb'))
for i in range(377):
statistics_dict['bal'][i]['average_precision'] = statistics_dict['bal'][i]['average_precision'][72:138]
statistics_dict['bal'][i]['auc'] = statistics_dict['bal'][i]['auc'][72:138]
statistics_dict['test'][i]['average_precision'] = statistics_dict['test'][i]['average_precision'][72:138]
statistics_dict['test'][i]['auc'] = statistics_dict['test'][i]['auc'][72:138]
mAP_mat = np.array([e['average_precision'] for e in statistics_dict['test']])
mAP_mat = mAP_mat[0 : 300, :] # 300 * 2000 = 600k iterations
sorted_indexes = np.argsort(config.full_samples_per_class)[::-1]
fig, axs = plt.subplots(1, 3, figsize=(20, 5))
ranges = [np.arange(1, 11), np.arange(11, 22), np.arange(22, 33)]
axs[0].set_ylabel('AP')
for col in range(0, 3):
axs[col].set_ylim(0, 1.)
axs[col].set_xlim(0, 301)
axs[col].set_xlabel('Iterations')
axs[col].set_ylabel('AP')
axs[col].xaxis.set_ticks(np.arange(0, 301, 100))
axs[col].xaxis.set_ticklabels(['0', '200k', '400k', '600k'])
lines = []
for _ix in ranges[col]:
_label = crop_label(config.labels[sorted_indexes[_ix]]) + \
' ({})'.format(add_comma(config.full_samples_per_class[sorted_indexes[_ix]]))
line, = axs[col].plot(mAP_mat[:, sorted_indexes[_ix]], label=_label)
lines.append(line)
box = axs[col].get_position()
axs[col].set_position([box.x0, box.y0, box.width * 1., box.height])
axs[col].legend(handles=lines, bbox_to_anchor=(1., 1.))
axs[col].yaxis.grid(color='k', linestyle='solid', alpha=0.3, linewidth=0.3)
plt.tight_layout(pad=4, w_pad=1, h_pad=1)
plt.savefig(save_out_path)
print(save_out_path)
plot_classwise_iteration_map('plot_classwise_iteration_map')
def plot_six_figures(args):
    """Plot two mAP-vs-iteration comparison panels (batch size, and amount
    of training data) and save the figure as a PDF.

    `args` is unused; the statistics paths are hard-coded.

    BUGFIX: `plt.tight_layout(0, 1, 0)` used positional arguments, which
    were deprecated in Matplotlib 3.3 and removed in 3.5 (TypeError); the
    call now uses keywords. Unused locals (classes_num, labels,
    class_labels_indices_path) were removed.
    """
    # Arguments & parameters
    max_plot_iteration = 15000
    iterations = np.arange(0, max_plot_iteration, 2000)

    # Paths
    save_out_path = r"F:\audioset_data\results\two_figures_2.pdf"
    # create_folder(os.path.dirname(save_out_path))

    # Plot
    fig, ax = plt.subplots(1, 2, figsize=(14, 7))
    bal_alpha = 0.3
    test_alpha = 1.0
    linewidth = 1.

    # (d) Comparison of batch size (right panel).
    if True:
        lines = []
        (bal_map, test_map) = load_statistics(r"F:\audioset_data\CNN14 balanced mixup 100 percent 14750 iter\statistics\sample_rate=32000,window_size=1024,hop_size=320,mel_bins=64,fmin=50,fmax=14000\balanced=balanced\augmentation=mixup\batch_size=32\statistics.pkl")
        line, = ax[1].plot(bal_map, color='g', alpha=bal_alpha, linewidth=linewidth)
        line, = ax[1].plot(test_map, label='CNN14 balanced mixup, batch size=32', color='r', alpha=test_alpha, linewidth=linewidth)
        lines.append(line)

        (bal_map, test_map) = load_statistics(r"F:\audioset_data\CNN14 balanced mixup batch16 15000 iter\statistics\sample_rate=32000,window_size=1024,hop_size=320,mel_bins=64,fmin=50,fmax=14000\batch_size=16\statistics.pkl")
        line, = ax[1].plot(bal_map, color='r', alpha=bal_alpha, linewidth=linewidth)
        line, = ax[1].plot(test_map, label='CNN14 balanced mixup, batch size=16', color='g', alpha=test_alpha, linewidth=linewidth)
        lines.append(line)

        ax[1].legend(handles=lines, loc=2)
        ax[1].set_title('(d) Comparison of batch size')

    # (d) Comparison of amount of training data (left panel).
    if True:
        lines = []
        # 100% of full training data
        (bal_map, test_map) = load_statistics(r"F:\audioset_data\CNN14 balanced mixup 100 percent 14750 iter\statistics\sample_rate=32000,window_size=1024,hop_size=320,mel_bins=64,fmin=50,fmax=14000\balanced=balanced\augmentation=mixup\batch_size=32\statistics.pkl")
        line, = ax[0].plot(bal_map, color='r', alpha=bal_alpha, linewidth=linewidth)
        line, = ax[0].plot(test_map, label='CNN14 (100% full)', color='r', alpha=test_alpha, linewidth=linewidth)
        lines.append(line)

        # 50% of full training data
        (bal_map, test_map) = load_statistics(r"F:\audioset_data\CNN14 50 percent 20000 iter\statistics\sample_rate=32000,window_size=1024,hop_size=320,mel_bins=64,fmin=50,fmax=14000\batch_size=32\statistics.pkl")
        line, = ax[0].plot(bal_map, color='g', alpha=bal_alpha, linewidth=linewidth)
        line, = ax[0].plot(test_map, label='CNN14 (50% full)', color='g', alpha=test_alpha, linewidth=linewidth)
        lines.append(line)

        ax[0].legend(handles=lines, loc=2)
        ax[0].set_title('(c) Amount of training data comparison')

    for i in range(2):
        ax[i].set_ylim(0, 0.8)
        ax[i].set_xlim(0, len(iterations))
        ax[i].set_xlabel('Iterations')
        ax[i].set_ylabel('mAP')
        # NOTE(review): more tick labels are supplied than tick positions;
        # Matplotlib truncates the extras — confirm the intended axis.
        ax[i].xaxis.set_ticks(np.arange(0, len(iterations), 2))
        ax[i].xaxis.set_ticklabels(['0', '4k', '8k', '12k', '20k', '25k'])
        ax[i].yaxis.set_ticks(np.arange(0, 0.81, 0.05))
        ax[i].yaxis.set_ticklabels(['0', '', '0.1', '', '0.2', '', '0.3',
            '', '0.4', '', '0.5', '', '0.6', '', '0.7', '', '0.8'])
        ax[i].yaxis.grid(color='k', linestyle='solid', alpha=0.3, linewidth=0.3)
        ax[i].xaxis.grid(color='k', linestyle='solid', alpha=0.3, linewidth=0.3)

    plt.tight_layout(pad=0, h_pad=1, w_pad=0)
    plt.savefig(save_out_path)
    print('Save figure to {}'.format(save_out_path))


plot_six_figures('plot_six_figures')
| 42.150289 | 267 | 0.63124 |
594258688ee42e808d4ae9c54566fdc7050546fb | 2,197 | py | Python | SSMOECHS/Optimizer/HEED.py | lee-jingu/SSMOECHS | 5afb0899304689c05a68580a9eb5610dd83ea76a | [
"MIT"
] | 1 | 2021-02-12T01:32:23.000Z | 2021-02-12T01:32:23.000Z | SSMOECHS/Optimizer/HEED.py | lee-jingu/SSMOECHS | 5afb0899304689c05a68580a9eb5610dd83ea76a | [
"MIT"
] | null | null | null | SSMOECHS/Optimizer/HEED.py | lee-jingu/SSMOECHS | 5afb0899304689c05a68580a9eb5610dd83ea76a | [
"MIT"
] | null | null | null | import copy
import networkx as nx
import numpy as np
import config as cf
import random
import math
from network import Network
def Optimizer(network, Alive_Node, Residual=False, R=30, IN_Median=False):
    """Run one round of HEED cluster-head election and clustering.

    Args:
        network: networkx graph whose nodes carry 'round', 'pos', 'N_Packet',
            'Next' attributes.
        Alive_Node: iterable of node ids still alive.
        Residual: False, or an array of (node_id, residual_energy) rows used
            to bias the CH probability — assumed shape (n, 2); TODO confirm.
        R: cluster radius, returned unchanged.
        IN_Median: unused; kept for interface compatibility.

    Returns:
        (HEED_NET, HEED_CHID, R): graph with next-hop edges added, list of
        elected cluster-head ids, and R.
    """
    HEED_NET = nx.create_empty_copy(network)
    HEED_CHID = []
    P = cf.P_CH

    ## CH Selection
    for i in Alive_Node:
        # NOTE: Graph.node is the pre-networkx-2.4 API (removed in 3.0).
        r = HEED_NET.node[i]['round']
        CH_Prob = P / (1 - P * (r % (1 / P)))
        # BUGFIX: `Residual == True` broadcast elementwise when an array was
        # passed, making the `if` raise ValueError; test identity instead.
        if Residual is not False:
            MAX = np.amax(Residual, axis=0)[1]
            row, col = np.where(Residual == i)
            CH_Prob *= (Residual[row][1] / MAX)
        if random.random() < CH_Prob:
            HEED_CHID.append(i)
            HEED_NET.node[i]['round'] = 0
            HEED_NET.node[i]['N_Packet'] = cf.L
        else:
            HEED_NET.node[i]['round'] += 1
            HEED_NET.node[i]['N_Packet'] = cf.NCH_L

    ## Clustering: attach every node to the nearest cluster head, defaulting
    ## to the base station (id 0, presumably located at (50, 50)).
    for i in Alive_Node:
        x1, y1 = HEED_NET.node[i]['pos']
        NN_Dist = math.sqrt((x1 - 50) ** 2 + (y1 - 50) ** 2)
        NNID = 0
        for NN in HEED_CHID:
            x2, y2 = HEED_NET.node[NN]['pos']
            new_dist = math.sqrt((x1 - x2) ** 2 + (y1 - y2) ** 2)
            if new_dist == 0:
                continue  # skip the node itself
            if new_dist < NN_Dist:
                NNID = NN
                NN_Dist = new_dist
        HEED_NET.node[i]['Next'] = NNID

    # If two nodes are linked to each other, the one closer to the BS
    # uplinks to the BS instead. (Removed unused local `CHK`.)
    for CH in HEED_CHID:
        NEXT_NODE = HEED_NET.node[CH]['Next']
        if HEED_NET.node[NEXT_NODE]['Next'] == CH:
            ## To BS distance
            x1, y1 = HEED_NET.node[CH]['pos']
            x2, y2 = HEED_NET.node[NEXT_NODE]['pos']
            dist1 = math.sqrt((x1 - 50) ** 2 + (y1 - 50) ** 2)
            dist2 = math.sqrt((x2 - 50) ** 2 + (y2 - 50) ** 2)
            if dist1 > dist2:  ## NNID to BS is nearer than node i to BS
                HEED_NET.node[NEXT_NODE]['Next'] = 0  ## BSID
            else:
                HEED_NET.node[CH]['Next'] = 0

    ## add_Edge: materialise the chosen next-hops.
    for i in Alive_Node:
        HEED_NET.add_edge(i, HEED_NET.node[i]['Next'])
    return HEED_NET, HEED_CHID, R
e6a5863616063680da1042613071ad8b14743cc5 | 779 | py | Python | src/app/views/local.py | chenweixu/bunnyc_mgr | 1243fa951c45c665442212247d682ce3d39aec08 | [
"Apache-2.0"
] | null | null | null | src/app/views/local.py | chenweixu/bunnyc_mgr | 1243fa951c45c665442212247d682ce3d39aec08 | [
"Apache-2.0"
] | null | null | null | src/app/views/local.py | chenweixu/bunnyc_mgr | 1243fa951c45c665442212247d682ce3d39aec08 | [
"Apache-2.0"
] | null | null | null | from flask import request
from flask import jsonify
from app import app
from app import work_log
from app.main.local import LocalTask
from app.utils.myencrypt import create_key, verify_key
@app.route("/api/v2/local", methods=["POST"])
def v2_local():
    """POST endpoint: verify the request key, then run a LocalTask.

    Returns the task result as JSON, 404 when the key/obj check fails,
    or a {"recode": 9, ...} JSON payload on unexpected errors.
    """
    work_log.debug(str(request.path))
    try:
        payload = request.json
        # Guard clause: reject requests with a bad key or wrong object type.
        if not (verify_key(payload.get("key")) and payload.get("obj") == "local"):
            work_log.error("req verify_key or obj error")
            return "", 404
        data = LocalTask(payload.get("content")).run()
        return jsonify(data)
    except Exception as e:
        work_log.error("host run error")
        work_log.error(str(e))
        return jsonify({"recode": 9, "redata": str(e)})
| 32.458333 | 66 | 0.629012 |
e6db7aa360b8ac1c60898da851cea8ed33860a6e | 1,325 | py | Python | kuryr/tests/unit/binding/__init__.py | openstack/kuryr | 5b38e84ef8a0a62ee44d53ddd1cda377b0d9d934 | [
"Apache-2.0"
] | 207 | 2015-07-15T01:46:02.000Z | 2022-03-24T10:06:53.000Z | kuryr/tests/unit/binding/__init__.py | MaysaMacedo/kuryr | 34439895e3c17e5b7de9a72cafc411bb97230853 | [
"Apache-2.0"
] | 1 | 2015-10-29T14:59:06.000Z | 2015-10-29T14:59:06.000Z | kuryr/tests/unit/binding/__init__.py | MaysaMacedo/kuryr | 34439895e3c17e5b7de9a72cafc411bb97230853 | [
"Apache-2.0"
] | 58 | 2015-07-22T08:29:32.000Z | 2021-08-11T08:56:11.000Z | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from oslo_utils import importutils
from kuryr.lib import binding
from kuryr.lib import exceptions
from kuryr.tests.unit import base
class TestBinding(base.TestCase):
    """Unit tests for binding module"""

    def test__verify_driver(self):
        # Whitelist only the veth driver; it must pass verification.
        cfg.CONF.set_override('enabled_drivers',
                              ['kuryr.lib.binding.drivers.veth'],
                              group='binding')
        enabled_driver = importutils.import_module('kuryr.lib.binding.drivers.veth')
        binding._verify_driver(enabled_driver)  # assert no exception raise

        # A driver absent from the whitelist must be rejected.
        disabled_driver = importutils.import_module('kuryr.lib.binding.drivers.vlan')
        self.assertRaises(exceptions.DriverNotEnabledException,
                          binding._verify_driver, disabled_driver)
| 40.151515 | 76 | 0.710943 |
7a259555bf6db4c028f73470849980701be80517 | 21,065 | py | Python | loveletter/game.py | ErikGartner/love-letter | 36995788292ea6fdfc72a8b09adad01ba6683c26 | [
"MIT"
] | null | null | null | loveletter/game.py | ErikGartner/love-letter | 36995788292ea6fdfc72a8b09adad01ba6683c26 | [
"MIT"
] | null | null | null | loveletter/game.py | ErikGartner/love-letter | 36995788292ea6fdfc72a8b09adad01ba6683c26 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Love Letter Game object
"""
import numpy as np
from loveletter.card import Card
from loveletter.player import PlayerTools, PlayerAction, PlayerActionTools
class Game():
"""A Love Letter Game"""
def __init__(self, deck, players, turn_index, action_log=None):
    """Create an immutable-style snapshot of a Love Letter game.

    Args:
        deck: remaining cards; the last entry is the held-out card.
        players: list of player tuples.
        turn_index: overall action counter.
        action_log: list of PlayerAction records so far (defaults to empty).

    BUGFIX: the default was a mutable `[]` shared across all calls; a None
    sentinel avoids the shared-mutable-default pitfall. Passing an explicit
    list still behaves exactly as before.
    """
    self._deck = deck
    self._players = players
    self._turn_index = turn_index
    self._action_log = [] if action_log is None else action_log
    total_playing = sum(
        1 for player in players if PlayerTools.is_playing(player))
    # The game is live only while 2+ players remain and cards are left.
    self._game_active = total_playing > 1 and self.cards_left() > 0
def players(self):
    """Defensive copy of the current player list."""
    return list(self._players)

def deck(self):
    """
    List of current cards.

    NOTE: The LAST card [-1] is always held out.
    """
    return self._deck

def draw_card(self):
    """
    Card currently available to the next player.

    Only valid if the game is not over (otherwise No Card).
    """
    if len(self._deck) > 1:
        return self._deck[0]
    return Card.noCard

def held_card(self):
    """Card withheld from the game."""
    return self._deck[-1]

def turn_index(self):
    """Overall turn index of the game (the actual action number)."""
    return self._turn_index

def round(self):
    """Current round number."""
    return self._turn_index // len(self._players)

def player_turn(self):
    """Player number of current player."""
    return self._turn_index % len(self._players)
def is_winner(self, idx):
    """True iff that player has won the game."""
    if self.active():
        # No winner while the game is still running.
        return False
    candidate = self._players[idx]
    if not PlayerTools.is_playing(candidate):
        return False
    # Winner iff no surviving player holds a strictly higher card.
    beaten_by = sum(p.hand_card > candidate.hand_card
                    for p in self._players if PlayerTools.is_playing(p))
    return beaten_by == 0

def winner(self):
    """Return the index of the winning player. -1 if none."""
    for idx in range(len(self._players)):
        if self.is_winner(idx):
            return idx
    return -1
def player(self):
    """Returns the current player."""
    return self._players[self.player_turn()]

def opponents(self):
    """Returns the opposing (still playing) players."""
    current = self.player_turn()
    return [p for idx, p in enumerate(self._players)
            if idx != current and PlayerTools.is_playing(p)]

def opponent_turn(self):
    """Returns the indices of the opposing (still playing) players."""
    current = self.player_turn()
    return [idx for idx, p in enumerate(self._players)
            if idx != current and PlayerTools.is_playing(p)]
def cards_left(self):
    """
    Number of cards left in deck to distribute.

    Does not include the held back card.
    """
    return len(self._deck) - 1

def active(self):
    """Return True if the game is still playing."""
    return self._game_active

def over(self):
    """Return True if the game is over."""
    return not self.active()

def is_current_player_playing(self):
    """True if the current player has not been eliminated."""
    return PlayerTools.is_playing(self.player())

def skip_eliminated_player(self, throw=False):
    """If the current player is eliminated, advance past them with a blank action."""
    if self.is_current_player_playing():
        return self
    return self._move(PlayerActionTools.blank(), throw)
def state_hand(self):
"""
Grab whats in players hand and record it as a one hot encoded array.
The result is a 16 length binary one hot encoded array
"""
# whats in hand
card_number1 = self.player().hand_card
card_number2 = self.deck()[0]
cardnumbers = [card_number1, card_number2]
cardnumbers.sort()
# initialize arrays
card1 = np.zeros(8)
card2 = np.zeros(8)
# encode whats in hand to array
card1[cardnumbers[0] - 1] = 1
card2[cardnumbers[1] - 1] = 1
return np.concatenate([card1, card2])
def consumed_cards(self):
    """
    Looks at discarded cards (plus the two cards in hand) and returns, per
    card type 1..8, the fraction of its copies already seen.
    """
    # NOTE(review): np.array over per-player discard lists assumes equal
    # lengths; presumably discard counts stay aligned during play — verify.
    discards_per_player = [Game.player_to_discards(p) for p in self.players()]
    cards_discarded = np.array(discards_per_player).flatten()
    cards_hand = [self.player().hand_card, self.deck()[0]]
    cards_all = np.concatenate([cards_discarded, cards_hand])
    card_bins = np.bincount(cards_all, minlength=9)[1:9]
    return card_bins / Card.counts

@staticmethod
def player_to_discards(player):
    """Returns a list of all cards discarded by player."""
    return [action.discard for action in player.actions]
def state(self):
    """
    Combine the hand encoding, consumed-card fractions and the action-log
    encoding into a single 1-d float vector (16 + 8 + 15*88 entries).
    """
    return np.concatenate([self.state_hand(),
                           self.consumed_cards(),
                           self.state_action_log()])
def relative_player_idx(self, player_idx, observing_player):
    """
    Translate an absolute player index into one relative to the observing
    player; the observer always sees itself as player 0.
    """
    return (player_idx - observing_player) % len(self._players)

def absolute_player_idx(self, relative_player_idx, observing_player):
    """
    Translate a relative player index back to the absolute player index.
    """
    return (relative_player_idx + observing_player) % len(self._players)
def state_action_log(self, observing_player=None):
"""
Creates the state representation of the action log.
"""
if observing_player is None:
observing_player = self.player_turn()
#print(self.player_turn())
log = list(reversed([self._action_to_np(action, observing_player)
for action in self._action_log]))
if len(log) == 0:
return np.zeros(15 * 88)
padded_log = np.pad(np.array(log), ((0, 15 - len(self._action_log)), (0, 0)),
'constant').flatten()
return padded_log
def _action_to_np(self, action, observing_player):
player = np.zeros(4)
# Relative player index (maybe?)
player_index = self.relative_player_idx(action.player,
observing_player)
player[player_index] = 1
played_card = np.zeros(8)
played_card[action.discard - 1] = 1
target = np.zeros(4)
target_index = self.relative_player_idx(action.player_target,
observing_player)
target[target_index] = 1
guessed_card = np.zeros(8)
if action.guess > 0:
guessed_card[action.guess - 1] = 1
force_discard = np.zeros(32)
if action.force_discarded > 0:
i = self.relative_player_idx(action.force_discarder, observing_player)
force_discard[i*8 + action.force_discarded-1] = 1
revealed_card = np.zeros(32)
if action.revealed_card > 0:
i = self.relative_player_idx(action.player_target, observing_player)
revealed_card[i*8 + action.revealed_card-1] = 1
log_bits = np.concatenate([player, played_card, target, guessed_card,
force_discard, revealed_card])
return log_bits
def _reward(self, game, action):
"""
Record current reward.
"""
if game.active():
if self.is_action_valid(action):
return 0
else:
return -1
elif game.winner() == self.turn_index():
return 30
return -10
def move(self, action, throw=False):
    """Current player makes an action.

    Returns (NewGame and Reward)<Game,int>.
    NOTE(review): `throw` is accepted but not forwarded to _move — confirm
    whether that is intentional.
    """
    game = self._move(action)
    return game, self._reward(game, action)

def _move(self, action, throw=False):
    """Apply `action` for the current player and return the successor Game."""
    if self.over() or not self.is_action_valid(action):
        return self._invalid_input(throw)

    # An eliminated player submits a blank action: just advance the turn.
    if action.discard == Card.noCard:
        return Game(self.deck(), self.players(), self.turn_index() + 1,
                    self._action_log)

    mover = self.player()
    player_hand = [mover.hand_card, self._deck[0]]
    player_hand_new = Game.new_hand_card(action.discard, player_hand)
    deck_new = self._deck[1:]

    # Princess: discarding it (even voluntarily) eliminates the player.
    if action.discard == Card.princess:
        return self._move_princess(self._deck[0], action, deck_new)

    # Priest: the action record must be augmented with revealed knowledge.
    if action.discard == Card.priest:
        return self._move_priest(action, player_hand_new, deck_new)

    # Record the move on the acting player for the remaining card types.
    mover = PlayerTools.move(self.player(), player_hand_new, action)
    current_players = Game._set_player(
        self._players, mover, self.player_turn())

    if action.discard == Card.baron:
        return self._move_baron(action, current_players, player_hand_new, deck_new)

    # Handmaid and countess have no extra effect beyond the discard.
    if action.discard == Card.handmaid or \
       action.discard == Card.countess:
        action_updated = action._replace(player=self.player_turn())
        return Game(deck_new, current_players, self._turn_index + 1,
                    [*self._action_log, action_updated])

    if action.discard == Card.guard:
        return self._move_guard(current_players, action, deck_new)
    if action.discard == Card.prince:
        return self._move_prince(current_players, action, deck_new)
    if action.discard == Card.king:
        return self._move_king(current_players, action, deck_new)
    raise NotImplementedError("Missing game logic")
def _move_guard(self, current_players, action, deck_new):
    """
    Handle a guard action into a new game state.

    The player guesses the target's card; a correct guess against an
    undefended target eliminates that target.
    """
    target = self._players[action.player_target]
    if target.hand_card == action.guess and not PlayerTools.is_defended(target):
        # Correct guess: the target is out of the game.
        eliminated = PlayerTools.force_discard(target)
        current_players = Game._set_player(
            current_players, eliminated, action.player_target)
    action_updated = action._replace(player=self.player_turn())
    return Game(deck_new, current_players, self._turn_index + 1,
                [*self._action_log, action_updated])
def _move_priest(self, action, player_hand_new, deck_new):
    """
    Handle a priest action into a new game state.

    The acting player learns the target's hand card (noCard if the target
    is defended); that knowledge is stored on the action record.
    """
    target = self._players[action.player_target]
    if PlayerTools.is_defended(target):
        revealed = Card.noCard
    else:
        revealed = target.hand_card
    action_updated = action._replace(player=self.player_turn(),
                                     revealed_card=revealed)
    mover = PlayerTools.move(
        self.player(), player_hand_new, action_updated)
    current_players = Game._set_player(
        self._players, mover, self.player_turn())
    return Game(deck_new, current_players, self._turn_index + 1,
                [*self._action_log, action_updated])
def _move_baron(self, action, current_players, player_hand_new, deck_new):
    """
    Handle a baron action into a new game state.

    The two players compare hand cards; the holder of the lower card is
    eliminated (ties eliminate nobody, but reveal the target's card).
    """
    card_target = self._players[action.player_target].hand_card
    if player_hand_new > card_target:
        if not PlayerTools.is_defended(self._players[action.player_target]):
            # Target holds the lower card and is undefended: eliminated.
            eliminated = PlayerTools.force_discard(
                self._players[action.player_target])
            current_players = Game._set_player(
                current_players, eliminated, action.player_target)
            action_updated = action._replace(player=self.player_turn(),
                                             force_discarded=card_target,
                                             force_discarder=action.player_target)
        else:
            action_updated = action._replace(player=self.player_turn())
    elif player_hand_new == card_target:
        # Tie: nobody is eliminated, but the comparison reveals the card.
        action_updated = action._replace(player=self.player_turn(),
                                         revealed_card=card_target)
    else:
        # The acting player holds the lower card and is eliminated.
        mover = PlayerTools.force_discard(self.player(), player_hand_new)
        mover = PlayerTools.force_discard(mover)
        current_players = Game._set_player(
            current_players, mover, self.player_turn())
        action_updated = action._replace(player=self.player_turn(),
                                         force_discarded=player_hand_new,
                                         force_discarder=action.player)
    return Game(deck_new, current_players, self._turn_index + 1,
                [*self._action_log, action_updated])
def _move_prince(self, current_players, action, deck_new):
    """Handle a prince action: the target discards their hand and draws a
    fresh card (discarding the princess this way eliminates the target)."""
    target_before = current_players[action.player_target]
    action_updated = action._replace(player=self.player_turn(),
                                     force_discarded=target_before.hand_card,
                                     force_discarder=action.player_target)

    # With no drawable card remaining the prince has no effect.
    if len(deck_new) - 1 < 1:
        return Game(deck_new, current_players, self._turn_index + 1,
                    [*self._action_log, action_updated])

    if target_before.hand_card == Card.princess:
        # Forced princess discard eliminates the target; deck untouched.
        target_after = PlayerTools.force_discard(target_before)
        deck_final = deck_new
    else:
        # Otherwise the target discards and draws the next deck card.
        target_after = PlayerTools.force_discard(target_before, deck_new[0])
        deck_final = deck_new[1:]

    current_players = Game._set_player(
        current_players, target_after, action.player_target)
    return Game(deck_final, current_players, self._turn_index + 1,
                [*self._action_log, action_updated])
def _move_king(self, current_players, action, deck_new):
    """Handle a king action: swap hand cards with the target player."""
    mover = current_players[self.player_turn()]
    target = current_players[action.player_target]
    # Each player receives the other's hand card.
    current_players = Game._set_player(
        current_players, PlayerTools.set_hand(mover, target.hand_card),
        self.player_turn())
    current_players = Game._set_player(
        current_players, PlayerTools.set_hand(target, mover.hand_card),
        action.player_target)
    action_updated = action._replace(player=self.player_turn())
    return Game(deck_new, current_players, self._turn_index + 1,
                [*self._action_log, action_updated])
def _move_princess(self, dealt_card, action, new_deck):
    """Handle a princess action: discarding the princess eliminates the
    acting player, who also discards the freshly dealt card."""
    mover = PlayerTools.force_discard(self.player(), dealt_card)
    mover = PlayerTools.force_discard(mover)
    updated_players = Game._set_player(
        self._players, mover, self.player_turn())
    action_updated = action._replace(player=self.player_turn())
    return Game(new_deck, updated_players, self._turn_index + 1,
                [*self._action_log, action_updated])
def is_action_valid(self, action):
    """Tests if an action is valid given the current game state."""
    mover = self.player()

    # An eliminated player may only submit a blank action.
    if mover.hand_card == Card.noCard:
        return PlayerActionTools.is_blank(action)

    target_player = self._players[action.player_target]
    player_hand = [mover.hand_card, self._deck[0]]

    # Cannot discard a card not in the hand.
    if action.discard not in player_hand:
        return False

    new_hand_card = Game.new_hand_card(action.discard, player_hand)
    # The countess must be discarded when held alongside king/prince.
    if new_hand_card == Card.countess and \
            (action.discard == Card.prince or action.discard == Card.king):
        return False

    # Cannot target an eliminated player.
    if not self._is_valid_player_target(action.player_target):
        return False

    # Other-only cards may target self only when every opponent is
    # eliminated or protected by a handmaid.
    if self.player_turn() == action.player_target and action.discard in Card.only_other:
        return all(not PlayerTools.is_playing(p) or PlayerTools.is_defended(p)
                   for p in self._players if p is not mover)

    # Self-only cards may not target others.
    if self.player_turn() != action.player_target and action.discard in Card.only_self:
        return False
    if not PlayerTools.is_playing(target_player):
        return False
    # Defended targets are untouchable (unless targeting oneself).
    if PlayerTools.is_defended(target_player) and mover != target_player:
        return False
    # A guard guess may be neither "guard" nor "no card".
    if action.discard == Card.guard and (
            action.guess == Card.guard or action.guess == Card.noCard):
        return False
    return True
def _is_valid_player_target(self, player_target):
"""True iff the player can be targeted by an action"""
return PlayerTools.is_playing(self._players[player_target])
def _invalid_input(self, throw):
"""Throw if true, otherwise return current game"""
if throw:
raise Exception("Invalid Move")
return self
def to_str(self):
    """Returns a string[] representation of the game."""
    strings = [
        "━" * 79,
        "Game is active" if self.active() else "Game is over",
        "Round:{: >2} | Cards Left:{: >2} | Withheld Card: {: >10} ".format(
            self.round(), self.cards_left(), Card.render_card_number(self.held_card())),
        ""
    ]
    for idx, player in enumerate(self._players):
        strings.extend(self._to_str_player(idx, player))
    strings.append("")
    return strings

def _to_str_player(self, idx, player):
    """Two display lines (header + state) for one player."""
    is_playing = " " if PlayerTools.is_playing(player) else "☠️"
    is_turn = "⭐" if self.player_turn() == idx else " "
    # Show the drawable card only for the active player of a live game.
    if self.active() and self.player_turn() == idx:
        draw_card = self.draw_card()
    else:
        draw_card = Card.noCard
    draw_card_render = Card.render_card_number(draw_card)
    header = "Player {} {} {}".format(idx, is_turn, is_playing)
    state = "  Current: {} {}".format(
        draw_card_render, PlayerTools.to_str(player))
    return [header, state]
@staticmethod
def _set_player(players, player_new, player_new_index):
"""Return a fresh copy of players with the new player in the index"""
players_new = players[:]
players_new[player_new_index] = player_new
return players_new
@staticmethod
def new_hand_card(card_discard, hand):
"""New hand card based on current cards in hand"""
new_hand = list(filter(lambda card: card != card_discard, hand))
if len(new_hand) < 1:
# this means the hand contained only one card. so one still remains
return card_discard
return int(new_hand[0])
@staticmethod
def new(player_count=4, seed=451):
"""Create a brand new game"""
deck = Card.shuffle_deck(seed)
dealt_cards = deck[:player_count]
undealt_cards = deck[player_count:]
players = list(map(PlayerTools.blank, dealt_cards))
return Game(undealt_cards, players, 0, [])
| 37.415631 | 96 | 0.609352 |
29147289bf39386e0a135df40adfed5bd69b9a16 | 1,830 | py | Python | src/pdc2/scripts/taxon_name_subst.py | jlanga/smsk_selection | 08070c6d4a6fbd9320265e1e698c95ba80f81123 | [
"MIT"
] | 4 | 2021-07-18T05:20:20.000Z | 2022-01-03T10:22:33.000Z | src/pdc2/scripts/taxon_name_subst.py | jlanga/smsk_selection | 08070c6d4a6fbd9320265e1e698c95ba80f81123 | [
"MIT"
] | 1 | 2017-08-21T07:26:13.000Z | 2018-11-08T13:59:48.000Z | src/pdc2/scripts/taxon_name_subst.py | jlanga/smsk_orthofinder | 08070c6d4a6fbd9320265e1e698c95ba80f81123 | [
"MIT"
] | 2 | 2021-07-18T05:20:26.000Z | 2022-03-31T18:23:31.000Z | import sys,os
import phylo3,newick3
import tree_utils
"""
to change sequence names with taxa names and make trees more readable,
and output a new file named infile.names
Create a tabular file that each line contains
code taxon_name
separated by tab
"""
if __name__ == "__main__":
if len(sys.argv) != 3:
print "python taxon_name_subst.py table treefile"
sys.exit(0)
DICT = {} #key is seq acronym, value is full taxon name, separated by tab
with open(sys.argv[1], "rU") as infile:
for line in infile:
spls = line.strip().split("\t")
if len(spls) > 1:
DICT[spls[0]] = spls[1]
print DICT
#DIR = sys.argv[2]+"/"
#for i in os.listdir(DIR):
treefile = sys.argv[2]
"""
#for alignments in fasta format
infile = open(DIR+i,"r")
outfile = open(DIR+i+".names","w")
for line in infile:
if line[0] == ">":
if "@" in line: #for homolog alignments
spls = (line[1:].strip()).split("@")
taxonID, seqID = spls[0],spls[1]
outfile.write('>'+DICT[taxonID]+"@"+seqID+"\n")
else: #for ortho and species alignments
id = line[1:].strip()
if id in DICT:
outfile.write('>'+DICT[id]+"\n")
else: outfile.write(line)
else: #no change
outfile.write(line)
infile.close()
outfile.close()
"""
with open(treefile,"r") as infile:
intree = newick3.parse(infile.readline())
for i in intree.iternodes():
if i.istip:
print i.label
if "@" in i.label: #for homolog trees
spls = (i.label).split("@")
taxonID, seqID = spls[0],spls[1]
i.label = taxonID+"_"+DICT[taxonID]+"@"+seqID
else: #for ortho and species trees with seqID removed
try:
i.label += "_"+DICT[i.label]
#i.label = DICT[i.label]
except:
print i.lable,"not in the taxon table provided"
with open(treefile+".name","w") as outfile:
outfile.write(newick3.tostring(intree)+";\n")
| 26.521739 | 74 | 0.642623 |
fce9947a74af7090a82dad2dbcd8cdb5083b09fd | 11,428 | py | Python | web-service/main.py | emergentdevices/physical-web | e5f8cc6a56b04bc39f4dd78859ed0779105bbf49 | [
"Apache-2.0"
] | null | null | null | web-service/main.py | emergentdevices/physical-web | e5f8cc6a56b04bc39f4dd78859ed0779105bbf49 | [
"Apache-2.0"
] | null | null | null | web-service/main.py | emergentdevices/physical-web | e5f8cc6a56b04bc39f4dd78859ed0779105bbf49 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
#
# Copyright 2014 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import webapp2
import json
import logging
from datetime import datetime, timedelta
from google.appengine.ext import ndb
from google.appengine.api import urlfetch
from urlparse import urljoin
from urlparse import urlparse
import os
import re
from lxml import etree
import cgi
from google.appengine.api import urlfetch_errors
class BaseModel(ndb.Model):
    """Common ancestor adding created/updated timestamps to datastore models."""
    added_on = ndb.DateTimeProperty(auto_now_add=True)  # set once on first put
    updated_on = ndb.DateTimeProperty(auto_now=True)    # refreshed on every put
class SiteInformation(BaseModel):
    """Cached page metadata for a physical-web URL."""
    url = ndb.TextProperty()          # resolved URL of the page
    favicon_url = ndb.TextProperty()
    title = ndb.TextProperty()
    description = ndb.TextProperty()
    jsonlds = ndb.TextProperty()      # JSON-encoded json-ld payload (json.loads'd on read)
class DemoMetadata(webapp2.RequestHandler):
    """GET handler returning metadata for a fixed set of demo URLs."""

    def get(self):
        demo_objects = [
            {'url': 'http://www.caltrain.com/schedules/realtime/stations/mountainviewstation-mobile.html'},
            {'url': 'http://benfry.com/distellamap/'},
            {'url': 'http://en.wikipedia.org/wiki/Le_D%C3%A9jeuner_sur_l%E2%80%99herbe'},
            {'url': 'http://sfmoma.org'}
        ]
        output = {"metadata": BuildResponse(demo_objects)}
        self.response.headers['Content-Type'] = 'application/json'
        self.response.write(json.dumps(output))
class ResolveScan(webapp2.RequestHandler):
    """POST handler resolving scanned URLs/ids to cached site metadata."""

    def post(self):
        input_object = json.loads(self.request.body)  # Data is not sanitised.
        objects = input_object["objects"] if "objects" in input_object else []
        output = {"metadata": BuildResponse(objects)}
        self.response.headers['Content-Type'] = 'application/json'
        self.response.write(json.dumps(output))
def BuildResponse(objects):
    """Resolve a list of scanned objects into page-metadata dicts.

    Each entry in ``objects`` is a dict carrying either an "id" or a "url"
    key (and optionally "force" to bypass the cache).  Returns a list of
    dicts with "id"/"url" plus "title", "description", "icon" and
    "json-ld" when cached site information is available.
    """
    metadata_output = []
    # Resolve the devices
    for obj in objects:
        key_id = None
        url = None
        force = False
        valid = True
        siteInfo = None
        if "id" in obj:
            key_id = obj["id"]
            # NOTE(review): on this path `url` stays None, so the lookup
            # below uses None as the Datastore id — presumably "id"-only
            # requests are not expected here; verify against callers.
        elif "url" in obj:
            key_id = obj["url"]
            url = obj["url"]
            parsed_url = urlparse(url)
            # Only http(s) URLs are resolvable.
            if parsed_url.scheme != 'http' and parsed_url.scheme != 'https':
                valid = False
        # Any "force" key (regardless of its value) bypasses the cache.
        if "force" in obj:
            force = True
        # We need to go and fetch.  We probably want to asyncly fetch.
        # We don't need RSSI yet.
        #rssi = obj["rssi"]
        if valid:
            # Really if we don't have the data we should not return it.
            siteInfo = SiteInformation.get_by_id(url)
            if force or siteInfo is None or siteInfo.updated_on < datetime.now() - timedelta(minutes=5):
                # If we don't have the data or it is older than 5 minutes, fetch.
                siteInfo = FetchAndStoreUrl(siteInfo, url)
        device_data = {};
        if siteInfo is not None:
            device_data["id"] = url
            device_data["url"] = siteInfo.url
            if siteInfo.title is not None:
                device_data["title"] = siteInfo.title
            if siteInfo.description is not None:
                device_data["description"] = siteInfo.description
            if siteInfo.favicon_url is not None:
                device_data["icon"] = siteInfo.favicon_url
            if siteInfo.jsonlds is not None:
                device_data["json-ld"] = json.loads(siteInfo.jsonlds)
        else:
            # No cached info: echo the URL back unchanged.
            device_data["id"] = url
            device_data["url"] = url
        metadata_output.append(device_data)
    return metadata_output
def FetchAndStoreUrl(siteInfo, url):
    """Fetch ``url`` and (re)index its metadata into Datastore.

    Returns the stored ``SiteInformation`` entity, or None when the fetch
    times out.  Note: any non-200 response also falls through and returns
    None implicitly.
    """
    # Index the page
    try:
        result = urlfetch.fetch(url, validate_certificate = True)
    except urlfetch_errors.DeadlineExceededError:
        return None
    if result.status_code == 200:
        encoding = GetContentEncoding(result.content)
        final_url = GetExpandedURL(url)
        return StoreUrl(siteInfo, url, final_url, result.content, encoding)
def GetExpandedURL(url):
    """Follow one level of URL-shortener redirection.

    If ``url`` is hosted on a known shortener and has a non-trivial path,
    issue a HEAD request (without following redirects) and return the
    ``Location`` of a 301 response.  Otherwise return ``url`` unchanged.
    """
    parsed_url = urlparse(url)
    final_url = url
    url_shorteners = ['t.co', 'goo.gl', 'bit.ly', 'j.mp', 'bitly.com',
        'amzn.to', 'fb.com', 'bit.do', 'adf.ly', 'u.to', 'tinyurl.com',
        'buzurl.com', 'yourls.org', 'qr.net']
    url_shorteners_set = set(url_shorteners)
    # Bug fix: the original used `path != '/' or path != ''`, which is a
    # tautology (always true).  The intent is to skip URLs whose path is
    # empty or just "/", since those cannot be shortened links.
    if parsed_url.netloc in url_shorteners_set and parsed_url.path not in ('', '/'):
        # expand
        result = urlfetch.fetch(url, method = 'HEAD', follow_redirects = False)
        if result.status_code == 301:
            final_url = result.headers['location']
    return final_url
def GetContentEncoding(content):
    """Best-effort detection of an HTML document's character encoding.

    Checks, in order: the charset parameter of a
    <meta http-equiv="Content-Type"> tag, then a <meta charset=...> tag,
    and finally falls back to trial-decoding the raw bytes (utf-8, else
    iso-8859-1).
    """
    encoding = None
    parser = etree.HTMLParser(encoding='iso-8859-1')
    htmltree = etree.fromstring(content, parser)
    value = htmltree.xpath("//head//meta[@http-equiv='Content-Type']/attribute::content")
    # (The original guarded this with a redundant `if encoding is None`
    # immediately after setting encoding = None; removed.)
    if (len(value) > 0):
        content_type = value[0]
        _, params = cgi.parse_header(content_type)
        if 'charset' in params:
            encoding = params['charset']

    if encoding is None:
        value = htmltree.xpath("//head//meta/attribute::charset")
        if (len(value) > 0):
            encoding = value[0]

    if encoding is None:
        try:
            # Probe decode only; the decoded value itself is not needed
            # (the original bound it to an unused local).
            encoding = 'utf-8'
            unicode(content, 'utf-8')
        except UnicodeDecodeError:
            encoding = 'iso-8859-1'

    return encoding
def FlattenString(input):
    """Collapse all runs of whitespace in ``input`` to single spaces and trim.

    Equivalent to the original strip/replace/collapse loop, but performs the
    whole normalisation in one linear regex pass instead of repeatedly
    rescanning the string (the `while "  " in input` loop was quadratic in
    the worst case).
    """
    # [ \t\n\r\v\f] is exactly the character set the original replaced.
    return re.sub(r'[ \t\n\r\v\f]+', ' ', input).strip()
def StoreUrl(siteInfo, url, final_url, content, encoding):
    """Extract title/description/icon/JSON-LD from ``content`` and persist it.

    ``url`` is the Datastore key (the originally scanned URL); ``final_url``
    is the shortener-expanded URL used to resolve relative icon links.
    Creates a new SiteInformation entity when ``siteInfo`` is None,
    otherwise updates it in place.  Returns the stored entity.
    """
    title = None
    description = None
    icon = None

    # parse the content
    parser = etree.HTMLParser(encoding=encoding)
    htmltree = etree.fromstring(content, parser)

    # Title: <title>, falling back to <meta property="og:title">.
    value = htmltree.xpath("//head//title/text()");
    if (len(value) > 0):
        title = value[0]
    if title is None:
        value = htmltree.xpath("//head//meta[@property='og:title']/attribute::content");
        if (len(value) > 0):
            title = value[0]
    if title is not None:
        title = FlattenString(title)

    # Try to use <meta name="description" content="...">.
    value = htmltree.xpath("//head//meta[@name='description']/attribute::content")
    if (len(value) > 0):
        description = value[0]
    if description is not None and len(description) == 0:
        description = None
    # A description identical to the title adds nothing; discard it.
    if description == title:
        description = None

    # Try to use <meta property="og:description" content="...">.
    if description is None:
        value = htmltree.xpath("//head//meta[@property='og:description']/attribute::content")
        description = ' '.join(value)
        if len(description) == 0:
            description = None

    # Try to use <div class="content">...</div>.
    if description is None:
        value = htmltree.xpath("//body//*[@class='content']//*[not(*|self::script|self::style)]/text()")
        description = ' '.join(value)
        if len(description) == 0:
            description = None

    # Try to use <div id="content">...</div>.
    if description is None:
        value = htmltree.xpath("//body//*[@id='content']//*[not(*|self::script|self::style)]/text()")
        description = ' '.join(value)
        if len(description) == 0:
            description = None

    # Fallback on <body>...</body>.
    if description is None:
        value = htmltree.xpath("//body//*[not(*|self::script|self::style)]/text()")
        description = ' '.join(value)
        if len(description) == 0:
            description = None

    # Cleanup.
    if description is not None:
        description = FlattenString(description)
        # Cap the stored description at 500 characters.
        if len(description) > 500:
            description = description[:500]

    # Icon: try the common <link rel=...> variants, then og:image.
    if icon is None:
        value = htmltree.xpath("//head//link[@rel='shortcut icon']/attribute::href");
        if (len(value) > 0):
            icon = value[0]
    if icon is None:
        value = htmltree.xpath("//head//link[@rel='icon']/attribute::href");
        if (len(value) > 0):
            icon = value[0]
    if icon is None:
        value = htmltree.xpath("//head//link[@rel='apple-touch-icon-precomposed']/attribute::href");
        if (len(value) > 0):
            icon = value[0]
    if icon is None:
        value = htmltree.xpath("//head//link[@rel='apple-touch-icon']/attribute::href");
        if (len(value) > 0):
            icon = value[0]
    if icon is None:
        value = htmltree.xpath("//head//meta[@property='og:image']/attribute::content");
        if (len(value) > 0):
            icon = value[0]

    # Resolve relative icon paths against the final URL; default to
    # /favicon.ico when nothing was declared.
    if icon is not None:
        icon = urljoin(final_url, icon)
    if icon is None:
        icon = urljoin(final_url, "/favicon.ico")
    # make sure the icon exists
    result = urlfetch.fetch(icon, method = 'HEAD')
    if result.status_code != 200:
        icon = None

    # Collect embedded JSON-LD blocks, skipping any that fail to parse.
    jsonlds = []
    value = htmltree.xpath("//head//script[@type='application/ld+json']/text()");
    for jsonldtext in value:
        jsonldobject = None
        try:
            jsonldobject = json.loads(jsonldtext) # Data is not sanitised.
        except UnicodeDecodeError:
            jsonldobject = None
        if jsonldobject is not None:
            jsonlds.append(jsonldobject)

    if (len(jsonlds) > 0):
        jsonlds_data = json.dumps(jsonlds);
        logging.info(jsonlds_data)
    else:
        jsonlds_data = None

    if siteInfo is None:
        siteInfo = SiteInformation.get_or_insert(url,
            url = final_url,
            title = title,
            favicon_url = icon,
            description = description,
            jsonlds = jsonlds_data)
    else:
        # update the data because it already exists
        siteInfo.url = final_url
        siteInfo.title = title
        siteInfo.favicon_url = icon
        siteInfo.description = description
        siteInfo.jsonlds = jsonlds_data
        siteInfo.put()
    return siteInfo
class Index(webapp2.RequestHandler):
    """Root ('/') handler: responds with an empty body."""
    def get(self):
        self.response.out.write("")
# WSGI entry point: route table for the App Engine service.
app = webapp2.WSGIApplication([
    ('/', Index),
    ('/resolve-scan', ResolveScan),
    ('/demo', DemoMetadata)
], debug=True)
| 34.215569 | 108 | 0.587592 |
98040cbc1c7deecbf212228e3e4aecc3130da852 | 28,250 | py | Python | env/lib/python3.8/site-packages/Crypto/PublicKey/RSA.py | juansjimenez/goodocity-backend | 77b2ab3f11047e2896e81358b8d8c63d7952b521 | [
"MIT"
] | 1 | 2020-09-26T02:27:05.000Z | 2020-09-26T02:27:05.000Z | env/lib/python3.8/site-packages/Crypto/PublicKey/RSA.py | juansjimenez/goodocity-backend | 77b2ab3f11047e2896e81358b8d8c63d7952b521 | [
"MIT"
] | 12 | 2021-04-11T19:46:06.000Z | 2021-06-18T16:08:37.000Z | env/lib/python3.8/site-packages/Crypto/PublicKey/RSA.py | juansjimenez/goodocity-backend | 77b2ab3f11047e2896e81358b8d8c63d7952b521 | [
"MIT"
] | 1 | 2018-07-06T03:48:08.000Z | 2018-07-06T03:48:08.000Z | # ===================================================================
#
# Copyright (c) 2016, Legrandin <helderijs@gmail.com>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ===================================================================
"""RSA public-key cryptography algorithm (signature and encryption).
RSA_ is the most widespread and used public key algorithm. Its security is
based on the difficulty of factoring large integers. The algorithm has
withstood attacks for 30 years, and it is therefore considered reasonably
secure for new designs.
The algorithm can be used for both confidentiality (encryption) and
authentication (digital signature). It is worth noting that signing and
decryption are significantly slower than verification and encryption.
The cryptographic strength is primarily linked to the length of the modulus *n*.
In 2012, a sufficient length is deemed to be 2048 bits. For more information,
see the most recent ECRYPT_ report.
Both RSA ciphertext and RSA signature are as big as the modulus *n* (256
bytes if *n* is 2048 bit long).
This module provides facilities for generating fresh, new RSA keys,
constructing them from known components, exporting them, and importing them.
>>> from Crypto.PublicKey import RSA
>>>
>>> key = RSA.generate(2048)
>>> f = open('mykey.pem','w')
>>> f.write(key.exportKey('PEM'))
>>> f.close()
...
>>> f = open('mykey.pem','r')
>>> key = RSA.import_key(f.read())
Even though you may choose to directly use the methods of an RSA key object
to perform the primitive cryptographic operations (e.g. `RsaKey._encrypt`),
it is recommended to use one of the standardized schemes instead (like
`Crypto.Cipher.PKCS1_v1_5` or `Crypto.Signature.PKCS1_v1_5`).
.. _RSA: http://en.wikipedia.org/wiki/RSA_%28algorithm%29
.. _ECRYPT: http://www.ecrypt.eu.org/documents/D.SPA.17.pdf
:sort: generate,construct,import_key
"""
__all__ = ['generate', 'construct', 'import_key',
'RsaKey', 'oid']
import binascii
import struct
from Crypto import Random
from Crypto.IO import PKCS8, PEM
from Crypto.Util.py3compat import tobytes, bord, bchr, b, tostr
from Crypto.Util.asn1 import DerSequence
from Crypto.Math.Numbers import Integer
from Crypto.Math.Primality import (test_probable_prime,
generate_probable_prime, COMPOSITE)
from Crypto.PublicKey import (_expand_subject_public_key_info,
_create_subject_public_key_info,
_extract_subject_public_key_info)
class RsaKey(object):
    """Class defining an actual RSA key.

    :undocumented: __init__, __repr__, __getstate__, __eq__, __ne__, __str__,
      sign, verify, encrypt, decrypt, blind, unblind, size
    """

    def __init__(self, **kwargs):
        """Build an RSA key.

        :Keywords:
          n : integer
            The modulus.
          e : integer
            The public exponent.
          d : integer
            The private exponent. Only required for private keys.
          p : integer
            The first factor of the modulus. Only required for private keys.
          q : integer
            The second factor of the modulus. Only required for private keys.
          u : integer
            The CRT coefficient (inverse of p modulo q). Only required for
            private keys.
        """
        # A key must carry either exactly the public components (n, e) or
        # the full private set (n, e, d, p, q, u) - nothing in between.
        input_set = set(kwargs.keys())
        public_set = set(('n', 'e'))
        private_set = public_set | set(('p', 'q', 'd', 'u'))
        if input_set not in (private_set, public_set):
            raise ValueError("Some RSA components are missing")
        # Components are stored with a leading underscore (self._n, ...).
        for component, value in list(kwargs.items()):
            setattr(self, "_" + component, value)

    @property
    def n(self):
        """Modulus"""
        return int(self._n)

    @property
    def e(self):
        """Public exponent"""
        return int(self._e)

    @property
    def d(self):
        """Private exponent"""
        if not self.has_private():
            raise AttributeError("No private exponent available for public keys")
        return int(self._d)

    @property
    def p(self):
        """First factor of the modulus"""
        if not self.has_private():
            raise AttributeError("No CRT component 'p' available for public keys")
        return int(self._p)

    @property
    def q(self):
        """Second factor of the modulus"""
        if not self.has_private():
            raise AttributeError("No CRT component 'q' available for public keys")
        return int(self._q)

    @property
    def u(self):
        """Chinese remainder component (inverse of *p* modulo *q*)"""
        if not self.has_private():
            raise AttributeError("No CRT component 'u' available for public keys")
        return int(self._u)

    def size_in_bits(self):
        """Size of the RSA modulus in bits"""
        return self._n.size_in_bits()

    def size_in_bytes(self):
        """The minimal amount of bytes that can hold the RSA modulus"""
        return (self._n.size_in_bits() - 1) // 8 + 1

    def _encrypt(self, plaintext):
        # Raw (textbook) RSA: c = m^e mod n.  Use a padding scheme on top.
        if not 0 < plaintext < self._n:
            raise ValueError("Plaintext too large")
        return int(pow(Integer(plaintext), self._e, self._n))

    def _decrypt(self, ciphertext):
        # Raw RSA decryption with blinding and a CRT speed-up.
        if not 0 < ciphertext < self._n:
            raise ValueError("Ciphertext too large")
        if not self.has_private():
            raise TypeError("This is not a private key")

        # Blinded RSA decryption (to prevent timing attacks):
        # Step 1: Generate random secret blinding factor r,
        # such that 0 < r < n-1
        r = Integer.random_range(min_inclusive=1, max_exclusive=self._n)
        # Step 2: Compute c' = c * r**e mod n
        cp = Integer(ciphertext) * pow(r, self._e, self._n) % self._n
        # Step 3: Compute m' = c'**d mod n       (ordinary RSA decryption)
        # CRT: compute the result modulo p and q separately, then combine
        # with Garner's recombination using u = p^-1 mod q.
        m1 = pow(cp, self._d % (self._p - 1), self._p)
        m2 = pow(cp, self._d % (self._q - 1), self._q)
        h = m2 - m1
        while h < 0:
            h += self._q
        h = (h * self._u) % self._q
        mp = h * self._p + m1
        # Step 4: Compute m = m**(r-1) mod n
        result = (r.inverse(self._n) * mp) % self._n
        # Verify no faults occurred (re-encrypt and compare).
        if ciphertext != pow(result, self._e, self._n):
            raise ValueError("Fault detected in RSA decryption")
        return result

    def has_private(self):
        # Private keys carry the private exponent attribute _d.
        return hasattr(self, "_d")

    def can_encrypt(self):
        return True

    def can_sign(self):
        return True

    def publickey(self):
        # Return a new key holding only the public half (n, e).
        return RsaKey(n=self._n, e=self._e)

    def __eq__(self, other):
        if self.has_private() != other.has_private():
            return False
        if self.n != other.n or self.e != other.e:
            return False

        if not self.has_private():
            return True

        return (self.d == other.d and
                self.q == other.q and
                self.p == other.p and
                self.u == other.u)

    def __ne__(self, other):
        return not (self == other)

    def __getstate__(self):
        # RSA key is not pickable
        from pickle import PicklingError
        raise PicklingError

    def __repr__(self):
        if self.has_private():
            extra = ", d=%d, p=%d, q=%d, u=%d" % (int(self._d), int(self._p),
                                                  int(self._q), int(self._u))
        else:
            extra = ""
        return "RsaKey(n=%d, e=%d%s)" % (int(self._n), int(self._e), extra)

    def __str__(self):
        if self.has_private():
            key_type = "Private"
        else:
            key_type = "Public"
        return "%s RSA key at 0x%X" % (key_type, id(self))

    def exportKey(self, format='PEM', passphrase=None, pkcs=1,
                  protection=None, randfunc=None):
        """Export this RSA key.

        :Parameters:
          format : string
            The format to use for wrapping the key:

            - *'DER'*. Binary encoding.
            - *'PEM'*. Textual encoding, done according to `RFC1421`_/`RFC1423`_.
            - *'OpenSSH'*. Textual encoding, done according to OpenSSH specification.
              Only suitable for public keys (not private keys).

          passphrase : string
            For private keys only. The pass phrase used for deriving the encryption
            key.

          pkcs : integer
            For *DER* and *PEM* format only.
            The PKCS standard to follow for assembling the components of the key.
            You have two choices:

            - **1** (default): the public key is embedded into
              an X.509 ``SubjectPublicKeyInfo`` DER SEQUENCE.
              The private key is embedded into a `PKCS#1`_
              ``RSAPrivateKey`` DER SEQUENCE.
            - **8**: the private key is embedded into a `PKCS#8`_
              ``PrivateKeyInfo`` DER SEQUENCE. This value cannot be used
              for public keys.

          protection : string
            The encryption scheme to use for protecting the private key.

            If ``None`` (default), the behavior depends on ``format``:

            - For *DER*, the *PBKDF2WithHMAC-SHA1AndDES-EDE3-CBC*
              scheme is used. The following operations are performed:

                1. A 16 byte Triple DES key is derived from the passphrase
                   using `Crypto.Protocol.KDF.PBKDF2` with 8 bytes salt,
                   and 1 000 iterations of `Crypto.Hash.HMAC`.
                2. The private key is encrypted using CBC.
                3. The encrypted key is encoded according to PKCS#8.

            - For *PEM*, the obsolete PEM encryption scheme is used.
              It is based on MD5 for key derivation, and Triple DES for encryption.

            Specifying a value for ``protection`` is only meaningful for PKCS#8
            (that is, ``pkcs=8``) and only if a pass phrase is present too.

            The supported schemes for PKCS#8 are listed in the
            `Crypto.IO.PKCS8` module (see ``wrap_algo`` parameter).

          randfunc : callable
            A function that provides random bytes. Only used for PEM encoding.
            The default is `Crypto.Random.get_random_bytes`.

        :Return: A byte string with the encoded public or private half
          of the key.
        :Raise ValueError:
            When the format is unknown or when you try to encrypt a private
            key with *DER* format and PKCS#1.
        :attention:
            If you don't provide a pass phrase, the private key will be
            exported in the clear!

        .. _RFC1421:    http://www.ietf.org/rfc/rfc1421.txt
        .. _RFC1423:    http://www.ietf.org/rfc/rfc1423.txt
        .. _`PKCS#1`:   http://www.ietf.org/rfc/rfc3447.txt
        .. _`PKCS#8`:   http://www.ietf.org/rfc/rfc5208.txt
        """
        if passphrase is not None:
            passphrase = tobytes(passphrase)

        if randfunc is None:
            randfunc = Random.get_random_bytes

        if format == 'OpenSSH':
            e_bytes, n_bytes = [x.to_bytes() for x in (self._e, self._n)]
            # SSH integers are signed; prepend a zero byte when the high
            # bit is set so the value is not read as negative.
            if bord(e_bytes[0]) & 0x80:
                e_bytes = bchr(0) + e_bytes
            if bord(n_bytes[0]) & 0x80:
                n_bytes = bchr(0) + n_bytes
            keyparts = [b('ssh-rsa'), e_bytes, n_bytes]
            keystring = b('').join([struct.pack(">I", len(kp)) + kp for kp in keyparts])
            return b('ssh-rsa ') + binascii.b2a_base64(keystring)[:-1]

        # DER format is always used, even in case of PEM, which simply
        # encodes it into BASE64.
        if self.has_private():
            binary_key = DerSequence([0,
                                      self.n,
                                      self.e,
                                      self.d,
                                      self.p,
                                      self.q,
                                      self.d % (self.p-1),
                                      self.d % (self.q-1),
                                      Integer(self.q).inverse(self.p)
                                      ]).encode()
            if pkcs == 1:
                key_type = 'RSA PRIVATE KEY'
                if format == 'DER' and passphrase:
                    raise ValueError("PKCS#1 private key cannot be encrypted")
            else:  # PKCS#8
                if format == 'PEM' and protection is None:
                    key_type = 'PRIVATE KEY'
                    binary_key = PKCS8.wrap(binary_key, oid, None)
                else:
                    key_type = 'ENCRYPTED PRIVATE KEY'
                    if not protection:
                        protection = 'PBKDF2WithHMAC-SHA1AndDES-EDE3-CBC'
                    binary_key = PKCS8.wrap(binary_key, oid,
                                            passphrase, protection)
                    # PKCS#8 already encrypted the key; don't let the PEM
                    # layer encrypt it a second time.
                    passphrase = None
        else:
            key_type = "RSA PUBLIC KEY"
            binary_key = _create_subject_public_key_info(oid,
                                                         DerSequence([self.n,
                                                                      self.e])
                                                         )

        if format == 'DER':
            return binary_key
        if format == 'PEM':
            pem_str = PEM.encode(binary_key, key_type, passphrase, randfunc)
            return tobytes(pem_str)

        raise ValueError("Unknown key format '%s'. Cannot export the RSA key." % format)

    # Methods defined in PyCrypto that we don't support anymore

    def sign(self, M, K):
        raise NotImplementedError("Use module Crypto.Signature.pkcs1_15 instead")

    def verify(self, M, signature):
        raise NotImplementedError("Use module Crypto.Signature.pkcs1_15 instead")

    def encrypt(self, plaintext, K):
        raise NotImplementedError("Use module Crypto.Cipher.PKCS1_OAEP instead")

    def decrypt(self, ciphertext):
        raise NotImplementedError("Use module Crypto.Cipher.PKCS1_OAEP instead")

    def blind(self, M, B):
        raise NotImplementedError

    def unblind(self, M, B):
        raise NotImplementedError

    # NOTE(review): `size` is declared without `self`, so calling key.size()
    # raises TypeError rather than NotImplementedError - confirm intended.
    def size():
        raise NotImplementedError
def generate(bits, randfunc=None, e=65537):
    """Create a new RSA key.

    The algorithm closely follows NIST `FIPS 186-4`_ in its
    sections B.3.1 and B.3.3. The modulus is the product of
    two non-strong probable primes.
    Each prime passes a suitable number of Miller-Rabin tests
    with random bases and a single Lucas test.

    :Parameters:
      bits : integer
        Key length, or size (in bits) of the RSA modulus.
        It must be at least 1024.
        The FIPS standard only defines 1024, 2048 and 3072.
      randfunc : callable
        Function that returns random bytes.
        The default is `Crypto.Random.get_random_bytes`.
      e : integer
        Public RSA exponent. It must be an odd positive integer.
        It is typically a small number with very few ones in its
        binary representation.
        The FIPS standard requires the public exponent to be
        at least 65537 (the default).

    :Return: An RSA key object (`RsaKey`).

    .. _FIPS 186-4: http://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.186-4.pdf
    """
    if bits < 1024:
        raise ValueError("RSA modulus length must be >= 1024")
    if e % 2 == 0 or e < 3:
        raise ValueError("RSA public exponent must be a positive, odd integer larger than 2.")

    if randfunc is None:
        randfunc = Random.get_random_bytes

    d = n = Integer(1)
    e = Integer(e)

    # Retry until the modulus has exactly `bits` bits and d is large
    # enough (d >= 2^(bits/2) guards against small-d attacks).
    while n.size_in_bits() != bits and d < (1 << (bits // 2)):
        # Generate the prime factors of n: p and q.
        # By construction, their product is always
        # 2^{bits-1} < p*q < 2^bits.
        size_q = bits // 2
        size_p = bits - size_q

        min_p = min_q = (Integer(1) << (2 * size_q - 1)).sqrt()
        if size_q != size_p:
            min_p = (Integer(1) << (2 * size_p - 1)).sqrt()

        def filter_p(candidate):
            # p-1 must be coprime to e so that d = e^-1 exists.
            return candidate > min_p and (candidate - 1).gcd(e) == 1

        p = generate_probable_prime(exact_bits=size_p,
                                    randfunc=randfunc,
                                    prime_filter=filter_p)

        # Keep p and q far apart to resist Fermat factorization.
        min_distance = Integer(1) << (bits // 2 - 100)

        def filter_q(candidate):
            return (candidate > min_q and
                    (candidate - 1).gcd(e) == 1 and
                    abs(candidate - p) > min_distance)

        q = generate_probable_prime(exact_bits=size_q,
                                    randfunc=randfunc,
                                    prime_filter=filter_q)

        n = p * q
        lcm = (p - 1).lcm(q - 1)
        d = e.inverse(lcm)

    # Normalize so that p < q, as required for u = p^-1 mod q.
    if p > q:
        p, q = q, p

    u = p.inverse(q)

    return RsaKey(n=n, e=e, d=d, p=p, q=q, u=u)
def construct(rsa_components, consistency_check=True):
    """Construct an RSA key from a tuple of valid RSA components.

    The modulus **n** must be the product of two primes.
    The public exponent **e** must be odd and larger than 1.

    In case of a private key, the following equations must apply:

    - e != 1
    - p*q = n
    - e*d = 1 mod lcm[(p-1)(q-1)]
    - p*u = 1 mod q

    :Parameters:
     rsa_components : tuple
        A tuple of long integers, with at least 2 and no
        more than 6 items. The items come in the following order:

        1. RSA modulus (*n*).
        2. Public exponent (*e*).
        3. Private exponent (*d*).
           Only required if the key is private.
        4. First factor of *n* (*p*).
           Optional, but factor q must also be present.
        5. Second factor of *n* (*q*). Optional.
        6. CRT coefficient, *(1/p) mod q* (*u*). Optional.
     consistency_check : boolean
        If *True*, the library will verify that the provided components
        fulfil the main RSA properties.

    :Raise ValueError:
        When the key being imported fails the most basic RSA validity checks.
    :Return: An RSA key object (`RsaKey`).
    """

    class InputComps(object):
        pass

    # Collect the supplied components as attributes; absent trailing
    # components simply leave the attribute undefined.
    input_comps = InputComps()
    for (comp, value) in zip(('n', 'e', 'd', 'p', 'q', 'u'), rsa_components):
        setattr(input_comps, comp, Integer(value))

    n = input_comps.n
    e = input_comps.e
    if not hasattr(input_comps, 'd'):
        key = RsaKey(n=n, e=e)
    else:
        d = input_comps.d
        if hasattr(input_comps, 'q'):
            p = input_comps.p
            q = input_comps.q
        else:
            # Compute factors p and q from the private exponent d.
            # We assume that n has no more than two factors.
            # See 8.2.2(i) in Handbook of Applied Cryptography.
            ktot = d * e - 1
            # The quantity d*e-1 is a multiple of phi(n), even,
            # and can be represented as t*2^s.
            t = ktot
            while t % 2 == 0:
                t //= 2
            # Cycle through all multiplicative inverses in Zn.
            # The algorithm is non-deterministic, but there is a 50% chance
            # any candidate a leads to successful factoring.
            # See "Digitalized Signatures and Public Key Functions as Intractable
            # as Factorization", M. Rabin, 1979
            spotted = False
            a = Integer(2)
            while not spotted and a < 100:
                k = Integer(t)
                # Cycle through all values a^{t*2^i}=a^k
                while k < ktot:
                    cand = pow(a, k, n)
                    # Check if a^k is a non-trivial root of unity (mod n)
                    if cand != 1 and cand != (n - 1) and pow(cand, 2, n) == 1:
                        # We have found a number such that (cand-1)(cand+1)=0 (mod n).
                        # Either of the terms divides n.
                        p = Integer(n).gcd(cand + 1)
                        spotted = True
                        break
                    k *= 2
                # This value was not any good... let's try another!
                a += 2
            if not spotted:
                raise ValueError("Unable to compute factors p and q from exponent d.")
            # Found !
            assert ((n % p) == 0)
            q = n // p

        if hasattr(input_comps, 'u'):
            u = input_comps.u
        else:
            # Derive the CRT coefficient when it was not supplied.
            u = p.inverse(q)

        # Build key object
        key = RsaKey(n=n, e=e, d=d, p=p, q=q, u=u)

    # Verify consistency of the key
    fmt_error = False
    if consistency_check:
        # Modulus and public exponent must be coprime
        fmt_error = e <= 1 or e >= n
        fmt_error |= Integer(n).gcd(e) != 1

        # For RSA, modulus must be odd
        fmt_error |= not n & 1

        if not fmt_error and key.has_private():
            # Modulus and private exponent must be coprime
            fmt_error = d <= 1 or d >= n
            fmt_error |= Integer(n).gcd(d) != 1

            # Modulus must be product of 2 primes
            fmt_error |= (p * q != n)
            fmt_error |= test_probable_prime(p) == COMPOSITE
            fmt_error |= test_probable_prime(q) == COMPOSITE

            # See Carmichael theorem
            phi = (p - 1) * (q - 1)
            lcm = phi // (p - 1).gcd(q - 1)
            fmt_error |= (e * d % int(lcm)) != 1

            if hasattr(key, 'u'):
                # CRT coefficient
                fmt_error |= u <= 1 or u >= q
                fmt_error |= (p * u % q) != 1
            else:
                fmt_error = True

    if fmt_error:
        raise ValueError("Invalid RSA key components")

    return key
def _import_pkcs1_private(encoded, *kwargs):
    # Decode a PKCS#1 RSAPrivateKey DER structure.  The extra positional
    # arguments (e.g. the passphrase forwarded by _import_keyDER) are
    # deliberately ignored.
    #
    # RSAPrivateKey ::= SEQUENCE {
    #           version Version,
    #           modulus INTEGER, -- n
    #           publicExponent INTEGER, -- e
    #           privateExponent INTEGER, -- d
    #           prime1 INTEGER, -- p
    #           prime2 INTEGER, -- q
    #           exponent1 INTEGER, -- d mod (p-1)
    #           exponent2 INTEGER, -- d mod (q-1)
    #           coefficient INTEGER -- (inverse of q) mod p
    # }
    #
    # Version ::= INTEGER
    der = DerSequence().decode(encoded, nr_elements=9, only_ints_expected=True)
    if der[0] != 0:
        raise ValueError("No PKCS#1 encoding of an RSA private key")
    # Pass (n, e, d, p, q) plus u = p^-1 mod q, recomputed from p and q.
    return construct(der[1:6] + [Integer(der[4]).inverse(der[5])])
def _import_pkcs1_public(encoded, *kwargs):
    # Decode a PKCS#1 RSAPublicKey DER structure; extra positional
    # arguments are ignored.
    #
    # RSAPublicKey ::= SEQUENCE {
    #           modulus INTEGER, -- n
    #           publicExponent INTEGER -- e
    # }
    der = DerSequence().decode(encoded, nr_elements=2, only_ints_expected=True)
    return construct(der)
def _import_subjectPublicKeyInfo(encoded, *kwargs):
    # Decode an X.509 SubjectPublicKeyInfo wrapper and delegate the inner
    # bit string to the PKCS#1 public-key decoder.
    algoid, encoded_key, params = _expand_subject_public_key_info(encoded)
    if algoid != oid or params is not None:
        raise ValueError("No RSA subjectPublicKeyInfo")
    return _import_pkcs1_public(encoded_key)
def _import_x509_cert(encoded, *kwargs):
    # Extract the SubjectPublicKeyInfo from a DER X.509 certificate and
    # import the public key it carries.
    sp_info = _extract_subject_public_key_info(encoded)
    return _import_subjectPublicKeyInfo(sp_info)
def _import_pkcs8(encoded, passphrase):
    # Unwrap a (possibly encrypted) PKCS#8 PrivateKeyInfo structure and
    # re-dispatch the inner key material through _import_keyDER.
    k = PKCS8.unwrap(encoded, passphrase)
    if k[0] != oid:
        raise ValueError("No PKCS#8 encoded RSA key")
    return _import_keyDER(k[1], passphrase)
def _import_keyDER(extern_key, passphrase):
    """Import an RSA key (public or private half), encoded in DER form."""
    # Try each known DER layout in turn; the first decoder that does not
    # reject the input wins.
    candidate_decoders = [
        _import_pkcs1_private,
        _import_pkcs1_public,
        _import_subjectPublicKeyInfo,
        _import_x509_cert,
        _import_pkcs8,
    ]
    for attempt in candidate_decoders:
        try:
            key = attempt(extern_key, passphrase)
        except ValueError:
            continue
        return key
    raise ValueError("RSA key format is not supported")
def import_key(extern_key, passphrase=None):
    """Import an RSA key (public or private half), encoded in standard
    form.

    :Parameter extern_key:
        The RSA key to import, encoded as a byte string.

        An RSA public key can be in any of the following formats:

        - X.509 certificate (binary or PEM format)
        - X.509 ``subjectPublicKeyInfo`` DER SEQUENCE (binary or PEM
          encoding)
        - `PKCS#1`_ ``RSAPublicKey`` DER SEQUENCE (binary or PEM encoding)
        - OpenSSH (textual public key only)

        An RSA private key can be in any of the following formats:

        - PKCS#1 ``RSAPrivateKey`` DER SEQUENCE (binary or PEM encoding)
        - `PKCS#8`_ ``PrivateKeyInfo`` or ``EncryptedPrivateKeyInfo``
          DER SEQUENCE (binary or PEM encoding)
        - OpenSSH (textual public key only)

        For details about the PEM encoding, see `RFC1421`_/`RFC1423`_.

        The private key may be encrypted by means of a certain pass phrase
        either at the PEM level or at the PKCS#8 level.
    :Type extern_key: string

    :Parameter passphrase:
        In case of an encrypted private key, this is the pass phrase from
        which the decryption key is derived.
    :Type passphrase: string

    :Return: An RSA key object (`RsaKey`).

    :Raise ValueError/IndexError/TypeError:
        When the given key cannot be parsed (possibly because the pass
        phrase is wrong).

    .. _RFC1421: http://www.ietf.org/rfc/rfc1421.txt
    .. _RFC1423: http://www.ietf.org/rfc/rfc1423.txt
    .. _`PKCS#1`: http://www.ietf.org/rfc/rfc3447.txt
    .. _`PKCS#8`: http://www.ietf.org/rfc/rfc5208.txt
    """
    extern_key = tobytes(extern_key)
    if passphrase is not None:
        passphrase = tobytes(passphrase)

    if extern_key.startswith(b('-----')):
        # This is probably a PEM encoded key.
        (der, marker, enc_flag) = PEM.decode(tostr(extern_key), passphrase)
        if enc_flag:
            # The PEM layer consumed the passphrase; don't reuse it below.
            passphrase = None
        return _import_keyDER(der, passphrase)

    if extern_key.startswith(b('ssh-rsa ')):
        # This is probably an OpenSSH key
        keystring = binascii.a2b_base64(extern_key.split(b(' '))[1])
        # The blob is a sequence of length-prefixed (big-endian uint32)
        # fields: type marker, exponent e, modulus n.
        keyparts = []
        while len(keystring) > 4:
            l = struct.unpack(">I", keystring[:4])[0]
            keyparts.append(keystring[4:4 + l])
            keystring = keystring[4 + l:]
        e = Integer.from_bytes(keyparts[1])
        n = Integer.from_bytes(keyparts[2])
        return construct([n, e])

    if bord(extern_key[0]) == 0x30:
        # This is probably a DER encoded key (0x30 = SEQUENCE tag)
        return _import_keyDER(extern_key, passphrase)

    raise ValueError("RSA key format is not supported")
# Backward compatibility: alias kept for PyCrypto-era callers.
importKey = import_key

#: `Object ID`_ for the RSA encryption algorithm. This OID often indicates
#: a generic RSA key, even when such key will be actually used for digital
#: signatures.
#:
#: .. _`Object ID`: http://www.alvestrand.no/objectid/1.2.840.113549.1.1.1.html
oid = "1.2.840.113549.1.1.1"
| 36.783854 | 94 | 0.578088 |
bf8a28d34495820979f4eb10b99b05fc924daf87 | 1,231 | py | Python | scripts/runner.py | TimSimpsonR/qtox | 7ed4c1fa15ecb5ce1ba2f5ea7d6a9deed422c7d2 | [
"MIT"
] | null | null | null | scripts/runner.py | TimSimpsonR/qtox | 7ed4c1fa15ecb5ce1ba2f5ea7d6a9deed422c7d2 | [
"MIT"
] | null | null | null | scripts/runner.py | TimSimpsonR/qtox | 7ed4c1fa15ecb5ce1ba2f5ea7d6a9deed422c7d2 | [
"MIT"
] | 1 | 2019-03-14T21:05:23.000Z | 2019-03-14T21:05:23.000Z | import subprocess
import os
import sys
def main():
    """Run the project's lint/type/test pipeline, stopping at the first failure.

    Returns the exit code of the first failing tool, or None when every
    stage succeeds (so ``sys.exit(main())`` exits 0 either way).

    Fixes: the original computed ``pytest_args`` twice (the first result
    was never used) and repeated the print/call/check boilerplate per tool.
    """
    venv_dir = os.path.dirname(sys.argv[0])
    black = os.path.join(venv_dir, 'black')
    flake8 = os.path.join(venv_dir, 'flake8')
    mypy = os.path.join(venv_dir, 'mypy')
    python = os.path.join(venv_dir, 'python')

    # Quote any extra CLI args once; they are forwarded to pytest only.
    pytest_args = ' '.join([f"'{arg}'" for arg in sys.argv[1:]])

    stages = [
        ('Black', f'{black} qtox setup.py'),
        ('Flake8', f'{flake8} qtox'),
        ('MyPy',
         f'{mypy} --strict-optional '
         '--ignore-missing-imports '
         '--disallow-untyped-calls '
         '--disallow-untyped-defs '
         'qtox'),
        ('PyTest',
         f'{python} -m pytest -vv -x '
         f'{pytest_args}'.strip()),
    ]
    for label, command in stages:
        print(f'Running {label}...', flush=True)
        result = subprocess.call(command, shell=True)
        if result:
            return result
| 25.645833 | 64 | 0.576767 |
dc11d1c0f5e34b0beb8fed66110bd51d2993db67 | 47,672 | py | Python | flare/kernels.py | smheidrich/flare | 5a18c1042a5767ebb8dc20ac59a17df6f1bcad77 | [
"MIT"
] | null | null | null | flare/kernels.py | smheidrich/flare | 5a18c1042a5767ebb8dc20ac59a17df6f1bcad77 | [
"MIT"
] | null | null | null | flare/kernels.py | smheidrich/flare | 5a18c1042a5767ebb8dc20ac59a17df6f1bcad77 | [
"MIT"
] | null | null | null | """Single element 2-, 3-, and 2+3-body kernels.
The kernel functions to choose:
* Two body:
* two_body: force kernel
* two_body_en: energy kernel
* two_body_grad: gradient of kernel function
* two_body_force_en: energy force kernel
* Three body:
* three_body,
* three_body_grad,
* three_body_en,
* three_body_force_en,
* Two plus three body:
* two_plus_three_body,
* two_plus_three_body_grad,
* two_plus_three_en,
* two_plus_three_force_en
**Example:**
>>> gp_model = GaussianProcess(kernel=two_body,
kernel_grad=two_body_grad,
energy_force_kernel=two_body_force_en,
energy_kernel=two_body_en,
<other arguments>)
"""
import numpy as np
from math import exp
from flare.env import AtomicEnvironment
from numba import njit
import flare.cutoffs as cf
# -----------------------------------------------------------------------------
# two plus three body kernels
# -----------------------------------------------------------------------------
def two_plus_three_body(env1: AtomicEnvironment, env2: AtomicEnvironment,
                        d1: int, d2: int, hyps, cutoffs,
                        cutoff_func=cf.quadratic_cutoff):
    """2+3-body single-element kernel between two force components.

    Args:
        env1 (AtomicEnvironment): First local environment.
        env2 (AtomicEnvironment): Second local environment.
        d1 (int): Force component of the first environment.
        d2 (int): Force component of the second environment.
        hyps (np.ndarray): Kernel hyperparameters (sig1, ls1, sig2, ls2,
            sig_n).
        cutoffs (np.ndarray): Two-element array with the 2- and 3-body
            cutoff radii.
        cutoff_func (Callable): Cutoff function of the kernel.

    Return:
        float: Value of the 2+3-body kernel.
    """
    sig_2, ls_2 = hyps[0], hyps[1]
    sig_3, ls_3 = hyps[2], hyps[3]

    pair_term = two_body_jit(env1.bond_array_2, env2.bond_array_2,
                             d1, d2, sig_2, ls_2, cutoffs[0], cutoff_func)
    triplet_term = three_body_jit(
        env1.bond_array_3, env2.bond_array_3,
        env1.cross_bond_inds, env2.cross_bond_inds,
        env1.cross_bond_dists, env2.cross_bond_dists,
        env1.triplet_counts, env2.triplet_counts,
        d1, d2, sig_3, ls_3, cutoffs[1], cutoff_func)

    return pair_term + triplet_term
def two_plus_three_body_grad(env1, env2, d1, d2, hyps, cutoffs,
                             cutoff_func=cf.quadratic_cutoff):
    """2+3-body single-element kernel between two force components and its
    gradient with respect to the hyperparameters.

    Args:
        env1 (AtomicEnvironment): First local environment.
        env2 (AtomicEnvironment): Second local environment.
        d1 (int): Force component of the first environment.
        d2 (int): Force component of the second environment.
        hyps (np.ndarray): Kernel hyperparameters (sig1, ls1, sig2, ls2,
            sig_n).
        cutoffs (np.ndarray): Two-element array with the 2- and 3-body
            cutoff radii.
        cutoff_func (Callable): Cutoff function of the kernel.

    Return:
        (float, np.ndarray): Value of the 2+3-body kernel and its gradient
            with respect to (sig1, ls1, sig2, ls2).
    """
    # The 2-body jit returns (kernel, d/d_ls, d/d_sig) in that order.
    kern_2, ls_grad_2, sig_grad_2 = two_body_grad_jit(
        env1.bond_array_2, env2.bond_array_2,
        d1, d2, hyps[0], hyps[1], cutoffs[0], cutoff_func)

    # The 3-body jit returns (kernel, d/d_sig, d/d_ls) in that order.
    kern_3, sig_grad_3, ls_grad_3 = three_body_grad_jit(
        env1.bond_array_3, env2.bond_array_3,
        env1.cross_bond_inds, env2.cross_bond_inds,
        env1.cross_bond_dists, env2.cross_bond_dists,
        env1.triplet_counts, env2.triplet_counts,
        d1, d2, hyps[2], hyps[3], cutoffs[1], cutoff_func)

    # Report the gradient in hyperparameter order: sig1, ls1, sig2, ls2.
    return kern_2 + kern_3, np.array([sig_grad_2, ls_grad_2,
                                      sig_grad_3, ls_grad_3])
def two_plus_three_force_en(env1, env2, d1, hyps, cutoffs,
                            cutoff_func=cf.quadratic_cutoff):
    """2+3-body single-element kernel between a force component and a local
    energy.

    Args:
        env1 (AtomicEnvironment): Local environment associated with the
            force component.
        env2 (AtomicEnvironment): Local environment associated with the
            local energy.
        d1 (int): Force component of the first environment.
        hyps (np.ndarray): Kernel hyperparameters (sig1, ls1, sig2, ls2).
        cutoffs (np.ndarray): Two-element array with the 2- and 3-body
            cutoff radii.
        cutoff_func (Callable): Cutoff function of the kernel.

    Return:
        float: Value of the 2+3-body force/energy kernel.
    """
    # Pair energies are shared by two atoms and triplet energies by three,
    # so each contribution is divided by its multiplicity.
    pair_term = two_body_force_en_jit(
        env1.bond_array_2, env2.bond_array_2,
        d1, hyps[0], hyps[1], cutoffs[0], cutoff_func) / 2
    triplet_term = three_body_force_en_jit(
        env1.bond_array_3, env2.bond_array_3,
        env1.cross_bond_inds, env2.cross_bond_inds,
        env1.cross_bond_dists, env2.cross_bond_dists,
        env1.triplet_counts, env2.triplet_counts,
        d1, hyps[2], hyps[3], cutoffs[1], cutoff_func) / 3

    return pair_term + triplet_term
def two_plus_three_en(env1, env2, hyps, cutoffs,
                      cutoff_func=cf.quadratic_cutoff):
    """2+3-body single-element kernel between two local energies.

    Args:
        env1 (AtomicEnvironment): First local environment.
        env2 (AtomicEnvironment): Second local environment.
        hyps (np.ndarray): Kernel hyperparameters (sig1, ls1, sig2, ls2).
        cutoffs (np.ndarray): Two-element array with the 2- and 3-body
            cutoff radii.
        cutoff_func (Callable): Cutoff function of the kernel.

    Return:
        float: Value of the 2+3-body energy/energy kernel.
    """
    pair_term = two_body_en_jit(env1.bond_array_2, env2.bond_array_2,
                                hyps[0], hyps[1], cutoffs[0], cutoff_func)
    triplet_term = three_body_en_jit(
        env1.bond_array_3, env2.bond_array_3,
        env1.cross_bond_inds, env2.cross_bond_inds,
        env1.cross_bond_dists, env2.cross_bond_dists,
        env1.triplet_counts, env2.triplet_counts,
        hyps[2], hyps[3], cutoffs[1], cutoff_func)

    return pair_term + triplet_term
# -----------------------------------------------------------------------------
# two body kernels
# -----------------------------------------------------------------------------
def two_body(env1, env2, d1, d2, hyps, cutoffs,
             cutoff_func=cf.quadratic_cutoff):
    """2-body single-element kernel between two force components.

    Args:
        env1 (AtomicEnvironment): First local environment.
        env2 (AtomicEnvironment): Second local environment.
        d1 (int): Force component of the first environment.
        d2 (int): Force component of the second environment.
        hyps (np.ndarray): Kernel hyperparameters (sig, ls).
        cutoffs (np.ndarray): One-element array containing the 2-body
            cutoff radius.
        cutoff_func (Callable): Cutoff function of the kernel.

    Return:
        float: Value of the 2-body kernel.
    """
    return two_body_jit(env1.bond_array_2, env2.bond_array_2,
                        d1, d2, hyps[0], hyps[1], cutoffs[0], cutoff_func)
def two_body_grad(env1, env2, d1, d2, hyps, cutoffs,
                  cutoff_func=cf.quadratic_cutoff):
    """2-body single-element kernel between two force components and its
    gradient with respect to the hyperparameters.

    Args:
        env1 (AtomicEnvironment): First local environment.
        env2 (AtomicEnvironment): Second local environment.
        d1 (int): Force component of the first environment.
        d2 (int): Force component of the second environment.
        hyps (np.ndarray): Kernel hyperparameters (sig, ls).
        cutoffs (np.ndarray): One-element array containing the 2-body
            cutoff radius.
        cutoff_func (Callable): Cutoff function of the kernel.

    Return:
        (float, np.ndarray): Value of the 2-body kernel and its gradient
            with respect to (sig, ls).
    """
    # The jit function reports the derivatives in (ls, sig) order.
    kernel, ls_grad, sig_grad = two_body_grad_jit(
        env1.bond_array_2, env2.bond_array_2,
        d1, d2, hyps[0], hyps[1], cutoffs[0], cutoff_func)
    # Reorder the gradient to match the hyperparameter order (sig, ls).
    return kernel, np.array([sig_grad, ls_grad])
def two_body_force_en(env1, env2, d1, hyps, cutoffs,
                      cutoff_func=cf.quadratic_cutoff):
    """2-body single-element kernel between a force component and a local
    energy.

    Args:
        env1 (AtomicEnvironment): Local environment associated with the
            force component.
        env2 (AtomicEnvironment): Local environment associated with the
            local energy.
        d1 (int): Force component of the first environment.
        hyps (np.ndarray): Kernel hyperparameters (sig, ls).
        cutoffs (np.ndarray): One-element array containing the 2-body
            cutoff radius.
        cutoff_func (Callable): Cutoff function of the kernel.

    Return:
        float: Value of the 2-body force/energy kernel.
    """
    # Halve the kernel: each pair energy is shared by two atoms.
    return two_body_force_en_jit(env1.bond_array_2, env2.bond_array_2,
                                 d1, hyps[0], hyps[1], cutoffs[0],
                                 cutoff_func) / 2
def two_body_en(env1, env2, hyps, cutoffs,
                cutoff_func=cf.quadratic_cutoff):
    """2-body single-element kernel between two local energies.

    Args:
        env1 (AtomicEnvironment): First local environment.
        env2 (AtomicEnvironment): Second local environment.
        hyps (np.ndarray): Kernel hyperparameters (sig, ls).
        cutoffs (np.ndarray): One-element array containing the 2-body
            cutoff radius.
        cutoff_func (Callable): Cutoff function of the kernel.

    Return:
        float: Value of the 2-body energy/energy kernel.
    """
    return two_body_en_jit(env1.bond_array_2, env2.bond_array_2,
                           hyps[0], hyps[1], cutoffs[0], cutoff_func)
# -----------------------------------------------------------------------------
# three body kernels
# -----------------------------------------------------------------------------
def three_body(env1, env2, d1, d2, hyps, cutoffs,
               cutoff_func=cf.quadratic_cutoff):
    """3-body single-element kernel between two force components.

    Args:
        env1 (AtomicEnvironment): First local environment.
        env2 (AtomicEnvironment): Second local environment.
        d1 (int): Force component of the first environment.
        d2 (int): Force component of the second environment.
        hyps (np.ndarray): Kernel hyperparameters (sig, ls).
        cutoffs (np.ndarray): Two-element array with the 2- and 3-body
            cutoff radii; only the 3-body entry is used here.
        cutoff_func (Callable): Cutoff function of the kernel.

    Return:
        float: Value of the 3-body kernel.
    """
    return three_body_jit(env1.bond_array_3, env2.bond_array_3,
                          env1.cross_bond_inds, env2.cross_bond_inds,
                          env1.cross_bond_dists, env2.cross_bond_dists,
                          env1.triplet_counts, env2.triplet_counts,
                          d1, d2, hyps[0], hyps[1], cutoffs[1], cutoff_func)
def three_body_grad(env1, env2, d1, d2, hyps, cutoffs,
                    cutoff_func=cf.quadratic_cutoff):
    """3-body single-element kernel between two force components and its
    gradient with respect to the hyperparameters.

    Args:
        env1 (AtomicEnvironment): First local environment.
        env2 (AtomicEnvironment): Second local environment.
        d1 (int): Force component of the first environment.
        d2 (int): Force component of the second environment.
        hyps (np.ndarray): Kernel hyperparameters (sig, ls).
        cutoffs (np.ndarray): Two-element array with the 2- and 3-body
            cutoff radii; only the 3-body entry is used here.
        cutoff_func (Callable): Cutoff function of the kernel.

    Return:
        (float, np.ndarray): Value of the 3-body kernel and its gradient
            with respect to (sig, ls).
    """
    kernel, sig_grad, ls_grad = three_body_grad_jit(
        env1.bond_array_3, env2.bond_array_3,
        env1.cross_bond_inds, env2.cross_bond_inds,
        env1.cross_bond_dists, env2.cross_bond_dists,
        env1.triplet_counts, env2.triplet_counts,
        d1, d2, hyps[0], hyps[1], cutoffs[1], cutoff_func)
    return kernel, np.array([sig_grad, ls_grad])
def three_body_force_en(env1, env2, d1, hyps, cutoffs,
                        cutoff_func=cf.quadratic_cutoff):
    """3-body single-element kernel between a force component and a local
    energy.

    Args:
        env1 (AtomicEnvironment): Local environment associated with the
            force component.
        env2 (AtomicEnvironment): Local environment associated with the
            local energy.
        d1 (int): Force component of the first environment.
        hyps (np.ndarray): Kernel hyperparameters (sig, ls).
        cutoffs (np.ndarray): Two-element array with the 2- and 3-body
            cutoff radii; only the 3-body entry is used here.
        cutoff_func (Callable): Cutoff function of the kernel.

    Return:
        float: Value of the 3-body force/energy kernel.
    """
    # Divide by three: each triplet energy is shared by three atoms.
    return three_body_force_en_jit(
        env1.bond_array_3, env2.bond_array_3,
        env1.cross_bond_inds, env2.cross_bond_inds,
        env1.cross_bond_dists, env2.cross_bond_dists,
        env1.triplet_counts, env2.triplet_counts,
        d1, hyps[0], hyps[1], cutoffs[1], cutoff_func) / 3
def three_body_en(env1, env2, hyps, cutoffs,
                  cutoff_func=cf.quadratic_cutoff):
    """3-body single-element kernel between two local energies.

    Args:
        env1 (AtomicEnvironment): First local environment.
        env2 (AtomicEnvironment): Second local environment.
        hyps (np.ndarray): Kernel hyperparameters (sig, ls).
        cutoffs (np.ndarray): Two-element array with the 2- and 3-body
            cutoff radii; only the 3-body entry is used here.
        cutoff_func (Callable): Cutoff function of the kernel.

    Return:
        float: Value of the 3-body energy/energy kernel.
    """
    return three_body_en_jit(env1.bond_array_3, env2.bond_array_3,
                             env1.cross_bond_inds, env2.cross_bond_inds,
                             env1.cross_bond_dists, env2.cross_bond_dists,
                             env1.triplet_counts, env2.triplet_counts,
                             hyps[0], hyps[1], cutoffs[1], cutoff_func)
# -----------------------------------------------------------------------------
# two body numba functions
# -----------------------------------------------------------------------------
@njit
def two_body_jit(bond_array_1, bond_array_2, d1, d2, sig, ls,
                 r_cut, cutoff_func):
    """2-body single-element kernel between two force components accelerated
    with Numba.

    Args:
        bond_array_1 (np.ndarray): 2-body bond array of the first local
            environment.
        bond_array_2 (np.ndarray): 2-body bond array of the second local
            environment.
        d1 (int): Force component of the first environment (1=x, 2=y, 3=z).
        d2 (int): Force component of the second environment (1=x, 2=y, 3=z).
        sig (float): 2-body signal variance hyperparameter.
        ls (float): 2-body length scale hyperparameter.
        r_cut (float): 2-body cutoff radius.
        cutoff_func (Callable): Cutoff function.

    Return:
        float: Value of the 2-body kernel.
    """
    # Hoist the hyperparameter powers out of the double loop.
    inv_2l2 = 1 / (2 * ls * ls)
    inv_l2 = 1 / (ls * ls)
    inv_l4 = inv_l2 * inv_l2
    sig_sq = sig * sig
    kern = 0
    for i in range(bond_array_1.shape[0]):
        r_i = bond_array_1[i, 0]
        c_i = bond_array_1[i, d1]
        f_i, fd_i = cutoff_func(r_cut, r_i, c_i)
        for j in range(bond_array_2.shape[0]):
            r_j = bond_array_2[j, 0]
            c_j = bond_array_2[j, d2]
            f_j, fd_j = cutoff_func(r_cut, r_j, c_j)
            diff = r_i - r_j
            # A, B, C, D of Table IV of the FLARE SI, passed positionally.
            kern += force_helper(c_i * c_j, diff * c_i, diff * c_j,
                                 diff * diff, f_i, f_j, fd_i, fd_j,
                                 inv_2l2, inv_l2, inv_l4, sig_sq)
    return kern
@njit
def two_body_grad_jit(bond_array_1, bond_array_2, d1, d2, sig, ls,
                      r_cut, cutoff_func):
    """2-body single-element kernel between two force components and its
    gradient with respect to the hyperparameters.
    Args:
        bond_array_1 (np.ndarray): 2-body bond array of the first local
            environment.
        bond_array_2 (np.ndarray): 2-body bond array of the second local
            environment.
        d1 (int): Force component of the first environment (1=x, 2=y, 3=z).
        d2 (int): Force component of the second environment (1=x, 2=y, 3=z).
        sig (float): 2-body signal variance hyperparameter.
        ls (float): 2-body length scale hyperparameter.
        r_cut (float): 2-body cutoff radius.
        cutoff_func (Callable): Cutoff function.
    Returns:
        (float, float, float):
            Value of the 2-body kernel, its derivative with respect to ls,
            and its derivative with respect to sig. Note the (ls, sig)
            ordering, which is the reverse of three_body_grad_jit; callers
            must unpack accordingly.
    """
    kern = 0
    sig_derv = 0
    ls_derv = 0
    # Powers of sig and ls shared by every grad_helper call below.
    sig2, sig3, ls1, ls2, ls3, ls4, ls5, ls6 = grad_constants(sig, ls)
    for m in range(bond_array_1.shape[0]):
        ri = bond_array_1[m, 0]
        ci = bond_array_1[m, d1]
        fi, fdi = cutoff_func(r_cut, ri, ci)
        for n in range(bond_array_2.shape[0]):
            rj = bond_array_2[n, 0]
            cj = bond_array_2[n, d2]
            fj, fdj = cutoff_func(r_cut, rj, cj)
            r11 = ri - rj
            # Intermediate quantities A-D of Table IV of the FLARE SI.
            A = ci * cj
            B = r11 * ci
            C = r11 * cj
            D = r11 * r11
            kern_term, sig_term, ls_term = \
                grad_helper(A, B, C, D, fi, fj, fdi, fdj, ls1, ls2, ls3, ls4,
                            ls5, ls6, sig2, sig3)
            kern += kern_term
            sig_derv += sig_term
            ls_derv += ls_term
    return kern, ls_derv, sig_derv
@njit
def two_body_force_en_jit(bond_array_1, bond_array_2, d1, sig, ls, r_cut,
                          cutoff_func):
    """2-body single-element kernel between a force component and a local
    energy accelerated with Numba.

    Args:
        bond_array_1 (np.ndarray): 2-body bond array of the first local
            environment.
        bond_array_2 (np.ndarray): 2-body bond array of the second local
            environment.
        d1 (int): Force component of the first environment (1=x, 2=y, 3=z).
        sig (float): 2-body signal variance hyperparameter.
        ls (float): 2-body length scale hyperparameter.
        r_cut (float): 2-body cutoff radius.
        cutoff_func (Callable): Cutoff function.

    Returns:
        float: Value of the 2-body force/energy kernel.
    """
    inv_2l2 = 1 / (2 * ls * ls)
    inv_l2 = 1 / (ls * ls)
    sig_sq = sig * sig
    kern = 0
    for i in range(bond_array_1.shape[0]):
        r_i = bond_array_1[i, 0]
        c_i = bond_array_1[i, d1]
        f_i, fd_i = cutoff_func(r_cut, r_i, c_i)
        for j in range(bond_array_2.shape[0]):
            r_j = bond_array_2[j, 0]
            # Energy side: coordinate 0, so the cutoff derivative is unused.
            f_j, _ = cutoff_func(r_cut, r_j, 0)
            diff = r_i - r_j
            kern += force_energy_helper(diff * c_i, diff * diff, f_i, f_j,
                                        fd_i, inv_2l2, inv_l2, sig_sq)
    return kern
@njit
def two_body_en_jit(bond_array_1, bond_array_2, sig, ls, r_cut, cutoff_func):
    """2-body single-element kernel between two local energies accelerated
    with Numba.

    Args:
        bond_array_1 (np.ndarray): 2-body bond array of the first local
            environment.
        bond_array_2 (np.ndarray): 2-body bond array of the second local
            environment.
        sig (float): 2-body signal variance hyperparameter.
        ls (float): 2-body length scale hyperparameter.
        r_cut (float): 2-body cutoff radius.
        cutoff_func (Callable): Cutoff function.

    Returns:
        float: Value of the 2-body local energy kernel.
    """
    inv_2l2 = 1 / (2 * ls * ls)
    sig_sq = sig * sig
    kern = 0
    for i in range(bond_array_1.shape[0]):
        r_i = bond_array_1[i, 0]
        f_i, _ = cutoff_func(r_cut, r_i, 0)
        for j in range(bond_array_2.shape[0]):
            r_j = bond_array_2[j, 0]
            f_j, _ = cutoff_func(r_cut, r_j, 0)
            diff = r_i - r_j
            # Gaussian of the bond-length difference, tapered by cutoffs.
            kern += f_i * f_j * sig_sq * exp(-diff * diff * inv_2l2)
    return kern
# -----------------------------------------------------------------------------
# three body numba functions
# -----------------------------------------------------------------------------
@njit
def three_body_jit(bond_array_1, bond_array_2,
                   cross_bond_inds_1, cross_bond_inds_2,
                   cross_bond_dists_1, cross_bond_dists_2,
                   triplets_1, triplets_2,
                   d1, d2, sig, ls, r_cut, cutoff_func):
    """3-body single-element kernel between two force components accelerated
    with Numba.
    Args:
        bond_array_1 (np.ndarray): 3-body bond array of the first local
            environment.
        bond_array_2 (np.ndarray): 3-body bond array of the second local
            environment.
        cross_bond_inds_1 (np.ndarray): Two dimensional array whose row m
            contains the indices of atoms n > m in the first local
            environment that are within a distance r_cut of both atom n and
            the central atom.
        cross_bond_inds_2 (np.ndarray): Two dimensional array whose row m
            contains the indices of atoms n > m in the second local
            environment that are within a distance r_cut of both atom n and
            the central atom.
        cross_bond_dists_1 (np.ndarray): Two dimensional array whose row m
            contains the distances from atom m of atoms n > m in the first
            local environment that are within a distance r_cut of both atom
            n and the central atom.
        cross_bond_dists_2 (np.ndarray): Two dimensional array whose row m
            contains the distances from atom m of atoms n > m in the second
            local environment that are within a distance r_cut of both atom
            n and the central atom.
        triplets_1 (np.ndarray): One dimensional array of integers whose entry
            m is the number of atoms in the first local environment that are
            within a distance r_cut of atom m.
        triplets_2 (np.ndarray): One dimensional array of integers whose entry
            m is the number of atoms in the second local environment that are
            within a distance r_cut of atom m.
        d1 (int): Force component of the first environment.
        d2 (int): Force component of the second environment.
        sig (float): 3-body signal variance hyperparameter.
        ls (float): 3-body length scale hyperparameter.
        r_cut (float): 3-body cutoff radius.
        cutoff_func (Callable): Cutoff function.
    Return:
        float: Value of the 3-body kernel.
    """
    kern = 0
    # pre-compute constants that appear in the inner loop
    sig2 = sig*sig
    ls1 = 1 / (2*ls*ls)
    ls2 = 1 / (ls*ls)
    ls3 = ls2*ls2
    # Outer two loops enumerate the triplets of environment 1: each pair
    # (m, ind1) of neighbors plus the central atom forms one triplet.
    for m in range(bond_array_1.shape[0]):
        ri1 = bond_array_1[m, 0]
        ci1 = bond_array_1[m, d1]
        fi1, fdi1 = cutoff_func(r_cut, ri1, ci1)
        for n in range(triplets_1[m]):
            ind1 = cross_bond_inds_1[m, m+n+1]
            ri2 = bond_array_1[ind1, 0]
            ci2 = bond_array_1[ind1, d1]
            fi2, fdi2 = cutoff_func(r_cut, ri2, ci2)
            ri3 = cross_bond_dists_1[m, m+n+1]
            # Cross bond evaluated with coordinate 0: no d1 derivative.
            fi3, _ = cutoff_func(r_cut, ri3, 0)
            fi = fi1*fi2*fi3
            # Product rule over the two bonds attached to the central atom;
            # fi3 is absent since it carries no d1 derivative.
            fdi = fdi1*fi2*fi3+fi1*fdi2*fi3
            # Inner two loops enumerate the triplets of environment 2.
            for p in range(bond_array_2.shape[0]):
                rj1 = bond_array_2[p, 0]
                cj1 = bond_array_2[p, d2]
                fj1, fdj1 = cutoff_func(r_cut, rj1, cj1)
                for q in range(triplets_2[p]):
                    ind2 = cross_bond_inds_2[p, p+1+q]
                    rj2 = bond_array_2[ind2, 0]
                    cj2 = bond_array_2[ind2, d2]
                    fj2, fdj2 = cutoff_func(r_cut, rj2, cj2)
                    rj3 = cross_bond_dists_2[p, p+1+q]
                    fj3, _ = cutoff_func(r_cut, rj3, 0)
                    fj = fj1*fj2*fj3
                    fdj = fdj1*fj2*fj3+fj1*fdj2*fj3
                    # Sums the kernel over all six triplet permutations.
                    kern += triplet_kernel(ci1, ci2, cj1, cj2, ri1, ri2, ri3,
                                           rj1, rj2, rj3, fi, fj, fdi, fdj,
                                           ls1, ls2, ls3, sig2)
    return kern
@njit
def three_body_grad_jit(bond_array_1, bond_array_2,
                        cross_bond_inds_1, cross_bond_inds_2,
                        cross_bond_dists_1, cross_bond_dists_2,
                        triplets_1, triplets_2,
                        d1, d2, sig, ls, r_cut, cutoff_func):
    """3-body single-element kernel between two force components and its
    gradient with respect to the hyperparameters.
    Args:
        bond_array_1 (np.ndarray): 3-body bond array of the first local
            environment.
        bond_array_2 (np.ndarray): 3-body bond array of the second local
            environment.
        cross_bond_inds_1 (np.ndarray): Two dimensional array whose row m
            contains the indices of atoms n > m in the first local
            environment that are within a distance r_cut of both atom n and
            the central atom.
        cross_bond_inds_2 (np.ndarray): Two dimensional array whose row m
            contains the indices of atoms n > m in the second local
            environment that are within a distance r_cut of both atom n and
            the central atom.
        cross_bond_dists_1 (np.ndarray): Two dimensional array whose row m
            contains the distances from atom m of atoms n > m in the first
            local environment that are within a distance r_cut of both atom
            n and the central atom.
        cross_bond_dists_2 (np.ndarray): Two dimensional array whose row m
            contains the distances from atom m of atoms n > m in the second
            local environment that are within a distance r_cut of both atom
            n and the central atom.
        triplets_1 (np.ndarray): One dimensional array of integers whose entry
            m is the number of atoms in the first local environment that are
            within a distance r_cut of atom m.
        triplets_2 (np.ndarray): One dimensional array of integers whose entry
            m is the number of atoms in the second local environment that are
            within a distance r_cut of atom m.
        d1 (int): Force component of the first environment.
        d2 (int): Force component of the second environment.
        sig (float): 3-body signal variance hyperparameter.
        ls (float): 3-body length scale hyperparameter.
        r_cut (float): 3-body cutoff radius.
        cutoff_func (Callable): Cutoff function.
    Returns:
        (float, float, float):
            Value of the 3-body kernel, its derivative with respect to sig,
            and its derivative with respect to ls. Note the (sig, ls)
            ordering, which is the reverse of two_body_grad_jit.
    """
    kern = 0
    sig_derv = 0
    ls_derv = 0
    # pre-compute constants that appear in the inner loop
    sig2, sig3, ls1, ls2, ls3, ls4, ls5, ls6 = grad_constants(sig, ls)
    for m in range(bond_array_1.shape[0]):
        ri1 = bond_array_1[m, 0]
        ci1 = bond_array_1[m, d1]
        fi1, fdi1 = cutoff_func(r_cut, ri1, ci1)
        for n in range(triplets_1[m]):
            ind1 = cross_bond_inds_1[m, m+n+1]
            ri3 = cross_bond_dists_1[m, m+n+1]
            ri2 = bond_array_1[ind1, 0]
            ci2 = bond_array_1[ind1, d1]
            fi2, fdi2 = cutoff_func(r_cut, ri2, ci2)
            # Cross bond evaluated with coordinate 0: no d1 derivative.
            fi3, _ = cutoff_func(r_cut, ri3, 0)
            fi = fi1*fi2*fi3
            # Product rule over the two central-atom bonds; fi3 is constant.
            fdi = fdi1*fi2*fi3+fi1*fdi2*fi3
            for p in range(bond_array_2.shape[0]):
                rj1 = bond_array_2[p, 0]
                cj1 = bond_array_2[p, d2]
                fj1, fdj1 = cutoff_func(r_cut, rj1, cj1)
                for q in range(triplets_2[p]):
                    ind2 = cross_bond_inds_2[p, p+q+1]
                    rj3 = cross_bond_dists_2[p, p+q+1]
                    rj2 = bond_array_2[ind2, 0]
                    cj2 = bond_array_2[ind2, d2]
                    fj2, fdj2 = cutoff_func(r_cut, rj2, cj2)
                    fj3, _ = cutoff_func(r_cut, rj3, 0)
                    fj = fj1*fj2*fj3
                    fdj = fdj1*fj2*fj3+fj1*fdj2*fj3
                    # N: kernel term, O: sig derivative, X: ls derivative,
                    # summed over the six triplet permutations.
                    N, O, X = \
                        triplet_kernel_grad(ci1, ci2, cj1, cj2, ri1, ri2, ri3,
                                            rj1, rj2, rj3, fi, fj, fdi, fdj,
                                            ls1, ls2, ls3, ls4, ls5, ls6, sig2,
                                            sig3)
                    kern += N
                    sig_derv += O
                    ls_derv += X
    return kern, sig_derv, ls_derv
@njit
def three_body_force_en_jit(bond_array_1, bond_array_2,
                            cross_bond_inds_1,
                            cross_bond_inds_2,
                            cross_bond_dists_1,
                            cross_bond_dists_2,
                            triplets_1, triplets_2,
                            d1, sig, ls, r_cut, cutoff_func):
    """3-body single-element kernel between a force component and a local
    energy accelerated with Numba.
    Args:
        bond_array_1 (np.ndarray): 3-body bond array of the first local
            environment.
        bond_array_2 (np.ndarray): 3-body bond array of the second local
            environment.
        cross_bond_inds_1 (np.ndarray): Two dimensional array whose row m
            contains the indices of atoms n > m in the first local
            environment that are within a distance r_cut of both atom n and
            the central atom.
        cross_bond_inds_2 (np.ndarray): Two dimensional array whose row m
            contains the indices of atoms n > m in the second local
            environment that are within a distance r_cut of both atom n and
            the central atom.
        cross_bond_dists_1 (np.ndarray): Two dimensional array whose row m
            contains the distances from atom m of atoms n > m in the first
            local environment that are within a distance r_cut of both atom
            n and the central atom.
        cross_bond_dists_2 (np.ndarray): Two dimensional array whose row m
            contains the distances from atom m of atoms n > m in the second
            local environment that are within a distance r_cut of both atom
            n and the central atom.
        triplets_1 (np.ndarray): One dimensional array of integers whose entry
            m is the number of atoms in the first local environment that are
            within a distance r_cut of atom m.
        triplets_2 (np.ndarray): One dimensional array of integers whose entry
            m is the number of atoms in the second local environment that are
            within a distance r_cut of atom m.
        d1 (int): Force component of the first environment (1=x, 2=y, 3=z).
        sig (float): 3-body signal variance hyperparameter.
        ls (float): 3-body length scale hyperparameter.
        r_cut (float): 3-body cutoff radius.
        cutoff_func (Callable): Cutoff function.
    Returns:
        float:
            Value of the 3-body force/energy kernel.
    """
    kern = 0
    # pre-compute constants that appear in the inner loop
    sig2 = sig*sig
    ls1 = 1 / (2*ls*ls)
    ls2 = 1 / (ls*ls)
    for m in range(bond_array_1.shape[0]):
        ri1 = bond_array_1[m, 0]
        ci1 = bond_array_1[m, d1]
        fi1, fdi1 = cutoff_func(r_cut, ri1, ci1)
        for n in range(triplets_1[m]):
            ind1 = cross_bond_inds_1[m, m+n+1]
            ri2 = bond_array_1[ind1, 0]
            ci2 = bond_array_1[ind1, d1]
            fi2, fdi2 = cutoff_func(r_cut, ri2, ci2)
            ri3 = cross_bond_dists_1[m, m+n+1]
            fi3, _ = cutoff_func(r_cut, ri3, 0)
            fi = fi1*fi2*fi3
            # Product rule over the force-side bonds; fi3 has no d1
            # derivative since its cutoff was evaluated with coordinate 0.
            fdi = fdi1*fi2*fi3+fi1*fdi2*fi3
            # Energy side: all cutoffs are evaluated with coordinate 0,
            # so no derivatives are needed for environment 2.
            for p in range(bond_array_2.shape[0]):
                rj1 = bond_array_2[p, 0]
                fj1, _ = cutoff_func(r_cut, rj1, 0)
                for q in range(triplets_2[p]):
                    ind2 = cross_bond_inds_2[p, p+q+1]
                    rj2 = bond_array_2[ind2, 0]
                    fj2, _ = cutoff_func(r_cut, rj2, 0)
                    rj3 = cross_bond_dists_2[p, p+q+1]
                    fj3, _ = cutoff_func(r_cut, rj3, 0)
                    fj = fj1*fj2*fj3
                    kern += triplet_force_en_kernel(ci1, ci2, ri1, ri2, ri3,
                                                    rj1, rj2, rj3, fi, fj, fdi,
                                                    ls1, ls2, sig2)
    return kern
@njit
def three_body_en_jit(bond_array_1, bond_array_2,
                      cross_bond_inds_1,
                      cross_bond_inds_2,
                      cross_bond_dists_1,
                      cross_bond_dists_2,
                      triplets_1, triplets_2,
                      sig, ls, r_cut, cutoff_func):
    """3-body single-element kernel between two local energies accelerated
    with Numba.
    Args:
        bond_array_1 (np.ndarray): 3-body bond array of the first local
            environment.
        bond_array_2 (np.ndarray): 3-body bond array of the second local
            environment.
        cross_bond_inds_1 (np.ndarray): Two dimensional array whose row m
            contains the indices of atoms n > m in the first local
            environment that are within a distance r_cut of both atom n and
            the central atom.
        cross_bond_inds_2 (np.ndarray): Two dimensional array whose row m
            contains the indices of atoms n > m in the second local
            environment that are within a distance r_cut of both atom n and
            the central atom.
        cross_bond_dists_1 (np.ndarray): Two dimensional array whose row m
            contains the distances from atom m of atoms n > m in the first
            local environment that are within a distance r_cut of both atom
            n and the central atom.
        cross_bond_dists_2 (np.ndarray): Two dimensional array whose row m
            contains the distances from atom m of atoms n > m in the second
            local environment that are within a distance r_cut of both atom
            n and the central atom.
        triplets_1 (np.ndarray): One dimensional array of integers whose entry
            m is the number of atoms in the first local environment that are
            within a distance r_cut of atom m.
        triplets_2 (np.ndarray): One dimensional array of integers whose entry
            m is the number of atoms in the second local environment that are
            within a distance r_cut of atom m.
        sig (float): 3-body signal variance hyperparameter.
        ls (float): 3-body length scale hyperparameter.
        r_cut (float): 3-body cutoff radius.
        cutoff_func (Callable): Cutoff function.
    Returns:
        float:
            Value of the 3-body local energy kernel.
    """
    kern = 0
    sig2 = sig*sig
    ls2 = 1 / (2*ls*ls)
    for m in range(bond_array_1.shape[0]):
        ri1 = bond_array_1[m, 0]
        fi1, _ = cutoff_func(r_cut, ri1, 0)
        for n in range(triplets_1[m]):
            ind1 = cross_bond_inds_1[m, m + n + 1]
            ri2 = bond_array_1[ind1, 0]
            fi2, _ = cutoff_func(r_cut, ri2, 0)
            ri3 = cross_bond_dists_1[m, m + n + 1]
            fi3, _ = cutoff_func(r_cut, ri3, 0)
            fi = fi1*fi2*fi3
            for p in range(bond_array_2.shape[0]):
                rj1 = bond_array_2[p, 0]
                fj1, _ = cutoff_func(r_cut, rj1, 0)
                for q in range(triplets_2[p]):
                    ind2 = cross_bond_inds_2[p, p + q + 1]
                    rj2 = bond_array_2[ind2, 0]
                    fj2, _ = cutoff_func(r_cut, rj2, 0)
                    rj3 = cross_bond_dists_2[p, p + q + 1]
                    fj3, _ = cutoff_func(r_cut, rj3, 0)
                    fj = fj1*fj2*fj3
                    # Differences between every pairing of the two
                    # triplets' bond lengths.
                    r11 = ri1-rj1
                    r12 = ri1-rj2
                    r13 = ri1-rj3
                    r21 = ri2-rj1
                    r22 = ri2-rj2
                    r23 = ri2-rj3
                    r31 = ri3-rj1
                    r32 = ri3-rj2
                    r33 = ri3-rj3
                    # Squared distance between the triplets for each of the
                    # six permutations of the second triplet's bonds.
                    C1 = r11*r11+r22*r22+r33*r33
                    C2 = r11*r11+r23*r23+r32*r32
                    C3 = r12*r12+r21*r21+r33*r33
                    C4 = r12*r12+r23*r23+r31*r31
                    C5 = r13*r13+r21*r21+r32*r32
                    C6 = r13*r13+r22*r22+r31*r31
                    # Sum of the Gaussians over the six permutations.
                    k = exp(-C1*ls2)+exp(-C2*ls2)+exp(-C3*ls2)+exp(-C4*ls2) + \
                        exp(-C5*ls2)+exp(-C6*ls2)
                    kern += sig2*k*fi*fj
    return kern
# -----------------------------------------------------------------------------
# general helper functions
# -----------------------------------------------------------------------------
@njit
def grad_constants(sig, ls):
    """Precompute the sig and ls powers shared by the gradient helpers.

    Returns:
        (float, ...): sig^2, 2*sig, 1/(2 ls^2), 1/ls^2, 1/ls^4, 1/ls^3,
            ls^2, and 1/ls^5, in that order.
    """
    sig_sq = sig * sig
    two_sig = 2 * sig  # d(sig^2)/d(sig)
    inv_l2 = 1 / (ls * ls)
    half_inv_l2 = 1 / (2 * ls * ls)
    inv_l4 = inv_l2 * inv_l2
    inv_l3 = 1 / (ls * ls * ls)
    l_sq = ls * ls
    inv_l5 = inv_l2 * inv_l3
    return sig_sq, two_sig, half_inv_l2, inv_l2, inv_l4, inv_l3, l_sq, inv_l5
@njit
def force_helper(A, B, C, D, fi, fj, fdi, fdj, ls1, ls2, ls3, sig2):
    """Force/force kernel contribution for one pair or triplet of atoms of
    the same type.

    The single-letter names follow Table IV of the SI of the FLARE paper:
    A-D are coordinate/distance products, fi/fj the cutoff values, fdi/fdj
    their derivatives, and ls1-ls3, sig2 precomputed hyperparameter powers.

    Returns:
        float: Force/force kernel contribution.
    """
    E = exp(-D * ls1)
    F = E * B * ls2
    G = -E * C * ls2
    H = A * E * ls2 - B * C * E * ls3
    # Combine the four cutoff-weighted terms and scale by sig^2.
    return sig2 * (E * fdi * fdj + F * fi * fdj + G * fdi * fj + H * fi * fj)
@njit
def grad_helper(A, B, C, D, fi, fj, fdi, fdj, ls1, ls2, ls3, ls4, ls5, ls6,
                sig2, sig3):
    """Force/force kernel contribution for one pair or triplet of like
    atoms, together with its derivatives with respect to the signal
    variance and length scale hyperparameters.
    E through M match the intermediate quantities of force_helper (Table IV
    of the SI of the FLARE paper); P through W are the terms entering the
    length-scale derivative X.
    Returns:
        (float, float, float): Kernel value N, its derivative with respect
        to sig (O = sig3 * M = 2 * sig * M, since N = sig^2 * M), and its
        derivative with respect to ls (X).
    """
    E = exp(-D * ls1)
    F = E * B * ls2
    G = -E * C * ls2
    H = A * E * ls2 - B * C * E * ls3
    I = E * fdi * fdj
    J = F * fi * fdj
    K = G * fdi * fj
    L = H * fi * fj
    M = I + J + K + L
    N = sig2 * M
    O = sig3 * M
    # Length-scale derivative terms.
    P = E * D * ls4
    Q = B * (ls2 * P - 2 * E * ls4)
    R = -C * (ls2 * P - 2 * E * ls4)
    S = (A * ls5 - B * C) * (P * ls3 - 4 * E * ls6) + 2 * E * A * ls4
    T = P * fdi * fdj
    U = Q * fi * fdj
    V = R * fdi * fj
    W = S * fi * fj
    X = sig2 * (T + U + V + W)
    return N, O, X
@njit
def force_energy_helper(B, D, fi, fj, fdi, ls1, ls2, sig2):
    """Force/energy kernel contribution for one pair or triplet of atoms.

    B and D are the coordinate/distance products used by force_helper;
    fi/fj are cutoff values, fdi the force-side cutoff derivative, and
    ls1/ls2/sig2 precomputed hyperparameter powers.

    Returns:
        float: Force/energy kernel contribution.
    """
    gauss = exp(-D * ls1)
    # Position-derivative term plus cutoff-derivative term, scaled by sig^2.
    return sig2 * (-(gauss * B * ls2) * fi * fj - gauss * fdi * fj)
# -----------------------------------------------------------------------------
# three body helper functions
# -----------------------------------------------------------------------------
@njit
def triplet_kernel(ci1, ci2, cj1, cj2, ri1, ri2, ri3, rj1, rj2, rj3, fi, fj,
                   fdi, fdj, ls1, ls2, ls3, sig2):
    """Force/force kernel between two triplets, summed over all six bond
    permutations of the second triplet.

    Returns:
        float: Permutation-summed force/force kernel contribution.
    """
    # Differences between every pairing of the two triplets' bond lengths.
    r11 = ri1 - rj1
    r12 = ri1 - rj2
    r13 = ri1 - rj3
    r21 = ri2 - rj1
    r22 = ri2 - rj2
    r23 = ri2 - rj3
    r31 = ri3 - rj1
    r32 = ri3 - rj2
    r33 = ri3 - rj3
    # Accumulate the six permutation terms in a fixed order.
    kern = three_body_helper_1(ci1, ci2, cj1, cj2, r11, r22, r33, fi, fj,
                               fdi, fdj, ls1, ls2, ls3, sig2)
    kern += three_body_helper_2(ci2, ci1, cj2, cj1, r21, r13, r32, fi, fj,
                                fdi, fdj, ls1, ls2, ls3, sig2)
    kern += three_body_helper_2(ci1, ci2, cj1, cj2, r12, r23, r31, fi, fj,
                                fdi, fdj, ls1, ls2, ls3, sig2)
    kern += three_body_helper_1(ci1, ci2, cj2, cj1, r12, r21, r33, fi, fj,
                                fdi, fdj, ls1, ls2, ls3, sig2)
    kern += three_body_helper_2(ci2, ci1, cj1, cj2, r22, r13, r31, fi, fj,
                                fdi, fdj, ls1, ls2, ls3, sig2)
    kern += three_body_helper_2(ci1, ci2, cj2, cj1, r11, r23, r32, fi, fj,
                                fdi, fdj, ls1, ls2, ls3, sig2)
    return kern
@njit
def triplet_kernel_grad(ci1, ci2, cj1, cj2, ri1, ri2, ri3, rj1, rj2, rj3, fi,
                        fj, fdi, fdj, ls1, ls2, ls3, ls4, ls5, ls6, sig2,
                        sig3):
    """Force/force kernel between two triplets and its hyperparameter
    gradient, summed over all six bond permutations (mirrors
    triplet_kernel).
    Returns:
        (float, float, float): Permutation-summed kernel value N, its
        derivative with respect to sig (O), and its derivative with
        respect to ls (X).
    """
    # Differences between every pairing of the two triplets' bond lengths.
    r11 = ri1-rj1
    r12 = ri1-rj2
    r13 = ri1-rj3
    r21 = ri2-rj1
    r22 = ri2-rj2
    r23 = ri2-rj3
    r31 = ri3-rj1
    r32 = ri3-rj2
    r33 = ri3-rj3
    N1, O1, X1 = \
        three_body_grad_helper_1(ci1, ci2, cj1, cj2, r11, r22, r33, fi, fj,
                                 fdi, fdj, ls1, ls2, ls3, ls4, ls5, ls6, sig2,
                                 sig3)
    N2, O2, X2 = \
        three_body_grad_helper_2(ci2, ci1, cj2, cj1, r21, r13, r32, fi, fj,
                                 fdi, fdj, ls1, ls2, ls3, ls4, ls5, ls6, sig2,
                                 sig3)
    N3, O3, X3 = \
        three_body_grad_helper_2(ci1, ci2, cj1, cj2, r12, r23, r31, fi, fj,
                                 fdi, fdj, ls1, ls2, ls3, ls4, ls5, ls6, sig2,
                                 sig3)
    N4, O4, X4 = \
        three_body_grad_helper_1(ci1, ci2, cj2, cj1, r12, r21, r33, fi, fj,
                                 fdi, fdj, ls1, ls2, ls3, ls4, ls5, ls6, sig2,
                                 sig3)
    N5, O5, X5 = \
        three_body_grad_helper_2(ci2, ci1, cj1, cj2, r22, r13, r31, fi, fj,
                                 fdi, fdj, ls1, ls2, ls3, ls4, ls5, ls6, sig2,
                                 sig3)
    N6, O6, X6 = \
        three_body_grad_helper_2(ci1, ci2, cj2, cj1, r11, r23, r32, fi, fj,
                                 fdi, fdj, ls1, ls2, ls3, ls4, ls5, ls6, sig2,
                                 sig3)
    N = N1 + N2 + N3 + N4 + N5 + N6
    O = O1 + O2 + O3 + O4 + O5 + O6
    X = X1 + X2 + X3 + X4 + X5 + X6
    return N, O, X
@njit
def three_body_helper_1(ci1, ci2, cj1, cj2, r11, r22, r33,
fi, fj, fdi, fdj,
ls1, ls2, ls3, sig2):
A = ci1*cj1+ci2*cj2
B = r11*ci1+r22*ci2
C = r11*cj1+r22*cj2
D = r11*r11+r22*r22+r33*r33
M = force_helper(A, B, C, D, fi, fj, fdi, fdj, ls1, ls2, ls3, sig2)
return M
@njit
def three_body_helper_2(ci1, ci2, cj1, cj2, r12, r23, r31,
fi, fj, fdi, fdj,
ls1, ls2, ls3, sig2):
A = ci1*cj2
B = r12*ci1+r23*ci2
C = r12*cj2+r31*cj1
D = r12*r12+r23*r23+r31*r31
M = force_helper(A, B, C, D, fi, fj, fdi, fdj, ls1, ls2, ls3, sig2)
return M
@njit
def three_body_grad_helper_1(ci1, ci2, cj1, cj2, r11, r22, r33, fi, fj, fdi,
fdj, ls1, ls2, ls3, ls4, ls5, ls6, sig2, sig3):
A = ci1*cj1+ci2*cj2
B = r11*ci1+r22*ci2
C = r11*cj1+r22*cj2
D = r11*r11+r22*r22+r33*r33
N, O, X = grad_helper(A, B, C, D, fi, fj, fdi, fdj, ls1, ls2, ls3, ls4,
ls5, ls6, sig2, sig3)
return N, O, X
@njit
def three_body_grad_helper_2(ci1, ci2, cj1, cj2, r12, r23, r31, fi, fj, fdi,
fdj, ls1, ls2, ls3, ls4, ls5, ls6, sig2, sig3):
A = ci1*cj2
B = r12*ci1+r23*ci2
C = r12*cj2+r31*cj1
D = r12*r12+r23*r23+r31*r31
N, O, X = grad_helper(A, B, C, D, fi, fj, fdi, fdj, ls1, ls2, ls3, ls4,
ls5, ls6, sig2, sig3)
return N, O, X
@njit
def three_body_en_helper(ci1, ci2, r11, r22, r33, fi, fj, fdi, ls1, ls2, sig2):
B = r11 * ci1 + r22 * ci2
D = r11 * r11 + r22 * r22 + r33 * r33
return force_energy_helper(B, D, fi, fj, fdi, ls1, ls2, sig2)
@njit
def triplet_force_en_kernel(ci1, ci2, ri1, ri2, ri3, rj1, rj2, rj3,
fi, fj, fdi, ls1, ls2, sig2):
r11 = ri1-rj1
r12 = ri1-rj2
r13 = ri1-rj3
r21 = ri2-rj1
r22 = ri2-rj2
r23 = ri2-rj3
r31 = ri3-rj1
r32 = ri3-rj2
r33 = ri3-rj3
I1 = three_body_en_helper(ci1, ci2, r11, r22, r33, fi, fj,
fdi, ls1, ls2, sig2)
I2 = three_body_en_helper(ci1, ci2, r13, r21, r32, fi, fj,
fdi, ls1, ls2, sig2)
I3 = three_body_en_helper(ci1, ci2, r12, r23, r31, fi, fj,
fdi, ls1, ls2, sig2)
I4 = three_body_en_helper(ci1, ci2, r12, r21, r33, fi, fj,
fdi, ls1, ls2, sig2)
I5 = three_body_en_helper(ci1, ci2, r13, r22, r31, fi, fj,
fdi, ls1, ls2, sig2)
I6 = three_body_en_helper(ci1, ci2, r11, r23, r32, fi, fj,
fdi, ls1, ls2, sig2)
return I1 + I2 + I3 + I4 + I5 + I6
_str_to_kernel = {'two_body': two_body,
'two_body_en': two_body_en,
'two_body_force_en': two_body_force_en,
'three_body': three_body,
'three_body_en': three_body_en,
'three_body_force_en': three_body_force_en,
'two_plus_three_body': two_plus_three_body,
'two_plus_three_en': two_plus_three_en,
'two_plus_three_force_en': two_plus_three_force_en
}
def str_to_kernel(string: str, include_grad: bool = False):
if string not in _str_to_kernel.keys():
raise ValueError("Kernel {} not found in list of available "
"kernels{}:".format(string, _str_to_kernel.keys()))
if not include_grad:
return _str_to_kernel[string]
else:
if 'two' in string and 'three' in string:
return _str_to_kernel[string], two_plus_three_body_grad
elif 'two' in string and 'three' not in string:
return _str_to_kernel[string], two_body_grad
elif 'two' not in string and 'three' in string:
return _str_to_kernel[string], three_body_grad
else:
raise ValueError("Gradient callable for {} not found".format(
string))
| 36.783951 | 79 | 0.551456 |
089b9614754ffabe470cd3218246846e575761a0 | 174 | py | Python | data/groups_const.py | zevgenia/Python_training | c785cbd0e4bb8f0c393b0a5a1bcd120f4c503848 | [
"Apache-2.0"
] | null | null | null | data/groups_const.py | zevgenia/Python_training | c785cbd0e4bb8f0c393b0a5a1bcd120f4c503848 | [
"Apache-2.0"
] | null | null | null | data/groups_const.py | zevgenia/Python_training | c785cbd0e4bb8f0c393b0a5a1bcd120f4c503848 | [
"Apache-2.0"
] | null | null | null | from model.group import Group
# Group fixtures (Russian names/headers/footers) consumed by data-driven tests.
testdata = [
    Group(name="Друзья", header="Друзья", footer="Друзья"),
    Group(name="Работа", header="Сослуживцы", footer="Сослуживцы")
] | 21.75 | 66 | 0.683908 |
1a80e5723e5dd7da419e4c668ead426be52d7312 | 562 | py | Python | tests/test_scraper.py | promptapi/scraper-py | d47c5c1cdc6a01084ab5659a3219a5a652792053 | [
"MIT"
] | 4 | 2020-09-03T06:38:04.000Z | 2022-03-04T04:14:03.000Z | tests/test_scraper.py | promptapi/scraper-py | d47c5c1cdc6a01084ab5659a3219a5a652792053 | [
"MIT"
] | null | null | null | tests/test_scraper.py | promptapi/scraper-py | d47c5c1cdc6a01084ab5659a3219a5a652792053 | [
"MIT"
] | 3 | 2020-09-07T21:34:02.000Z | 2021-10-11T06:47:39.000Z | # pylint: disable=R0201,E1101
import os
import unittest
from scraper import Scraper
EXISTING_PROMPTAPI_TOKEN = os.environ.get('PROMPTAPI_TOKEN', None)
class TestSimple(unittest.TestCase):
    """Smoke tests for :class:`Scraper` error handling with a blank API token."""

    def test_api_token(self):
        """When PROMPTAPI_TOKEN is empty, ``get()`` must report an error."""
        os.environ['PROMPTAPI_TOKEN'] = ''  # noqa: S105
        scraper = Scraper('https://fake.com/')
        # Both the bare call and the call with query params must fail the same way.
        for call_kwargs in ({}, {'params': 'foo'}):
            response = scraper.get(**call_kwargs)
            self.assertTrue(response.get('error', None))
# Allow running this test module directly (``python test_scraper.py``).
if __name__ == '__main__':
    unittest.main()
| 22.48 | 66 | 0.670819 |
2e61c7574dd0510bbdd6e35b74585955eb35622f | 14,814 | py | Python | numpyro/infer/sa.py | cerbelaut/numpyro | f28e17e43e4cd3daca3de7a6fc04adcd337da31c | [
"Apache-2.0"
] | null | null | null | numpyro/infer/sa.py | cerbelaut/numpyro | f28e17e43e4cd3daca3de7a6fc04adcd337da31c | [
"Apache-2.0"
] | null | null | null | numpyro/infer/sa.py | cerbelaut/numpyro | f28e17e43e4cd3daca3de7a6fc04adcd337da31c | [
"Apache-2.0"
] | null | null | null | from collections import namedtuple
from jax import device_put, lax, random, vmap
from jax.flatten_util import ravel_pytree
import jax.numpy as jnp
from jax.scipy.special import logsumexp
import numpyro.distributions as dist
from numpyro.distributions.util import cholesky_update
from numpyro.infer.mcmc import MCMCKernel
from numpyro.infer.util import init_to_uniform, initialize_model
from numpyro.util import identity
def _get_proposal_loc_and_scale(samples, loc, scale, new_sample):
    # get loc/scale of q_{-n} (Algorithm 1, line 5 of ref [1]) for n from 1 -> N
    # these loc/scale will be stacked to the first dim; so
    # proposal_loc.shape[0] = proposal_scale.shape[0] = N
    # Here, we use the numerical stability procedure in Appendix 6 of [1].
    # weight = 1/N: contribution of one sample to the running mean/covariance.
    weight = 1 / samples.shape[0]
    if scale.ndim > loc.ndim:
        # Dense mass: `scale` is a Cholesky factor. Rank-1 updates/downdates via
        # `cholesky_update` avoid refactorizing the full covariance per leave-one-out.
        new_scale = cholesky_update(scale, new_sample - loc, weight)
        proposal_scale = cholesky_update(new_scale, samples - loc, -weight)
        proposal_scale = cholesky_update(proposal_scale, new_sample - samples, - (weight ** 2))
    else:
        # Diagonal mass: update variances elementwise, then take the sqrt.
        var = jnp.square(scale) + weight * jnp.square(new_sample - loc)
        proposal_var = var - weight * jnp.square(samples - loc)
        proposal_var = proposal_var - weight ** 2 * jnp.square(new_sample - samples)
        proposal_scale = jnp.sqrt(proposal_var)
    proposal_loc = loc + weight * (new_sample - samples)
    return proposal_loc, proposal_scale
def _sample_proposal(inv_mass_matrix_sqrt, rng_key, batch_shape=()):
eps = random.normal(rng_key, batch_shape + jnp.shape(inv_mass_matrix_sqrt)[:1])
if inv_mass_matrix_sqrt.ndim == 1:
r = jnp.multiply(inv_mass_matrix_sqrt, eps)
elif inv_mass_matrix_sqrt.ndim == 2:
r = jnp.matmul(inv_mass_matrix_sqrt, eps[..., None])[..., 0]
else:
raise ValueError("Mass matrix has incorrect number of dims.")
return r
SAAdaptState = namedtuple('SAAdaptState', ['zs', 'pes', 'loc', 'inv_mass_matrix_sqrt'])
SAState = namedtuple('SAState', ['i', 'z', 'potential_energy', 'accept_prob',
'mean_accept_prob', 'diverging', 'adapt_state', 'rng_key'])
"""
A :func:`~collections.namedtuple` used in Sample Adaptive MCMC.
This consists of the following fields:
- **i** - iteration. This is reset to 0 after warmup.
- **z** - Python collection representing values (unconstrained samples from
the posterior) at latent sites.
- **potential_energy** - Potential energy computed at the given value of ``z``.
- **accept_prob** - Acceptance probability of the proposal. Note that ``z``
does not correspond to the proposal if it is rejected.
- **mean_accept_prob** - Mean acceptance probability until current iteration
during warmup or sampling (for diagnostics).
- **diverging** - A boolean value to indicate whether the new sample potential energy
is diverging from the current one.
- **adapt_state** - A ``SAAdaptState`` namedtuple which contains adaptation information:
+ **zs** - Step size to be used by the integrator in the next iteration.
+ **pes** - Potential energies of `zs`.
+ **loc** - Mean of those `zs`.
+ **inv_mass_matrix_sqrt** - If using dense mass matrix, this is Cholesky of the
covariance of `zs`. Otherwise, this is standard deviation of those `zs`.
- **rng_key** - random number generator seed used for the iteration.
"""
def _numpy_delete(x, idx):
"""
Gets the subarray from `x` where data from index `idx` on the first axis is removed.
"""
# NB: numpy.delete is not yet available in JAX
mask = jnp.arange(x.shape[0] - 1) < idx
return jnp.where(mask.reshape((-1,) + (1,) * (x.ndim - 1)), x[:-1], x[1:])
# TODO: consider to expose this functional style
def _sa(potential_fn=None, potential_fn_gen=None):
    """Functional interface for Sample Adaptive MCMC.

    Returns an ``(init_kernel, sample_kernel)`` pair of closures that share the
    warmup length ``wa_steps``. Exactly one of `potential_fn` (a fixed callable)
    or `potential_fn_gen` (a factory invoked with model args/kwargs) should be
    supplied.
    """
    wa_steps = None
    # Proposals whose potential energy jumps by more than this are flagged
    # as diverging in the returned SAState.
    max_delta_energy = 1000.
    def init_kernel(init_params,
                    num_warmup,
                    adapt_state_size=None,
                    inverse_mass_matrix=None,
                    dense_mass=False,
                    model_args=(),
                    model_kwargs=None,
                    rng_key=random.PRNGKey(0)):
        """Build the initial :class:`SAState` around ``init_params``."""
        nonlocal wa_steps
        wa_steps = num_warmup
        pe_fn = potential_fn
        if potential_fn_gen:
            if pe_fn is not None:
                raise ValueError('Only one of `potential_fn` or `potential_fn_gen` must be provided.')
            else:
                kwargs = {} if model_kwargs is None else model_kwargs
                pe_fn = potential_fn_gen(*model_args, **kwargs)
        rng_key_sa, rng_key_zs, rng_key_z = random.split(rng_key, 3)
        z = init_params
        z_flat, unravel_fn = ravel_pytree(z)
        if inverse_mass_matrix is None:
            inverse_mass_matrix = jnp.identity(z_flat.shape[-1]) if dense_mass else jnp.ones(z_flat.shape[-1])
        inv_mass_matrix_sqrt = jnp.linalg.cholesky(inverse_mass_matrix) if dense_mass \
            else jnp.sqrt(inverse_mass_matrix)
        if adapt_state_size is None:
            # XXX: heuristic choice
            adapt_state_size = 2 * z_flat.shape[-1]
        else:
            assert adapt_state_size > 1, 'adapt_state_size should be greater than 1.'
        # NB: mean is init_params
        zs = z_flat + _sample_proposal(inv_mass_matrix_sqrt, rng_key_zs, (adapt_state_size,))
        # compute potential energies
        pes = lax.map(lambda z: pe_fn(unravel_fn(z)), zs)
        if dense_mass:
            cov = jnp.cov(zs, rowvar=False, bias=True)
            if cov.shape == ():  # JAX returns scalar for 1D input
                cov = cov.reshape((1, 1))
            cholesky = jnp.linalg.cholesky(cov)
            # if cholesky is NaN, we use the scale from `sample_proposal` here
            inv_mass_matrix_sqrt = jnp.where(jnp.any(jnp.isnan(cholesky)), inv_mass_matrix_sqrt, cholesky)
        else:
            inv_mass_matrix_sqrt = jnp.std(zs, 0)
        adapt_state = SAAdaptState(zs, pes, jnp.mean(zs, 0), inv_mass_matrix_sqrt)
        # Start the chain from a uniformly chosen member of the adaptive state.
        k = random.categorical(rng_key_z, jnp.zeros(zs.shape[0]))
        z = unravel_fn(zs[k])
        pe = pes[k]
        sa_state = SAState(0, z, pe, 0., 0., False, adapt_state, rng_key_sa)
        return device_put(sa_state)
    def sample_kernel(sa_state, model_args=(), model_kwargs=None):
        """Advance the chain one SA step; returns the next :class:`SAState`."""
        pe_fn = potential_fn
        if potential_fn_gen:
            pe_fn = potential_fn_gen(*model_args, **model_kwargs)
        zs, pes, loc, scale = sa_state.adapt_state
        # we recompute loc/scale after each iteration to avoid precision loss
        # XXX: consider to expose a setting to do this job periodically
        # to save some computations
        loc = jnp.mean(zs, 0)
        if scale.ndim == 2:
            cov = jnp.cov(zs, rowvar=False, bias=True)
            if cov.shape == ():  # JAX returns scalar for 1D input
                cov = cov.reshape((1, 1))
            cholesky = jnp.linalg.cholesky(cov)
            scale = jnp.where(jnp.any(jnp.isnan(cholesky)), scale, cholesky)
        else:
            scale = jnp.std(zs, 0)
        rng_key, rng_key_z, rng_key_reject, rng_key_accept = random.split(sa_state.rng_key, 4)
        _, unravel_fn = ravel_pytree(sa_state.z)
        z = loc + _sample_proposal(scale, rng_key_z)
        pe = pe_fn(unravel_fn(z))
        pe = jnp.where(jnp.isnan(pe), jnp.inf, pe)
        diverging = (pe - sa_state.potential_energy) > max_delta_energy
        # NB: all terms having the pattern *s will have shape N x ...
        # and all terms having the pattern *s_ will have shape (N + 1) x ...
        locs, scales = _get_proposal_loc_and_scale(zs, loc, scale, z)
        zs_ = jnp.concatenate([zs, z[None, :]])
        pes_ = jnp.concatenate([pes, pe[None]])
        locs_ = jnp.concatenate([locs, loc[None, :]])
        scales_ = jnp.concatenate([scales, scale[None, ...]])
        if scale.ndim == 2:  # dense_mass
            log_weights_ = dist.MultivariateNormal(locs_, scale_tril=scales_).log_prob(zs_) + pes_
        else:
            log_weights_ = dist.Normal(locs_, scales_).log_prob(zs_).sum(-1) + pes_
        # mask invalid values (nan, +inf) by -inf
        log_weights_ = jnp.where(jnp.isfinite(log_weights_), log_weights_, -jnp.inf)
        # get rejecting index
        j = random.categorical(rng_key_reject, log_weights_)
        zs = _numpy_delete(zs_, j)
        pes = _numpy_delete(pes_, j)
        loc = locs_[j]
        scale = scales_[j]
        adapt_state = SAAdaptState(zs, pes, loc, scale)
        # NB: weights[-1] / sum(weights) is the probability of rejecting the new sample `z`.
        accept_prob = 1 - jnp.exp(log_weights_[-1] - logsumexp(log_weights_))
        itr = sa_state.i + 1
        # `n` restarts the running average of accept_prob after warmup ends.
        n = jnp.where(sa_state.i < wa_steps, itr, itr - wa_steps)
        mean_accept_prob = sa_state.mean_accept_prob + (accept_prob - sa_state.mean_accept_prob) / n
        # XXX: we make a modification of SA sampler in [1]
        # in [1], each MCMC state contains N points `zs`
        # here we do resampling to pick randomly a point from those N points
        k = random.categorical(rng_key_accept, jnp.zeros(zs.shape[0]))
        z = unravel_fn(zs[k])
        pe = pes[k]
        return SAState(itr, z, pe, accept_prob, mean_accept_prob, diverging, adapt_state, rng_key)
    return init_kernel, sample_kernel
# TODO: this shares almost the same code as HMC, so we can abstract out much of the implementation
class SA(MCMCKernel):
    """
    Sample Adaptive MCMC, a gradient-free sampler.
    This is a very fast (in term of n_eff / s) sampler but requires
    many warmup (burn-in) steps. In each MCMC step, we only need to
    evaluate potential function at one point.
    Note that unlike in reference [1], we return a randomly selected (i.e. thinned)
    subset of approximate posterior samples of size num_chains x num_samples
    instead of num_chains x num_samples x adapt_state_size.
    .. note:: We recommend to use this kernel with `progress_bar=False` in :class:`MCMC`
        to reduce JAX's dispatch overhead.
    **References:**
    1. *Sample Adaptive MCMC* (https://papers.nips.cc/paper/9107-sample-adaptive-mcmc),
       Michael Zhu
    :param model: Python callable containing Pyro :mod:`~numpyro.primitives`.
        If model is provided, `potential_fn` will be inferred using the model.
    :param potential_fn: Python callable that computes the potential energy
        given input parameters. The input parameters to `potential_fn` can be
        any python collection type, provided that `init_params` argument to
        :meth:`init` has the same type.
    :param int adapt_state_size: The number of points to generate proposal
        distribution. Defaults to 2 times latent size.
    :param bool dense_mass: A flag to decide if mass matrix is dense or
        diagonal (default to ``dense_mass=True``)
    :param callable init_strategy: a per-site initialization function.
        See :ref:`init_strategy` section for available functions.
    """
    def __init__(self, model=None, potential_fn=None, adapt_state_size=None,
                 dense_mass=True, init_strategy=init_to_uniform):
        # Exactly one of `model` / `potential_fn` must be given (XOR check).
        if not (model is None) ^ (potential_fn is None):
            raise ValueError('Only one of `model` or `potential_fn` must be specified.')
        self._model = model
        self._potential_fn = potential_fn
        self._adapt_state_size = adapt_state_size
        self._dense_mass = dense_mass
        self._init_strategy = init_strategy
        # Closures below are built lazily in `_init_state`.
        self._init_fn = None
        self._postprocess_fn = None
        self._sample_fn = None
    def _init_state(self, rng_key, model_args, model_kwargs, init_params):
        """Build the init/sample closures (and postprocess fn when a model is given)."""
        if self._model is not None:
            init_params, potential_fn, postprocess_fn, _ = initialize_model(
                rng_key,
                self._model,
                dynamic_args=True,
                init_strategy=self._init_strategy,
                model_args=model_args,
                model_kwargs=model_kwargs)
            init_params = init_params[0]
            # NB: init args is different from HMC
            self._init_fn, sample_fn = _sa(potential_fn_gen=potential_fn)
            if self._postprocess_fn is None:
                self._postprocess_fn = postprocess_fn
        else:
            self._init_fn, sample_fn = _sa(potential_fn=self._potential_fn)
        if self._sample_fn is None:
            self._sample_fn = sample_fn
        return init_params
    def init(self, rng_key, num_warmup, init_params=None, model_args=(), model_kwargs={}):
        """Initialize the SA state, vectorizing over chains when ``rng_key`` is batched."""
        # non-vectorized
        if rng_key.ndim == 1:
            rng_key, rng_key_init_model = random.split(rng_key)
        # vectorized
        else:
            rng_key, rng_key_init_model = jnp.swapaxes(vmap(random.split)(rng_key), 0, 1)
            # we need only a single key for initializing PE / constraints fn
            rng_key_init_model = rng_key_init_model[0]
        init_params = self._init_state(rng_key_init_model, model_args, model_kwargs, init_params)
        if self._potential_fn and init_params is None:
            raise ValueError('Valid value of `init_params` must be provided with'
                             ' `potential_fn`.')
        # NB: init args is different from HMC
        sa_init_fn = lambda init_params, rng_key: self._init_fn(  # noqa: E731
            init_params,
            num_warmup=num_warmup,
            adapt_state_size=self._adapt_state_size,
            dense_mass=self._dense_mass,
            rng_key=rng_key,
            model_args=model_args,
            model_kwargs=model_kwargs,
        )
        if rng_key.ndim == 1:
            init_state = sa_init_fn(init_params, rng_key)
        else:
            init_state = vmap(sa_init_fn)(init_params, rng_key)
            # Vectorize sampling over the chain axis only (args/kwargs are shared).
            sample_fn = vmap(self._sample_fn, in_axes=(0, None, None))
            self._sample_fn = sample_fn
        return init_state
    @property
    def sample_field(self):
        """Name of the SAState field holding the posterior sample."""
        return 'z'
    @property
    def default_fields(self):
        """State fields collected by :class:`MCMC` by default."""
        return ('z', 'diverging')
    def get_diagnostics_str(self, state):
        """Short progress-bar diagnostics string for the given state."""
        return 'acc. prob={:.2f}'.format(state.mean_accept_prob)
    def postprocess_fn(self, args, kwargs):
        """Return the constrain/transform function for samples (identity if none)."""
        if self._postprocess_fn is None:
            return identity
        return self._postprocess_fn(*args, **kwargs)
    def sample(self, state, model_args, model_kwargs):
        """
        Run SA from the given :data:`~numpyro.infer.mcmc.SAState` and return the resulting
        :data:`~numpyro.infer.mcmc.SAState`.
        :param SAState state: Represents the current state.
        :param model_args: Arguments provided to the model.
        :param model_kwargs: Keyword arguments provided to the model.
        :return: Next `state` after running SA.
        """
        return self._sample_fn(state, model_args, model_kwargs)
| 45.027356 | 110 | 0.649251 |
93b92fe6572d7003222a2e78e87453683b31ed1f | 4,859 | py | Python | src/python/nimbusml/internal/core/preprocessing/normalization/binner.py | michaelgsharp/NimbusML | 50031157265f49eec85d27fe67582d9ddaf01ef9 | [
"MIT"
] | 134 | 2018-11-01T22:15:24.000Z | 2019-05-04T11:30:08.000Z | src/python/nimbusml/internal/core/preprocessing/normalization/binner.py | michaelgsharp/NimbusML | 50031157265f49eec85d27fe67582d9ddaf01ef9 | [
"MIT"
] | 226 | 2019-05-07T19:00:44.000Z | 2021-01-06T07:59:48.000Z | src/python/nimbusml/internal/core/preprocessing/normalization/binner.py | michaelgsharp/NimbusML | 50031157265f49eec85d27fe67582d9ddaf01ef9 | [
"MIT"
] | 43 | 2019-05-15T20:19:42.000Z | 2022-03-30T10:26:07.000Z | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
# --------------------------------------------------------------------------------------------
# - Generated by tools/entrypoint_compiler.py: do not edit by hand
"""
Binner
"""
__all__ = ["Binner"]
from ....entrypoints.transforms_binnormalizer import transforms_binnormalizer
from ....utils.utils import trace
from ...base_pipeline_item import BasePipelineItem, DefaultSignature
class Binner(BasePipelineItem, DefaultSignature):
    """
    Normalizes columns as specified below.
    .. remarks::
        In linear classification algorithms instances are viewed as vectors
        in
        multi-dimensional space. Since the range of values of raw data varies
        widely, some objective functions do not work properly without
        normalization. For example, if one of the features has a broad range
        of
        values, the distances between points is governed by this particular
        feature. Therefore, the range of all features should be normalized so
        that each feature contributes approximately proportionately to the
        final
        distance. This can provide significant speedup and accuracy benefits.
        In
        all the linear algorithms in nimbusml (:py:class:`Logistic Regression
        <nimbusml.linear_model.LogisticRegressionClassifier>`,
        :py:class:`Averaged Perceptron
        <nimbusml.linear_model.AveragedPerceptronBinaryClassifier>`, etc.),
        the default is to normalize features before training.
        ``Binner`` creates equi-density bins, and then normalizes every
        value in the bin to be divided by the total number of bins. The
        number of bins the normalizer uses can be defined by the user, and
        the
        default is 1000.
    :param num_bins: Max number of bins, power of 2 recommended.
    :param fix_zero: Whether to map zero to zero, preserving sparsity.
    :param max_training_examples: Max number of examples used to train the
        normalizer.
    :param params: Additional arguments sent to compute engine.
    .. seealso::
        :py:class:`MinMaxScaler
        <nimbusml.preprocessing.normalization.MinMaxScaler>`,
        :py:class:`MeanVarianceScaler
        <nimbusml.preprocessing.normalization.MeanVarianceScaler>`,
        :py:class:`LogMeanVarianceScaler
        <nimbusml.preprocessing.normalization.LogMeanVarianceScaler>`,
        :py:class:`GlobalContrastRowScaler
        <nimbusml.preprocessing.normalization.GlobalContrastRowScaler>`.
    .. index:: normalize, preprocessing
    Example:
       .. literalinclude:: /../nimbusml/examples/Binner.py
              :language: python
    """
    @trace
    def __init__(
            self,
            num_bins=1024,
            fix_zero=True,
            max_training_examples=1000000000,
            **params):
        # NB: generated code ("do not edit by hand" per file header); simply
        # stores the hyper-parameters and forwards the rest to the base item.
        BasePipelineItem.__init__(
            self, type='transform', **params)
        self.num_bins = num_bins
        self.fix_zero = fix_zero
        self.max_training_examples = max_training_examples
    @property
    def _entrypoint(self):
        # Backend entrypoint implementing the bin-normalizer transform.
        return transforms_binnormalizer
    @trace
    def _get_node(self, **all_args):
        """Assemble the entrypoint call: resolve input/output columns and hyper-params."""
        # Prefer explicit attributes; fall back to (and consume) entries in all_args.
        input_columns = self.input
        if input_columns is None and 'input' in all_args:
            input_columns = all_args['input']
        if 'input' in all_args:
            all_args.pop('input')
        output_columns = self.output
        if output_columns is None and 'output' in all_args:
            output_columns = all_args['output']
        if 'output' in all_args:
            all_args.pop('output')
        # validate input
        if input_columns is None:
            raise ValueError(
                "'None' input passed when it cannot be none.")
        if not isinstance(input_columns, list):
            raise ValueError(
                "input has to be a list of strings, instead got %s" %
                type(input_columns))
        # validate output
        if output_columns is None:
            output_columns = input_columns
        if not isinstance(output_columns, list):
            raise ValueError(
                "output has to be a list of strings, instead got %s" %
                type(output_columns))
        # One {Source, Name} pair per input/output column.
        algo_args = dict(
            column=[
                dict(
                    Source=i,
                    Name=o) for i,
                o in zip(
                    input_columns,
                    output_columns)] if input_columns else None,
            num_bins=self.num_bins,
            fix_zero=self.fix_zero,
            max_training_examples=self.max_training_examples)
        all_args.update(algo_args)
        return self._entrypoint(**all_args)
| 34.707143 | 94 | 0.617411 |
7a6f12f72b8aa90dd95b67209d7b47435edfc5c3 | 4,843 | py | Python | model/alexnet/alexnet.py | rawrawiz/ICNN | 145be374d690bdec32d3a358259584125d08f246 | [
"MIT"
] | null | null | null | model/alexnet/alexnet.py | rawrawiz/ICNN | 145be374d690bdec32d3a358259584125d08f246 | [
"MIT"
] | null | null | null | model/alexnet/alexnet.py | rawrawiz/ICNN | 145be374d690bdec32d3a358259584125d08f246 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import h5py
import math
import copy
import scipy.io as io
import numpy as np
from scipy.io import loadmat
import torch
import torch.nn as nn
import torch.nn.functional as F
from model.alexnet.conv_mask import conv_mask
class alexnet(nn.Module):
    """AlexNet-style backbone whose last two conv stages are interpretable
    `conv_mask` layers; pretrained convolutional weights are loaded from a
    MatConvNet-format ``.mat`` file at ``pretrain_path``.
    """
    def __init__(self, pretrain_path, label_num, dropoutrate, losstype):
        super(alexnet, self).__init__()
        # NOTE(review): attribute name keeps the original misspelling
        # ("pretrian") -- used consistently in init_weight below.
        self.pretrian_path = pretrain_path
        self.dropoutrate = dropoutrate
        self.label_num = label_num
        self.losstype = losstype
        self.conv1 = nn.Sequential(
            nn.Conv2d(3, 96, kernel_size=(11, 11), stride=(4, 4), padding=(0, 0)),
            nn.ReLU(inplace=True),
            nn.LocalResponseNorm(5, alpha=0.00002, beta=0.75, k=1.0),)
        self.maxpool1 = nn.Sequential(
            nn.MaxPool2d(kernel_size=(3, 3), stride=(2, 2), padding=(0, 0), dilation=(1, 1), ceil_mode=False), )
        self.conv2 = nn.Sequential(
            nn.Conv2d(96, 256, kernel_size=(5, 5), stride=(1, 1), padding=(2, 2)),
            nn.ReLU(inplace=True),
            nn.LocalResponseNorm(5, alpha=0.00002, beta=0.75, k=1.0),)
        self.maxpool2 = nn.Sequential(
            nn.MaxPool2d(kernel_size=(3, 3), stride=(2, 2), padding=(0, 0), dilation=(1, 1), ceil_mode=False), )
        self.conv3 = nn.Sequential(
            nn.Conv2d(256, 384, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)),
            nn.ReLU(inplace=True),
            nn.Conv2d(384, 384, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)),
            nn.ReLU(inplace=True),
            nn.Conv2d(384, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)),
            nn.ReLU(inplace=True), )
        self.mask1 = nn.Sequential(
            conv_mask(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), labelnum=self.label_num, loss_type = self.losstype, ), )
        self.maxpool3 = nn.Sequential(
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=(3, 3), stride=(2, 2), padding=(0, 0), dilation=(1, 1), ceil_mode=False), )
        self.mask2 = nn.Sequential(
            conv_mask(256, 4096, kernel_size=(6, 6), stride=(1, 1), padding=(0, 0), labelnum=self.label_num, loss_type = self.losstype, ), )
        self.relu = nn.Sequential(
            nn.ReLU(inplace=True), )
        # 1x1 convolutions acting as the fully-connected classifier head.
        self.line = nn.Sequential(
            nn.Dropout2d(p=self.dropoutrate),
            nn.Conv2d(4096, 4096, kernel_size=(1, 1), stride=(1, 1), padding=(0, 0)),
            nn.ReLU(inplace=True),
            nn.Dropout2d(p=self.dropoutrate),
            nn.Conv2d(4096, self.label_num, kernel_size=(1, 1), stride=(1, 1), padding=(0, 0)), )
        self.init_weight()
    def init_weight(self):
        """Load pretrained conv weights from the .mat file; randomly init the rest.

        MatConvNet stores filters as (H, W, in, out); transpose([3, 2, 0, 1])
        converts to PyTorch's (out, in, H, W). The (w, w) concatenation along
        the input-channel axis presumably compensates for the original 2-group
        convolutions, whose stored filters cover only half the input channels
        -- TODO confirm against the exported model.
        """
        data = loadmat(self.pretrian_path)
        w, b = data['layers'][0][0][0]['weights'][0][0]
        self.conv1[0].weight.data.copy_(torch.from_numpy(w.transpose([3, 2, 0, 1])))
        self.conv1[0].bias.data.copy_(torch.from_numpy(b.reshape(-1)))
        w, b = data['layers'][0][4][0]['weights'][0][0]
        w = w.transpose([3, 2, 0, 1])
        w = np.concatenate((w, w), axis=1)
        self.conv2[0].weight.data.copy_(torch.from_numpy(w))
        self.conv2[0].bias.data.copy_(torch.from_numpy(b.reshape(-1)))
        w, b = data['layers'][0][8][0]['weights'][0][0]
        self.conv3[0].weight.data.copy_(torch.from_numpy(w.transpose([3, 2, 0, 1])))
        self.conv3[0].bias.data.copy_(torch.from_numpy(b.reshape(-1)))
        w, b = data['layers'][0][10][0]['weights'][0][0]
        w = w.transpose([3, 2, 0, 1])
        w = np.concatenate((w, w), axis=1)
        self.conv3[2].weight.data.copy_(torch.from_numpy(w))
        self.conv3[2].bias.data.copy_(torch.from_numpy(b.reshape(-1)))
        w, b = data['layers'][0][12][0]['weights'][0][0]
        w = w.transpose([3, 2, 0, 1])
        w = np.concatenate((w, w), axis=1)
        self.conv3[4].weight.data.copy_(torch.from_numpy(w))
        self.conv3[4].bias.data.copy_(torch.from_numpy(b.reshape(-1)))
        # Mask layers and classifier head are trained from scratch.
        torch.nn.init.normal_(self.mask1[0].weight.data, mean=0, std=0.01)
        torch.nn.init.normal_(self.mask2[0].weight.data, mean=0, std=0.01)
        torch.nn.init.normal_(self.line[1].weight.data, mean=0, std=0.01)
        torch.nn.init.zeros_(self.line[1].bias.data)
        torch.nn.init.normal_(self.line[4].weight.data, mean=0, std=0.01)
        torch.nn.init.zeros_(self.line[4].bias.data)
    def forward(self, x, label, Iter, density):
        """Forward pass; `label`, `Iter`, `density` are forwarded to the mask layers."""
        x = self.conv1(x)
        # Asymmetric right/bottom padding before pooling (MatConvNet-style
        # pooling geometry -- TODO confirm).
        x = F.pad(x, (0, 1, 0, 1))
        x = self.maxpool1(x)
        x = self.conv2(x)
        x = F.pad(x, (0, 1, 0, 1))
        x = self.maxpool2(x)
        x = self.conv3(x)
        x = self.mask1[0](x, label, Iter, density)
        x = self.maxpool3(x)
        x = self.mask2[0](x, label, Iter, density)
        x = self.relu(x)
        x = self.line(x)
        return x
| 42.113043 | 140 | 0.576915 |
6173010d8e1cb8f1b890d9048ac1f56279faf39b | 5,868 | py | Python | HW9- State Machines:MDPs/MDP.py | edsul/6.036---2020 | 0ce7a21171baecadff2df626cacc9c176c70329b | [
"MIT"
] | null | null | null | HW9- State Machines:MDPs/MDP.py | edsul/6.036---2020 | 0ce7a21171baecadff2df626cacc9c176c70329b | [
"MIT"
] | null | null | null | HW9- State Machines:MDPs/MDP.py | edsul/6.036---2020 | 0ce7a21171baecadff2df626cacc9c176c70329b | [
"MIT"
] | 1 | 2020-11-24T07:28:38.000Z | 2020-11-24T07:28:38.000Z | import numpy as np
class SM:
    """Abstract state machine.

    Subclasses provide a ``start_state`` plus ``transition_fn(state, input)``
    and ``output_fn(state)``.
    """
    start_state = None

    def transduce(self, input_seq):
        '''input_seq: a list of inputs to feed into SM
        returns: a list of outputs of SM'''
        # Bug fix: the original overwrote ``self.start_state`` on every step,
        # permanently clobbering the machine's start state so a second
        # transduce() call silently resumed from the end of the previous run.
        # Track the current state in a local variable instead.
        state = self.start_state
        outputs = []
        for inp in input_seq:
            state = self.transition_fn(state, inp)
            outputs.append(self.output_fn(state))
        return outputs
class Binary_Addition(SM):
    """LSB-first binary adder: each input is a (bit_a, bit_b) pair, the state
    is (carry, sum_bit), and the machine outputs the current sum bit.

    NOTE(review): this class is redefined by a byte-identical copy immediately
    below in this file; this first definition is dead code -- consider
    deleting one of the two.
    """
    start_state = (0,0) # Change
    def transition_fn(self, s, x):
        # Your code here
        # s[0] is the carry from the previous column; x holds the two input bits.
        if x[0] + x[1] == 2:
            if s[0]>0:
                return (1,1)
            return (1,0)
        elif x[0] + x[1] == 0:
            if s[0]>0:
                return (0,1)
            return (0,0)
        else:
            if s[0]>0:
                return (1,0)
            return (0,1)
    def output_fn(self, s):
        # Your code here
        return s[-1]
class Binary_Addition(SM):
    """LSB-first binary adder: each input is a (bit_a, bit_b) pair, the state
    is (carry, sum_bit), and the machine outputs the current sum bit.
    """
    start_state = (0, 0)

    def transition_fn(self, s, x):
        # Column total = both input bits plus the incoming carry; the new
        # state is (carry out, sum bit), exactly the if-chain it replaces.
        column_total = x[0] + x[1] + s[0]
        return (column_total // 2, column_total % 2)

    def output_fn(self, s):
        return s[-1]
class RNN(SM):
    """Linear recurrent state machine:
    s' = f1(Wss s + Wsx x + Wss_0),  y = f2(Wo s' + Wo_0).
    """
    def __init__(self, Wsx, Wss, Wo, Wss_0, Wo_0, f1, f2, start_state):
        self.Wsx = Wsx
        self.Wss = Wss
        self.Wo = Wo
        self.Wss_0 = Wss_0
        self.Wo_0 = Wo_0
        self.f1 = f1
        self.f2 = f2
        self.start_state = start_state

    def transition_fn(self, s, x):
        pre_activation = np.dot(self.Wss, s) + np.dot(self.Wsx, x) + self.Wss_0
        return self.f1(pre_activation)

    def output_fn(self, s):
        return self.f2(np.dot(self.Wo, s) + self.Wo_0)
#Accumulator RNN
# Wsx = np.array([[1]]) # Your code here
# Wss = np.array([[1]]) # Your code here
# Wo = np.array([[1]]) # Your code here
# Wss_0 = np.array([[0]]) # Your code here
# Wo_0 = np.array([[0]]) # Your code here
# f1 = lambda x : x# Your code here, e.g. lambda x : x
# f2 = lambda x: np.sign(x) # Your code here
# start_state = np.array([[0]]) # Your code here
# acc_sign = RNN(Wsx, Wss, Wo, Wss_0, Wo_0, f1, f2, start_state)
#Autoregression RNN
# Wsx = np.zeros(shape=(3,1)) # Your code here
# Wss = np.array([[1,-2,3],[1,0,0],[0,1,0]]) # Your code here
# Wo = np.array([[1,0,0]]) # Your code here
# Wss_0 = np.zeros(shape=(3,1)) # Your code here
# Wo_0 = 0 # Your code here
# f1 = lambda x: x # Your code here, e.g. lambda x : x
# f2 = lambda x: x # Your code here
# start_state = np.array([[-2,0,0]]).T #(1,3) # Your code here
# auto = RNN(Wsx, Wss, Wo, Wss_0, Wo_0, f1, f2, start_state)
def value(q, s):
    """ Return Q*(s,a) based on current Q
    >>> q = TabularQ([0,1,2,3],['b','c'])
    >>> q.set(0, 'b', 5)
    >>> q.set(0, 'c', 10)
    >>> q_star = value(q,0)
    >>> q_star
    10
    """
    # Best achievable action value at state s under the current Q estimate.
    return max(q.get(s, act) for act in q.actions)
def greedy(q, s):
    """ Return pi*(s) based on a greedy strategy.
    >>> q = TabularQ([0,1,2,3],['b','c'])
    >>> q.set(0, 'b', 5)
    >>> q.set(0, 'c', 10)
    >>> q.set(1, 'b', 2)
    >>> greedy(q, 0)
    'c'
    >>> greedy(q, 1)
    'b'
    """
    # Pick the action maximizing Q(s, a). Ties resolve to the first maximizer,
    # matching np.argmax over the list of action values.
    return max(q.actions, key=lambda act: q.get(s, act))
def epsilon_greedy(q, s, eps = 0.5):
    """ Returns an action.
    >>> q = TabularQ([0,1,2,3],['b','c'])
    >>> q.set(0, 'b', 5)
    >>> q.set(0, 'c', 10)
    >>> q.set(1, 'b', 2)
    >>> eps = 0.
    >>> epsilon_greedy(q, 0, eps) #greedy
    'c'
    >>> epsilon_greedy(q, 1, eps) #greedy
    'b'
    """
    # NOTE(review): neither `random` nor `uniform_dist` is imported/defined in
    # this file as shown -- presumably supplied by the course framework when
    # this snippet is pasted in; confirm before running standalone.
    if random.random() < eps:# True with prob eps, random action
        # Your code here
        return uniform_dist(q.actions).draw()
    else:
        return greedy(q,s)
        # Your code here
##
# Make sure to copy the Q function between iterations, e.g. new_q = q.copy(), so that you are only using Q-values from the previous iteration.
# The q parameter contains the initialization of the Q function.
# The value function is already defined.
# Use mdp class definitions to get
# the reward function: reward_fn,
# the discount factor: discount_factor,
# transition model: transition_model,
# expectation of the Q-values over a distribution: transition_model(s,a).expectation.
def value_iteration(mdp, q, eps = 0.01, max_iters=1000):
    """Run tabular value iteration on Q until convergence.

    Repeatedly applies the Bellman backup
        Q(s,a) <- R(s,a) + gamma * E_{s'}[ max_a' Q(s',a') ]
    using a frozen copy of Q from the previous sweep, and stops when the
    largest per-entry change falls below `eps` or after `max_iters`
    sweeps.

    Parameters:
        mdp: provides reward_fn(s,a), discount_factor, and
             transition_model(s,a) with a getAllProbs() -> [(s', p)] view.
        q: tabular Q with .states, .actions, .get, .set and .copy().
        eps: convergence threshold on max |Q_old - Q_new|.
        max_iters: hard cap on the number of full sweeps.

    Returns: the (mutated) q object.

    Fixes vs. original: no longer shadows the builtin `sum`; the loop now
    performs exactly `max_iters` sweeps (the `while count <= max_iters`
    form ran one extra iteration).
    """
    states = q.states
    actions = q.actions
    for _ in range(max_iters):
        # Freeze last sweep's values so every backup reads old Q.
        q_old = q.copy()
        for s in states:
            for a in actions:
                dist = mdp.transition_model(s, a)
                expected = 0.0
                for s_next, p in dist.getAllProbs():
                    expected += p * value(q_old, s_next)
                q.set(s, a, mdp.reward_fn(s, a) + mdp.discount_factor * expected)
        # Largest absolute change across the whole table this sweep.
        max_delta = max(abs(q_old.get(s, a) - q.get(s, a))
                        for s in states for a in actions)
        if max_delta < eps:
            break
    return q
def q_em(mdp, s, a, h):
    """Horizon-h action value Q_h(s, a) computed by explicit expectimax.

    Q_0 = 0; otherwise
        Q_h(s,a) = R(s,a) + gamma * E_{s'}[ max_a' Q_{h-1}(s', a') ].

    BUG FIX: the original reused `a` as the inner loop variable over
    mdp.actions, so the final `mdp.reward_fn(s, a)` used whatever action
    was iterated last instead of the action actually being evaluated.

    Parameters:
        mdp: provides actions, discount_factor, reward_fn(s,a) and
             transition_model(s,a) with getAllProbs() -> [(s', p)].
        s: current state.
        a: action whose value is computed.
        h: remaining horizon (non-negative int).
    """
    if h == 0:
        return 0
    expected = 0.0
    for s_next, p in mdp.transition_model(s, a).getAllProbs():
        # Best continuation value from the successor state.
        best_next = max(q_em(mdp, s_next, a_next, h - 1)
                        for a_next in mdp.actions)
        expected += p * best_next
    return mdp.reward_fn(s, a) + mdp.discount_factor * expected
| 28.485437 | 142 | 0.503579 |
306230b58df77ef40162ce2ca414f9647d418b6e | 9,643 | py | Python | orcid_api_v3/models/researcher_url_v30_rc2.py | tenet-ac-za/NZ-ORCID-Hub | f1183fbb94509b102fa58d7812ed33d8f35c5d4d | [
"MIT"
] | 15 | 2017-02-06T01:41:57.000Z | 2021-07-22T08:53:40.000Z | orcid_api_v3/models/researcher_url_v30_rc2.py | tenet-ac-za/NZ-ORCID-Hub | f1183fbb94509b102fa58d7812ed33d8f35c5d4d | [
"MIT"
] | 82 | 2017-03-23T00:30:04.000Z | 2022-02-01T00:10:34.000Z | orcid_api_v3/models/researcher_url_v30_rc2.py | tenet-ac-za/NZ-ORCID-Hub | f1183fbb94509b102fa58d7812ed33d8f35c5d4d | [
"MIT"
] | 6 | 2017-03-23T07:26:05.000Z | 2021-02-23T11:20:21.000Z | # coding: utf-8
"""
ORCID Member
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: Latest
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from orcid_api_v3.models.created_date_v30_rc2 import CreatedDateV30Rc2 # noqa: F401,E501
from orcid_api_v3.models.last_modified_date_v30_rc2 import LastModifiedDateV30Rc2 # noqa: F401,E501
from orcid_api_v3.models.source_v30_rc2 import SourceV30Rc2 # noqa: F401,E501
from orcid_api_v3.models.url_v30_rc2 import UrlV30Rc2 # noqa: F401,E501
class ResearcherUrlV30Rc2(object):
    """NOTE: This class is auto generated by the swagger code generator program.

    Do not edit the class manually.

    Model for one researcher-url entry of the ORCID v3.0_rc2 member API:
    a named link plus ORCID bookkeeping (source, visibility, put-code,
    timestamps).  Plain property-per-field container; no I/O.
    """
    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Python attribute name -> declared model type; to_dict() recurses
    # into values that themselves expose to_dict().
    swagger_types = {
        'created_date': 'CreatedDateV30Rc2',
        'last_modified_date': 'LastModifiedDateV30Rc2',
        'source': 'SourceV30Rc2',
        'url_name': 'str',
        'url': 'UrlV30Rc2',
        'visibility': 'str',
        'path': 'str',
        'put_code': 'int',
        'display_index': 'int'
    }

    # Python attribute name -> JSON key used on the wire.
    attribute_map = {
        'created_date': 'created-date',
        'last_modified_date': 'last-modified-date',
        'source': 'source',
        'url_name': 'url-name',
        'url': 'url',
        'visibility': 'visibility',
        'path': 'path',
        'put_code': 'put-code',
        'display_index': 'display-index'
    }

    def __init__(self, created_date=None, last_modified_date=None, source=None, url_name=None, url=None, visibility=None, path=None, put_code=None, display_index=None):  # noqa: E501
        """ResearcherUrlV30Rc2 - a model defined in Swagger

        All fields are optional; None values are left unset so that the
        setters (and their validation, e.g. for `visibility`) only run
        for explicitly supplied values.
        """  # noqa: E501
        self._created_date = None
        self._last_modified_date = None
        self._source = None
        self._url_name = None
        self._url = None
        self._visibility = None
        self._path = None
        self._put_code = None
        self._display_index = None
        self.discriminator = None
        if created_date is not None:
            self.created_date = created_date
        if last_modified_date is not None:
            self.last_modified_date = last_modified_date
        if source is not None:
            self.source = source
        if url_name is not None:
            self.url_name = url_name
        if url is not None:
            self.url = url
        if visibility is not None:
            self.visibility = visibility
        if path is not None:
            self.path = path
        if put_code is not None:
            self.put_code = put_code
        if display_index is not None:
            self.display_index = display_index

    @property
    def created_date(self):
        """Gets the created_date of this ResearcherUrlV30Rc2.  # noqa: E501

        :return: The created_date of this ResearcherUrlV30Rc2.  # noqa: E501
        :rtype: CreatedDateV30Rc2
        """
        return self._created_date

    @created_date.setter
    def created_date(self, created_date):
        """Sets the created_date of this ResearcherUrlV30Rc2.

        :param created_date: The created_date of this ResearcherUrlV30Rc2.  # noqa: E501
        :type: CreatedDateV30Rc2
        """
        self._created_date = created_date

    @property
    def last_modified_date(self):
        """Gets the last_modified_date of this ResearcherUrlV30Rc2.  # noqa: E501

        :return: The last_modified_date of this ResearcherUrlV30Rc2.  # noqa: E501
        :rtype: LastModifiedDateV30Rc2
        """
        return self._last_modified_date

    @last_modified_date.setter
    def last_modified_date(self, last_modified_date):
        """Sets the last_modified_date of this ResearcherUrlV30Rc2.

        :param last_modified_date: The last_modified_date of this ResearcherUrlV30Rc2.  # noqa: E501
        :type: LastModifiedDateV30Rc2
        """
        self._last_modified_date = last_modified_date

    @property
    def source(self):
        """Gets the source of this ResearcherUrlV30Rc2.  # noqa: E501

        :return: The source of this ResearcherUrlV30Rc2.  # noqa: E501
        :rtype: SourceV30Rc2
        """
        return self._source

    @source.setter
    def source(self, source):
        """Sets the source of this ResearcherUrlV30Rc2.

        :param source: The source of this ResearcherUrlV30Rc2.  # noqa: E501
        :type: SourceV30Rc2
        """
        self._source = source

    @property
    def url_name(self):
        """Gets the url_name of this ResearcherUrlV30Rc2.  # noqa: E501

        :return: The url_name of this ResearcherUrlV30Rc2.  # noqa: E501
        :rtype: str
        """
        return self._url_name

    @url_name.setter
    def url_name(self, url_name):
        """Sets the url_name of this ResearcherUrlV30Rc2.

        :param url_name: The url_name of this ResearcherUrlV30Rc2.  # noqa: E501
        :type: str
        """
        self._url_name = url_name

    @property
    def url(self):
        """Gets the url of this ResearcherUrlV30Rc2.  # noqa: E501

        :return: The url of this ResearcherUrlV30Rc2.  # noqa: E501
        :rtype: UrlV30Rc2
        """
        return self._url

    @url.setter
    def url(self, url):
        """Sets the url of this ResearcherUrlV30Rc2.

        :param url: The url of this ResearcherUrlV30Rc2.  # noqa: E501
        :type: UrlV30Rc2
        """
        self._url = url

    @property
    def visibility(self):
        """Gets the visibility of this ResearcherUrlV30Rc2.  # noqa: E501

        :return: The visibility of this ResearcherUrlV30Rc2.  # noqa: E501
        :rtype: str
        """
        return self._visibility

    @visibility.setter
    def visibility(self, visibility):
        """Sets the visibility of this ResearcherUrlV30Rc2.

        The only validated field: value must be one of the ORCID
        visibility levels below, otherwise ValueError is raised.

        :param visibility: The visibility of this ResearcherUrlV30Rc2.  # noqa: E501
        :type: str
        :raises ValueError: if `visibility` is not an allowed value
            (note: this includes None when assigned through the setter).
        """
        allowed_values = ["LIMITED", "REGISTERED_ONLY", "PUBLIC", "PRIVATE"]  # noqa: E501
        if visibility not in allowed_values:
            raise ValueError(
                "Invalid value for `visibility` ({0}), must be one of {1}"  # noqa: E501
                .format(visibility, allowed_values)
            )
        self._visibility = visibility

    @property
    def path(self):
        """Gets the path of this ResearcherUrlV30Rc2.  # noqa: E501

        :return: The path of this ResearcherUrlV30Rc2.  # noqa: E501
        :rtype: str
        """
        return self._path

    @path.setter
    def path(self, path):
        """Sets the path of this ResearcherUrlV30Rc2.

        :param path: The path of this ResearcherUrlV30Rc2.  # noqa: E501
        :type: str
        """
        self._path = path

    @property
    def put_code(self):
        """Gets the put_code of this ResearcherUrlV30Rc2.  # noqa: E501

        :return: The put_code of this ResearcherUrlV30Rc2.  # noqa: E501
        :rtype: int
        """
        return self._put_code

    @put_code.setter
    def put_code(self, put_code):
        """Sets the put_code of this ResearcherUrlV30Rc2.

        :param put_code: The put_code of this ResearcherUrlV30Rc2.  # noqa: E501
        :type: int
        """
        self._put_code = put_code

    @property
    def display_index(self):
        """Gets the display_index of this ResearcherUrlV30Rc2.  # noqa: E501

        :return: The display_index of this ResearcherUrlV30Rc2.  # noqa: E501
        :rtype: int
        """
        return self._display_index

    @display_index.setter
    def display_index(self, display_index):
        """Sets the display_index of this ResearcherUrlV30Rc2.

        :param display_index: The display_index of this ResearcherUrlV30Rc2.  # noqa: E501
        :type: int
        """
        self._display_index = display_index

    def to_dict(self):
        """Returns the model properties as a dict

        Recurses into nested models (anything with to_dict), lists and
        dicts of models; other values are passed through unchanged.
        """
        result = {}
        for attr, _ in six.iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        # Generated boilerplate: this branch is dead code here because
        # ResearcherUrlV30Rc2 does not subclass dict.
        if issubclass(ResearcherUrlV30Rc2, dict):
            for key, value in self.items():
                result[key] = value
        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal

        Equality is full field-by-field comparison via __dict__.
        """
        if not isinstance(other, ResearcherUrlV30Rc2):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
dd95c38f305805aa28f3b75472d7b160ad40264f | 496 | py | Python | src/stack-hci/azext_stack_hci/vendored_sdks/azurestackhci/version.py | Mannan2812/azure-cli-extensions | e2b34efe23795f6db9c59100534a40f0813c3d95 | [
"MIT"
] | 2 | 2021-06-05T17:51:26.000Z | 2021-11-17T11:17:56.000Z | src/stack-hci/azext_stack_hci/vendored_sdks/azurestackhci/version.py | Mannan2812/azure-cli-extensions | e2b34efe23795f6db9c59100534a40f0813c3d95 | [
"MIT"
] | 3 | 2020-05-27T20:16:26.000Z | 2020-07-23T19:46:49.000Z | src/stack-hci/azext_stack_hci/vendored_sdks/azurestackhci/version.py | Mannan2812/azure-cli-extensions | e2b34efe23795f6db9c59100534a40f0813c3d95 | [
"MIT"
] | 5 | 2020-05-09T17:47:09.000Z | 2020-10-01T19:52:06.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
VERSION = "1.0.0rc1"
| 35.428571 | 76 | 0.522177 |
c241062bcd86adf95556e7b4efec0959078c05eb | 1,560 | py | Python | freecad/cadquery2workbench/examples/FreeCAD/Ex023_Sweep.py | jpmlt/freecad-cadquery2-workbench | 6c488df2b8bfb767c0a02bbf8e4de9d00fc114e8 | [
"Apache-2.0"
] | 14 | 2021-12-02T23:38:48.000Z | 2022-03-24T13:23:33.000Z | freecad/cadquery2workbench/examples/FreeCAD/Ex023_Sweep.py | jpmlt/freecad-cadquery2-workbench | 6c488df2b8bfb767c0a02bbf8e4de9d00fc114e8 | [
"Apache-2.0"
] | 2 | 2022-01-19T03:40:35.000Z | 2022-03-05T00:38:10.000Z | freecad/cadquery2workbench/examples/FreeCAD/Ex023_Sweep.py | jpmlt/freecad-cadquery2-workbench | 6c488df2b8bfb767c0a02bbf8e4de9d00fc114e8 | [
"Apache-2.0"
] | null | null | null | import cadquery as cq
# Tutorial: sweeping 2D profiles along spline, polyline and arc paths.
# Points we will use to create spline and polyline paths to sweep over
pts = [(0, 1), (1, 2), (2, 4)]
# Spline path generated from our list of points (tuples), drawn on the XZ plane
path = cq.Workplane("XZ").spline(pts)
# Sweep a circle with a radius of 1.0 units along the spline path we just created
defaultSweep = cq.Workplane("XY").circle(1.0).sweep(path)
# Sweep defaults to making a solid and not generating a Frenet solid. Setting Frenet to True helps prevent creep in
# the orientation of the profile as it is being swept
frenetShell = cq.Workplane("XY").circle(1.0).sweep(path, makeSolid=True, isFrenet=True)
# We can sweep shapes other than circles
defaultRect = cq.Workplane("XY").rect(1.0, 1.0).sweep(path)
# Switch to a polyline path, but have it use the same points as the spline
path = cq.Workplane("XZ").polyline(pts, includeCurrent=True)
# Using a polyline path leads to the resulting solid having segments rather than a single swept outer face
plineSweep = cq.Workplane("XY").circle(1.0).sweep(path)
# Switch to an arc for the path
path = cq.Workplane("XZ").threePointArc((1.0, 1.5), (0.0, 1.0))
# Use a smaller circle section so that the resulting solid looks a little nicer
arcSweep = cq.Workplane("XY").circle(0.5).sweep(path)
# Translate the resulting solids so that they do not overlap and display them left to right
# (show_object is provided by the CQ-editor / FreeCAD workbench environment)
show_object(defaultSweep)
show_object(frenetShell.translate((5, 0, 0)))
show_object(defaultRect.translate((10, 0, 0)))
show_object(plineSweep.translate((15, 0, 0)))
show_object(arcSweep.translate((20, 0, 0)))
db20a12193d6fbdea27e1c318d3adfaae0053a38 | 447 | py | Python | 4 OOP/3_dataclass_car.py | pranjal-teaching/CP1890 | e3f238de65c0b67df229a9e04fc8b271ef7fe795 | [
"MIT"
] | null | null | null | 4 OOP/3_dataclass_car.py | pranjal-teaching/CP1890 | e3f238de65c0b67df229a9e04fc8b271ef7fe795 | [
"MIT"
] | null | null | null | 4 OOP/3_dataclass_car.py | pranjal-teaching/CP1890 | e3f238de65c0b67df229a9e04fc8b271ef7fe795 | [
"MIT"
] | null | null | null | from dataclasses import dataclass
@dataclass
class Car:
    """A simple vehicle record used in the OOP dataclass demo."""

    # Descriptive fields; the dataclass decorator generates
    # __init__/__repr__/__eq__ from them.
    make: str
    model: str
    year: int

    def drive(self):
        """Print a short driving message for this car."""
        message = f"Driving my {self.year} {self.make} {self.model}."
        print(message)

    def to_string(self):
        """Return a one-line textual summary of the car."""
        summary = f'Car {self.make} {self.model} {self.year}'
        return summary
# Demo: build one Car instance and exercise its methods.
corolla2015 = Car('DC_Toyota', 'Corolla', 2015)
# print(corolla2015.make)
corolla2015.drive()
# print(corolla2015.to_string()) # will this print???  (yes, if uncommented: print() shows the returned string)
Car.drive(corolla2015) # equivalent to corolla2015.drive(): the instance is passed explicitly as self
| 20.318182 | 66 | 0.668904 |
f637400779dea154f4d159e56a0e5d94cd7994ba | 16,268 | py | Python | back-end/social_network_analyzer/twitterSNA1-v2.1.py | tzamalisp/saint-open-source-tool-for-cyberthreats-monitoring | c30e6da5358555d06413541b6d3893c62a475368 | [
"MIT"
] | null | null | null | back-end/social_network_analyzer/twitterSNA1-v2.1.py | tzamalisp/saint-open-source-tool-for-cyberthreats-monitoring | c30e6da5358555d06413541b6d3893c62a475368 | [
"MIT"
] | null | null | null | back-end/social_network_analyzer/twitterSNA1-v2.1.py | tzamalisp/saint-open-source-tool-for-cyberthreats-monitoring | c30e6da5358555d06413541b6d3893c62a475368 | [
"MIT"
] | null | null | null | from pymongo import MongoClient
import pandas as pd
from collections import Counter
# NLP libraries
from nltk.tokenize import TweetTokenizer
from nltk.corpus import stopwords
import string
import csv
import json
# from datetime import datetime
import datetime
from collections import deque
"""TIME SERIES DESCRIPTIVE ANALYSIS SECTION"""
"""TIME SERIES DESCRIPTIVE ANALYSIS - BUG BOUNTY HASHTAGS"""
# Function for Data Analysis and CSV file creation
def findHashtagsTimeSeriesBugBounty():
    """Query tweets tagged #bugbounty, bucket them per day with pandas,
    and write the resulting time series to a CSV file."""
    print("Finding tweets with #bugbounty hashtag from Database.")
    print('Querying database and retrieving the data.')
    # Case-insensitive regex match on the hashtag text; only the tweet
    # creation date is projected back.
    query = {'entities.hashtags.text': {'$regex': 'bugbounty', '$options': 'i'}}
    projection = {'created_at': 1, '_id': 0}
    try:
        cursor = twitterOutput1.find(query, projection)
    except Exception as e:
        print("Unexpected error:", type(e), e)
    # One creation date per matching tweet.
    datesQuery = [doc['created_at'] for doc in cursor]
    print('Starting data analysis with Pandas.')
    print('Creating Time Series:')
    # A series of 1s indexed by tweet datetime: summing per bucket
    # yields tweet counts.
    idx = pd.DatetimeIndex(datesQuery)
    timeSeries01 = pd.Series([1] * len(datesQuery), index=idx)
    print(timeSeries01.head())
    print("Counting tweets per day - executing descriptive analysis - Re-sampling / Bucketing..")
    per_day = timeSeries01.resample('1D').sum().fillna(0)
    print('Time Series created:')
    print(per_day.head())
    print('Creating data frame..')
    frame = pd.DataFrame(per_day)
    print('Data frame:')
    print(frame.head())
    print('Writing CSV file..')
    frame.to_csv('/var/www/html/saint/twitterSNA-Aug17/perdayTimeSeriesBugBounty.csv')
    print('Writing Bug Bounty Time Series Descriptive Analysis CSV file completed!')
# function for converting CSV to JSON
def csvToJsonTimeSeriesBugBounty():
    """Convert the per-day bug-bounty CSV into [timestamp_ms, count]
    rows and dump them as JSON for the front-end chart."""
    print('Starting CSV to JSON conversion.')
    print('Data file processing..')
    jsonTimeSeries = []
    with open('/var/www/html/saint/twitterSNA-Aug17/perdayTimeSeriesBugBounty.csv') as csvfile:
        reader = csv.reader(csvfile, delimiter=',')
        next(reader)
        for record in reader:
            # Anchor each day at 14:00, then convert to epoch milliseconds.
            stamp = datetime.datetime.strptime(record[0] + ' 14:00:00.000',
                                               '%Y-%m-%d %H:%M:%S.%f')
            record[0] = stamp.timestamp() * 1000
            record[1] = int(float(record[1]))
            jsonTimeSeries.append(record)
    # Data cleaning: drop the first (not useful) row.
    del jsonTimeSeries[0]
    print('Writing JSON file..')
    with open('/var/www/html/saint/twitterSNA-Aug17/perdayTimeSeriesBugBounty.json', 'w') as file:
        json.dump(jsonTimeSeries, file, indent=4)
    print('Writing Time Series Bug Bounty JSON file completed!')
    print()
    print('Next:')
"""CATEGORICAL ANALYSIS SECTION"""
"""CATEGORICAL ANALYSIS - MOST FREQUENT HASHTAGS"""
# Function for Categorical Analysis and CSV file creation
def findMostFrequentHashtags():
    """Count hashtag usage in tweets from the last 7 days and write the
    10 most frequent hashtags to a CSV file via pandas.

    Fix: the DataFrame local was named ``hash``, shadowing the builtin
    ``hash()``; renamed to ``hashFrame``.
    """
    print("Finding tweets with included hashtags from the Database, over the last 7 days.")
    print('Querying database and retrieving the data.')
    # Window: everything newer than 7 days ago (UTC).
    sevenDaysAgo = datetime.datetime.utcnow() - datetime.timedelta(days=7)
    # creating query + projection for MongoDB
    query = {'$and': [{'entities.hashtags.text': {'$exists': 'true'}}, {'mongoDate': {'$gte': sevenDaysAgo}}]}
    projection = {'entities.hashtags.text': 1, '_id': 0}
    # On query failure the cursor stays an empty list so counting is a no-op.
    cursor = []
    try:
        cursor = twitterOutput1.find(query, projection)
    except Exception as e:
        print("Unexpected error:", type(e), e)
    print("Finding used hashtags frequency..")
    # Normalize each hashtag to lowercase with a leading '#', then count.
    countAllHashtags = Counter()
    hashtagsList = ['#' + item['text'].lower()
                    for doc in cursor
                    for item in doc['entities']['hashtags']]
    countAllHashtags.update(hashtagsList)
    print('Most 10 frequently used hashtags:')
    print(countAllHashtags.most_common(10))
    print('Starting data analysis with Pandas.')
    print('Creating data frame:')
    hash_freq = countAllHashtags.most_common(10)
    # Renamed from `hash` to avoid shadowing the builtin.
    hashFrame = pd.DataFrame(hash_freq)
    hashFrame.set_index(0, inplace=True)
    print('Data frame:')
    print(hashFrame.head())
    print('Writing CSV file..')
    hashFrame.to_csv('/var/www/html/saint/twitterSNA-Aug17/hashtagsMarkets.csv')
    print('Writing Hashtags Categorical Analysis to CSV file completed!')
# function for converting CSV to JSON
def csvToJsonHashtags():
    """Convert the hashtags CSV into [hashtag, count] JSON rows."""
    print('Starting CSV to JSON conversion.')
    print('Data file processing..')
    jsonBarPlotsHashtags = []
    with open('/var/www/html/saint/twitterSNA-Aug17/hashtagsMarkets.csv') as csvfileHash:
        reader = csv.reader(csvfileHash, delimiter=',')
        next(reader)  # skip header row
        for record in reader:
            record[1] = int(record[1])
            jsonBarPlotsHashtags.append(record)
    print('Writing JSON file..')
    with open('/var/www/html/saint/twitterSNA-Aug17/hashtagsMarkets.json', 'w') as file:
        json.dump(jsonBarPlotsHashtags, file, indent=4)
    print('Writing Hashtags Bar-Plots JSON file completed!')
    print()
    print('Next:')
"""CATEGORICAL ANALYSIS - MOST FREQUENT MENTIONS"""
# Function for Categorical Analysis and CSV file creation
def findMostFrequentMentions():
    """Count @-mentions over the last 7 days and write the 10 most
    frequent screen names to a CSV file via pandas."""
    print("Finding tweets with included mentions from the Database, over the last 7 days.")
    print('Querying database and retrieving the data.')
    # Window: everything newer than 7 days ago (UTC).
    sevenDaysAgo = datetime.datetime.utcnow() - datetime.timedelta(days=7)
    query = {'$and': [{'entities.user_mentions.screen_name': {'$exists': 'true'}}, {'mongoDate': {'$gte': sevenDaysAgo}}]}
    projection = {'entities.user_mentions.screen_name': 1, '_id': 0}
    # On query failure the cursor stays an empty list so counting is a no-op.
    cursor = []
    try:
        cursor = twitterOutput1.find(query, projection)
    except Exception as e:
        print("Unexpected error:", type(e), e)
    print("Finding used mentions frequency..")
    countAllMentions = Counter()
    mentionsList = [item['screen_name']
                    for doc in cursor
                    for item in doc['entities']['user_mentions']]
    countAllMentions.update(mentionsList)
    print('Most 10 frequently used mentions:')
    print(countAllMentions.most_common(10))
    print('Starting data analysis with Pandas.')
    print('Creating data frame:')
    mentions_freq = countAllMentions.most_common(10)
    mentionsFrame = pd.DataFrame(mentions_freq)
    mentionsFrame.set_index(0, inplace=True)
    print('Data frame:')
    print(mentionsFrame.head())
    print('Writing CSV file..')
    mentionsFrame.to_csv('/var/www/html/saint/twitterSNA-Aug17/mentionsMarkets.csv')
    print('Writing Mentions Categorical Analysis to CSV file completed!')
# function for converting CSV to JSON
def csvToJsonMentions():
    """Convert the mentions CSV into [screen_name, count] JSON rows."""
    print('Starting CSV to JSON conversion.')
    print('Data file processing..')
    jsonBarPlotsMentions = []
    with open('/var/www/html/saint/twitterSNA-Aug17/mentionsMarkets.csv') as csvfileMentions:
        reader = csv.reader(csvfileMentions, delimiter=',')
        next(reader)  # skip header row
        for record in reader:
            record[1] = int(record[1])
            jsonBarPlotsMentions.append(record)
    print('Writing JSON file..')
    with open('/var/www/html/saint/twitterSNA-Aug17/mentionsMarkets.json', 'w') as file:
        json.dump(jsonBarPlotsMentions, file, indent=4)
    print('Writing Mentions Bar-Plots JSON file completed!')
    print()
    print('Next:')
"""CATEGORICAL ANALYSIS - MOST FREQUENT TERMS"""
# function for tweet processing
def process(text, tokenizer=TweetTokenizer(), stopwords=()):
    """Process the text of a tweet:
    1. Lowercase
    2. Tokenize
    3. Stopword removal
    4. Digits removal
    5. Return list

    Fixes: the `stopwords` default was a mutable list (shared-default
    pitfall) — an empty tuple behaves identically for membership tests;
    a pointless self-assignment was removed; 'https' was dropped from
    the prefix filter because startswith('http') already covers it.
    NOTE(review): the parameter name `stopwords` shadows the imported
    nltk.corpus.stopwords module; kept because callers pass it by keyword.
    """
    tokens = tokenizer.tokenize(text.lower())
    return [tok for tok in tokens
            if tok not in stopwords
            and not tok.isdigit()
            and not tok.startswith(('#', '@', 'http'))]
# function for splitting contracted forms of two separate tokens
def normalize_contractions(tokens):
    """Yield tokens, expanding a few known contractions into word pairs."""
    expansions = {
        "i'm": "i am",
        "you're": "you are",
        "it's": "it is",
        "we're": "we are",
        "we'll": "we will",
    }
    for token in tokens:
        expansion = expansions.get(token)
        if expansion is None:
            # Unknown token: pass through untouched.
            yield token
        else:
            for word in expansion.split():
                yield word
# Function for Categorical Analysis and CSV file creation
def findMostFrequentTerms():
    """Tokenize the last 7 days of tweet texts, count term frequencies
    (minus punctuation/stopwords), and write the 10 most common terms
    to a CSV file via pandas.

    Fix: the ASCII-sanitizing step used ``.encode('ascii', 'ignore')``
    alone, leaving a ``bytes`` object; on Python 3 the tokenizer then
    raised for every tweet, so no terms were ever counted.  A matching
    ``.decode('ascii')`` restores a ``str``.  The unused ``termsList``
    local was removed.
    """
    print("Finding tweets with included terms from the Database, over the last 7 days.")
    print('Querying database and retrieving the data.')
    # Window: everything newer than 7 days ago (UTC).
    sevenDaysAgo = datetime.datetime.utcnow() - datetime.timedelta(days=7)
    # creating query + projection for MongoDB
    query = {'$and': [{'text': {'$exists': 'true'}}, {'mongoDate': {'$gte': sevenDaysAgo}}]}
    projection = {'text': 1, '_id': 0}
    # On query failure the cursor stays an empty list so counting is a no-op.
    cursor = []
    try:
        cursor = twitterOutput1.find(query, projection)
    except Exception as e:
        print("Unexpected error:", type(e), e)
    print("Finding used terms frequency..")
    countAllTerms = Counter()
    tweetTokenizer = TweetTokenizer()
    punct = list(string.punctuation)
    # English stopwords + punctuation + a hand-curated noise list
    # (common non-English words and campaign-specific terms).
    stopwordList = stopwords.words('english') + punct + ['rt', 'via', '…', '...', '..', 'yang', "i'm",
                        'one', 'like', 'de', 'la', 'le', 'les', 'et', 'que', 'en',
                        'qui', 'un', 'des', 'pour', 'une', 'ce', 'pas', 'avec',
                        'est', 'sur', 'se', 'du', 'dans', 'el', "c'est", "don't",
                        'vous', 'il', 'di', 'ne', 'sont', 'fs', 'au', 'aku', 'dan',
                        'love', 'yg', 'ada', 'tidak', 'dm', 'ya', 'es', 'kamu',
                        'lebih', 'son', 'par', 'naruto', 'jika', 'kau', 'dia', 'te',
                        'ft', 'dari', 'bisa', 'f', 'v', 'ou', 'al', 'una', 'im',
                        "i'll", 'con', 'tu', 'zaif', 'apa', 'us', 'pada', 'mau',
                        'ou', 'oh', 'e', 'u', 'si', 'itu', "you're", "you re", 'ga',
                        'je', 'las', 'b', 'h', 'die', 'ini', 'ont', 'c', 'l', 'r',
                        'jangan', 'akan', ':/', 'karena', 'dont', 'ass', 'kita',
                        'tak', "that's", 'untuk', 'dalam', 'lagi', 'it', 'adalah',
                        'orang', 'visit', "can't", 'cant', 'know', "it's", 'get',
                        'burp', 'jenkins', 'using', 'time', 'condoms', 'condom',
                        'epic', 'hi', 'new', '->', 'tab']
    for doc in cursor:
        tokens = []
        # Strip non-ASCII characters, then decode back to str so the
        # tokenizer receives text rather than bytes (Python 3 fix).
        doc['text'] = doc['text'].encode('ascii', 'ignore').decode('ascii')
        try:
            tokens = process(text=doc['text'], tokenizer=tweetTokenizer, stopwords=stopwordList)
        except Exception as exceptionTweet:
            print('Error! Not valid term:', exceptionTweet)
        countAllTerms.update(tokens)
    print('Most 10 frequently used terms:')
    print(countAllTerms.most_common(10))
    # Categorical analysis (bar-plot) with pandas.
    print('Starting data analysis with Pandas.')
    print('Creating data frame:')
    terms_freq = countAllTerms.most_common(10)
    terms = pd.DataFrame(terms_freq)
    terms.set_index(0, inplace=True)
    print('Data frame:')
    print(terms.head())
    print('Writing CSV file..')
    terms.to_csv('/var/www/html/saint/twitterSNA-Aug17/termsMarkets.csv')
    print('Writing Terms Categorical Analysis to CSV file completed!')
# function for converting CSV to JSON
def csvToJsonTerms():
    """Convert the terms CSV into [term, count] JSON rows."""
    print('Starting CSV to JSON conversion.')
    print('Data file processing..')
    jsonBarPlotsTerms = []
    with open('/var/www/html/saint/twitterSNA-Aug17/termsMarkets.csv') as csvfileTerms:
        reader = csv.reader(csvfileTerms, delimiter=',')
        next(reader)  # skip header row
        for record in reader:
            record[1] = int(record[1])
            jsonBarPlotsTerms.append(record)
    print('Writing JSON file..')
    with open('/var/www/html/saint/twitterSNA-Aug17/termsMarkets.json', 'w') as file:
        json.dump(jsonBarPlotsTerms, file, indent=4)
    print('Writing Terms Bar Plots to JSON file completed!')
"""MAIN FUNCTION"""
if __name__ == '__main__':
# current Datetime the process is running
now = datetime.datetime.now()
print('Time now:', now)
utcnow = datetime.datetime.utcnow()
print('Time now in UTC:', utcnow)
# connect to database
connection = MongoClient('XXX.XXX.XXX.XXX', 27017)
db = connection.admin
db.authenticate('xxxxxx', 'xxxXXXxxxXX')
# find the db
twitterDB = connection.twitter
# find the right collection
twitterOutput1 = twitterDB.twitterQuery1
print("Database connection successful..")
print()
print('TIME SERIES DESCRIPTIVE ANALYSIS - BUG BOUNTY HASHTAGS')
findHashtagsTimeSeriesBugBounty()
csvToJsonTimeSeriesBugBounty()
print()
# print('CATEGORICAL ANALYSIS - MOST FREQUENT HASHTAGS')
# findMostFrequentHashtags()
# csvToJsonHashtags()
# print()
# print('CATEGORICAL ANALYSIS - MOST FREQUENT MENTIONS')
# findMostFrequentMentions()
# csvToJsonMentions()
# print()
# print('CATEGORICAL ANALYSIS - MOST FREQUENT TERMS')
# findMostFrequentTerms()
# csvToJsonTerms()
print('Process completed!')
| 37.226545 | 147 | 0.61495 |
bf4955dd17acbf427dcc8c19b8d7e9cb2ea4d6d5 | 15,982 | py | Python | bin/ADFRsuite/CCSBpckgs/DejaVu2/GleObjects.py | AngelRuizMoreno/Jupyter_Dock_devel | 6d23bc174d5294d1e9909a0a1f9da0713042339e | [
"MIT"
] | null | null | null | bin/ADFRsuite/CCSBpckgs/DejaVu2/GleObjects.py | AngelRuizMoreno/Jupyter_Dock_devel | 6d23bc174d5294d1e9909a0a1f9da0713042339e | [
"MIT"
] | null | null | null | bin/ADFRsuite/CCSBpckgs/DejaVu2/GleObjects.py | AngelRuizMoreno/Jupyter_Dock_devel | 6d23bc174d5294d1e9909a0a1f9da0713042339e | [
"MIT"
] | 1 | 2021-11-04T21:48:14.000Z | 2021-11-04T21:48:14.000Z | ################################################################################
##
## This library is free software; you can redistribute it and/or
## modify it under the terms of the GNU Lesser General Public
## License as published by the Free Software Foundation; either
## version 2.1 of the License, or (at your option) any later version.
##
## This library is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## Lesser General Public License for more details.
##
## You should have received a copy of the GNU Lesser General Public
## License along with this library; if not, write to the Free Software
## Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
##
## (C) Copyrights Dr. Michel F. Sanner and TSRI 2016
##
################################################################################
#############################################################################
#
# Author: Michel F. SANNER, Sophie Coon
#
# Copyright: M. Sanner TSRI 2000
#
#############################################################################
# $Header: /mnt/raid/services/cvs/DejaVu2/GleObjects.py,v 1.1.1.1.4.1 2017/07/13 22:28:32 annao Exp $
#
# $Id: GleObjects.py,v 1.1.1.1.4.1 2017/07/13 22:28:32 annao Exp $
#
# Set True to report a missing GLE extension module at import time.
DEBUG = False
# The gle extension is optional here; failure is silent unless DEBUG is on.
try:
    import gle
except:
    if DEBUG:
        print 'Sorry you need the GLE extension module'
from DejaVu2.viewerFns import checkKeywords
from DejaVu2.Geom import Geom
from DejaVu2.triangle_strip import Triangle_strip
from opengltk.OpenGL import GL
import numpy
class GleObject(Triangle_strip):
    """Base geometry for GLE (tubing/extrusion) objects rendered as a
    triangle strip.  Tracks the GLE normal-generation and join styles
    and pushes them into the GLE library's global state."""

    # Settable keywords in addition to those of Triangle_strip.
    keywords = Triangle_strip.keywords + [
        'normalStyle',
        'joinStyle'
        ]

    def __init__(self, name=None, check=1, **kw):
        # Defaults: one normal per path edge, angled joins between segments.
        self.normalStyle = gle.TUBE_NORM_PATH_EDGE
        self.joinStyle = gle.TUBE_JN_ANGLE
        apply( Triangle_strip.__init__, (self, name, check), kw)

    def Set(self, check=1, redo=1, updateOwnGui=True, **kw):
        """set data for this object:
check=1 : verify that all the keywords present can be handled by this func
redo=1 : append self to viewer.objectsNeedingRedo
updateOwnGui=True : allow updating owngui at the end of this func
"""
        # NOTE(review): `materials` is computed here but not used below —
        # presumably legacy; the actual material handling happens in
        # Triangle_strip.Set via kw.  Verify before removing.
        if kw.has_key('materials') and kw['materials'] is not None:
            materials = numpy.array((kw['materials']),'f')
        else:
            materials = numpy.array(((0.,0.,1.,1.),),'f')

        redoFlags = apply( Triangle_strip.Set, (self, check, 0), kw )

        nm = kw.get( 'normalStyle')
        # nm can be TUBE_NORM_FACET, TUBE_NORM_EDGE, TUBE_NORM_PATH_EDGE
        if nm:
            # Clear the normal-style bits, then install the new style.
            self.normalStyle = self.normalStyle & ~gle.TUBE_NORM_MASK
            self.normalStyle = self.normalStyle | nm
            gle.gleSetJoinStyle (self.normalStyle | self.joinStyle)

        ja = kw.get( 'joinStyle')
        # ja can be TUBE_JN_RAW, TUBE_JN_ANGLE, TUBE_JN_CUT, TUBE_JN_ROUND,
        # TUBE_JN_CAP
        if ja:
            # Clear the join-style bits, then install the new style.
            self.joinStyle = self.joinStyle & ~gle.TUBE_JN_MASK
            self.joinStyle = self.joinStyle | ja
            gle.gleSetJoinStyle (self.normalStyle | self.joinStyle)

        return self.redoNow(redo, updateOwnGui, redoFlags)

    def extrude(self):
        """Virtual Method to do the extrusion along a 3D path with a 2D shape
        using the gle extrusion. We then get the geometry information
        using the extrusion method in Feedback mode. This will then be
        used to build a triangle strip."""
        pass

    def asIndexedPolygons(self, run=1, quality=None, **kw):
        """ run=0 returns 1 if this geom can be represented as an
        IndexedPolygon and None if not. run=1 returns the IndexedPolygon
        object."""
        if run==0:
            return 1 # yes, I can be represented as IndexedPolygons
        faces = self.faceSet.faces.array
        verts = self.vertexSet.vertices.array
        size = faces.shape
        # number of triangles in each face (containing triangle strip
        # vertices) from faces array.
        ntr = size[1]-2
        # new length of triangles array
        nfaces = size[0]*ntr
        new_faces = numpy.zeros((nfaces, 3), 'i')
        i = 0
        for f in faces:
            for n in range(ntr):
                # Alternate the vertex winding on odd strip triangles so
                # all emitted triangles keep a consistent orientation
                # ((n/2)*2 == n is a Python-2 evenness test).
                if (n/2)*2 == n:
                    new_faces[i] = [f[n], f[n+1], f[n+2]]
                else:
                    new_faces[i] = [f[n+2], f[n+1], f[n]]
                i = i + 1
        from DejaVu2.IndexedPolygons import IndexedPolygons
        new_obj = IndexedPolygons('gleobj', vertices = verts,
                                  faces = new_faces, visible=1,
                                  invertNormals=self.invertNormals)
        return new_obj
class GleExtrude(GleObject):
    # Extrusion of a 2D shape (self.shape2D) along a 3D path
    # (self.trace3D) using the GLE library, optionally closed with a
    # front and an end cap.
    keywords = GleObject.keywords + [
        'shape2D',
        'trace3D',
        'contourUp',
        'capsFlag'
        ]
    def __init__(self, name=None, check=1, **kw):
        if __debug__:
            if check:
                apply( checkKeywords, (name,self.keywords), kw)
        apply( GleObject.__init__, (self, name, 0), kw)
        self.Set(trace3D = kw.get('trace3D'),
                 shape2D = kw.get('shape2D'),
                 contourUp = kw.get( 'contourUp'),
                 capsFlag = kw.get('capsFlag'))
    def Set(self, check=1, redo=1, updateOwnGui=True, **kw):
        """set data for this object:
check=1 : verify that all the keywords present can be handle by this func
redo=1 : append self to viewer.objectsNeedingRedo
updateOwnGui=True : allow to update owngui at the end this func
"""
        redoFlags = 0
        # Each attribute keeps its previous value (or a default) when the
        # corresponding keyword is absent.
        capsFlag = kw.get('capsFlag')
        if capsFlag is None:
            if not hasattr(self, 'capsFlag'):
                self.capsFlag = 0
        else:
            self.capsFlag = capsFlag
        shape2D = kw.get('shape2D')
        if shape2D is None:
            if not hasattr(self, 'shape2D'):
                self.shape2D = None
        else:
            self.shape2D = shape2D
        contourUp = kw.get('contourUp')
        if contourUp is None:
            if not hasattr(self, 'contourUp'):
                self.contourUp= (0.,0.,1.)
        else:
            self.contourUp = contourUp
        trace3D = kw.get('trace3D')
        if trace3D is None:
            if not hasattr(self, 'trace3D'):
                self.trace3D = numpy.zeros( (0,3), 'f')
        else:
            self.trace3D = trace3D
        # NOTE(review): 'materials' is computed but not used below --
        # presumably GleObject.Set consumes kw['materials']; confirm
        # before removing.
        if kw.has_key('materials') and kw['materials'] is not None:
            materials = numpy.array((kw['materials']),'f')
            redoFlags |= self._redoFlags['redoDisplayListFlag']
        else:
            materials = numpy.array(((0.,0.,1.,1.),),'f')
        if not shape2D is None:
            # re-extrude, optionally cap, and feed the resulting strip
            # data to GleObject.Set
            v,n,s = self.extrude()
            if self.capsFlag == 1:
                v, n, s = self.addCaps(v, n, s)
            redoFlags |= self._redoFlags['redoDisplayListFlag']
            if v is not None:
                kw['vertices']=v
            if n is not None:
                kw['vnormals']=n
            if s is not None:
                kw['stripBegin']=[0] + list( s[:-1,0] )
                kw['stripEnd'] = list( s[:,0])
        redoFlags |= apply( GleObject.Set, (self, check, 0), kw )
        return self.redoNow(redo, updateOwnGui, redoFlags)
    def addCaps(self, v, n, s):
        """ Method to add front and end caps to the extruded geometry."""
        # number of strip vertices produced per extrusion segment
        lenStrip = 2*self.shape2D.lenShape
        # 1- Front Cap:
        #================
        # Get the midPoint of the front cap
        frontMid = self.trace3D[1]
        # Get the coordinates of the contourPoints of the cap
        shapePoints = v[1:lenStrip:2]
        # Organize the points so the strip creates the cap
        frontCapPts = []
        for point in shapePoints.tolist():
            frontCapPts.append(point)
            frontCapPts.append(frontMid)
        # Add the new strip to the front of the vertices
        vertices = numpy.concatenate( (frontCapPts, v) )
        #Compute normal of the cap by computing the cross product of (M3 M1).
        if self.shape2D.vertDup == 0:
            fm1 = shapePoints[0] - frontMid
            fm3 = shapePoints[1] - frontMid
        elif self.shape2D.vertDup == 1:
            fm1 = shapePoints[0] - frontMid
            fm3 = shapePoints[2] - frontMid
        # Cross product fm3 x fm1.
        # BUGFIX: the y component had its sign flipped and the z component
        # was computed as a-a == 0, producing invalid cap normals.
        nc = [[(fm3[1]*fm1[2] - fm3[2]*fm1[1]),
               (fm3[2]*fm1[0] - fm3[0]*fm1[2]),
               (fm3[0]*fm1[1] - fm3[1]*fm1[0])],]
        frontNorm = numpy.array(nc*lenStrip, 'd')
        # Add the normals to the normal array
        normals = numpy.concatenate( (frontNorm, n) )
        lastVert = s[-1][0]+lenStrip
        strip = numpy.concatenate((s, [[lastVert,lastVert],]))
        # 2- End cap:
        #================
        # Get the midPoint of the end cap
        endMid = self.trace3D[-2]
        # Get the coordinates of the contourPoints of the last cap
        endShape = v[-lenStrip:-1:2]
        # Organize the points so the strip creates the cap
        endCapPts = []
        # Definition of the strip
        for point in endShape.tolist():
            endCapPts.append(endMid)
            endCapPts.append(point)
        # Add the new strip to the front of the vertices
        vertices = numpy.concatenate( (vertices, endCapPts) )
        #Compute normal of the cap by computing the cross product of 2 vectors\
        # defined by the mid cap point and a point of the shape.
        if self.shape2D.vertDup == 0:
            em1 = endShape[0] - endMid
            em3 = endShape[1] - endMid
        elif self.shape2D.vertDup == 1:
            em1 = endShape[2] - endMid
            em3 = endShape[0] - endMid
        # Cross product em3 x em1 (same BUGFIX as for the front cap).
        nc = [[(em3[1]*em1[2] - em3[2]*em1[1]),
               (em3[2]*em1[0] - em3[0]*em1[2]),
               (em3[0]*em1[1] - em3[1]*em1[0])],]
        endNorm = numpy.array(nc*lenStrip, 'd')
        # Add the normals to the normal array
        normals = numpy.concatenate( (normals, endNorm) )
        lastVert = strip[-1][0]+lenStrip
        strip = numpy.concatenate((strip, [[lastVert,lastVert],]))
        return vertices, normals, strip
    def extrude(self):
        """Extrude the 2D shape along the 3D path with the gle extrusion
        in feedback mode and return (vertices, normals, strips) suitable
        for building a triangle strip."""
        from gle import glec
        gle.gleSetJoinStyle ( self.normalStyle | self.joinStyle )
        glec.gleFeedBack()   # record geometry instead of rendering it
        # only the (x, y) columns of the contour points/normals are used
        contpts = numpy.array(self.shape2D.contpts)
        contourPoints = contpts[:,:2]
        contnorm = numpy.array(self.shape2D.contnorm)
        contourNormals = contnorm[:,:2]
        # 1028 == GL.GL_FRONT (0x0404): front-face material colors
        gle.gleExtrusion(contourPoints, contourNormals,
                         self.contourUp,
                         self.trace3D,
                         self.materials[1028].prop[0][:,:3] )
        glec.gleTextureMode(0)   # stop recording
        v,n,s = glec.gleGetTriangleMesh()
        # swap every pair of strip vertices/normals to flip the winding
        vinv = numpy.zeros( v.shape, 'd')
        vinv[::2] = v[1::2]
        vinv[1::2] = v[::2]
        ninv = numpy.zeros( n.shape, 'd')
        ninv[::2] = n[1::2]
        ninv[1::2] = n[::2]
        return vinv, ninv, s
    def getFaces(self):
        """returns a handle to the faces array"""
        # NOTE(review): other methods in this module use self.faceSet
        # (see GleObject.asIndexedPolygons); verify that IndexedFaceSet
        # exists on instances before relying on this accessor.
        return self.IndexedFaceSet.faces.array
#
# WARNING the extrusion in this object ONLY works after this object has
# been added to a viewer
#
class GlePolyCylinder(GleExtrude):
    # Poly-cylinder (tube of constant radius) along a 3D path, generated
    # with gle.glePolyCylinder.  See the module WARNING above: extrusion
    # only works after the object has been added to a viewer.
    keywords = GleExtrude.keywords + [
        'trace3D',
        'radius'
        ]
    def __init__(self, name=None, check=1, **kw):
        if __debug__:
            if check:
                apply( checkKeywords, (name,self.keywords), kw)
        r = kw.get('radius')
        # default radius is 1.0 (also replaces a 0 or None radius)
        if not r: r=1.0
        self.radius = r
        apply( GleExtrude.__init__, (self, name, 0), kw)
        self.Set(trace3D = kw.get( 'trace3D'))
    def Set(self, check=1, redo=1, updateOwnGui=True, **kw):
        """set data for this object
check=1 : verify that all the keywords present can be handle by this func
redo=1 : append self to viewer.objectsNeedingRedo
updateOwnGui=True : allow to update owngui at the end this func
"""
        redoFlags = apply( GleExtrude.Set, (self, check, 0), kw)
        # NOTE(review): 'materials' is computed but unused here --
        # presumably consumed by GleExtrude.Set via kw; confirm.
        if kw.has_key('materials') and kw['materials'] is not None:
            materials = numpy.array((kw['materials']),'f')
        else:
            materials = numpy.array(((0.,0.,1.,1.),),'f')
        self.trace3D = kw.get('trace3D')
        # NOTE(review): this truth test assumes trace3D is None or a
        # plain sequence; a numpy array here would raise on truth testing.
        if not self.trace3D:
            v,n,s = (None,None,None)
            redoFlags |= self._redoFlags['redoDisplayListFlag']
        else:
            v,n,s = self.extrude()
            redoFlags |= self._redoFlags['redoDisplayListFlag']
            if v is not None:
                kw['vertices']=v
            if n is not None:
                kw['vnormals']=n
            if s is not None:
                kw['stripBegin']=[0] + list( s[:,0] )
        return self.redoNow(redo, updateOwnGui, redoFlags)
    def extrude(self):
        """Extrude the poly-cylinder with gle in feedback mode and return
        (vertices, normals, strips) describing the triangle strips."""
        from gle import glec
        gle.gleSetJoinStyle ( self.joinStyle | self.normalStyle )
        glec.gleFeedBack()   # record geometry instead of rendering it
        #DisplayFunction of the old GlePolyCylinder
        GL.glColorMaterial (GL.GL_FRONT_AND_BACK, GL.GL_AMBIENT)
        GL.glEnable (GL.GL_COLOR_MATERIAL)
        #glEnable(GL_LIGHTING)
        if self.viewer is not None:
            self.viewer.enableOpenglLighting()
        colors = self.materials[GL.GL_FRONT].prop[0][:,:3]
        gle.glePolyCylinder(self.trace3D, colors, self.radius)
        GL.glDisable (GL.GL_COLOR_MATERIAL)
        glec.gleTextureMode(0)   # stop recording
        v,n,s = glec.gleGetTriangleMesh()
        return v,n,s
class GlePolyCone(GlePolyCylinder):
    # Poly-cone: a tube whose radius can vary at each point of the path,
    # generated with gle.glePolyCone.
    keywords = GleExtrude.keywords + [
        'trace3D',
        'radii'
        ]
    def __init__(self, name=None, check=1, **kw):
        if __debug__:
            if check:
                apply( checkKeywords, (name,self.keywords), kw)
        apply( GlePolyCylinder.__init__, (self, name, 0), kw )
        apply( self.Set, (), kw)
    def Set(self, check=1, redo=1, updateOwnGui=True, **kw):
        """set data for this object:
check=1 : verify that all the keywords present can be handle by this func
redo=1 : append self to viewer.objectsNeedingRedo
updateOwnGui=True : allow to update owngui at the end this func
"""
        redoFlags = apply( GlePolyCylinder.Set, (self, check, 0), kw )
        r = kw.get('radii')
        if r is not None:
            # one radius per path point (length checked in extrude)
            assert len(r)
            self.radii = r
        return self.redoNow(redo, updateOwnGui, redoFlags)
    def extrude(self):
        """Extrude a cone with radii specified at each point
        of the extrusion"""
        assert len(self.radii)==len(self.trace3D)
        from gle import glec
        gle.gleSetJoinStyle ( self.joinStyle | self.normalStyle )
        glec.gleFeedBack()   # record geometry instead of rendering it
        #DisplayFunction of the old GlePolyCylinder
        GL.glColorMaterial (GL.GL_FRONT_AND_BACK, GL.GL_AMBIENT)
        GL.glEnable (GL.GL_COLOR_MATERIAL)
        if self.viewer is not None:
            self.viewer.enableOpenglLighting()
        colors = self.materials[GL.GL_FRONT].prop[0][:,:3]
        gle.glePolyCone(self.trace3D, colors, self.radii)
        GL.glDisable (GL.GL_COLOR_MATERIAL)
        glec.gleTextureMode(0)   # stop recording
        v,n,s = glec.gleGetTriangleMesh()
        return v,n,s
| 34.222698 | 101 | 0.569703 |
c8ed0caadf3e020d468f51c06913f0261529b49e | 48,122 | py | Python | lasagne/layers/special.py | goncaloperes/Library_Lasagne | 5d3c63cb315c50b1cbd27a6bc8664b406f34dd99 | [
"MIT"
] | 3,986 | 2015-04-09T17:00:42.000Z | 2022-03-30T08:21:55.000Z | lasagne/layers/special.py | goncaloperes/Library_Lasagne | 5d3c63cb315c50b1cbd27a6bc8664b406f34dd99 | [
"MIT"
] | 736 | 2015-04-09T16:23:00.000Z | 2021-01-02T01:35:45.000Z | lasagne/layers/special.py | goncaloperes/Library_Lasagne | 5d3c63cb315c50b1cbd27a6bc8664b406f34dd99 | [
"MIT"
] | 1,311 | 2015-04-09T17:05:38.000Z | 2022-03-27T03:41:01.000Z | import theano
import theano.tensor as T
import numpy as np
from .. import init
from .. import nonlinearities
from ..utils import as_tuple, floatX, int_types
from ..random import get_rng
from .base import Layer, MergeLayer
from theano.sandbox.rng_mrg import MRG_RandomStreams as RandomStreams
__all__ = [
"NonlinearityLayer",
"BiasLayer",
"ScaleLayer",
"standardize",
"ExpressionLayer",
"InverseLayer",
"TransformerLayer",
"TPSTransformerLayer",
"ParametricRectifierLayer",
"prelu",
"RandomizedRectifierLayer",
"rrelu",
]
class NonlinearityLayer(Layer):
    """
    lasagne.layers.NonlinearityLayer(incoming,
    nonlinearity=lasagne.nonlinearities.rectify, **kwargs)

    Applies an elementwise nonlinearity to its input.

    Parameters
    ----------
    incoming : a :class:`Layer` instance or a tuple
        The layer feeding into this layer, or the expected input shape
    nonlinearity : callable or None
        The nonlinearity applied to the layer activations; ``None`` makes
        the layer linear (identity).
    """
    def __init__(self, incoming, nonlinearity=nonlinearities.rectify,
                 **kwargs):
        super(NonlinearityLayer, self).__init__(incoming, **kwargs)
        if nonlinearity is None:
            self.nonlinearity = nonlinearities.identity
        else:
            self.nonlinearity = nonlinearity

    def get_output_for(self, input, **kwargs):
        return self.nonlinearity(input)
class BiasLayer(Layer):
    """
    lasagne.layers.BiasLayer(incoming, b=lasagne.init.Constant(0),
    shared_axes='auto', **kwargs)

    Adds a (trainable) bias term to its input.

    Parameters
    ----------
    incoming : a :class:`Layer` instance or a tuple
        The layer feeding into this layer, or the expected input shape
    b : Theano shared variable, expression, numpy array, callable or ``None``
        Initial value, expression or initializer for the biases. ``None``
        disables biases (the input passes through unchanged). Otherwise the
        bias shape must match the incoming shape with the shared axes
        removed (see the example below). See
        :func:`lasagne.utils.create_param` for more information.
    shared_axes : 'auto', int or tuple of int
        The axis or axes to share biases over. ``'auto'`` (the default)
        shares over all axes except the second: over the minibatch
        dimension for dense layers, plus all spatial dimensions for
        convolutional layers.

    Notes
    -----
    The bias parameter dimensionality is the input dimensionality minus the
    number of shared axes, matching the bias conventions of
    :class:`DenseLayer` or :class:`Conv2DLayer`. For example:

    >>> layer = BiasLayer((20, 30, 40, 50), shared_axes=(0, 2))
    >>> layer.b.get_value().shape
    (30, 50)
    """
    def __init__(self, incoming, b=init.Constant(0), shared_axes='auto',
                 **kwargs):
        super(BiasLayer, self).__init__(incoming, **kwargs)

        if shared_axes == 'auto':
            # default: share biases over all but the second axis
            shared_axes = (0,) + tuple(range(2, len(self.input_shape)))
        elif isinstance(shared_axes, int_types):
            shared_axes = (shared_axes,)
        self.shared_axes = shared_axes

        if b is None:
            self.b = None
            return
        # the bias shape is the input shape with all shared axes removed;
        # every remaining axis must be of known size
        shape = []
        for axis, size in enumerate(self.input_shape):
            if axis in self.shared_axes:
                continue
            if size is None:
                raise ValueError("BiasLayer needs specified input sizes for "
                                 "all axes that biases are not shared over.")
            shape.append(size)
        self.b = self.add_param(b, shape, 'b', regularizable=False)

    def get_output_for(self, input, **kwargs):
        if self.b is None:
            return input
        # broadcast the bias over the shared axes; bias dimensions are
        # consumed in order for the non-shared axes
        pattern = []
        next_bias_axis = 0
        for input_axis in range(input.ndim):
            if input_axis in self.shared_axes:
                pattern.append('x')
            else:
                pattern.append(next_bias_axis)
                next_bias_axis += 1
        return input + self.b.dimshuffle(*pattern)
class ScaleLayer(Layer):
    """
    lasagne.layers.ScaleLayer(incoming, scales=lasagne.init.Constant(1),
    shared_axes='auto', **kwargs)

    Scales its input by learned coefficients.

    Parameters
    ----------
    incoming : a :class:`Layer` instance or a tuple
        The layer feeding into this layer, or the expected input shape
    scales : Theano shared variable, expression, numpy array, or callable
        Initial value, expression or initializer for the scale. The scale
        shape must match the incoming shape with the shared axes removed
        (see the example below). See :func:`lasagne.utils.create_param`
        for more information.
    shared_axes : 'auto', int or tuple of int
        The axis or axes to share scales over. ``'auto'`` (the default)
        shares over all axes except the second: over the minibatch
        dimension for dense layers, plus all spatial dimensions for
        convolutional layers.

    Notes
    -----
    The scales parameter dimensionality is the input dimensionality minus
    the number of shared axes, matching the bias conventions of
    :class:`DenseLayer` or :class:`Conv2DLayer`. For example:

    >>> layer = ScaleLayer((20, 30, 40, 50), shared_axes=(0, 2))
    >>> layer.scales.get_value().shape
    (30, 50)
    """
    def __init__(self, incoming, scales=init.Constant(1), shared_axes='auto',
                 **kwargs):
        super(ScaleLayer, self).__init__(incoming, **kwargs)

        if shared_axes == 'auto':
            # default: share scales over all but the second axis
            shared_axes = (0,) + tuple(range(2, len(self.input_shape)))
        elif isinstance(shared_axes, int_types):
            shared_axes = (shared_axes,)
        self.shared_axes = shared_axes

        # the scales shape is the input shape with all shared axes
        # removed; every remaining axis must be of known size
        shape = []
        for axis, size in enumerate(self.input_shape):
            if axis in self.shared_axes:
                continue
            if size is None:
                raise ValueError("ScaleLayer needs specified input sizes for "
                                 "all axes that scales are not shared over.")
            shape.append(size)
        self.scales = self.add_param(
            scales, shape, 'scales', regularizable=False)

    def get_output_for(self, input, **kwargs):
        # broadcast the scales over the shared axes; scale dimensions are
        # consumed in order for the non-shared axes
        pattern = []
        next_scale_axis = 0
        for input_axis in range(input.ndim):
            if input_axis in self.shared_axes:
                pattern.append('x')
            else:
                pattern.append(next_scale_axis)
                next_scale_axis += 1
        return input * self.scales.dimshuffle(*pattern)
def standardize(layer, offset, scale, shared_axes='auto'):
    """
    Convenience function for standardizing inputs by applying a fixed offset
    and scale.

    Useful when the network input should, say, have zero mean and unit
    standard deviation over the feature dimensions: supply the appropriate
    statistics as `offset` and `scale`, and the input is transformed by
    subtracting `offset` and dividing by `scale`, sharing dimensions as
    specified by `shared_axes`.

    Parameters
    ----------
    layer : a :class:`Layer` instance or a tuple
        The layer feeding into this layer, or the expected input shape.
    offset : Theano shared variable or numpy array
        The offset to apply (via subtraction) to the axis/axes being
        standardized.
    scale : Theano shared variable or numpy array
        The scale to apply (via division) to the axis/axes being standardized.
    shared_axes : 'auto', int or tuple of int
        The axis or axes to share the offset and scale over. If ``'auto'`` (the
        default), share over all axes except for the second: this will share
        scales over the minibatch dimension for dense layers, and additionally
        over all spatial dimensions for convolutional layers.

    Examples
    --------
    Assuming your training data exists in a 2D numpy ndarray called
    ``training_data``, you can use this function to scale input features to the
    [0, 1] range based on the training set statistics like so:

    >>> import lasagne
    >>> import numpy as np
    >>> training_data = np.random.standard_normal((100, 20))
    >>> input_shape = (None, training_data.shape[1])
    >>> l_in = lasagne.layers.InputLayer(input_shape)
    >>> offset = training_data.min(axis=0)
    >>> scale = training_data.max(axis=0) - training_data.min(axis=0)
    >>> l_std = standardize(l_in, offset, scale, shared_axes=0)

    Alternatively, to z-score your inputs based on training set statistics, you
    could set ``offset = training_data.mean(axis=0)`` and
    ``scale = training_data.std(axis=0)`` instead.
    """
    # subtract the offset with a BiasLayer whose bias is frozen
    shifted = BiasLayer(layer, -offset, shared_axes)
    shifted.params[shifted.b].remove('trainable')
    # divide by the scale with a ScaleLayer whose scales are frozen
    scaled = ScaleLayer(shifted, floatX(1.)/scale, shared_axes)
    scaled.params[scaled.scales].remove('trainable')
    return scaled
class ExpressionLayer(Layer):
    """
    This layer provides boilerplate for a custom layer that applies a
    simple transformation to the input.

    Parameters
    ----------
    incoming : a :class:`Layer` instance or a tuple
        The layer feeding into this layer, or the expected input shape.
    function : callable
        A function to be applied to the output of the previous layer.
    output_shape : None, callable, tuple, or 'auto'
        Specifies the output shape of this layer. If a tuple, this fixes the
        output shape for any input shape (the tuple can contain None if some
        dimensions may vary). If a callable, it should return the calculated
        output shape given the input shape. If None, the output shape is
        assumed to be the same as the input shape. If 'auto', an attempt will
        be made to automatically infer the correct output shape.

    Notes
    -----
    An :class:`ExpressionLayer` that does not change the shape of the data
    (i.e., is constructed with the default setting of ``output_shape=None``)
    is functionally equivalent to a :class:`NonlinearityLayer`.

    Examples
    --------
    >>> from lasagne.layers import InputLayer, ExpressionLayer
    >>> l_in = InputLayer((32, 100, 20))
    >>> l1 = ExpressionLayer(l_in, lambda X: X.mean(-1), output_shape='auto')
    >>> l1.output_shape
    (32, 100)
    """
    def __init__(self, incoming, function, output_shape=None, **kwargs):
        super(ExpressionLayer, self).__init__(incoming, **kwargs)

        if output_shape is None:
            self._output_shape = None
        elif output_shape == 'auto':
            self._output_shape = 'auto'
        elif hasattr(output_shape, '__call__'):
            # replace the shape computation with the user-supplied callable
            self.get_output_shape_for = output_shape
        else:
            self._output_shape = tuple(output_shape)

        self.function = function

    def get_output_shape_for(self, input_shape):
        if self._output_shape is None:
            return input_shape
        # BUGFIX: compare strings by value with `==`, not identity with
        # `is` -- identity comparison against a literal only worked by
        # accident of CPython string interning (and is a SyntaxWarning on
        # Python >= 3.8).
        elif self._output_shape == 'auto':
            # substitute 0 for unknown dimensions, trace the function
            # symbolically, and evaluate the resulting shape; dimensions
            # that come out as 0 are reported as unknown (None) again
            input_shape = (0 if s is None else s for s in input_shape)
            X = theano.tensor.alloc(0, *input_shape)
            output_shape = self.function(X).shape.eval()
            output_shape = tuple(s if s else None for s in output_shape)
            return output_shape
        else:
            return self._output_shape

    def get_output_for(self, input, **kwargs):
        return self.function(input)
class InverseLayer(MergeLayer):
    """
    The :class:`InverseLayer` class performs the inverse operation of a
    single layer by backpropagating through it: it applies the partial
    derivative of the inverted layer with respect to its input. This yields
    a transposed layer for a :class:`DenseLayer`, a deconvolutional layer
    for :class:`Conv2DLayer` / :class:`Conv1DLayer`, and an unpooling layer
    for :class:`MaxPool2DLayer`.

    It is specially useful for building (convolutional) autoencoders with
    tied parameters. Note that if the layer to be inverted contains a
    nonlinearity and/or a bias, the :class:`InverseLayer` will include the
    derivative of that in its computation.

    Parameters
    ----------
    incoming : a :class:`Layer` instance or a tuple
        The layer feeding into this layer, or the expected input shape.
    layer : a :class:`Layer` instance or a tuple
        The layer with respect to which the instance of the
        :class:`InverseLayer` is inverse to.

    Examples
    --------
    >>> import lasagne
    >>> from lasagne.layers import InputLayer, Conv2DLayer, DenseLayer
    >>> from lasagne.layers import InverseLayer
    >>> l_in = InputLayer((100, 3, 28, 28))
    >>> l1 = Conv2DLayer(l_in, num_filters=16, filter_size=5)
    >>> l2 = DenseLayer(l1, num_units=20)
    >>> l_u2 = InverseLayer(l2, l2)  # backprop through l2
    >>> l_u1 = InverseLayer(l_u2, l1)  # backprop through l1
    """
    def __init__(self, incoming, layer, **kwargs):
        # merge three inputs: the data to invert, plus the output and the
        # input of the layer being inverted
        super(InverseLayer, self).__init__(
            [incoming, layer, layer.input_layer], **kwargs)

    def get_output_shape_for(self, input_shapes):
        # the inverse operation maps back to the inverted layer's input
        return input_shapes[2]

    def get_output_for(self, inputs, **kwargs):
        cur_input, inverted_out, inverted_in = inputs
        # backpropagate cur_input through the inverted layer
        return theano.grad(None, wrt=inverted_in,
                           known_grads={inverted_out: cur_input})
class TransformerLayer(MergeLayer):
    """
    Spatial transformer layer

    Applies a learned affine transformation to its input, parameterized by
    the six outputs of a localization network [1]_, and resamples the
    result with bilinear interpolation.

    Parameters
    ----------
    incoming : a :class:`Layer` instance or a tuple
        The layer feeding into this layer, or the expected input shape;
        must produce a 4D tensor of shape
        ``(batch_size, num_input_channels, input_rows, input_columns)``.
    localization_network : a :class:`Layer` instance
        The network computing the six affine parameters; its output shape
        must be ``(batch_size, 6)``. See the example for initializing it
        to the identity transform.
    downsample_factor : float or iterable of float
        A float or a 2-element tuple specifying the downsample factor for
        the output image (in both spatial dimensions). A value of 1 keeps
        the original size of the input; values larger than 1 downsample;
        values below 1 upsample.
    border_mode : 'nearest', 'mirror', or 'wrap'
        How points sampled outside the input grid are handled during
        interpolation: clipped to the boundary ('nearest'), mirrored
        across it ('mirror'), or wrapped to the opposite side ('wrap').
        See http://stackoverflow.com/q/22669252/22670830#22670830 for
        details.

    References
    ----------
    .. [1] Max Jaderberg, Karen Simonyan, Andrew Zisserman,
           Koray Kavukcuoglu (2015):
           Spatial Transformer Networks. NIPS 2015,
           http://papers.nips.cc/paper/5854-spatial-transformer-networks.pdf

    Examples
    --------
    Set up the layer to initially perform the identity transform, similarly
    to [1]_ (use a localization network with linear output). If that output
    is [t1, t2, t3, t4, t5, t6], then t1 and t5 determine zoom, t2 and t4
    determine skewness, and t3 and t6 move the center position.

    >>> import numpy as np
    >>> import lasagne
    >>> b = np.zeros((2, 3), dtype='float32')
    >>> b[0, 0] = 1
    >>> b[1, 1] = 1
    >>> b = b.flatten()  # identity transform
    >>> W = lasagne.init.Constant(0.0)
    >>> l_in = lasagne.layers.InputLayer((None, 3, 28, 28))
    >>> l_loc = lasagne.layers.DenseLayer(l_in, num_units=6, W=W, b=b,
    ...                                   nonlinearity=None)
    >>> l_trans = lasagne.layers.TransformerLayer(l_in, l_loc)
    """
    def __init__(self, incoming, localization_network, downsample_factor=1,
                 border_mode='nearest', **kwargs):
        super(TransformerLayer, self).__init__(
            [incoming, localization_network], **kwargs)
        self.downsample_factor = as_tuple(downsample_factor, 2)
        self.border_mode = border_mode

        input_shp, loc_shp = self.input_shapes
        if loc_shp[-1] != 6 or len(loc_shp) != 2:
            raise ValueError("The localization network must have "
                             "output shape: (batch_size, 6)")
        if len(input_shp) != 4:
            raise ValueError("The input network must have a 4-dimensional "
                             "output shape: (batch_size, num_input_channels, "
                             "input_rows, input_columns)")

    def get_output_shape_for(self, input_shapes):
        shape = input_shapes[0]
        # divide each known spatial size by its downsample factor
        spatial = []
        for size, factor in zip(shape[2:], self.downsample_factor):
            spatial.append(None if size is None else int(size // factor))
        return shape[:2] + tuple(spatial)

    def get_output_for(self, inputs, **kwargs):
        # see eq. (1) and sec 3.1 in [1]
        feature_map, theta = inputs
        return _transform_affine(theta, feature_map,
                                 self.downsample_factor, self.border_mode)
def _transform_affine(theta, input, downsample_factor, border_mode):
    """Apply a batch of 2x3 affine transforms `theta` to `input` (a
    (bs, channels, height, width) tensor) by bilinear sampling on a
    regular output grid, returning the transformed batch."""
    num_batch, num_channels, height, width = input.shape
    theta = T.reshape(theta, (-1, 2, 3))

    # output grid of (x_t, y_t, 1) homogeneous coordinates, eq (1) in [1]
    out_height = T.cast(height // downsample_factor[0], 'int64')
    out_width = T.cast(width // downsample_factor[1], 'int64')
    grid = _meshgrid(out_height, out_width)

    # source coordinates: A x (x_t, y_t, 1)^T -> (x_s, y_s)
    transformed = T.dot(theta, grid)
    x_src = transformed[:, 0].flatten()
    y_src = transformed[:, 1].flatten()

    # sample with channels last, then restore (bs, channels, height, width)
    input_nhwc = input.dimshuffle(0, 2, 3, 1)
    sampled = _interpolate(
        input_nhwc, x_src, y_src,
        out_height, out_width, border_mode)
    output = T.reshape(
        sampled, (num_batch, out_height, out_width, num_channels))
    return output.dimshuffle(0, 3, 1, 2)
def _interpolate(im, x, y, out_height, out_width, border_mode):
    """Bilinearly sample `im` (a (num_batch, height, width, channels)
    tensor) at the flattened normalized coordinates `x`, `y` in [-1, 1],
    handling out-of-grid points according to `border_mode`."""
    # *_f are floats
    num_batch, height, width, channels = im.shape
    height_f = T.cast(height, theano.config.floatX)
    width_f = T.cast(width, theano.config.floatX)
    # scale coordinates from [-1, 1] to [0, width/height - 1]
    x = (x + 1) / 2 * (width_f - 1)
    y = (y + 1) / 2 * (height_f - 1)
    # obtain indices of the 2x2 pixel neighborhood surrounding the coordinates;
    # we need those in floatX for interpolation and in int64 for indexing.
    x0_f = T.floor(x)
    y0_f = T.floor(y)
    x1_f = x0_f + 1
    y1_f = y0_f + 1
    # for indexing, we need to take care of the border mode for outside pixels.
    if border_mode == 'nearest':
        # clip to the valid index range
        x0 = T.clip(x0_f, 0, width_f - 1)
        x1 = T.clip(x1_f, 0, width_f - 1)
        y0 = T.clip(y0_f, 0, height_f - 1)
        y1 = T.clip(y1_f, 0, height_f - 1)
    elif border_mode == 'mirror':
        # reflect into [0, width-1]: min(v % w, -v % w) with period
        # w = 2*(width-1) is a triangular wave mirroring at both borders
        w = 2 * (width_f - 1)
        x0 = T.minimum(x0_f % w, -x0_f % w)
        x1 = T.minimum(x1_f % w, -x1_f % w)
        h = 2 * (height_f - 1)
        y0 = T.minimum(y0_f % h, -y0_f % h)
        y1 = T.minimum(y1_f % h, -y1_f % h)
    elif border_mode == 'wrap':
        # wrap around to the opposite side of the grid
        x0 = T.mod(x0_f, width_f)
        x1 = T.mod(x1_f, width_f)
        y0 = T.mod(y0_f, height_f)
        y1 = T.mod(y1_f, height_f)
    else:
        raise ValueError("border_mode must be one of "
                         "'nearest', 'mirror', 'wrap'")
    x0, x1, y0, y1 = (T.cast(v, 'int64') for v in (x0, x1, y0, y1))
    # The input is [num_batch, height, width, channels]. We do the lookup in
    # the flattened input, i.e [num_batch*height*width, channels]. We need
    # to offset all indices to match the flat version
    dim2 = width
    dim1 = width*height
    base = T.repeat(
        T.arange(num_batch, dtype='int64')*dim1, out_height*out_width)
    base_y0 = base + y0*dim2
    base_y1 = base + y1*dim2
    idx_a = base_y0 + x0
    idx_b = base_y1 + x0
    idx_c = base_y0 + x1
    idx_d = base_y1 + x1
    # use indices to lookup pixels for all samples
    im_flat = im.reshape((-1, channels))
    Ia = im_flat[idx_a]
    Ib = im_flat[idx_b]
    Ic = im_flat[idx_c]
    Id = im_flat[idx_d]
    # calculate interpolated values: weight each corner by the opposite
    # rectangle's area (standard bilinear interpolation)
    wa = ((x1_f-x) * (y1_f-y)).dimshuffle(0, 'x')
    wb = ((x1_f-x) * (y-y0_f)).dimshuffle(0, 'x')
    wc = ((x-x0_f) * (y1_f-y)).dimshuffle(0, 'x')
    wd = ((x-x0_f) * (y-y0_f)).dimshuffle(0, 'x')
    output = T.sum([wa*Ia, wb*Ib, wc*Ic, wd*Id], axis=0)
    return output
def _linspace(start, stop, num):
    """Symbolic equivalent of np.linspace(start, stop, num): `num` evenly
    spaced values from `start` to `stop` inclusive."""
    dtype = theano.config.floatX
    start = T.cast(start, dtype)
    stop = T.cast(stop, dtype)
    num = T.cast(num, dtype)
    step = (stop - start) / (num - 1)
    return start + T.arange(num, dtype=dtype) * step
def _meshgrid(height, width):
    """Grid generator from eq. (1) in reference [1]: returns a (3,
    height*width) tensor of homogeneous (x_t, y_t, 1) coordinates spanning
    [-1, 1] in both spatial dimensions.

    Equivalent numpy code:
        x_t, y_t = np.meshgrid(np.linspace(-1, 1, width),
                               np.linspace(-1, 1, height))
        ones = np.ones(np.prod(x_t.shape))
        grid = np.vstack([x_t.flatten(), y_t.flatten(), ones])
    Implemented in Theano to support symbolic grid sizes. (If the image
    size were known at construction time, the grid could be precomputed in
    numpy, but this hardly affected performance when tried.)
    """
    # outer products replicate each linspace along the other dimension
    xs = T.dot(T.ones((height, 1)),
               _linspace(-1.0, 1.0, width).dimshuffle('x', 0))
    ys = T.dot(_linspace(-1.0, 1.0, height).dimshuffle(0, 'x'),
               T.ones((1, width)))
    xs_flat = xs.reshape((1, -1))
    ys_flat = ys.reshape((1, -1))
    ones_row = T.ones_like(xs_flat)
    return T.concatenate([xs_flat, ys_flat, ones_row], axis=0)
class TPSTransformerLayer(MergeLayer):
    """
    Spatial transformer layer
    The layer applies a thin plate spline transformation [2]_ on the input
    as in [1]_. The thin plate spline transform is determined based on the
    movement of some number of control points. The starting positions for
    these control points are fixed. The output is interpolated with a
    bilinear transformation.
    Parameters
    ----------
    incoming : a :class:`Layer` instance or a tuple
        The layer feeding into this layer, or the expected input shape. The
        output of this layer should be a 4D tensor, with shape
        ``(batch_size, num_input_channels, input_rows, input_columns)``.
    localization_network : a :class:`Layer` instance
        The network that calculates the parameters of the thin plate spline
        transformation as the x and y coordinates of the destination offsets of
        each control point. The output of the localization network should
        be a 2D tensor, with shape ``(batch_size, 2 * num_control_points)``
    downsample_factor : float or iterable of float
        A float or a 2-element tuple specifying the downsample factor for the
        output image (in both spatial dimensions). A value of 1 will keep the
        original size of the input. Values larger than 1 will downsample the
        input. Values below 1 will upsample the input.
    control_points : integer
        The number of control points to be used for the thin plate spline
        transformation. These points will be arranged as a grid along the
        image, so the value must be a perfect square. Default is 16.
    precompute_grid : 'auto' or boolean
        Flag to precompute the U function [2]_ for the grid and source
        points. If 'auto', will be set to true as long as the input height
        and width are specified. If true, the U function is computed when the
        layer is constructed for a fixed input shape. If false, grid will be
        computed as part of the Theano computational graph, which is
        substantially slower as this computation scales with
        num_pixels*num_control_points. Default is 'auto'.
    border_mode : 'nearest', 'mirror', or 'wrap'
        Determines how border conditions are handled during interpolation. If
        'nearest', points outside the grid are clipped to the boundary'. If
        'mirror', points are mirrored across the boundary. If 'wrap', points
        wrap around to the other side of the grid. See
        http://stackoverflow.com/q/22669252/22670830#22670830 for details.
    References
    ----------
    .. [1] Max Jaderberg, Karen Simonyan, Andrew Zisserman,
           Koray Kavukcuoglu (2015):
           Spatial Transformer Networks. NIPS 2015,
           http://papers.nips.cc/paper/5854-spatial-transformer-networks.pdf
    .. [2] Fred L. Bookstein (1989):
           Principal warps: thin-plate splines and the decomposition of
           deformations. IEEE Transactions on
           Pattern Analysis and Machine Intelligence.
           http://doi.org/10.1109/34.24792
    Examples
    --------
    Here, we'll implement an identity transform using a thin plate spline
    transform. First we'll create the destination control point offsets. To
    make everything invariant to the shape of the image, the x and y range
    of the image is normalized to [-1, 1] as in ref [1]_. To replicate an
    identity transform, we'll set the bias to have all offsets be 0. More
    complicated transformations can easily be implemented using different x
    and y offsets (importantly, each control point can have it's own pair of
    offsets).
    >>> import numpy as np
    >>> import lasagne
    >>>
    >>> # Create the network
    >>> # we'll initialize the weights and biases to zero, so it starts
    >>> # as the identity transform (all control point offsets are zero)
    >>> W = b = lasagne.init.Constant(0.0)
    >>>
    >>> # Set the number of points
    >>> num_points = 16
    >>>
    >>> l_in = lasagne.layers.InputLayer((None, 3, 28, 28))
    >>> l_loc = lasagne.layers.DenseLayer(l_in, num_units=2*num_points,
    ...                                   W=W, b=b, nonlinearity=None)
    >>> l_trans = lasagne.layers.TPSTransformerLayer(l_in, l_loc,
    ...                                              control_points=num_points)
    """
    def __init__(self, incoming, localization_network, downsample_factor=1,
                 control_points=16, precompute_grid='auto',
                 border_mode='nearest', **kwargs):
        # MergeLayer with two inputs: the image and the localization net.
        super(TPSTransformerLayer, self).__init__(
            [incoming, localization_network], **kwargs)
        self.border_mode = border_mode
        self.downsample_factor = as_tuple(downsample_factor, 2)
        self.control_points = control_points
        input_shp, loc_shp = self.input_shapes
        # Error checking
        # Localization net must emit (x, y) offsets for every control point.
        if loc_shp[-1] != 2 * control_points or len(loc_shp) != 2:
            raise ValueError("The localization network must have "
                             "output shape: (batch_size, "
                             "2*control_points)")
        # Control points are laid out as a square grid, hence perfect square.
        if round(np.sqrt(control_points)) != np.sqrt(
                control_points):
            raise ValueError("The number of control points must be"
                             " a perfect square.")
        if len(input_shp) != 4:
            raise ValueError("The input network must have a 4-dimensional "
                             "output shape: (batch_size, num_input_channels, "
                             "input_rows, input_columns)")
        # Process precompute grid
        # Precomputation is only possible when height/width are static.
        can_precompute_grid = all(s is not None for s in input_shp[2:])
        if precompute_grid == 'auto':
            precompute_grid = can_precompute_grid
        elif precompute_grid and not can_precompute_grid:
            raise ValueError("Grid can only be precomputed if the input "
                             "height and width are pre-specified.")
        self.precompute_grid = precompute_grid
        # Create source points and L matrix
        self.right_mat, self.L_inv, self.source_points, self.out_height, \
            self.out_width = _initialize_tps(
                control_points, input_shp, self.downsample_factor,
                precompute_grid)
    def get_output_shape_for(self, input_shapes):
        # Output keeps batch/channel dims; spatial dims shrink by the
        # downsample factors (None stays None for symbolic sizes).
        shape = input_shapes[0]
        factors = self.downsample_factor
        return (shape[:2] + tuple(None if s is None else int(s // f)
                                  for s, f in zip(shape[2:], factors)))
    def get_output_for(self, inputs, **kwargs):
        # see eq. (1) and sec 3.1 in [1]
        # Get input and destination control points
        input, dest_offsets = inputs
        return _transform_thin_plate_spline(
            dest_offsets, input, self.right_mat, self.L_inv,
            self.source_points, self.out_height, self.out_width,
            self.precompute_grid, self.downsample_factor, self.border_mode)
def _transform_thin_plate_spline(
        dest_offsets, input, right_mat, L_inv, source_points, out_height,
        out_width, precompute_grid, downsample_factor, border_mode):
    """Warp `input` with a thin plate spline defined by the control-point
    offsets, then resample it bilinearly onto the output grid.

    Returns a tensor in conv format: (num_batch, num_channels, out_h, out_w).
    """
    num_batch, num_channels, height, width = input.shape
    num_control_points = source_points.shape[1]
    # reshape destination offsets to be (num_batch, 2, num_control_points)
    # and add to source_points
    dest_points = source_points + T.reshape(
        dest_offsets, (num_batch, 2, num_control_points))
    # Solve as in ref [2]
    # L_inv[:, 3:] drops the affine-constraint columns of the inverse.
    coefficients = T.dot(dest_points, L_inv[:, 3:].T)
    if precompute_grid:
        # Transform each point on the source grid (image_size x image_size)
        right_mat = T.tile(right_mat.dimshuffle('x', 0, 1), (num_batch, 1, 1))
        transformed_points = T.batched_dot(coefficients, right_mat)
    else:
        # Transformed grid
        out_height = T.cast(height // downsample_factor[0], 'int64')
        out_width = T.cast(width // downsample_factor[1], 'int64')
        orig_grid = _meshgrid(out_height, out_width)
        orig_grid = orig_grid[0:2, :]
        orig_grid = T.tile(orig_grid, (num_batch, 1, 1))
        # Transform each point on the source grid (image_size x image_size)
        transformed_points = _get_transformed_points_tps(
            orig_grid, source_points, coefficients, num_control_points,
            num_batch)
    # Get out new points
    x_transformed = transformed_points[:, 0].flatten()
    y_transformed = transformed_points[:, 1].flatten()
    # dimshuffle input to (bs, height, width, channels)
    input_dim = input.dimshuffle(0, 2, 3, 1)
    input_transformed = _interpolate(
        input_dim, x_transformed, y_transformed,
        out_height, out_width, border_mode)
    output = T.reshape(input_transformed,
                       (num_batch, out_height, out_width, num_channels))
    output = output.dimshuffle(0, 3, 1, 2)  # dimshuffle to conv format
    return output
def _get_transformed_points_tps(new_points, source_points, coefficients,
                                num_points, batch_size):
    """
    Calculates the transformed points' value using the provided coefficients
    :param new_points: num_batch x 2 x num_to_transform tensor
    :param source_points: 2 x num_points array of source points
    :param coefficients: coefficients (should be shape (num_batch, 2,
    control_points + 3))
    :param num_points: the number of points
    :param batch_size: the number of samples in the batch
    :return: the x and y coordinates of each transformed point. Shape (
    num_batch, 2, num_to_transform)
    """
    # Calculate the U function for the new point and each source point as in
    # ref [2]
    # The U function is simply U(r) = r^2 * log(r^2), where r^2 is the
    # squared distance
    # Calculate the squared dist between the new point and the source points
    to_transform = new_points.dimshuffle(0, 'x', 1, 2)
    stacked_transform = T.tile(to_transform, (1, num_points, 1, 1))
    r_2 = T.sum(((stacked_transform - source_points.dimshuffle(
        'x', 1, 0, 'x')) ** 2), axis=2)
    # Take the product (r^2 * log(r^2)), being careful to avoid NaNs.
    # U(0) is defined as 0, but computing log(0) gives -inf (not NaN) and
    # 0 * -inf gives NaN, so the old guard
    #     T.switch(T.isnan(log_r_2), r_2 * log_r_2, 0.)
    # was doubly wrong: its branches were inverted (yielding 0 for every
    # normal point) and isnan never fires on -inf. Select on r_2 == 0
    # directly, matching the numpy precompute path in _initialize_tps,
    # which zeroes the isinf entries of log(r_2).
    distances = T.switch(T.eq(r_2, 0.), 0., r_2 * T.log(r_2))
    # Add in the coefficients for the affine translation (1, x, and y,
    # corresponding to a_1, a_x, and a_y)
    upper_array = T.concatenate([T.ones((batch_size, 1, new_points.shape[2]),
                                        dtype=theano.config.floatX),
                                 new_points], axis=1)
    right_mat = T.concatenate([upper_array, distances], axis=1)
    # Calculate the new value as the dot product
    new_value = T.batched_dot(coefficients, right_mat)
    return new_value
def _U_func_numpy(x1, y1, x2, y2):
"""
Function which implements the U function from Bookstein paper
:param x1: x coordinate of the first point
:param y1: y coordinate of the first point
:param x2: x coordinate of the second point
:param y2: y coordinate of the second point
:return: value of z
"""
# Return zero if same point
if x1 == x2 and y1 == y2:
return 0.
# Calculate the squared Euclidean norm (r^2)
r_2 = (x2 - x1) ** 2 + (y2 - y1) ** 2
# Return the squared norm (r^2 * log r^2)
return r_2 * np.log(r_2)
def _initialize_tps(num_control_points, input_shape, downsample_factor,
                    precompute_grid):
    """
    Initializes the thin plate spline calculation by creating the source
    point array and the inverted L matrix used for calculating the
    transformations as in ref [2]_
    :param num_control_points: the number of control points. Must be a
    perfect square. Points will be used to generate an evenly spaced grid.
    :param input_shape: tuple with 4 elements specifying the input shape
    :param downsample_factor: tuple with 2 elements specifying the
    downsample for the height and width, respectively
    :param precompute_grid: boolean specifying whether to precompute the
    grid matrix
    :return:
    right_mat: shape (num_control_points + 3, out_height*out_width) tensor
    L_inv: shape (num_control_points + 3, num_control_points + 3) tensor
    source_points: shape (2, num_control_points) tensor
    out_height: tensor constant specifying the output height
    out_width: tensor constant specifying the output width
    """
    # break out input_shape
    _, _, height, width = input_shape
    # Create source grid
    grid_size = np.sqrt(num_control_points)
    x_control_source, y_control_source = np.meshgrid(
        np.linspace(-1, 1, grid_size),
        np.linspace(-1, 1, grid_size))
    # Create 2 x num_points array of source points
    source_points = np.vstack(
        (x_control_source.flatten(), y_control_source.flatten()))
    # Convert to floatX
    source_points = source_points.astype(theano.config.floatX)
    # Get number of equations
    num_equations = num_control_points + 3
    # Initialize L to be num_equations square matrix
    L = np.zeros((num_equations, num_equations), dtype=theano.config.floatX)
    # Create P matrix components
    # (the affine part: a row/column of ones plus the point coordinates,
    # as in the block structure of L in Bookstein's formulation)
    L[0, 3:num_equations] = 1.
    L[1:3, 3:num_equations] = source_points
    L[3:num_equations, 0] = 1.
    L[3:num_equations, 1:3] = source_points.T
    # Loop through each pair of points and create the K matrix
    # K is symmetric, so only the upper triangle is computed directly.
    for point_1 in range(num_control_points):
        for point_2 in range(point_1, num_control_points):
            L[point_1 + 3, point_2 + 3] = _U_func_numpy(
                source_points[0, point_1], source_points[1, point_1],
                source_points[0, point_2], source_points[1, point_2])
            if point_1 != point_2:
                L[point_2 + 3, point_1 + 3] = L[point_1 + 3, point_2 + 3]
    # Invert
    L_inv = np.linalg.inv(L)
    if precompute_grid:
        # Construct grid
        out_height = np.array(height // downsample_factor[0]).astype('int64')
        out_width = np.array(width // downsample_factor[1]).astype('int64')
        x_t, y_t = np.meshgrid(np.linspace(-1, 1, out_width),
                               np.linspace(-1, 1, out_height))
        ones = np.ones(np.prod(x_t.shape))
        orig_grid = np.vstack([x_t.flatten(), y_t.flatten(), ones])
        orig_grid = orig_grid[0:2, :]
        orig_grid = orig_grid.astype(theano.config.floatX)
        # Construct right mat
        # First Calculate the U function for the new point and each source
        # point as in ref [2]
        # The U function is simply U(r) = r^2 * log(r^2), where r^2 is the
        # squared distance
        to_transform = orig_grid[:, :, np.newaxis].transpose(2, 0, 1)
        stacked_transform = np.tile(to_transform, (num_control_points, 1, 1))
        stacked_source_points = \
            source_points[:, :, np.newaxis].transpose(1, 0, 2)
        r_2 = np.sum((stacked_transform - stacked_source_points) ** 2, axis=1)
        # Take the product (r^2 * log(r^2)), being careful to avoid NaNs
        # (log(0) = -inf here; those entries are zeroed so U(0) = 0)
        log_r_2 = np.log(r_2)
        log_r_2[np.isinf(log_r_2)] = 0.
        distances = r_2 * log_r_2
        # Add in the coefficients for the affine translation (1, x, and y,
        # corresponding to a_1, a_x, and a_y)
        upper_array = np.ones(shape=(1, orig_grid.shape[1]),
                              dtype=theano.config.floatX)
        upper_array = np.concatenate([upper_array, orig_grid], axis=0)
        right_mat = np.concatenate([upper_array, distances], axis=0)
        # Convert to tensors
        out_height = T.as_tensor_variable(out_height)
        out_width = T.as_tensor_variable(out_width)
        right_mat = T.as_tensor_variable(right_mat)
    else:
        # Dynamic shapes: the grid is built inside the Theano graph instead.
        out_height = None
        out_width = None
        right_mat = None
    # Convert to tensors
    L_inv = T.as_tensor_variable(L_inv)
    source_points = T.as_tensor_variable(source_points)
    return right_mat, L_inv, source_points, out_height, out_width
class ParametricRectifierLayer(Layer):
    """
    lasagne.layers.ParametricRectifierLayer(incoming,
    alpha=init.Constant(0.25), shared_axes='auto', **kwargs)
    A layer that applies parametric rectify nonlinearity to its input
    following [1]_.
    Equation for the parametric rectifier linear unit:
    :math:`\\varphi(x) = \\max(x,0) + \\alpha \\min(x,0)`
    Parameters
    ----------
    incoming : a :class:`Layer` instance or a tuple
        The layer feeding into this layer, or the expected input shape
    alpha : Theano shared variable, expression, numpy array or callable
        Initial value, expression or initializer for the alpha values. The
        shape must match the incoming shape, skipping those axes the alpha
        values are shared over (see the example below).
        See :func:`lasagne.utils.create_param` for more information.
    shared_axes : 'auto', 'all', int or tuple of int
        The axes along which the parameters of the rectifier units are
        going to be shared. If ``'auto'`` (the default), share over all axes
        except for the second - this will share the parameter over the
        minibatch dimension for dense layers, and additionally over all
        spatial dimensions for convolutional layers. If ``'all'``, share over
        all axes, which corresponds to a single scalar parameter.
    **kwargs
        Any additional keyword arguments are passed to the `Layer` superclass.
    References
    ----------
    .. [1] K He, X Zhang et al. (2015):
       Delving Deep into Rectifiers: Surpassing Human-Level Performance on
       ImageNet Classification,
       http://arxiv.org/abs/1502.01852
    Notes
    -----
    The alpha parameter dimensionality is the input dimensionality minus the
    number of axes it is shared over, which matches the same convention as
    the :class:`BiasLayer`.
    >>> layer = ParametricRectifierLayer((20, 3, 28, 28), shared_axes=(0, 3))
    >>> layer.alpha.get_value().shape
    (3, 28)
    """
    def __init__(self, incoming, alpha=init.Constant(0.25), shared_axes='auto',
                 **kwargs):
        super(ParametricRectifierLayer, self).__init__(incoming, **kwargs)
        # Normalize shared_axes into a tuple of axis indices.
        if shared_axes == 'auto':
            self.shared_axes = (0,) + tuple(range(2, len(self.input_shape)))
        elif shared_axes == 'all':
            self.shared_axes = tuple(range(len(self.input_shape)))
        elif isinstance(shared_axes, int_types):
            self.shared_axes = (shared_axes,)
        else:
            self.shared_axes = shared_axes
        # alpha's shape covers only the non-shared axes.
        shape = [size for axis, size in enumerate(self.input_shape)
                 if axis not in self.shared_axes]
        if any(size is None for size in shape):
            raise ValueError("ParametricRectifierLayer needs input sizes for "
                             "all axes that alpha's are not shared over.")
        self.alpha = self.add_param(alpha, shape, name="alpha",
                                    regularizable=False)
    def get_output_for(self, input, **kwargs):
        # Broadcast alpha back to the input's dimensionality: shared axes
        # become broadcastable ('x'), the rest consume alpha's axes in order.
        axes = iter(range(self.alpha.ndim))
        pattern = ['x' if input_axis in self.shared_axes
                   else next(axes)
                   for input_axis in range(input.ndim)]
        alpha = self.alpha.dimshuffle(pattern)
        return theano.tensor.nnet.relu(input, alpha)
def prelu(layer, **kwargs):
    """
    Convenience function to apply parametric rectify to a given layer's output.
    Will set the layer's nonlinearity to identity if there is one and will
    apply the parametric rectifier instead.
    Parameters
    ----------
    layer: a :class:`Layer` instance
        The `Layer` instance to apply the parametric rectifier layer to;
        note that it will be irreversibly modified as specified above
    **kwargs
        Any additional keyword arguments are passed to the
        :class:`ParametricRectifierLayer`
    Examples
    --------
    Note that this function modifies an existing layer, like this:
    >>> from lasagne.layers import InputLayer, DenseLayer, prelu
    >>> layer = InputLayer((32, 100))
    >>> layer = DenseLayer(layer, num_units=200)
    >>> layer = prelu(layer)
    In particular, :func:`prelu` can *not* be passed as a nonlinearity.
    """
    # Neutralize the wrapped layer's own nonlinearity (if it has one) so
    # the parametric rectifier becomes the only activation applied.
    if getattr(layer, 'nonlinearity', None) is not None:
        layer.nonlinearity = nonlinearities.identity
    return ParametricRectifierLayer(layer, **kwargs)
class RandomizedRectifierLayer(Layer):
    """
    A layer that applies a randomized leaky rectify nonlinearity to its input.
    The randomized leaky rectifier was first proposed and used in the Kaggle
    NDSB Competition, and later evaluated in [1]_. Compared to the standard
    leaky rectifier :func:`leaky_rectify`, it has a randomly sampled slope
    for negative input during training, and a fixed slope during evaluation.
    Equation for the randomized rectifier linear unit during training:
    :math:`\\varphi(x) = \\max((\\sim U(lower, upper)) \\cdot x, x)`
    During evaluation, the factor is fixed to the arithmetic mean of `lower`
    and `upper`.
    Parameters
    ----------
    incoming : a :class:`Layer` instance or a tuple
        The layer feeding into this layer, or the expected input shape
    lower : Theano shared variable, expression, or constant
        The lower bound for the randomly chosen slopes.
    upper : Theano shared variable, expression, or constant
        The upper bound for the randomly chosen slopes.
    shared_axes : 'auto', 'all', int or tuple of int
        The axes along which the random slopes of the rectifier units are
        going to be shared. If ``'auto'`` (the default), share over all axes
        except for the second - this will share the random slope over the
        minibatch dimension for dense layers, and additionally over all
        spatial dimensions for convolutional layers. If ``'all'``, share over
        all axes, thus using a single random slope.
    **kwargs
        Any additional keyword arguments are passed to the `Layer` superclass.
    References
    ----------
    .. [1] Bing Xu, Naiyan Wang et al. (2015):
       Empirical Evaluation of Rectified Activations in Convolutional Network,
       http://arxiv.org/abs/1505.00853
    """
    def __init__(self, incoming, lower=0.3, upper=0.8, shared_axes='auto',
                 **kwargs):
        super(RandomizedRectifierLayer, self).__init__(incoming, **kwargs)
        self._srng = RandomStreams(get_rng().randint(1, 2147462579))
        self.lower = lower
        self.upper = upper
        # Only validate the bounds when they are concrete numbers; symbolic
        # (theano.Variable) bounds cannot be compared at construction time.
        if not isinstance(lower > upper, theano.Variable) and lower > upper:
            raise ValueError("Upper bound for RandomizedRectifierLayer needs "
                             "to be higher than lower bound.")
        # Normalize shared_axes into a tuple of axis indices.
        if shared_axes == 'auto':
            self.shared_axes = (0,) + tuple(range(2, len(self.input_shape)))
        elif shared_axes == 'all':
            self.shared_axes = tuple(range(len(self.input_shape)))
        elif isinstance(shared_axes, int_types):
            self.shared_axes = (shared_axes,)
        else:
            self.shared_axes = shared_axes
    def get_output_for(self, input, deterministic=False, **kwargs):
        """
        Parameters
        ----------
        input : tensor
            output from the previous layer
        deterministic : bool
            If true, the arithmetic mean of lower and upper are used for the
            leaky slope.
        """
        if deterministic or self.upper == self.lower:
            return theano.tensor.nnet.relu(input, (self.upper+self.lower)/2.0)
        else:
            # Sample one slope per non-shared element; shared axes get size 1
            # and are broadcast across the input.
            shape = list(self.input_shape)
            if any(s is None for s in shape):
                shape = list(input.shape)
            for ax in self.shared_axes:
                shape[ax] = 1
            rnd = self._srng.uniform(tuple(shape),
                                     low=self.lower,
                                     high=self.upper,
                                     dtype=theano.config.floatX)
            rnd = theano.tensor.addbroadcast(rnd, *self.shared_axes)
            return theano.tensor.nnet.relu(input, rnd)
def rrelu(layer, **kwargs):
    """
    Convenience function to apply randomized rectify to a given layer's output.
    Will set the layer's nonlinearity to identity if there is one and will
    apply the randomized rectifier instead.
    Parameters
    ----------
    layer: a :class:`Layer` instance
        The `Layer` instance to apply the randomized rectifier layer to;
        note that it will be irreversibly modified as specified above
    **kwargs
        Any additional keyword arguments are passed to the
        :class:`RandomizedRectifierLayer`
    Examples
    --------
    Note that this function modifies an existing layer, like this:
    >>> from lasagne.layers import InputLayer, DenseLayer, rrelu
    >>> layer = InputLayer((32, 100))
    >>> layer = DenseLayer(layer, num_units=200)
    >>> layer = rrelu(layer)
    In particular, :func:`rrelu` can *not* be passed as a nonlinearity.
    """
    # Neutralize the wrapped layer's own nonlinearity (if it has one) so
    # the randomized rectifier becomes the only activation applied.
    if getattr(layer, 'nonlinearity', None) is not None:
        layer.nonlinearity = nonlinearities.identity
    return RandomizedRectifierLayer(layer, **kwargs)
| 40.575042 | 79 | 0.645214 |
b98cdb49e0afcc25317b5b06b8a1f0c2b9dd093d | 1,935 | py | Python | apps/users/migrations/0001_initial.py | yasir-khilji-64/squibler-platform-section | ade0387b3b84c147181f7aaf3741b97da83499c6 | [
"MIT"
] | 2 | 2022-03-06T18:28:54.000Z | 2022-03-06T20:37:46.000Z | apps/users/migrations/0001_initial.py | yasir-khilji-64/squibler-platform-section | ade0387b3b84c147181f7aaf3741b97da83499c6 | [
"MIT"
] | null | null | null | apps/users/migrations/0001_initial.py | yasir-khilji-64/squibler-platform-section | ade0387b3b84c147181f7aaf3741b97da83499c6 | [
"MIT"
] | null | null | null | # Generated by Django 4.0.3 on 2022-03-06 13:40
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated initial migration: creates the custom ``User`` model."""

    # First migration of this app; can be applied to an empty database.
    initial = True

    dependencies = [
        # django.contrib.auth must exist first: the groups/user_permissions
        # M2M fields below reference auth.group and auth.permission.
        ('auth', '0012_alter_user_first_name_max_length'),
    ]
    operations = [
        migrations.CreateModel(
            name='User',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('password', models.CharField(max_length=128, verbose_name='password')),
                ('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
                ('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
                ('email', models.EmailField(db_index=True, max_length=255, unique=True, verbose_name='email address')),
                ('gravatar_url', models.URLField(max_length=128, verbose_name='gravatar url')),
                ('is_active', models.BooleanField(default=True)),
                ('is_admin', models.BooleanField(default=False)),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('updated_at', models.DateTimeField(auto_now=True)),
                ('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.group', verbose_name='groups')),
                ('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.permission', verbose_name='user permissions')),
            ],
            options={
                'abstract': False,
            },
        ),
    ]
| 53.75 | 266 | 0.644444 |
ef62a39b9ea87beff919d3a1a49049092dd58859 | 13,861 | py | Python | notion/client.py | ciskander/notion-py | ac4a50c75ffd34d5a029055d594c5bdecb69b134 | [
"MIT"
] | null | null | null | notion/client.py | ciskander/notion-py | ac4a50c75ffd34d5a029055d594c5bdecb69b134 | [
"MIT"
] | null | null | null | notion/client.py | ciskander/notion-py | ac4a50c75ffd34d5a029055d594c5bdecb69b134 | [
"MIT"
] | null | null | null | import hashlib
import json
import re
import uuid
from requests import Session, HTTPError
from requests.cookies import cookiejar_from_dict
from urllib.parse import urljoin
from requests.adapters import HTTPAdapter
from requests.packages.urllib3.util.retry import Retry
from .block import Block, BLOCK_TYPES
from .collection import (
Collection,
CollectionView,
CollectionRowBlock,
COLLECTION_VIEW_TYPES,
TemplateBlock,
)
from .logger import logger
from .monitor import Monitor
from .operations import operation_update_last_edited, build_operation
from .settings import API_BASE_URL
from .space import Space
from .store import RecordStore
from .user import User
from .utils import extract_id, now
def create_session():
    """
    Build a `requests.Session` that retries transient 502 responses.

    Notion's API intermittently returns 502s, so we retry up to 5 times
    with exponential backoff on HTTPS requests.
    """
    session = Session()
    # CAUTION: 'POST' is included here although it is not technically
    # idempotent; the Notion API tolerates replayed POSTs.
    retry_methods = ("POST", "HEAD", "TRACE", "GET", "PUT", "OPTIONS", "DELETE")
    try:
        # urllib3 >= 1.26 renamed `method_whitelist` to `allowed_methods`;
        # the old name was removed entirely in urllib3 2.0.
        retry = Retry(
            5,
            backoff_factor=0.3,
            status_forcelist=(502,),
            allowed_methods=retry_methods,
        )
    except TypeError:
        # Fall back for urllib3 < 1.26, which only knows `method_whitelist`.
        retry = Retry(
            5,
            backoff_factor=0.3,
            status_forcelist=(502,),
            method_whitelist=retry_methods,
        )
    adapter = HTTPAdapter(max_retries=retry)
    session.mount("https://", adapter)
    return session
class NotionClient(object):
    """
    This is the entry point to using the API. Create an instance of this class, passing it the value of the
    "token_v2" cookie from a logged-in browser session on Notion.so. Most of the methods on here are primarily
    for internal use -- the main one you'll likely want to use is `get_block`.
    """

    def __init__(
        self,
        token_v2=None,
        monitor=False,
        start_monitoring=False,
        enable_caching=False,
        cache_key=None,
    ):
        self.session = create_session()
        self.session.cookies = cookiejar_from_dict({"token_v2": token_v2})
        if enable_caching:
            # NOTE(review): enable_caching without an explicit cache_key
            # requires token_v2, since the default key is derived from it.
            cache_key = cache_key or hashlib.sha256(token_v2.encode()).hexdigest()
            self._store = RecordStore(self, cache_key=cache_key)
        else:
            self._store = RecordStore(self)
        if monitor:
            self._monitor = Monitor(self)
            if start_monitoring:
                self.start_monitoring()
        else:
            self._monitor = None
        if token_v2:
            self._update_user_info()

    def start_monitoring(self):
        """Begin async polling of the Monitor for record updates."""
        self._monitor.poll_async()

    def _update_user_info(self):
        """Load the user's content and set `current_user`/`current_space`.

        Returns the raw recordMap dict from the API response.
        """
        records = self.post("loadUserContent", {}).json()["recordMap"]
        self._store.store_recordmap(records)
        self.current_user = self.get_user(list(records["notion_user"].keys())[0])
        self.current_space = self.get_space(list(records["space"].keys())[0])
        return records

    def get_email_uid(self):
        """Return a mapping of account email address -> notion user ID."""
        response = self.post("getSpaces", {}).json()
        return {
            response[uid]["notion_user"][uid]["value"]["email"]: uid
            for uid in response.keys()
        }

    def set_user_by_uid(self, user_id):
        """Switch the active user (for multi-account tokens) by user ID."""
        self.session.headers.update({"x-notion-active-user-header": user_id})
        self._update_user_info()

    def set_user_by_email(self, email):
        """Switch the active user by email address; raises if not found."""
        email_uid_dict = self.get_email_uid()
        uid = email_uid_dict.get(email)
        if not uid:
            raise Exception(f"Not Found {email}, Available IDs: {list(email_uid_dict)}")
        self.set_user_by_uid(uid)

    def get_top_level_pages(self):
        """Return Block instances for the user's top-level pages."""
        records = self._update_user_info()
        return [self.get_block(bid) for bid in records["block"].keys()]

    def get_record_data(self, table, id, force_refresh=False):
        """Fetch the raw record dict for `id` from `table` via the store."""
        return self._store.get(table, id, force_refresh=force_refresh)

    def get_block(self, url_or_id, force_refresh=False):
        """
        Retrieve an instance of a subclass of Block that maps to the block/page identified by the URL or ID passed in.
        """
        block_id = extract_id(url_or_id)
        block = self.get_record_data("block", block_id, force_refresh=force_refresh)
        if not block:
            return None
        if block.get("parent_table") == "collection":
            # Rows (and templates) that live inside a collection get
            # special block classes.
            if block.get("is_template"):
                block_class = TemplateBlock
            else:
                block_class = CollectionRowBlock
        else:
            block_class = BLOCK_TYPES.get(block.get("type", ""), Block)
        return block_class(self, block_id)

    def get_collection(self, collection_id, force_refresh=False):
        """
        Retrieve an instance of Collection that maps to the collection identified by the ID passed in.
        """
        coll = self.get_record_data(
            "collection", collection_id, force_refresh=force_refresh
        )
        return Collection(self, collection_id) if coll else None

    def get_user(self, user_id, force_refresh=False):
        """
        Retrieve an instance of User that maps to the notion_user identified by the ID passed in.
        """
        user = self.get_record_data("notion_user", user_id, force_refresh=force_refresh)
        return User(self, user_id) if user else None

    def get_space(self, space_id, force_refresh=False):
        """
        Retrieve an instance of Space that maps to the space identified by the ID passed in.
        """
        space = self.get_record_data("space", space_id, force_refresh=force_refresh)
        return Space(self, space_id) if space else None

    def get_collection_view(self, url_or_id, collection=None, force_refresh=False):
        """
        Retrieve an instance of a subclass of CollectionView that maps to the appropriate type.
        The `url_or_id` argument can either be the URL for a database page, or the ID of a collection_view (in which case
        you must also pass the collection)
        """
        # if it's a URL for a database page, try extracting the collection and view IDs
        if url_or_id.startswith("http"):
            # raw string so "\?" is a regex escape, not an invalid (and,
            # as of Python 3.12, warned-about) string escape sequence
            match = re.search(r"([a-f0-9]{32})\?v=([a-f0-9]{32})", url_or_id)
            if not match:
                raise Exception("Invalid collection view URL")
            block_id, view_id = match.groups()
            collection = self.get_block(
                block_id, force_refresh=force_refresh
            ).collection
        else:
            view_id = url_or_id
            assert (
                collection is not None
            ), "If 'url_or_id' is an ID (not a URL), you must also pass the 'collection'"
        view = self.get_record_data(
            "collection_view", view_id, force_refresh=force_refresh
        )
        return (
            COLLECTION_VIEW_TYPES.get(view.get("type", ""), CollectionView)(
                self, view_id, collection=collection
            )
            if view
            else None
        )

    def refresh_records(self, **kwargs):
        """
        The keyword arguments map table names into lists of (or singular) record IDs to load for that table.
        Use `True` instead of a list to refresh all known records for that table.
        """
        self._store.call_get_record_values(**kwargs)

    def refresh_collection_rows(self, collection_id):
        """Re-fetch and cache the row IDs of the given collection."""
        row_ids = [row.id for row in self.get_collection(collection_id).get_rows()]
        self._store.set_collection_rows(collection_id, row_ids)

    def post(self, endpoint, data):
        """
        All API requests on Notion.so are done as POSTs (except the websocket communications).
        """
        url = urljoin(API_BASE_URL, endpoint)
        response = self.session.post(url, json=data)
        if response.status_code == 400:
            # Surface the server's message for bad requests, which Notion
            # uses for validation errors.
            logger.error(
                "Got 400 error attempting to POST to {}, with data: {}".format(
                    endpoint, json.dumps(data, indent=2)
                )
            )
            raise HTTPError(
                response.json().get(
                    "message", "There was an error (400) submitting the request."
                )
            )
        response.raise_for_status()
        return response

    def submit_transaction(self, operations, update_last_edited=True):
        """Apply a list of operation dicts, optionally stamping each touched
        block with a fresh last-edited marker; buffers them if inside an
        atomic transaction, otherwise submits immediately."""
        if not operations:
            return
        if isinstance(operations, dict):
            operations = [operations]
        if update_last_edited:
            updated_blocks = set(
                [op["id"] for op in operations if op["table"] == "block"]
            )
            operations += [
                operation_update_last_edited(self.current_user.id, block_id)
                for block_id in updated_blocks
            ]
        # if we're in a transaction, just add these operations to the list; otherwise, execute them right away
        if self.in_transaction():
            self._transaction_operations += operations
        else:
            data = {"operations": operations}
            self.post("submitTransaction", data)
            self._store.run_local_operations(operations)

    def query_collection(self, *args, **kwargs):
        """Delegate a collection query to the record store."""
        return self._store.call_query_collection(*args, **kwargs)

    def as_atomic_transaction(self):
        """
        Returns a context manager that buffers up all calls to `submit_transaction` and sends them as one big transaction
        when the context manager exits.
        """
        return Transaction(client=self)

    def in_transaction(self):
        """
        Returns True if we're currently in a transaction, otherwise False.
        """
        return hasattr(self, "_transaction_operations")

    def search_pages_with_parent(self, parent_id, search=""):
        """Search pages under `parent_id`; returns the raw result list."""
        data = {
            "query": search,
            "parentId": parent_id,
            "limit": 10000,
            "spaceId": self.current_space.id,
        }
        response = self.post("searchPagesWithParent", data).json()
        self._store.store_recordmap(response["recordMap"])
        return response["results"]

    def search_blocks(self, search, limit=25):
        """Convenience wrapper around `search` for simple block queries."""
        return self.search(query=search, limit=limit)

    def search(
        self,
        query="",
        search_type="BlocksInSpace",
        limit=100,
        sort="Relevance",
        source="quick_find",
        isDeletedOnly=False,
        excludeTemplates=False,
        isNavigableOnly=False,
        requireEditPermissions=False,
        ancestors=None,
        createdBy=None,
        editedBy=None,
        lastEditedTime=None,
        createdTime=None,
    ):
        """Run a workspace search and return the matching blocks.

        Filter arguments mirror the Notion search API. The list/dict filter
        parameters default to None (treated as empty) rather than using
        mutable default arguments.
        """
        data = {
            "type": search_type,
            "query": query,
            "spaceId": self.current_space.id,
            "limit": limit,
            "filters": {
                "isDeletedOnly": isDeletedOnly,
                "excludeTemplates": excludeTemplates,
                "isNavigableOnly": isNavigableOnly,
                "requireEditPermissions": requireEditPermissions,
                "ancestors": ancestors or [],
                "createdBy": createdBy or [],
                "editedBy": editedBy or [],
                "lastEditedTime": lastEditedTime or {},
                "createdTime": createdTime or {},
            },
            "sort": sort,
            "source": source,
        }
        response = self.post("search", data).json()
        self._store.store_recordmap(response["recordMap"])
        return [self.get_block(result["id"]) for result in response["results"]]

    def create_record(self, table, parent, **kwargs):
        """Create a new record in `table` under `parent` and return its ID."""
        # make up a new UUID; apparently we get to choose our own!
        record_id = str(uuid.uuid4())
        # NOTE(review): if "child_list_key" is passed via kwargs it is also
        # copied into the record args below by args.update(kwargs) — the
        # server appears to tolerate this; verify before changing.
        child_list_key = kwargs.get("child_list_key") or parent.child_list_key
        args = {
            "id": record_id,
            "version": 1,
            "alive": True,
            "created_by_id": self.current_user.id,
            "created_by_table": "notion_user",
            "created_time": now(),
            "parent_id": parent.id,
            "parent_table": parent._table,
        }
        args.update(kwargs)
        with self.as_atomic_transaction():
            # create the new record
            self.submit_transaction(
                build_operation(
                    args=args, command="set", id=record_id, path=[], table=table
                )
            )
            # add the record to the content list of the parent, if needed
            if child_list_key:
                self.submit_transaction(
                    build_operation(
                        id=parent.id,
                        path=[child_list_key],
                        args={"id": record_id},
                        command="listAfter",
                        table=parent._table,
                    )
                )
        return record_id
class Transaction(object):
    """
    Context manager that buffers every `submit_transaction` call made on the
    client and flushes them as one transaction when the context exits.
    Nesting is supported: inner transactions become no-ops and the outermost
    one performs the actual flush.
    """

    # Class-level default; flipped per-instance when nested inside another
    # active transaction.
    is_dummy_nested_transaction = False

    def __init__(self, client):
        self.client = client

    def __enter__(self):
        client = self.client
        if hasattr(client, "_transaction_operations"):
            # client is already in a transaction, so we'll just make this one
            # a nullop and let the outer one handle it
            self.is_dummy_nested_transaction = True
            return
        client._transaction_operations = []
        client._pages_to_refresh = []
        client._blocks_to_refresh = []

    def __exit__(self, exc_type, exc_value, traceback):
        if self.is_dummy_nested_transaction:
            return
        buffered = self.client._transaction_operations
        del self.client._transaction_operations
        # only actually submit the transaction if there was no exception
        if exc_type is None:
            self.client.submit_transaction(buffered)
        self.client._store.handle_post_transaction_refreshing()
| 35.816537 | 122 | 0.588991 |
c0346858b2429e4b30d8850264dbd563b5d256df | 49 | py | Python | Python_Projects/OS-app/startup.py | ArturWagnerBusiness/Projects-2018-2020 | 37a217dc325f3ba42d8a7a1a743e5b6f8fab5df4 | [
"MIT"
] | null | null | null | Python_Projects/OS-app/startup.py | ArturWagnerBusiness/Projects-2018-2020 | 37a217dc325f3ba42d8a7a1a743e5b6f8fab5df4 | [
"MIT"
] | null | null | null | Python_Projects/OS-app/startup.py | ArturWagnerBusiness/Projects-2018-2020 | 37a217dc325f3ba42d8a7a1a743e5b6f8fab5df4 | [
"MIT"
] | null | null | null | from os import system
system("python client.py")
| 16.333333 | 26 | 0.77551 |
0bb01c8fe3b2b28a2e9ec8ebe0a8d93821f49cb3 | 870 | py | Python | python/team B/iq.py | friendlyghst/asuustrikeworkshop | 7a3519bf1b1811054bcffd04ba2262798d67add1 | [
"MIT"
] | 4 | 2019-03-04T07:46:10.000Z | 2020-05-27T17:11:50.000Z | python/team B/iq.py | friendlyghst/asuustrikeworkshop | 7a3519bf1b1811054bcffd04ba2262798d67add1 | [
"MIT"
] | null | null | null | python/team B/iq.py | friendlyghst/asuustrikeworkshop | 7a3519bf1b1811054bcffd04ba2262798d67add1 | [
"MIT"
] | 1 | 2019-02-22T05:38:45.000Z | 2019-02-22T05:38:45.000Z | from QUESTION import question
from test import evaluate
from newfile import enoch
class Question:
    """Console quiz: asks three multiple-choice questions and prints a score."""

    def quest(self):
        """Ask each question, score the answers, and print the total "IQ"."""
        print("Pick an Option")
        # BUG FIX: renamed from `list`, which shadowed the builtin.
        prompts = [
            "Oliver Twist was published by who? (A) Oliver Twist, (B) Charles Dickens: ",
            "What was the name of Alexander the great's horse? (A) Alonso, (B) Bucaphalus: ",
            "According to greek mythology, who is the god of thunder? (A) Ogun, (B) Thor: "
        ]
        score = []
        for el in prompts:
            # question.ask_question presumably prompts the user and returns the
            # chosen option -- TODO confirm against the QUESTION module.
            question1 = question.ask_question(el)
            # "B" is the correct option for every prompt above.
            scoree = evaluate.evaluate_response(question1, "B")
            score.append(scoree)
        print(score)
        tiq = sum(score)
        print("Your IQ is:", tiq)
Question = Question()  # NOTE(review): rebinds the class name to a singleton instance; the class is unreachable afterwards
if __name__ == "__main__":
    enoch.welcome()
    Question.quest()
    enoch.goodbye()
8fcf860c8363eb65609d26846744c3f562521d29 | 32,867 | py | Python | pystella/rf/light_curve_plot.py | baklanovp/pystella | 47a8b9c3dcd343bf80fba80c8468b803f0f842ce | [
"MIT"
] | 1 | 2019-08-08T13:11:57.000Z | 2019-08-08T13:11:57.000Z | pystella/rf/light_curve_plot.py | cradesto/pystella | f6f44ed12d9648585a52a09e15d494daa4c70c59 | [
"MIT"
] | 9 | 2015-07-11T16:39:57.000Z | 2021-11-23T07:31:49.000Z | pystella/rf/light_curve_plot.py | cradesto/pystella | f6f44ed12d9648585a52a09e15d494daa4c70c59 | [
"MIT"
] | 1 | 2019-08-08T13:08:55.000Z | 2019-08-08T13:08:55.000Z | import os
import sys
from itertools import cycle
from collections import OrderedDict
import numpy as np
from pystella.rf import band
try:
import matplotlib.pyplot as plt
from matplotlib import gridspec
except ImportError as ex:
# import traceback
exc_type, exc_obj, exc_tb = sys.exc_info()
fn = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
print(exc_type, fn, exc_tb.tb_lineno, ex)
print(' Probably, you should install module: {}'.format('matplotlib'))
# print(ex)
plt = None
gridspec = None
pass
__author__ = 'bakl'
# Per-band default plot colors and line types, supplied by the band module.
lc_colors = band.colors()
lc_lntypes = band.lntypes()
# Basic matplotlib line styles, cycled over when several models are plotted.
linestyles = ('-', '--', '-.', ':')
# Extended set of dash patterns (offset, on/off sequence); only the values of
# the named mapping are kept.
linestyles_extend = list( OrderedDict( # ref https://stackoverflow.com/a/54804349
    [
        ('solid', (0, ())),
        ('dashed', (0, (5, 5))),
        ('dashdotted', (0, (3, 5, 1, 5))),
        ('densely dotted', (0, (1, 1))),
        ('densely dashed', (0, (5, 1))),
        ('dotted', (0, (1, 5))),
        ('loosely dashed', (0, (5, 10))),
        ('loosely dotted', (0, (1, 10))),
        ('loosely dashdotted', (0, (3, 10, 1, 10))),
        ('densely dashdotted', (0, (3, 1, 1, 1))),
        ('loosely dashdotdotted', (0, (3, 10, 1, 10, 1, 10))),
        ('dashdotdotted', (0, (3, 5, 1, 5, 1, 5))),
        ('densely dashdotdotted', (0, (3, 1, 1, 1, 1, 1)))]).values())
# Marker codes (matplotlib symbol -> descriptive name); reduced to the codes.
markers = {u'D': u'diamond', 6: u'caretup', u's': u'square', u'x': u'x',
           5: u'caretright', u'^': u'triangle_up', u'd': u'thin_diamond', u'h': u'hexagon1',
           u'+': u'plus', u'*': u'star', u'o': u'circle', u'p': u'pentagon', u'3': u'tri_left',
           u'H': u'hexagon2', u'v': u'triangle_down', u'8': u'octagon', u'<': u'triangle_left'}
markers = list(markers.keys())  # NOTE(review): marker order follows dict insertion order (guaranteed on Py3.7+ only)
# def lbl(b, band_shift):
# shift = band_shift[b]
# s = b
# if shift == int(shift):
# shift = int(shift)
# if shift > 0:
# s += '+' + str(shift)
# elif shift < 0:
# s += '-' + str(abs(shift))
# return s
def lbl(b, band_shift, length=0):
    """Build a legend label for band `b` with its magnitude shift.

    A shift of 2 yields "B+2", -1.5 yields "B-1.5", and 0 yields just "B".
    Whole-number float shifts are printed without a decimal point.

    :param b: band name
    :param band_shift: mapping band name -> magnitude shift
    :param length: when > 0, left-justify the label to this width
    :return: the label string
    """
    offset = band_shift[b]
    if offset == int(offset):
        offset = int(offset)
    label = b
    if offset != 0:
        sign = '+' if offset > 0 else '-'
        label = '{}{}{}'.format(b, sign, abs(offset))
    if length > 0:
        label = label.ljust(length)
    return label
def lbl_length(bshifts):
    """Return the length of the longest label lbl() builds for `bshifts`.

    :param bshifts: mapping band name -> magnitude shift
    """
    return max(len(lbl(bname, bshifts)) for bname in bshifts)
def plot_ubv_models(ax, models_dic, bands, **kwargs):
    # bshift=None, xlim=None, ylim=None, colors=lc_colors, is_time_points=False):
    """Plot multi-band light curves for several models on one axis.

    :param ax: matplotlib Axes to draw on
    :param models_dic: {model_name: {band_name: light_curve}}; each light
        curve exposes .Time and .Mag arrays
    :param bands: band names to plot for every model
    Keyword options: xlim/ylim (computed from the data when omitted), bshift
    (per-band magnitude shifts), ls1/ls_multi (line styles used for the
    single-model and many-model cases), lw, markersize, is_time_points
    (annotate fixed epochs on each curve), is_dashes, linestyles.
    :return: dict {band: (t, mag)} -- location of each band's brightest point
        (minimum magnitude); for multiple models, the last model wins.
    """
    global linestyles
    xlim = kwargs.get('xlim', None)
    ylim = kwargs.get('ylim', None)
    bshift = kwargs.get('bshift', None)
    ls1 = kwargs.get('ls1', "-")
    ls_multi = kwargs.get('ls_multi', ":")
    lw = kwargs.get('lw', 2)
    markersize = kwargs.get('markersize', 6)
    is_time_points = kwargs.get('is_time_points', False)
    is_dashes = kwargs.get('is_dashes', False)
    line_styles = kwargs.get('linestyles', linestyles)
    # linestyles = kwargs.get('linestyles', ['-'])
    is_compute_x_lim = xlim is None
    is_compute_y_lim = ylim is None
    # Epochs (days) annotated on the curves when is_time_points is set.
    t_points = [0.2, 1, 2, 3, 4, 5, 10, 20, 40, 80, 150]
    colors = band.colors()
    band_shift = dict((k, 0) for k, v in colors.items())  # no y-shift
    if bshift is not None:
        for k, v in bshift.items():
            band_shift[k] = v
    lbl_len = lbl_length(band_shift)
    mi = 0
    x_max = []
    y_mid = []
    lc_min = {}
    line_cycle = cycle(line_styles)
    dashes = get_dashes(len(bands) + 1, scale=2)
    for mname, mdic in models_dic.items():
        mi += 1
        ls = next(line_cycle)  # one line style per model
        for ib, bname in enumerate(bands):
            lc = mdic[bname]
            x = lc.Time
            y = lc.Mag + band_shift[bname]
            bcolor = colors[bname]
            dash = dashes[ib]
            # Style selection: a single model uses plain/dashed lines; few
            # models cycle line styles; many models fall back to markers.
            if len(models_dic) == 1:
                if is_dashes:
                    ax.plot(x, y, label='%s %s' % (lbl(bname, band_shift, lbl_len), mname), color=bcolor, ls=ls1,
                            linewidth=lw, dashes=dash)
                else:
                    ax.plot(x, y, label='%s %s' % (lbl(bname, band_shift, lbl_len), mname), color=bcolor, ls=ls,
                            linewidth=lw)
                # ax.plot(x, y, label='%s %s' % (lbl(bname, band_shift), mname), color=bcolor, ls=ls1, linewidth=lw)
            elif len(models_dic) <= len(line_styles):
                ax.plot(x, y, label='%s %s' % (lbl(bname, band_shift, lbl_len), mname), color=bcolor, ls=ls,
                        linewidth=lw)
            else:
                ax.plot(x, y, marker=markers[mi % (len(markers) - 1)],
                        label='%s %s' % (lbl(bname, band_shift, lbl_len), mname),
                        markersize=markersize, color=bcolor, ls=ls_multi, linewidth=lw)
            if is_time_points:
                integers = [np.abs(x - t).argmin() for t in t_points]  # set time points
                for (X, Y) in zip(x[integers], y[integers]):
                    ax.annotate('{:.0f}'.format(X), xy=(X, Y), xytext=(-10, 20), ha='right',
                                textcoords='offset points', color=bcolor,
                                arrowprops=dict(arrowstyle='->', shrinkA=0))
            # Record the brightest point (min magnitude) of this band.
            idx = np.argmin(y)
            lc_min[bname] = (x[idx], y[idx])
            if is_compute_x_lim:
                x_max.append(np.max(x))
            if is_compute_y_lim:
                y_mid.append(np.min(y))
    if is_compute_x_lim:
        xlim = [-10, np.max(x_max) + 10.]
    if is_compute_y_lim:
        ylim = [np.min(y_mid) + 7., np.min(y_mid) - 2.]
    ax.set_xlim(xlim)
    ax.invert_yaxis()
    ax.set_ylim(ylim)
    return lc_min
def plot_models_band(ax, models_dic, bname, **kwargs):
    # , xlim=None, ylim=None,colors=lc_colors, is_time_points=False):
    """Plot a single band's light curve for several models on one axis.

    :param ax: matplotlib Axes to draw on
    :param models_dic: {model_name: {band_name: light_curve}}
    :param bname: the band to plot
    Keyword options: xlim/ylim (computed when omitted; +/-inf entries are
    filled in from the data), sep ('m' = distinguish models by markers,
    anything else = by dash patterns).
    :return: dict {band: (t, mag)} -- brightest point of the last model.
    """
    xlim = kwargs.get('xlim', None)
    ylim = kwargs.get('ylim', None)
    sep = kwargs.get('sep', 'm')  # line separator
    is_compute_x_lim = xlim is None
    is_compute_y_lim = ylim is None
    lw = 1.5
    mi = 0
    x_min = []
    x_max = []
    y_mid = []
    lc_min = {}
    dashes = get_dashes(len(models_dic))
    dashes_cycler = cycle(dashes)
    for mname, mdic in models_dic.items():
        mi += 1
        lc = mdic[bname]
        x = lc.Time
        y = lc.Mag
        # bcolor = colors[mi % len(colors)]
        if len(models_dic) == 1:
            ax.plot(x, y, label='%s %s' % (bname, mname), ls="-", linewidth=lw)
        else:
            if sep == 'm':
                ax.plot(x, y, marker=markers[mi % (len(markers) - 1)], label='%s %s' % (bname, mname),
                        markersize=4, ls=":", linewidth=lw)
            else:
                ax.plot(x, y, label='%s %s' % (bname, mname), dashes=next(dashes_cycler), linewidth=lw)
        # Brightest point (minimum magnitude) of this model's curve.
        idx = np.argmin(y)
        lc_min[bname] = (x[idx], y[idx])
        x_min.append(np.min(x))
        x_max.append(np.max(x))
        if is_compute_y_lim:
            y_mid.append(np.min(y))
    # x-axe
    # NOTE(review): the elif chain means an open lower bound (-inf) prevents
    # an open upper bound (inf) from being filled in the same call -- confirm
    # this is intended.
    if is_compute_x_lim:
        xlim = [-10, np.max(x_max) + 10.]
    elif xlim[0] == float('-inf'):
        xlim[0] = np.min(x_min)
    elif xlim[1] == float('inf'):
        xlim[1] = np.max(x_max)
    ax.set_xlim(xlim)
    # y-axe
    ax.invert_yaxis()
    if is_compute_y_lim:
        ylim = [np.min(y_mid) + 7., np.min(y_mid) - 2.]
    ax.set_ylim(ylim)
    return lc_min
def get_dashes(nums, scale=1):
    """Build `nums` distinct matplotlib dash patterns.

    The first five patterns are simple dashes, the next five are
    dash-dot-like, and the rest use a three-segment pattern. `scale`
    stretches one gap length in each pattern.

    :param nums: number of patterns to generate
    :param scale: multiplier applied to the scaled gap
    :return: list of dash tuples usable as the ``dashes=`` plot argument
    """
    patterns = []
    for idx in range(nums):
        gap = scale * (1 + idx // 2)
        if idx < 5:
            pattern = (4, gap)
        elif idx < 10:
            pattern = (idx - 3, gap, 2, 1 + idx)
        else:
            pattern = (idx - 8, 1, 2, gap, 2, 1 + idx)
        patterns.append(pattern)
    return patterns
def plot_models_curves(ax, models_curves, band_shift=None, xlim=None, ylim=None, lc_types=None, colors=lc_colors,
                       lw=2.):
    """Plot every band of every SetLightCurve in `models_curves` on one axis.

    :param ax: matplotlib Axes to draw on
    :param models_curves: {model_name: curves}; curves exposes .BandNames,
        .TimeCommon and per-band .Mag arrays
    :param band_shift: optional {band: magnitude shift}; default no shift
    :param xlim, ylim: axis limits, computed from the data when None
    :param lc_types: optional {model_name: line style}; default solid
    :param colors: {band: color} mapping (module default lc_colors)
    :param lw: line width
    """
    is_compute_x_lim = xlim is None
    is_compute_y_lim = ylim is None
    if lc_types is None:
        lc_types = dict((name, '-') for name in models_curves.keys())  # solid line
    mi, ib = 0, 0
    x_max = []
    y_mid = []
    bshifts = band_shift
    for mname, curves in models_curves.items():
        mi += 1
        bands = curves.BandNames
        if band_shift is None:
            bshifts = {bname: 0. for bname in bands}  # no y-shift
        for bname in bands:
            ib += 1
            mshift = bshifts[bname]
            x = curves.TimeCommon
            y = curves[bname].Mag + mshift
            ax.plot(x, y, label='%s %s' % (lbl(bname, bshifts), mname),
                    color=colors[bname], ls=lc_types[mname], linewidth=lw)
            if is_compute_x_lim:
                x_max.append(np.max(x))
            if is_compute_y_lim:
                y_mid.append(np.min(y))
    if is_compute_x_lim:
        xlim = [-10, np.max(x_max) + 10.]
    if is_compute_y_lim:
        ylim = [np.min(y_mid) + 7., np.min(y_mid) - 2.]
    ax.set_xlim(xlim)
    # ax.invert_yaxis()
    ax.set_ylim(ylim)
def plot_models_curves_fixed_bands(ax, models_curves, bands, band_shift=None, xlim=None, ylim=None, lc_types=None,
                                   colors=lc_colors,
                                   lw=2.):
    """Plot a fixed list of bands for every SetLightCurve in `models_curves`.

    :param ax: matplotlib Axes to draw on
    :param models_curves: {model_name: curves}
    :param bands: band names to plot for every model
    :param band_shift: optional {band: shift}; NOTE(review): the shift is used
        only in the legend label here, the plotted magnitudes are unshifted --
        confirm that is intended (plot_models_curves applies it to the data).
    :param xlim, ylim: axis limits, computed from the data when None
    :param lc_types: optional {model_name: line style}; default solid
    :param colors: {band: color} mapping (module default lc_colors)
    :param lw: line width
    """
    is_compute_x_lim = xlim is None
    is_compute_y_lim = ylim is None
    if band_shift is None:
        band_shift = dict((bname, 0) for bname in bands)  # no y-shift
    if lc_types is None:
        lc_types = dict((name, '-') for name in models_curves.keys())  # solid line
    mi, ib = 0, 0
    x_max = []
    y_mid = []
    for mname, curves in models_curves.items():
        mi += 1
        for bname in bands:
            ib += 1
            x = curves.TimeCommon
            y = curves[bname].Mag
            ax.plot(x, y, label='%s %s' % (lbl(bname, band_shift), mname),
                    color=colors[bname], ls=lc_types[mname], linewidth=lw)
            if is_compute_x_lim:
                x_max.append(np.max(x))
            if is_compute_y_lim:
                y_mid.append(np.min(y))
    if is_compute_x_lim:
        xlim = [-10, np.max(x_max) + 10.]
    if is_compute_y_lim:
        ylim = [np.min(y_mid) + 7., np.min(y_mid) - 2.]
    ax.set_xlim(xlim)
    ax.invert_yaxis()
    ax.set_ylim(ylim)
def plot_bands(dict_mags, bands, title='', fname='', distance=10., xlim=(-10, 200), ylim=(-12, -19),
               is_time_points=True):
    """Plot the light curves stored in `dict_mags` for the requested bands.

    :param dict_mags: dict holding a 'time' array plus one magnitude array
        (numpy) per band name
    :param bands: band names to plot (each must be a key of dict_mags)
    :param title: text printed below the axes
    :param fname: when non-empty, the figure is saved as ``ubv_<fname>.png``
    :param distance: distance in parsec used to convert absolute magnitudes
        to apparent ones via the distance modulus
    :param xlim: x-axis limits
    :param ylim: initial y-axis limits; currently always recomputed from the
        data because auto-limits are hard-enabled below
    :param is_time_points: annotate selected epochs on the first band plotted
    :return: the matplotlib Axes
    """
    fig = plt.figure()
    ax = fig.add_axes((0.1, 0.3, 0.8, 0.65))
    band_shift = dict((k, 0) for k, v in lc_colors.items())  # no y-shift
    # Epochs (days) annotated when is_time_points is set.
    t_points = [2, 5, 10, 20, 40, 80, 150]
    dm = 5 * np.log10(distance) - 5  # distance module
    # BUG FIX: `ylim += dm` raised TypeError for the default tuple argument;
    # np.add works for tuples, lists and arrays alike.
    ylim = np.add(ylim, dm)
    is_auto_lim = True
    if is_auto_lim:
        ylim = [0, 0]
    x = dict_mags['time']
    is_first = True
    for n in bands:
        # BUG FIX: the original `y += ...` mutated the caller's arrays stored
        # in dict_mags; build a new array instead.
        y = dict_mags[n] + dm + band_shift[n]
        ax.plot(x, y, label=lbl(n, band_shift), color=lc_colors[n], ls=lc_lntypes[n], linewidth=2.0)
        if is_time_points and is_first:
            is_first = False
            integers = [np.abs(x - t).argmin() for t in t_points]  # set time points
            for (X, Y) in zip(x[integers], y[integers]):
                ax.annotate('{:.0f}'.format(X), xy=(X, Y), xytext=(10, -30), ha='right',
                            textcoords='offset points',
                            arrowprops=dict(arrowstyle='->', shrinkA=0))
        if is_auto_lim:
            # BUG FIX: integer division; `len(y) / 2` is a float and raised
            # TypeError when used as a slice index under Python 3.
            half = len(y) // 2
            if ylim[0] < max(y[half:]) or ylim[0] == 0:
                ylim[0] = max(y[half:])
            if ylim[1] > min(y) or ylim[1] == 0:
                ylim[1] = min(y)
    ylim = np.add(ylim, [1, -1])
    ax.invert_yaxis()
    ax.set_xlim(xlim)
    ax.set_ylim(ylim)
    ax.legend()
    ax.set_ylabel('Magnitude')
    ax.set_xlabel('Time [days]')
    fig.text(0.17, 0.07, title, family='monospace')
    ax.grid()
    if fname != '':
        fig.savefig("ubv_%s.png" % fname, format='png')
    return ax
def curves_plot(curves, ax=None, xlim=None, ylim=None, title=None, fname=None, **kwargs):
    """
    Plot a set of light curves (magnitude vs time).

    Upper/lower limits: a magnitude error of -1 marks an upper limit and -2 a
    lower limit; such points are drawn as limit arrows of length
    ``length_lo_up_lims`` instead of symmetric error bars.

    :param curves: iterable of light curves; each exposes Time, Mag, Band and
        optionally MagErr/IsErr; the container itself exposes .Name
    :param ax: Axis. If ax is None, it would be created.
    :param xlim: x-axis limits; computed from the data when None
    :param ylim: y-axis limits; computed from the data when None
    :param title: figure title (set via Figure.suptitle)
    :param fname: when not None, save the figure to this path
    :param kwargs: optional settings:
        linewidth = kwargs.get('linewidth', 2.0)
        markersize = kwargs.get('markersize', 5)
        fontsize = kwargs.get('fontsize', 18)
        figsize = kwargs.get('figsize', (20, 10))
        legncol = kwargs.get('legncol', 1)
        legloc = kwargs.get('legloc', 1)
        alpha = kwargs.get('alpha', 1.)
        is_legend = kwargs.get('is_legend', True)
        is_line = kwargs.get('is_line', True)
        is_fill = kwargs.get('is_fill', False)
        marker: marker symbol or {band: marker}; passing it switches lines off
        colors: color or {band: color}, e.g. {'B': 'blue', 'V': 'green'}
        ls: line style or {band: line style}
        flabel: callable band -> legend label; label: format string for band
        length_lo_up_lims: arrow length for upper/lower limits (default 0.5)
    :return: ax
    """
    ls = kwargs.get('ls', {lc.Band.Name: '-' for lc in curves})
    if isinstance(ls, str):
        c = ls.strip()
        ls = {lc.Band.Name: c for lc in curves}
    is_legend = kwargs.get('is_legend', True)
    is_line = kwargs.get('is_line', True)
    is_fill = kwargs.get('is_fill', False)
    if 'marker' in kwargs:
        is_line = False
    marker = kwargs.get('marker', 'o')
    if not isinstance(marker, (list, dict, tuple)):
        marker = {lc.Band.Name: marker for lc in curves}
    colors = kwargs.get('colors', None)
    if colors is not None and not isinstance(colors, (list, dict, tuple)):
        colors = {lc.Band.Name: colors for lc in curves}
    linewidth = kwargs.get('linewidth', 2.0)
    markersize = kwargs.get('markersize', 5)
    fontsize = kwargs.get('fontsize', 18)
    figsize = kwargs.get('figsize', (20, 10))
    legncol = kwargs.get('legncol', 1)
    legloc = kwargs.get('legloc', 1)
    alpha = kwargs.get('alpha', 1.)
    flabel = kwargs.get('flabel', None)
    label = kwargs.get('label', None)
    length_lo_up_lims = kwargs.get('length_lo_up_lims', 0.5)
    is_new_fig = ax is None
    if is_new_fig:
        plt.matplotlib.rcParams.update({'font.size': 14})
        fig = plt.figure(figsize=figsize)
        ax = fig.add_subplot(1, 1, 1)
        for item in ([ax.title, ax.xaxis.label,
                      ax.yaxis.label] + ax.get_xticklabels() + ax.get_yticklabels()):
            item.set_fontsize(fontsize)
    is_xlim = False
    is_ylim = False
    if xlim is None:
        is_xlim = True
        xlim = [float('inf'), float('-inf')]
    if ylim is None:
        is_ylim = True
        ylim = [float('-inf'), float('inf')]
    for lc in curves:
        x = lc.Time
        y = lc.Mag
        bname = lc.Band.Name
        # BUG FIX: renamed from `lbl`, which shadowed the module-level lbl().
        leg_label = '{0} {1}'.format(bname, curves.Name.replace("_", ""))
        if flabel is not None:
            leg_label = flabel(bname)
        elif label is not None:
            leg_label = label.format(bname)
        if colors is not None:
            color = colors[bname]
        else:
            color = band.colors(bname)
        if is_line:
            ax.plot(x, y, label=leg_label, color=color, ls=ls[bname], linewidth=linewidth)
        else:
            if lc.IsErr:
                y_el = np.copy(lc.MagErr)
                y_eu = np.copy(lc.MagErr)
                # Sentinel errors: -2 marks a lower limit, -1 an upper limit.
                lolims = np.array(y_el == -2, dtype=bool)
                uplims = np.array(y_eu == -1, dtype=bool)
                y_el[lolims] = length_lo_up_lims
                y_eu[uplims] = length_lo_up_lims
                ax.errorbar(x, y, label=leg_label, yerr=[y_el, y_eu], fmt=marker[bname],
                            lolims=lolims, uplims=uplims, xlolims=lolims, xuplims=uplims,
                            color=color, ls='', markersize=markersize, )
            else:
                ax.plot(x, y, label=leg_label, color=color, ls='', marker=marker[bname], markersize=markersize)
        if is_fill and lc.IsErr:
            yy_err = abs(lc.MagErr)
            ax.fill_between(x, y - yy_err, y + yy_err, facecolor=color, alpha=alpha)
        if is_xlim:
            xlim[0] = min(xlim[0], np.min(x))
            xlim[1] = max(xlim[1], np.max(x))
        if is_ylim:
            ylim[0] = max(ylim[0], np.max(y))
            ylim[1] = min(ylim[1], np.min(y))
    if is_ylim:
        ylim = [ylim[1] + 10, ylim[1] - 2]
    ax.invert_yaxis()
    ax.set_xlim(xlim)
    ax.set_ylim(ylim)
    if is_legend:
        ax.legend(ncol=legncol, loc=legloc)
    ax.set_ylabel('Magnitude')
    ax.set_xlabel('Time [days]')
    if title is not None:
        # BUG FIX: Figure has no .title() method; use suptitle.
        ax.get_figure().suptitle(title)
    if fname is not None:
        print('Save plot to {}'.format(fname))
        ax.get_figure().savefig(fname)
    return ax
def lc_plot(lc, ax=None, xlim=None, ylim=None, title=None, fname=None, **kwargs):
    """Plot a single light curve (magnitude vs time).

    :param lc: light curve exposing Time, Mag, Band, Name and optionally
        Err/IsErr
    :param ax: matplotlib Axes; a new figure/axes is created when None
    :param xlim, ylim: axis limits, computed from the data when None
    :param title: plot title (set on the current figure)
    :param fname: when not None, the figure is saved to this path
    Keyword options: ls (style or per-band dict), lt (marker; passing it
    switches lines off), colors, linewidth, markersize, rect, fontsize,
    figsize, is_legend, is_line.
    :return: the matplotlib Axes
    """
    ls = kwargs.get('ls', {lc.Band.Name: '-'})
    if isinstance(ls, str):
        c = ls.strip()
        ls = {lc.Band.Name: c}
    is_legend = kwargs.get('is_legend', True)
    is_line = kwargs.get('is_line', True)
    if 'lt' in kwargs:
        is_line = False
    lt = kwargs.get('lt', {lc.Band.Name: 'o'})
    if isinstance(lt, str):
        c = lt.strip()
        lt = {lc.Band.Name: c}
    colors = kwargs.get('colors', lc_colors)
    linewidth = kwargs.get('linewidth', 2.0)
    markersize = kwargs.get('markersize', 5)
    rect = kwargs.get('rect', (0.1, 0.3, 0.8, 0.65))
    fontsize = kwargs.get('fontsize', 18)
    figsize = kwargs.get('figsize', (20, 10))
    is_new_fig = ax is None
    if is_new_fig:
        plt.matplotlib.rcParams.update({'font.size': 14})
        fig = plt.figure(figsize=figsize)
        # fig = plt.figure(num=None, figsize=(7, 11), dpi=100, facecolor='w', edgecolor='k')
        # ax = fig.add_axes()
        ax = fig.add_axes(rect)
        for item in ([ax.title, ax.xaxis.label,
                      ax.yaxis.label] + ax.get_xticklabels() + ax.get_yticklabels()):
            item.set_fontsize(fontsize)
    # ax = fig.add_axes((0.1, 0.3, 0.8, 0.65))
    # plt.title(''.join(bands) + ' filter response')
    is_xlim = False
    is_ylim = False
    if xlim is None:
        is_xlim = True
        xlim = [float('inf'), float('-inf')]
    if ylim is None:
        is_ylim = True
        ylim = [float('-inf'), float('inf')]
    x = lc.Time
    y = lc.Mag
    bname = lc.Band.Name
    if is_line:
        ax.plot(x, y, label='{0} {1}'.format(bname, lc.Name),
                color=colors[bname], ls=ls[bname], linewidth=linewidth)
    else:
        if lc.IsErr:
            yyerr = abs(lc.Err)
            # NOTE(review): this label uses `fname` (the save path) while the
            # other branches use lc.Name -- possibly unintended; confirm.
            ax.errorbar(x, y, label='{0} {1}'.format(bname, fname), yerr=yyerr, fmt=lt[bname],
                        color=colors[bname], ls='', markersize=markersize)
        else:
            # ax.plot(x, y, label='{0} {1}'.format(bname, fname), color=bcolors[bname], ls='',
            #         marker=marker, markersize=markersize)
            ax.plot(x, y, label='{0} {1}'.format(bname, lc.Name),
                    color=colors[bname], ls='', marker=lt[bname], markersize=markersize)
    if is_xlim:
        xlim[0] = np.min(x)
        xlim[1] = np.max(x)
    if is_ylim:
        ylim[0] = np.max(y)
        ylim[1] = np.min(y)
    if is_ylim:
        ylim = [ylim[1] + 10, ylim[1] - 2]
    # ylim = np.add(ylim, [1, -1])
    ax.invert_yaxis()
    ax.set_xlim(xlim)
    ax.set_ylim(ylim)
    if is_legend:
        ax.legend()
    ax.set_ylabel('Magnitude')
    ax.set_xlabel('Time [days]')
    ax.grid()
    if title is not None:
        plt.title(title)
        # ax.text(0.17, 0.07, title, family='monospace')
    if fname is not None:
        # plt.savefig("ubv_%s.png" % fname, format='png')
        plt.savefig(fname)
    return ax
def ticks_on(ax, minor=3, major=6):
    """Enable inward minor/major ticks on `ax` and return it.

    :param ax: matplotlib Axes (anything with minorticks_on/tick_params)
    :param minor: minor tick length in points
    :param major: major tick length in points
    :return: the same Axes, for chaining
    """
    ax.minorticks_on()
    for which, length in (('minor', minor), ('major', major)):
        ax.tick_params(direction='in', which=which, length=length)
    return ax
def plot_shock_details(swd, times, **kwargs):
    """Plot shock-wave details (swd) snapshots at the given moments.

    Builds (or reuses) a two-column grid of subplots: the left column shows
    each snapshot vs radius, the right column vs mass coordinate.

    :param swd: shock-wave-details model; exposes Name and block_nearest(t)
    :param times: moments (days) to plot, one subplot row per moment
    Keyword options: is_legend, font_size, is_adjust (collapse subplot gaps),
    is_axes (also return the axes dict), dic_axes (reuse axes from a previous
    call to overplot), plus anything sn_swd.plot_swd accepts.
    :return: the figure, or (figure, dic_axes) when is_axes is True.
        dic_axes maps 'r'/'m' to per-time dicts {'itime', 't', 'rho', 'par'}.
    """
    from pystella.model import sn_swd
    is_legend = kwargs.get('is_legend', False)
    # ylim_par = kwargs.get('ylim_par', (0.001, 11))
    font_size = kwargs.get('font_size', 12)
    # is_grid = kwargs.get('is_grid', False)
    is_adjust = kwargs.get('is_adjust', True)
    is_axes = kwargs.get('is_axes', False)
    dic_axes = kwargs.get('dic_axes', None)
    is_ax_old = False
    xlim = None
    ylim_rho = None
    nrow = len(times)
    ncol = 2
    axes1 = []
    if dic_axes is None:
        dic_axes = {'r': [], 'm': []}
        fig = plt.figure(figsize=(12, nrow * 4))
        # plt.minorticks_on()
        # fig = plt.figure(num=None, figsize=(12, len(times) * 4), dpi=100, facecolor='w', edgecolor='k')
        # gs1 = gridspec.GridSpec(len(times), 2)
    else:
        # Reuse the axes built by a previous call: overplot mode.
        fig = (dic_axes['r'][0]['rho']).get_figure()
        is_ax_old = True
        is_adjust = False
        # is_legend = False
        kwargs['is_day'] = False
    plt.matplotlib.rcParams.update({'font.size': font_size})
    # plot radius column
    for i, t in enumerate(times):
        if is_ax_old:
            axrho, axpar = dic_axes['r'][i]['rho'], dic_axes['r'][i]['par']
        else:
            axrho = fig.add_subplot(nrow, ncol, ncol * i + 1, label='radius {}'.format(i))
            axpar = None
            axes1.append(axrho)
        legmask = sn_swd.LEGEND_MASK_None
        if is_legend and i == 0:
            legmask = sn_swd.LEGEND_MASK_Rho
        # plot swd(radius)
        b = swd.block_nearest(t)
        axrho, axpar = sn_swd.plot_swd((axrho, axpar), b, name=swd.Name, is_xlabel=(i == len(times) - 1),
                                       legmask=legmask, is_yrlabel=False, text_posy=0.88,
                                       **kwargs)
        if not is_ax_old:
            # Grow the shared x/rho limits to cover all snapshots.
            x = axrho.get_xlim()
            if xlim is None:
                xlim = x
            else:
                xlim = (min(x[0], xlim[0]), max(x[1], xlim[1]))
            y = axrho.get_ylim()
            if ylim_rho is None:
                ylim_rho = y
            else:
                ylim_rho = (min(y[0], ylim_rho[0]), max(y[1], ylim_rho[1]))
            # axpar.tick_params(direction='in', which='both', length=4)
            ticks_on(axrho)
            ticks_on(axpar)
            dic_axes['r'].append({'itime': i, 't': t, 'rho': axrho, 'par': axpar})
    if 'rnorm' in kwargs:
        kwargs.pop('rnorm')
    axes2 = []
    # Plot mass column
    for i, t in enumerate(times):
        if is_ax_old:
            # ax2 = dic_axes['m'][i]['rho']
            axrho, axpar = dic_axes['m'][i]['rho'], dic_axes['m'][i]['par']
        else:
            axrho = fig.add_subplot(nrow, ncol, ncol * i + 2, label='mass {}'.format(i))
            axrho.tick_params(direction='in', which='minor', length=3)
            axpar = None
            axes2.append(axrho)
        legmask = sn_swd.LEGEND_MASK_None
        if is_legend and i == 0:
            legmask = sn_swd.LEGEND_MASK_Vars
        b = swd.block_nearest(t)
        axrho, axpar = sn_swd.plot_swd((axrho, axpar), b, name=swd.Name, is_xlabel=(i == len(times) - 1),
                                       rnorm='m', legmask=legmask, is_yllabel=False, text_posy=0.88,
                                       **kwargs)
        if not is_ax_old:
            dic_axes['m'].append({'itime': i, 't': t, 'rho': axrho, 'par': axpar})
            ticks_on(axrho)
            axpar.tick_params(direction='in', which='major', length=5)
            ticks_on(axpar)
    # Set limits
    for i, ax in enumerate(axes1):
        ax.set_xlim(xlim)
        ax.set_ylim(ylim_rho)
        # remove labels between subplots
        if not i == len(times) - 1:
            plt.setp(ax.get_xticklabels(), visible=False)
    for i, ax2 in enumerate(axes2):
        ax2.set_ylim(ylim_rho)
        # remove labels between subplots
        if not i == len(times) - 1:
            plt.setp(ax2.get_xticklabels(), visible=False)
    if is_adjust:
        fig.subplots_adjust(wspace=0., hspace=0.)
    # print(len(axes1), len(axes2))
    if is_axes:
        return fig, dic_axes
    return fig
def plot_swd_chem(dic_axes, argsrho, path_relativ, alpha=0.25):
    """Add chemical data to the plot_shock_details plot

    :type dic_axes: dict, dic_axes['r' or 'm'].append({'itime': i, 't': t, 'rho': axrho, 'par': axpar})
    :param argsrho: rho-file name, optionally followed by '+' and a
        colon-separated element list (e.g. "model.rho+H:He:O")
    :param path_relativ: directory the rho-file path is resolved against
    :param alpha: transparency of the filled abundance regions
    """
    from pystella.model import sn_eve
    from pystella.util.phys_var import phys
    colors = sn_eve.eve_colors
    s_elements = None
    if '+' in argsrho:
        frho, s_elements = argsrho.split('+')
    else:
        frho = argsrho
    frho = os.path.join(path_relativ, frho)
    pathrho, namerho = os.path.split(frho)
    eve = sn_eve.load_rho(os.path.join(pathrho, namerho))
    elements = eve.Elements
    if s_elements is not None:
        elements = s_elements.split(':')
    print('Add chem [{}] from {}'.format(':'.join(elements), frho))
    # Mass coordinate in solar masses; shifted to start near zero when needed.
    x = eve.m / phys.M_sun
    if min(x) > 0.1:
        print('Chem is shifted at {} Msun'.format(-min(x)))
        x = x - min(x)
    def two_y(x, lims, up=0.85):
        # Map a 0..1 abundance fraction onto the axes' y-range (top at `up`).
        return (lims[1]*up - lims[0]) * x + lims[0]
    for i, d in enumerate(dic_axes['m']):
        ax = d['par']
        xlim = ax.get_xlim()
        ylim = ax.get_ylim()
        # print(ylim)
        # x-positions for the element name annotations, spread across the axis.
        xx = np.linspace(xlim[1]*0.8, xlim[0], len(elements))
        yy_tot = np.zeros_like(x)  # + ylim[1]
        yy_prev = np.zeros_like(x)  # + ylim[0]
        for ie, el in enumerate(elements):
            # for el in reversed(elements):
            y = eve.el(el)
            # yy += y
            # yy = y
            # yyy = yy
            # Stacked cumulative abundances: fill between previous and new total.
            yy_tot += y
            # yyy = np.log10(yy) + ylim[1]-1
            ax.fill_between(x, y1=two_y(yy_tot, ylim), y2=two_y(yy_prev, ylim), color=colors[el], alpha=alpha)
            # ax.plot(x, yyy, color=colors[el], alpha=alpha)
            # Print the element name
            # xp = np.average(x, weights=y)
            # yp = np.average(yyy, weights=y) * 0.9
            xp = xx[ie]
            ax.text(xp, ylim[1] * 0.9, el, color=colors[el], fontsize=11)
            yy_prev = yy_tot.copy()
def plot_swd_tau(dic_axes, stella, times, bnames=('B',), tau_ph=2. / 3., is_obs_time=False, **kwargs):
    """Add photospheric data to the plot_shock_details plot

    For each moment and band, marks the photospheric radius and plots the
    temperature and velocity found at optical depth `tau_ph`; the values are
    also printed as a table.

    :type dic_axes: object
    :param stella: model providing the tau-file (Name, Path, is_tau, get_tau)
    :param times: moments to extract photospheric values for
    :param bnames: band names whose effective frequency selects the tau data
    :param tau_ph: the photosphere location. Default: 2/3
    :param is_obs_time: If True to compute Obs.Time as ProperTime - R(N-1)/c and use them. Default: False
    :param tnorm: the T normalization. Default: None = log10(T)
    :param vnorm: the V normalization. Default: 1e8
    :param alpha: the transparency of the plotted markers/lines. Default: 0.5
    """
    from pystella.rf.band import band_by_name
    tnorm = kwargs.get('tnorm', None)
    vnorm = kwargs.get('vnorm', 1e8)
    alpha = kwargs.get('alpha', 0.5)
    markersize = kwargs.get('markersize', 6)
    marker = kwargs.get('marker', 'o')
    if not stella.is_tau:
        print('There is no tau-file for model {} in path: {}'.format(stella.Name, stella.Path))
        return
    print('Add tau [{}] for {} at tau_ph= {:.3f}'.format(':'.join(bnames), stella.Name, tau_ph))
    pars_data = ['T', 'V', 'R']
    tau = stella.get_tau().load(is_info=False)
    tau_data = tau.params_ph(pars=pars_data, moments=times, tau_ph=tau_ph, is_obs_time=is_obs_time)
    # Extract phot data
    data = {bn: {p: [] for p in pars_data} for bn in bnames}
    for bname in bnames:
        b = band_by_name(bname)
        fr_eff = b.freq_eff
        for p in pars_data:
            for i, (t, freq, y) in enumerate(tau_data[p]):
                s = '{:9.4f} '.format(t)
                # Pick the frequency bin closest to the band's effective one.
                idx = (np.abs(freq - fr_eff)).argmin()
                s += ' {:10e}'.format(y[idx])
                data[bname][p].append(y[idx])
    # Plot
    for ii, d in enumerate(dic_axes['r']):
        ax = d['par']
        # t = d['t']
        # print('{:9s} {}'.format('t_real', ' '.join([f'{p}_{b:10s}' for b in bnames])))
        # s = '{:9.4f} '.format(t)
        for i, bname in enumerate(bnames):
            r_ph = np.array(data[bname]['R'])
            color = band.colors(bname)
            # print(bname)
            xr = r_ph[ii]
            ax.text(xr, 0.5 + i, bname, fontsize=12, color=color)
            ax.axvline(x=xr, ymin=0., ymax=0.99, linestyle='--', color=color, alpha=alpha)
            # Temperature
            if tnorm is None:
                yt = np.log10(data[bname]['T'][ii])
                ax.plot(xr, yt, color='green', ls='', marker=marker, markersize=markersize, alpha=alpha)
            else:
                yt = data[bname]['T'][ii] / tnorm
                ax.plot(xr, yt, color='green', ls='', marker=marker, markersize=markersize, alpha=alpha)
            # Velocity
            yv = data[bname]['V'][ii] / vnorm
            ax.plot(xr, yv, color='blue', ls='', marker=marker, markersize=markersize, alpha=alpha)
            # print(f't={t:9.3f} R= {xr:e} V= {yv:e} T= {yt:e}')
            # s += f'R= {xr:e} V= {yv:e} T= {yt:e}'
        # print(s)
    # Print
    for p in pars_data:
        print('{:9s} {}'.format(
            't_real', ' '.join([f'{p}({b:4s}:{band_by_name(b).wl_eff_angs:.0f})' for b in bnames])))
        # print(p)
        for ii, d in enumerate(dic_axes['r']):
            t = d['t']
            s = '{:9.4f} '.format(t)
            for i, bname in enumerate(bnames):
                v = np.array(data[bname][p][ii])
                s += f'{v:12e} '
            print(s)
########################################
# from https://stackoverflow.com/questions/7358118/matplotlib-black-white-colormap-with-dashes-dots-etc
def setAxLinesBW(ax, color='black', markersize=3):
    """
    Take each Line2D in the axes, ax, and convert the line style to be
    suitable for black and white viewing.

    Each original color is mapped to a distinct dash pattern (for lines) or
    marker (for marker-only plots) so the curves stay distinguishable.

    NOTE(review): COLORMAP is keyed by a fixed set of color names; a line
    drawn with any other color spec (hex, 'C0', ...) would raise KeyError --
    confirm callers only use these colors. Also, get_linestyle() returning
    None (vs the string 'None') gates the marker branch -- verify against the
    matplotlib version in use.
    """
    # https://matplotlib.org/gallery/lines_bars_and_markers/linestyles.html
    # dashList = [(5, 2), (2, 5), (4, 10), (3, 3, 2, 2), (5, 2, 20, 2)]
    COLORMAP = {
        'blue': {'marker': 's', 'dash': (5, 2, 5, 2, 5, 10)},
        'darkgreen': {'marker': 'x', 'dash': (5, 5)},
        'red': {'marker': '*', 'dash': (5, 3, 1, 3)},
        'cyan': {'marker': 'd', 'dash': (1, 3, 1, 1)},
        'magenta': {'marker': 'o', 'dash': (2, 5)},
        'yellow': {'marker': '<', 'dash': (5, 3, 1, 2, 1, 10)},
        'k': {'marker': 'P', 'dash': (5, 2)}  # (3, 3, 2, 2) [1,2,1,10]}
    }
    lines_to_adjust = ax.get_lines()
    # try:
    #     lines_to_adjust += ax.get_legend().get_lines()
    # except AttributeError:
    #     pass
    for line in lines_to_adjust:
        origColor = line.get_color()
        origLineType = line.get_linestyle()
        line.set_color(color)
        if origLineType is not None:
            line.set_dashes(COLORMAP[origColor]['dash'])
        else:
            line.set_marker(COLORMAP[origColor]['marker'])
            line.set_markersize(markersize)
            line.set_linestyle('')
def setFigLinesBW(fig):
    """
    Convert every line on every axes of `fig` into a black-and-white
    friendly style (see setAxLinesBW).
    """
    for axes in fig.get_axes():
        setAxLinesBW(axes)
| 35.417026 | 114 | 0.535705 |
b006f9e27fd23300f7e8b7ad96c1e6f5920c991b | 14,819 | py | Python | UnleashClient/__init__.py | Unleash/unleash-client-pyhton | 2489ae9a2069628ae1d312855fc375baddaca004 | [
"MIT"
] | null | null | null | UnleashClient/__init__.py | Unleash/unleash-client-pyhton | 2489ae9a2069628ae1d312855fc375baddaca004 | [
"MIT"
] | null | null | null | UnleashClient/__init__.py | Unleash/unleash-client-pyhton | 2489ae9a2069628ae1d312855fc375baddaca004 | [
"MIT"
] | null | null | null | # pylint: disable=invalid-name
import warnings
from datetime import datetime, timezone
from typing import Callable, Optional
from apscheduler.job import Job
from apscheduler.schedulers.background import BackgroundScheduler
from apscheduler.triggers.interval import IntervalTrigger
from UnleashClient.api import register_client
from UnleashClient.periodic_tasks import fetch_and_load_features, aggregate_and_send_metrics
from UnleashClient.strategies import ApplicationHostname, Default, GradualRolloutRandom, \
GradualRolloutSessionId, GradualRolloutUserId, UserWithId, RemoteAddress, FlexibleRollout
from UnleashClient.constants import METRIC_LAST_SENT_TIME, DISABLED_VARIATION, ETAG
from UnleashClient.loader import load_features
from .utils import LOGGER
from .deprecation_warnings import strategy_v2xx_deprecation_check
from .cache import BaseCache, FileCache
# pylint: disable=dangerous-default-value
class UnleashClient:
    """
    A client for the Unleash feature toggle system.

    :param url: URL of the unleash server, required.
    :param app_name: Name of the application using the unleash client, required.
    :param environment: Name of the environment using the unleash client, optional & defaults to "default".
    :param instance_id: Unique identifier for unleash client instance, optional & defaults to "unleash-client-python"
    :param refresh_interval: Provisioning refresh interval in seconds, optional & defaults to 15 seconds
    :param refresh_jitter: Provisioning refresh interval jitter in seconds, optional & defaults to None
    :param metrics_interval: Metrics refresh interval in seconds, optional & defaults to 60 seconds
    :param metrics_jitter: Metrics refresh interval jitter in seconds, optional & defaults to None
    :param disable_metrics: Disables sending metrics to unleash server, optional & defaults to false.
    :param disable_registration: Disables registering the client with the unleash server, optional & defaults to false.
    :param custom_headers: Default headers to send to unleash server, optional & defaults to empty.
    :param custom_options: Default requests parameters, optional & defaults to empty. Can be used to skip SSL verification.
    :param custom_strategies: Dictionary of custom strategy names : custom strategy objects.
    :param cache_directory: Location of the cache directory. When unset, FCache will determine the location.
    :param project_name: Unleash project from which feature toggles are fetched, optional & defaults to None (all projects).
    :param verbose_log_level: Numerical log level (https://docs.python.org/3/library/logging.html#logging-levels) for cases where checking a feature flag fails.
    :param cache: Custom cache implementation that extends UnleashClient.cache.BaseCache. When unset, UnleashClient will use Fcache. A pre-seeded ("bootstrapped") cache is used as the initial configuration until the client is initialized; see the FileCache object for more information about bootstrapping.
    """
    def __init__(self,
                 url: str,
                 app_name: str,
                 environment: str = "default",
                 instance_id: str = "unleash-client-python",
                 refresh_interval: int = 15,
                 refresh_jitter: Optional[int] = None,
                 metrics_interval: int = 60,
                 metrics_jitter: Optional[int] = None,
                 disable_metrics: bool = False,
                 disable_registration: bool = False,
                 custom_headers: Optional[dict] = None,
                 custom_options: Optional[dict] = None,
                 custom_strategies: Optional[dict] = None,
                 cache_directory: Optional[str] = None,
                 project_name: Optional[str] = None,
                 verbose_log_level: int = 30,
                 cache: Optional[BaseCache] = None) -> None:
        custom_headers = custom_headers or {}
        custom_options = custom_options or {}
        custom_strategies = custom_strategies or {}
        # Configuration
        self.unleash_url = url.rstrip('/')
        self.unleash_app_name = app_name
        self.unleash_environment = environment
        self.unleash_instance_id = instance_id
        self.unleash_refresh_interval = refresh_interval
        self.unleash_refresh_jitter = int(refresh_jitter) if refresh_jitter is not None else None
        self.unleash_metrics_interval = metrics_interval
        self.unleash_metrics_jitter = int(metrics_jitter) if metrics_jitter is not None else None
        self.unleash_disable_metrics = disable_metrics
        self.unleash_disable_registration = disable_registration
        self.unleash_custom_headers = custom_headers
        self.unleash_custom_options = custom_options
        # Static context values merged into every is_enabled/get_variant call.
        self.unleash_static_context = {
            "appName": self.unleash_app_name,
            "environment": self.unleash_environment
        }
        self.unleash_project_name = project_name
        self.unleash_verbose_log_level = verbose_log_level
        # Class objects
        self.features: dict = {}
        self.scheduler = BackgroundScheduler()
        # Jobs are only created by initialize_client(); None until then.
        self.fl_job: Optional[Job] = None
        self.metric_job: Optional[Job] = None
        self.cache = cache or FileCache(self.unleash_app_name, directory=cache_directory)
        self.cache.mset({
            METRIC_LAST_SENT_TIME: datetime.now(timezone.utc),
            ETAG: ''
        })
        self.unleash_bootstrapped = self.cache.bootstrapped
        # Mappings: custom strategies may override the defaults of the same name.
        default_strategy_mapping = {
            "applicationHostname": ApplicationHostname,
            "default": Default,
            "gradualRolloutRandom": GradualRolloutRandom,
            "gradualRolloutSessionId": GradualRolloutSessionId,
            "gradualRolloutUserId": GradualRolloutUserId,
            "remoteAddress": RemoteAddress,
            "userWithId": UserWithId,
            "flexibleRollout": FlexibleRollout
        }
        if custom_strategies:
            strategy_v2xx_deprecation_check(list(custom_strategies.values()))
        self.strategy_mapping = {**custom_strategies, **default_strategy_mapping}
        # Client status
        self.is_initialized = False
        # Bootstrapping: pre-seeded cache serves features before initialization.
        if self.unleash_bootstrapped:
            load_features(cache=self.cache, feature_toggles=self.features, strategy_mapping=self.strategy_mapping)
    def initialize_client(self, fetch_toggles: bool = True) -> None:
        """
        Initializes client and starts communication with central unleash server(s).

        This kicks off:
        * Client registration
        * Provisioning poll
        * Stats poll

        If `fetch_toggles` is `False`, feature toggle polling will be turned off
        and instead the client will only load features from the cache. This is
        usually used to cater to multi-process setups, e.g. Django, Celery,
        etc.

        This will raise an exception on registration if the URL is invalid. It is done automatically if called inside a context manager as in:

        .. code-block:: python

            with UnleashClient(
                url="https://foo.bar",
                app_name="myClient1",
                instance_id="myinstanceid"
            ) as client:
                pass
        """
        # Only perform initialization steps if client is not initialized.
        if not self.is_initialized:
            try:
                # Setup
                metrics_args = {
                    "url": self.unleash_url,
                    "app_name": self.unleash_app_name,
                    "instance_id": self.unleash_instance_id,
                    "custom_headers": self.unleash_custom_headers,
                    "custom_options": self.unleash_custom_options,
                    "features": self.features,
                    "cache": self.cache
                }
                # Register app
                if not self.unleash_disable_registration:
                    register_client(self.unleash_url, self.unleash_app_name, self.unleash_instance_id,
                                    self.unleash_metrics_interval, self.unleash_custom_headers,
                                    self.unleash_custom_options, self.strategy_mapping)
                if fetch_toggles:
                    job_args = {
                        "url": self.unleash_url,
                        "app_name": self.unleash_app_name,
                        "instance_id": self.unleash_instance_id,
                        "custom_headers": self.unleash_custom_headers,
                        "custom_options": self.unleash_custom_options,
                        "cache": self.cache,
                        "features": self.features,
                        "strategy_mapping": self.strategy_mapping,
                        "project": self.unleash_project_name,
                    }
                    job_func: Callable = fetch_and_load_features
                else:
                    job_args = {
                        "cache": self.cache,
                        "feature_toggles": self.features,
                        "strategy_mapping": self.strategy_mapping,
                    }
                    job_func = load_features
                # Load the initial feature set synchronously before scheduling.
                job_func(**job_args)  # type: ignore
                # Start periodic jobs
                self.scheduler.start()
                self.fl_job = self.scheduler.add_job(job_func,
                                                     trigger=IntervalTrigger(
                                                         seconds=int(self.unleash_refresh_interval),
                                                         jitter=self.unleash_refresh_jitter,
                                                     ),
                                                     kwargs=job_args)
                if not self.unleash_disable_metrics:
                    self.metric_job = self.scheduler.add_job(aggregate_and_send_metrics,
                                                             trigger=IntervalTrigger(
                                                                 seconds=int(self.unleash_metrics_interval),
                                                                 jitter=self.unleash_metrics_jitter,
                                                             ),
                                                             kwargs=metrics_args)
            except Exception as excep:
                # Log exceptions during initialization. is_initialized will remain false.
                LOGGER.warning("Exception during UnleashClient initialization: %s", excep)
                # Bare raise preserves the original traceback for the caller.
                raise
            else:
                # Set is_initialized to true if no exception is encountered.
                self.is_initialized = True
        else:
            warnings.warn("Attempted to initialize an Unleash Client instance that has already been initialized.")
    def destroy(self) -> None:
        """
        Gracefully shuts down the Unleash client by stopping jobs, stopping the scheduler, and deleting the cache.

        You shouldn't need this too much!
        """
        # BUG FIX: self.fl_job is None when initialize_client() was never
        # called (or failed before scheduling); guard both jobs and only shut
        # the scheduler down if it was actually started.
        if self.fl_job:
            self.fl_job.remove()
        if self.metric_job:
            self.metric_job.remove()
        if self.scheduler.running:
            self.scheduler.shutdown()
        self.cache.destroy()
    @staticmethod
    def _get_fallback_value(fallback_function: Optional[Callable], feature_name: str, context: dict) -> bool:
        """Compute the default flag value via fallback_function, or False if none was given."""
        if fallback_function:
            fallback_value = fallback_function(feature_name, context)
        else:
            fallback_value = False
        return fallback_value
    # pylint: disable=broad-except
    def is_enabled(self,
                   feature_name: str,
                   context: Optional[dict] = None,
                   fallback_function: Optional[Callable] = None) -> bool:
        """
        Checks if a feature toggle is enabled.

        Notes:
        * If client hasn't been initialized yet or an error occurs, the flag will default to false.

        :param feature_name: Name of the feature
        :param context: Dictionary with context (e.g. IPs, email) for feature toggle.
        :param fallback_function: Allows users to provide a custom function to compute the default value; called with (feature_name, context).
        :return: Feature flag result
        """
        context = context or {}
        # Update context with static values
        context.update(self.unleash_static_context)
        if self.unleash_bootstrapped or self.is_initialized:
            try:
                return self.features[feature_name].is_enabled(context)
            except Exception as excep:
                LOGGER.log(self.unleash_verbose_log_level, "Returning default value for feature: %s", feature_name)
                LOGGER.log(self.unleash_verbose_log_level, "Error checking feature flag: %s", excep)
                return self._get_fallback_value(fallback_function, feature_name, context)
        else:
            LOGGER.log(self.unleash_verbose_log_level, "Returning default value for feature: %s", feature_name)
            LOGGER.log(self.unleash_verbose_log_level, "Attempted to get feature_flag %s, but client wasn't initialized!", feature_name)
            return self._get_fallback_value(fallback_function, feature_name, context)
    # pylint: disable=broad-except
    def get_variant(self,
                    feature_name: str,
                    context: Optional[dict] = None) -> dict:
        """
        Checks if a feature toggle is enabled. If so, return variant.

        Notes:
        * If client hasn't been initialized yet or an error occurs, the flag will default to the disabled variant.

        :param feature_name: Name of the feature
        :param context: Dictionary with context (e.g. IPs, email) for feature toggle.
        :return: Variant and feature flag status.
        """
        context = context or {}
        context.update(self.unleash_static_context)
        if self.unleash_bootstrapped or self.is_initialized:
            try:
                return self.features[feature_name].get_variant(context)
            except Exception as excep:
                LOGGER.log(self.unleash_verbose_log_level, "Returning default flag/variation for feature: %s", feature_name)
                LOGGER.log(self.unleash_verbose_log_level, "Error checking feature flag variant: %s", excep)
                return DISABLED_VARIATION
        else:
            LOGGER.log(self.unleash_verbose_log_level, "Returning default flag/variation for feature: %s", feature_name)
            LOGGER.log(self.unleash_verbose_log_level, "Attempted to get feature flag/variation %s, but client wasn't initialized!", feature_name)
            return DISABLED_VARIATION
    def __enter__(self) -> "UnleashClient":
        self.initialize_client()
        return self
    def __exit__(self, *args, **kwargs):
        self.destroy()
        # Never suppress exceptions raised inside the with-block.
        return False
| 48.428105 | 258 | 0.624941 |
3f7313342d4beb6f3562f50f0f09f409df02fa81 | 1,502 | py | Python | cocotb/test_dut.py | CospanDesign/sdio-device | b945ce644b27bc6de62a8bd0042c7b696b9b5afc | [
"MIT"
] | 13 | 2016-05-08T14:21:12.000Z | 2021-10-07T04:15:02.000Z | cocotb/test_dut.py | ianhan/sdio-device | b945ce644b27bc6de62a8bd0042c7b696b9b5afc | [
"MIT"
] | 1 | 2018-07-25T01:12:16.000Z | 2018-07-25T13:19:35.000Z | cocotb/test_dut.py | CospanDesign/nysa-sdio-device | b997f59909b38017eaba38dbce103dfdca1c61c5 | [
"MIT"
] | 11 | 2016-07-06T05:24:42.000Z | 2021-08-22T05:07:17.000Z | # Simple tests for an adder module
import os
import sys
import cocotb
import logging
from cocotb.result import TestFailure
from nysa.host.sim.sim_host import NysaSim
from cocotb.clock import Clock
import time
from array import array as Array
from dut_driver import wb_sdio_deviceDriver
SIM_CONFIG = "sim_config.json"
CLK_PERIOD = 10
MODULE_PATH = os.path.join(os.path.dirname(__file__), os.pardir, "rtl")
MODULE_PATH = os.path.abspath(MODULE_PATH)
def setup_dut(dut):
cocotb.fork(Clock(dut.clk, CLK_PERIOD).start())
@cocotb.coroutine
def wait_ready(nysa, dut):
#while not dut.hd_ready.value.get_value():
# yield(nysa.wait_clocks(1))
#yield(nysa.wait_clocks(100))
pass
@cocotb.test(skip = False)
def first_test(dut):
"""
Description:
Very Basic Functionality
Startup Nysa
Test ID: 0
Expected Results:
Write to all registers
"""
dut.test_id = 0
print "module path: %s" % MODULE_PATH
nysa = NysaSim(dut, SIM_CONFIG, CLK_PERIOD, user_paths = [MODULE_PATH])
setup_dut(dut)
yield(nysa.reset())
nysa.read_sdb()
yield (nysa.wait_clocks(10))
nysa.pretty_print_sdb()
driver = wb_sdio_deviceDriver(nysa, nysa.find_device(wb_sdio_deviceDriver)[0])
print "here!"
yield cocotb.external(driver.set_control)(0x01)
yield (nysa.wait_clocks(100))
v = yield cocotb.external(driver.get_control)()
dut.log.info("V: %d" % v)
dut.log.info("DUT Opened!")
dut.log.info("Ready")
| 23.107692 | 82 | 0.697071 |
5c75eea69915fbb4b8da63ee6ce68f3eee3343dd | 24,475 | py | Python | pysnmp-with-texts/ASCEND-MIBCLTM-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 8 | 2019-05-09T17:04:00.000Z | 2021-06-09T06:50:51.000Z | pysnmp-with-texts/ASCEND-MIBCLTM-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 4 | 2019-05-31T16:42:59.000Z | 2020-01-31T21:57:17.000Z | pysnmp-with-texts/ASCEND-MIBCLTM-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 10 | 2019-04-30T05:51:36.000Z | 2022-02-16T03:33:41.000Z | #
# PySNMP MIB module ASCEND-MIBCLTM-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/ASCEND-MIBCLTM-MIB
# Produced by pysmi-0.3.4 at Wed May 1 11:26:52 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
configuration, = mibBuilder.importSymbols("ASCEND-MIB", "configuration")
ObjectIdentifier, OctetString, Integer = mibBuilder.importSymbols("ASN1", "ObjectIdentifier", "OctetString", "Integer")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ConstraintsUnion, ValueSizeConstraint, ConstraintsIntersection, SingleValueConstraint, ValueRangeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsUnion", "ValueSizeConstraint", "ConstraintsIntersection", "SingleValueConstraint", "ValueRangeConstraint")
ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup")
ObjectIdentity, NotificationType, ModuleIdentity, Counter64, Gauge32, TimeTicks, MibIdentifier, MibScalar, MibTable, MibTableRow, MibTableColumn, Unsigned32, iso, Bits, Counter32, IpAddress, Integer32 = mibBuilder.importSymbols("SNMPv2-SMI", "ObjectIdentity", "NotificationType", "ModuleIdentity", "Counter64", "Gauge32", "TimeTicks", "MibIdentifier", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Unsigned32", "iso", "Bits", "Counter32", "IpAddress", "Integer32")
DisplayString, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TextualConvention")
class DisplayString(OctetString):
    """Local DisplayString type used by this MIB module.

    NOTE(review): this pysmi-generated subclass shadows the DisplayString
    imported from SNMPv2-TC above; presumably intentional generator output,
    but confirm before relying on the imported symbol in this module.
    """
    pass
# Root OID for the mibcltmCmd group (1.3.6.1.4.1.529.23.66).
mibcltmCmd = MibIdentifier((1, 3, 6, 1, 4, 1, 529, 23, 66))
# Table of CLTM command profile entries under the group root.
mibcltmCmdTable = MibTable((1, 3, 6, 1, 4, 1, 529, 23, 66, 1), )
if mibBuilder.loadTexts: mibcltmCmdTable.setStatus('mandatory')
if mibBuilder.loadTexts: mibcltmCmdTable.setDescription('A list of mibcltmCmd profile entries.')
# Conceptual row, indexed by the cltmCmd-Index-o column defined below.
mibcltmCmdEntry = MibTableRow((1, 3, 6, 1, 4, 1, 529, 23, 66, 1, 1), ).setIndexNames((0, "ASCEND-MIBCLTM-MIB", "cltmCmd-Index-o"))
if mibBuilder.loadTexts: mibcltmCmdEntry.setStatus('mandatory')
if mibBuilder.loadTexts: mibcltmCmdEntry.setDescription('A mibcltmCmd entry containing objects that maps to the parameters of mibcltmCmd profile.')
cltmCmd_Index_o = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 66, 1, 1, 1), Integer32()).setLabel("cltmCmd-Index-o").setMaxAccess("readonly")
if mibBuilder.loadTexts: cltmCmd_Index_o.setStatus('mandatory')
if mibBuilder.loadTexts: cltmCmd_Index_o.setDescription('')
cltmCmd_CltmSlot = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 66, 1, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 8, 11, 12, 13, 14, 15, 16, 17))).clone(namedValues=NamedValues(("anySlot", 1), ("slot1", 2), ("slot2", 3), ("slot3", 4), ("slot4", 5), ("slot5", 6), ("slot6", 7), ("slot7", 8), ("slot10", 11), ("slot11", 12), ("slot12", 13), ("slot13", 14), ("slot14", 15), ("slot15", 16), ("slot16", 17)))).setLabel("cltmCmd-CltmSlot").setMaxAccess("readonly")
if mibBuilder.loadTexts: cltmCmd_CltmSlot.setStatus('mandatory')
if mibBuilder.loadTexts: cltmCmd_CltmSlot.setDescription('Identify the CLTM slot within the system.')
cltmCmd_TestTimeStamp = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 66, 1, 1, 3), Integer32()).setLabel("cltmCmd-TestTimeStamp").setMaxAccess("readonly")
if mibBuilder.loadTexts: cltmCmd_TestTimeStamp.setStatus('mandatory')
if mibBuilder.loadTexts: cltmCmd_TestTimeStamp.setDescription('Value of the sysUpTime when the last test command was issued. This parameter is cleared when any of the test parameters are changed.')
cltmCmd_TestSequence = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 66, 1, 1, 4), Integer32()).setLabel("cltmCmd-TestSequence").setMaxAccess("readonly")
if mibBuilder.loadTexts: cltmCmd_TestSequence.setStatus('mandatory')
if mibBuilder.loadTexts: cltmCmd_TestSequence.setDescription('Sequence of the last issued test command.')
cltmCmd_TestOperation = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 66, 1, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40))).clone(namedValues=NamedValues(("none", 1), ("dmmTest", 2), ("lineInlsTest", 3), ("lineBgnsTest", 4), ("lineSignsTest", 5), ("lineLpresTest", 6), ("lineCldetTest", 7), ("lineImpstartTest", 8), ("lineImpstopTest", 9), ("lineImpreadTest", 10), ("calibTest", 11), ("toneSend", 12), ("toneRecv", 13), ("tdrSet", 14), ("tdrGet", 15), ("cltmReset", 16), ("cltmVersion", 17), ("cltmDownload", 18), ("dmmDcdelTest", 19), ("dmmCapeTest", 20), ("dmmAllTest", 21), ("txCtrlToneTest", 22), ("txTraceToneTest", 23), ("stopToneTest", 24), ("detRingerTest", 25), ("detAturTest", 26), ("btapTest", 27), ("voiceDetTest", 28), ("lineFcllocTest", 29), ("lineShortlocTest", 30), ("setResponderTest", 31), ("setBypassTest", 32), ("splitterDetectTest", 33), ("dmmAcdelTest", 34), ("dmmLbalTest", 35), ("dmmSoakTest", 36), ("sendVoiceTest", 37), ("measVoiceTest", 38), ("measDtaTest", 39), ("detaptorTest", 40)))).setLabel("cltmCmd-TestOperation").setMaxAccess("readwrite")
if mibBuilder.loadTexts: cltmCmd_TestOperation.setStatus('mandatory')
if mibBuilder.loadTexts: cltmCmd_TestOperation.setDescription("The current operation that is active on the CLT Module. Defaultls to 'none'. Set to 'none' to stop the test procedure.")
cltmCmd_DmmType = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 66, 1, 1, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("resistance", 1), ("dcVoltage", 2), ("acVoltage", 3), ("capacitance", 4)))).setLabel("cltmCmd-DmmType").setMaxAccess("readwrite")
if mibBuilder.loadTexts: cltmCmd_DmmType.setStatus('mandatory')
if mibBuilder.loadTexts: cltmCmd_DmmType.setDescription('DMM Measurement Type.')
cltmCmd_DmmLead = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 66, 1, 1, 7), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("tipRing", 1), ("tipSleeve", 2), ("ringSleeve", 3)))).setLabel("cltmCmd-DmmLead").setMaxAccess("readwrite")
if mibBuilder.loadTexts: cltmCmd_DmmLead.setStatus('mandatory')
if mibBuilder.loadTexts: cltmCmd_DmmLead.setDescription('DMM Measurement Leads.')
cltmCmd_BackgroundNoiseFilter = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 66, 1, 1, 8), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("psd", 1), ("e", 2), ("f", 3), ("g", 4)))).setLabel("cltmCmd-BackgroundNoiseFilter").setMaxAccess("readwrite")
if mibBuilder.loadTexts: cltmCmd_BackgroundNoiseFilter.setStatus('mandatory')
if mibBuilder.loadTexts: cltmCmd_BackgroundNoiseFilter.setDescription('Line Filter Type for Background Noise Test.')
cltmCmd_BackgroundNoiseTermination = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 66, 1, 1, 9), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("term100", 1), ("term135", 2), ("bridge100", 3), ("bridge135", 4)))).setLabel("cltmCmd-BackgroundNoiseTermination").setMaxAccess("readwrite")
if mibBuilder.loadTexts: cltmCmd_BackgroundNoiseTermination.setStatus('mandatory')
if mibBuilder.loadTexts: cltmCmd_BackgroundNoiseTermination.setDescription('Line Termination Type for Background Noise Test.')
cltmCmd_LoopResistanceUnit = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 66, 1, 1, 10), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("metric", 1), ("english", 2)))).setLabel("cltmCmd-LoopResistanceUnit").setMaxAccess("readwrite")
if mibBuilder.loadTexts: cltmCmd_LoopResistanceUnit.setStatus('mandatory')
if mibBuilder.loadTexts: cltmCmd_LoopResistanceUnit.setDescription('Measurement System for Loop Resistance Test.')
cltmCmd_LoopResistanceTemp = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 66, 1, 1, 11), Integer32()).setLabel("cltmCmd-LoopResistanceTemp").setMaxAccess("readwrite")
if mibBuilder.loadTexts: cltmCmd_LoopResistanceTemp.setStatus('mandatory')
if mibBuilder.loadTexts: cltmCmd_LoopResistanceTemp.setDescription('Line Temperature for Loop Resistance Test. Assigned in degree celsius/fahrenheit.')
cltmCmd_ImpulseNoiseStartThresh = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 66, 1, 1, 12), Integer32()).setLabel("cltmCmd-ImpulseNoiseStartThresh").setMaxAccess("readwrite")
if mibBuilder.loadTexts: cltmCmd_ImpulseNoiseStartThresh.setStatus('mandatory')
if mibBuilder.loadTexts: cltmCmd_ImpulseNoiseStartThresh.setDescription('Threshold for Impulse Noise Test. The Range is 50-100 dBrm.')
cltmCmd_ImpulseNoiseStartDelta = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 66, 1, 1, 13), Integer32()).setLabel("cltmCmd-ImpulseNoiseStartDelta").setMaxAccess("readwrite")
if mibBuilder.loadTexts: cltmCmd_ImpulseNoiseStartDelta.setStatus('mandatory')
if mibBuilder.loadTexts: cltmCmd_ImpulseNoiseStartDelta.setDescription('Delta Value for Impulse Noise Test. The delta range is 2-6 dB')
cltmCmd_ImpulseNoiseStartMaxCount = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 66, 1, 1, 14), Integer32()).setLabel("cltmCmd-ImpulseNoiseStartMaxCount").setMaxAccess("readwrite")
if mibBuilder.loadTexts: cltmCmd_ImpulseNoiseStartMaxCount.setStatus('mandatory')
if mibBuilder.loadTexts: cltmCmd_ImpulseNoiseStartMaxCount.setDescription('Max Count for Impulse Noise Test. The range of the value is 1-1999.')
cltmCmd_ImpulseNoiseStartDeadTime = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 66, 1, 1, 15), Integer32()).setLabel("cltmCmd-ImpulseNoiseStartDeadTime").setMaxAccess("readwrite")
if mibBuilder.loadTexts: cltmCmd_ImpulseNoiseStartDeadTime.setStatus('mandatory')
if mibBuilder.loadTexts: cltmCmd_ImpulseNoiseStartDeadTime.setDescription('Dead Time Value for Impulse Noise Start Test. The value is assigned in 0.1 ms increments.')
cltmCmd_ImpulseNoiseStartTimer = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 66, 1, 1, 16), Integer32()).setLabel("cltmCmd-ImpulseNoiseStartTimer").setMaxAccess("readwrite")
if mibBuilder.loadTexts: cltmCmd_ImpulseNoiseStartTimer.setStatus('mandatory')
if mibBuilder.loadTexts: cltmCmd_ImpulseNoiseStartTimer.setDescription('Timer values for Impulse Noise Start Test. The value is assigned in 1 minute increment.')
cltmCmd_CalibrationType = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 66, 1, 1, 17), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("insertionLoss", 1), ("backgroundNoise", 2)))).setLabel("cltmCmd-CalibrationType").setMaxAccess("readwrite")
if mibBuilder.loadTexts: cltmCmd_CalibrationType.setStatus('mandatory')
if mibBuilder.loadTexts: cltmCmd_CalibrationType.setDescription('Calibration Type')
cltmCmd_ToneSendFreq = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 66, 1, 1, 18), Integer32()).setLabel("cltmCmd-ToneSendFreq").setMaxAccess("readwrite")
if mibBuilder.loadTexts: cltmCmd_ToneSendFreq.setStatus('mandatory')
if mibBuilder.loadTexts: cltmCmd_ToneSendFreq.setDescription('Tone Send Frequency in KHz.')
cltmCmd_ToneSendLevel = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 66, 1, 1, 19), Integer32()).setLabel("cltmCmd-ToneSendLevel").setMaxAccess("readwrite")
if mibBuilder.loadTexts: cltmCmd_ToneSendLevel.setStatus('mandatory')
if mibBuilder.loadTexts: cltmCmd_ToneSendLevel.setDescription('Tone Send Level in dBm.')
cltmCmd_ToneSendPeriod = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 66, 1, 1, 20), Integer32()).setLabel("cltmCmd-ToneSendPeriod").setMaxAccess("readwrite")
if mibBuilder.loadTexts: cltmCmd_ToneSendPeriod.setStatus('mandatory')
if mibBuilder.loadTexts: cltmCmd_ToneSendPeriod.setDescription('Amount of time a tone is sent (1-20 Minutes).')
cltmCmd_TdrUnit = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 66, 1, 1, 21), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("metric", 1), ("english", 2)))).setLabel("cltmCmd-TdrUnit").setMaxAccess("readwrite")
if mibBuilder.loadTexts: cltmCmd_TdrUnit.setStatus('mandatory')
if mibBuilder.loadTexts: cltmCmd_TdrUnit.setDescription('Measurement System for TDR Test.')
cltmCmd_TdrGauge = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 66, 1, 1, 22), Integer32()).setLabel("cltmCmd-TdrGauge").setMaxAccess("readwrite")
if mibBuilder.loadTexts: cltmCmd_TdrGauge.setStatus('mandatory')
if mibBuilder.loadTexts: cltmCmd_TdrGauge.setDescription('TDR gauge. This is either 22/24/26 AWG for English system or 6/5/4 (0.1mm) for the metric system.')
cltmCmd_TdrVp = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 66, 1, 1, 23), Integer32()).setLabel("cltmCmd-TdrVp").setMaxAccess("readwrite")
if mibBuilder.loadTexts: cltmCmd_TdrVp.setStatus('mandatory')
if mibBuilder.loadTexts: cltmCmd_TdrVp.setDescription('TDR VP as percentage of the speed of light.')
cltmCmd_TdrAvg = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 66, 1, 1, 24), Integer32()).setLabel("cltmCmd-TdrAvg").setMaxAccess("readwrite")
if mibBuilder.loadTexts: cltmCmd_TdrAvg.setStatus('mandatory')
if mibBuilder.loadTexts: cltmCmd_TdrAvg.setDescription('Number of trials to get the avarage.')
cltmCmd_TdrGetType = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 66, 1, 1, 25), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("automatic", 1), ("manual", 2)))).setLabel("cltmCmd-TdrGetType").setMaxAccess("readwrite")
if mibBuilder.loadTexts: cltmCmd_TdrGetType.setStatus('mandatory')
if mibBuilder.loadTexts: cltmCmd_TdrGetType.setDescription('TDR Test Range Measurement Type. If set to automatic cltmCmdTdrStartLen and cltmCmdTdrMeasureLen do not need to be set.')
cltmCmd_TdrStartDistance = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 66, 1, 1, 26), Integer32()).setLabel("cltmCmd-TdrStartDistance").setMaxAccess("readwrite")
if mibBuilder.loadTexts: cltmCmd_TdrStartDistance.setStatus('mandatory')
if mibBuilder.loadTexts: cltmCmd_TdrStartDistance.setDescription('TDR Start Length for MANUAL mode, the value is given in feet (15..20000) for the English system and cm (460..609750) for the metric system.')
cltmCmd_TdrMeasurementLength = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 66, 1, 1, 27), Integer32()).setLabel("cltmCmd-TdrMeasurementLength").setMaxAccess("readwrite")
if mibBuilder.loadTexts: cltmCmd_TdrMeasurementLength.setStatus('mandatory')
if mibBuilder.loadTexts: cltmCmd_TdrMeasurementLength.setDescription('TDR Measurement Length for MANUAL mode, the value is given in feet (300..20000) for the English system and cm (9150..609750) for the metric system.')
cltmCmd_DmmdcdPeriod = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 66, 1, 1, 28), Integer32()).setLabel("cltmCmd-DmmdcdPeriod").setMaxAccess("readwrite")
if mibBuilder.loadTexts: cltmCmd_DmmdcdPeriod.setStatus('mandatory')
if mibBuilder.loadTexts: cltmCmd_DmmdcdPeriod.setDescription('Amount of time measurement is made (0,1-5 100ms;0=MAX).')
cltmCmd_DmmdcdVoltage = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 66, 1, 1, 29), Integer32()).setLabel("cltmCmd-DmmdcdVoltage").setMaxAccess("readwrite")
if mibBuilder.loadTexts: cltmCmd_DmmdcdVoltage.setStatus('mandatory')
if mibBuilder.loadTexts: cltmCmd_DmmdcdVoltage.setDescription('Test voltage to be used (-230 to 230 Volts).')
cltmCmd_DmmdcdImpedance = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 66, 1, 1, 30), Integer32()).setLabel("cltmCmd-DmmdcdImpedance").setMaxAccess("readwrite")
if mibBuilder.loadTexts: cltmCmd_DmmdcdImpedance.setStatus('mandatory')
if mibBuilder.loadTexts: cltmCmd_DmmdcdImpedance.setDescription('Output Impedance to be used (10 to 1000 Kohms).')
cltmCmd_DmmcapPeriod = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 66, 1, 1, 31), Integer32()).setLabel("cltmCmd-DmmcapPeriod").setMaxAccess("readwrite")
if mibBuilder.loadTexts: cltmCmd_DmmcapPeriod.setStatus('mandatory')
if mibBuilder.loadTexts: cltmCmd_DmmcapPeriod.setDescription('Amount of time measurement is made (0,1-5 100ms;0=MAX).')
cltmCmd_DmmallType = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 66, 1, 1, 32), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("resistance", 1), ("dcVoltage", 2), ("acVoltage", 3), ("capacitance", 4)))).setLabel("cltmCmd-DmmallType").setMaxAccess("readwrite")
if mibBuilder.loadTexts: cltmCmd_DmmallType.setStatus('mandatory')
if mibBuilder.loadTexts: cltmCmd_DmmallType.setDescription('DMM Measurement Type.')
# Auto-generated pysnmp definitions for ASCEND-MIBCLTM-MIB command scalars
# (OID prefix 1.3.6.1.4.1.529.23.66.1.1).  Each scalar is created, then, when
# MIB texts are loaded, marked 'mandatory' and given its description.
# DMM "all" measurement parameters.
cltmCmd_DmmallPeriod = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 66, 1, 1, 33), Integer32()).setLabel("cltmCmd-DmmallPeriod").setMaxAccess("readwrite")
if mibBuilder.loadTexts: cltmCmd_DmmallPeriod.setStatus('mandatory')
if mibBuilder.loadTexts: cltmCmd_DmmallPeriod.setDescription('Amount of time measurement is made (0,1-5 100ms;0=MAX).')
cltmCmd_DmmallInputImp = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 66, 1, 1, 34), Integer32()).setLabel("cltmCmd-DmmallInputImp").setMaxAccess("readwrite")
if mibBuilder.loadTexts: cltmCmd_DmmallInputImp.setStatus('mandatory')
if mibBuilder.loadTexts: cltmCmd_DmmallInputImp.setDescription('Input Impedance (100, 1000 Kohm).')
# Control-tone (CTONE) parameters: DSL service type and tone selection.
cltmCmd_CtoneType = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 66, 1, 1, 35), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("adsl", 1), ("glite", 2)))).setLabel("cltmCmd-CtoneType").setMaxAccess("readwrite")
if mibBuilder.loadTexts: cltmCmd_CtoneType.setStatus('mandatory')
if mibBuilder.loadTexts: cltmCmd_CtoneType.setDescription('Control tone, type of DSL service (ADSL, GLITE).')
cltmCmd_CtoneTone = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 66, 1, 1, 36), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("quiet", 1), ("restore", 2)))).setLabel("cltmCmd-CtoneTone").setMaxAccess("readwrite")
if mibBuilder.loadTexts: cltmCmd_CtoneTone.setStatus('mandatory')
if mibBuilder.loadTexts: cltmCmd_CtoneTone.setDescription('Control tone, type of Tone (QUIET, RESTORE).')
# Trace-tone (TTONE) parameters: lead pair, level, duration.
cltmCmd_TtoneLead = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 66, 1, 1, 37), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("tipRing", 1), ("tipSleeve", 2), ("ringSleeve", 3)))).setLabel("cltmCmd-TtoneLead").setMaxAccess("readwrite")
if mibBuilder.loadTexts: cltmCmd_TtoneLead.setStatus('mandatory')
if mibBuilder.loadTexts: cltmCmd_TtoneLead.setDescription('trace tone, Measurement Leads.')
cltmCmd_TtoneLevel = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 66, 1, 1, 38), Integer32()).setLabel("cltmCmd-TtoneLevel").setMaxAccess("readwrite")
if mibBuilder.loadTexts: cltmCmd_TtoneLevel.setStatus('mandatory')
if mibBuilder.loadTexts: cltmCmd_TtoneLevel.setDescription('trace tone, Tone Send Level in dBm.')
cltmCmd_TtonePeriod = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 66, 1, 1, 39), Integer32()).setLabel("cltmCmd-TtonePeriod").setMaxAccess("readwrite")
if mibBuilder.loadTexts: cltmCmd_TtonePeriod.setStatus('mandatory')
if mibBuilder.loadTexts: cltmCmd_TtonePeriod.setDescription('trace tone, Amount of time a tone is sent (1-20 Minutes).')
# Bridged-tap (BTAP) measurement window lengths.
cltmCmd_BtapStartLength = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 66, 1, 1, 40), Integer32()).setLabel("cltmCmd-BtapStartLength").setMaxAccess("readwrite")
if mibBuilder.loadTexts: cltmCmd_BtapStartLength.setStatus('mandatory')
if mibBuilder.loadTexts: cltmCmd_BtapStartLength.setDescription('Measurement start length (15 - 20000 ft. or 5 - 6097 meter).')
cltmCmd_BtapMeasureLength = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 66, 1, 1, 41), Integer32()).setLabel("cltmCmd-BtapMeasureLength").setMaxAccess("readwrite")
if mibBuilder.loadTexts: cltmCmd_BtapMeasureLength.setStatus('mandatory')
if mibBuilder.loadTexts: cltmCmd_BtapMeasureLength.setDescription('Measurement length (100 - 20000 ft. or 32 - 6097 meter).')
# FCLLOC test parameters (note: OID .43/.44 -- .42 is defined below).
cltmCmd_FcllocUnit = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 66, 1, 1, 43), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("metric", 1), ("english", 2)))).setLabel("cltmCmd-FcllocUnit").setMaxAccess("readwrite")
if mibBuilder.loadTexts: cltmCmd_FcllocUnit.setStatus('mandatory')
if mibBuilder.loadTexts: cltmCmd_FcllocUnit.setDescription('Measurement System for FCLLOC Test.')
cltmCmd_FcllocGauge = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 66, 1, 1, 44), Integer32()).setLabel("cltmCmd-FcllocGauge").setMaxAccess("readwrite")
if mibBuilder.loadTexts: cltmCmd_FcllocGauge.setStatus('mandatory')
if mibBuilder.loadTexts: cltmCmd_FcllocGauge.setDescription('FCLLOC gauge. This is either 22/24/26 AWG for English system or 6/5/4 (0.1mm) for the metric system.')
# SHORTLOC test parameters.
cltmCmd_ShortlocUnit = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 66, 1, 1, 45), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("metric", 1), ("english", 2)))).setLabel("cltmCmd-ShortlocUnit").setMaxAccess("readwrite")
if mibBuilder.loadTexts: cltmCmd_ShortlocUnit.setStatus('mandatory')
if mibBuilder.loadTexts: cltmCmd_ShortlocUnit.setDescription('Measurement System for SHORTLOC Test.')
cltmCmd_ShortlocGauge = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 66, 1, 1, 46), Integer32()).setLabel("cltmCmd-ShortlocGauge").setMaxAccess("readwrite")
if mibBuilder.loadTexts: cltmCmd_ShortlocGauge.setStatus('mandatory')
if mibBuilder.loadTexts: cltmCmd_ShortlocGauge.setDescription('SHORTLOC gauge. This is either 22/24/26 AWG for English system or 6/5/4 (0.1mm) for the metric system.')
cltmCmd_ShortlocType = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 66, 1, 1, 47), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("detect", 1), ("noDetect", 2)))).setLabel("cltmCmd-ShortlocType").setMaxAccess("readwrite")
if mibBuilder.loadTexts: cltmCmd_ShortlocType.setStatus('mandatory')
if mibBuilder.loadTexts: cltmCmd_ShortlocType.setDescription('SHORTLOC test type.')
# Profile create/delete action scalar (OID .42, declared out of order).
cltmCmd_Action_o = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 66, 1, 1, 42), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("noAction", 1), ("createProfile", 2), ("deleteProfile", 3)))).setLabel("cltmCmd-Action-o").setMaxAccess("readwrite")
if mibBuilder.loadTexts: cltmCmd_Action_o.setStatus('mandatory')
if mibBuilder.loadTexts: cltmCmd_Action_o.setDescription('')
# Register every generated object with the MIB builder so importing modules
# can resolve them by name from "ASCEND-MIBCLTM-MIB" (auto-generated call,
# split over two physical lines inside the parentheses).
mibBuilder.exportSymbols("ASCEND-MIBCLTM-MIB", cltmCmd_TdrGetType=cltmCmd_TdrGetType, cltmCmd_DmmallType=cltmCmd_DmmallType, mibcltmCmdTable=mibcltmCmdTable, cltmCmd_DmmdcdPeriod=cltmCmd_DmmdcdPeriod, cltmCmd_CalibrationType=cltmCmd_CalibrationType, cltmCmd_TtonePeriod=cltmCmd_TtonePeriod, cltmCmd_TdrStartDistance=cltmCmd_TdrStartDistance, cltmCmd_DmmallInputImp=cltmCmd_DmmallInputImp, cltmCmd_ShortlocGauge=cltmCmd_ShortlocGauge, cltmCmd_ToneSendFreq=cltmCmd_ToneSendFreq, cltmCmd_DmmLead=cltmCmd_DmmLead, cltmCmd_LoopResistanceUnit=cltmCmd_LoopResistanceUnit, cltmCmd_ShortlocType=cltmCmd_ShortlocType, cltmCmd_DmmallPeriod=cltmCmd_DmmallPeriod, cltmCmd_Index_o=cltmCmd_Index_o, cltmCmd_TdrVp=cltmCmd_TdrVp, cltmCmd_DmmType=cltmCmd_DmmType, cltmCmd_ImpulseNoiseStartMaxCount=cltmCmd_ImpulseNoiseStartMaxCount, mibcltmCmdEntry=mibcltmCmdEntry, cltmCmd_LoopResistanceTemp=cltmCmd_LoopResistanceTemp, cltmCmd_FcllocUnit=cltmCmd_FcllocUnit, mibcltmCmd=mibcltmCmd, cltmCmd_CtoneTone=cltmCmd_CtoneTone, cltmCmd_TestOperation=cltmCmd_TestOperation, cltmCmd_DmmdcdVoltage=cltmCmd_DmmdcdVoltage, cltmCmd_BackgroundNoiseFilter=cltmCmd_BackgroundNoiseFilter, cltmCmd_TdrUnit=cltmCmd_TdrUnit, DisplayString=DisplayString, cltmCmd_ImpulseNoiseStartDelta=cltmCmd_ImpulseNoiseStartDelta, cltmCmd_TestTimeStamp=cltmCmd_TestTimeStamp, cltmCmd_TestSequence=cltmCmd_TestSequence, cltmCmd_ShortlocUnit=cltmCmd_ShortlocUnit, cltmCmd_DmmdcdImpedance=cltmCmd_DmmdcdImpedance, cltmCmd_BackgroundNoiseTermination=cltmCmd_BackgroundNoiseTermination, cltmCmd_FcllocGauge=cltmCmd_FcllocGauge, cltmCmd_BtapMeasureLength=cltmCmd_BtapMeasureLength, cltmCmd_DmmcapPeriod=cltmCmd_DmmcapPeriod, cltmCmd_TdrGauge=cltmCmd_TdrGauge, cltmCmd_TdrMeasurementLength=cltmCmd_TdrMeasurementLength, cltmCmd_ToneSendLevel=cltmCmd_ToneSendLevel, cltmCmd_ImpulseNoiseStartThresh=cltmCmd_ImpulseNoiseStartThresh, cltmCmd_ImpulseNoiseStartTimer=cltmCmd_ImpulseNoiseStartTimer, cltmCmd_TdrAvg=cltmCmd_TdrAvg, cltmCmd_Action_o=cltmCmd_Action_o, 
cltmCmd_BtapStartLength=cltmCmd_BtapStartLength, cltmCmd_TtoneLead=cltmCmd_TtoneLead, cltmCmd_CtoneType=cltmCmd_CtoneType, cltmCmd_ToneSendPeriod=cltmCmd_ToneSendPeriod, cltmCmd_ImpulseNoiseStartDeadTime=cltmCmd_ImpulseNoiseStartDeadTime, cltmCmd_CltmSlot=cltmCmd_CltmSlot, cltmCmd_TtoneLevel=cltmCmd_TtoneLevel)
| 146.556886 | 2,311 | 0.777896 |
1d59e2f0d3e4c753b36059641cd4ef48f8c1d960 | 26,773 | py | Python | src/slim/deployment/model_deploy_test.py | uchuhimo/Ptolemy | 5c8ae188af30ee49d38f27d54c67af2eab9489e7 | [
"Apache-2.0"
] | 15 | 2020-08-24T07:11:20.000Z | 2021-09-13T08:03:42.000Z | src/slim/deployment/model_deploy_test.py | uchuhimo/Ptolemy | 5c8ae188af30ee49d38f27d54c67af2eab9489e7 | [
"Apache-2.0"
] | 5 | 2021-02-28T17:30:26.000Z | 2021-06-15T09:33:00.000Z | src/slim/deployment/model_deploy_test.py | uchuhimo/Ptolemy | 5c8ae188af30ee49d38f27d54c67af2eab9489e7 | [
"Apache-2.0"
] | 3 | 2020-10-22T09:11:11.000Z | 2021-01-16T14:49:34.000Z | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for model_deploy."""
from __future__ import absolute_import, division, print_function
import numpy as np
import tensorflow as tf
from deployment import model_deploy
slim = tf.contrib.slim
class DeploymentConfigTest(tf.test.TestCase):
    """Verifies the device and scope strings produced by DeploymentConfig."""

    def testDefaults(self):
        """Default config: clones on GPU:0, everything else on CPU:0."""
        config = model_deploy.DeploymentConfig()

        self.assertEqual(slim.get_variables(), [])
        self.assertEqual(config.caching_device(), None)
        self.assertDeviceEqual(config.clone_device(0), 'GPU:0')
        self.assertEqual(config.clone_scope(0), '')
        self.assertDeviceEqual(config.optimizer_device(), 'CPU:0')
        self.assertDeviceEqual(config.inputs_device(), 'CPU:0')
        self.assertDeviceEqual(config.variables_device(), 'CPU:0')

    def testCPUonly(self):
        """clone_on_cpu pins the clone to CPU:0 as well."""
        config = model_deploy.DeploymentConfig(clone_on_cpu=True)

        self.assertEqual(config.caching_device(), None)
        self.assertDeviceEqual(config.clone_device(0), 'CPU:0')
        self.assertEqual(config.clone_scope(0), '')
        self.assertDeviceEqual(config.optimizer_device(), 'CPU:0')
        self.assertDeviceEqual(config.inputs_device(), 'CPU:0')
        self.assertDeviceEqual(config.variables_device(), 'CPU:0')

    def testMultiGPU(self):
        """Multiple clones land on successive GPUs under clone_<i> scopes."""
        config = model_deploy.DeploymentConfig(num_clones=2)

        self.assertEqual(config.caching_device(), None)
        self.assertDeviceEqual(config.clone_device(0), 'GPU:0')
        self.assertDeviceEqual(config.clone_device(1), 'GPU:1')
        self.assertEqual(config.clone_scope(0), 'clone_0')
        self.assertEqual(config.clone_scope(1), 'clone_1')
        self.assertDeviceEqual(config.optimizer_device(), 'CPU:0')
        self.assertDeviceEqual(config.inputs_device(), 'CPU:0')
        self.assertDeviceEqual(config.variables_device(), 'CPU:0')

    def testPS(self):
        """With a parameter server, variables are placed on the ps job."""
        config = model_deploy.DeploymentConfig(num_clones=1, num_ps_tasks=1)

        self.assertDeviceEqual(config.clone_device(0),
                               '/job:worker/device:GPU:0')
        self.assertEqual(config.clone_scope(0), '')
        self.assertDeviceEqual(config.optimizer_device(),
                               '/job:worker/device:CPU:0')
        self.assertDeviceEqual(config.inputs_device(),
                               '/job:worker/device:CPU:0')
        with tf.device(config.variables_device()):
            var_a = tf.Variable(0)
            var_b = tf.Variable(0)
            noop = tf.no_op()
            cached = slim.variable('a', [],
                                   caching_device=config.caching_device())
        self.assertDeviceEqual(var_a.device, '/job:ps/task:0/device:CPU:0')
        self.assertDeviceEqual(var_a.device, var_a.value().device)
        self.assertDeviceEqual(var_b.device, '/job:ps/task:0/device:CPU:0')
        self.assertDeviceEqual(var_b.device, var_b.value().device)
        self.assertDeviceEqual(noop.device, '')
        self.assertDeviceEqual(cached.device, '/job:ps/task:0/device:CPU:0')
        self.assertDeviceEqual(cached.value().device, '')

    def testMultiGPUPS(self):
        """Multiple clones plus a ps: worker GPUs, caching on the accessing op."""
        config = model_deploy.DeploymentConfig(num_clones=2, num_ps_tasks=1)

        self.assertEqual(config.caching_device()(tf.no_op()), '')
        self.assertDeviceEqual(config.clone_device(0),
                               '/job:worker/device:GPU:0')
        self.assertDeviceEqual(config.clone_device(1),
                               '/job:worker/device:GPU:1')
        self.assertEqual(config.clone_scope(0), 'clone_0')
        self.assertEqual(config.clone_scope(1), 'clone_1')
        self.assertDeviceEqual(config.optimizer_device(),
                               '/job:worker/device:CPU:0')
        self.assertDeviceEqual(config.inputs_device(),
                               '/job:worker/device:CPU:0')

    def testReplicasPS(self):
        """Replicated training with ps tasks keeps clone 0 on the worker GPU."""
        config = model_deploy.DeploymentConfig(num_replicas=2, num_ps_tasks=2)

        self.assertDeviceEqual(config.clone_device(0),
                               '/job:worker/device:GPU:0')
        self.assertEqual(config.clone_scope(0), '')
        self.assertDeviceEqual(config.optimizer_device(),
                               '/job:worker/device:CPU:0')
        self.assertDeviceEqual(config.inputs_device(),
                               '/job:worker/device:CPU:0')

    def testReplicasMultiGPUPS(self):
        """Replicas combined with multiple clones per replica."""
        config = model_deploy.DeploymentConfig(num_replicas=2, num_clones=2,
                                               num_ps_tasks=2)

        self.assertDeviceEqual(config.clone_device(0),
                               '/job:worker/device:GPU:0')
        self.assertDeviceEqual(config.clone_device(1),
                               '/job:worker/device:GPU:1')
        self.assertEqual(config.clone_scope(0), 'clone_0')
        self.assertEqual(config.clone_scope(1), 'clone_1')
        self.assertDeviceEqual(config.optimizer_device(),
                               '/job:worker/device:CPU:0')
        self.assertDeviceEqual(config.inputs_device(),
                               '/job:worker/device:CPU:0')

    def testVariablesPS(self):
        """Two ps tasks: variables round-robin across task 0 and task 1."""
        config = model_deploy.DeploymentConfig(num_ps_tasks=2)

        with tf.device(config.variables_device()):
            var_a = tf.Variable(0)
            var_b = tf.Variable(0)
            noop = tf.no_op()
            cached = slim.variable('a', [],
                                   caching_device=config.caching_device())
        self.assertDeviceEqual(var_a.device, '/job:ps/task:0/device:CPU:0')
        self.assertDeviceEqual(var_a.device, var_a.value().device)
        self.assertDeviceEqual(var_b.device, '/job:ps/task:1/device:CPU:0')
        self.assertDeviceEqual(var_b.device, var_b.value().device)
        self.assertDeviceEqual(noop.device, '')
        self.assertDeviceEqual(cached.device, '/job:ps/task:0/device:CPU:0')
        self.assertDeviceEqual(cached.value().device, '')
def LogisticClassifier(inputs, labels, scope=None, reuse=None):
    """One-unit sigmoid classifier that registers a log loss against labels."""
    with tf.variable_scope(scope, 'LogisticClassifier', [inputs, labels],
                           reuse=reuse):
        probabilities = slim.fully_connected(inputs, 1,
                                             activation_fn=tf.sigmoid,
                                             scope='fully_connected')
        slim.losses.log_loss(probabilities, labels)
        return probabilities
def BatchNormClassifier(inputs, labels, scope=None, reuse=None):
    """Batch-normalized one-unit sigmoid classifier; registers a log loss."""
    with tf.variable_scope(scope, 'BatchNormClassifier', [inputs, labels],
                           reuse=reuse):
        normalized = slim.batch_norm(inputs, decay=0.1, fused=True)
        probabilities = slim.fully_connected(normalized, 1,
                                             activation_fn=tf.sigmoid,
                                             scope='fully_connected')
        slim.losses.log_loss(probabilities, labels)
        return probabilities
class CreatecloneTest(tf.test.TestCase):
    """Exercises model_deploy.create_clones placement and bookkeeping."""

    def setUp(self):
        # Build a tiny, linearly separable training set: the hot column of
        # each input row encodes (roughly) twice the label plus noise.
        np.random.seed(0)
        self._inputs = np.zeros((16, 4))
        self._labels = np.random.randint(0, 2, size=(16, 1)).astype(np.float32)
        self._logdir = self.get_temp_dir()
        for row in range(16):
            hot = int(2 * self._labels[row] + np.random.randint(0, 2))
            self._inputs[row, hot] = 1

    def testCreateLogisticClassifier(self):
        """A single logistic clone: 2 variables, no update ops."""
        graph = tf.Graph()
        with graph.as_default():
            tf.set_random_seed(0)
            inputs = tf.constant(self._inputs, dtype=tf.float32)
            labels = tf.constant(self._labels, dtype=tf.float32)
            model_fn = LogisticClassifier
            model_args = (inputs, labels)
            config = model_deploy.DeploymentConfig(num_clones=1)

            self.assertEqual(slim.get_variables(), [])
            clones = model_deploy.create_clones(config, model_fn, model_args)
            clone = clones[0]
            self.assertEqual(len(slim.get_variables()), 2)
            for var in slim.get_variables():
                self.assertDeviceEqual(var.device, 'CPU:0')
                self.assertDeviceEqual(var.value().device, 'CPU:0')
            self.assertEqual(clone.outputs.op.name,
                             'LogisticClassifier/fully_connected/Sigmoid')
            self.assertEqual(clone.scope, '')
            self.assertDeviceEqual(clone.device, 'GPU:0')
            self.assertEqual(len(slim.losses.get_losses()), 1)
            update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
            self.assertEqual(update_ops, [])

    def testCreateSingleclone(self):
        """A single batch-norm clone: 5 variables, 2 moving-average updates."""
        graph = tf.Graph()
        with graph.as_default():
            tf.set_random_seed(0)
            inputs = tf.constant(self._inputs, dtype=tf.float32)
            labels = tf.constant(self._labels, dtype=tf.float32)
            model_fn = BatchNormClassifier
            model_args = (inputs, labels)
            config = model_deploy.DeploymentConfig(num_clones=1)

            self.assertEqual(slim.get_variables(), [])
            clones = model_deploy.create_clones(config, model_fn, model_args)
            clone = clones[0]
            self.assertEqual(len(slim.get_variables()), 5)
            for var in slim.get_variables():
                self.assertDeviceEqual(var.device, 'CPU:0')
                self.assertDeviceEqual(var.value().device, 'CPU:0')
            self.assertEqual(clone.outputs.op.name,
                             'BatchNormClassifier/fully_connected/Sigmoid')
            self.assertEqual(clone.scope, '')
            self.assertDeviceEqual(clone.device, 'GPU:0')
            self.assertEqual(len(slim.losses.get_losses()), 1)
            update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
            self.assertEqual(len(update_ops), 2)

    def testCreateMulticlone(self):
        """Variables are shared; each clone gets its own scope, GPU, updates."""
        graph = tf.Graph()
        with graph.as_default():
            tf.set_random_seed(0)
            inputs = tf.constant(self._inputs, dtype=tf.float32)
            labels = tf.constant(self._labels, dtype=tf.float32)
            model_fn = BatchNormClassifier
            model_args = (inputs, labels)
            num_clones = 4
            config = model_deploy.DeploymentConfig(num_clones=num_clones)

            self.assertEqual(slim.get_variables(), [])
            clones = model_deploy.create_clones(config, model_fn, model_args)
            self.assertEqual(len(slim.get_variables()), 5)
            for var in slim.get_variables():
                self.assertDeviceEqual(var.device, 'CPU:0')
                self.assertDeviceEqual(var.value().device, 'CPU:0')
            self.assertEqual(len(clones), num_clones)
            for index, clone in enumerate(clones):
                self.assertEqual(
                    clone.outputs.op.name,
                    'clone_%d/BatchNormClassifier/fully_connected/Sigmoid'
                    % index)
                update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS,
                                               clone.scope)
                self.assertEqual(len(update_ops), 2)
                self.assertEqual(clone.scope, 'clone_%d/' % index)
                self.assertDeviceEqual(clone.device, 'GPU:%d' % index)

    def testCreateOnecloneWithPS(self):
        """One clone, one ps task: clone on the worker, variables on the ps."""
        graph = tf.Graph()
        with graph.as_default():
            tf.set_random_seed(0)
            inputs = tf.constant(self._inputs, dtype=tf.float32)
            labels = tf.constant(self._labels, dtype=tf.float32)
            model_fn = BatchNormClassifier
            model_args = (inputs, labels)
            config = model_deploy.DeploymentConfig(num_clones=1,
                                                   num_ps_tasks=1)

            self.assertEqual(slim.get_variables(), [])
            clones = model_deploy.create_clones(config, model_fn, model_args)
            self.assertEqual(len(clones), 1)
            clone = clones[0]
            self.assertEqual(clone.outputs.op.name,
                             'BatchNormClassifier/fully_connected/Sigmoid')
            self.assertDeviceEqual(clone.device, '/job:worker/device:GPU:0')
            self.assertEqual(clone.scope, '')
            self.assertEqual(len(slim.get_variables()), 5)
            for var in slim.get_variables():
                self.assertDeviceEqual(var.device, '/job:ps/task:0/CPU:0')
                self.assertDeviceEqual(var.device, var.value().device)

    def testCreateMulticloneWithPS(self):
        """Two clones, two ps tasks: variables alternate between the tasks."""
        graph = tf.Graph()
        with graph.as_default():
            tf.set_random_seed(0)
            inputs = tf.constant(self._inputs, dtype=tf.float32)
            labels = tf.constant(self._labels, dtype=tf.float32)
            model_fn = BatchNormClassifier
            model_args = (inputs, labels)
            config = model_deploy.DeploymentConfig(num_clones=2,
                                                   num_ps_tasks=2)

            self.assertEqual(slim.get_variables(), [])
            clones = model_deploy.create_clones(config, model_fn, model_args)
            self.assertEqual(len(slim.get_variables()), 5)
            for index, var in enumerate(slim.get_variables()):
                task = index % 2
                self.assertDeviceEqual(var.device,
                                       '/job:ps/task:%d/device:CPU:0' % task)
                self.assertDeviceEqual(var.device, var.value().device)
            self.assertEqual(len(clones), 2)
            for index, clone in enumerate(clones):
                self.assertEqual(
                    clone.outputs.op.name,
                    'clone_%d/BatchNormClassifier/fully_connected/Sigmoid'
                    % index)
                self.assertEqual(clone.scope, 'clone_%d/' % index)
                self.assertDeviceEqual(clone.device,
                                       '/job:worker/device:GPU:%d' % index)
class OptimizeclonesTest(tf.test.TestCase):
    """Exercises model_deploy.optimize_clones gradient/variable placement."""

    def setUp(self):
        # Build a tiny, linearly separable training set: the hot column of
        # each input row encodes (roughly) twice the label plus noise.
        np.random.seed(0)
        self._inputs = np.zeros((16, 4))
        self._labels = np.random.randint(0, 2, size=(16, 1)).astype(np.float32)
        self._logdir = self.get_temp_dir()
        for row in range(16):
            hot = int(2 * self._labels[row] + np.random.randint(0, 2))
            self._inputs[row, hot] = 1

    def testCreateLogisticClassifier(self):
        """Single logistic clone: gradients on GPU:0, variables on CPU:0."""
        graph = tf.Graph()
        with graph.as_default():
            tf.set_random_seed(0)
            inputs = tf.constant(self._inputs, dtype=tf.float32)
            labels = tf.constant(self._labels, dtype=tf.float32)
            model_fn = LogisticClassifier
            model_args = (inputs, labels)
            config = model_deploy.DeploymentConfig(num_clones=1)

            self.assertEqual(slim.get_variables(), [])
            clones = model_deploy.create_clones(config, model_fn, model_args)
            self.assertEqual(len(slim.get_variables()), 2)
            update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
            self.assertEqual(update_ops, [])

            optimizer = tf.train.GradientDescentOptimizer(learning_rate=1.0)
            total_loss, grads_and_vars = model_deploy.optimize_clones(
                clones, optimizer)
            self.assertEqual(len(grads_and_vars),
                             len(tf.trainable_variables()))
            self.assertEqual(total_loss.op.name, 'total_loss')
            for grad, var in grads_and_vars:
                self.assertDeviceEqual(grad.device, 'GPU:0')
                self.assertDeviceEqual(var.device, 'CPU:0')

    def testCreateSingleclone(self):
        """Single batch-norm clone: gradients on GPU:0, variables on CPU:0."""
        graph = tf.Graph()
        with graph.as_default():
            tf.set_random_seed(0)
            inputs = tf.constant(self._inputs, dtype=tf.float32)
            labels = tf.constant(self._labels, dtype=tf.float32)
            model_fn = BatchNormClassifier
            model_args = (inputs, labels)
            config = model_deploy.DeploymentConfig(num_clones=1)

            self.assertEqual(slim.get_variables(), [])
            clones = model_deploy.create_clones(config, model_fn, model_args)
            self.assertEqual(len(slim.get_variables()), 5)
            update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
            self.assertEqual(len(update_ops), 2)

            optimizer = tf.train.GradientDescentOptimizer(learning_rate=1.0)
            total_loss, grads_and_vars = model_deploy.optimize_clones(
                clones, optimizer)
            self.assertEqual(len(grads_and_vars),
                             len(tf.trainable_variables()))
            self.assertEqual(total_loss.op.name, 'total_loss')
            for grad, var in grads_and_vars:
                self.assertDeviceEqual(grad.device, 'GPU:0')
                self.assertDeviceEqual(var.device, 'CPU:0')

    def testCreateMulticlone(self):
        """Four clones: summed gradients carry no fixed device assignment."""
        graph = tf.Graph()
        with graph.as_default():
            tf.set_random_seed(0)
            inputs = tf.constant(self._inputs, dtype=tf.float32)
            labels = tf.constant(self._labels, dtype=tf.float32)
            model_fn = BatchNormClassifier
            model_args = (inputs, labels)
            num_clones = 4
            config = model_deploy.DeploymentConfig(num_clones=num_clones)

            self.assertEqual(slim.get_variables(), [])
            clones = model_deploy.create_clones(config, model_fn, model_args)
            self.assertEqual(len(slim.get_variables()), 5)
            update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
            self.assertEqual(len(update_ops), num_clones * 2)

            optimizer = tf.train.GradientDescentOptimizer(learning_rate=1.0)
            total_loss, grads_and_vars = model_deploy.optimize_clones(
                clones, optimizer)
            self.assertEqual(len(grads_and_vars),
                             len(tf.trainable_variables()))
            self.assertEqual(total_loss.op.name, 'total_loss')
            for grad, var in grads_and_vars:
                self.assertDeviceEqual(grad.device, '')
                self.assertDeviceEqual(var.device, 'CPU:0')

    def testCreateMulticloneCPU(self):
        """Four CPU clones behave like the multi-GPU case for placement."""
        graph = tf.Graph()
        with graph.as_default():
            tf.set_random_seed(0)
            inputs = tf.constant(self._inputs, dtype=tf.float32)
            labels = tf.constant(self._labels, dtype=tf.float32)
            model_fn = BatchNormClassifier
            model_args = (inputs, labels)
            num_clones = 4
            config = model_deploy.DeploymentConfig(num_clones=num_clones,
                                                   clone_on_cpu=True)

            self.assertEqual(slim.get_variables(), [])
            clones = model_deploy.create_clones(config, model_fn, model_args)
            self.assertEqual(len(slim.get_variables()), 5)
            update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
            self.assertEqual(len(update_ops), num_clones * 2)

            optimizer = tf.train.GradientDescentOptimizer(learning_rate=1.0)
            total_loss, grads_and_vars = model_deploy.optimize_clones(
                clones, optimizer)
            self.assertEqual(len(grads_and_vars),
                             len(tf.trainable_variables()))
            self.assertEqual(total_loss.op.name, 'total_loss')
            for grad, var in grads_and_vars:
                self.assertDeviceEqual(grad.device, '')
                self.assertDeviceEqual(var.device, 'CPU:0')

    def testCreateOnecloneWithPS(self):
        """With a ps: gradients on the worker GPU, variables on the ps."""
        graph = tf.Graph()
        with graph.as_default():
            tf.set_random_seed(0)
            inputs = tf.constant(self._inputs, dtype=tf.float32)
            labels = tf.constant(self._labels, dtype=tf.float32)
            model_fn = BatchNormClassifier
            model_args = (inputs, labels)
            config = model_deploy.DeploymentConfig(num_clones=1,
                                                   num_ps_tasks=1)

            self.assertEqual(slim.get_variables(), [])
            clones = model_deploy.create_clones(config, model_fn, model_args)
            self.assertEqual(len(slim.get_variables()), 5)
            update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
            self.assertEqual(len(update_ops), 2)

            optimizer = tf.train.GradientDescentOptimizer(learning_rate=1.0)
            total_loss, grads_and_vars = model_deploy.optimize_clones(
                clones, optimizer)
            self.assertEqual(len(grads_and_vars),
                             len(tf.trainable_variables()))
            self.assertEqual(total_loss.op.name, 'total_loss')
            for grad, var in grads_and_vars:
                self.assertDeviceEqual(grad.device,
                                       '/job:worker/device:GPU:0')
                self.assertDeviceEqual(var.device, '/job:ps/task:0/CPU:0')
class DeployTest(tf.test.TestCase):
    """End-to-end checks of model_deploy.deploy: train op, summaries."""

    def setUp(self):
        # Build a tiny, linearly separable training set: the hot column of
        # each input row encodes (roughly) twice the label plus noise.
        np.random.seed(0)
        self._inputs = np.zeros((16, 4))
        self._labels = np.random.randint(0, 2, size=(16, 1)).astype(np.float32)
        self._logdir = self.get_temp_dir()
        for row in range(16):
            hot = int(2 * self._labels[row] + np.random.randint(0, 2))
            self._inputs[row, hot] = 1

    def _addBesselsCorrection(self, sample_size, expected_var):
        """Apply Bessel's correction (n / (n - 1)) to a population variance."""
        correction_factor = sample_size / (sample_size - 1)
        expected_var *= correction_factor
        return expected_var

    def testLocalTrainOp(self):
        """Training two CPU clones reduces the loss and updates BN stats."""
        graph = tf.Graph()
        with graph.as_default():
            tf.set_random_seed(0)
            inputs = tf.constant(self._inputs, dtype=tf.float32)
            labels = tf.constant(self._labels, dtype=tf.float32)
            model_fn = BatchNormClassifier
            model_args = (inputs, labels)
            config = model_deploy.DeploymentConfig(num_clones=2,
                                                   clone_on_cpu=True)
            optimizer = tf.train.GradientDescentOptimizer(learning_rate=1.0)

            self.assertEqual(slim.get_variables(), [])
            model = model_deploy.deploy(config, model_fn, model_args,
                                        optimizer=optimizer)

            update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
            self.assertEqual(len(update_ops), 4)
            self.assertEqual(len(model.clones), 2)
            self.assertEqual(model.total_loss.op.name, 'total_loss')
            self.assertEqual(model.summary_op.op.name, 'summary_op/summary_op')
            self.assertEqual(model.train_op.op.name, 'train_op')

            with tf.Session() as sess:
                sess.run(tf.global_variables_initializer())
                moving_mean = tf.contrib.framework.get_variables_by_name(
                    'moving_mean')[0]
                moving_variance = tf.contrib.framework.get_variables_by_name(
                    'moving_variance')[0]
                initial_loss = sess.run(model.total_loss)
                initial_mean, initial_variance = sess.run(
                    [moving_mean, moving_variance])
                self.assertAllClose(initial_mean, [0.0, 0.0, 0.0, 0.0])
                self.assertAllClose(initial_variance, [1.0, 1.0, 1.0, 1.0])
                for _ in range(10):
                    sess.run(model.train_op)
                final_loss = sess.run(model.total_loss)
                self.assertLess(final_loss, initial_loss / 5.0)

                final_mean, final_variance = sess.run(
                    [moving_mean, moving_variance])
                expected_mean = np.array([0.125, 0.25, 0.375, 0.25])
                expected_var = np.array([0.109375, 0.1875, 0.234375, 0.1875])
                expected_var = self._addBesselsCorrection(16, expected_var)
                self.assertAllClose(final_mean, expected_mean)
                self.assertAllClose(final_variance, expected_var)

    def testNoSummariesOnGPU(self):
        """All summary inputs of a training deployment must live on the CPU."""
        with tf.Graph().as_default():
            config = model_deploy.DeploymentConfig(num_clones=2)

            # clone function creates a fully_connected layer with a regularizer loss.
            def ModelFn():
                inputs = tf.constant(1.0, shape=(10, 20), dtype=tf.float32)
                reg = tf.contrib.layers.l2_regularizer(0.001)
                tf.contrib.layers.fully_connected(inputs, 30,
                                                  weights_regularizer=reg)

            model = model_deploy.deploy(
                config, ModelFn,
                optimizer=tf.train.GradientDescentOptimizer(1.0))
            # The model summary op should have a few summary inputs and all of
            # them should be on the CPU.
            self.assertTrue(model.summary_op.op.inputs)
            for summary_input in model.summary_op.op.inputs:
                self.assertEqual('/device:CPU:0', summary_input.device)

    def testNoSummariesOnGPUForEvals(self):
        """Same as above, but for an eval deployment (no optimizer)."""
        with tf.Graph().as_default():
            config = model_deploy.DeploymentConfig(num_clones=2)

            # clone function creates a fully_connected layer with a regularizer loss.
            def ModelFn():
                inputs = tf.constant(1.0, shape=(10, 20), dtype=tf.float32)
                reg = tf.contrib.layers.l2_regularizer(0.001)
                tf.contrib.layers.fully_connected(inputs, 30,
                                                  weights_regularizer=reg)

            # No optimizer here, it's an eval.
            model = model_deploy.deploy(config, ModelFn)
            # The model summary op should have a few summary inputs and all of
            # them should be on the CPU.
            self.assertTrue(model.summary_op.op.inputs)
            for summary_input in model.summary_op.op.inputs:
                self.assertEqual('/device:CPU:0', summary_input.device)
# Standard TensorFlow test entry point: runs the TestCase classes above.
if __name__ == '__main__':
    tf.test.main()
| 46.970175 | 86 | 0.601875 |
2e18d120f9c93d7ea012d5095ec393804e44bde5 | 7,336 | py | Python | old-stuff-for-reference/nightjar-base/nightjar-src/python-src/nightjar/backend/api/deployment_map/service_data.py | groboclown/nightjar-mesh | 3655307b4a0ad00a0f18db835b3a0d04cb8e9615 | [
"MIT"
] | 3 | 2019-12-23T23:46:02.000Z | 2020-08-07T23:10:20.000Z | old-stuff-for-reference/nightjar-base/nightjar-src/python-src/nightjar/backend/api/deployment_map/service_data.py | groboclown/nightjar-mesh | 3655307b4a0ad00a0f18db835b3a0d04cb8e9615 | [
"MIT"
] | 2 | 2020-02-07T15:59:15.000Z | 2020-08-05T21:55:27.000Z | old-stuff-for-reference/nightjar-base/nightjar-src/python-src/nightjar/backend/api/deployment_map/service_data.py | groboclown/nightjar-mesh | 3655307b4a0ad00a0f18db835b3a0d04cb8e9615 | [
"MIT"
] | 1 | 2020-05-28T00:46:05.000Z | 2020-05-28T00:46:05.000Z |
"""
Data types for the service.
"""
from typing import List, Iterable, Dict, Union, Optional, Any
from ....msg import fatal, note
class EnvoyRoute:
    """A prefix-routing rule mapping a URL path prefix to weighted clusters.

    ``path_prefix`` must begin with ``'/'`` or ``'*'``.  ``cluster_weights``
    maps each cluster name to its relative routing weight; a route with no
    clusters produces no context and is therefore never generated.
    """

    __slots__ = ('path_prefix', 'cluster_weights', 'total_weight',)

    def __init__(
            self, path_prefix: str, cluster_weights: Dict[str, int],
    ) -> None:
        """Create a weighted route for the given path prefix."""
        assert path_prefix[0] in '/*'
        self.path_prefix = path_prefix
        self.cluster_weights = cluster_weights
        self.total_weight = sum(cluster_weights.values())

    def get_context(self) -> Optional[Dict[str, Any]]:
        """Return the template context for this route, or None if empty."""
        if not self.cluster_weights:
            return None
        weighted_clusters = [
            {'cluster_name': name, 'route_weight': weight}
            for name, weight in self.cluster_weights.items()
        ]
        count = len(weighted_clusters)
        return {
            'route_path': self.path_prefix,
            'has_one_cluster': count == 1,
            'has_many_clusters': count > 1,
            'total_cluster_weight': self.total_weight,
            'clusters': weighted_clusters,
        }
class EnvoyListener:
    """An envoy port listener (one per namespace) holding its routes."""

    __slots__ = ('port', 'routes',)

    def __init__(self, port: Optional[int], routes: Iterable['EnvoyRoute']) -> None:
        self.port = port
        self.routes = list(routes)

    def get_route_contexts(self) -> List[Dict[str, Any]]:
        """Collect the context of every route that generates one."""
        # Routes with no clusters yield a falsy context and are skipped.
        return [
            context
            for context in (route.get_context() for route in self.routes)
            if context
        ]

    def get_context(self) -> Dict[str, Any]:
        """Return the template context for this listener and its routes."""
        return {
            'has_mesh_port': self.port is not None,
            'mesh_port': self.port,
            'routes': self.get_route_contexts(),
        }
class EnvoyClusterEndpoint:
    """A single host/port endpoint inside an envoy cluster.

    Instances are value objects: equality and hashing are based on the
    (host, port) pair, so endpoints can safely be stored in sets and used
    as dictionary keys.
    """

    __slots__ = ('host', 'port',)

    def __init__(self, host: str, port: Union[int, str]) -> None:
        self.host = host
        self.port = port

    def __repr__(self) -> str:
        # Aids debugging when endpoints appear in logged collections.
        return 'EnvoyClusterEndpoint(host={0!r}, port={1!r})'.format(
            self.host, self.port)

    def __eq__(self, other: Any) -> bool:
        if not isinstance(other, EnvoyClusterEndpoint):
            return False
        return self.host == other.host and self.port == other.port

    def __ne__(self, other: Any) -> bool:
        return not self.__eq__(other)

    def __hash__(self) -> int:
        # Fix: hash the (host, port) pair as a tuple.  The previous
        # hash(host) + hash(port) summed the component hashes, which mixes
        # poorly and collides for unrelated endpoints whose component hashes
        # happen to sum to the same value.  Tuple hashing preserves the
        # eq/hash contract while distributing better.
        return hash((self.host, self.port))
class EnvoyCluster:
    """An envoy cluster (upstream target), with its per-path weighting
    already applied, and the endpoints currently known for it."""

    __slots__ = ('cluster_name', 'uses_http2', 'instances',)

    def __init__(
            self,
            cluster_name: str,
            uses_http2: bool,
            instances: Iterable['EnvoyClusterEndpoint'],
    ) -> None:
        self.cluster_name = cluster_name
        self.uses_http2 = uses_http2
        self.instances: List['EnvoyClusterEndpoint'] = list(instances)

    def endpoint_count(self) -> int:
        """Return how many endpoints this cluster currently has."""
        return len(self.instances)

    def get_context(self) -> Dict[str, Any]:
        """Return the template context for this cluster."""
        if not self.instances:
            # We need something here, otherwise the route will say the
            # cluster doesn't exist.
            # NOTE(review): the log message says a temporary endpoint is
            # created, but none is actually added -- the context below is
            # emitted with an empty endpoint list; confirm the intent.
            note("No instances known for cluster {c}; creating temporary one.", c=self.cluster_name)
        return {
            'name': self.cluster_name,
            'uses_http2': self.uses_http2,
            'endpoints': [
                {'ipv4': endpoint.host, 'port': str(endpoint.port)}
                for endpoint in self.instances
            ],
        }
class EnvoyConfig:
    """An entire configuration data schema for use to import into a mustache template."""

    __slots__ = ('listeners', 'clusters',)

    def __init__(
            self,
            listeners: Iterable['EnvoyListener'],
            clusters: Iterable['EnvoyCluster'],
    ) -> None:
        self.listeners = list(listeners)
        self.clusters = list(clusters)

    def get_context(
            self, network_name: str, service_member: str,
            admin_port: Optional[int],
    ) -> Dict[str, Any]:
        """Build the template context for the whole envoy configuration.

        Reports a fatal error when there are no listeners: a proxy with
        nothing to listen on cannot serve traffic.
        """
        if not self.listeners:
            fatal('No listeners; cannot be a proxy.')
        cluster_endpoint_count = sum(c.endpoint_count() for c in self.clusters)
        return {
            'network_name': network_name,
            'service_member': service_member,
            'has_admin_port': admin_port is not None,
            'admin_port': admin_port,
            'listeners': [lt.get_context() for lt in self.listeners],
            'has_clusters': cluster_endpoint_count > 0,
            'clusters': [c.get_context() for c in self.clusters],
        }

    @staticmethod
    def join(configs: Iterable['EnvoyConfig']) -> 'EnvoyConfig':
        """Merge multiple configurations into a single configuration.

        Clusters whose names collide across configs are renamed with a
        ``_<n>`` suffix, and the routes belonging to the same config are
        rewritten to point at the renamed cluster.
        """
        clusters: Dict[str, 'EnvoyCluster'] = {}
        listeners: List['EnvoyListener'] = []
        for config in configs:
            remapped_cluster_names: Dict[str, str] = {}
            for cluster in config.clusters:
                # TODO The cluster names need to be unique if the endpoints are unique.
                if cluster.cluster_name in clusters:
                    # BUG FIX: the original looped `while new_name not in
                    # clusters`, which searches for a name that is already
                    # taken and therefore never terminates on a collision.
                    # Search for the first *free* suffix instead.
                    i = 0
                    new_name = cluster.cluster_name + '_' + str(i)
                    while new_name in clusters:
                        i += 1
                        new_name = cluster.cluster_name + '_' + str(i)
                    remapped_cluster_names[cluster.cluster_name] = new_name
                    clusters[new_name] = EnvoyCluster(
                        new_name, cluster.uses_http2, cluster.instances,
                    )
                else:
                    clusters[cluster.cluster_name] = cluster
            for listener in config.listeners:
                new_routes: List['EnvoyRoute'] = []
                for route in listener.routes:
                    # Point each route at the (possibly renamed) cluster.
                    new_weights: Dict[str, int] = {
                        remapped_cluster_names.get(name, name): weight
                        for name, weight in route.cluster_weights.items()
                    }
                    new_routes.append(EnvoyRoute(route.path_prefix, new_weights))
                # TODO if the listener ports overlap, then that's an error.
                listeners.append(EnvoyListener(listener.port, new_routes))
        return EnvoyConfig(listeners, clusters.values())
| 35.785366 | 100 | 0.578517 |
334bc752b6f13a3fa414a83022292e97eac7f773 | 2,861 | py | Python | third_party/zhon/zhon/hanzi.py | zh794390558/DeepSpeech | 34178893327ad359cb816e55d7c66a10244fa08a | [
"Apache-2.0"
] | null | null | null | third_party/zhon/zhon/hanzi.py | zh794390558/DeepSpeech | 34178893327ad359cb816e55d7c66a10244fa08a | [
"Apache-2.0"
] | null | null | null | third_party/zhon/zhon/hanzi.py | zh794390558/DeepSpeech | 34178893327ad359cb816e55d7c66a10244fa08a | [
"Apache-2.0"
] | null | null | null |
"""Constants for working with Chinese characters."""
import sys
#: Character code ranges for pertinent CJK ideograph Unicode blocks.
characters = cjk_ideographs = (
'\u3007' # Ideographic number zero, see issue #17
'\u4E00-\u9FFF' # CJK Unified Ideographs
'\u3400-\u4DBF' # CJK Unified Ideographs Extension A
'\uF900-\uFAFF' # CJK Compatibility Ideographs
)
if sys.maxunicode > 0xFFFF:
characters += (
'\U00020000-\U0002A6DF' # CJK Unified Ideographs Extension B
'\U0002A700-\U0002B73F' # CJK Unified Ideographs Extension C
'\U0002B740-\U0002B81F' # CJK Unified Ideographs Extension D
'\U0002F800-\U0002FA1F' # CJK Compatibility Ideographs Supplement
)
#: Character code ranges for the Kangxi radicals and CJK Radicals Supplement.
radicals = (
'\u2F00-\u2FD5' # Kangxi Radicals
'\u2E80-\u2EF3' # CJK Radicals Supplement
)
#: A string containing Chinese punctuation marks (non-stops).
non_stops = (
# Fullwidth ASCII variants
'\uFF02\uFF03\uFF04\uFF05\uFF06\uFF07\uFF08\uFF09\uFF0A\uFF0B\uFF0C\uFF0D'
'\uFF0F\uFF1A\uFF1B\uFF1C\uFF1D\uFF1E\uFF20\uFF3B\uFF3C\uFF3D\uFF3E\uFF3F'
'\uFF40\uFF5B\uFF5C\uFF5D\uFF5E\uFF5F\uFF60'
# Halfwidth CJK punctuation
'\uFF62\uFF63\uFF64'
# CJK symbols and punctuation
'\u3000\u3001\u3003'
# CJK angle and corner brackets
'\u3008\u3009\u300A\u300B\u300C\u300D\u300E\u300F\u3010\u3011'
# CJK brackets and symbols/punctuation
'\u3014\u3015\u3016\u3017\u3018\u3019\u301A\u301B\u301C\u301D\u301E\u301F'
# Other CJK symbols
'\u3030'
# Special CJK indicators
'\u303E\u303F'
# Dashes
'\u2013\u2014'
# Quotation marks and apostrophe
'\u2018\u2019\u201B\u201C\u201D\u201E\u201F'
# General punctuation
'\u2026\u2027'
# Overscores and underscores
'\uFE4F'
# Small form variants
'\uFE51\uFE54'
# Latin punctuation
'\u00B7'
)
#: A string of Chinese stops.
stops = (
'\uFF01' # Fullwidth exclamation mark
'\uFF1F' # Fullwidth question mark
'\uFF61' # Halfwidth ideographic full stop
'\u3002' # Ideographic full stop
)
#: A string containing all Chinese punctuation.
punctuation = non_stops + stops
# A sentence end is defined by a stop followed by zero or more
# container-closing marks (e.g. quotation or brackets).
_sentence_end = '[{stops}]*'.format(stops=stops) + '[」﹂”』’》)]}〕〗〙〛〉】]*'
#: A regular expression pattern for a Chinese sentence. A sentence is defined
#: as a series of characters and non-stop punctuation marks followed by a stop
#: and zero or more container-closing punctuation marks (e.g. apostrophe or
# brackets).
sent = sentence = '[{characters}{radicals}{non_stops}]*{sentence_end}'.format(
characters=characters, radicals=radicals, non_stops=non_stops,
sentence_end=_sentence_end) | 31.43956 | 78 | 0.695211 |
ad7984a535e5fc96b9aa48dc3d8060ea666e94ed | 696 | py | Python | tensorflow_undo_labeling.py | trevorhobenshield/QuantLabel | 45aa9799e99ba90a2bbbc856c19839c87392b83f | [
"MIT"
] | null | null | null | tensorflow_undo_labeling.py | trevorhobenshield/QuantLabel | 45aa9799e99ba90a2bbbc856c19839c87392b83f | [
"MIT"
] | null | null | null | tensorflow_undo_labeling.py | trevorhobenshield/QuantLabel | 45aa9799e99ba90a2bbbc856c19839c87392b83f | [
"MIT"
] | null | null | null | import shutil
from pathlib import Path
def undo_labeling(img_directory: str) -> None:
    """
    Undo labeling performed by `tensorflow_label.py`

    Moves every file out of the per-label subdirectories back into
    ``img_directory`` itself, then removes the now-empty subdirectories.

    :param img_directory: path to image directory that previously had it's contents labeled
    :return: None
    """
    root = Path(img_directory)
    label_dirs = [entry for entry in root.iterdir() if entry.is_dir()]
    # move images out of directories
    for label_dir in label_dirs:
        for img in label_dir.iterdir():
            new = label_dir.parent / img.name
            print('*', img, '->', new)
            img.rename(new)
    # remove directories.  A plain loop replaces the original
    # `[shutil.rmtree(p) for p in ...]` — a list comprehension used purely
    # for its side effects builds a throwaway list and obscures intent.
    for label_dir in label_dirs:
        shutil.rmtree(label_dir)
def main():
    # Entry point: strips the labels from the default ``images`` directory.
    undo_labeling('images')


if __name__ == '__main__':
    main()
| 24.857143 | 91 | 0.62931 |
2cdaf8f48851a5a3c4aaa7fbd5f9804f1391f42d | 176 | py | Python | ABC048/ABC048b.py | VolgaKurvar/AtCoder | 21acb489f1594bbb1cdc64fbf8421d876b5b476d | [
"Unlicense"
] | null | null | null | ABC048/ABC048b.py | VolgaKurvar/AtCoder | 21acb489f1594bbb1cdc64fbf8421d876b5b476d | [
"Unlicense"
] | null | null | null | ABC048/ABC048b.py | VolgaKurvar/AtCoder | 21acb489f1594bbb1cdc64fbf8421d876b5b476d | [
"Unlicense"
] | null | null | null | #ABC048b
import sys
input = sys.stdin.readline
sys.setrecursionlimit(10**6)
# Count the multiples of x in the closed interval [a, b].
a, b, x = map(int, input().split())
# b // x - a // x counts the multiples of x in the half-open interval
# (a, b]; when a itself is a multiple of x it was excluded by the
# subtraction, so it is added back below.
ans = b // x - a // x
if (a % x == 0):
    ans += 1
print(ans) | 16 | 35 | 0.585227 |
9c28d5a0050e216f625987a901ad705ccc7e2078 | 7,491 | py | Python | noxfile-template.py | TestShared-demo/python-docs-samples | c03bb27e87f50c31cd8b9e509dca2d0e0eec37ab | [
"Apache-2.0"
] | 1 | 2021-09-27T10:21:18.000Z | 2021-09-27T10:21:18.000Z | noxfile-template.py | TestShared-demo/python-docs-samples | c03bb27e87f50c31cd8b9e509dca2d0e0eec37ab | [
"Apache-2.0"
] | 2 | 2020-05-05T05:16:18.000Z | 2020-05-18T08:16:38.000Z | noxfile-template.py | TestShared-demo/python-docs-samples | c03bb27e87f50c31cd8b9e509dca2d0e0eec37ab | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import os
from pathlib import Path
import sys
from typing import Callable, Dict, List, Optional
import nox
# WARNING - WARNING - WARNING - WARNING - WARNING
# WARNING - WARNING - WARNING - WARNING - WARNING
# DO NOT EDIT THIS FILE EVER!
# WARNING - WARNING - WARNING - WARNING - WARNING
# WARNING - WARNING - WARNING - WARNING - WARNING
# Copy `noxfile_config.py` to your directory and modify it instead.
# `TEST_CONFIG` dict is a configuration hook that allows users to
# modify the test configurations. The values here should be in sync
# with `noxfile_config.py`. Users will copy `noxfile_config.py` into
# their directory and modify it.
TEST_CONFIG = {
# You can opt out from the test for specific Python versions.
'ignored_versions': ["2.7"],
# Old samples are opted out of enforcing Python type hints
# All new samples should feature them
'enforce_type_hints': False,
# An envvar key for determining the project id to use. Change it
# to 'BUILD_SPECIFIC_GCLOUD_PROJECT' if you want to opt in using a
# build specific Cloud project. You can also use your own string
# to use your own Cloud project.
'gcloud_project_env': 'GOOGLE_CLOUD_PROJECT',
# 'gcloud_project_env': 'BUILD_SPECIFIC_GCLOUD_PROJECT',
# A dictionary you want to inject into your test. Don't put any
# secrets here. These values will override predefined values.
'envs': {},
}
try:
# Ensure we can import noxfile_config in the project's directory.
sys.path.append('.')
from noxfile_config import TEST_CONFIG_OVERRIDE
except ImportError as e:
print("No user noxfile_config found: detail: {}".format(e))
TEST_CONFIG_OVERRIDE = {}
# Update the TEST_CONFIG with the user supplied values.
TEST_CONFIG.update(TEST_CONFIG_OVERRIDE)
def get_pytest_env_vars() -> Dict[str, str]:
    """Build the environment-variable dict for the pytest invocation."""
    # Resolve the configured project env var; a KeyError here (unset
    # variable) is intentional — the build must define it.
    project_key = TEST_CONFIG['gcloud_project_env']
    project = os.environ[project_key]
    env = {
        'GOOGLE_CLOUD_PROJECT': project,
        'GCLOUD_PROJECT': project,  # deprecated
    }
    # User-supplied envs override the predefined values.
    env.update(TEST_CONFIG['envs'])
    return env
# DO NOT EDIT - automatically generated.
# All versions used to tested samples.
ALL_VERSIONS = ["2.7", "3.6", "3.7", "3.8"]
# Any default versions that should be ignored.
IGNORED_VERSIONS = TEST_CONFIG['ignored_versions']
TESTED_VERSIONS = sorted([v for v in ALL_VERSIONS if v not in IGNORED_VERSIONS])
INSTALL_LIBRARY_FROM_SOURCE = bool(os.environ.get("INSTALL_LIBRARY_FROM_SOURCE", False))
#
# Style Checks
#
def _determine_local_import_names(start_dir: str) -> List[str]:
"""Determines all import names that should be considered "local".
This is used when running the linter to insure that import order is
properly checked.
"""
file_ext_pairs = [os.path.splitext(path) for path in os.listdir(start_dir)]
return [
basename
for basename, extension in file_ext_pairs
if extension == ".py"
or os.path.isdir(os.path.join(start_dir, basename))
and basename not in ("__pycache__")
]
# Linting with flake8.
#
# We ignore the following rules:
# E203: whitespace before ‘:’
# E266: too many leading ‘#’ for block comment
# E501: line too long
# I202: Additional newline in a section of imports
#
# We also need to specify the rules which are ignored by default:
# ['E226', 'W504', 'E126', 'E123', 'W503', 'E24', 'E704', 'E121']
FLAKE8_COMMON_ARGS = [
"--show-source",
"--builtin=gettext",
"--max-complexity=20",
"--import-order-style=google",
"--exclude=.nox,.cache,env,lib,generated_pb2,*_pb2.py,*_pb2_grpc.py",
"--ignore=E121,E123,E126,E203,E226,E24,E266,E501,E704,W503,W504,I202",
"--max-line-length=88",
]
@nox.session
def lint(session: nox.sessions.Session) -> None:
    """Run flake8 over the sample with the shared lint configuration."""
    # flake8-annotations (type-hint enforcement) is installed only when the
    # sample opts in via TEST_CONFIG['enforce_type_hints'].
    if not TEST_CONFIG['enforce_type_hints']:
        session.install("flake8", "flake8-import-order")
    else:
        session.install("flake8", "flake8-import-order", "flake8-annotations")

    # Tell the import-order plugin which names are first-party.
    local_names = _determine_local_import_names(".")
    args = FLAKE8_COMMON_ARGS + [
        "--application-import-names",
        ",".join(local_names),
        "."
    ]
    session.run("flake8", *args)
#
# Black
#
@nox.session
def blacken(session: nox.sessions.Session) -> None:
    """Auto-format every top-level .py file in this sample with black."""
    session.install("black")
    targets = [entry for entry in os.listdir(".") if entry.endswith(".py")]
    session.run("black", *targets)
#
# Sample Tests
#
PYTEST_COMMON_ARGS = ["--junitxml=sponge_log.xml"]
def _session_tests(session: nox.sessions.Session, post_install: Optional[Callable] = None) -> None:
    """Runs py.test for a particular project."""
    # Install the sample's runtime and test dependencies when present.
    if os.path.exists("requirements.txt"):
        session.install("-r", "requirements.txt")

    if os.path.exists("requirements-test.txt"):
        session.install("-r", "requirements-test.txt")

    # Optionally install the library under test from the repository checkout
    # instead of PyPI (controlled by the INSTALL_LIBRARY_FROM_SOURCE envvar).
    if INSTALL_LIBRARY_FROM_SOURCE:
        session.install("-e", _get_repo_root())

    # Hook for callers to perform extra setup after dependency install.
    if post_install:
        post_install(session)

    session.run(
        "pytest",
        *(PYTEST_COMMON_ARGS + session.posargs),
        # Pytest will return 5 when no tests are collected. This can happen
        # on travis where slow and flaky tests are excluded.
        # See http://doc.pytest.org/en/latest/_modules/_pytest/main.html
        success_codes=[0, 5],
        env=get_pytest_env_vars()
    )
@nox.session(python=ALL_VERSIONS)
def py(session: nox.sessions.Session) -> None:
    """Runs py.test for a sample using the specified version of Python."""
    if session.python in TESTED_VERSIONS:
        _session_tests(session)
    else:
        # This interpreter was excluded via TEST_CONFIG['ignored_versions'].
        session.skip("SKIPPED: {} tests are disabled for this sample.".format(
            session.python
        ))
#
# Readmegen
#
def _get_repo_root() -> Optional[str]:
    """ Returns the root folder of the project. """
    # Get root of this repository.
    # Assume we don't have directories nested deeper than 10 items.
    p = Path(os.getcwd())
    for i in range(10):
        # NOTE(review): Path.parent of the filesystem root is the root
        # itself, never None, so this guard appears unreachable — confirm.
        if p is None:
            break
        # Walk upward until a directory containing .git is found.
        if Path(p / ".git").exists():
            return str(p)
        p = p.parent
    # NOTE(review): despite the Optional[str] return annotation this raises
    # rather than returning None when no repository root is found.
    raise Exception("Unable to detect repository root.")
GENERATED_READMES = sorted([x for x in Path(".").rglob("*.rst.in")])
@nox.session
@nox.parametrize("path", GENERATED_READMES)
def readmegen(session: nox.sessions.Session, path: str) -> None:
    """(Re-)generates the readme for a sample."""
    session.install("jinja2", "pyyaml")
    dir_ = os.path.dirname(path)

    # The template may declare extra dependencies of its own.
    if os.path.exists(os.path.join(dir_, "requirements.txt")):
        session.install("-r", os.path.join(dir_, "requirements.txt"))

    in_file = os.path.join(dir_, "README.rst.in")
    # readme_gen.py renders README.rst from the .rst.in template.
    session.run(
        "python", _get_repo_root() + "/scripts/readme-gen/readme_gen.py", in_file
    )
| 30.327935 | 89 | 0.682552 |
34e7e34f38ac1ec4a7e66e25f8cc8a870376076c | 10,885 | py | Python | build_wrapper.py | plafl/aduana | b14bb32f9b92393db241733f94d06a286525a3c1 | [
"BSD-3-Clause"
] | 54 | 2015-05-17T19:17:58.000Z | 2019-12-26T01:47:46.000Z | build_wrapper.py | plafl/aduana | b14bb32f9b92393db241733f94d06a286525a3c1 | [
"BSD-3-Clause"
] | 19 | 2015-06-22T09:58:31.000Z | 2017-05-08T08:56:50.000Z | build_wrapper.py | plafl/aduana | b14bb32f9b92393db241733f94d06a286525a3c1 | [
"BSD-3-Clause"
] | 15 | 2015-07-03T14:17:38.000Z | 2021-06-30T15:48:17.000Z | import platform
import cffi
aduana_src_root = 'lib/src/'
aduana_lib_root = 'lib/lib/'
aduana_src = [
aduana_lib_root + x for x in [
'smaz.c',
'xxhash.c',
'lmdb/mdb.c',
'lmdb/midl.c',
]] + [
aduana_src_root + x for x in [
'mmap_array.c',
'page_db.c',
'hits.c',
'page_rank.c',
'scheduler.c',
'bf_scheduler.c',
'util.c',
'page_rank_scorer.c',
'hits_scorer.c',
'txn_manager.c',
'domain_temp.c',
'freq_scheduler.c',
'freq_algo.c'
]]
if platform.system() == 'Windows':
aduana_src.append(aduana_lib_root + 'mman.c')
aduana_include = [
aduana_lib_root,
aduana_lib_root + 'lmdb',
aduana_src_root
]
aduana_define = [
('MDB_MAXKEYSIZE', 500)
]
aduana_compile_args = [
'-std=c99',
'-m64',
'-msse2',
'-pthread'
]
aduana_libraries = ['m']
ffi = cffi.FFI()
ffi.set_source(
'_aduana',
'''
#include "bf_scheduler.h"
#include "domain_temp.h"
#include "hits.h"
#include "hits_scorer.h"
#include "link_stream.h"
#include "mmap_array.h"
#include "page_db.h"
#include "page_rank.h"
#include "page_rank_scorer.h"
#include "scheduler.h"
#include "txn_manager.h"
#include "util.h"
#include "freq_scheduler.h"
#include "freq_algo.h"
''',
sources = aduana_src,
include_dirs = aduana_include,
define_macros = aduana_define,
extra_compile_args = aduana_compile_args,
libraries = aduana_libraries
)
ffi.cdef(
"""
const char *
error_message(const void *error);
int
error_code(const void *error);
"""
)
ffi.cdef(
"""
typedef struct {
char *url;
uint64_t linked_from;
uint64_t depth;
double first_crawl;
double last_crawl;
uint64_t n_changes;
uint64_t n_crawls;
float score;
uint64_t content_hash_length;
char *content_hash;
} PageInfo;
float
page_info_rate(const PageInfo *pi);
int
page_info_is_seed(const PageInfo *pi);
void
page_info_delete(PageInfo *pi);
typedef struct {
char *url; /**< ASCII, null terminated string for the page URL*/
float score; /**< An estimated value of the link score */
} LinkInfo;
typedef struct {
LinkInfo *link_info; /**< Array of LinkInfo */
size_t n_links; /**< Number of items inside link_info */
size_t m_links; /**< Maximum number of items that can be stored inside link_info */
} PageLinks;
typedef struct {
char *url; /**< ASCII, null terminated string for the page URL*/
PageLinks *links; /**< List of links inside this page */
double time; /**< Number of seconds since epoch */
float score; /**< A number giving an idea of the page content's value */
char *content_hash; /**< A hash to detect content change since last crawl.
Arbitrary byte sequence */
size_t content_hash_length; /**< Number of byes of the content_hash */
} CrawledPage;
CrawledPage *
crawled_page_new(const char *url);
void
crawled_page_delete(CrawledPage *cp);
int
crawled_page_add_link(CrawledPage *cp, const char *url, float score);
size_t
crawled_page_n_links(const CrawledPage *cp);
int
crawled_page_set_hash64(CrawledPage *cp, uint64_t hash);
const LinkInfo *
crawled_page_get_link(const CrawledPage *cp, size_t i);
typedef enum {
page_db_error_ok = 0, /**< No error */
page_db_error_memory, /**< Error allocating memory */
page_db_error_invalid_path, /**< File system error */
page_db_error_internal, /**< Unexpected error */
page_db_error_no_page /**< A page was requested but could not be found */
} PageDBError;
typedef struct {
char *path;
void* txn_manager;
void *domain_temp;
void *error;
int persist;
} PageDB;
uint64_t
page_db_hash(const char *url);
PageDBError
page_db_new(PageDB **db, const char *path);
PageDBError
page_db_get_info(PageDB *db, uint64_t hash, PageInfo **pi);
PageDBError
page_db_add(PageDB *db, const CrawledPage *page, void **page_info_list);
PageDBError
page_db_delete(PageDB *db);
void
page_db_set_persist(PageDB *db, int value);
typedef enum {
stream_state_init,
stream_state_next,
stream_state_end,
stream_state_error
} StreamState;
typedef struct {
PageDB *db;
void *cur;
StreamState state;
} HashInfoStream;
PageDBError
hashinfo_stream_new(HashInfoStream **st, PageDB *db);
StreamState
hashinfo_stream_next(HashInfoStream *st, uint64_t *hash, PageInfo **pi);
void
hashinfo_stream_delete(HashInfoStream *st);
"""
)
ffi.cdef(
"""
typedef enum {
page_rank_scorer_error_ok = 0, /**< No error */
page_rank_scorer_error_memory, /**< Error allocating memory */
page_rank_scorer_error_internal, /**< Unexpected error */
page_rank_scorer_error_precision /**< Could not achieve precision in maximum number of loops */
} PageRankScorerError;
typedef struct {
void *page_rank;
PageDB *page_db;
void *error;
int persist;
int use_content_scores;
} PageRankScorer;
PageRankScorerError
page_rank_scorer_new(PageRankScorer **prs, PageDB *db);
PageRankScorerError
page_rank_scorer_delete(PageRankScorer *prs);
void
page_rank_scorer_setup(PageRankScorer *prs, void *scorer);
void
page_rank_scorer_set_persist(PageRankScorer *prs, int value);
void
page_rank_scorer_set_use_content_scores(PageRankScorer *prs, int value);
void
page_rank_scorer_set_damping(PageRankScorer *prs, float value);
"""
)
ffi.cdef(
"""
typedef enum {
hits_scorer_error_ok = 0, /**< No error */
hits_scorer_error_memory, /**< Error allocating memory */
hits_scorer_error_internal, /**< Unexpected error */
hits_scorer_error_precision /**< Could not achieve precision in maximum number of loops */
} HitsScorerError;
typedef struct {
void *hits;
PageDB *page_db;
void *error;
int persist;
int use_content_scores;
} HitsScorer;
HitsScorerError
hits_scorer_new(HitsScorer **hs, PageDB *db);
HitsScorerError
hits_scorer_delete(HitsScorer *hs);
void
hits_scorer_setup(HitsScorer *hs, void *scorer);
void
hits_scorer_set_persist(HitsScorer *hs, int value);
void
hits_scorer_set_use_content_scores(HitsScorer *hs, int value);
"""
)
ffi.cdef(
"""
typedef struct {
char **urls;
size_t n_urls;
} PageRequest;
PageRequest*
page_request_new(size_t n_urls);
void
page_request_delete(PageRequest *req);
int
page_request_add_url(PageRequest *req, const char *url);
"""
)
ffi.cdef(
"""
typedef enum {
bf_scheduler_error_ok = 0, /**< No error */
bf_scheduler_error_memory, /**< Error allocating memory */
bf_scheduler_error_invalid_path, /**< File system error */
bf_scheduler_error_internal, /**< Unexpected error */
bf_scheduler_error_thread /**< Error inside the threading library */
} BFSchedulerError;
typedef struct {
PageDB *page_db;
void *scorer;
void *txn_manager;
char *path;
void *update_thread;
void *error;
int persist;
float max_soft_domain_crawl_rate;
float max_hard_domain_crawl_rate;
uint64_t max_crawl_depth;
} BFScheduler;
BFSchedulerError
bf_scheduler_new(BFScheduler **sch, PageDB *db, const char *path);
BFSchedulerError
bf_scheduler_add(BFScheduler *sch, const CrawledPage *page);
BFSchedulerError
bf_scheduler_request(BFScheduler *sch, size_t n_pages, PageRequest **request);
void
bf_scheduler_delete(BFScheduler *sch);
BFSchedulerError
bf_scheduler_update_start(BFScheduler *sch);
BFSchedulerError
bf_scheduler_update_stop(BFScheduler *sch);
void
bf_scheduler_set_persist(BFScheduler *sch, int value);
BFSchedulerError
bf_scheduler_set_max_domain_crawl_rate(BFScheduler *sch,
float max_soft_crawl_rate,
float max_hard_crawl_rate);
void
bf_scheduler_set_max_crawl_depth(BFScheduler *sch, uint64_t value);
typedef int... time_t;
void
bf_scheduler_set_update_interval(BFScheduler *sch, time_t value);
"""
)
ffi.cdef(
"""
typedef enum {
freq_scheduler_error_ok = 0, /**< No error */
freq_scheduler_error_memory, /**< Error allocating memory */
freq_scheduler_error_invalid_path, /**< File system error */
freq_scheduler_error_internal /**< Unexpected error */
} FreqSchedulerError;
typedef struct {
char *path;
PageDB *page_db;
void *txn_manager;
void *error;
int persist;
float margin;
size_t max_n_crawls;
} FreqScheduler;
FreqSchedulerError
freq_scheduler_new(FreqScheduler **sch, PageDB *db, const char *path);
FreqSchedulerError
freq_scheduler_load_simple(FreqScheduler *sch,
float freq_default,
float freq_scale);
FreqSchedulerError
freq_scheduler_load_mmap(FreqScheduler *sch, void *freqs);
FreqSchedulerError
freq_scheduler_request(FreqScheduler *sch,
size_t max_requests,
PageRequest **request);
FreqSchedulerError
freq_scheduler_add(FreqScheduler *sch, const CrawledPage *page);
void
freq_scheduler_delete(FreqScheduler *sch);
FreqSchedulerError
freq_scheduler_cursor_open(FreqScheduler *sch, void **cursor);
FreqSchedulerError
freq_scheduler_cursor_commit(FreqScheduler *sch, void *cursor);
void
freq_scheduler_cursor_abort(FreqScheduler *sch, void *cursor);
FreqSchedulerError
freq_scheduler_cursor_write(FreqScheduler *sch,
void *cursor,
uint64_t hash,
float freq);
"""
)
ffi.cdef(
"""
int
freq_algo_simple(PageDB *db, void **freqs, const char *path, char **error_msg);
"""
)
if __name__ == '__main__':
ffi.compile()
| 25.793839 | 105 | 0.614975 |
1f47bb8fe486e871e18d2090ba1b8c6c5cda2430 | 24,623 | py | Python | yt/visualization/color_maps.py | aemerick/yt | 984484616d75c6d7603e71b9d45c5d617705a0e5 | [
"BSD-3-Clause-Clear"
] | null | null | null | yt/visualization/color_maps.py | aemerick/yt | 984484616d75c6d7603e71b9d45c5d617705a0e5 | [
"BSD-3-Clause-Clear"
] | null | null | null | yt/visualization/color_maps.py | aemerick/yt | 984484616d75c6d7603e71b9d45c5d617705a0e5 | [
"BSD-3-Clause-Clear"
] | null | null | null | import numpy as np
import matplotlib.colors as cc
import matplotlib.cm as mcm
from . import _colormap_data as _cm
try:
import cmocean
except ImportError:
cmocean = None
def is_colormap(cmap):
    # True when *cmap* is a matplotlib Colormap instance.
    return isinstance(cmap,cc.Colormap)
def check_color(name):
    """Return True when *name* can be parsed as a matplotlib color."""
    try:
        cc.colorConverter.to_rgb(name)
    except ValueError:
        # to_rgb raises ValueError for names it cannot interpret.
        return False
    return True
yt_colormaps = {}
def add_cmap(name, cdict):
    """Deprecated alias, kept for backwards compatibility.

    Emits a deprecation warning and forwards to :func:`add_colormap`.
    """
    # Imported locally to avoid a circular import at module load time —
    # presumably; TODO confirm against yt.funcs.
    from yt.funcs import issue_deprecation_warning
    issue_deprecation_warning("Deprecated alias. Use add_colormap instead.")
    add_colormap(name, cdict)
def add_colormap(name, cdict):
    """
    Adds a colormap to the colormaps available in yt for this session

    Parameters
    ----------
    name : str
        Name under which the colormap is registered.
    cdict : dict
        Matplotlib segment-data dictionary with 'red', 'green' and 'blue'
        breakpoint tables.
    """
    yt_colormaps[name] = \
        cc.LinearSegmentedColormap(name,cdict,256)
    # Mirror the raw segment data into matplotlib.cm's namespace so the map
    # is discoverable the same way built-in maps are.
    mcm.datad[name] = cdict
    mcm.__dict__[name] = cdict
    try: # API compatibility
        # register_cmap is absent on some matplotlib versions; ignore then.
        mcm.register_cmap(name, yt_colormaps[name])
    except AttributeError:
        pass
# The format is as follows:
# First number is the number at which we are defining a color breakpoint
# Second number is the (0..1) number to interpolate to when coming *from below*
# Third number is the (0..1) number to interpolate to when coming *from above*
# Next up is boilerplate -- the name, the colormap dict we just made, and the
# number of segments we want. This is probably fine as is.
cdict = {'red': ((0.0, 80/256., 80/256.),
(0.2, 0.0, 0.0),
(0.4, 0.0, 0.0),
(0.6, 256/256., 256/256.),
(0.95, 256/256., 256/256.),
(1.0, 150/256., 150/256.)),
'green': ((0.0, 0/256., 0/256.),
(0.2, 0/256., 0/256.),
(0.4, 130/256., 130/256.),
(0.6, 256/256., 256/256.),
(1.0, 0.0, 0.0)),
'blue': ((0.0, 80/256., 80/256.),
(0.2, 220/256., 220/256.),
(0.4, 0.0, 0.0),
(0.6, 20/256., 20/256.),
(1.0, 0.0, 0.0))}
add_colormap('bds_highcontrast', cdict)
add_colormap('algae', cdict)
# This next colormap was designed by Tune Kamae and converted here by Matt
_vs = np.linspace(0,1,255)
_kamae_red = np.minimum(255,
113.9*np.sin(7.64*(_vs**1.705)+0.701)-916.1*(_vs+1.755)**1.862 \
+ 3587.9*_vs+2563.4)/255.0
_kamae_grn = np.minimum(255,
70.0*np.sin(8.7*(_vs**1.26)-2.418)+151.7*_vs**0.5+70.0)/255.0
_kamae_blu = np.minimum(255,
194.5*_vs**2.88+99.72*np.exp(-77.24*(_vs-0.742)**2.0)
+ 45.40*_vs**0.089+10.0)/255.0
cdict = {'red':np.transpose([_vs,_kamae_red,_kamae_red]),
'green':np.transpose([_vs,_kamae_grn,_kamae_grn]),
'blue':np.transpose([_vs,_kamae_blu,_kamae_blu])}
add_colormap('kamae', cdict)
# This one is a simple black & green map
cdict = {'red': ((0.0, 0.0, 0.0),
(1.0, 0.0, 0.0)),
'green': ((0.0, 0.0, 0.0),
(1.0, 1.0, 1.0)),
'blue': ((0.0, 0.0, 0.0),
(1.0, 0.0, 0.0))}
add_colormap('black_green', cdict)
cdict = {'red': ((0.0, 0.0, 0.0),
(1.0, 0.2, 0.2)),
'green': ((0.0, 0.0, 0.0),
(1.0, 0.2, 0.2)),
'blue': ((0.0, 0.0, 0.0),
(1.0, 1.0, 1.0))}
add_colormap('black_blueish', cdict)
# This one is a variant of a colormap commonly
# used for X-ray observations by Maxim Markevitch
cdict = {'red': ((0.0, 0.0, 0.0),
(0.3, 0.0, 0.0),
(0.352, 0.245, 0.245),
(0.42, 0.5, 0.5),
(0.51, 0.706, 0.706),
(0.613, 0.882, 0.882),
(0.742, 1.0, 1.0),
(1.0, 1.0, 1.0)),
'green': ((0.0, 0.0, 0.0),
(0.585, 0.0, 0.0),
(0.613, 0.196, 0.196),
(0.693, 0.48, 0.48),
(0.785, 0.696, 0.696),
(0.885, 0.882, 0.882),
(1.0, 1.0, 1.0)),
'blue': ((0.0, 0.0, 0.0),
(0.136, 0.0, 0.0),
(0.136, 0.373, 0.373),
(0.391, 1.0, 1.0),
(1.0, 1.0, 1.0))}
add_colormap("purple_mm", cdict)
# This one comes from
# http://permalink.gmane.org/gmane.comp.python.matplotlib.devel/10518
# and is an implementation of http://arxiv.org/abs/1108.5083
#
# cubehelix parameters
_gamma_cubehelix = 1.0
_s_cubehelix = 0.5
_r_cubehelix = -1.5
_h_cubehelix = 1.0
_cubehelix_data = {
'red': lambda x: x**_gamma_cubehelix + (_h_cubehelix * x**_gamma_cubehelix * (1 - x**_gamma_cubehelix) / 2) * (-0.14861 * np.cos(2 * np.pi * (_s_cubehelix / 3 + _r_cubehelix * x)) + 1.78277 * np.sin(2 * np.pi * (_s_cubehelix / 3 + _r_cubehelix * x))),
'green': lambda x: x**_gamma_cubehelix + (_h_cubehelix * x**_gamma_cubehelix * (1 - x**_gamma_cubehelix) / 2) * (-0.29227 * np.cos(2 * np.pi * (_s_cubehelix / 3 + _r_cubehelix * x)) - 0.90649 * np.sin(2 * np.pi * (_s_cubehelix / 3 + _r_cubehelix * x))),
'blue': lambda x: x**_gamma_cubehelix + (_h_cubehelix * x**_gamma_cubehelix * (1 - x**_gamma_cubehelix) / 2) * (1.97294 * np.cos(2 * np.pi * (_s_cubehelix / 3 + _r_cubehelix * x))),
}
add_colormap("cubehelix", _cubehelix_data)
# The turbo colormap, by Anton Mikhailov.
# from: https://gist.github.com/mikhailov-work/ee72ba4191942acecc03fe6da94fc73f
_turbo_colormap_data = np.array(
[[0.18995,0.07176,0.23217],[0.19483,0.08339,0.26149],
[0.19956,0.09498,0.29024],[0.20415,0.10652,0.31844],
[0.20860,0.11802,0.34607],[0.21291,0.12947,0.37314],
[0.21708,0.14087,0.39964],[0.22111,0.15223,0.42558],
[0.22500,0.16354,0.45096],[0.22875,0.17481,0.47578],
[0.23236,0.18603,0.50004],[0.23582,0.19720,0.52373],
[0.23915,0.20833,0.54686],[0.24234,0.21941,0.56942],
[0.24539,0.23044,0.59142],[0.24830,0.24143,0.61286],
[0.25107,0.25237,0.63374],[0.25369,0.26327,0.65406],
[0.25618,0.27412,0.67381],[0.25853,0.28492,0.69300],
[0.26074,0.29568,0.71162],[0.26280,0.30639,0.72968],
[0.26473,0.31706,0.74718],[0.26652,0.32768,0.76412],
[0.26816,0.33825,0.78050],[0.26967,0.34878,0.79631],
[0.27103,0.35926,0.81156],[0.27226,0.36970,0.82624],
[0.27334,0.38008,0.84037],[0.27429,0.39043,0.85393],
[0.27509,0.40072,0.86692],[0.27576,0.41097,0.87936],
[0.27628,0.42118,0.89123],[0.27667,0.43134,0.90254],
[0.27691,0.44145,0.91328],[0.27701,0.45152,0.92347],
[0.27698,0.46153,0.93309],[0.27680,0.47151,0.94214],
[0.27648,0.48144,0.95064],[0.27603,0.49132,0.95857],
[0.27543,0.50115,0.96594],[0.27469,0.51094,0.97275],
[0.27381,0.52069,0.97899],[0.27273,0.53040,0.98461],
[0.27106,0.54015,0.98930],[0.26878,0.54995,0.99303],
[0.26592,0.55979,0.99583],[0.26252,0.56967,0.99773],
[0.25862,0.57958,0.99876],[0.25425,0.58950,0.99896],
[0.24946,0.59943,0.99835],[0.24427,0.60937,0.99697],
[0.23874,0.61931,0.99485],[0.23288,0.62923,0.99202],
[0.22676,0.63913,0.98851],[0.22039,0.64901,0.98436],
[0.21382,0.65886,0.97959],[0.20708,0.66866,0.97423],
[0.20021,0.67842,0.96833],[0.19326,0.68812,0.96190],
[0.18625,0.69775,0.95498],[0.17923,0.70732,0.94761],
[0.17223,0.71680,0.93981],[0.16529,0.72620,0.93161],
[0.15844,0.73551,0.92305],[0.15173,0.74472,0.91416],
[0.14519,0.75381,0.90496],[0.13886,0.76279,0.89550],
[0.13278,0.77165,0.88580],[0.12698,0.78037,0.87590],
[0.12151,0.78896,0.86581],[0.11639,0.79740,0.85559],
[0.11167,0.80569,0.84525],[0.10738,0.81381,0.83484],
[0.10357,0.82177,0.82437],[0.10026,0.82955,0.81389],
[0.09750,0.83714,0.80342],[0.09532,0.84455,0.79299],
[0.09377,0.85175,0.78264],[0.09287,0.85875,0.77240],
[0.09267,0.86554,0.76230],[0.09320,0.87211,0.75237],
[0.09451,0.87844,0.74265],[0.09662,0.88454,0.73316],
[0.09958,0.89040,0.72393],[0.10342,0.89600,0.71500],
[0.10815,0.90142,0.70599],[0.11374,0.90673,0.69651],
[0.12014,0.91193,0.68660],[0.12733,0.91701,0.67627],
[0.13526,0.92197,0.66556],[0.14391,0.92680,0.65448],
[0.15323,0.93151,0.64308],[0.16319,0.93609,0.63137],
[0.17377,0.94053,0.61938],[0.18491,0.94484,0.60713],
[0.19659,0.94901,0.59466],[0.20877,0.95304,0.58199],
[0.22142,0.95692,0.56914],[0.23449,0.96065,0.55614],
[0.24797,0.96423,0.54303],[0.26180,0.96765,0.52981],
[0.27597,0.97092,0.51653],[0.29042,0.97403,0.50321],
[0.30513,0.97697,0.48987],[0.32006,0.97974,0.47654],
[0.33517,0.98234,0.46325],[0.35043,0.98477,0.45002],
[0.36581,0.98702,0.43688],[0.38127,0.98909,0.42386],
[0.39678,0.99098,0.41098],[0.41229,0.99268,0.39826],
[0.42778,0.99419,0.38575],[0.44321,0.99551,0.37345],
[0.45854,0.99663,0.36140],[0.47375,0.99755,0.34963],
[0.48879,0.99828,0.33816],[0.50362,0.99879,0.32701],
[0.51822,0.99910,0.31622],[0.53255,0.99919,0.30581],
[0.54658,0.99907,0.29581],[0.56026,0.99873,0.28623],
[0.57357,0.99817,0.27712],[0.58646,0.99739,0.26849],
[0.59891,0.99638,0.26038],[0.61088,0.99514,0.25280],
[0.62233,0.99366,0.24579],[0.63323,0.99195,0.23937],
[0.64362,0.98999,0.23356],[0.65394,0.98775,0.22835],
[0.66428,0.98524,0.22370],[0.67462,0.98246,0.21960],
[0.68494,0.97941,0.21602],[0.69525,0.97610,0.21294],
[0.70553,0.97255,0.21032],[0.71577,0.96875,0.20815],
[0.72596,0.96470,0.20640],[0.73610,0.96043,0.20504],
[0.74617,0.95593,0.20406],[0.75617,0.95121,0.20343],
[0.76608,0.94627,0.20311],[0.77591,0.94113,0.20310],
[0.78563,0.93579,0.20336],[0.79524,0.93025,0.20386],
[0.80473,0.92452,0.20459],[0.81410,0.91861,0.20552],
[0.82333,0.91253,0.20663],[0.83241,0.90627,0.20788],
[0.84133,0.89986,0.20926],[0.85010,0.89328,0.21074],
[0.85868,0.88655,0.21230],[0.86709,0.87968,0.21391],
[0.87530,0.87267,0.21555],[0.88331,0.86553,0.21719],
[0.89112,0.85826,0.21880],[0.89870,0.85087,0.22038],
[0.90605,0.84337,0.22188],[0.91317,0.83576,0.22328],
[0.92004,0.82806,0.22456],[0.92666,0.82025,0.22570],
[0.93301,0.81236,0.22667],[0.93909,0.80439,0.22744],
[0.94489,0.79634,0.22800],[0.95039,0.78823,0.22831],
[0.95560,0.78005,0.22836],[0.96049,0.77181,0.22811],
[0.96507,0.76352,0.22754],[0.96931,0.75519,0.22663],
[0.97323,0.74682,0.22536],[0.97679,0.73842,0.22369],
[0.98000,0.73000,0.22161],[0.98289,0.72140,0.21918],
[0.98549,0.71250,0.21650],[0.98781,0.70330,0.21358],
[0.98986,0.69382,0.21043],[0.99163,0.68408,0.20706],
[0.99314,0.67408,0.20348],[0.99438,0.66386,0.19971],
[0.99535,0.65341,0.19577],[0.99607,0.64277,0.19165],
[0.99654,0.63193,0.18738],[0.99675,0.62093,0.18297],
[0.99672,0.60977,0.17842],[0.99644,0.59846,0.17376],
[0.99593,0.58703,0.16899],[0.99517,0.57549,0.16412],
[0.99419,0.56386,0.15918],[0.99297,0.55214,0.15417],
[0.99153,0.54036,0.14910],[0.98987,0.52854,0.14398],
[0.98799,0.51667,0.13883],[0.98590,0.50479,0.13367],
[0.98360,0.49291,0.12849],[0.98108,0.48104,0.12332],
[0.97837,0.46920,0.11817],[0.97545,0.45740,0.11305],
[0.97234,0.44565,0.10797],[0.96904,0.43399,0.10294],
[0.96555,0.42241,0.09798],[0.96187,0.41093,0.09310],
[0.95801,0.39958,0.08831],[0.95398,0.38836,0.08362],
[0.94977,0.37729,0.07905],[0.94538,0.36638,0.07461],
[0.94084,0.35566,0.07031],[0.93612,0.34513,0.06616],
[0.93125,0.33482,0.06218],[0.92623,0.32473,0.05837],
[0.92105,0.31489,0.05475],[0.91572,0.30530,0.05134],
[0.91024,0.29599,0.04814],[0.90463,0.28696,0.04516],
[0.89888,0.27824,0.04243],[0.89298,0.26981,0.03993],
[0.88691,0.26152,0.03753],[0.88066,0.25334,0.03521],
[0.87422,0.24526,0.03297],[0.86760,0.23730,0.03082],
[0.86079,0.22945,0.02875],[0.85380,0.22170,0.02677],
[0.84662,0.21407,0.02487],[0.83926,0.20654,0.02305],
[0.83172,0.19912,0.02131],[0.82399,0.19182,0.01966],
[0.81608,0.18462,0.01809],[0.80799,0.17753,0.01660],
[0.79971,0.17055,0.01520],[0.79125,0.16368,0.01387],
[0.78260,0.15693,0.01264],[0.77377,0.15028,0.01148],
[0.76476,0.14374,0.01041],[0.75556,0.13731,0.00942],
[0.74617,0.13098,0.00851],[0.73661,0.12477,0.00769],
[0.72686,0.11867,0.00695],[0.71692,0.11268,0.00629],
[0.70680,0.10680,0.00571],[0.69650,0.10102,0.00522],
[0.68602,0.09536,0.00481],[0.67535,0.08980,0.00449],
[0.66449,0.08436,0.00424],[0.65345,0.07902,0.00408],
[0.64223,0.07380,0.00401],[0.63082,0.06868,0.00401],
[0.61923,0.06367,0.00410],[0.60746,0.05878,0.00427],
[0.59550,0.05399,0.00453],[0.58336,0.04931,0.00486],
[0.57103,0.04474,0.00529],[0.55852,0.04028,0.00579],
[0.54583,0.03593,0.00638],[0.53295,0.03169,0.00705],
[0.51989,0.02756,0.00780],[0.50664,0.02354,0.00863],
[0.49321,0.01963,0.00955],[0.47960,0.01583,0.01055]])
# Breakpoint positions for a 256-entry colormap, evenly spaced on [0, 1].
_tvals = np.linspace(0, 1, 256)
# Build the (x, y_below, y_above) channel tables expected by add_colormap();
# using the same column twice gives plain linear interpolation.
_turbo_data = {
    channel: np.transpose([_tvals,
                           _turbo_colormap_data[:, idx],
                           _turbo_colormap_data[:, idx]])
    for idx, channel in enumerate(('red', 'green', 'blue'))
}
add_colormap("turbo", _turbo_data)
# The reversed map simply walks the same RGB table back to front.
_turbo_r_colormap_data = np.flip(_turbo_colormap_data, axis=0)
_turbo_r_data = {
    channel: np.transpose([_tvals,
                           _turbo_r_colormap_data[:, idx],
                           _turbo_r_colormap_data[:, idx]])
    for idx, channel in enumerate(('red', 'green', 'blue'))
}
add_colormap("turbo_r", _turbo_r_data)
# Add colormaps from cmocean, if it's installed
if cmocean is not None:
    # Build a new local list with concatenation.  The original code used
    # `cmo_cmapnames += [...]`, which (list __iadd__) extended the list
    # object inside the cmocean module itself — re-running this module
    # would then keep appending duplicate "_r" entries to cmocean.cm.cmapnames.
    cmo_cmapnames = cmocean.cm.cmapnames + \
        ["%s_r" % name for name in cmocean.cm.cmapnames]
    for cmname in cmo_cmapnames:
        # `cmo_cmap` rather than `cm`, to avoid shadowing the conventional
        # matplotlib.cm alias used elsewhere in this file.
        cmo_cmap = getattr(cmocean.cm, cmname)
        # cmocean has a colormap named 'algae', so let's avoid overwriting
        # yt's algae or any other colormap we've already added
        if cmname in yt_colormaps:
            cmname = cmname + '_cmocean'
        yt_colormaps[cmname] = cmo_cmap
        try:
            # Register with matplotlib too, so plt.get_cmap() can find it.
            mcm.register_cmap(cmname, yt_colormaps[cmname])
        except AttributeError:
            # for old versions of matplotlib this won't work, so we avoid
            # erroring out but don't worry about registering with matplotlib
            pass
# Add colormaps in _colormap_data.py that weren't defined here
_vs = np.linspace(0, 1, 256)
for k, v in list(_cm.color_map_luts.items()):
    # Skip anything already known to yt or to matplotlib.
    if k not in yt_colormaps and k not in mcm.cmap_d:
        # v holds the (r, g, b) channel luts; identical below/above values
        # give straight linear interpolation between breakpoints.
        cdict = {channel: np.transpose([_vs, v[j], v[j]])
                 for j, channel in enumerate(('red', 'green', 'blue'))}
        add_colormap(k, cdict)
def _extract_lookup_table(cmap_name):
    """Return [r, g, b, a] channel arrays for a registered colormap.

    Reads the colormap's private lookup table (dropping the trailing
    under/over/bad entries) and attaches a fully-opaque alpha channel.
    """
    cmap = mcm.get_cmap(cmap_name)
    # The lut is built lazily; force initialization if needed.
    if not cmap._isinit:
        cmap._init()
    lut = cmap._lut[:-3]  # last 3 rows are the under/over/bad colors
    r, g, b = lut[:, 0], lut[:, 1], lut[:, 2]
    return [r, g, b, np.ones(b.shape)]
def show_colormaps(subset="all", filename=None):
    """
    Display the colormaps available to yt.

    Note, most functions can use both the matplotlib and the native yt
    colormaps; however, there are some special functions existing within
    image_writer.py (e.g. write_image(), write_bitmap(), etc.) which
    cannot access the matplotlib colormaps.

    In addition to the colormaps listed, one can access the reverse of
    each colormap by appending a "_r" to any map.

    Parameters
    ----------
    subset : string, or list of strings, optional
        valid values : "all", "yt_native", or a list of cmap names
        default : "all"
        As mentioned above, a few functions can only access yt_native
        colormaps.  To display only the yt_native colormaps, set this
        to "yt_native".  If you wish to only see a few colormaps side
        by side, you can include them as a list of colormap names.
        Example: ['algae', 'gist_stern', 'kamae', 'spectral']
    filename : string, optional
        default: None
        If filename is set, then it will save the colormaps to an output
        file.  If it is not set, it will "show" the result interactively.

    Raises
    ------
    AttributeError
        If `subset` is neither "all", "yt_native", nor a list containing
        at least one known colormap name.
    """
    import matplotlib.pyplot as plt
    import matplotlib.cm as cm
    # A vertical 0->1 gradient; each colormap is rendered as one thin strip.
    gradient = np.outer(np.arange(0, 1, 0.01), np.ones(10))
    # NOTE: use logical `and`, not bitwise `&`, when combining booleans.
    if subset == "all":
        maps = [m for m in cm.cmap_d
                if not m.startswith("idl") and not m.endswith("_r")]
    elif subset == "yt_native":
        maps = [m for m in _cm.color_map_luts
                if not m.startswith("idl") and not m.endswith("_r")]
    else:
        try:
            maps = [m for m in cm.cmap_d if m in subset]
            if len(maps) == 0:
                raise AttributeError
        except AttributeError:
            raise AttributeError("show_colormaps requires subset attribute "
                                 "to be 'all', 'yt_native', or a list of "
                                 "valid colormap names.")
    # De-duplicate and show in alphabetical order.
    maps = sorted(set(maps))
    # scale the image size by the number of cmaps
    plt.figure(figsize=(2. * len(maps) / 10., 6))
    plt.subplots_adjust(top=0.7, bottom=0.05, left=0.01, right=0.99)
    ncols = len(maps) + 1
    for i, m in enumerate(maps):
        plt.subplot(1, ncols, i + 1)
        plt.axis("off")
        plt.imshow(gradient, aspect='auto', cmap=plt.get_cmap(m),
                   origin="lower")
        plt.title(m, rotation=90, fontsize=10, verticalalignment='bottom')
    if filename is not None:
        plt.savefig(filename, dpi=100, facecolor='gray')
    else:
        plt.show()
def make_colormap(ctuple_list, name=None, interpolate=True):
    """
    Generate a custom colormap from the colors and spacings you provide.

    Enter a ctuple_list, which consists of tuples of (color, spacing), to
    return a colormap appropriate for use in yt.  If you specify a name, it
    will automatically be added to the current session as a valid colormap.
    The input list is never modified.

    The output colormap is in the format yt expects for adding a colormap
    to the current session: a dictionary with 'red', 'green' and 'blue'
    channels, each a 256x3 array of rows (x, y_below, y_above):

    - First number is the breakpoint position on [0, 1]
    - Second number is the (0..1) value to interpolate to *from below*
    - Third number is the (0..1) value to interpolate to *from above*

    Parameters
    ----------
    ctuple_list : list of (color, float) tuples
        Pairs of (color, interval) identifying the colors to use and the
        intervals they take to change to the next color in the list.  A
        color can either be the string name of a preset color (see below)
        or an array of 3 floats, each the intensity of R, G, and B on a
        scale of 0 to 1.  Any intervals may be given; their total is
        scaled to the 256 output elements.
        If the list ends with a color and a non-zero interval, a white
        0-interval stop is appended to finish the interpolation.  To
        avoid finishing with white, specify your own zero-interval color
        at the end.
    name : string, optional
        If you wish this colormap to be added as a valid colormap to the
        current session, specify a name here.  Default: None
    interpolate : boolean, optional
        Whether the colormap interpolates between the colors provided or
        just gives solid colors across the intervals.  Default: True

    Raises
    ------
    ValueError
        If `ctuple_list` is empty.
    KeyError
        If a string color name is not one of the presets.

    Preset colors: 'white', 'gray', 'dgray', 'black', 'blue', 'dblue',
    'purple', 'dpurple', 'dred', 'red', 'orange', 'dorange', 'yellow',
    'dyellow', 'green', 'dgreen'.

    Examples
    --------
    A colormap that starts at black with equal intervals interpolating
    through green, blue, then red:

    >>> cm = make_colormap([('black', 10), ('green', 10), ('blue', 10),
    ...                     ('red', 0)])

    Five equal blocks of solid major colors, added to the session as
    "steps":

    >>> make_colormap([('red', 10), ('orange', 10), ('yellow', 10),
    ...                ('green', 10), ('blue', 10)], name="steps",
    ...               interpolate=False)

    The French flag from raw RGB triples:

    >>> make_colormap([([0,0,1], 10), ([1,1,1], 10), ([1,0,0], 10)],
    ...               name='french_flag', interpolate=False)
    >>> show_colormaps(['french_flag'])
    """
    # Aliases for the preset colors, as RGB arrays on a 0..1 scale.
    color_dict = {
        'white' : np.array([255, 255, 255 ])/255.,
        'gray'  : np.array([130, 130, 130])/255.,
        'dgray' : np.array([80, 80, 80])/255.,
        'black' : np.array([0, 0, 0])/255.,
        'blue'  : np.array([0, 0, 255])/255.,
        'dblue' : np.array([0, 0, 160])/255.,
        'purple': np.array([100, 0, 200])/255.,
        'dpurple': np.array([66, 0, 133])/255.,
        'dred'  : np.array([160, 0, 0])/255.,
        'red'   : np.array([255, 0, 0])/255.,
        'orange': np.array([255, 128, 0])/255.,
        'dorange': np.array([200, 100, 0])/255.,
        'yellow': np.array([255, 255, 0])/255.,
        'dyellow': np.array([200, 200, 0])/255.,
        'green' : np.array([0, 255, 0])/255.,
        'dgreen': np.array([0, 160, 0])/255.}

    if not ctuple_list:
        raise ValueError("ctuple_list must contain at least one "
                         "(color, interval) tuple.")

    # Resolve color names into RGB arrays on a *copy*, so the caller's
    # list is never mutated (the original implementation edited it in
    # place, surprising callers that reused the list).
    ctuples = [(color_dict[color] if isinstance(color, str) else color,
                interval)
               for color, interval in ctuple_list]

    # A non-zero final interval has no following color to interpolate
    # toward, so finish the previous interval with a zero-length white stop.
    if ctuples[-1][1] != 0:
        ctuples.append((color_dict['white'], 0))

    # Scale the user-supplied intervals onto the 256 output elements.
    total_interval = sum(interval for _, interval in ctuples)
    scale = 256. / total_interval

    # Step through each ctuple and interpolate from one color to the
    # next over the interval provided.
    cmap = np.zeros((256, 3))
    rolling_index = 0
    for i in range(len(ctuples) - 1):
        color, interval = ctuples[i]
        interval *= scale
        next_index = rolling_index + interval
        # Without interpolation the segment is a solid block of `color`.
        next_color = ctuples[i + 1][0] if interpolate else color
        # Round so segment boundaries land on discrete indices.
        num = int(np.round(next_index) - np.round(rolling_index))
        lo = int(np.rint(rolling_index))
        hi = int(np.rint(next_index))
        for j in range(3):
            cmap[lo:hi, j] = np.linspace(color[j], next_color[j], num=num)
        rolling_index = next_index

    # Package as the (x, y_below, y_above) channel arrays expected by
    # add_colormap(); identical y values give plain linear interpolation.
    _vs = np.linspace(0, 1, 256)
    cdict = {channel: np.transpose([_vs, cmap[:, j], cmap[:, j]])
             for j, channel in enumerate(('red', 'green', 'blue'))}
    if name is not None:
        add_colormap(name, cdict)
    return cdict
| 42.822609 | 261 | 0.598871 |
fbd66bb18e9c66ceb8bdd076bd052e79e578640a | 312 | py | Python | dependencies/georeference maps/pythongis/vector/fileformats/thirdparty/PyDTA/testvaluelabels.py | karimbahgat/AutoMap | eae52f16b7ce71cb2b4b7ae67cf6e4680ea2194f | [
"MIT"
] | 4 | 2015-12-05T14:31:55.000Z | 2018-02-09T05:54:36.000Z | dependencies/georeference maps/pythongis/vector/fileformats/thirdparty/PyDTA/testvaluelabels.py | karimbahgat/AutoMap | eae52f16b7ce71cb2b4b7ae67cf6e4680ea2194f | [
"MIT"
] | 1 | 2022-01-13T02:52:09.000Z | 2022-01-13T02:52:09.000Z | dependencies/georeference maps/pythongis/vector/fileformats/thirdparty/PyDTA/testvaluelabels.py | karimbahgat/AutoMap | eae52f16b7ce71cb2b4b7ae67cf6e4680ea2194f | [
"MIT"
] | 1 | 2018-10-24T01:08:11.000Z | 2018-10-24T01:08:11.000Z |
import sys
sys.path.append("C:/Users/kimo/Documents/GitHub")
import PyDTA
dta = PyDTA.Reader(open("Junk/bleh/BDIR51FL.DTA","rb"))
print dta
for var in dta.variables():
print var.name, var.label
if var.value_format:
print var.value_format
print dta.value_labels()[var.value_format]
| 17.333333 | 55 | 0.695513 |
1fa1dbf4062b705acbcc6f78e5f7f330211aa5cc | 1,534 | py | Python | setup.py | chatziko/ha-philipsjs | 54581fe81b15389cb5ee75d0c353550e9b6c89fa | [
"MIT"
] | 17 | 2016-11-06T18:52:03.000Z | 2021-05-09T18:10:06.000Z | setup.py | chatziko/ha-philipsjs | 54581fe81b15389cb5ee75d0c353550e9b6c89fa | [
"MIT"
] | 20 | 2017-09-01T16:08:48.000Z | 2022-03-19T22:51:15.000Z | setup.py | chatziko/ha-philipsjs | 54581fe81b15389cb5ee75d0c353550e9b6c89fa | [
"MIT"
] | 22 | 2016-12-13T15:24:06.000Z | 2022-03-18T20:28:13.000Z | import os
from setuptools import setup, find_packages
def readme():
    """Read README.md and return its contents (used as the long description)."""
    with open('README.md') as readme_file:
        contents = readme_file.read()
    return contents
# Distribution metadata for the ha-philipsjs package.
PACKAGE_NAME = 'ha-philipsjs'
# Absolute path of this setup.py's directory.
# NOTE(review): not referenced below — presumably kept for future data-file paths.
HERE = os.path.abspath(os.path.dirname(__file__))
VERSION = '2.8.0'
# Every package in the tree except tests and build artifacts.
PACKAGES = find_packages(exclude=['tests', 'tests.*', 'dist', 'ccu', 'build'])
# Runtime dependencies installed with the package.
REQUIRES = [
    "cryptography",
    "httpx",
]
setup(
    name=PACKAGE_NAME,
    version=VERSION,
    license='MIT License',
    url='https://github.com/danielperna84/ha-philipsjs',
    # Tarball for this exact release tag.
    download_url='https://github.com/danielperna84/ha-philipsjs/tarball/'+VERSION,
    author='Daniel Perna',
    author_email='danielperna84@gmail.com',
    description='jointSPACE API for Home-Assistant',
    # README.md becomes the PyPI long description.
    long_description=readme(),
    long_description_content_type='text/markdown',
    packages=PACKAGES,
    include_package_data=True,
    zip_safe=False,
    platforms='any',
    install_requires=REQUIRES,
    keywords=['jointSPACE'],
    classifiers=[
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
        'Programming Language :: Python :: 3.8'
    ],
    # Extra dependencies for running the test suite: pip install ha-philipsjs[tests]
    extras_require={
        'tests': [
            'pytest>3.6.4',
            'pytest-cov<2.6',
            'pytest-aiohttp',
            'coveralls',
            'pytest-mock',
            'respx>=0.17.0',
        ]
    },
    python_requires='>=3.8'
)
| 28.407407 | 86 | 0.563885 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.