"""Tests of the climate entity of the balboa integration."""
from __future__ import annotations
from unittest.mock import MagicMock, patch
import pytest
from homeassistant.components.climate.const import (
ATTR_FAN_MODE,
ATTR_HVAC_ACTION,
ATTR_HVAC_MODES,
ATTR_MAX_TEMP,
ATTR_MIN_TEMP,
ATTR_PRESET_MODE,
ATTR_PRESET_MODES,
CURRENT_HVAC_HEAT,
CURRENT_HVAC_IDLE,
FAN_HIGH,
FAN_LOW,
FAN_MEDIUM,
FAN_OFF,
HVAC_MODE_AUTO,
HVAC_MODE_HEAT,
HVAC_MODE_OFF,
SUPPORT_FAN_MODE,
SUPPORT_PRESET_MODE,
SUPPORT_TARGET_TEMPERATURE,
)
from homeassistant.const import ATTR_TEMPERATURE, TEMP_FAHRENHEIT
from homeassistant.core import HomeAssistant
from . import init_integration
from tests.components.climate import common
FAN_SETTINGS = [
FAN_OFF,
FAN_LOW,
FAN_MEDIUM,
FAN_HIGH,
]
HVAC_SETTINGS = [
HVAC_MODE_HEAT,
HVAC_MODE_OFF,
HVAC_MODE_AUTO,
]
ENTITY_CLIMATE = "climate.fakespa_climate"
async def test_spa_defaults(hass: HomeAssistant, client: MagicMock) -> None:
"""Test supported features flags."""
await init_integration(hass)
state = hass.states.get(ENTITY_CLIMATE)
assert state
assert (
state.attributes["supported_features"]
== SUPPORT_TARGET_TEMPERATURE | SUPPORT_PRESET_MODE
)
assert state.state == HVAC_MODE_HEAT
assert state.attributes[ATTR_MIN_TEMP] == 10.0
assert state.attributes[ATTR_MAX_TEMP] == 40.0
assert state.attributes[ATTR_PRESET_MODE] == "Ready"
assert state.attributes[ATTR_HVAC_ACTION] == CURRENT_HVAC_IDLE
async def test_spa_defaults_fake_tscale(hass: HomeAssistant, client: MagicMock) -> None:
"""Test supported features flags."""
client.get_tempscale.return_value = 1
await init_integration(hass)
state = hass.states.get(ENTITY_CLIMATE)
assert state
assert (
state.attributes["supported_features"]
== SUPPORT_TARGET_TEMPERATURE | SUPPORT_PRESET_MODE
)
assert state.state == HVAC_MODE_HEAT
assert state.attributes[ATTR_MIN_TEMP] == 10.0
assert state.attributes[ATTR_MAX_TEMP] == 40.0
assert state.attributes[ATTR_PRESET_MODE] == "Ready"
assert state.attributes[ATTR_HVAC_ACTION] == CURRENT_HVAC_IDLE
async def test_spa_with_blower(hass: HomeAssistant, client: MagicMock) -> None:
"""Test supported features flags."""
client.have_blower.return_value = True
config_entry = await init_integration(hass)
# force a refresh
await client.new_data_cb()
await hass.async_block_till_done()
state = hass.states.get(ENTITY_CLIMATE)
assert state
assert (
state.attributes["supported_features"]
== SUPPORT_TARGET_TEMPERATURE | SUPPORT_PRESET_MODE | SUPPORT_FAN_MODE
)
for fan_state in range(4):
# set blower
state = await _patch_blower(hass, config_entry, fan_state, client)
assert state
assert state.attributes[ATTR_FAN_MODE] == FAN_SETTINGS[fan_state]
# test the nonsense checks
for fan_state in (None, 70): # type: ignore[assignment]
state = await _patch_blower(hass, config_entry, fan_state, client)
assert state
assert state.attributes[ATTR_FAN_MODE] == FAN_OFF
async def test_spa_temperature(hass: HomeAssistant, client: MagicMock) -> None:
"""Test spa temperature settings."""
config_entry = await init_integration(hass)
# flip the spa into F
# set temp to a valid number
state = await _patch_spa_settemp(hass, config_entry, 0, 100.0, client)
assert state
assert state.attributes.get(ATTR_TEMPERATURE) == 38.0
async def test_spa_temperature_unit(hass: HomeAssistant, client: MagicMock) -> None:
"""Test temperature unit conversions."""
with patch.object(hass.config.units, "temperature_unit", TEMP_FAHRENHEIT):
config_entry = await init_integration(hass)
state = await _patch_spa_settemp(hass, config_entry, 0, 15.4, client)
assert state
assert state.attributes.get(ATTR_TEMPERATURE) == 15.0
async def test_spa_hvac_modes(hass: HomeAssistant, client: MagicMock) -> None:
"""Test hvac modes."""
config_entry = await init_integration(hass)
# try out the different heat modes
for heat_mode in range(2):
state = await _patch_spa_heatmode(hass, config_entry, heat_mode, client)
assert state
modes = state.attributes.get(ATTR_HVAC_MODES)
assert [HVAC_MODE_AUTO, HVAC_MODE_HEAT, HVAC_MODE_OFF] == modes
assert state.state == HVAC_SETTINGS[heat_mode]
with pytest.raises(ValueError):
await _patch_spa_heatmode(hass, config_entry, 2, client)
async def test_spa_hvac_action(hass: HomeAssistant, client: MagicMock) -> None:
"""Test setting of the HVAC action."""
config_entry = await init_integration(hass)
# try out the different heat states
state = await _patch_spa_heatstate(hass, config_entry, 1, client)
assert state
assert state.attributes[ATTR_HVAC_ACTION] == CURRENT_HVAC_HEAT
state = await _patch_spa_heatstate(hass, config_entry, 0, client)
assert state.attributes[ATTR_HVAC_ACTION] == CURRENT_HVAC_IDLE
async def test_spa_preset_modes(hass: HomeAssistant, client: MagicMock) -> None:
"""Test the various preset modes."""
await init_integration(hass)
state = hass.states.get(ENTITY_CLIMATE)
assert state
modes = state.attributes.get(ATTR_PRESET_MODES)
assert ["Ready", "Rest", "Ready in Rest"] == modes
# Put it in Ready and Rest
modelist = ["Ready", "Rest"]
for mode in modelist:
client.heatmode = modelist.index(mode)
await common.async_set_preset_mode(hass, mode, ENTITY_CLIMATE)
await client.new_data_cb()
await hass.async_block_till_done()
state = hass.states.get(ENTITY_CLIMATE)
assert state
assert state.attributes[ATTR_PRESET_MODE] == mode
# put it in RNR and test assertion
client.heatmode = 2
with pytest.raises(ValueError):
await common.async_set_preset_mode(hass, 2, ENTITY_CLIMATE)
# Helpers
async def _patch_blower(hass, config_entry, fan_state, client):
"""Patch the blower state."""
client.get_blower.return_value = fan_state
    if fan_state is not None and fan_state < len(FAN_SETTINGS):
await common.async_set_fan_mode(hass, FAN_SETTINGS[fan_state])
await client.new_data_cb()
await hass.async_block_till_done()
return hass.states.get(ENTITY_CLIMATE)
async def _patch_spa_settemp(hass, config_entry, tscale, settemp, client):
"""Patch the settemp."""
client.get_tempscale.return_value = tscale
client.get_settemp.return_value = settemp
await common.async_set_temperature(
hass, temperature=settemp, entity_id=ENTITY_CLIMATE
)
await client.new_data_cb()
await hass.async_block_till_done()
return hass.states.get(ENTITY_CLIMATE)
async def _patch_spa_heatmode(hass, config_entry, heat_mode, client):
"""Patch the heatmode."""
client.heatmode = heat_mode
await common.async_set_hvac_mode(hass, HVAC_SETTINGS[heat_mode], ENTITY_CLIMATE)
await client.new_data_cb()
await hass.async_block_till_done()
return hass.states.get(ENTITY_CLIMATE)
async def _patch_spa_heatstate(hass, config_entry, heat_state, client):
"""Patch the heatmode."""
client.get_heatstate.return_value = heat_state
await common.async_set_hvac_mode(hass, HVAC_SETTINGS[heat_state], ENTITY_CLIMATE)
await client.new_data_cb()
await hass.async_block_till_done()
return hass.states.get(ENTITY_CLIMATE)
# End of tests/components/balboa/test_climate.py (repo: home-assistant/home-assistant, license: apache-2.0)
import logging
import warnings
import cherrypy
from cherrypy._cptools import Tool, _getargs
from sqlalchemy.orm import scoped_session, sessionmaker
from sqlalchemy.exc import SQLAlchemyError
__all__ = ["SQLAlchemySessionTool"]
logger = logging.getLogger(__name__)
class MultiHookPointTool(Tool):
"""MultiHookPointTool provides subclasses the infrastructure for writing
Tools that need to run at more than one request hook point.
    Subclasses can simply provide methods named after the hook points and
    MultiHookPointTool will automatically wire each one up to the request
    hook points when turned on in the configuration.
"""
def __init__(self, name=None, priority=50):
"""Subclasses of MultiHookPointTool do not need to provide a default
hook point and a callable because there's no such thing. As such,
subclasses will most likely only provide a default priority for the
        hook point callables they provide.
Same as the Tool class CherryPy supplies, the `name` parameter is set
automatically when a MultiHookPointTool instance is attached to a
ToolBox if not supplied.
Lastly, similar to Tool, this constructor will also loop through all
the hooks and attach their arguments directly to the Tool instance.
The only difference is all the tool arguments are prefixed with their
hook point names to avoid name conflicts.
Example::
app_config = {
"/": {
"tools.my_multipoint_tool.on": True,
"tools.my_multipoint_tools.before_handler.echo": True,
"tools.my_multipoint_tools.before_finalize.priority": 80
}
}
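
        A minimal subclass sketch (the class name and hook bodies are
        hypothetical, for illustration only)::

            class MyMultiPointTool(MultiHookPointTool):
                def before_handler(self, echo=False):
                    if echo:
                        cherrypy.log("before_handler fired")
                def before_finalize(self):
                    cherrypy.log("before_finalize fired")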
"""
self._name = name
self._priority = priority
self._setargs()
def _setargs(self):
for hook_point in cherrypy._cprequest.hookpoints:
if hasattr(self, hook_point):
hook = getattr(self, hook_point)
try:
for arg in _getargs(hook):
setattr(self, hook_point + "_" + arg, None)
# IronPython 1.0 raises NotImplementedError because
# inspect.getargspec tries to access Python bytecode
# in co_code attribute.
except NotImplementedError:
pass
# IronPython 1B1 may raise IndexError in some cases,
# but if we trap it here it doesn't prevent CP from working.
except IndexError:
pass
def _setup(self):
request = cherrypy.request
conf = self._merged_args()
for hook_point in cherrypy._cprequest.hookpoints:
if hasattr(self, hook_point):
hook = getattr(self, hook_point)
                if not callable(hook):
                    warnings.warn("%r is not a callable." % hook)
                    continue
hook_conf = {}
                for k, v in conf.items():
if k.startswith(hook_point):
k = k.replace(hook_point, "").split(".", 1)[-1]
hook_conf[k] = v
priority_key = hook_point + ".priority"
hook_priority = (self._priority
if priority_key not in hook_conf
else hook_conf[priority_key])
request.hooks.attach(hook_point, hook, hook_priority, **hook_conf)
def __call__(self, *args, **kwargs):
raise NotImplementedError("This %r instance cannot be called directly." %
self.__class__.__name__)
class SQLAlchemySessionTool(MultiHookPointTool):
"""A CherryPy tool to process SQLAlchemy ORM sessions for requests.
    This tool sets up a scoped, possibly multi-engine SQLAlchemy ORM session at
    `cherrypy.request.orm_session` at the beginning of a request. This tool does
    not commit changes for you automatically; you must do so explicitly inside
    your controller code. At the end of each request, this tool rolls back
    if errors occurred. The session is guaranteed to be removed from the request
    in the end.
    As this tool hooks up three callables to the request, it also accepts three
    `priority` options - `on_start_resource.priority`,
    `before_finalize.priority` and `after_error_response.priority`. The `priority`
    option is still accepted as a default for all three hook points.
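
    Example configuration (the registered tool name ``orm_session`` is an
    assumption for illustration; use whatever name the tool is attached
    under)::

        app_config = {
            "/": {
                "tools.orm_session.on": True,
                "tools.orm_session.on_start_resource.priority": 10,
                "tools.orm_session.before_finalize.priority": 70
            }
        }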
"""
def on_start_resource(self, bindings=None):
if bindings:
if len(bindings) > 1:
Session = scoped_session(sessionmaker(twophase=True))
else:
Session = scoped_session(sessionmaker())
session_bindings = {}
engine_bindings = cherrypy.engine.sqlalchemy.engine_bindings
for binding in bindings:
session_bindings[binding] = engine_bindings[binding]
Session.configure(binds=session_bindings)
else:
Session = scoped_session(sessionmaker())
Session.configure(bind=cherrypy.engine.sqlalchemy.engine)
cherrypy.request.orm_session = Session
def before_finalize(self):
req = cherrypy.request
session = req.orm_session
session.remove()
def after_error_response(self):
req = cherrypy.request
session = req.orm_session
try:
session.rollback()
session.expunge_all()
except SQLAlchemyError as e:
logger.error(e, exc_info=True)
cherrypy.log.error(msg=e, severity=logging.ERROR, traceback=True)
finally:
session.remove()
# End of src/blueberrypy/tools.py (repo: wyuenho/blueberrypy, license: bsd-3-clause)
# should re-write compiled functions to take a local and global dict
# as input.
from __future__ import absolute_import, print_function
import sys
import os
from . import ext_tools
from . import catalog
from . import common_info
from numpy.core.multiarray import _get_ndarray_c_version
ndarray_api_version = '/* NDARRAY API VERSION %x */' % (_get_ndarray_c_version(),)
# not an easy way for the user_path_list to come in here.
# the PYTHONCOMPILED environment variable offers the most hope.
function_catalog = catalog.catalog()
class inline_ext_function(ext_tools.ext_function):
# Some specialization is needed for inline extension functions
def function_declaration_code(self):
code = 'static PyObject* %s(PyObject*self, PyObject* args)\n{\n'
return code % self.name
def template_declaration_code(self):
code = 'template<class T>\n' \
'static PyObject* %s(PyObject*self, PyObject* args)\n{\n'
return code % self.name
def parse_tuple_code(self):
""" Create code block for PyArg_ParseTuple. Variable declarations
for all PyObjects are done also.
This code got a lot uglier when I added local_dict...
"""
declare_return = 'py::object return_val;\n' \
'int exception_occurred = 0;\n' \
'PyObject *py__locals = NULL;\n' \
'PyObject *py__globals = NULL;\n'
py_objects = ', '.join(self.arg_specs.py_pointers())
if py_objects:
declare_py_objects = 'PyObject ' + py_objects + ';\n'
else:
declare_py_objects = ''
py_vars = ' = '.join(self.arg_specs.py_variables())
if py_vars:
init_values = py_vars + ' = NULL;\n\n'
else:
init_values = ''
parse_tuple = 'if(!PyArg_ParseTuple(args,"OO:compiled_func",'\
'&py__locals,'\
'&py__globals))\n'\
' return NULL;\n'
return declare_return + declare_py_objects + \
init_values + parse_tuple
def arg_declaration_code(self):
"""Return the declaration code as a string."""
arg_strings = [arg.declaration_code(inline=1)
for arg in self.arg_specs]
return "".join(arg_strings)
def arg_cleanup_code(self):
"""Return the cleanup code as a string."""
arg_strings = [arg.cleanup_code() for arg in self.arg_specs]
return "".join(arg_strings)
def arg_local_dict_code(self):
"""Return the code to create the local dict as a string."""
arg_strings = [arg.local_dict_code() for arg in self.arg_specs]
return "".join(arg_strings)
def function_code(self):
from .ext_tools import indent
decl_code = indent(self.arg_declaration_code(),4)
cleanup_code = indent(self.arg_cleanup_code(),4)
function_code = indent(self.code_block,4)
# local_dict_code = indent(self.arg_local_dict_code(),4)
try_code = \
' try \n' \
' { \n' \
'#if defined(__GNUC__) || defined(__ICC)\n' \
' PyObject* raw_locals __attribute__ ((unused));\n' \
' PyObject* raw_globals __attribute__ ((unused));\n' \
'#else\n' \
' PyObject* raw_locals;\n' \
' PyObject* raw_globals;\n' \
'#endif\n' \
' raw_locals = py_to_raw_dict(py__locals,"_locals");\n' \
' raw_globals = py_to_raw_dict(py__globals,"_globals");\n' \
' /* argument conversion code */ \n' \
+ decl_code + \
' /* inline code */ \n' \
+ function_code + \
' /*I would like to fill in changed locals and globals here...*/ \n' \
' }\n'
catch_code = "catch(...) \n" \
"{ \n" + \
" return_val = py::object(); \n" \
" exception_occurred = 1; \n" \
"} \n"
return_code = " /* cleanup code */ \n" + \
cleanup_code + \
" if(!(PyObject*)return_val && !exception_occurred)\n" \
" {\n \n" \
" return_val = Py_None; \n" \
" }\n \n" \
" return return_val.disown(); \n" \
"} \n"
all_code = self.function_declaration_code() + \
indent(self.parse_tuple_code(),4) + \
try_code + \
indent(catch_code,4) + \
return_code
return all_code
def python_function_definition_code(self):
args = (self.name, self.name)
function_decls = '{"%s",(PyCFunction)%s , METH_VARARGS},\n' % args
return function_decls
class inline_ext_module(ext_tools.ext_module):
def __init__(self,name,compiler=''):
ext_tools.ext_module.__init__(self,name,compiler)
self._build_information.append(common_info.inline_info())
function_cache = {}
def inline(code,arg_names=[],local_dict=None, global_dict=None,
force=0,
compiler='',
verbose=0,
support_code=None,
headers=[],
customize=None,
type_converters=None,
auto_downcast=1,
newarr_converter=0,
**kw):
"""
Inline C/C++ code within Python scripts.
``inline()`` compiles and executes C/C++ code on the fly. Variables
in the local and global Python scope are also available in the
C/C++ code. Values are passed to the C/C++ code by assignment
    much like variables are passed into a standard Python
function. Values are returned from the C/C++ code through a
special argument called return_val. Also, the contents of
mutable objects can be changed within the C/C++ code and the
changes remain after the C code exits and returns to Python.
inline has quite a few options as listed below. Also, the keyword
arguments for distutils extension modules are accepted to
specify extra information needed for compiling.
Parameters
----------
code : string
A string of valid C++ code. It should not specify a return
statement. Instead it should assign results that need to be
returned to Python in the `return_val`.
arg_names : [str], optional
A list of Python variable names that should be transferred from
        Python into the C/C++ code. It defaults to an empty list.
local_dict : dict, optional
If specified, it is a dictionary of values that should be used as
the local scope for the C/C++ code. If local_dict is not
specified the local dictionary of the calling function is used.
global_dict : dict, optional
If specified, it is a dictionary of values that should be used as
the global scope for the C/C++ code. If `global_dict` is not
specified, the global dictionary of the calling function is used.
force : {0, 1}, optional
If 1, the C++ code is compiled every time inline is called. This
is really only useful for debugging, and probably only useful if
        you're editing `support_code` a lot.
compiler : str, optional
The name of compiler to use when compiling. On windows, it
understands 'msvc' and 'gcc' as well as all the compiler names
understood by distutils. On Unix, it'll only understand the
values understood by distutils. (I should add 'gcc' though to
this).
On windows, the compiler defaults to the Microsoft C++ compiler.
If this isn't available, it looks for mingw32 (the gcc compiler).
On Unix, it'll probably use the same compiler that was used when
compiling Python. Cygwin's behavior should be similar.
verbose : {0,1,2}, optional
        Specifies how much information is printed during the compile
phase of inlining code. 0 is silent (except on windows with msvc
where it still prints some garbage). 1 informs you when compiling
starts, finishes, and how long it took. 2 prints out the command
        lines for the compilation process and can be useful if you're having
        problems getting code to work. It's handy for finding the name of
the .cpp file if you need to examine it. verbose has no effect if
the compilation isn't necessary.
support_code : str, optional
A string of valid C++ code declaring extra code that might be
needed by your compiled function. This could be declarations of
functions, classes, or structures.
headers : [str], optional
A list of strings specifying header files to use when compiling
the code. The list might look like ``["<vector>","'my_header'"]``.
        Note that the header strings need to be in a form that can be
pasted at the end of a ``#include`` statement in the C++ code.
customize : base_info.custom_info, optional
An alternative way to specify `support_code`, `headers`, etc. needed
by the function. See :mod:`scipy.weave.base_info` for more
details. (not sure this'll be used much).
type_converters : [type converters], optional
These guys are what convert Python data types to C/C++ data types.
If you'd like to use a different set of type conversions than the
default, specify them here. Look in the type conversions section
of the main documentation for examples.
auto_downcast : {1,0}, optional
This only affects functions that have numpy arrays as input
variables. Setting this to 1 will cause all floating point values
to be cast as float instead of double if all the Numeric arrays
are of type float. If even one of the arrays has type double or
double complex, all variables maintain their standard
types.
newarr_converter : int, optional
Unused.
Other Parameters
----------------
Relevant :mod:`distutils` keywords. These are duplicated from Greg Ward's
:class:`distutils.extension.Extension` class for convenience:
sources : [string]
List of source filenames, relative to the distribution root
(where the setup script lives), in Unix form (slash-separated)
for portability. Source files may be C, C++, SWIG (.i),
platform-specific resource files, or whatever else is recognized
by the "build_ext" command as source for a Python extension.
.. note:: The `module_path` file is always appended to the front of
this list
include_dirs : [string]
List of directories to search for C/C++ header files (in Unix
form for portability).
define_macros : [(name : string, value : string|None)]
List of macros to define; each macro is defined using a 2-tuple,
where 'value' is either the string to define it to or None to
define it without a particular value (equivalent of "#define
FOO" in source or -DFOO on Unix C compiler command line).
undef_macros : [string]
List of macros to undefine explicitly.
library_dirs : [string]
List of directories to search for C/C++ libraries at link time.
libraries : [string]
List of library names (not filenames or paths) to link against.
runtime_library_dirs : [string]
List of directories to search for C/C++ libraries at run time
(for shared extensions, this is when the extension is loaded).
extra_objects : [string]
List of extra files to link with (e.g. object files not implied
by 'sources', static libraries that must be explicitly specified,
binary resource files, etc.)
extra_compile_args : [string]
Any extra platform- and compiler-specific information to use
when compiling the source files in 'sources'. For platforms and
compilers where "command line" makes sense, this is typically a
list of command-line arguments, but for other platforms it could
be anything.
extra_link_args : [string]
Any extra platform- and compiler-specific information to use
when linking object files together to create the extension (or
to create a new static Python interpreter). Similar
interpretation as for 'extra_compile_args'.
export_symbols : [string]
List of symbols to be exported from a shared extension. Not
used on all platforms, and not generally necessary for Python
extensions, which typically export exactly one symbol: "init" +
extension_name.
swig_opts : [string]
Any extra options to pass to SWIG if a source file has the .i
extension.
depends : [string]
List of files that the extension depends on.
language : string
Extension language (i.e. "c", "c++", "objc"). Will be detected
from the source extensions if not provided.
See Also
--------
distutils.extension.Extension : Describes additional parameters.
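
    Examples
    --------
    A minimal usage sketch (not from this module's tests; assumes a working
    C/C++ compiler is available on the path)::

        >>> from scipy import weave
        >>> a = 3
        >>> weave.inline('return_val = a * 2;', ['a'])
        6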
"""
# this grabs the local variables from the *previous* call
# frame -- that is the locals from the function that called
# inline.
global function_catalog
call_frame = sys._getframe().f_back
if local_dict is None:
local_dict = call_frame.f_locals
if global_dict is None:
global_dict = call_frame.f_globals
if force:
module_dir = global_dict.get('__file__',None)
func = compile_function(code,arg_names,local_dict,
global_dict,module_dir,
compiler=compiler,
verbose=verbose,
support_code=support_code,
headers=headers,
customize=customize,
type_converters=type_converters,
auto_downcast=auto_downcast,
**kw)
function_catalog.add_function(code,func,module_dir)
results = attempt_function_call(code,local_dict,global_dict)
else:
# 1. try local cache
try:
            results = function_cache[code](local_dict, global_dict)
return results
except TypeError as msg:
msg = str(msg).strip()
if msg[:16] == "Conversion Error":
pass
else:
raise TypeError(msg)
except NameError as msg:
msg = str(msg).strip()
if msg[:16] == "Conversion Error":
pass
else:
raise NameError(msg)
except KeyError:
pass
# 2. try function catalog
try:
results = attempt_function_call(code,local_dict,global_dict)
# 3. build the function
except ValueError:
# compile the library
module_dir = global_dict.get('__file__',None)
func = compile_function(code,arg_names,local_dict,
global_dict,module_dir,
compiler=compiler,
verbose=verbose,
support_code=support_code,
headers=headers,
customize=customize,
type_converters=type_converters,
auto_downcast=auto_downcast,
**kw)
function_catalog.add_function(code,func,module_dir)
results = attempt_function_call(code,local_dict,global_dict)
return results
def attempt_function_call(code,local_dict,global_dict):
# we try 3 levels here -- a local cache first, then the
# catalog cache, and then persistent catalog.
#
global function_catalog
# 1. try local cache
try:
        results = function_cache[code](local_dict, global_dict)
return results
except TypeError as msg:
msg = str(msg).strip()
if msg[:16] == "Conversion Error":
pass
else:
raise TypeError(msg)
except NameError as msg:
msg = str(msg).strip()
if msg[:16] == "Conversion Error":
pass
else:
raise NameError(msg)
except KeyError:
pass
# 2. try catalog cache.
function_list = function_catalog.get_functions_fast(code)
for func in function_list:
try:
            results = func(local_dict, global_dict)
function_catalog.fast_cache(code,func)
function_cache[code] = func
return results
except TypeError as msg: # should specify argument types here.
# This should really have its own error type, instead of
# checking the beginning of the message, but I don't know
# how to define that yet.
msg = str(msg)
if msg[:16] == "Conversion Error":
pass
else:
raise TypeError(msg)
except NameError as msg:
msg = str(msg).strip()
if msg[:16] == "Conversion Error":
pass
else:
raise NameError(msg)
# 3. try persistent catalog
module_dir = global_dict.get('__file__',None)
function_list = function_catalog.get_functions(code,module_dir)
for func in function_list:
try:
            results = func(local_dict, global_dict)
function_catalog.fast_cache(code,func)
function_cache[code] = func
return results
        except Exception:  # should specify argument types here.
pass
# if we get here, the function wasn't found
raise ValueError('function with correct signature not found')
def inline_function_code(code,arg_names,local_dict=None,
global_dict=None,auto_downcast=1,
type_converters=None,compiler=''):
call_frame = sys._getframe().f_back
if local_dict is None:
local_dict = call_frame.f_locals
if global_dict is None:
global_dict = call_frame.f_globals
ext_func = inline_ext_function('compiled_func',code,arg_names,
local_dict,global_dict,auto_downcast,
type_converters=type_converters)
from . import build_tools
compiler = build_tools.choose_compiler(compiler)
ext_func.set_compiler(compiler)
return ext_func.function_code()
def compile_function(code,arg_names,local_dict,global_dict,
module_dir,
compiler='',
verbose=1,
support_code=None,
headers=[],
customize=None,
type_converters=None,
auto_downcast=1,
**kw):
# figure out where to store and what to name the extension module
# that will contain the function.
# storage_dir = catalog.intermediate_dir()
code = ndarray_api_version + '\n' + code
module_path = function_catalog.unique_module_name(code, module_dir)
storage_dir, module_name = os.path.split(module_path)
mod = inline_ext_module(module_name,compiler)
# create the function. This relies on the auto_downcast and
# type factories setting
ext_func = inline_ext_function('compiled_func',code,arg_names,
local_dict,global_dict,auto_downcast,
type_converters=type_converters)
mod.add_function(ext_func)
# if customize (a custom_info object), then set the module customization.
if customize:
mod.customize = customize
# add the extra "support code" needed by the function to the module.
if support_code:
mod.customize.add_support_code(support_code)
# add the extra headers needed by the function to the module.
for header in headers:
mod.customize.add_header(header)
# it's nice to let the users know when anything gets compiled, as the
# slowdown is very noticeable.
if verbose > 0:
print('<weave: compiling>')
# compile code in correct location, with the given compiler and verbosity
# setting. All input keywords are passed through to distutils
mod.compile(location=storage_dir,compiler=compiler,
verbose=verbose, **kw)
# import the module and return the function. Make sure
# the directory where it lives is in the python path.
try:
sys.path.insert(0,storage_dir)
exec('import ' + module_name)
func = eval(module_name+'.compiled_func')
finally:
del sys.path[0]
return func
# End of scipy/weave/inline_tools.py (repo: juliantaylor/scipy, license: bsd-3-clause)
"""
This tests VTK's use of Piston's slice operator.
"""
import sys
import vtk
from vtk.test import Testing
from PistonTestCommon import *
def widgetCallBack(obj,event):
global plane, filter
obj.GetPlane(plane)
filter.Update() #TODO: Why is this necessary?
class TestSlice(Testing.vtkTest):
def testSlice(self):
global plane, filter, args
writefiles = "SaveData" in args
renderer = vtk.vtkRenderer()
renwin = vtk.vtkRenderWindow()
renwin.AddRenderer(renderer)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renwin)
renwin.Render()
if "GPURender" in args:
vtk.vtkPistonMapper.InitCUDAGL(renwin)
src = vtk.vtkImageMandelbrotSource()
src.SetWholeExtent(0,20,0,20,0,20)
#scale and bias until piston understands origin and spacing
src.Update()
inputdata = src.GetOutput()
if "Normalize" in args:
testdata1 = inputdata.NewInstance()
testdata1.ShallowCopy(inputdata)
testdata1.SetSpacing(1,1,1)
testdata1.SetOrigin(0,0,0)
inputdata = testdata1
bounds = inputdata.GetBounds()
center = [(bounds[1]-bounds[0])/2+bounds[0],
(bounds[3]-bounds[2])/2+bounds[2],
(bounds[5]-bounds[4])/2+bounds[4]]
d2p = vtk.vtkDataSetToPiston()
d2p.SetInputData(inputdata)
#d2p.SetInputConnection(src.GetOutputPort())
plane = vtk.vtkPlane()
plane.SetOrigin(center)
plane.SetNormal(0,0,1)
filter = vtk.vtkPistonSlice()
filter.SetInputConnection(d2p.GetOutputPort())
filter.SetClippingPlane(plane)
filter.SetOffset(0.0)
p2d = vtk.vtkPistonToDataSet()
p2d.SetOutputDataSetType(vtk.VTK_POLY_DATA)
p2d.SetInputConnection(filter.GetOutputPort())
if writefiles:
writeFile(p2d, "piston_slice.vtk")
mapper = vtk.vtkPistonMapper()
mapper.SetInputConnection(filter.GetOutputPort())
mapper.Update() #TODO why is this necessary
actor = vtk.vtkActor()
actor.SetMapper(mapper)
renderer.AddActor(actor)
widget = vtk.vtkImplicitPlaneWidget()
widget.PlaceWidget(bounds)
        widget.SetOrigin([plane.GetOrigin()[x] for x in (0, 1, 2)])
        widget.SetNormal([plane.GetNormal()[x] for x in (0, 1, 2)])
widget.SetInteractor(iren)
widget.AddObserver("InteractionEvent", widgetCallBack)
widget.SetEnabled(1)
widget.DrawPlaneOff()
renderer.ResetCamera()
renwin.Render()
img_file = "TestSlice.png"
Testing.compareImage(renwin, Testing.getAbsImagePath(img_file))
if Testing.isInteractive():
widget.DrawPlaneOn()
iren.Start()
if __name__ == "__main__":
global args
args = parseArgs()
Testing.main([(TestSlice, 'test')])
# End of IMU/VTK-6.2.0/Accelerators/Piston/Testing/Python/TestSlice.py (repo: timkrentz/SunTracker, license: mit)
import os.path
SESSION_ENGINE = 'django.contrib.sessions.backends.cache'
SESSION_EXPIRE_AT_BROWSER_CLOSE = True
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'Europe/Athens'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
'compressor.finders.CompressorFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
TEMPLATE_CONTEXT_PROCESSORS = (
'django.contrib.auth.context_processors.auth',
'django.core.context_processors.debug',
'django.core.context_processors.i18n',
'django.core.context_processors.media',
'django.core.context_processors.static',
'django.contrib.messages.context_processors.messages',
# 'diogenis.common.context_processors.get_csrf_token_value',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
)
ROOT_URLCONF = 'diogenis.urls'
TEMPLATE_DIRS = (
os.path.join(os.path.dirname(__file__), 'templates'),
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.admin',
'diogenis.auth',
'diogenis.common',
'diogenis.schools',
'diogenis.students',
'diogenis.teachers',
# 'south',
)
# Needed for the decorator
LOGIN_URL = '/'
try:
from diogenis.local_settings import *
except ImportError:
pass
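# A minimal local_settings.py sketch (contents hypothetical; any name defined
# there overrides the values in this module):
#
#     DEBUG = True
#     TIME_ZONE = 'UTC'
#     SECRET_KEY = 'change-me'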
# End of settings.py (repo: gtsiokos/diogenis, license: bsd-3-clause)
"""
TransportCoefficients for flow and transport in porous media
.. inheritance-diagram:: proteus.SubsurfaceTransportCoefficients
:parts: 1
"""
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
from builtins import zip
from builtins import range
from past.utils import old_div
from builtins import object
from math import *
from .TransportCoefficients import TC_base
import numpy
from .Profiling import logEvent
from proteus import FemTools
from proteus import Transport
from . import subsurfaceTransportFunctions as stfuncs
######################################################################
#Utility classes for dealing with common aspects of flow and transport
######################################################################
class BlockHeterogeneousCoefficients(object):
"""Basic data structures and functionality for keeping track of a
block heterogeneity
"""
def __init__(self,mesh):
self.mesh = mesh
def initializeMaterialTypes(self):
"""returns material type identifiers for mesh topology in tuple
element,exterior_element_boundaries,element_boundaries
note element_boundaries is nElementBoundaries_global x 2 and
gives the element material property to the left and right of a
global element boundary
"""
elementMaterialTypes = self.mesh.elementMaterialTypes
exteriorElementBoundaryTypes = numpy.zeros((self.mesh.nExteriorElementBoundaries_global),'i')
stfuncs.setExteriorElementBoundaryTypes(self.mesh.nExteriorElementBoundaries_global,
self.mesh.exteriorElementBoundariesArray,
self.mesh.elementBoundaryElementsArray,
elementMaterialTypes,
exteriorElementBoundaryTypes)
elementBoundaryTypes = numpy.zeros((self.mesh.nElementBoundaries_global,2),'i')
stfuncs.setElementBoundariesArray(self.mesh.nElementBoundaries_global,
self.mesh.elementBoundaryElementsArray,
self.mesh.elementMaterialTypes,
elementBoundaryTypes)
return elementMaterialTypes,exteriorElementBoundaryTypes,elementBoundaryTypes
##################################################
#Single-phase flow
##################################################
class SinglePhaseDarcyCoefficients(TC_base):
""":math:`S_s h_t -\deld ( K_i(x,t) \grad h_i ) + r(x,t) = 0 i=1,nc`
"""
def __init__(self,K_types,source_types,S_s_types=None,
nc=1,nd=2,
timeVaryingCoefficients=False,
materialValuesLocallyConstant=False):
self.K_types = K_types
self.source_types = source_types
if S_s_types is None:
self.S_s_types = {}
for mat in list(self.K_types.keys()):
self.S_s_types[mat] = lambda x,t: 0.0
else:
self.S_s_types = S_s_types
assert timeVaryingCoefficients == True, "Transient Simulation (S_s > 0) requires timeVaryingCoefficients == True"
self.nd = nd
self.timeVaryingCoefficients=timeVaryingCoefficients
self.materialValuesLocallyConstant = materialValuesLocallyConstant
self.K_types_const=None
self.source_types_const=None
self.S_s_types_const=None
if self.materialValuesLocallyConstant:
self.K_types_const = {}; self.source_types_const= {}; self.S_s_types_const = {}
x = numpy.zeros((3,),'d'); t0 = 0.0
for mat in list(self.K_types.keys()):
self.K_types_const[mat] =self.K_types[mat](x,t0)
for mat in list(self.source_types.keys()):
self.source_types_const[mat]=self.source_types[mat](x,t0)
for mat in list(self.S_s_types.keys()):
self.S_s_types_const[mat]=self.S_s_types[mat](x,t0)
#for matching shapes of quadrature arrays
self.q_x_shape = None; self.ebqe_x_shape=None
self.ebq_global_x_shape=None; self.ebq_shape=None
mass = {}
advection = {}
diffusion = {}
potential = {}
reaction = {}
hamiltonian = {}
for i in range(nc):
diffusion[i] = {i : {i:'constant'}}
reaction[i] = {i : 'constant'}
advection[i] = {i : 'constant'} #now include for gravity type terms
potential[i] = {i : 'u'}
mass[i] = {i : 'linear'}
#end i
sparseDiffusionTensors = {}
for ci in range(nc):
sparseDiffusionTensors[(ci,ci)]=(numpy.arange(start=0,stop=nd**2+1,step=nd,dtype='i'),
numpy.array([list(range(nd)) for row in range(nd)],dtype='i'))
TC_base.__init__(self,
nc,
mass,
advection,
diffusion,
potential,
reaction,
hamiltonian,
sparseDiffusionTensors = sparseDiffusionTensors,
useSparseDiffusion = True)
def initializeMesh(self,mesh):
self.elementMaterialTypes,self.exteriorElementBoundaryTypes,self.elementBoundaryTypes = BlockHeterogeneousCoefficients(mesh).initializeMaterialTypes()
self.elementBoundariesArray = mesh.elementBoundariesArray
def initializeElementQuadrature(self,t,cq):
self.evaluateHeterogeneity_element(t,cq)
self.q_x_shape = cq['x'].shape
def initializeElementBoundaryQuadrature(self,t,cebq,cebq_global):
self.evaluateHeterogeneity_globalElementBoundary(t,cebq_global)
self.ebq_global_x_shape = cebq_global['x'].shape
self.evaluateHeterogeneity_elementBoundary(t,cebq)
self.ebq_x_shape = cebq['x'].shape
def initializeGlobalExteriorElementBoundaryQuadrature(self,t,cebqe):
self.evaluateHeterogeneity_exteriorElementBoundary(t,cebqe)
self.ebqe_x_shape = cebqe['x'].shape
def evaluateHeterogeneity_element(self,t,cq):
if self.materialValuesLocallyConstant:
x = numpy.zeros((3,),'d'); t0 = 0.0
for mat in list(self.K_types.keys()):
self.K_types_const[mat] =self.K_types[mat](x,t0)
for mat in list(self.source_types.keys()):
self.source_types_const[mat]=self.source_types[mat](x,t0)
for mat in list(self.S_s_types.keys()):
self.S_s_types_const[mat]=self.S_s_types[mat](x,t0)
for ci in range(self.nc):
cq[('f',ci)].flat[:] = 0.0
stfuncs.setScalarMaterialFunctionOverElements(self.elementMaterialTypes,
cq[('r',ci)],
self.source_types_const)
cq[('r',ci)] *= -1.0
#sparse matrix storage for tensors
stfuncs.setVectorMaterialFunctionOverElements(self.elementMaterialTypes,
cq[('a',ci,ci)],
self.K_types_const)
if ('dm',ci,ci) in cq:
stfuncs.setScalarMaterialFunctionOverElements(self.elementMaterialTypes,
cq[('dm',ci,ci)],
self.S_s_types_const)
cq[('m',ci)].flat[:] = cq[('u',ci)].flat
cq[('m',ci)] *= cq[('dm',ci,ci)]
else:
for ci in range(self.nc):
cq[('f',ci)].flat[:] = 0.0
stfuncs.evaluateScalarMaterialFunctionOverElements(t,self.elementMaterialTypes,
cq['x'],
cq[('r',ci)],
self.source_types)
cq[('r',ci)] *= -1.0
#sparse matrix storage for tensors
stfuncs.evaluateVectorMaterialFunctionOverElements(t,self.elementMaterialTypes,
cq['x'],
cq[('a',ci,ci)],
self.K_types)
if ('dm',ci,ci) in cq:
stfuncs.evaluateScalarMaterialFunctionOverElements(t,self.elementMaterialTypes,
cq['x'],
cq[('dm',ci,ci)],
self.S_s_types)
cq[('m',ci)].flat[:] = cq[('u',ci)].flat
cq[('m',ci)] *= cq[('dm',ci,ci)]
def evaluateHeterogeneity_elementBoundary(self,t,cebq):
#use harmonic average for a, arith average for f
if self.materialValuesLocallyConstant:
x = numpy.zeros((3,),'d'); t0 = 0.0
for mat in list(self.K_types.keys()):
self.K_types_const[mat] =self.K_types[mat](x,t0)
for mat in list(self.source_types.keys()):
self.source_types_const[mat]=self.source_types[mat](x,t0)
for mat in list(self.S_s_types.keys()):
self.S_s_types_const[mat]=self.S_s_types[mat](x,t0)
for ci in range(self.nc):
if ('f',ci) in cebq: cebq[('f',ci)].flat[:] = 0.0
if ('r',ci) in cebq:
stfuncs.setScalarMaterialFunctionOverElementBoundaries_arithmeticAverage(self.elementBoundariesArray,
self.elementBoundaryTypes,
cebq[('r',ci)],
self.source_types_const)
cebq[('r',ci)] *= -1.0
if ('a',ci,ci) in cebq:
stfuncs.setSparseTensorMaterialFunctionOverElementBoundaries_harmonicAverage(self.nd,
self.elementBoundariesArray,
self.elementBoundaryTypes,
cebq[('a',ci,ci)],
self.K_types_const)
if ('dm',ci,ci) in cebq:
stfuncs.setScalarMaterialFunctionOverElementBoundaries_arithmeticAverage(self.elementBoundariesArray,
self.elementBoundaryTypes,
cebq[('dm',ci,ci)],
self.S_s_types_const)
cebq[('m',ci)].flat[:] = cebq[('u',ci)].flat
cebq[('m',ci)] *= cebq[('dm',ci,ci)]
else:
for ci in range(self.nc):
if ('f',ci) in cebq: cebq[('f',ci)].fill(0.0)
if ('r',ci) in cebq:
stfuncs.evaluateScalarMaterialFunctionOverElementBoundaries_arithmeticAverage(t,
self.elementBoundariesArray,
self.elementBoundaryTypes,
cebq['x'],
cebq[('r',ci)],
self.source_types)
cebq[('r',ci)] *= -1.0
if ('a',ci,ci) in cebq:
stfuncs.evaluateSparseTensorMaterialFunctionOverElementBoundaries_harmonicAverage(self.nd,
t,
self.elementBoundariesArray,
self.elementBoundaryTypes,
cebq['x'],
cebq[('a',ci,ci)],
self.K_types)
if ('dm',ci,ci) in cebq:
stfuncs.evaluateScalarMaterialFunctionOverElementBoundaries_arithmeticAverage(t,
self.elementBoundariesArray,
self.elementBoundaryTypes,
cebq['x'],
cebq[('dm',ci,ci)],
self.S_s_types)
cebq[('m',ci)].flat[:] = cebq[('u',ci)].flat
cebq[('m',ci)] *= cebq[('dm',ci,ci)]
def evaluateHeterogeneity_globalElementBoundary(self,t,cebq_global):
#use harmonic average for a, arith average for f
if self.materialValuesLocallyConstant:
x = numpy.zeros((3,),'d'); t0 = 0.0
for mat in list(self.K_types.keys()):
self.K_types_const[mat] =self.K_types[mat](x,t0)
for mat in list(self.source_types.keys()):
self.source_types_const[mat]=self.source_types[mat](x,t0)
for mat in list(self.S_s_types.keys()):
self.S_s_types_const[mat]=self.S_s_types[mat](x,t0)
for ci in range(self.nc):
if ('f',ci) in cebq_global: cebq_global[('f',ci)].flat[:] = 0.0
if ('r',ci) in cebq_global:
stfuncs.setScalarMaterialFunctionOverGlobalElementBoundaries_arithmeticAverage(self.elementBoundariesArray,
self.elementBoundaryTypes,
cebq_global[('r',ci)],
self.source_types_const)
cebq_global[('r',ci)] *= -1.0
if ('a',ci,ci) in cebq_global:
stfuncs.setSparseTensorMaterialFunctionOverGlobalElementBoundaries_harmonicAverage(self.nd,
self.elementBoundariesArray,
self.elementBoundaryTypes,
cebq_global[('a',ci,ci)],
self.K_types_const)
if ('dm',ci,ci) in cebq_global:
stfuncs.setScalarMaterialFunctionOverGlobalElementBoundaries_arithmeticAverage(self.elementBoundariesArray,
self.elementBoundaryTypes,
cebq_global[('dm',ci,ci)],
self.S_s_types_const)
cebq_global[('m',ci)].flat[:] = cebq_global[('u',ci)].flat
cebq_global[('m',ci)] *= cebq_global[('dm',ci,ci)]
else:
for ci in range(self.nc):
if ('f',ci) in cebq_global: cebq_global[('f',ci)].flat[:] = 0.0
if ('r',ci) in cebq_global:
stfuncs.evaluateScalarMaterialFunctionOverGlobalElementBoundaries_arithmeticAverage(t,
self.elementBoundariesArray,
self.elementBoundaryTypes,
cebq_global['x'],
cebq_global[('r',ci)],
self.source_types)
cebq_global[('r',ci)] *= -1.0
if ('a',ci,ci) in cebq_global:
stfuncs.evaluateSparseTensorMaterialFunctionOverGlobalElementBoundaries_harmonicAverage(self.nd,
t,
self.elementBoundariesArray,
self.elementBoundaryTypes,
cebq_global['x'],
cebq_global[('a',ci,ci)],
self.K_types)
if ('dm',ci,ci) in cebq_global:
stfuncs.evaluateScalarMaterialFunctionOverGlobalElementBoundaries_arithmeticAverage(t,
self.elementBoundariesArray,
self.elementBoundaryTypes,
cebq_global['x'],
cebq_global[('dm',ci,ci)],
self.S_s_types)
cebq_global[('m',ci)].flat[:] = cebq_global[('u',ci)].flat
cebq_global[('m',ci)] *= cebq_global[('dm',ci,ci)]
def evaluateHeterogeneity_exteriorElementBoundary(self,t,cebqe):
if self.materialValuesLocallyConstant:
x = numpy.zeros((3,),'d'); t0 = 0.0
for mat in list(self.K_types.keys()):
self.K_types_const[mat] =self.K_types[mat](x,t0)
for mat in list(self.source_types.keys()):
self.source_types_const[mat]=self.source_types[mat](x,t0)
for mat in list(self.S_s_types.keys()):
self.S_s_types_const[mat]=self.S_s_types[mat](x,t0)
for ci in range(self.nc):
cebqe[('f',ci)].flat[:] = 0.0
stfuncs.setScalarMaterialFunctionOverElements(self.exteriorElementBoundaryTypes,
cebqe[('r',ci)],
self.source_types_const)
cebqe[('r',ci)] *= -1.0
#sparse matrix storage for tensors
stfuncs.setVectorMaterialFunctionOverElements(self.exteriorElementBoundaryTypes,
cebqe[('a',ci,ci)],
self.K_types_const)
if ('dm',ci,ci) in cebqe:
stfuncs.setScalarMaterialFunctionOverElements(self.exteriorElementBoundaryTypes,
cebqe[('dm',ci,ci)],
self.S_s_types_const)
cebqe[('m',ci)].flat[:] = cebqe[('u',ci)].flat
cebqe[('m',ci)] *= cebqe[('dm',ci,ci)]
else:
for ci in range(self.nc):
cebqe[('f',ci)].flat[:] = 0.0
stfuncs.evaluateScalarMaterialFunctionOverElements(t,self.exteriorElementBoundaryTypes,
cebqe['x'],
cebqe[('r',ci)],
self.source_types)
cebqe[('r',ci)] *= -1.0
#sparse matrix storage for tensors
stfuncs.evaluateVectorMaterialFunctionOverElements(t,self.exteriorElementBoundaryTypes,
cebqe['x'],
cebqe[('a',ci,ci)],
self.K_types)
if ('dm',ci,ci) in cebqe:
stfuncs.evaluateScalarMaterialFunctionOverElements(t,self.exteriorElementBoundaryTypes,
cebqe['x'],
cebqe[('dm',ci,ci)],
self.S_s_types)
cebqe[('m',ci)].flat[:] = cebqe[('u',ci)].flat
cebqe[('m',ci)] *= cebqe[('dm',ci,ci)]
def evaluate(self,t,c):
if self.timeVaryingCoefficients == True:
if c['x'].shape == self.q_x_shape:
self.evaluateHeterogeneity_element(t,c)
elif c['x'].shape == self.ebqe_x_shape:
self.evaluateHeterogeneity_exteriorElementBoundary(t,c)
elif c['x'].shape == self.ebq_global_x_shape:
self.evaluateHeterogeneity_globalElementBoundary(t,c)
elif c['x'].shape == self.ebq_x_shape:
self.evaluateHeterogeneity_elementBoundary(t,c)
else:
raise NotImplementedError
#mwf debug
#print "eval c[('a',ci,ci)]= %s" % c[('a',0,0)]
#end def
########################################
#Richards equation
########################################
class ConservativeHeadRichardsMualemVanGenuchten(TC_base):
"""version of Re where element material type id's used in evals
"""
from .ctransportCoefficients import conservativeHeadRichardsMualemVanGenuchtenHetEvaluateV2
from .ctransportCoefficients import conservativeHeadRichardsMualemVanGenuchten_sd_het
def __init__(self,
nd,
Ksw_types,
vgm_n_types,
vgm_alpha_types,
thetaR_types,
thetaSR_types,
gravity,
density,
beta,
diagonal_conductivity=True,
getSeepageFace=None,
pc_eps=1.0e-8):
variableNames=['pressure_head']
nc=1
mass={0:{0:'nonlinear'}}
advection={0:{0:'nonlinear'}}
diffusion={0:{0:{0:'nonlinear'}}}
potential={0:{0:'u'}}
reaction={0:{0:'linear'}}
hamiltonian={}
self.getSeepageFace=getSeepageFace
self.gravity=gravity
self.rho = density
self.beta=beta
self.vgm_n_types = vgm_n_types
self.vgm_alpha_types = vgm_alpha_types
self.thetaR_types = thetaR_types
self.thetaSR_types = thetaSR_types
self.elementMaterialTypes = None
self.exteriorElementBoundaryTypes = None
self.materialTypes_q = None
self.materialTypes_ebq = None
self.materialTypes_ebqe = None
self.pc_eps = pc_eps
self.nd = nd
self.nMaterialTypes = len(thetaR_types)
self.q = {}; self.ebqe = {}; self.ebq = {}; self.ebq_global={}
#try to allow some flexibility in input of permeability/conductivity tensor
self.diagonal_conductivity = diagonal_conductivity
self.Ksw_types_in = Ksw_types
if self.diagonal_conductivity:
sparseDiffusionTensors = {(0,0):(numpy.arange(self.nd+1,dtype='i'),
numpy.arange(self.nd,dtype='i'))}
assert len(Ksw_types.shape) in [1,2], "if diagonal conductivity true then Ksw_types scalar or vector of diagonal entries"
#allow scalar input Ks
if len(Ksw_types.shape)==1:
self.Ksw_types = numpy.zeros((self.nMaterialTypes,self.nd),'d')
for I in range(self.nd):
self.Ksw_types[:,I] = Ksw_types
else:
self.Ksw_types = Ksw_types
else: #full
sparseDiffusionTensors = {(0,0):(numpy.arange(self.nd**2+1,step=self.nd,dtype='i'),
numpy.array([list(range(self.nd)) for row in range(self.nd)],dtype='i'))}
assert len(Ksw_types.shape) in [1,2], "if full tensor conductivity true then Ksw_types scalar or 'flattened' row-major representation of entries"
if len(Ksw_types.shape)==1:
self.Ksw_types = numpy.zeros((self.nMaterialTypes,self.nd**2),'d')
for I in range(self.nd):
self.Ksw_types[:,I*self.nd+I] = Ksw_types
else:
assert Ksw_types.shape[1] == self.nd**2
self.Ksw_types = Ksw_types
TC_base.__init__(self,
nc,
mass,
advection,
diffusion,
potential,
reaction,
hamiltonian,
variableNames,
sparseDiffusionTensors = sparseDiffusionTensors,
useSparseDiffusion = True)
def initializeMesh(self,mesh):
#import pdb
#pdb.set_trace()
self.elementMaterialTypes,self.exteriorElementBoundaryTypes,self.elementBoundaryTypes = BlockHeterogeneousCoefficients(mesh).initializeMaterialTypes()
#want element boundary material types for evaluating heterogeneity
#not boundary conditions
self.isSeepageFace = numpy.zeros((mesh.nExteriorElementBoundaries_global),'i')
if self.getSeepageFace is not None:
for ebNE in range(mesh.nExteriorElementBoundaries_global):
#mwf missing ebNE-->ebN?
ebN = mesh.exteriorElementBoundariesArray[ebNE]
#print "eb flag",mesh.elementBoundaryMaterialTypes[ebN]
#print self.getSeepageFace(mesh.elementBoundaryMaterialTypes[ebN])
self.isSeepageFace[ebNE] = self.getSeepageFace(mesh.elementBoundaryMaterialTypes[ebN])
#print self.isSeepageFace
def initializeElementQuadrature(self,t,cq):
self.materialTypes_q = self.elementMaterialTypes
self.q_shape = cq[('u',0)].shape
# cq['Ks'] = numpy.zeros(self.q_shape,'d')
# for k in range(self.q_shape[1]):
# cq['Ks'][:,k] = self.Ksw_types[self.elementMaterialTypes,0]
self.q[('vol_frac',0)] = numpy.zeros(self.q_shape,'d')
def initializeElementBoundaryQuadrature(self,t,cebq,cebq_global):
self.materialTypes_ebq = numpy.zeros(cebq[('u',0)].shape[0:2],'i')
self.ebq_shape = cebq[('u',0)].shape
for ebN_local in range(self.ebq_shape[1]):
self.materialTypes_ebq[:,ebN_local] = self.elementMaterialTypes
self.ebq[('vol_frac',0)] = numpy.zeros(self.ebq_shape,'d')
def initializeGlobalExteriorElementBoundaryQuadrature(self,t,cebqe):
self.materialTypes_ebqe = self.exteriorElementBoundaryTypes
self.ebqe_shape = cebqe[('u',0)].shape
self.ebqe[('vol_frac',0)] = numpy.zeros(self.ebqe_shape,'d')
#
def evaluate(self,t,c):
if c[('u',0)].shape == self.q_shape:
materialTypes = self.materialTypes_q
vol_frac = self.q[('vol_frac',0)]
elif c[('u',0)].shape == self.ebqe_shape:
materialTypes = self.materialTypes_ebqe
vol_frac = self.ebqe[('vol_frac',0)]
elif c[('u',0)].shape == self.ebq_shape:
materialTypes = self.materialTypes_ebq
vol_frac = self.ebq[('vol_frac',0)]
else:
assert False, "no materialType found to match c[('u',0)].shape= %s " % c[('u',0)].shape
linearize = 0
self.conservativeHeadRichardsMualemVanGenuchten_sd_het(self.sdInfo[(0,0)][0],
self.sdInfo[(0,0)][1],
materialTypes,
self.rho,
self.beta,
self.gravity,
self.vgm_alpha_types,
self.vgm_n_types,
self.thetaR_types,
self.thetaSR_types,
self.Ksw_types,
c[('u',0)],
c[('m',0)],
c[('dm',0,0)],
c[('f',0)],
c[('df',0,0)],
c[('a',0,0)],
c[('da',0,0,0)],
vol_frac,
linearize,
self.pc_eps)
# self.conservativeHeadRichardsMualemVanGenuchtenHetEvaluateV2(materialTypes,
# self.rho,
# self.beta,
# self.gravity,
# self.vgm_alpha_types,
# self.vgm_n_types,
# self.thetaR_types,
# self.thetaSR_types,
# self.Ksw_types,
# c[('u',0)],
# c[('m',0)],
# c[('dm',0,0)],
# c[('f',0)],
# c[('df',0,0)],
# c[('a',0,0)],
# c[('da',0,0,0)])
#mwf debug
if (numpy.isnan(c[('da',0,0,0)]).any() or
numpy.isnan(c[('a',0,0)]).any() or
numpy.isnan(c[('df',0,0)]).any() or
numpy.isnan(c[('f',0)]).any() or
numpy.isnan(c[('u',0)]).any() or
numpy.isnan(c[('m',0)]).any() or
numpy.isnan(c[('dm',0,0)]).any()):
import pdb
pdb.set_trace()
# #mwf debug
# if c[('u',0)].shape == self.q_shape:
# c[('visPerm',0)]=c[('a',0,0)][:,:,0,0]
from proteus import cfemIntegrals
class RE_NCP1_OneLevelTransport(Transport.OneLevelTransport):
"""OneLevelTransport designed specifically for Non-Conforming
:math:`P^1` approximation to RE Approximation uses nodal
quadrature and upwinding
"""
def __init__(self,
uDict,
phiDict,
testSpaceDict,
matType,
dofBoundaryConditionsDict,
dofBoundaryConditionsSetterDict,
coefficients,
elementQuadrature,
elementBoundaryQuadrature,
fluxBoundaryConditionsDict=None,
advectiveFluxBoundaryConditionsSetterDict=None,
diffusiveFluxBoundaryConditionsSetterDictDict=None,
stressFluxBoundaryConditionsSetterDict=None,
stabilization=None,
shockCapturing=None,
conservativeFluxDict=None,
numericalFluxType=None,
TimeIntegrationClass=None,
massLumping=False,
reactionLumping=False,
options=None,
name='defaultName',
reuse_trial_and_test_quadrature=False,
sd = True,
movingDomain=False):#,
Transport.OneLevelTransport.__init__(self,
uDict,
phiDict,
testSpaceDict,
matType,
dofBoundaryConditionsDict,
dofBoundaryConditionsSetterDict,
coefficients,
elementQuadrature,
elementBoundaryQuadrature,
fluxBoundaryConditionsDict=fluxBoundaryConditionsDict,
advectiveFluxBoundaryConditionsSetterDict=advectiveFluxBoundaryConditionsSetterDict,
diffusiveFluxBoundaryConditionsSetterDictDict=diffusiveFluxBoundaryConditionsSetterDictDict,
stressFluxBoundaryConditionsSetterDict=stressFluxBoundaryConditionsSetterDict,
stabilization=stabilization,
shockCapturing=shockCapturing,
conservativeFluxDict=conservativeFluxDict,
numericalFluxType=numericalFluxType,
TimeIntegrationClass=TimeIntegrationClass,
massLumping=massLumping,
reactionLumping=reactionLumping,
options=options,
name=name,
reuse_trial_and_test_quadrature=reuse_trial_and_test_quadrature,
sd = sd,
movingDomain=movingDomain)
assert self.nQuadraturePoints_element == self.nSpace_global+1
#particular arrays needed for Darcy flow
self.q[('k_r',0,0)] = numpy.zeros((self.mesh.nElements_global,self.nQuadraturePoints_element),'d')
self.q[('dk_r',0,0,0)] = numpy.zeros((self.mesh.nElements_global,self.nQuadraturePoints_element),'d')
self.q[('f_lin',0)] = numpy.zeros((self.mesh.nElements_global,self.nQuadraturePoints_element,self.nSpace_global),'d')
self.q[('a_lin',0,0)] = numpy.zeros((self.mesh.nElements_global,self.nQuadraturePoints_element,self.coefficients.sdInfo[(0,0)][0][self.nSpace_global]),'d')
self.q[('k_r_up',0,0)] = numpy.zeros((self.mesh.nElements_global,self.nQuadraturePoints_element),'d')
#need to check that points are the interpolation points too
# #quadrature dictionary for solution interpolation points
# self.n_u_ip_element = [u_j.femSpace.referenceFiniteElement.interpolationConditions.nQuadraturePoints for u_j in self.u.values()]
# self.u_ip = {}
# #coefficients
# self.points_u_ip =StorageSet(shape=(self.mesh.nElements_global,self.n_u_ip_element[0],3))
# self.scalars_u_ip=StorageSet(shape=(self.mesh.nElements_global,self.n_u_ip_element[0]))
# self.vectors_u_ip=StorageSet(shape=(self.mesh.nElements_global,self.n_u_ip_element[0],self.nSpace_global))
# self.tensors_u_ip=StorageSet(shape={})
# #shape
# self.trial_shape_u_ip=StorageSet(shape={})
# self.test_shape_u_ip = StorageSet(shape={})
# self.test_shapeGradient_u_ip = StorageSet(shape={})
# self.trial_shapeGradient_u_ip= StorageSet(shape={})
# trial_shape_u_ip_duplicate = StorageSet(shape={})
# trial_shape_u_ip_duplicate_map = {}
# #
# # build sets
# #
# #u
# self.scalars_u_ip |= set([('u',ci) for ci in range(self.nc)])
# self.vectors_u_ip |= set([('grad(u)',ci) for ci in range(self.nc)])
# #trial
# self.trial_shape_u_ip |= set([('v',ci) for ci in self.unique_test_trial_range])
# trial_shape_u_ip_duplicate |= set([('v',ci) for ci in self.duplicate_test_trial_range])
# for ci in self.duplicate_test_trial_range:
# trial_shape_u_ip_duplicate_map[('v',ci)] = ('v',self.unique_test_trial_range[0])
# #test
# self.test_shape_u_ip |= set([('w',ci) for ci in range(self.nc)])
# self.test_shapeGradient_u_ip |= set([('grad(w)',ci) for ci in range(self.nc)])
# #m
# self.scalars_u_ip |= set([('m',ci) for ci in self.coefficients.mass.keys()])
# self.scalars_u_ip |= set([('mt',ci) for ci in self.coefficients.mass.keys()])
# for ci,cjDict in self.coefficients.mass.iteritems():
# self.scalars_u_ip |= set([('dm',ci,cj) for cj in cjDict.keys()])
# #f
# self.vectors_u_ip |= set([('f',ci) for ci in self.coefficients.advection.keys()])
# #special for Darcy flow
# self.vectors_u_ip |= set([('f_lin',ci) for ci in self.coefficients.advection.keys()])
# for ci,cjDict in self.coefficients.advection.iteritems():
# self.vectors_u_ip |= set([('df',ci,cj) for cj in cjDict.keys()])
# #a and phi
# self.vectors_u_ip |= set([('velocity',ci) for ci in self.coefficients.diffusion.keys()])
# self.scalars_u_ip |= set([('phi',ck) for ck in self.coefficients.potential.keys()])
# for ci,ckDict in self.coefficients.diffusion.iteritems():
# self.tensors_u_ip |= set([('a',ci,ck) for ck in ckDict.keys()])
# #special for Darcy flow
# self.tensors_u_ip |= set([('a_lin',ci,ck) for ck in ckDict.keys()])
# self.scalars_u_ip |= set([('k_r',ci,ck) for ck in self.ckDict.keys()])
# self.vectors_u_ip |= set([('grad(phi)',ck) for ck in ckDict.keys()])
# for ck,cjDict in ckDict.iteritems():
# self.tensors_u_ip |= set([('da',ci,ck,cj) for cj in cjDict.keys()])
# #special for Darcy flow
# self.scalars_u_ip |= set([('dk_r',ci,ck,cj) for cj in cjDict.keys()])
# #mwf trial-dup start
# #mwf orig self.trial_shape_quadrature |= set([('v',cj) for cj in cjDict.keys()])
# unique_cj = set.intersection(set(self.unique_test_trial_range),set(cjDict.keys()))
# duplicate_cj= set.intersection(set(self.duplicate_test_trial_range),set(cjDict.keys()))
# self.trial_shape_u_ip |= set([('v',cj) for cj in unique_cj])
# trial_shape_u_ip_duplicate |= set([('v',cj) for cj in duplicate_cj])
# #any reason to do this again
# for cj in self.duplicate_test_trial_range:
# trial_shape_u_ip_duplicate_map[('v',cj)] = ('v',self.unique_test_trial_range[0])
# #mwf trial-dup end
# for cj in self.coefficients.potential[ck].keys():
# self.scalars_u_ip |= set([('dphi',ck,cj)])
# #mwf what if miss nonlinearities in diffusion that don't match potentials?
# for comb in self.dphi.keys():
# self.scalars_u_ip |= set([('dphi',comb[0],comb[1])])
# #r
# self.scalars_u_ip |= set([('r',ci) for ci in self.coefficients.reaction.keys()])
# for ci,cjDict in self.coefficients.reaction.iteritems():
# self.scalars_u_ip |= set([('dr',ci,cj) for cj in cjDict.keys()])
# #mesh
# self.points_u_ip |= set([('x',ci) for ci in self.unique_test_trial_range])
# #
# memory()
# #
# for ci in self.unique_test_trial_range:
# self.u_ip[('x',ci)] = self.u[ci].femSpace.updateInterpolationPoints()
# for ci in self.duplicate_test_trial_range:
# self.u_ip[('x',ci)] = self.u_ip[('x',self.unique_test_trial_range[0])]
# #
# #allocations
# #
# self.scalars_u_ip.allocate(self.u_ip)
# #points
# self.points_u_ip.allocate(self.u_ip)
# #scalars
# self.scalars_u_ip.allocate(self.u_ip)
# #vectors
# self.vectors_u_ip.allocate(self.u_ip)
# #allocate tensors element quadrature
# for k in self.tensors_u_ip:
# if (self.sd
# and k[0] in ['a','da']
# and self.coefficients.sdInfo is not None
# and (k[1],k[2]) in self.coefficients.sdInfo.keys()):
# self.u_ip[k]=numpy.zeros(
# (self.mesh.nElements_global,
# self.n_u_ip_element[k[2]],
# self.coefficients.sdInfo[(k[1],k[2])][0][self.nSpace_global]),
# 'd')
# else:
# self.u_ip[k]=numpy.zeros(
# (self.mesh.nElements_global,
# self.n_u_ip_element[k[2]],
# self.nSpace_global,
# self.nSpace_global),
# 'd')
# #
# # shape
# #
# def makeAlias(sd,kw,vstring='v',wstring='w'):
# aliased=False
# kv0 = kw[0].replace(wstring,vstring)
# if len(kw) > 1:
# kv = (kv0,)+kw[1:]
# else:
# kv = kv0
# if self.testIsTrial:
# if kv in sd.keys():
# logEvent("Shallow copy of trial shape is being used for test shape %s " % kw[0],level=4)
# sd[kw] = sd[kv]
# aliased=True
# return aliased
# def makeAliasForComponent1(sd,k,entryStrings,refi=0):
# aliased=False
# #need to add combinatorial options
# ks = k[0]
# if ks not in entryStrings:
# return False
# k0 = (ks,)+(refi,)
# if self.reuse_test_trial_quadrature and refi is not None:
# if k0 in sd.keys():
# logEvent("Shallow copy of trial shape %s is being used for trial shape %s" % (k0,k),level=4)
# sd[k] = sd[k0]
# aliased=True
# return aliased
# #
# # element quadrature
# #
# #allocate trial shape functions
# for k in sorted(self.trial_shape_u_ip):
# if not makeAliasForComponent1(self.u_ip,k,['v'],refi=0):#need to handle multiple component combinations
# self.u_ip[k]=numpy.zeros(
# (self.mesh.nElements_global,
# self.n_u_ip_element,
# self.nDOF_trial_element[k[-1]]),
# 'd')
# for k in trial_shape_u_ip_duplicate:
# self.u_ip[k] = self.u_ip[trial_shape_u_ip_duplicate_map[k]]
# #mwf trial-dup end
# logEvent(memory("solution interpolation points, test/trial functions trial_shape","OneLevelTransport"),level=4)
# #allocate test shape functions
# for k in self.test_shape_u_ip:
# if not makeAlias(self.u_ip,k):
# self.u_ip[k]=numpy.zeros(
# (self.mesh.nElements_global,
# self.n_u_ip_element,
# self.nDOF_test_element[k[-1]]),
# 'd')
# logEvent(memory("solution interpolation points, test/trial functions test_shape","OneLevelTransport"),level=4)
# #allocate trial shape function gradients
# for k in sorted(self.trial_shapeGradient_u_ip):
# if not makeAliasForComponent1(self.u_ip,k,['grad(v)'],refi=0):#need to handle multiple component combinations
# self.u_ip[k]=numpy.zeros(
# (self.mesh.nElements_global,
# self.n_u_ip_element,
# self.nDOF_trial_element[k[-1]],
# self.nSpace_global),
# 'd')
# logEvent(memory("solution interpolation points, test/trial functions trial_shapeGradient","OneLevelTransport"),level=4)
# #allocate test shape function gradients
# for k in self.test_shapeGradient_u_ip:
# if not makeAlias(self.u_ip,k):
# self.u_ip[k]=numpy.zeros(
# (self.mesh.nElements_global,
# self.n_u_ip_element,
# self.nDOF_test_element[k[-1]],
# self.nSpace_global),
# 'd')
def calculateElementCoefficients(self):
"""calculate the nonlinear coefficients at the quadrature points and
nodes include interpolation points explicitly here now
"""
for cj in range(self.nc):
self.u[cj].getValues(self.q[('v',cj)],
self.q[('u',cj)])
if ('grad(u)',cj) in self.q:
self.u[cj].getGradientValues(self.q[('grad(v)',cj)],
self.q[('grad(u)',cj)])
#can skip this after first call
stfuncs.RE_NCP1_evaluateElementCoefficients_Linear(self.coefficients.rho,
self.coefficients.gravity,
self.coefficients.sdInfo[(0,0)][0],
self.coefficients.sdInfo[(0,0)][1],
self.coefficients.Ksw_types,
self.nSpace_global,
self.mesh.nElements_global,
self.mesh.nElementBoundaries_element,
self.mesh.elementNeighborsArray,
self.mesh.elementMaterialTypes,
self.q[('f_lin',0)],
self.q[('a_lin',0,0)])
stfuncs.RE_NCP1_evaluateElementCoefficients_VGM(self.coefficients.rho,
self.coefficients.beta,
self.coefficients.gravity,
self.coefficients.vgm_alpha_types,
self.coefficients.vgm_n_types,
self.coefficients.thetaR_types,
self.coefficients.thetaSR_types,
self.nSpace_global,
self.mesh.nElements_global,
self.mesh.nElementBoundaries_element,
self.mesh.elementNeighborsArray,
self.mesh.elementBarycentersArray,
self.mesh.elementMaterialTypes,
self.nDOF_trial_element[0],
self.u[0].femSpace.dofMap.l2g,
self.u[0].dof,
self.q['x'],
self.q[('u',0)],
self.q[('m',0)],
self.q[('dm',0,0)],
self.q[('r',0)],
self.q[('k_r',0,0)],
self.q[('dk_r',0,0,0)],
self.q[('k_r_up',0,0)])
if self.movingDomain and self.coefficients.movingDomain:
self.coefficients.updateToMovingDomain(self.timeIntegration.t,self.q)
if self.timeTerm:
self.timeIntegration.calculateElementCoefficients(self.q)
#cek need to clean up calculation of dimless numbers, might as well do it all the time and pass to subgrid error
#mwf figure out what to do with this
#what happens if stabilization didn't compute cfl?
for ci in range(self.nc):
#for two phase flow would need this
self.q[('dphi',ci,ci)].fill(1.0)
if self.sd:
cfemIntegrals.calculateDimensionlessNumbersADR_sd(self.mesh.nElements_global,
self.nQuadraturePoints_element,
self.nSpace_global,
self.coefficients.sdInfo[(ci,ci)][0],self.coefficients.sdInfo[(ci,ci)][1],
self.elementEffectiveDiametersArray,
self.q[('df',ci,ci)],
self.q[('a',ci,ci)],
self.q[('dphi',ci,ci)],
self.q[('dr',ci,ci)],
self.q[('dmt',ci,ci)],
self.q[('pe',ci)],
self.q[('cfl',ci)])
else:
cfemIntegrals.calculateDimensionlessNumbersADR(self.mesh.nElements_global,
self.nQuadraturePoints_element,
self.nSpace_global,
self.elementEffectiveDiametersArray,
self.q[('df',ci,ci)],
self.q[('a',ci,ci)],
self.q[('dphi',ci,ci)],
self.q[('dr',ci,ci)],
self.q[('dmt',ci,ci)],
self.q[('pe',ci)],
self.q[('cfl',ci)])
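#the calls above fill q[('pe',ci)] (a grid Peclet number) and q[('cfl',ci)]
#(a CFL number) from the advection, diffusion, reaction, and mass
#coefficients; these feed time step selection and, when enabled, the
#subgrid error and shock capturing models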
if self.shockCapturing is not None:
self.shockCapturing.calculateNumericalDiffusion(self.q)
def calculateElementResidual(self):
"""Calculate all the element residuals"""
#mwf debug
#import pdb
#Transport.OneLevelTransport.calculateElementResidual(self)
#self.elementResidual_save = numpy.copy(self.elementResidual[0])
for ci in range(self.nc):
self.elementResidual[ci].fill(0.0)
#mwf debug
#pdb.set_trace()
stfuncs.RE_NCP1_getElementResidual(self.coefficients.gravity,
self.coefficients.sdInfo[(0,0)][0],
self.coefficients.sdInfo[(0,0)][1],
self.nSpace_global,
self.mesh.nElements_global,
self.mesh.nElementBoundaries_element,
self.mesh.elementNeighborsArray,
self.mesh.elementBarycentersArray,
self.nDOF_test_element[0],
self.q[('u',0)],
self.q[('grad(u)',0)],
self.q[('grad(w)',0)],
self.q['abs(det(J))'],
self.q[('m',0)],
self.q[('mt',0)],
self.q[('r',0)],
self.q[('k_r',0,0)],
self.q[('k_r_up',0,0)],
self.q[('f_lin',0)],
self.q[('a_lin',0,0)],
self.elementResidual[0])
#mwf debug
#pdb.set_trace()
def calculateElementJacobian(self):
"""Calculate all the element Jacobians"""
#mwf debug
#import pdb
#Transport.OneLevelTransport.calculateElementJacobian(self)
#self.elementJacobian_save = numpy.copy(self.elementJacobian[0][0])
for ci in range(self.nc):
for cj in self.coefficients.stencil[ci]:
self.elementJacobian[ci][cj].fill(0.0)
#mwf debug
#pdb.set_trace()
stfuncs.RE_NCP1_getElementJacobian(self.coefficients.gravity,
self.coefficients.sdInfo[(0,0)][0],
self.coefficients.sdInfo[(0,0)][1],
self.nSpace_global,
self.mesh.nElements_global,
self.mesh.nElementBoundaries_element,
self.mesh.elementNeighborsArray,
self.mesh.elementBarycentersArray,
self.nDOF_test_element[0],
self.nDOF_trial_element[0],
self.q[('u',0)],
self.q[('grad(u)',0)],
self.q[('grad(w)',0)],
self.q[('grad(v)',0)],
self.q['abs(det(J))'],
self.q[('m',0)],
self.q[('dm',0,0)],
self.q[('mt',0)],
self.q[('dmt',0,0)],
self.q[('r',0)],
self.q[('k_r',0,0)],
self.q[('dk_r',0,0,0)],
self.q[('k_r_up',0,0)],
self.q[('f_lin',0)],
self.q[('a_lin',0,0)],
self.elementJacobian[0][0])
########################################
#Immiscible two-phase flow
########################################
class TwophaseDarcyFlow_base(TC_base):
"""base class for two-phase flow implementations
holds information for:
* fluid properties
* eos model tag
* psk model tag
* material heterogeneity information (number of types, lookup arrays)
default fluids are water and air, assumed to be incompressible
"""
from proteus.cTwophaseDarcyCoefficients import twophaseDarcy_vol_frac,generateSplineTables
default_density_w_parameters = {'model':'Exponential',
'nParameters':3,
'rho_0':998.2,#Water kg/m^3
'psi_0':0.0,
'beta':0.0}
default_density_n_parameters = {'model':'Exponential',
'nParameters':3,
'rho_0':1.205,#Air kg/m^3
'psi_0':0.0,
'beta':0.0}
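#A hedged sketch of the density parameter dictionaries this class consumes.
#The 'Exponential' keys match the defaults above; the 'IdealGas' keys are the
#ones read in __init__ below (T, W, R, headToPressure, rho_0, psi_0).  The
#numerical values here are illustrative assumptions only:
#
# example_density_n_parameters = {'model':'IdealGas',
#                                 'nParameters':6,
#                                 'T':293.15, #temperature [K]
#                                 'W':0.028966, #molar mass [kg/mol]
#                                 'R':8.314, #gas constant [J/(mol K)]
#                                 'headToPressure':9789.0, #rho_w*|g| conversion
#                                 'rho_0':1.205, #reference density [kg/m^3]
#                                 'psi_0':0.0} #reference pressure head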
def __init__(self,
nd=1,
dimensionless_gravity=[-1.0],
density_w=998.2, #Water kg/m^3
density_n=1.205, #Air kg/m^3
viscosity_w=8.9e-4, #Water kg/m s
viscosity_n=1.81e-5,#Air kg/ m s
density_w_parameters=default_density_w_parameters,
density_n_parameters=default_density_n_parameters,
psk_model='VGM',
nMaterialTypes=1,
diagonal_conductivity=True, #otherwise full tensor
nPSKsplineKnots=None):
self.nd = nd
self.g = dimensionless_gravity
##fluid properties
#normalized density terms
self.rhon = old_div(density_n,density_w)
self.rhow = old_div(density_w,density_w)
self.b = old_div(density_n,density_w)
#normalized viscosities
self.muw= old_div(viscosity_w,viscosity_w)
self.mun= old_div(viscosity_n,viscosity_w)
#density eos options
self.density_types={'Exponential':1,
'IdealGas':2}
self.density_w_model = density_w_parameters['model']
self.density_n_model = density_n_parameters['model']
assert self.density_w_model in self.density_types
assert self.density_n_model in self.density_types
self.density_w_parameters = density_w_parameters
self.density_n_parameters = density_n_parameters
for params,rwork in zip([self.density_w_parameters,self.density_n_parameters],
['rwork_density_w','rwork_density_n']):
if params is not None:
if params['model'] == 'Exponential':
setattr(self,rwork,numpy.array([old_div(params['rho_0'],params['rho_0']),#normalize by phase density
params['psi_0'],
params['beta']],dtype='d'))
elif params['model'] == 'IdealGas':
setattr(self,rwork,numpy.array([params['T'],
old_div(params['W'],params['rho_0']),#normalize by phase density
params['R'],
params['headToPressure'],
old_div(params['rho_0'],params['rho_0']),#normalize by phase density
params['psi_0']],dtype='d'))
else:
assert False, 'TwophaseDarcy_base density params= %s not found ' % params
#
##psk relations and heterogeneity
self.psk_model=psk_model
self.psk_types={'simp':0,
'VGM':1,
'VGB':2,
'BCM':3,
'BCB':4,
'PSKspline':5}
self.psk_tolerances={'default':{'eps_small':1.0e-16},
'VGM':{'eps_small':1.0e-16,'ns_del':1.0e-8}}
for psk_model_id in self.psk_types:
if psk_model_id not in self.psk_tolerances:
self.psk_tolerances[psk_model_id] = self.psk_tolerances['default']
self.nPskTolerances=1
assert(self.psk_model in list(self.psk_types.keys()))
self.nMaterialTypes = nMaterialTypes
#psk rwork array lengths
self.nPSKsplineKnots = nPSKsplineKnots
self.iwork_psk = numpy.zeros((2,),'i')
if self.psk_model == 'simp':
self.nPskParams=2
elif self.psk_model in ['VGM','VGB']:
self.nPskParams=4
if self.psk_model == 'VGM':
self.nPskTolerances=2
elif self.psk_model in ['BCM','BCB']:
self.nPskParams=4
elif self.psk_model in ['PSKspline']:
assert self.nPSKsplineKnots is not None
self.nPskParams=self.nPSKsplineKnots*4
self.iwork_psk[0] = self.nPSKsplineKnots
self.iwork_psk[1] = self.nPskParams
self.rwork_psk = None
self.rwork_psk_tolerances = numpy.zeros((self.nPskTolerances,),'d')
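#note: the packing below relies on the insertion order of the tolerances
#dict (guaranteed for Python >= 3.7), e.g. [eps_small,ns_del] for 'VGM'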
for i,tol in enumerate(self.psk_tolerances[self.psk_model].values()):
self.rwork_psk_tolerances[i] = tol
self.diagonal_conductivity=diagonal_conductivity
self.q = {}; self.ebqe = {}; self.ebq = {}; self.ebq_global={}; self.ip = {}
def initializeMesh(self,mesh):
self.elementMaterialTypes,self.exteriorElementBoundaryTypes,self.elementBoundaryTypes = BlockHeterogeneousCoefficients(mesh).initializeMaterialTypes()
self.mesh = mesh #for debugging
def initializeElementQuadrature(self,t,cq):
self.materialTypes_q = self.elementMaterialTypes
self.q_shape = cq[('u',0)].shape
#
cq['psi_n'] = numpy.zeros(cq[('u',0)].shape,'d')
cq[('dpsi_n',0)] = numpy.zeros(cq[('u',0)].shape,'d')
cq[('dpsi_n',1)] = numpy.zeros(cq[('u',0)].shape,'d')
#for visualization
cq['Ks'] = numpy.zeros(self.q_shape,'d')
for k in range(self.q_shape[1]):
cq['Ks'][:,k] = self.Ksw_types[self.elementMaterialTypes,0]
for ci in range(2):
self.q[('vol_frac',ci)] = numpy.zeros(self.q_shape,'d')
def initializeElementBoundaryQuadrature(self,t,cebq,cebq_global):
self.materialTypes_ebq = numpy.zeros(cebq[('u',0)].shape[0:2],'i')
self.ebq_shape = cebq[('u',0)].shape
for ebN_local in range(self.ebq_shape[1]):
self.materialTypes_ebq[:,ebN_local] = self.elementMaterialTypes
cebq['psi_n'] = numpy.zeros(cebq[('u',0)].shape,'d')
cebq[('dpsi_n',0)] = numpy.zeros(cebq[('u',0)].shape,'d')
cebq[('dpsi_n',1)] = numpy.zeros(cebq[('u',0)].shape,'d')
for ci in range(2):
self.ebq[('vol_frac',ci)] = numpy.zeros(self.ebq_shape,'d')
if ('u',0) in cebq_global:
cebq_global['psi_n'] = numpy.zeros(cebq_global[('u',0)].shape,'d')
cebq_global[('dpsi_n',0)] = numpy.zeros(cebq_global[('u',0)].shape,'d')
cebq_global[('dpsi_n',1)] = numpy.zeros(cebq_global[('u',0)].shape,'d')
for ci in range(2):
self.ebq_global[('vol_frac',ci)] = numpy.zeros(cebq_global[('u',0)].shape,'d')
def initializeGlobalExteriorElementBoundaryQuadrature(self,t,cebqe):
self.materialTypes_ebqe = self.exteriorElementBoundaryTypes
self.ebqe_shape = cebqe[('u',0)].shape
#
cebqe['psi_n'] = numpy.zeros(cebqe[('u',0)].shape,'d')
cebqe[('dpsi_n',0)] = numpy.zeros(cebqe[('u',0)].shape,'d')
cebqe[('dpsi_n',1)] = numpy.zeros(cebqe[('u',0)].shape,'d')
for ci in range(2):
self.ebqe[('vol_frac',ci)] = numpy.zeros(self.ebqe_shape,'d')
def initializeGeneralizedInterpolationPointQuadrature(self,t,cip):
self.materialTypes_ip = self.elementMaterialTypes
self.ip_shape = cip[('u',0)].shape
#
cip['psi_n'] = numpy.zeros(cip[('u',0)].shape,'d')
cip[('dpsi_n',0)] = numpy.zeros(cip[('u',0)].shape,'d')
cip[('dpsi_n',1)] = numpy.zeros(cip[('u',0)].shape,'d')
for ci in range(2):
self.ip[('vol_frac',ci)] = numpy.zeros(self.ip_shape,'d')
def setMaterialTypes(self,
Ksw_types=[1.0],
omega_types = [0.4],
Sw_max_types = [1.0],
Sw_min_types = [0.0],
bc_lambda_types = None,
bc_pd_types = None,
vg_alpha_types = None,
vg_m_types = None,
psk_spline_types = None):
self.nMaterialTypes=len(omega_types)
self.omega_types= omega_types
if self.psk_model == 'simp':
assert self.nPskParams == 2
self.rwork_psk = numpy.zeros((self.nMaterialTypes,self.nPskParams),'d')
for Sw_min,Sw_max,i in zip(Sw_min_types,Sw_max_types,list(range(self.nMaterialTypes))):
self.rwork_psk[i,0] = Sw_min
self.rwork_psk[i,1] = Sw_max
elif self.psk_model in ['VGM','VGB']:
assert(vg_alpha_types is not None and vg_m_types is not None)
assert self.nPskParams == 4
self.rwork_psk = numpy.zeros((self.nMaterialTypes,self.nPskParams),'d')
for Sw_min,Sw_max,vg_alpha,vg_m,i in zip(Sw_min_types,Sw_max_types,vg_alpha_types,vg_m_types,list(range(self.nMaterialTypes))):
self.rwork_psk[i,0] = Sw_min
self.rwork_psk[i,1] = Sw_max
self.rwork_psk[i,2] = vg_alpha
self.rwork_psk[i,3] = vg_m
elif self.psk_model in ['BCM','BCB']:
assert(bc_lambda_types is not None and bc_pd_types is not None)
assert self.nPskParams == 4
self.rwork_psk = numpy.zeros((self.nMaterialTypes,self.nPskParams),'d')
for Sw_min,Sw_max,bc_pd,bc_lambda,i in zip(Sw_min_types,Sw_max_types,bc_pd_types,bc_lambda_types,list(range(self.nMaterialTypes))):
self.rwork_psk[i,0] = Sw_min
self.rwork_psk[i,1] = Sw_max
self.rwork_psk[i,2] = bc_pd
self.rwork_psk[i,3] = bc_lambda
elif self.psk_model in ['PSKspline']:
assert psk_spline_types is not None
self.rwork_psk = psk_spline_types
assert self.rwork_psk.shape[0] == self.nMaterialTypes*self.nPskParams
#try to allow some flexibility in input of permeability/conductivity tensor
#keep track of input for debugging now
self.Ksw_types_in = Ksw_types
if self.diagonal_conductivity:
assert len(Ksw_types.shape) in [1,2], "if diagonal conductivity true then Ksw_types scalar or vector of diagonal entries"
#allow scalar input Ks
if len(Ksw_types.shape)==1:
self.Ksw_types = numpy.zeros((self.nMaterialTypes,self.nd),'d')
for I in range(self.nd):
self.Ksw_types[:,I] = Ksw_types
else:
self.Ksw_types = Ksw_types
else: #full
assert len(Ksw_types.shape) in [1,2], "if full tensor conductivity true then Ksw_types scalar or or 'flattened' row-major representation of entries"
if len(Ksw_types.shape)==1:
self.Ksw_types = numpy.zeros((self.nMaterialTypes,self.nd**2),'d')
for I in range(self.nd):
self.Ksw_types[:,I*self.nd+I] = Ksw_types
else:
assert Ksw_types.shape[1] == self.nd**2
self.Ksw_types = Ksw_types
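#A hedged sketch of the accepted Ksw_types layouts for nd=2 with two material
#types; the conductivity values are illustrative assumptions only:
#
# Ksw_scalar = numpy.array([1.0e-3,5.0e-5]) #one scalar per type, expanded
#                                           #onto the diagonal by the logic above
# Ksw_diag = numpy.array([[1.0e-3,1.0e-4],
#                         [5.0e-5,5.0e-6]]) #diagonal entries per type
# Ksw_full = numpy.array([[1.0e-3,0.0,0.0,1.0e-4],
#                         [5.0e-5,0.0,0.0,5.0e-6]]) #row-major nd*nd per type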
#not the best place for this
self.rwork_psk_tolerances = numpy.zeros((self.nPskTolerances,),'d')
for i,tol in enumerate(self.psk_tolerances[self.psk_model].values()):
self.rwork_psk_tolerances[i] = tol
# #mwf do some debugging
# assert self.elementMaterialTypes.max() < self.nMaterialTypes
# assert self.elementMaterialTypes.min() == 0
# assert self.exteriorElementBoundaryTypes.max() < self.nMaterialTypes
# assert self.exteriorElementBoundaryTypes.min() == 0
# assert self.elementBoundaryTypes.max() < self.nMaterialTypes
# assert self.elementBoundaryTypes.min() == 0
#
class TwophaseDarcy_fc(TwophaseDarcyFlow_base):
"""continuity equation for each phase
.. math::
\pd{m_w}{t} - \deld (\ten{a}_w \grad \phi_w) + r_w = 0
\pd{m_n}{t} - \deld (\ten{a}_n \grad \phi_n) + r_n = 0
"""
# (normalized) mass for each phase
# .. math::
# m_i = \theta_i \rho_i, i=w,n
# \theta_i = \theta_s S_i
# \rho_i = \varrho_i/\varrho_{i,0}, i=w,n
# where :math:`S_i` is the saturation for each phase,
# :math:`\varrho_{i}` is the density and :math:`\varrho_{i,0}` is a
# reference value
# (normalized) mass flux for each phase is
# .. math::
# \vec \sigma_i = - \ten{a}_{i}\grad \phi_i, i=w,n
# \ten{a}_i = \rho_i k_{r,i}/\hat{\mu}_i \ten{K}_s
# and the potentials are defined as
# .. math::
# \phi_{w} = \psi_{w} - \rho_w \vec g\cdot \vec x
# \phi_{n} = \psi_{n} - b \rho_n \vec g\cdot \vec x
# where
# .. math::
# \psi_{i} = p_{i}/|\vec g|\rho_{w,0}
# b = \varrho_{n,0}/\varrho_{w,0}
# \hat{\mu}_{i} = \mu_i/\mu_{w}
# for the pressure of each phase, :math:`p_i`, and we have the
# capillary pressure relation
# .. math::
# \psi_{c} = \psi_{n} - \psi_{w}
# The dependent variables are :math:`S_w`, and :math:`\psi_w`
# Note :math:`S_n = 1-S_w`
# needs to be implemented
# .. math::
# r_i = r_i(\vec x,t)
# slight compressibility assumed in spatial gradients
from proteus.cTwophaseDarcyCoefficients import twophaseDarcy_fc_sd_het_matType
def __init__(self,
nd=1,
dimensionless_gravity=[-1.0],
density_w=998.2, #Water kg/m^3
density_n=1.205, #Air kg/m^3
viscosity_w=8.9e-4, #Water kg/m s
viscosity_n=1.81e-5,#Air kg/ m s
density_w_parameters=TwophaseDarcyFlow_base.default_density_w_parameters,
density_n_parameters=TwophaseDarcyFlow_base.default_density_n_parameters,
psk_model='VGM',
nMaterialTypes=1,
diagonal_conductivity=True,
spatialCompressibilityFlag=0,
nPSKsplineKnots=None):#0, slight compressibility, full compressibility otherwise
TwophaseDarcyFlow_base.__init__(self,
nd=nd,
dimensionless_gravity=dimensionless_gravity,
density_w=density_w,
density_n=density_n,
viscosity_w=viscosity_w,
viscosity_n=viscosity_n,
density_w_parameters=density_w_parameters,
density_n_parameters=density_n_parameters,
psk_model=psk_model,
nMaterialTypes=nMaterialTypes,
diagonal_conductivity=diagonal_conductivity,
nPSKsplineKnots=nPSKsplineKnots)
nc=2
variableNames=['s_w','psi_w']
mass = {0:{0:'linear',1:'nonlinear'},
1:{0:'nonlinear',1:'nonlinear'}}
advection = {}
#advection = {0:{0:'nonlinear',1:'nonlinear'},
# 1:{0:'nonlinear',1:'nonlinear'}} #don't need 1:nonlinear if using slight compressibility
hamiltonian={}
potential = {0:{1:'nonlinear',#actually this is just linear if we use slight compressibility
0:'nonlinear'},#don't really need saturation dependence for potential
1:{0:'nonlinear',
1:'nonlinear'}}
diffusion = {0:{0:{0:'nonlinear',
1:'nonlinear'}},
1:{1:{0:'nonlinear',
1:'nonlinear'}}}
reaction = {0:{0:'linear'},
1:{1:'linear'}}
if self.diagonal_conductivity:
sparseDiffusionTensors = {(0,0):(numpy.arange(self.nd+1,dtype='i'),
numpy.arange(self.nd,dtype='i')),
(1,1):(numpy.arange(self.nd+1,dtype='i'),
numpy.arange(self.nd,dtype='i'))}
else: #full
sparseDiffusionTensors = {(0,0):(numpy.arange(self.nd**2+1,step=self.nd,dtype='i'),
numpy.array([list(range(self.nd)) for row in range(self.nd)],dtype='i')),
(1,1):(numpy.arange(self.nd**2+1,step=self.nd,dtype='i'),
numpy.array([list(range(self.nd)) for row in range(self.nd)],dtype='i'))}
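#the (rowptr,colind) pairs above encode the nonzero structure of each
#diffusion tensor block in a CSR-style layout; for nd=2 they evaluate to
# diagonal: rowptr=[0,1,2], colind=[0,1] (one entry per row)
# full: rowptr=[0,2,4], colind=[[0,1],[0,1]] (every entry of each row)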
TC_base.__init__(self,
nc,
mass,
advection,
diffusion,
potential,
reaction,
hamiltonian,
variableNames,
sparseDiffusionTensors = sparseDiffusionTensors,
useSparseDiffusion = True)
self.spatialCompressibilityFlag = spatialCompressibilityFlag
def evaluate(self,t,c):
if c[('u',0)].shape == self.q_shape:
materialTypes = self.materialTypes_q
vol_frac_w = self.q[('vol_frac',0)]
vol_frac_n = self.q[('vol_frac',1)]
elif c[('u',0)].shape == self.ebqe_shape:
materialTypes = self.materialTypes_ebqe
vol_frac_w = self.ebqe[('vol_frac',0)]
vol_frac_n = self.ebqe[('vol_frac',1)]
elif c[('u',0)].shape == self.ip_shape:
materialTypes = self.materialTypes_ip
vol_frac_w = self.ip[('vol_frac',0)]
vol_frac_n = self.ip[('vol_frac',1)]
elif c[('u',0)].shape == self.ebq_shape:
materialTypes = self.materialTypes_ebq
vol_frac_w = self.ebq[('vol_frac',0)]
vol_frac_n = self.ebq[('vol_frac',1)]
else:
assert False, "no materialType found to match c[('u',0)].shape= %s " % c[('u',0)].shape
assert self.rwork_psk is not None
assert self.iwork_psk is not None
#mwf do some debugging
assert materialTypes.max() < self.nMaterialTypes
assert materialTypes.min() == 0
self.twophaseDarcy_fc_sd_het_matType(self.psk_types[self.psk_model],
self.density_types[self.density_w_model],
self.density_types[self.density_n_model],
self.sdInfo[(0,0)][0],
self.sdInfo[(0,0)][1],
materialTypes,
self.muw,
self.mun,
self.omega_types,
self.Ksw_types,
self.b,
self.rwork_psk,self.iwork_psk,
self.rwork_psk_tolerances,
self.rwork_density_w,
self.rwork_density_n,
self.g[:self.nd],#todo get consistent on dimension setting
c['x'],
c[('u',0)],
c[('u',1)],
c[('m',0)],
c[('dm',0,0)],
c[('dm',0,1)],
c[('m',1)],
c[('dm',1,0)],
c[('dm',1,1)],
c['psi_n'],
c[('dpsi_n',0)],
c[('dpsi_n',1)],
c[('phi',0)],
c[('dphi',0,1)],
c[('phi',1)],
c[('dphi',1,1)],
c[('dphi',1,0)],
c[('a',0,0)],
c[('da',0,0,0)],
c[('da',0,0,1)],
c[('a',1,1)],
c[('da',1,1,0)],
c[('da',1,1,1)])
#mwf hack, need to put this in?
c[('dphi',0,0)].fill(0.0)
self.twophaseDarcy_vol_frac(materialTypes,
self.omega_types,
c[('u',0)],
vol_frac_w,
vol_frac_n)
#mwf debug
if (numpy.isnan(c[('da',0,0,0)]).any() or
numpy.isnan(c[('a',0,0)]).any() or
numpy.isnan(c[('da',1,1,0)]).any() or
numpy.isnan(c[('a',1,1)]).any() or
numpy.isnan(c[('u',0)]).any() or
numpy.isnan(c[('m',0)]).any() or
numpy.isnan(c[('dm',0,0)]).any()):
import pdb
pdb.set_trace()
#
class FullyCoupledMualemVanGenuchten(TwophaseDarcy_fc):
"""Formulation using phase continuity equations and Van-Genuchten
Mualem psk relations
Basically a convenience wrapper for fully coupled approximation
with volume-fraction based inputs as in Richards' equation
formulations
"""
def __init__(self,
nd,
Ksw_types,
vgm_n_types,
vgm_alpha_types,
thetaR_types,
thetaSR_types,
dimensionless_gravity,
density_w,
density_n,
viscosity_w,
viscosity_n,
density_w_params,
density_n_params,
diagonal_conductivity=True,
vgm_small_eps=1.0e-16,
vgm_ns_del=1.0e-8,
use_spline = False,
nPSKsplineKnots=None):
self.use_spline = use_spline
psk_model = 'VGM'
if self.use_spline:
assert nPSKsplineKnots is not None
psk_model = 'PSKspline'
TwophaseDarcy_fc.__init__(self,
nd=nd,
dimensionless_gravity=dimensionless_gravity,
density_w=density_w,
density_n=density_n,
viscosity_w=viscosity_w,
viscosity_n=viscosity_n,
density_w_parameters=density_w_params,
density_n_parameters=density_n_params,
psk_model=psk_model,
nMaterialTypes=len(thetaR_types),
diagonal_conductivity=diagonal_conductivity,
nPSKsplineKnots=nPSKsplineKnots)
for input in [vgm_n_types,vgm_alpha_types,thetaR_types,thetaSR_types]:
assert len(input)==self.nMaterialTypes
vgm_m_types = 1.0-old_div(1.0,vgm_n_types)
thetaS_types = thetaSR_types + thetaR_types
Sw_max_types = numpy.ones((self.nMaterialTypes,),'d')
Sw_min_types = old_div(thetaR_types,thetaS_types)
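#worked example of the derived parameters (illustrative inputs): for
#vgm_n=2.0, thetaR=0.1, thetaSR=0.3 one gets vgm_m=1-1/2=0.5, thetaS=0.4,
#Sw_max=1.0, and Sw_min=0.1/0.4=0.25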
self.psk_tolerances['VGM']['eps_small']=vgm_small_eps
self.psk_tolerances['VGM']['ns_del'] =vgm_ns_del
if self.use_spline:
self.splineTableWork = numpy.zeros((self.nPskParams*self.nMaterialTypes),'d')
pskCalcFlag = 0 #formulation is f(Sw)
rwork_tmp = numpy.zeros((4,),'d')
rwork_tol_tmp = numpy.zeros((2,),'d')
rwork_tol_tmp[0] = max(vgm_small_eps,1.0e-5); rwork_tol_tmp[1] = vgm_ns_del
plot_splines = False
if plot_splines:
#mwf debug
import pdb
import matplotlib
from matplotlib import pylab
for i in range(self.nMaterialTypes):
sw_domain = numpy.zeros((self.nPSKsplineKnots,),'d')
sw_domain[1:-1] = numpy.linspace(Sw_min_types[i],Sw_max_types[i],num=self.nPSKsplineKnots-2)
sw_domain[0] = 0.99*Sw_min_types[i]; sw_domain[-1] = 1.01*Sw_max_types[i]
materialOffset = i*self.nPskParams
rwork_tmp[0] = Sw_min_types[i]; rwork_tmp[1] = Sw_max_types[i];
rwork_tmp[2] = vgm_alpha_types[i]; rwork_tmp[3]= vgm_m_types[i]
self.generateSplineTables(self.psk_types['VGM'],
materialOffset,
pskCalcFlag,
sw_domain,
rwork_tmp,
self.iwork_psk,
rwork_tol_tmp,
self.splineTableWork)
#mwf debug
if plot_splines:
pdb.set_trace()
pylab.figure(1); pylab.plot(self.splineTableWork[materialOffset:materialOffset+self.nPSKsplineKnots],
self.splineTableWork[materialOffset+self.nPSKsplineKnots:materialOffset+2*self.nPSKsplineKnots],'b--')
pylab.figure(2); pylab.plot(self.splineTableWork[materialOffset:materialOffset+self.nPSKsplineKnots],
self.splineTableWork[materialOffset+2*self.nPSKsplineKnots:materialOffset+3*self.nPSKsplineKnots],'r--')
pylab.figure(2); pylab.plot(self.splineTableWork[materialOffset:materialOffset+self.nPSKsplineKnots],
self.splineTableWork[materialOffset+3*self.nPSKsplineKnots:materialOffset+4*self.nPSKsplineKnots],'g--')
pylab.show()
#
self.setMaterialTypes(Ksw_types=Ksw_types,
omega_types=thetaS_types,
Sw_max_types=Sw_max_types,
Sw_min_types=Sw_min_types,
psk_spline_types=self.splineTableWork)
else:
self.setMaterialTypes(Ksw_types=Ksw_types,
omega_types=thetaS_types,
Sw_max_types=Sw_max_types,
Sw_min_types=Sw_min_types,
vg_alpha_types=vgm_alpha_types,
vg_m_types=vgm_m_types)
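#A minimal construction sketch for FullyCoupledMualemVanGenuchten assuming a
#single homogeneous 2D material type; all numbers are illustrative
#assumptions and the density parameter dicts reuse the defaults above:
#
# coefficients = FullyCoupledMualemVanGenuchten(
#     nd=2,
#     Ksw_types=numpy.array([1.0e-3]),
#     vgm_n_types=numpy.array([2.0]),
#     vgm_alpha_types=numpy.array([5.5]),
#     thetaR_types=numpy.array([0.1]),
#     thetaSR_types=numpy.array([0.3]),
#     dimensionless_gravity=numpy.array([0.0,-1.0]),
#     density_w=998.2,density_n=1.205,
#     viscosity_w=8.9e-4,viscosity_n=1.81e-5,
#     density_w_params=TwophaseDarcyFlow_base.default_density_w_parameters,
#     density_n_params=TwophaseDarcyFlow_base.default_density_n_parameters)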
class FullyCoupledSimplePSKs(TwophaseDarcy_fc):
"""Formulation using phase continuity equations and 'simp' quadratic
rel-perm, linear capillary pressure psk relations
"""
def __init__(self,
nd,
Ksw_types,
thetaR_types,
thetaSR_types,
dimensionless_gravity,
density_w,
density_n,
viscosity_w,
viscosity_n,
density_w_params,
density_n_params,
diagonal_conductivity=True):
TwophaseDarcy_fc.__init__(self,
nd=nd,
dimensionless_gravity=dimensionless_gravity,
density_w=density_w,
density_n=density_n,
viscosity_w=viscosity_w,
viscosity_n=viscosity_n,
density_w_parameters=density_w_params,
density_n_parameters=density_n_params,
psk_model='simp',
nMaterialTypes=len(thetaR_types),
diagonal_conductivity=diagonal_conductivity)
for input in [thetaR_types,thetaSR_types]:
assert len(input)==self.nMaterialTypes
thetaS_types = thetaSR_types + thetaR_types
Sw_max_types = numpy.ones((self.nMaterialTypes,),'d')
Sw_min_types = old_div(thetaR_types,thetaS_types)
self.setMaterialTypes(Ksw_types=Ksw_types,
omega_types=thetaS_types,
Sw_max_types=Sw_max_types,
Sw_min_types=Sw_min_types)
class TwophaseDarcy_fc_pp(TwophaseDarcyFlow_base):
"""continuity equation for each phase
.. math::
\pd{m_w}{t} - \deld (\ten{a}_w \grad \phi_w) + r_w = 0
\pd{m_n}{t} - \deld (\ten{a}_n \grad \phi_n) + r_n = 0
"""
# (normalized) mass for each phase :math:`m_i = \theta_i \rho_i,
# i=w,n`
# .. math::
# \theta_i = \theta_s S_i
# \rho_i = \varrho_i/\varrho_{i,0}, i=w,n
# where :math:`S_i` is the saturation for each phase,
# :math:`\varrho_{i}` is the density and :math:`\varrho_{i,0}` is a
# reference value
# (normalized) mass flux for each phase is
# .. math::
# \vec \sigma_i = - \ten{a}_{i}\grad \phi_i, i=w,n
# \ten{a}_i = \rho_i k_{r,i}/\hat{\mu}_i \ten{K}_s
# and the potentials are defined as
# .. math::
# \phi_{w} = \psi_{w} - \rho_w \vec g\cdot \vec x
# \phi_{n} = \psi_{n} - b \rho_n \vec g\cdot \vec x
# where
# .. math::
# \psi_{i} = p_{i}/|\vec g|\rho_{w,0}
# b = \varrho_{n,0}/\varrho_{w,0}
# \hat{\mu}_{i} = \mu_i/\mu_{w}
# for the pressure of each phase, p_i, and we have the capillary pressure relation
# .. math::
# \psi_{c} = \psi_{n} - \psi_{w}
# The dependent variables are
# .. math::
# \psi_w, and \psi_c
# Note :math:`S_n = 1-S_w`
# needs to be implemented
# .. math::
# r_i = r_i(\vec x,t)
# slight compressibility assumed in spatial gradients
# """
from proteus.cTwophaseDarcyCoefficients import twophaseDarcy_fc_pp_sd_het_matType
def __init__(self,
nd=1,
dimensionless_gravity=[-1.0],
density_w=998.2, #Water kg/m^3
density_n=1.205, #Air kg/m^3
viscosity_w=8.9e-4, #Water kg/m s
viscosity_n=1.81e-5,#Air kg/ m s
density_w_parameters=TwophaseDarcyFlow_base.default_density_w_parameters,
density_n_parameters=TwophaseDarcyFlow_base.default_density_n_parameters,
psk_model='VGM',
nMaterialTypes=1,
diagonal_conductivity=True,
nPSKsplineKnots=None):
TwophaseDarcyFlow_base.__init__(self,
nd=nd,
dimensionless_gravity=dimensionless_gravity,
density_w=density_w,
density_n=density_n,
viscosity_w=viscosity_w,
viscosity_n=viscosity_n,
density_w_parameters=density_w_parameters,
density_n_parameters=density_n_parameters,
psk_model=psk_model,
nMaterialTypes=nMaterialTypes,
diagonal_conductivity=diagonal_conductivity,
nPSKsplineKnots=nPSKsplineKnots)
nc=2
variableNames=['psi_w','psi_c']
mass = {0:{0:'nonlinear',1:'nonlinear'},
1:{0:'nonlinear',1:'nonlinear'}}
advection = {}
hamiltonian={}
potential = {0:{1:'nonlinear',#actually this is just linear if we use slight compressibility
0:'nonlinear'},#don't really need saturation dependence for potential
1:{0:'nonlinear',
1:'nonlinear'}}
diffusion = {0:{0:{0:'nonlinear',
1:'nonlinear'}},
1:{1:{0:'nonlinear',
1:'nonlinear'}}}
reaction = {0:{0:'linear'},
1:{1:'linear'}}
if self.diagonal_conductivity:
sparseDiffusionTensors = {(0,0):(numpy.arange(self.nd+1,dtype='i'),
numpy.arange(self.nd,dtype='i')),
(1,1):(numpy.arange(self.nd+1,dtype='i'),
numpy.arange(self.nd,dtype='i'))}
else: #full
sparseDiffusionTensors = {(0,0):(numpy.arange(self.nd**2+1,step=self.nd,dtype='i'),
numpy.array([list(range(self.nd)) for row in range(self.nd)],dtype='i')),
(1,1):(numpy.arange(self.nd**2+1,step=self.nd,dtype='i'),
numpy.array([list(range(self.nd)) for row in range(self.nd)],dtype='i'))}
TC_base.__init__(self,
nc,
mass,
advection,
diffusion,
potential,
reaction,
hamiltonian,
variableNames,
sparseDiffusionTensors = sparseDiffusionTensors,
useSparseDiffusion = True)
def initializeElementQuadrature(self,t,cq):
TwophaseDarcyFlow_base.initializeElementQuadrature(self,t,cq)
#
cq['sw'] = numpy.zeros(cq[('u',0)].shape,'d')
cq[('dpsi_n',0)].fill(1.0)
cq[('dpsi_n',1)].fill(1.0)
def initializeElementBoundaryQuadrature(self,t,cebq,cebq_global):
TwophaseDarcyFlow_base.initializeElementBoundaryQuadrature(self,t,cebq,cebq_global)
if ('u',0) in cebq:
cebq['sw'] = numpy.zeros(cebq[('u',0)].shape,'d')
cebq[('dpsi_n',0)].fill(1.0)
cebq[('dpsi_n',1)].fill(1.0)
if ('u',0) in cebq_global:
cebq_global['sw'] = numpy.zeros(cebq_global[('u',0)].shape,'d')
cebq_global[('dpsi_n',0)].fill(1.0)
cebq_global[('dpsi_n',1)].fill(1.0)
def initializeGlobalExteriorElementBoundaryQuadrature(self,t,cebqe):
TwophaseDarcyFlow_base.initializeGlobalExteriorElementBoundaryQuadrature(self,t,cebqe)
cebqe['sw'] = numpy.zeros(cebqe[('u',0)].shape,'d')
cebqe[('dpsi_n',0)].fill(1.0)
cebqe[('dpsi_n',1)].fill(1.0)
def initializeGeneralizedInterpolationPointQuadrature(self,t,cip):
TwophaseDarcyFlow_base.initializeGeneralizedInterpolationPointQuadrature(self,t,cip)
cip['sw'] = numpy.zeros(cip[('u',0)].shape,'d')
cip[('dpsi_n',0)].fill(1.0)
cip[('dpsi_n',1)].fill(1.0)
def evaluate(self,t,c):
if c[('u',0)].shape == self.q_shape:
materialTypes = self.materialTypes_q
vol_frac_w = self.q[('vol_frac',0)]
vol_frac_n = self.q[('vol_frac',1)]
elif c[('u',0)].shape == self.ebqe_shape:
materialTypes = self.materialTypes_ebqe
vol_frac_w = self.ebqe[('vol_frac',0)]
vol_frac_n = self.ebqe[('vol_frac',1)]
elif c[('u',0)].shape == self.ip_shape:
materialTypes = self.materialTypes_ip
vol_frac_w = self.ip[('vol_frac',0)]
vol_frac_n = self.ip[('vol_frac',1)]
elif c[('u',0)].shape == self.ebq_shape:
materialTypes = self.materialTypes_ebq
vol_frac_w = self.ebq[('vol_frac',0)]
vol_frac_n = self.ebq[('vol_frac',1)]
else:
assert False, "no materialType found to match c[('u',0)].shape= %s " % c[('u',0)].shape
assert self.rwork_psk is not None
#mwf do some debugging
assert materialTypes.max() < self.nMaterialTypes
assert materialTypes.min() == 0
self.twophaseDarcy_fc_pp_sd_het_matType(self.psk_types[self.psk_model],
self.density_types[self.density_w_model],
self.density_types[self.density_n_model],
self.sdInfo[(0,0)][0],
self.sdInfo[(0,0)][1],
materialTypes,
self.muw,
self.mun,
self.omega_types,
self.Ksw_types,
self.b,
self.rwork_psk,self.iwork_psk,
self.rwork_psk_tolerances,
self.rwork_density_w,
self.rwork_density_n,
self.g[:self.nd],#todo get consistent on dimension setting
c['x'],
c[('u',0)],
c[('u',1)],
c['sw'],
c[('m',0)],
c[('dm',0,0)],
c[('dm',0,1)],
c[('m',1)],
c[('dm',1,0)],
c[('dm',1,1)],
c[('phi',0)],
c[('dphi',0,0)],
c[('phi',1)],
c[('dphi',1,0)],
c[('dphi',1,1)],
c[('a',0,0)],
c[('da',0,0,0)],
c[('da',0,0,1)],
c[('a',1,1)],
c[('da',1,1,0)],
c[('da',1,1,1)])
#todo put this back in cpp eval
#c['psi_n'][:] = c[('u',0)]
#c['psi_n'] += c[('u',1)]
#c[('dpsi_n',0)].fill(1.0)
#c[('dpsi_n',1)].fill(1.0)
c['psi_n'][:] = c[('phi',1)]
c[('dpsi_n',0)][:] = c[('dphi',1,0)]
c[('dpsi_n',1)][:] = c[('dphi',1,1)]
self.twophaseDarcy_vol_frac(materialTypes,
self.omega_types,
c['sw'],
vol_frac_w,
vol_frac_n)
#mwf debug
if (numpy.isnan(c[('da',0,0,0)]).any() or
numpy.isnan(c[('a',0,0)]).any() or
numpy.isnan(c[('da',1,1,0)]).any() or
numpy.isnan(c[('a',1,1)]).any() or
numpy.isnan(c[('u',0)]).any() or
numpy.isnan(c[('m',0)]).any() or
numpy.isnan(c[('dm',0,0)]).any()):
import pdb
pdb.set_trace()
#
class FullyCoupledPressurePressureMualemVanGenuchten(TwophaseDarcy_fc_pp):
"""Formulation using phase continuity equations, pressure-pressure
formulation and Van-Genuchten Mualem psk relations
Basically a convenience wrapper for fully coupled approximation
with volume-fraction based inputs as in Richards' equation
formulations
"""
def __init__(self,
nd,
Ksw_types,
vgm_n_types,
vgm_alpha_types,
thetaR_types,
thetaSR_types,
dimensionless_gravity,
density_w,
density_n,
viscosity_w,
viscosity_n,
density_w_params,
density_n_params,
diagonal_conductivity=True,
vgm_small_eps=1.0e-16,
vgm_ns_del=1.0e-8,
use_spline = False,
nPSKsplineKnots=None):
self.use_spline = use_spline
psk_model = 'VGM'
if self.use_spline:
assert nPSKsplineKnots is not None
psk_model = 'PSKspline'
TwophaseDarcy_fc_pp.__init__(self,
nd=nd,
dimensionless_gravity=dimensionless_gravity,
density_w=density_w,
density_n=density_n,
viscosity_w=viscosity_w,
viscosity_n=viscosity_n,
density_w_parameters=density_w_params,
density_n_parameters=density_n_params,
psk_model=psk_model,
nMaterialTypes=len(thetaR_types),
diagonal_conductivity=diagonal_conductivity,
nPSKsplineKnots=nPSKsplineKnots)
for input in [vgm_n_types,vgm_alpha_types,thetaR_types,thetaSR_types]:
assert len(input)==self.nMaterialTypes
vgm_m_types = 1.0-old_div(1.0,vgm_n_types)
thetaS_types = thetaSR_types + thetaR_types
Sw_max_types = numpy.ones((self.nMaterialTypes,),'d')
Sw_min_types = old_div(thetaR_types,thetaS_types)
self.psk_tolerances['VGM']['eps_small']=vgm_small_eps
self.psk_tolerances['VGM']['ns_del'] =vgm_ns_del
if self.use_spline:
from . import cpskRelations
self.splineTableWork = numpy.zeros((self.nPskParams*self.nMaterialTypes),'d')
pskCalcFlag = 1 #formulation is f(psic)
rwork_tmp = numpy.zeros((4,),'d')
rwork_tol_tmp = numpy.zeros((2,),'d')
rwork_tol_tmp[0] = max(vgm_small_eps,1.0e-5); rwork_tol_tmp[1] = vgm_ns_del
#mwf come up with this as an input
psic_min_types=numpy.zeros((self.nMaterialTypes,),'d')
psic_max_types=numpy.zeros((self.nMaterialTypes,),'d')
for i in range(self.nMaterialTypes):
psic,krw,krn,dpsic,dkrw,dkrn= cpskRelations.vgm_calc_from_sw(Sw_min_types[i],Sw_min_types[i],Sw_max_types[i],
vgm_alpha_types[i],vgm_m_types[i],vgm_small_eps,vgm_ns_del)
psic_max_types[i]=3.0#psic*0.1
psic_min_types[i]=max(vgm_small_eps,1.0e-5)
#mwf debug (disabled: these unconditional debug imports are only needed by
#the plotting block below)
#import pdb
#import matplotlib
#from matplotlib import pylab
for i in range(self.nMaterialTypes):
psic_domain = numpy.zeros((self.nPSKsplineKnots,),'d')
psic_domain[1:-1] = numpy.linspace(psic_min_types[i],psic_max_types[i],num=self.nPSKsplineKnots-2)
psic_domain[0] = 0.99*psic_min_types[i]; psic_domain[-1] = 1.01*psic_max_types[i]
materialOffset = i*self.nPskParams
rwork_tmp[0] = Sw_min_types[i]; rwork_tmp[1] = Sw_max_types[i];
rwork_tmp[2] = vgm_alpha_types[i]; rwork_tmp[3]= vgm_m_types[i]
self.generateSplineTables(self.psk_types['VGM'],
materialOffset,
pskCalcFlag,
psic_domain,
rwork_tmp,
self.iwork_psk,
rwork_tol_tmp,
self.splineTableWork)
#mwf debug (disabled: the unconditional pdb.set_trace()/pylab.show() would
#halt runs; note the slice offsets below also add materialOffset, matching
#FullyCoupledMualemVanGenuchten above)
#pdb.set_trace()
#pylab.figure(1); pylab.plot(self.splineTableWork[materialOffset:materialOffset+self.nPSKsplineKnots],
# self.splineTableWork[materialOffset+self.nPSKsplineKnots:materialOffset+2*self.nPSKsplineKnots],'b--')
#pylab.figure(2); pylab.plot(self.splineTableWork[materialOffset:materialOffset+self.nPSKsplineKnots],
# self.splineTableWork[materialOffset+2*self.nPSKsplineKnots:materialOffset+3*self.nPSKsplineKnots],'r--')
#pylab.figure(2); pylab.plot(self.splineTableWork[materialOffset:materialOffset+self.nPSKsplineKnots],
# self.splineTableWork[materialOffset+3*self.nPSKsplineKnots:materialOffset+4*self.nPSKsplineKnots],'g--')
#pylab.show()
#
self.setMaterialTypes(Ksw_types=Ksw_types,
omega_types=thetaS_types,
Sw_max_types=Sw_max_types,
Sw_min_types=Sw_min_types,
psk_spline_types=self.splineTableWork)
else:
self.setMaterialTypes(Ksw_types=Ksw_types,
omega_types=thetaS_types,
Sw_max_types=Sw_max_types,
Sw_min_types=Sw_min_types,
vg_alpha_types=vgm_alpha_types,
vg_m_types=vgm_m_types)
class FullyCoupledPressurePressureSimplePSKs(TwophaseDarcy_fc_pp):
"""Formulation using phase continuity equations and 'simp' quadratic
rel-perm, linear capillary pressure psk relations
pressure-pressure formulation
"""
def __init__(self,
nd,
Ksw_types,
thetaR_types,
thetaSR_types,
dimensionless_gravity,
density_w,
density_n,
viscosity_w,
viscosity_n,
density_w_params,
density_n_params,
diagonal_conductivity=True):
TwophaseDarcy_fc_pp.__init__(self,
nd=nd,
dimensionless_gravity=dimensionless_gravity,
density_w=density_w,
density_n=density_n,
viscosity_w=viscosity_w,
viscosity_n=viscosity_n,
density_w_parameters=density_w_params,
density_n_parameters=density_n_params,
psk_model='simp',
nMaterialTypes=len(thetaR_types),
diagonal_conductivity=diagonal_conductivity)
for input in [thetaR_types,thetaSR_types]:
assert len(input)==self.nMaterialTypes
thetaS_types = thetaSR_types + thetaR_types
Sw_max_types = numpy.ones((self.nMaterialTypes,),'d')
Sw_min_types = old_div(thetaR_types,thetaS_types)
self.setMaterialTypes(Ksw_types=Ksw_types,
omega_types=thetaS_types,
Sw_max_types=Sw_max_types,
Sw_min_types=Sw_min_types)
###########
class TwophaseDarcy_split_pressure_base(TwophaseDarcyFlow_base):
r"""Base class for the 'pressure' or total flow conservation equation in
fractional flow formulations.
The primary functionality of the base class is to handle synchronization
with a 'saturation' model to get the saturation, :math:`S_w`, and
capillary pressure (head), :math:`\psi_c`, variables
"""
def __init__(self,
nd=1,
dimensionless_gravity=[-1.0],
density_w=998.2, #Water kg/m^3
density_n=1.205, #Air kg/m^3
viscosity_w=8.9e-4, #Water kg/m s
viscosity_n=1.81e-5,#Air kg/ m s
density_w_parameters=TwophaseDarcyFlow_base.default_density_w_parameters,
density_n_parameters=TwophaseDarcyFlow_base.default_density_n_parameters,
psk_model='VGM',
nMaterialTypes=1,
nSatModel=1,
diagonal_conductivity=True,
#for debugging
swConstant=1.0,
capillaryDiffusionScaling=1.0):
TwophaseDarcyFlow_base.__init__(self,
nd=nd,
dimensionless_gravity=dimensionless_gravity,
density_w=density_w,
density_n=density_n,
viscosity_w=viscosity_w,
viscosity_n=viscosity_n,
density_w_parameters=density_w_parameters,
density_n_parameters=density_n_parameters,
psk_model=psk_model,
nMaterialTypes=nMaterialTypes,
diagonal_conductivity=diagonal_conductivity)
self.nc=1
self.nd=nd
#
self.nSatModel=nSatModel
#for debugging
self.swConstant=swConstant
self.capillaryDiffusionScaling=capillaryDiffusionScaling
def attachModels(self,modelList):
if self.nSatModel is None:
print('Warning: TwophaseDarcy_split_pressure_base nSatModel is None; returning from attachModels')
return
#not ideal, but need a way to force nonlinear potential to be evaluated in saturation model
modelList[self.nSatModel].calculateElementCoefficients()
self.q_s_w = modelList[self.nSatModel].q[('u',0)]
self.ebqe_s_w = modelList[self.nSatModel].ebqe[('u',0)]
if ('u',0) in modelList[self.nSatModel].ebq:
self.ebq_s_w = modelList[self.nSatModel].ebq[('u',0)]
#mwf need to check
assert ('u',0) in modelList[self.nSatModel].phi_ip
assert self.ip_s_w.shape == modelList[self.nSatModel].phi_ip[('u',0)].shape
self.ip_s_w = modelList[self.nSatModel].phi_ip[('u',0)]
#mwf hack 04/03/09 skip
#assert modelList[self.nSatModel].phi_ip.has_key(('grad(phi)',0))
self.ip_grad_psic = None#modelList[self.nSatModel].phi_ip[('grad(phi)',0)]
#mwf end ip stuff
self.q_grad_psic = modelList[self.nSatModel].q[('grad(phi)',0)]
self.ebqe_grad_psic = modelList[self.nSatModel].ebqe[('grad(phi)',0)]
if ('grad(phi)',0) in modelList[self.nSatModel].ebq:
self.ebq_grad_psic = modelList[self.nSatModel].ebq[('grad(phi)',0)]
self.q_psic = modelList[self.nSatModel].q[('phi',0)]
self.ebqe_psic= modelList[self.nSatModel].ebqe[('phi',0)]
if ('phi',0) in modelList[self.nSatModel].ebq:
self.ebq_psic = modelList[self.nSatModel].ebq[('phi',0)]
assert ('phi',0) in modelList[self.nSatModel].phi_ip
assert self.ip_psic.shape == modelList[self.nSatModel].phi_ip[('phi',0)].shape
self.ip_psic = modelList[self.nSatModel].phi_ip[('phi',0)]
#
self.q_grad_sw = modelList[self.nSatModel].q[('grad(u)',0)]
self.ebqe_grad_sw = modelList[self.nSatModel].ebqe[('grad(u)',0)]
if ('grad(u)',0) in modelList[self.nSatModel].ebq:
self.ebq_grad_sw = modelList[self.nSatModel].ebq[('grad(u)',0)]
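#A hedged sketch of the intended model wiring for the split formulation: the
#numerics driver holds an ordered modelList, this class expects the
#saturation model at index nSatModel, and the saturation base class below
#expects the pressure model at index nPressModel, e.g. (the saturation class
#name here is a hypothetical placeholder)
#
# pressure coefficients:   TwophaseDarcy_incompressible_split_pressure(nSatModel=1,...)
# saturation coefficients: SomeSplitSaturationSubclass(nPressModel=0,...)
#
#so that modelList[0] solves the pressure equation and modelList[1] the
#saturation equation, matching the defaults nSatModel=1 and nPressModel=0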
def initializeElementQuadrature(self,t,cq):
TwophaseDarcyFlow_base.initializeElementQuadrature(self,t,cq)
#set up dummy values in case we're not running the other model
self.q_s_w = numpy.zeros(cq[('u',0)].shape,'d')
self.q_s_w.fill(self.swConstant)
self.q_grad_psic = numpy.zeros(cq[('f',0)].shape,'d')
self.q_psic = numpy.zeros(cq[('u',0)].shape,'d')
self.q_grad_sw = numpy.zeros(cq[('f',0)].shape,'d')
#mwf not sure if this is ok
cq['psi_n'] = numpy.zeros(cq[('u',0)].shape,'d')
cq[('dpsi_n',0)] = numpy.ones(cq[('u',0)].shape,'d')
def initializeElementBoundaryQuadrature(self,t,cebq,cebq_global):
TwophaseDarcyFlow_base.initializeElementBoundaryQuadrature(self,t,cebq,cebq_global)
#set up dummy values in case we're not running the other model
self.ebq_s_w = numpy.zeros(cebq[('u',0)].shape,'d')
self.ebq_s_w.fill(self.swConstant)
self.ebq_grad_psic = numpy.zeros(cebq[('f',0)].shape,'d')
self.ebq_psic = numpy.zeros(cebq[('u',0)].shape,'d')
self.ebq_grad_sw = numpy.zeros(cebq[('f',0)].shape,'d')
if ('u',0) in cebq:
cebq['psi_n'] = numpy.zeros(cebq[('u',0)].shape,'d')
cebq[('dpsi_n',0)] = numpy.ones(cebq[('u',0)].shape,'d')
if ('u',0) in cebq_global:
cebq_global['psi_n'] = numpy.zeros(cebq_global[('u',0)].shape,'d')
cebq_global[('dpsi_n',0)] = numpy.ones(cebq_global[('u',0)].shape,'d')
def initializeGlobalExteriorElementBoundaryQuadrature(self,t,cebqe):
TwophaseDarcyFlow_base.initializeGlobalExteriorElementBoundaryQuadrature(self,t,cebqe)
#set up dummy values in case we're not running the other model
self.ebqe_s_w = numpy.zeros(cebqe[('u',0)].shape,'d')
self.ebqe_s_w.fill(self.swConstant)
self.ebqe_grad_psic = numpy.zeros(cebqe[('f',0)].shape,'d')
self.ebqe_psic = numpy.zeros(cebqe[('u',0)].shape,'d')
self.ebqe_grad_sw = numpy.zeros(cebqe[('f',0)].shape,'d')
cebqe['psi_n'] = numpy.zeros(cebqe[('u',0)].shape,'d')
cebqe[('dpsi_n',0)] = numpy.ones(cebqe[('u',0)].shape,'d')
def initializeGeneralizedInterpolationPointQuadrature(self,t,cip):
TwophaseDarcyFlow_base.initializeGeneralizedInterpolationPointQuadrature(self,t,cip)
#set up dummy values in case we're not running the other model
self.ip_s_w = numpy.zeros(cip[('u',0)].shape,'d')
self.ip_s_w.fill(self.swConstant)
self.ip_grad_psic = numpy.zeros(cip[('f',0)].shape,'d')
self.ip_psic = numpy.zeros(cip[('u',0)].shape,'d')
self.ip_grad_sw = numpy.zeros(cip[('f',0)].shape,'d')
cip['psi_n'] = numpy.zeros(cip[('u',0)].shape,'d')
cip[('dpsi_n',0)] = numpy.ones(cip[('u',0)].shape,'d')
#
class TwophaseDarcy_split_saturation_base(TwophaseDarcyFlow_base):
"""Base class for aqueous phase mass conservation equation
(saturation equation) in a fractional flow formulation.
The primary responsibility of the base class is to handle
synchronization with the 'pressure' equation to get the total flow
velocity variable, q_t, and aqueous phase pressure head, psi_w
"""
def __init__(self,
nd=1,
dimensionless_gravity=[-1.0],
density_w=998.2, #Water kg/m^3
density_n=1.205, #Air kg/m^3
viscosity_w=8.9e-4, #Water kg/m s
viscosity_n=1.81e-5,#Air kg/ m s
density_w_parameters=TwophaseDarcyFlow_base.default_density_w_parameters,
density_n_parameters=TwophaseDarcyFlow_base.default_density_n_parameters,
psk_model='VGM',
nMaterialTypes=1,
nPressModel=0,
diagonal_conductivity=True,
#for debugging
qScalarConstant=1.0,
capillaryDiffusionScaling=1.0,
advectionScaling=1.0):
TwophaseDarcyFlow_base.__init__(self,
nd=nd,
dimensionless_gravity=dimensionless_gravity,
density_w=density_w,
density_n=density_n,
viscosity_w=viscosity_w,
viscosity_n=viscosity_n,
density_w_parameters=density_w_parameters,
density_n_parameters=density_n_parameters,
psk_model=psk_model,
nMaterialTypes=nMaterialTypes,
diagonal_conductivity=diagonal_conductivity)
self.nc=1
self.nd=nd
self.nPressModel=nPressModel
#for debugging
self.qScalarConstant=qScalarConstant
self.capillaryDiffusionScaling=capillaryDiffusionScaling
self.advectionScaling=advectionScaling
def attachModels(self,modelList):
if self.nPressModel is None:
print('Warning: TwophaseDarcy_split_saturation_base nPressModel is None; returning from attachModels')
return
self.flowModel = modelList[self.nPressModel]
#
self.q_q_t = modelList[self.nPressModel].q[('velocity',0)]
self.ebqe_q_t = modelList[self.nPressModel].ebqe[('velocity',0)]
if ('velocity',0) in modelList[self.nPressModel].ebq:
self.ebq_q_t = modelList[self.nPressModel].ebq[('velocity',0)]
#do we really need other model values for q_t in potential calculation?
assert self.ip_psiw.shape == modelList[self.nPressModel].phi_ip[('u',0)].shape
self.ip_psiw = modelList[self.nPressModel].phi_ip[('u',0)]
self.q_psiw = modelList[self.nPressModel].q[('u',0)]
self.ebqe_psiw = modelList[self.nPressModel].ebqe[('u',0)]
if ('u',0) in modelList[self.nPressModel].ebq:
self.ebq_psiw = modelList[self.nPressModel].ebq[('u',0)]
def initializeElementQuadrature(self,t,cq):
TwophaseDarcyFlow_base.initializeElementQuadrature(self,t,cq)
#set up dummy values in case we're not running the other model
self.q_q_t = numpy.zeros(cq[('f',0)].shape,'d')
self.q_q_t[:] = self.qScalarConstant
self.q_psiw = numpy.ones(cq[('u',0)].shape,'d')
def initializeElementBoundaryQuadrature(self,t,cebq,cebq_global):
TwophaseDarcyFlow_base.initializeElementBoundaryQuadrature(self,t,cebq,cebq_global)
#set up dummy values in case we're not running the other model
self.ebq_q_t = numpy.zeros(cebq[('f',0)].shape,'d')
self.ebq_q_t[:] = self.qScalarConstant
self.ebq_psiw = numpy.ones(cebq[('u',0)].shape,'d')
def initializeGlobalExteriorElementBoundaryQuadrature(self,t,cebqe):
TwophaseDarcyFlow_base.initializeGlobalExteriorElementBoundaryQuadrature(self,t,cebqe)
#set up dummy values in case we're not running the other model
self.ebqe_q_t = numpy.zeros(cebqe[('f',0)].shape,'d')
self.ebqe_q_t[:] = self.qScalarConstant
self.ebqe_psiw = numpy.ones(cebqe[('u',0)].shape,'d')
def initializeGeneralizedInterpolationPointQuadrature(self,t,cip):
TwophaseDarcyFlow_base.initializeGeneralizedInterpolationPointQuadrature(self,t,cip)
#set up dummy values in case we're not running the other model
self.ip_q_t = numpy.zeros(cip[('f',0)].shape,'d')
self.ip_q_t[:] = self.qScalarConstant
self.ip_psiw = numpy.ones(cip[('u',0)].shape,'d')
class TwophaseDarcy_incompressible_split_pressure(TwophaseDarcy_split_pressure_base):
"""Total flow conservation equation in an incompressible fractional
flow formulation
"""
# Saturation equation
# .. math::
# \begin{eqnarray}
# \label{eq:2p-ff-mb-w}
# \pd{m_w}{t} + \deld\left(\vec f_w - \ten{a}_{w}\grad \phi_w \right) + r_w &=& 0
# \end{eqnarray}
# and total flow conservation equation
# \begin{eqnarray}
# \label{eq:2p-ff-mb-m}
# \deld\left(\vec f_m - \ten{a}_m \grad \phi_m \right) + r_m &=& 0
# \end{eqnarray}
# \begin{table}[ht]
# \caption{Coefficient definitions for Two-phase flow, \eqn{2p-ff-mb-w} and \eqn{2p-ff-mb-m}
# \label{tab:2p-ff-coef-1}
# }
# \begin{tabular}{cc}
# \hline
# Var. & Def. \\
# \hline
# $u_w$ & $S_w $ \\
# $u_n$ & $\psi_w $ \\
# $\phi_w$ & $\psi_c$ \\
# $\phi_m$ & $\psi_w$ \\
# $m_w$ & $\theta_s \rho_{w}S_w$ \\
# $\vec f_w$ & $\gvec{\sigma}_t F_w - \ten{K}_s\lambda_wF_n\left(b\rho_n - \rho_w\right)\vec g_u$ \\
# $\vec f_m$ &$-\ten{K}_s\lambda_tF_n\grad \psi_c + \ten{K}_s\vec g\lambda_t\left[\rho_w + F_n\left(b\rho_n - \rho_w\right)\right]$\\
# $\ten a_{w,w}$ & $-\lambda_wF_n \ten{K}_{s}$ \\
# $\ten a_{m,m}$ & $\lambda_t \ten{K}_{s}$ \\
# \hline
# $F_i $ & $\lambda_i/\lambda_t $, $i=w,n$ \\
# $\grad \psi_c $ & $\od{\psi_c}{S_w}\grad S_w $ \\
# $\gvec {\sigma}_t$ & $\gvec \sigma_w + \gvec \sigma_n$\\
# \hline
# \end{tabular}
# \end{table}
# Here :math:`S_i` is the saturation for each phase,
# :math:`\varrho_{i}` is the density and :math:`\varrho_{i,0}` is a
# reference value
# (normalized) mass flux for each phase is
# .. math::
# \vec \sigma_i = - \ten{a}_{i}\grad \phi_i, i=w,n
# and
# .. math::
# \psi_{i} = p_{i}/|\vec g|\rho_{w,0}
# b = \varrho_{n,0}/\varrho_{w,0}
# \hat{\mu}_{i} = \mu_i/\mu_{w}
# for the pressure of each phase, p_i, and we have the capillary pressure relation
# .. math::
# \psi_{c} = \psi_{n} - \psi_{w}
# The dependent variables are :math:`S_w`, and :math:`\psi_w`
# Note :math:`S_n = 1-S_w`
# needs to be implemented
# .. math::
# r_m = r_m(\vec x,t)
# """
# TODO:
#
# Figure out if really need to evaluate potential interpolation points
from proteus.cTwophaseDarcyCoefficients import twophaseDarcy_incompressible_split_sd_pressure_het_matType
def __init__(self,
nd=1,
dimensionless_gravity=[-1.0],
density_w=998.2, #Water kg/m^3
density_n=1.205, #Air kg/m^3
viscosity_w=8.9e-4, #Water kg/m s
viscosity_n=1.81e-5,#Air kg/ m s
psk_model='VGM',
nMaterialTypes=1,
nSatModel=1,
diagonal_conductivity=True,
#for debugging
swConstant=1.0,
capillaryDiffusionScaling=1.0):
TwophaseDarcy_split_pressure_base.__init__(self,
nd=nd,
dimensionless_gravity=dimensionless_gravity,
density_w=density_w,
density_n=density_n,
viscosity_w=viscosity_w,
viscosity_n=viscosity_n,
psk_model=psk_model,
nMaterialTypes=nMaterialTypes,
nSatModel=nSatModel,
diagonal_conductivity=diagonal_conductivity,
swConstant=swConstant,
capillaryDiffusionScaling=capillaryDiffusionScaling)
variableNames=['psi_w']
#these are only nonlinear for compressible flow
mass = {}
advection = {0:{0:'linear'}}
hamiltonian={}
diffusion = {0:{0: {0:'linear'}}}
potential = {0:{0: 'u'}}
reaction = {0:{0:'linear'}}
if self.diagonal_conductivity:
sparseDiffusionTensors = {(0,0):(numpy.arange(self.nd+1,dtype='i'),
numpy.arange(self.nd,dtype='i'))}
else: #full
sparseDiffusionTensors = {(0,0):(numpy.arange(self.nd**2+1,step=self.nd,dtype='i'),
numpy.array([list(range(self.nd)) for row in range(self.nd)],dtype='i'))}
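        #worked example (for reference): with nd=2 the diagonal branch above yields
        #rowptr=[0,1,2], colind=[0,1] (one nonzero per row of the conductivity
        #tensor), while the full branch yields rowptr=[0,2,4], colind=[0,1,0,1],
        #i.e., a CSR-style layout consumed below as sdInfo[(0,0)]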
TC_base.__init__(self,
self.nc,
mass,
advection,
diffusion,
potential,
reaction,
hamiltonian,
variableNames,
sparseDiffusionTensors = sparseDiffusionTensors,
useSparseDiffusion = True)
def evaluate(self,t,c):
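        #dispatch on the shape of the unknown to decide which set of quadrature
        #arrays this call refers to: element interiors (q), exterior boundaries
        #(ebqe), interpolation points (ip), or element boundaries (ebq)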
if c[('u',0)].shape == self.q_s_w.shape:
materialTypes = self.materialTypes_q
s_w = self.q_s_w
grad_psic = self.q_grad_psic
grad_sw = self.q_grad_sw
c['psi_n']= numpy.copy(self.q_psic)
c['psi_n'] += c[('u',0)]
vol_frac_w = self.q[('vol_frac',0)]
vol_frac_n = self.q[('vol_frac',1)]
#mwf debug
#import pdb
#pdb.set_trace()
#for eN in range(c['x'].shape[0]):
# for k in range(c['x'].shape[1]):
# if (1.25 <= c['x'][eN,k,0] and c['x'][eN,k,0] <= 2.25 and
# 1.25 <= c['x'][eN,k,1] and c['x'][eN,k,1] <= 1.5):
#mwf major hack to test initialization
#grad_psic[eN,k,0] = 0.0
#grad_psic[eN,k,1] = 1.0
#
# print "split press eval inside lens eN=%s k=%s x=%s grad_psiw= %s grad_psic= %s s_w= %s " % (eN,k,c['x'][eN,k],c[('grad(u)',0)][eN,k],grad_psic[eN,k],s_w[eN,k])
# else:
# print "split press eval outside lens eN=%s k=%s x=%s grad_psiw= %s grad_psic= %s s_w= %s " % (eN,k,c['x'][eN,k],c[('grad(u)',0)][eN,k],grad_psic[eN,k],s_w[eN,k])
#import pdb
#pdb.set_trace()
elif c[('u',0)].shape == self.ebqe_s_w.shape:
materialTypes = self.materialTypes_ebqe
s_w = self.ebqe_s_w
grad_psic = self.ebqe_grad_psic
grad_sw = self.ebqe_grad_sw
c['psi_n']= numpy.copy(self.ebqe_psic)
c['psi_n'] += c[('u',0)]
vol_frac_w = self.ebqe[('vol_frac',0)]
vol_frac_n = self.ebqe[('vol_frac',1)]
elif c[('u',0)].shape == self.ip_s_w.shape:
c['psi_n']= numpy.copy(self.ip_psic)
c['psi_n'] += c[('u',0)]
#mwf hack 04/03/09 skip
return
materialTypes = self.materialTypes_ip
s_w = self.ip_s_w
grad_psic = self.ip_grad_psic
grad_sw = self.ip_grad_sw
vol_frac_w = self.ip[('vol_frac',0)]
vol_frac_n = self.ip[('vol_frac',1)]
else:
assert c[('u',0)].shape == self.ebq_s_w.shape
materialTypes = self.materialTypes_ebq
s_w = self.ebq_s_w
grad_psic = self.ebq_grad_psic
grad_sw = self.ebq_grad_sw
c['psi_n']= numpy.copy(self.ebq_psic)
c['psi_n'] += c[('u',0)]
vol_frac_w = self.ebq[('vol_frac',0)]
vol_frac_n = self.ebq[('vol_frac',1)]
assert self.rwork_psk is not None
self.twophaseDarcy_incompressible_split_sd_pressure_het_matType(self.psk_types[self.psk_model],
self.sdInfo[(0,0)][0],
self.sdInfo[(0,0)][1],
materialTypes,
self.muw,
self.mun,
self.omega_types,
self.Ksw_types,
self.b,
self.capillaryDiffusionScaling,
self.rwork_psk,self.iwork_psk,
self.rwork_psk_tolerances,
self.rwork_density_w,
self.rwork_density_n,
self.g[:self.nd],
s_w,
grad_psic,
c[('f',0)],
c[('a',0,0)])
self.twophaseDarcy_vol_frac(materialTypes,
self.omega_types,
s_w,
vol_frac_w,
vol_frac_n)
#mwf debug
if (numpy.isnan(c[('a',0,0)]).any() or
numpy.isnan(c[('f',0)]).any() or
numpy.isnan(c[('u',0)]).any()):
import pdb
pdb.set_trace()
#mwf debug
#import pdb
#pdb.set_trace()
if c[('u',0)].shape == self.q_s_w.shape:
c['Ks']=c[('a',0,0)][:,:,0]
#
class TwophaseDarcy_incompressible_split_saturation(TwophaseDarcy_split_saturation_base):
"""Aqueous phase mass conservation equation (saturation equation) in
an incompressible fractional flow formulation
"""
# Saturation equation
# .. math::
# \begin{eqnarray}
# \label{eq:2p-ff-mb-w}
# \pd{m_w}{t} + \deld\left(\vec f_w - \ten{a}_{w}\grad \phi_w \right) + r_w &=& 0
# \end{eqnarray}
# and total flow conservation equation
# \begin{eqnarray}
# \label{eq:2p-ff-mb-m}
# \deld\left(\vec f_m - \ten{a}_m \grad \phi_m \right) + r_m &=& 0
# \end{eqnarray}
# \begin{table}[ht]
# \caption{Coefficient definitions for Two-phase flow, \eqn{2p-ff-mb-w} and \eqn{2p-ff-mb-m}
# \label{tab:2p-ff-coef-1}
# }
# \begin{tabular}{cc}
# \hline
# Var. & Def. \\
# \hline
# $u_w$ & $S_w $ \\
# $u_n$ & $\psi_w $ \\
# $\phi_w$ & $\psi_c$ \\
# $\phi_m$ & $\psi_w$ \\
# $m_w$ & $\theta_s \rho_{w}S_w$ \\
# $\vec f_w$ & $\gvec{\sigma}_t F_w - \ten{K}_s\lambda_wF_n\left(b\rho_n - \rho_w\right)\vec g_u$ \\
# $\vec f_m$ &$-\ten{K}_s\lambda_tF_n\grad \psi_c + \ten{K}_s\vec g\lambda_t\left[\rho_w + F_n\left(b\rho_n - \rho_w\right)\right]$\\
# $\ten a_{w,w}$ & $-\lambda_wF_n \ten{K}_{s}$ \\
# $\ten a_{m,m}$ & $\lambda_t \ten{K}_{s}$ \\
# \hline
# $F_i $ & $\lambda_i/\lambda_t $, $i=w,n$ \\
# $\grad \psi_c $ & $\od{\psi_c}{S_w}\grad S_w $ \\
# $\gvec {\sigma}_t$ & $\gvec \sigma_w + \gvec \sigma_n$\\
# \hline
# \end{tabular}
# \end{table}
# Here :math:`S_i` is the saturation for each phase, :math:`\varrho_{i}` is the density and
# :math:`\varrho_{i,0}` is a reference value
# (normalized) mass flux for each phase is
# .. math::
# \vec \sigma_i = - \ten{a}_{i}\grad \phi_i, i=w,n
# and
# .. math::
# \psi_{i} = p_{i}/|\vec g|\rho_{w,0}
# b = \varrho_{n,0}/\varrho_{w,0}
    #        \hat{\mu}_{i} = \mu_i/\mu_{w}
# for the pressure of each phase, p_i, and we have the capillary pressure relation
# .. math::
# \psi_{c} = \psi_{n} - \psi_{w}
# The dependent variables are :math:`S_w`, and :math:`\psi_w`
# Note :math:`S_n = 1-S_w`
# needs to be implemented
# .. math::
# r_m = r_m(\vec x,t)
# TODO:
#
# Figure out if really need to evaluate potential interpolation points
from proteus.cTwophaseDarcyCoefficients import twophaseDarcy_incompressible_split_sd_saturation_het_matType
def __init__(self,
nd=1,
dimensionless_gravity=[-1.0],
density_w=998.2, #Water kg/m^3
density_n=1.205, #Air kg/m^3
viscosity_w=8.9e-4, #Water kg/m s
viscosity_n=1.81e-5,#Air kg/ m s
psk_model='VGM',
nMaterialTypes=1,
nPressModel=0,
diagonal_conductivity=True,
#for debugging
qScalarConstant=1.0,
capillaryDiffusionScaling=1.0,
advectionScaling=1.0):
TwophaseDarcy_split_saturation_base.__init__(self,
nd=nd,
dimensionless_gravity=dimensionless_gravity,
density_w=density_w,
density_n=density_n,
viscosity_w=viscosity_w,
viscosity_n=viscosity_n,
psk_model=psk_model,
nMaterialTypes=nMaterialTypes,
nPressModel=nPressModel,
diagonal_conductivity=diagonal_conductivity,
qScalarConstant=qScalarConstant,
capillaryDiffusionScaling=capillaryDiffusionScaling,
advectionScaling=advectionScaling)
variableNames=['s_w']
mass = {0:{0:'linear'}}
advection = {0:{0:'nonlinear'}}
hamiltonian={}
diffusion = {0:{0:{0:'nonlinear'}}}
potential = {0:{0: 'nonlinear'}}
reaction = {0:{0:'linear'}}
if self.diagonal_conductivity:
sparseDiffusionTensors = {(0,0):(numpy.arange(self.nd+1,dtype='i'),
numpy.arange(self.nd,dtype='i'))}
else: #full
sparseDiffusionTensors = {(0,0):(numpy.arange(self.nd**2+1,step=self.nd,dtype='i'),
numpy.array([list(range(self.nd)) for row in range(self.nd)],dtype='i'))}
TC_base.__init__(self,
self.nc,
mass,
advection,
diffusion,
potential,
reaction,
hamiltonian,
variableNames,
sparseDiffusionTensors = sparseDiffusionTensors,
useSparseDiffusion = True)
def evaluate(self,t,c):
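        #same shape-based dispatch as the pressure model: element interiors (q),
        #exterior boundaries (ebqe), interpolation points (ip), or element
        #boundaries (ebq), keyed here off the advection term's shape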
if c[('f',0)].shape == self.q_q_t.shape:
materialTypes = self.materialTypes_q
q_t = self.q_q_t
psiw = self.q_psiw
vol_frac_w = self.q[('vol_frac',0)]
vol_frac_n = self.q[('vol_frac',1)]
elif c[('f',0)].shape == self.ebqe_q_t.shape:
materialTypes = self.materialTypes_ebqe
q_t = self.ebqe_q_t
psiw = self.ebqe_psiw
vol_frac_w = self.ebqe[('vol_frac',0)]
vol_frac_n = self.ebqe[('vol_frac',1)]
elif c[('f',0)].shape == self.ip_q_t.shape:
materialTypes = self.materialTypes_ip
q_t = self.ip_q_t
psiw = self.ip_psiw
vol_frac_w = self.ip[('vol_frac',0)]
vol_frac_n = self.ip[('vol_frac',1)]
else:
assert c[('f',0)].shape == self.ebq_q_t.shape
materialTypes = self.materialTypes_ebq
q_t = self.ebq_q_t
psiw = self.ebq_psiw
vol_frac_w = self.ebq[('vol_frac',0)]
vol_frac_n = self.ebq[('vol_frac',1)]
assert self.rwork_psk is not None
#mwf debug
#import pdb
#pdb.set_trace()
self.twophaseDarcy_incompressible_split_sd_saturation_het_matType(self.psk_types[self.psk_model],
self.sdInfo[(0,0)][0],
self.sdInfo[(0,0)][1],
materialTypes,
self.muw,
self.mun,
self.omega_types,
self.Ksw_types,
self.b,
self.capillaryDiffusionScaling,
self.advectionScaling,
self.rwork_psk,self.iwork_psk,
self.rwork_psk_tolerances,
self.rwork_density_w,
self.rwork_density_n,
self.g[:self.nd],
q_t,
c[('u',0)],
c[('m',0)],
c[('dm',0,0)],
c[('phi',0)],
c[('dphi',0,0)],
c[('f',0)],
c[('df',0,0)],
c[('a',0,0)],
c[('da',0,0,0)])
self.twophaseDarcy_vol_frac(materialTypes,
self.omega_types,
c[('u',0)],
vol_frac_w,
vol_frac_n)
#mwf debug
# if c[('f',0)].shape == self.ip_q_t.shape:
# for eN in range(c['x'].shape[0]):
# for k in range(c['x'].shape[1]):
# if (1.25 <= c['x'][eN,k,0] and c['x'][eN,k,0] <= 2.25 and
# 1.25 <= c['x'][eN,k,1] and c['x'][eN,k,1] <= 1.5):
# print "split sat eval inside lens eN=%s k=%s x=%s matID=%s psic= %s s_w= %s " % (eN,k,c['x'][eN,k],materialTypes[eN],c[('phi',0)][eN,k],c[('u',0)][eN,k])
# if (1.25 < c['x'][eN,k,0] and c['x'][eN,k,0] < 2.25 and
# 1.25 < c['x'][eN,k,1] and c['x'][eN,k,1] < 1.5):
# assert materialTypes[eN] == 1
# else:
# print "split sat eval outside lens eN=%s k=%s x=%s matID=%s psic= %s s_w= %s " % (eN,k,c['x'][eN,k],materialTypes[eN],c[('phi',0)][eN,k],c[('u',0)][eN,k])
# assert materialTypes[eN] == 0
#import pdb
#pdb.set_trace()
#mwf debug
if (numpy.isnan(c[('da',0,0,0)]).any() or
numpy.isnan(c[('a',0,0)]).any() or
numpy.isnan(c[('phi',0)]).any() or
numpy.isnan(c[('dphi',0,0)]).any() or
numpy.isnan(c[('df',0,0)]).any() or
numpy.isnan(c[('f',0)]).any() or
numpy.isnan(c[('u',0)]).any() or
numpy.isnan(c[('m',0)]).any() or
numpy.isnan(c[('dm',0,0)]).any()):
import pdb
pdb.set_trace()
class TwophaseDarcy_compressible_split_pressure(TwophaseDarcy_split_pressure_base):
    """Total flow conservation equation in a split compressible
    fractional flow formulation. Currently the options are

    compressibility for the non-wetting phase only (compressibleN):
        compressibilityFlag=1

    compressibility for both phases, under the slight-compressibility
    assumption (spatial density gradients are negligible):
        compressibilityFlag=2
    """
# Saturation equation
# .. math::
# \begin{eqnarray}
# \label{eq:2p-c-ff-mb-w}
# \pd{m_w}{t} + \deld\left(\vec f_w - \ten{a}_{w}\grad \phi_w \right) + r_w &=& 0
# \end{eqnarray}
# and total flow conservation equation
# \begin{eqnarray}
# \label{eq:2p-c-ff-mb-m}
# \pd{m_m}{t} + \deld\left(\vec f_m - \ten{a}_m \grad \phi_m \right) + r_m &=& 0
# \end{eqnarray}
# \begin{table}[ht]
# \caption{Coefficient definitions for compressible two-phase fractional flow formulation flow, \eqn{2p-c-ff-mb-w} and \eqn{2p-c-ff-mb-m}
# \label{tab:2p-c-ff-coef-1}
# }
# \begin{tabular}{cc}
# \hline
# Var. & Def. \\
# \hline
# $u_w$ & $S_w $ \\
# $u_n$ & $\psi_w $ \\
# $\phi_w$ & $\psi_c$ \\
# $\phi_m$ & $\psi_w$ \\
# $m_w$ & $\theta_s \rho_{w}S_w$ \\
# $m_m$ & $\theta_s \rho_{w}S_w + \theta_s\rho_{n}(1-S_w)$ \\
# $\vec f_w^{\dagger}$ & $\gvec{\sigma}_t F_w - \ten{K}_s\lambda_wF_n\left(b\rho_n - \rho_w\right)\vec g_u$ \\
# $\vec f_m^{\dagger}$ &$-\ten{K}_s\lambda_tF_n\grad \psi_c + \ten{K}_s\vec g\lambda_t\left[\rho_w + F_n\left(b\rho_n - \rho_w\right)\right]$\\
# $\ten a_{w,w}^{\dagger}$ & $-\lambda_wF_n \ten{K}_{s}$ \\
# $\ten a_{m,m}^{\dagger}$ & $\lambda_t \ten{K}_{s}$ \\
# \hline
# $F_i $ & $\lambda_i/\lambda_t $, $i=w,n$ \\
# $\grad \psi_c $ & $\od{\psi_c}{S_w}\grad S_w $ \\
# $\gvec {\sigma}_t$ & $\gvec \sigma_w + \gvec \sigma_n$\\
# \hline
# \multicolumn{2}{l}
# {$\dagger$ : $\rho_i \equiv 1$, $\lambda_i= k_{r,i}/\hat{\mu}_{i}$ for slight compressibility assumption}
# \end{tabular}
# \end{table}
# Here :math:`S_i` is the saturation for each phase, :math:`\varrho_{i}` is the
# density and :math:`\varrho_{i,0}` is a reference value
# (normalized) mass flux for each phase is
# .. math::
# \vec \sigma_i = - \ten{a}_{i}\grad \phi_i, i=w,n
# and
# .. math::
# \psi_{i} = p_{i}/|\vec g|\rho_{w,0}
# b = \varrho_{n,0}/\varrho_{w,0}
    #        \hat{\mu}_{i} = \mu_i/\mu_{w}
# \lambda_i= \rho k_{r,i}/\hat{\mu}_{i}
# \lambda_t= \lambda_n + \lambda_w
# for the pressure of each phase, p_i, and we have the capillary pressure relation
# .. math::
# \psi_{c} = \psi_{n} - \psi_{w}
# The dependent variables are :math:`S_w`, and :math:`\psi_w`
# Note :math:`S_n = 1-S_w`
# needs to be implemented
# .. math::
# r_m = r_m(\vec x,t)
# TODO:
#
# Figure out if really need to evaluate potential interpolation points
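    #A minimal selection sketch (hedged; values are illustrative only):
    #compressibilityFlag picks which C evaluation kernel is called in evaluate
    #below, e.g.
    #
    #    coefficients = TwophaseDarcy_compressible_split_pressure(
    #        nd=2,
    #        compressibilityFlag=1)  #compressible non-wetting phase only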
from proteus.cTwophaseDarcyCoefficients import twophaseDarcy_slightCompressible_split_sd_pressure_het_matType
from proteus.cTwophaseDarcyCoefficients import twophaseDarcy_compressibleN_split_sd_pressure_het_matType
def __init__(self,
nd=1,
dimensionless_gravity=[-1.0],
density_w=998.2, #Water kg/m^3
density_n=1.205, #Air kg/m^3
viscosity_w=8.9e-4, #Water kg/m s
viscosity_n=1.81e-5,#Air kg/ m s
density_w_parameters=TwophaseDarcyFlow_base.default_density_w_parameters,
density_n_parameters=TwophaseDarcyFlow_base.default_density_n_parameters,
psk_model='VGM',
nMaterialTypes=1,
nSatModel=1,
compressibilityFlag=2,
diagonal_conductivity=True,
#for debugging
swConstant=1.0,
capillaryDiffusionScaling=1.0):
TwophaseDarcy_split_pressure_base.__init__(self,
nd=nd,
dimensionless_gravity=dimensionless_gravity,
density_w=density_w,
density_n=density_n,
viscosity_w=viscosity_w,
viscosity_n=viscosity_n,
density_w_parameters=density_w_parameters,
density_n_parameters=density_n_parameters,
psk_model=psk_model,
nMaterialTypes=nMaterialTypes,
nSatModel=nSatModel,
diagonal_conductivity=diagonal_conductivity,
swConstant=swConstant,
capillaryDiffusionScaling=capillaryDiffusionScaling)
self.compressibilityFlag=compressibilityFlag
variableNames=['psi_w']
#
mass = {0:{0:'nonlinear'}}
advection = {0:{0:'nonlinear'}}
hamiltonian={}
diffusion = {0:{0: {0:'nonlinear'}}}
potential = {0:{0: 'u'}}
reaction = {0:{0:'linear'}}
if self.compressibilityFlag == 2:
advection = {0:{0:'linear'}}
diffusion = {0:{0: {0:'linear'}}}
if self.diagonal_conductivity:
sparseDiffusionTensors = {(0,0):(numpy.arange(self.nd+1,dtype='i'),
numpy.arange(self.nd,dtype='i'))}
else: #full
sparseDiffusionTensors = {(0,0):(numpy.arange(self.nd**2+1,step=self.nd,dtype='i'),
numpy.array([list(range(self.nd)) for row in range(self.nd)],dtype='i'))}
TC_base.__init__(self,
self.nc,
mass,
advection,
diffusion,
potential,
reaction,
hamiltonian,
variableNames,
sparseDiffusionTensors = sparseDiffusionTensors,
useSparseDiffusion = True)
def evaluate(self,t,c):
if c[('u',0)].shape == self.q_s_w.shape:
materialTypes = self.materialTypes_q
s_w = self.q_s_w
grad_psic = self.q_grad_psic
c['psi_n']= numpy.copy(self.q_psic)
c['psi_n'] += c[('u',0)]
vol_frac_w = self.q[('vol_frac',0)]
vol_frac_n = self.q[('vol_frac',1)]
elif c[('u',0)].shape == self.ebqe_s_w.shape:
materialTypes = self.materialTypes_ebqe
s_w = self.ebqe_s_w
grad_psic = self.ebqe_grad_psic
c['psi_n']= numpy.copy(self.ebqe_psic)
c['psi_n'] += c[('u',0)]
vol_frac_w = self.ebqe[('vol_frac',0)]
vol_frac_n = self.ebqe[('vol_frac',1)]
elif c[('u',0)].shape == self.ip_s_w.shape:
c['psi_n']= numpy.copy(self.ip_psic)
c['psi_n'] += c[('u',0)]
#mwf hack 04/03/09 skip
return
materialTypes = self.materialTypes_ip
s_w = self.ip_s_w
grad_psic = self.ip_grad_psic
vol_frac_w = self.ip[('vol_frac',0)]
vol_frac_n = self.ip[('vol_frac',1)]
else:
assert c[('u',0)].shape == self.ebq_s_w.shape
materialTypes = self.materialTypes_ebq
s_w = self.ebq_s_w
grad_psic = self.ebq_grad_psic
c['psi_n']= numpy.copy(self.ebq_psic)
c['psi_n'] += c[('u',0)]
vol_frac_w = self.ebq[('vol_frac',0)]
vol_frac_n = self.ebq[('vol_frac',1)]
assert self.rwork_psk is not None
if self.compressibilityFlag == 2:
self.twophaseDarcy_slightCompressible_split_sd_pressure_het_matType(self.psk_types[self.psk_model],
self.density_types[self.density_w_model],
self.density_types[self.density_n_model],
self.sdInfo[(0,0)][0],
self.sdInfo[(0,0)][1],
materialTypes,
self.muw,
self.mun,
self.omega_types,
self.Ksw_types,
self.b,
self.capillaryDiffusionScaling,
self.rwork_psk,self.iwork_psk,
self.rwork_psk_tolerances,
self.rwork_density_w,
self.rwork_density_n,
self.g[:self.nd],
s_w,
c[('u',0)],
c['psi_n'],
grad_psic,
c[('m',0)],
c[('dm',0,0)],
c[('f',0)],
c[('a',0,0)])
else:
self.twophaseDarcy_compressibleN_split_sd_pressure_het_matType(self.psk_types[self.psk_model],
self.density_types[self.density_w_model],
self.density_types[self.density_n_model],
self.sdInfo[(0,0)][0],
self.sdInfo[(0,0)][1],
materialTypes,
self.muw,
self.mun,
self.omega_types,
self.Ksw_types,
self.b,
self.capillaryDiffusionScaling,
self.rwork_psk,self.iwork_psk,
self.rwork_psk_tolerances,
self.rwork_density_w,
self.rwork_density_n,
self.g[:self.nd],
s_w,
c[('u',0)],
c['psi_n'],
grad_psic,
c[('m',0)],
c[('dm',0,0)],
c[('f',0)],
c[('a',0,0)])
self.twophaseDarcy_vol_frac(materialTypes,
self.omega_types,
s_w,
vol_frac_w,
vol_frac_n)
#mwf debug
if (numpy.isnan(c[('m',0)]).any() or
numpy.isnan(c[('dm',0,0)]).any() or
numpy.isnan(c[('a',0,0)]).any() or
numpy.isnan(c[('f',0)]).any() or
numpy.isnan(c[('u',0)]).any()):
import pdb
pdb.set_trace()
#mwf debug
#import pdb
#pdb.set_trace()
if c[('u',0)].shape == self.q_s_w.shape:
c['Ks']=c[('a',0,0)][:,:,0]
class TwophaseDarcy_compressible_split_saturation(TwophaseDarcy_split_saturation_base):
    """Aqueous phase mass conservation equation (saturation equation) in a
    compressible fractional flow formulation. Currently the options are

    compressibility for the non-wetting phase only (compressibleN):
        compressibilityFlag=1

    compressibility for both phases, under the slight-compressibility
    assumption (spatial density gradients are negligible):
        compressibilityFlag=2
    """
# Saturation equation
# .. math::
# \begin{eqnarray}
# \label{eq:2p-c-ff-mb-w}
# \pd{m_w}{t} + \deld\left(\vec f_w - \ten{a}_{w}\grad \phi_w \right) + r_w &=& 0
# \end{eqnarray}
# and total flow conservation equation
# .. math::
# \begin{eqnarray}
# \label{eq:2p-c-ff-mb-m}
# \pd{m_m}{t} + \deld\left(\vec f_m - \ten{a}_m \grad \phi_m \right) + r_m &=& 0
# \end{eqnarray}
# \begin{table}[ht]
# \caption{Coefficient definitions for compressible two-phase fractional flow formulation flow, \eqn{2p-c-ff-mb-w} and \eqn{2p-c-ff-mb-m}
# \label{tab:2p-c-ff-coef-1}
# }
# \begin{tabular}{cc}
# \hline
# Var. & Def. \\
# \hline
# $u_w$ & $S_w $ \\
# $u_n$ & $\psi_w $ \\
# $\phi_w$ & $\psi_c$ \\
# $\phi_m$ & $\psi_w$ \\
# $m_w$ & $\theta_s \rho_{w}S_w$ \\
# $m_m$ & $\theta_s \rho_{w}S_w + \theta_s\rho_{n}(1-S_w)$ \\
# $\vec f_w^{\dagger}$ & $\gvec{\sigma}_t F_w - \ten{K}_s\lambda_wF_n\left(b\rho_n - \rho_w\right)\vec g_u$ \\
# $\vec f_m^{\dagger}$ &$-\ten{K}_s\lambda_tF_n\grad \psi_c + \ten{K}_s\vec g\lambda_t\left[\rho_w + F_n\left(b\rho_n - \rho_w\right)\right]$\\
# $\ten a_{w,w}^{\dagger}$ & $-\lambda_wF_n \ten{K}_{s}$ \\
# $\ten a_{m,m}^{\dagger}$ & $\lambda_t \ten{K}_{s}$ \\
# \hline
# $F_i $ & $\lambda_i/\lambda_t $, $i=w,n$ \\
# $\grad \psi_c $ & $\od{\psi_c}{S_w}\grad S_w $ \\
# $\gvec {\sigma}_t$ & $\gvec \sigma_w + \gvec \sigma_n$\\
# \hline
# \multicolumn{2}{l}
# {$\dagger$ : $\rho_i \equiv 1$, $\lambda_i= k_{r,i}/\hat{\mu}_{i}$ for slight compressibility assumption}
# \end{tabular}
# \end{table}
# Here :math:`S_i` is the saturation for each phase, :math:`\varrho_{i}` is the density and
# :math:`\varrho_{i,0}` is a reference value
# (normalized) mass flux for each phase is
# .. math::
# \vec \sigma_i = - \ten{a}_{i}\grad \phi_i, i=w,n
# and
# .. math::
# \psi_{i} = p_{i}/|\vec g|\rho_{w,0}
# b = \varrho_{n,0}/\varrho_{w,0}
    #        \hat{\mu}_{i} = \mu_i/\mu_{w}
# \lambda_i= \rho k_{r,i}/\hat{\mu}_{i}
# \lambda_t= \lambda_n + \lambda_w
# for the pressure of each phase, p_i, and we have the capillary pressure relation
# .. math::
# \psi_{c} = \psi_{n} - \psi_{w}
# The dependent variables are :math:`S_w`, and :math:`\psi_w`
# Note :math:`S_n = 1-S_w`
# needs to be implemented
# .. math::
# r_m = r_m(\vec x,t)
# TODO:
#
# Figure out if really need to evaluate potential interpolation points
from proteus.cTwophaseDarcyCoefficients import twophaseDarcy_slightCompressible_split_sd_saturation_het_matType
from proteus.cTwophaseDarcyCoefficients import twophaseDarcy_compressibleN_split_sd_saturation_het_matType
def __init__(self,
nd=1,
dimensionless_gravity=[-1.0],
density_w=998.2, #Water kg/m^3
density_n=1.205, #Air kg/m^3
viscosity_w=8.9e-4, #Water kg/m s
viscosity_n=1.81e-5,#Air kg/ m s
density_w_parameters=TwophaseDarcyFlow_base.default_density_w_parameters,
density_n_parameters=TwophaseDarcyFlow_base.default_density_n_parameters,
psk_model='VGM',
nMaterialTypes=1,
nPressModel=0,
compressibilityFlag=2,
diagonal_conductivity=True,
#for debugging
qScalarConstant=1.0,
capillaryDiffusionScaling=1.0,
advectionScaling=1.0):
TwophaseDarcy_split_saturation_base.__init__(self,
nd=nd,
dimensionless_gravity=dimensionless_gravity,
density_w=density_w,
density_n=density_n,
viscosity_w=viscosity_w,
viscosity_n=viscosity_n,
density_w_parameters=density_w_parameters,
density_n_parameters=density_n_parameters,
psk_model=psk_model,
nMaterialTypes=nMaterialTypes,
nPressModel=nPressModel,
diagonal_conductivity=diagonal_conductivity,
qScalarConstant=qScalarConstant,
capillaryDiffusionScaling=capillaryDiffusionScaling,
advectionScaling=advectionScaling)
self.compressibilityFlag=compressibilityFlag
variableNames=['s_w']
mass = {0:{0:'linear'}}
advection = {0:{0:'nonlinear'}}
hamiltonian={}
diffusion = {0:{0:{0:'nonlinear'}}}
potential = {0:{0: 'nonlinear'}}
reaction = {0:{0:'linear'}}
if self.diagonal_conductivity:
sparseDiffusionTensors = {(0,0):(numpy.arange(self.nd+1,dtype='i'),
numpy.arange(self.nd,dtype='i'))}
else: #full
sparseDiffusionTensors = {(0,0):(numpy.arange(self.nd**2+1,step=self.nd,dtype='i'),
numpy.array([list(range(self.nd)) for row in range(self.nd)],dtype='i'))}
TC_base.__init__(self,
self.nc,
mass,
advection,
diffusion,
potential,
reaction,
hamiltonian,
variableNames,
sparseDiffusionTensors = sparseDiffusionTensors,
useSparseDiffusion = True)
def evaluate(self,t,c):
if c[('f',0)].shape == self.q_q_t.shape:
materialTypes = self.materialTypes_q
q_t = self.q_q_t
psiw = self.q_psiw
vol_frac_w = self.q[('vol_frac',0)]
vol_frac_n = self.q[('vol_frac',1)]
elif c[('f',0)].shape == self.ebqe_q_t.shape:
materialTypes = self.materialTypes_ebqe
q_t = self.ebqe_q_t
psiw = self.ebqe_psiw
vol_frac_w = self.ebqe[('vol_frac',0)]
vol_frac_n = self.ebqe[('vol_frac',1)]
elif c[('f',0)].shape == self.ip_q_t.shape:
materialTypes = self.materialTypes_ip
q_t = self.ip_q_t
psiw = self.ip_psiw
vol_frac_w = self.ip[('vol_frac',0)]
vol_frac_n = self.ip[('vol_frac',1)]
else:
assert c[('f',0)].shape == self.ebq_q_t.shape
materialTypes = self.materialTypes_ebq
q_t = self.ebq_q_t
psiw = self.ebq_psiw
vol_frac_w = self.ebq[('vol_frac',0)]
vol_frac_n = self.ebq[('vol_frac',1)]
assert self.rwork_psk is not None
#mwf debug
#import pdb
#pdb.set_trace()
if self.compressibilityFlag == 2:
self.twophaseDarcy_slightCompressible_split_sd_saturation_het_matType(self.psk_types[self.psk_model],
self.density_types[self.density_w_model],
self.density_types[self.density_n_model],
self.sdInfo[(0,0)][0],
self.sdInfo[(0,0)][1],
materialTypes,
self.muw,
self.mun,
self.omega_types,
self.Ksw_types,
self.b,
self.capillaryDiffusionScaling,
self.advectionScaling,
self.rwork_psk,self.iwork_psk,
self.rwork_psk_tolerances,
self.rwork_density_w,
self.rwork_density_n,
self.g[:self.nd],
q_t,
psiw,
c[('u',0)],
c[('m',0)],
c[('dm',0,0)],
c[('phi',0)],
c[('dphi',0,0)],
c[('f',0)],
c[('df',0,0)],
c[('a',0,0)],
c[('da',0,0,0)])
else:
self.twophaseDarcy_compressibleN_split_sd_saturation_het_matType(self.psk_types[self.psk_model],
self.density_types[self.density_w_model],
self.density_types[self.density_n_model],
self.sdInfo[(0,0)][0],
self.sdInfo[(0,0)][1],
materialTypes,
self.muw,
self.mun,
self.omega_types,
self.Ksw_types,
self.b,
self.capillaryDiffusionScaling,
self.advectionScaling,
self.rwork_psk,self.iwork_psk,
self.rwork_psk_tolerances,
self.rwork_density_w,
self.rwork_density_n,
self.g[:self.nd],
q_t,
psiw,
c[('u',0)],
c[('m',0)],
c[('dm',0,0)],
c[('phi',0)],
c[('dphi',0,0)],
c[('f',0)],
c[('df',0,0)],
c[('a',0,0)],
c[('da',0,0,0)])
self.twophaseDarcy_vol_frac(materialTypes,
self.omega_types,
c[('u',0)],
vol_frac_w,
vol_frac_n)
#mwf debug
if (numpy.isnan(c[('da',0,0,0)]).any() or
numpy.isnan(c[('a',0,0)]).any() or
numpy.isnan(c[('phi',0)]).any() or
numpy.isnan(c[('dphi',0,0)]).any() or
numpy.isnan(c[('df',0,0)]).any() or
numpy.isnan(c[('f',0)]).any() or
numpy.isnan(c[('u',0)]).any() or
numpy.isnan(c[('m',0)]).any() or
numpy.isnan(c[('dm',0,0)]).any()):
import pdb
pdb.set_trace()
###########
class TwophaseDarcy_split_pp_pressure_base(TwophaseDarcyFlow_base):
    """Base class for the 'pressure' or total flow conservation equation in
    fractional flow formulations.

    The primary functionality of the base class is to handle
    synchronization with a 'saturation' model to get the saturation,
    :math:`S_w`, and capillary pressure (head), :math:`\psi_c`, variables.
    This version allows the capillary pressure to be the unknown in the
    saturation equation.
    """
def __init__(self,
nd=1,
dimensionless_gravity=[-1.0],
density_w=998.2, #Water kg/m^3
density_n=1.205, #Air kg/m^3
viscosity_w=8.9e-4, #Water kg/m s
viscosity_n=1.81e-5,#Air kg/ m s
density_w_parameters=TwophaseDarcyFlow_base.default_density_w_parameters,
density_n_parameters=TwophaseDarcyFlow_base.default_density_n_parameters,
psk_model='VGM',
nMaterialTypes=1,
nSatModel=1,
diagonal_conductivity=True,
#for debugging
swConstant=1.0,
capillaryDiffusionScaling=1.0):
TwophaseDarcyFlow_base.__init__(self,
nd=nd,
dimensionless_gravity=dimensionless_gravity,
density_w=density_w,
density_n=density_n,
viscosity_w=viscosity_w,
viscosity_n=viscosity_n,
density_w_parameters=density_w_parameters,
density_n_parameters=density_n_parameters,
psk_model=psk_model,
nMaterialTypes=nMaterialTypes,
diagonal_conductivity=diagonal_conductivity)
self.nc=1
self.nd=nd
#
self.nSatModel=nSatModel
#for debugging
self.swConstant=swConstant
self.capillaryDiffusionScaling=capillaryDiffusionScaling
def attachModels(self,modelList):
if self.nSatModel is None:
            print('Warning TwophaseDarcy_split_pp_pressure_base nSatModel is None; returning early in attachModels')
return
#not ideal, but need a way to force nonlinear potential to be evaluated in saturation model
modelList[self.nSatModel].calculateElementCoefficients()
self.q_s_w = modelList[self.nSatModel].q['sw']
self.ebqe_s_w = modelList[self.nSatModel].ebqe['sw']
if 'sw' in modelList[self.nSatModel].ebq:
self.ebq_s_w = modelList[self.nSatModel].ebq['sw']
self.q_grad_psic = modelList[self.nSatModel].q[('grad(phi)',0)]
self.ebqe_grad_psic = modelList[self.nSatModel].ebqe[('grad(phi)',0)]
if ('grad(phi)',0) in modelList[self.nSatModel].ebq:
self.ebq_grad_psic = modelList[self.nSatModel].ebq[('grad(phi)',0)]
self.q_psic = modelList[self.nSatModel].q[('phi',0)]
self.ebqe_psic= modelList[self.nSatModel].ebqe[('phi',0)]
if ('phi',0) in modelList[self.nSatModel].ebq:
self.ebq_psic = modelList[self.nSatModel].ebq[('phi',0)]
assert ('phi',0) in modelList[self.nSatModel].phi_ip
assert self.ip_psic.shape == modelList[self.nSatModel].phi_ip[('phi',0)].shape
self.ip_psic = modelList[self.nSatModel].phi_ip[('phi',0)]
def initializeElementQuadrature(self,t,cq):
TwophaseDarcyFlow_base.initializeElementQuadrature(self,t,cq)
#set up dummy values in case we're not running the other model
self.q_s_w = numpy.zeros(cq[('u',0)].shape,'d')
self.q_s_w[:] = self.swConstant
for i in range(old_div(len(self.q_s_w.flat),2),len(self.q_s_w.flat)):
self.q_s_w.flat[i] = 1.0e-4
self.q_grad_psic = numpy.zeros(cq[('f',0)].shape,'d')
self.q_psic = numpy.zeros(cq[('u',0)].shape,'d')
cq['psi_n'] = numpy.zeros(cq[('u',0)].shape,'d')
cq[('dpsi_n',0)] = numpy.ones(cq[('u',0)].shape,'d')
def initializeElementBoundaryQuadrature(self,t,cebq,cebq_global):
TwophaseDarcyFlow_base.initializeElementBoundaryQuadrature(self,t,cebq,cebq_global)
#set up dummy values in case we're not running the other model
self.ebq_s_w = numpy.zeros(cebq[('u',0)].shape,'d')
self.ebq_s_w.fill(self.swConstant)
self.ebq_grad_psic = numpy.zeros(cebq[('f',0)].shape,'d')
self.ebq_psic = numpy.zeros(cebq[('u',0)].shape,'d')
if ('u',0) in cebq:
cebq['psi_n'] = numpy.zeros(cebq[('u',0)].shape,'d')
cebq[('dpsi_n',0)] = numpy.ones(cebq[('u',0)].shape,'d')
if ('u',0) in cebq_global:
cebq_global['psi_n'] = numpy.zeros(cebq_global[('u',0)].shape,'d')
cebq_global[('dpsi_n',0)] = numpy.ones(cebq_global[('u',0)].shape,'d')
def initializeGlobalExteriorElementBoundaryQuadrature(self,t,cebqe):
TwophaseDarcyFlow_base.initializeGlobalExteriorElementBoundaryQuadrature(self,t,cebqe)
#set up dummy values in case we're not running the other model
self.ebqe_s_w = numpy.zeros(cebqe[('u',0)].shape,'d')
self.ebqe_s_w[:]=self.swConstant
self.ebqe_grad_psic = numpy.zeros(cebqe[('f',0)].shape,'d')
self.ebqe_psic = numpy.zeros(cebqe[('u',0)].shape,'d')
cebqe['psi_n'] = numpy.zeros(cebqe[('u',0)].shape,'d')
cebqe[('dpsi_n',0)] = numpy.ones(cebqe[('u',0)].shape,'d')
def initializeGeneralizedInterpolationPointQuadrature(self,t,cip):
TwophaseDarcyFlow_base.initializeGeneralizedInterpolationPointQuadrature(self,t,cip)
self.ip_grad_psic = numpy.zeros(cip[('f',0)].shape,'d')
self.ip_psic = numpy.zeros(cip[('u',0)].shape,'d')
cip['psi_n'] = numpy.zeros(cip[('u',0)].shape,'d')
cip[('dpsi_n',0)] = numpy.ones(cip[('u',0)].shape,'d')
#
class TwophaseDarcy_split_pp_saturation_base(TwophaseDarcyFlow_base):
"""Base class for aqueous phase mass conservation equation
(saturation equation) in a fractional flow formulation.
The primary responsibility of the base class is to handle
synchronization with the 'pressure' equation to get the total flow
velocity variable, :math:`q_t`, and aqueous phase pressure head, :math:`psi_w`
"""
def __init__(self,
nd=1,
dimensionless_gravity=[-1.0],
density_w=998.2, #Water kg/m^3
density_n=1.205, #Air kg/m^3
viscosity_w=8.9e-4, #Water kg/m s
viscosity_n=1.81e-5,#Air kg/ m s
density_w_parameters=TwophaseDarcyFlow_base.default_density_w_parameters,
density_n_parameters=TwophaseDarcyFlow_base.default_density_n_parameters,
psk_model='VGM',
nMaterialTypes=1,
nPressModel=0,
diagonal_conductivity=True,
#for debugging
qScalarConstant=1.0,
capillaryDiffusionScaling=1.0,
advectionScaling=1.0):
TwophaseDarcyFlow_base.__init__(self,
nd=nd,
dimensionless_gravity=dimensionless_gravity,
density_w=density_w,
density_n=density_n,
viscosity_w=viscosity_w,
viscosity_n=viscosity_n,
density_w_parameters=density_w_parameters,
density_n_parameters=density_n_parameters,
psk_model=psk_model,
nMaterialTypes=nMaterialTypes,
diagonal_conductivity=diagonal_conductivity)
self.nc=1
self.nd=nd
self.nPressModel=nPressModel
#for debugging
        self.qScalarConstant=qScalarConstant
self.capillaryDiffusionScaling=capillaryDiffusionScaling
self.advectionScaling=advectionScaling
def attachModels(self,modelList):
if self.nPressModel is None:
            print('Warning TwophaseDarcy_split_pp_saturation_base nPressModel is None; returning early in attachModels')
return
self.flowModel = modelList[self.nPressModel]
#
self.q_q_t = modelList[self.nPressModel].q[('velocity',0)]
self.ebqe_q_t = modelList[self.nPressModel].ebqe[('velocity',0)]
if ('velocity',0) in modelList[self.nPressModel].ebq:
self.ebq_q_t = modelList[self.nPressModel].ebq[('velocity',0)]
#do we really need other model values for q_t in potential calculation?
assert self.ip_psiw.shape == modelList[self.nPressModel].phi_ip[('u',0)].shape
self.ip_psiw = modelList[self.nPressModel].phi_ip[('u',0)]
self.q_psiw = modelList[self.nPressModel].q[('u',0)]
self.ebqe_psiw = modelList[self.nPressModel].ebqe[('u',0)]
if ('u',0) in modelList[self.nPressModel].ebq:
self.ebq_psiw = modelList[self.nPressModel].ebq[('u',0)]
def initializeElementQuadrature(self,t,cq):
TwophaseDarcyFlow_base.initializeElementQuadrature(self,t,cq)
#set up dummy values in case we're not running the other model
self.q_q_t = numpy.zeros(cq[('f',0)].shape,'d')
self.q_q_t[:] = self.qScalarConstant
self.q_psiw = numpy.ones(cq[('u',0)].shape,'d')
cq['sw'] = numpy.zeros(cq[('u',0)].shape,'d')
def initializeElementBoundaryQuadrature(self,t,cebq,cebq_global):
TwophaseDarcyFlow_base.initializeElementBoundaryQuadrature(self,t,cebq,cebq_global)
#set up dummy values in case we're not running the other model
self.ebq_q_t = numpy.zeros(cebq[('f',0)].shape,'d')
self.ebq_q_t[:] = self.qScalarConstant
self.ebq_psiw = numpy.ones(cebq[('u',0)].shape,'d')
if ('u',0) in cebq:
cebq['sw'] = numpy.zeros(cebq[('u',0)].shape,'d')
if ('u',0) in cebq_global:
cebq_global['sw'] = numpy.zeros(cebq_global[('u',0)].shape,'d')
def initializeGlobalExteriorElementBoundaryQuadrature(self,t,cebqe):
TwophaseDarcyFlow_base.initializeGlobalExteriorElementBoundaryQuadrature(self,t,cebqe)
#set up dummy values in case we're not running the other model
self.ebqe_q_t = numpy.zeros(cebqe[('f',0)].shape,'d')
self.ebqe_q_t[:] = self.qScalarConstant
self.ebqe_psiw = numpy.ones(cebqe[('u',0)].shape,'d')
cebqe['sw'] = numpy.zeros(cebqe[('u',0)].shape,'d')
def initializeGeneralizedInterpolationPointQuadrature(self,t,cip):
TwophaseDarcyFlow_base.initializeGeneralizedInterpolationPointQuadrature(self,t,cip)
#set up dummy values in case we're not running the other model
self.ip_q_t = numpy.zeros(cip[('f',0)].shape,'d')
self.ip_q_t[:] = self.qScalarConstant
self.ip_psiw = numpy.ones(cip[('u',0)].shape,'d')
cip['sw'] = numpy.zeros(cip[('u',0)].shape,'d')
class TwophaseDarcy_incompressible_split_pp_pressure(TwophaseDarcy_split_pp_pressure_base):
"""Total flow conservation equation in an incompressible fractional
flow formulation
"""
# Saturation equation
# .. math::
# \begin{eqnarray}
# \label{eq:2p-ff-mb-w}
# \pd{m_w}{t} + \deld\left(\vec f_w - \ten{a}_{w}\grad \phi_w \right) + r_w &=& 0
# \end{eqnarray}
# and total flow conservation equation
# \begin{eqnarray}
# \label{eq:2p-ff-mb-m}
# \deld\left(\vec f_m - \ten{a}_m \grad \phi_m \right) + r_m &=& 0
# \end{eqnarray}
# \begin{table}[ht]
# \caption{Coefficient definitions for Two-phase flow, \eqn{2p-ff-mb-w} and \eqn{2p-ff-mb-m}
# \label{tab:2p-ff-coef-1}
# }
# \begin{tabular}{cc}
# \hline
# Var. & Def. \\
# \hline
# $u_w$ & $S_w $ \\
# $u_n$ & $\psi_w $ \\
# $\phi_w$ & $\psi_c$ \\
# $\phi_m$ & $\psi_w$ \\
# $m_w$ & $\theta_s \rho_{w}S_w$ \\
# $\vec f_w$ & $\gvec{\sigma}_t F_w - \ten{K}_s\lambda_wF_n\left(b\rho_n - \rho_w\right)\vec g_u$ \\
# $\vec f_m$ &$-\ten{K}_s\lambda_tF_n\grad \psi_c + \ten{K}_s\vec g\lambda_t\left[\rho_w + F_n\left(b\rho_n - \rho_w\right)\right]$\\
# $\ten a_{w,w}$ & $-\lambda_wF_n \ten{K}_{s}$ \\
# $\ten a_{m,m}$ & $\lambda_t \ten{K}_{s}$ \\
# \hline
# $F_i $ & $\lambda_i/\lambda_t $, $i=w,n$ \\
# $\grad \psi_c $ & $\od{\psi_c}{S_w}\grad S_w $ \\
# $\gvec {\sigma}_t$ & $\gvec \sigma_w + \gvec \sigma_n$\\
# \hline
# \end{tabular}
# \end{table}
# Here :math:`S_i` is the saturation for each phase,
# :math:`\varrho_{i}` is the density and :math:`\varrho_{i,0}` is a
# reference value
# (normalized) mass flux for each phase is
# .. math::
# \vec \sigma_i = - \ten{a}_{i}\grad \phi_i, i=w,n
# and
# .. math::
# \psi_{i} = p_{i}/|\vec g|\rho_{w,0}
# b = \varrho_{n,0}/\varrho_{w,0}
    #        \hat{\mu}_{i} = \mu_i/\mu_{w}
# for the pressure of each phase, p_i, and we have the capillary pressure relation
# .. math::
# \psi_{c} = \psi_{n} - \psi_{w}
# The dependent variables are :math:`S_w`, and :math:`\psi_w`
# Note :math:`S_n = 1-S_w`
# needs to be implemented
# .. math::
# r_m = r_m(\vec x,t)
# TODO:
#
# Figure out if really need to evaluate potential interpolation points
from proteus.cTwophaseDarcyCoefficients import twophaseDarcy_incompressible_split_sd_pressure_het_matType
def __init__(self,
nd=1,
dimensionless_gravity=[-1.0],
density_w=998.2, #Water kg/m^3
density_n=1.205, #Air kg/m^3
viscosity_w=8.9e-4, #Water kg/m s
viscosity_n=1.81e-5,#Air kg/ m s
psk_model='VGM',
nMaterialTypes=1,
nSatModel=1,
diagonal_conductivity=True,
#for debugging
swConstant=1.0,
capillaryDiffusionScaling=1.0):
TwophaseDarcy_split_pp_pressure_base.__init__(self,
nd=nd,
dimensionless_gravity=dimensionless_gravity,
density_w=density_w,
density_n=density_n,
viscosity_w=viscosity_w,
viscosity_n=viscosity_n,
psk_model=psk_model,
nMaterialTypes=nMaterialTypes,
nSatModel=nSatModel,
diagonal_conductivity=diagonal_conductivity,
swConstant=swConstant,
capillaryDiffusionScaling=capillaryDiffusionScaling)
variableNames=['psi_w']
#these are only nonlinear for compressible flow
mass = {}
advection = {0:{0:'linear'}}
hamiltonian={}
diffusion = {0:{0: {0:'linear'}}}
potential = {0:{0: 'u'}}
reaction = {0:{0:'linear'}}
if self.diagonal_conductivity:
sparseDiffusionTensors = {(0,0):(numpy.arange(self.nd+1,dtype='i'),
numpy.arange(self.nd,dtype='i'))}
else: #full
sparseDiffusionTensors = {(0,0):(numpy.arange(self.nd**2+1,step=self.nd,dtype='i'),
numpy.array([list(range(self.nd)) for row in range(self.nd)],dtype='i'))}
TC_base.__init__(self,
self.nc,
mass,
advection,
diffusion,
potential,
reaction,
hamiltonian,
variableNames,
sparseDiffusionTensors = sparseDiffusionTensors,
useSparseDiffusion = True)
def evaluate(self,t,c):
if c[('u',0)].shape == self.q_s_w.shape:
materialTypes = self.materialTypes_q
s_w = self.q_s_w
grad_psic = self.q_grad_psic
c['psi_n']= numpy.copy(self.q_psic)
c['psi_n'] += c[('u',0)]
vol_frac_w = self.q[('vol_frac',0)]
vol_frac_n = self.q[('vol_frac',1)]
#mwf debug
#import pdb
#pdb.set_trace()
#for eN in range(c['x'].shape[0]):
# for k in range(c['x'].shape[1]):
# if (1.25 <= c['x'][eN,k,0] and c['x'][eN,k,0] <= 2.25 and
# 1.25 <= c['x'][eN,k,1] and c['x'][eN,k,1] <= 1.5):
#mwf major hack to test initialization
#grad_psic[eN,k,0] = 0.0
#grad_psic[eN,k,1] = 1.0
#
# print "split press eval inside lens eN=%s k=%s x=%s grad_psiw= %s grad_psic= %s s_w= %s " % (eN,k,c['x'][eN,k],c[('grad(u)',0)][eN,k],grad_psic[eN,k],s_w[eN,k])
# else:
# print "split press eval outside lens eN=%s k=%s x=%s grad_psiw= %s grad_psic= %s s_w= %s " % (eN,k,c['x'][eN,k],c[('grad(u)',0)][eN,k],grad_psic[eN,k],s_w[eN,k])
#import pdb
#pdb.set_trace()
elif c[('u',0)].shape == self.ebqe_s_w.shape:
materialTypes = self.materialTypes_ebqe
s_w = self.ebqe_s_w
grad_psic = self.ebqe_grad_psic
c['psi_n']= numpy.copy(self.ebqe_psic)
c['psi_n'] += c[('u',0)]
vol_frac_w = self.ebqe[('vol_frac',0)]
vol_frac_n = self.ebqe[('vol_frac',1)]
elif c[('u',0)].shape == self.ip_psic.shape:
c['psi_n']= numpy.copy(self.ip_psic)
c['psi_n'] += c[('u',0)]
return
else:
assert c[('u',0)].shape == self.ebq_s_w.shape
materialTypes = self.materialTypes_ebq
s_w = self.ebq_s_w
grad_psic = self.ebq_grad_psic
c['psi_n']= numpy.copy(self.ebq_psic)
c['psi_n'] += c[('u',0)]
vol_frac_w = self.ebq[('vol_frac',0)]
vol_frac_n = self.ebq[('vol_frac',1)]
assert self.rwork_psk is not None
#mwf debug
        #import pdb
        #pdb.set_trace()
self.twophaseDarcy_incompressible_split_sd_pressure_het_matType(self.psk_types[self.psk_model],
self.sdInfo[(0,0)][0],
self.sdInfo[(0,0)][1],
materialTypes,
self.muw,
self.mun,
self.omega_types,
self.Ksw_types,
self.b,
self.capillaryDiffusionScaling,
self.rwork_psk,self.iwork_psk,
self.rwork_psk_tolerances,
self.rwork_density_w,
self.rwork_density_n,
self.g[:self.nd],
s_w,
grad_psic,
c[('f',0)],
c[('a',0,0)])
self.twophaseDarcy_vol_frac(materialTypes,
self.omega_types,
s_w,
vol_frac_w,
vol_frac_n)
#mwf debug
if (numpy.isnan(c[('a',0,0)]).any() or
numpy.isnan(c[('f',0)]).any() or
numpy.isnan(c[('u',0)]).any()):
import pdb
pdb.set_trace()
#mwf debug
#import pdb
#pdb.set_trace()
if c[('u',0)].shape == self.q_s_w.shape:
c['Ks']=c[('a',0,0)][:,:,0]
#
class TwophaseDarcy_incompressible_split_pp_saturation(TwophaseDarcy_split_pp_saturation_base):
"""Aqueous phase mass conservation equation (saturation equation) in
an incompressible fractional flow formulation
"""
# Saturation equation
# .. math::
# \begin{eqnarray}
# \label{eq:2p-ff-mb-w}
# \pd{m_w}{t} + \deld\left(\vec f_w - \ten{a}_{w}\grad \phi_w \right) + r_w &=& 0
# \end{eqnarray}
# and total flow conservation equation
# \begin{eqnarray}
# \label{eq:2p-ff-mb-m}
# \deld\left(\vec f_m - \ten{a}_m \grad \phi_m \right) + r_m &=& 0
# \end{eqnarray}
# \begin{table}[ht]
# \caption{Coefficient definitions for Two-phase flow, \eqn{2p-ff-mb-w} and \eqn{2p-ff-mb-m}
# \label{tab:2p-ff-coef-1}
# }
# \begin{tabular}{cc}
# \hline
# Var. & Def. \\
# \hline
# $u_w$ & $S_w $ \\
# $u_n$ & $\psi_w $ \\
# $\phi_w$ & $\psi_c$ \\
# $\phi_m$ & $\psi_w$ \\
# $m_w$ & $\theta_s \rho_{w}S_w$ \\
# $\vec f_w$ & $\gvec{\sigma}_t F_w - \ten{K}_s\lambda_wF_n\left(b\rho_n - \rho_w\right)\vec g_u$ \\
# $\vec f_m$ &$-\ten{K}_s\lambda_tF_n\grad \psi_c + \ten{K}_s\vec g\lambda_t\left[\rho_w + F_n\left(b\rho_n - \rho_w\right)\right]$\\
# $\ten a_{w,w}$ & $-\lambda_wF_n \ten{K}_{s}$ \\
# $\ten a_{m,m}$ & $\lambda_t \ten{K}_{s}$ \\
# \hline
# $F_i $ & $\lambda_i/\lambda_t $, $i=w,n$ \\
# $\grad \psi_c $ & $\od{\psi_c}{S_w}\grad S_w $ \\
# $\gvec {\sigma}_t$ & $\gvec \sigma_w + \gvec \sigma_n$\\
# \hline
# \end{tabular}
# \end{table}
# Here :math:`S_i` is the saturation for each phase, :math:`\varrho_{i}` is the density and
# :math:`\varrho_{i,0}` is a reference value
# (normalized) mass flux for each phase is
# .. math::
# \vec \sigma_i = - \ten{a}_{i}\grad \phi_i, i=w,n
# and
# .. math::
# \psi_{i} = p_{i}/|\vec g|\rho_{w,0}
# b = \varrho_{n,0}/\varrho_{w,0}
    #        \hat{\mu}_{i} = \mu_i/\mu_{w}
# for the pressure of each phase, p_i, and we have the capillary pressure relation
# .. math::
# \psi_{c} = \psi_{n} - \psi_{w}
# The dependent variables are :math:`S_w`, and :math:`\psi_w`
# Note :math:`S_n = 1-S_w`
# needs to be implemented
# .. math::
# r_m = r_m(\vec x,t)
# TODO:
#
# Figure out if really need to evaluate potential interpolation points
from proteus.cTwophaseDarcyCoefficients import twophaseDarcy_incompressible_split_pp_sd_saturation_het_matType
def __init__(self,
nd=1,
dimensionless_gravity=[-1.0],
density_w=998.2, #Water kg/m^3
density_n=1.205, #Air kg/m^3
viscosity_w=8.9e-4, #Water kg/m s
viscosity_n=1.81e-5,#Air kg/ m s
psk_model='VGM',
nMaterialTypes=1,
nPressModel=0,
diagonal_conductivity=True,
#for debugging
qScalarConstant=1.0,
capillaryDiffusionScaling=1.0,
advectionScaling=1.0):
TwophaseDarcy_split_pp_saturation_base.__init__(self,
nd=nd,
dimensionless_gravity=dimensionless_gravity,
density_w=density_w,
density_n=density_n,
viscosity_w=viscosity_w,
viscosity_n=viscosity_n,
psk_model=psk_model,
nMaterialTypes=nMaterialTypes,
nPressModel=nPressModel,
diagonal_conductivity=diagonal_conductivity,
qScalarConstant=qScalarConstant,
capillaryDiffusionScaling=capillaryDiffusionScaling,
advectionScaling=advectionScaling)
variableNames=['psi_c']
mass = {0:{0:'nonlinear'}}
advection = {0:{0:'nonlinear'}}
hamiltonian={}
diffusion = {0:{0:{0:'nonlinear'}}}
potential = {0:{0: 'nonlinear'}}
reaction = {0:{0:'linear'}}
if self.diagonal_conductivity:
sparseDiffusionTensors = {(0,0):(numpy.arange(self.nd+1,dtype='i'),
numpy.arange(self.nd,dtype='i'))}
else: #full
sparseDiffusionTensors = {(0,0):(numpy.arange(self.nd**2+1,step=self.nd,dtype='i'),
numpy.array([list(range(self.nd)) for row in range(self.nd)],dtype='i'))}
TC_base.__init__(self,
self.nc,
mass,
advection,
diffusion,
potential,
reaction,
hamiltonian,
variableNames,
sparseDiffusionTensors = sparseDiffusionTensors,
useSparseDiffusion = True)
def evaluate(self,t,c):
if c[('f',0)].shape == self.q_q_t.shape:
materialTypes = self.materialTypes_q
q_t = self.q_q_t
psiw = self.q_psiw
vol_frac_w = self.q[('vol_frac',0)]
vol_frac_n = self.q[('vol_frac',1)]
elif c[('f',0)].shape == self.ebqe_q_t.shape:
materialTypes = self.materialTypes_ebqe
q_t = self.ebqe_q_t
psiw = self.ebqe_psiw
vol_frac_w = self.ebqe[('vol_frac',0)]
vol_frac_n = self.ebqe[('vol_frac',1)]
elif c[('f',0)].shape == self.ip_q_t.shape:
materialTypes = self.materialTypes_ip
q_t = self.ip_q_t
psiw = self.ip_psiw
vol_frac_w = self.ip[('vol_frac',0)]
vol_frac_n = self.ip[('vol_frac',1)]
else:
assert c[('f',0)].shape == self.ebq_q_t.shape
materialTypes = self.materialTypes_ebq
q_t = self.ebq_q_t
psiw = self.ebq_psiw
vol_frac_w = self.ebq[('vol_frac',0)]
vol_frac_n = self.ebq[('vol_frac',1)]
assert self.rwork_psk is not None
#mwf debug
#import pdb
#pdb.set_trace()
self.twophaseDarcy_incompressible_split_pp_sd_saturation_het_matType(self.psk_types[self.psk_model],
self.sdInfo[(0,0)][0],
self.sdInfo[(0,0)][1],
materialTypes,
self.muw,
self.mun,
self.omega_types,
self.Ksw_types,
self.b,
self.capillaryDiffusionScaling,
self.advectionScaling,
self.rwork_psk,self.iwork_psk,
self.rwork_psk_tolerances,
self.rwork_density_w,
self.rwork_density_n,
self.g[:self.nd],
q_t,
c['sw'],
c[('u',0)],
c[('m',0)],
c[('dm',0,0)],
c[('phi',0)],
c[('dphi',0,0)],
c[('f',0)],
c[('df',0,0)],
c[('a',0,0)],
c[('da',0,0,0)])
self.twophaseDarcy_vol_frac(materialTypes,
self.omega_types,
c['sw'],
vol_frac_w,
vol_frac_n)
########################################
#begin classes for specific psk models
########################################
class IncompressibleFractionalFlowPressureMualemVanGenuchten(TwophaseDarcy_incompressible_split_pressure):
"""Total flow equation coefficients for incompressible flow assuming
Mualem-Van Genuchten psk's
"""
def __init__(self,
nd,
Ksw_types,
vgm_n_types,
vgm_alpha_types,
thetaR_types,
thetaSR_types,
dimensionless_gravity,
density_w,
density_n,
viscosity_w,
viscosity_n,
nSatModel=1,
diagonal_conductivity=True,
vgm_small_eps=1.0e-16,
vgm_ns_del=1.0e-8,
#for debugging
swConstant=1.0,
capillaryDiffusionScaling=1.0):
TwophaseDarcy_incompressible_split_pressure.__init__(self,
nd=nd,
dimensionless_gravity=dimensionless_gravity,
density_w=density_w,
density_n=density_n,
viscosity_w=viscosity_w,
viscosity_n=viscosity_n,
psk_model='VGM',
nMaterialTypes=len(thetaR_types),
nSatModel=nSatModel,
diagonal_conductivity=diagonal_conductivity,
swConstant=swConstant,
capillaryDiffusionScaling=capillaryDiffusionScaling)
for input in [vgm_n_types,vgm_alpha_types,thetaR_types,thetaSR_types]:
assert len(input)==self.nMaterialTypes
vgm_m_types = 1.0-old_div(1.0,vgm_n_types)
thetaS_types = thetaSR_types + thetaR_types
Sw_max_types = numpy.ones((self.nMaterialTypes,),'d')
Sw_min_types = old_div(thetaR_types,thetaS_types)
#mwf debug
#import pdb
#pdb.set_trace()
self.psk_tolerances['VGM']['eps_small']=vgm_small_eps
self.psk_tolerances['VGM']['ns_del'] =vgm_ns_del
self.setMaterialTypes(Ksw_types=Ksw_types,
omega_types=thetaS_types,
Sw_max_types=Sw_max_types,
Sw_min_types=Sw_min_types,
vg_alpha_types=vgm_alpha_types,
vg_m_types=vgm_m_types)
#
class IncompressibleFractionalFlowSaturationMualemVanGenuchten(TwophaseDarcy_incompressible_split_saturation):
"""Saturation equation coefficients for incompressible flow assuming
Mualem-Van Genuchten psk's
"""
def __init__(self,
nd,
Ksw_types,
vgm_n_types,
vgm_alpha_types,
thetaR_types,
thetaSR_types,
dimensionless_gravity,
density_w,
density_n,
viscosity_w,
viscosity_n,
nPressModel=1,
diagonal_conductivity=True,
vgm_small_eps=1.0e-16,
vgm_ns_del=1.0e-8,
#for debugging
qScalarConstant=1.0,
capillaryDiffusionScaling=1.0,
advectionScaling=1.0):
TwophaseDarcy_incompressible_split_saturation.__init__(self,
nd=nd,
dimensionless_gravity=dimensionless_gravity,
density_w=density_w,
density_n=density_n,
viscosity_w=viscosity_w,
viscosity_n=viscosity_n,
psk_model='VGM',
nMaterialTypes=len(thetaR_types),
nPressModel=nPressModel,
diagonal_conductivity=diagonal_conductivity,
qScalarConstant=qScalarConstant,
capillaryDiffusionScaling=capillaryDiffusionScaling,
advectionScaling=advectionScaling)
for input in [vgm_n_types,vgm_alpha_types,thetaR_types,thetaSR_types]:
assert len(input)==self.nMaterialTypes
vgm_m_types = 1.0-old_div(1.0,vgm_n_types)
thetaS_types = thetaSR_types + thetaR_types
Sw_max_types = numpy.ones((self.nMaterialTypes,),'d')
Sw_min_types = old_div(thetaR_types,thetaS_types)
#mwf debug
#import pdb
#pdb.set_trace()
self.psk_tolerances['VGM']['eps_small']=vgm_small_eps
self.psk_tolerances['VGM']['ns_del'] =vgm_ns_del
self.setMaterialTypes(Ksw_types=Ksw_types,
omega_types=thetaS_types,
Sw_max_types=Sw_max_types,
Sw_min_types=Sw_min_types,
vg_alpha_types=vgm_alpha_types,
vg_m_types=vgm_m_types)
class IncompressibleFractionalFlowSaturationMualemVanGenuchtenSplitAdvDiff(IncompressibleFractionalFlowSaturationMualemVanGenuchten):
"""Saturation equation coefficients for incompressible flow assuming
Mualem-Van Genuchten psk's and splitting of advection and
capillary diffusion terms
"""
def __init__(self,
nd,
Ksw_types,
vgm_n_types,
vgm_alpha_types,
thetaR_types,
thetaSR_types,
dimensionless_gravity,
density_w,
density_n,
viscosity_w,
viscosity_n,
nPressModel=1,
diagonal_conductivity=True,
vgm_small_eps=1.0e-16,
vgm_ns_del=1.0e-8,
#for debugging
qScalarConstant=1.0,
capillaryDiffusionScaling=1.0,
advectionScaling=1.0,
satModelIndex_me=1,
satModelIndex_other=2):
IncompressibleFractionalFlowSaturationMualemVanGenuchten.__init__(self,
nd,
Ksw_types,
vgm_n_types,
vgm_alpha_types,
thetaR_types,
thetaSR_types,
dimensionless_gravity,
density_w,
density_n,
viscosity_w,
viscosity_n,
nPressModel=nPressModel,
diagonal_conductivity=diagonal_conductivity,
vgm_small_eps=vgm_small_eps,
vgm_ns_del=vgm_ns_del,
qScalarConstant=qScalarConstant,
capillaryDiffusionScaling=capillaryDiffusionScaling,
advectionScaling=advectionScaling)
self.satModelIndex_other=satModelIndex_other
self.satModelIndex_me =satModelIndex_me
self.satModel_me = None
self.satModel_other=None
self.variableNames=['s_w_%s' % self.satModelIndex_me]
self.u_ip = {}
def attachModels(self,modelList):
IncompressibleFractionalFlowSaturationMualemVanGenuchten.attachModels(self,modelList)
if 0 <= self.satModelIndex_me and self.satModelIndex_me < len(modelList):
self.satModel_me = modelList[self.satModelIndex_me]
            #interpolation points on the physical mesh for this model's fem space
self.u_ip['x'] = self.satModel_me.u[0].femSpace.updateInterpolationPoints()
            #value of the other model's solution at this model's interpolation points
self.u_ip[('u_other',0)]= numpy.zeros((self.satModel_me.mesh.nElements_global,
self.satModel_me.u[0].femSpace.referenceFiniteElement.interpolationConditions.nQuadraturePoints),
'd')
if 0 <= self.satModelIndex_other and self.satModelIndex_other < len(modelList):
self.satModel_other = modelList[self.satModelIndex_other]
            #the other model's saturation trial functions at this model's interpolation points
self.u_ip[('v_other',0)] = numpy.zeros((self.satModel_me.mesh.nElements_global,
self.satModel_me.u[0].femSpace.referenceFiniteElement.interpolationConditions.nQuadraturePoints,
self.satModel_other.nDOF_trial_element[0]),
'd')
self.satModel_other.u[0].femSpace.getBasisValues(self.satModel_me.u[0].femSpace.referenceFiniteElement.interpolationConditions.quadraturePointArray,
self.u_ip[('v_other',0)])
def preStep(self,t,firstStep=False):
if self.satModel_other is not None:# and self.satModelIndex_me != 1:#mwf hack
#todo tLast is getting messed up
#tLastSave =self.satModel_me.timeIntegration.tLast
            #todo need to make sure mass is conserved; handle projection from cg to dg correctly
#todo ExplicitRK is getting messed up here, going twice as fast
logEvent("Incomp.FracFlowSatAdvDiff preStep t= %s model %s setting its solution from model %s " % (t,self.satModel_me,self.satModel_other),level=2)
#if self.satModelIndex_me != 1:#mwf hack
self.satModel_other.u[0].getValues(self.u_ip[('v_other',0)],
self.u_ip[('u_other',0)])
#mwf hack to test mass conservation
#for eN in range(self.u_ip[('u_other',0)].shape[0]):
# uAvg = numpy.sum(self.u_ip[('u_other',0)][eN])/float(self.u_ip[('u_other',0)].shape[1])
# self.u_ip[('u_other',0)][eN].fill(uAvg)
#import pdb
#pdb.set_trace()
self.satModel_me.u[0].projectFromInterpolationConditions(self.u_ip[('u_other',0)])
self.satModel_me.calculateCoefficients()
self.satModel_me.calculateElementResidual()
#this doesn't work either?
self.satModel_me.timeIntegration.initializeTimeHistory(resetFromDOF=True)
            #only affects lagging of stabilization and shock-capturing
self.satModel_me.updateTimeHistory(t,resetFromDOF=True)
#now do again because of subgrid error lagging
#\todo modify subgrid error lagging so this won't be necessary
self.satModel_me.calculateCoefficients()
self.satModel_me.calculateElementResidual()
#self.satModel_me.timeIntegration.updateTimeHistory(resetFromDOF=True)
self.satModel_me.timeIntegration.resetTimeHistory(resetFromDOF=True)
self.satModel_me.updateTimeHistory(t,resetFromDOF=True)
#mwf hack tLast is getting messed up
#self.satModel_me.timeIntegration.tLast = tLastSave
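            #note: the first copyInstructions assignment below is immediately
            #overridden; only the reset_uList instruction is actually returned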
copyInstructions = {'copy_uList':True,
'uList_model':self.satModelIndex_other}
copyInstructions = {'reset_uList':True}
#print "advDiff satModel_me= %s after \n u_me= %s " % (self.satModelIndex_me,
# self.satModel_me.u[0].dof)
#
return copyInstructions
else:
return {}
#
class CompressibleFractionalFlowPressureMualemVanGenuchten(TwophaseDarcy_compressible_split_pressure):
"""Total flow equation coefficients for slight compressible flow
assuming Mualem-Van Genuchten psk's
"""
def __init__(self,
nd,
Ksw_types,
vgm_n_types,
vgm_alpha_types,
thetaR_types,
thetaSR_types,
dimensionless_gravity,
density_w,
density_n,
viscosity_w,
viscosity_n,
density_w_parameters,
density_n_parameters,
nSatModel=1,
                 compressibilityFlag=2,#2: slight compressibility for both N and W; 1: compressibility in N only
diagonal_conductivity=True,
vgm_small_eps=1.0e-16,
vgm_ns_del=1.0e-8,
#for debugging
swConstant=1.0,
capillaryDiffusionScaling=1.0):
TwophaseDarcy_compressible_split_pressure.__init__(self,
nd=nd,
dimensionless_gravity=dimensionless_gravity,
density_w=density_w,
density_n=density_n,
viscosity_w=viscosity_w,
viscosity_n=viscosity_n,
density_w_parameters=density_w_parameters,
density_n_parameters=density_n_parameters,
psk_model='VGM',
nMaterialTypes=len(thetaR_types),
nSatModel=nSatModel,
compressibilityFlag=compressibilityFlag,
diagonal_conductivity=diagonal_conductivity,
swConstant=swConstant,
capillaryDiffusionScaling=capillaryDiffusionScaling)
for input in [vgm_n_types,vgm_alpha_types,thetaR_types,thetaSR_types]:
assert len(input)==self.nMaterialTypes
vgm_m_types = 1.0-old_div(1.0,vgm_n_types)
thetaS_types = thetaSR_types + thetaR_types
Sw_max_types = numpy.ones((self.nMaterialTypes,),'d')
Sw_min_types = old_div(thetaR_types,thetaS_types)
#mwf debug
#import pdb
#pdb.set_trace()
self.psk_tolerances['VGM']['eps_small']=vgm_small_eps
self.psk_tolerances['VGM']['ns_del'] =vgm_ns_del
self.setMaterialTypes(Ksw_types=Ksw_types,
omega_types=thetaS_types,
Sw_max_types=Sw_max_types,
Sw_min_types=Sw_min_types,
vg_alpha_types=vgm_alpha_types,
vg_m_types=vgm_m_types)
#
class CompressibleFractionalFlowSaturationMualemVanGenuchten(TwophaseDarcy_compressible_split_saturation):
"""Saturation equation coefficients for slightly compressible flow
assuming Mualem-Van Genuchten psk's
"""
def __init__(self,
nd,
Ksw_types,
vgm_n_types,
vgm_alpha_types,
thetaR_types,
thetaSR_types,
dimensionless_gravity,
density_w,
density_n,
viscosity_w,
viscosity_n,
density_w_parameters,
density_n_parameters,
nPressModel=1,
                 compressibilityFlag=2,#2: slight compressibility for both N and W; 1: compressibility in N only
diagonal_conductivity=True,
vgm_small_eps=1.0e-16,
vgm_ns_del=1.0e-8,
#for debugging
qScalarConstant=1.0,
capillaryDiffusionScaling=1.0,
advectionScaling=1.0):
TwophaseDarcy_compressible_split_saturation.__init__(self,
nd=nd,
dimensionless_gravity=dimensionless_gravity,
density_w=density_w,
density_n=density_n,
viscosity_w=viscosity_w,
viscosity_n=viscosity_n,
density_w_parameters=density_w_parameters,
density_n_parameters=density_n_parameters,
psk_model='VGM',
nMaterialTypes=len(thetaR_types),
nPressModel=nPressModel,
compressibilityFlag=compressibilityFlag,
diagonal_conductivity=diagonal_conductivity,
qScalarConstant=qScalarConstant,
capillaryDiffusionScaling=capillaryDiffusionScaling,
advectionScaling=advectionScaling)
for input in [vgm_n_types,vgm_alpha_types,thetaR_types,thetaSR_types]:
assert len(input)==self.nMaterialTypes
vgm_m_types = 1.0-old_div(1.0,vgm_n_types)
thetaS_types = thetaSR_types + thetaR_types
Sw_max_types = numpy.ones((self.nMaterialTypes,),'d')
Sw_min_types = old_div(thetaR_types,thetaS_types)
#mwf debug
#import pdb
#pdb.set_trace()
self.psk_tolerances['VGM']['eps_small']=vgm_small_eps
self.psk_tolerances['VGM']['ns_del'] =vgm_ns_del
self.setMaterialTypes(Ksw_types=Ksw_types,
omega_types=thetaS_types,
Sw_max_types=Sw_max_types,
Sw_min_types=Sw_min_types,
vg_alpha_types=vgm_alpha_types,
vg_m_types=vgm_m_types)
#
class IncompressibleFractionalFlowPressureSimplePSKs(TwophaseDarcy_incompressible_split_pressure):
"""Total flow equation coefficients for incompressible flow assuming
'simp' quadratic rel-perm, linear capillary pressure psk relations
"""
def __init__(self,
nd,
Ksw_types,
thetaR_types,
thetaSR_types,
dimensionless_gravity,
density_w,
density_n,
viscosity_w,
viscosity_n,
nSatModel=1,
diagonal_conductivity=True,
#for debugging
swConstant=1.0,
capillaryDiffusionScaling=1.0):
TwophaseDarcy_incompressible_split_pressure.__init__(self,
nd=nd,
dimensionless_gravity=dimensionless_gravity,
density_w=density_w,
density_n=density_n,
viscosity_w=viscosity_w,
viscosity_n=viscosity_n,
psk_model='simp',
nMaterialTypes=len(thetaR_types),
nSatModel=nSatModel,
diagonal_conductivity=diagonal_conductivity,
swConstant=swConstant,
capillaryDiffusionScaling=capillaryDiffusionScaling)
        for types in [thetaR_types,thetaSR_types]:
            assert len(types) == self.nMaterialTypes
thetaS_types = thetaSR_types + thetaR_types
Sw_max_types = numpy.ones((self.nMaterialTypes,),'d')
Sw_min_types = old_div(thetaR_types,thetaS_types)
#mwf debug
#import pdb
#pdb.set_trace()
self.setMaterialTypes(Ksw_types=Ksw_types,
omega_types=thetaS_types,
Sw_max_types=Sw_max_types,
Sw_min_types=Sw_min_types)
#
class IncompressibleFractionalFlowSaturationSimplePSKs(TwophaseDarcy_incompressible_split_saturation):
"""Saturation equation coefficients for incompressible flow assuming
'simp' quadratic rel-perm, linear capillary pressure psk relations
"""
def __init__(self,
nd,
Ksw_types,
thetaR_types,
thetaSR_types,
dimensionless_gravity,
density_w,
density_n,
viscosity_w,
viscosity_n,
nPressModel=1,
diagonal_conductivity=True,
#for debugging
qScalarConstant=1.0,
capillaryDiffusionScaling=1.0,
advectionScaling=1.0):
TwophaseDarcy_incompressible_split_saturation.__init__(self,
nd=nd,
dimensionless_gravity=dimensionless_gravity,
density_w=density_w,
density_n=density_n,
viscosity_w=viscosity_w,
viscosity_n=viscosity_n,
psk_model='simp',
nMaterialTypes=len(thetaR_types),
nPressModel=nPressModel,
diagonal_conductivity=diagonal_conductivity,
qScalarConstant=qScalarConstant,
capillaryDiffusionScaling=capillaryDiffusionScaling,
advectionScaling=advectionScaling)
        for types in [thetaR_types,thetaSR_types]:
            assert len(types) == self.nMaterialTypes
thetaS_types = thetaSR_types + thetaR_types
Sw_max_types = numpy.ones((self.nMaterialTypes,),'d')
Sw_min_types = old_div(thetaR_types,thetaS_types)
#mwf debug
#import pdb
#pdb.set_trace()
self.setMaterialTypes(Ksw_types=Ksw_types,
omega_types=thetaS_types,
Sw_max_types=Sw_max_types,
Sw_min_types=Sw_min_types)
class PressurePressureIncompressibleFractionalFlowPressureMualemVanGenuchten(TwophaseDarcy_incompressible_split_pp_pressure):
"""Total flow equation coefficients for incompressible flow assuming
Mualem-Van Genuchten psk's
"""
def __init__(self,
nd,
Ksw_types,
vgm_n_types,
vgm_alpha_types,
thetaR_types,
thetaSR_types,
dimensionless_gravity,
density_w,
density_n,
viscosity_w,
viscosity_n,
nSatModel=1,
diagonal_conductivity=True,
vgm_small_eps=1.0e-16,
vgm_ns_del=1.0e-8,
#for debugging
swConstant=1.0,
capillaryDiffusionScaling=1.0):
TwophaseDarcy_incompressible_split_pp_pressure.__init__(self,
nd=nd,
dimensionless_gravity=dimensionless_gravity,
density_w=density_w,
density_n=density_n,
viscosity_w=viscosity_w,
viscosity_n=viscosity_n,
psk_model='VGM',
nMaterialTypes=len(thetaR_types),
nSatModel=nSatModel,
diagonal_conductivity=diagonal_conductivity,
swConstant=swConstant,
capillaryDiffusionScaling=capillaryDiffusionScaling)
        for types in [vgm_n_types,vgm_alpha_types,thetaR_types,thetaSR_types]:
            assert len(types) == self.nMaterialTypes
vgm_m_types = 1.0-old_div(1.0,vgm_n_types)
thetaS_types = thetaSR_types + thetaR_types
Sw_max_types = numpy.ones((self.nMaterialTypes,),'d')
Sw_min_types = old_div(thetaR_types,thetaS_types)
#mwf debug
#import pdb
#pdb.set_trace()
self.psk_tolerances['VGM']['eps_small']=vgm_small_eps
self.psk_tolerances['VGM']['ns_del'] =vgm_ns_del
self.setMaterialTypes(Ksw_types=Ksw_types,
omega_types=thetaS_types,
Sw_max_types=Sw_max_types,
Sw_min_types=Sw_min_types,
vg_alpha_types=vgm_alpha_types,
vg_m_types=vgm_m_types)
#
class PressurePressureIncompressibleFractionalFlowSaturationMualemVanGenuchten(TwophaseDarcy_incompressible_split_pp_saturation):
"""Saturation equation coefficients for incompressible flow assuming
Mualem-Van Genuchten psk's
"""
def __init__(self,
nd,
Ksw_types,
vgm_n_types,
vgm_alpha_types,
thetaR_types,
thetaSR_types,
dimensionless_gravity,
density_w,
density_n,
viscosity_w,
viscosity_n,
nPressModel=1,
diagonal_conductivity=True,
vgm_small_eps=1.0e-16,
vgm_ns_del=1.0e-8,
#for debugging
qScalarConstant=1.0,
capillaryDiffusionScaling=1.0,
advectionScaling=1.0):
TwophaseDarcy_incompressible_split_pp_saturation.__init__(self,
nd=nd,
dimensionless_gravity=dimensionless_gravity,
density_w=density_w,
density_n=density_n,
viscosity_w=viscosity_w,
viscosity_n=viscosity_n,
psk_model='VGM',
nMaterialTypes=len(thetaR_types),
nPressModel=nPressModel,
diagonal_conductivity=diagonal_conductivity,
qScalarConstant=qScalarConstant,
capillaryDiffusionScaling=capillaryDiffusionScaling,
advectionScaling=advectionScaling)
        for types in [vgm_n_types,vgm_alpha_types,thetaR_types,thetaSR_types]:
            assert len(types) == self.nMaterialTypes
vgm_m_types = 1.0-old_div(1.0,vgm_n_types)
thetaS_types = thetaSR_types + thetaR_types
Sw_max_types = numpy.ones((self.nMaterialTypes,),'d')
Sw_min_types = old_div(thetaR_types,thetaS_types)
#mwf debug
#import pdb
#pdb.set_trace()
self.psk_tolerances['VGM']['eps_small']=vgm_small_eps
self.psk_tolerances['VGM']['ns_del'] =vgm_ns_del
self.setMaterialTypes(Ksw_types=Ksw_types,
omega_types=thetaS_types,
Sw_max_types=Sw_max_types,
Sw_min_types=Sw_min_types,
vg_alpha_types=vgm_alpha_types,
vg_m_types=vgm_m_types)
########################################
#Single-phase species transport
########################################
class GroundwaterTransportCoefficients(TC_base):
    """Groundwater advection-dispersion equation with coefficients
    varying by material type and a spatially variable velocity."""
    from proteus.ctransportCoefficients import groundwaterTransportCoefficientsEvaluate_hetMat
def __init__(self,nc=1,nd=2,
omega_types=numpy.array([0.3]),
alpha_L_types=numpy.array([1.0]),
alpha_T_types=numpy.array([0.1]),
d=numpy.array([1.3e-9]),
meModelId = 0,
flowModelId = None,
velocityFunctions = None):
self.alpha_L_types = alpha_L_types
self.alpha_T_types = alpha_T_types
self.omega_types = omega_types
self.d = d
self.nd = nd
self.flowModelId = flowModelId
self.flowModel = None
self.meModelId = meModelId
mass = {}
advection = {}
diffusion = {}
potential = {}
reaction = {}
hamiltonian = {}
for i in range(nc):
diffusion[i] = {i : {i:'constant'}}
advection[i] = {i : {i:'linear'}}
mass[i] = {i : {i:'linear'}}
reaction[i] = {i : {i:'constant'}}
potential[i] = {i : 'u'}
#end i
sparseDiffusionTensors = {}
for ci in range(nc):
sparseDiffusionTensors[(ci,ci)]=(numpy.arange(start=0,stop=nd**2+1,step=nd,dtype='i'),
numpy.array([list(range(nd)) for row in range(nd)],dtype='i'))
names = ['C_%s' % ci for ci in range(nc)]
TC_base.__init__(self,
nc,
mass,
advection,
diffusion,
potential,
reaction,
hamiltonian,
sparseDiffusionTensors = sparseDiffusionTensors,
useSparseDiffusion = True)
self.q={}; self.ebqe={}; self.ebq ={}; self.ebq_global = {}
self.velocityFunctions = velocityFunctions
def initializeMesh(self,mesh):
self.elementMaterialTypes,self.exteriorElementBoundaryTypes,self.elementBoundaryTypes = BlockHeterogeneousCoefficients(mesh).initializeMaterialTypes()
self.elementBoundariesArray = mesh.elementBoundariesArray
def initializeElementQuadrature(self,t,cq):
self.materialTypes_q = self.elementMaterialTypes
self.q_shape = cq[('u',0)].shape
for ci in range(self.nc):
self.q[('velocity',ci)] = numpy.zeros((cq['x'].shape[0],cq['x'].shape[1],self.nd),'d')
def initializeElementBoundaryQuadrature(self,t,cebq,cebq_global):
self.materialTypes_ebq = numpy.zeros(cebq[('u',0)].shape[0:2],'i')
self.ebq_shape = cebq[('u',0)].shape
for ebN_local in range(self.ebq_shape[1]):
self.materialTypes_ebq[:,ebN_local] = self.elementMaterialTypes
for ci in range(self.nc):
self.ebq[('velocity',ci)] = numpy.zeros((cebq['x'].shape[0],cebq['x'].shape[1],cebq['x'].shape[2],self.nd),'d')
self.ebq_global[('velocity',ci)] = numpy.zeros((cebq_global['x'].shape[0],cebq_global['x'].shape[1],self.nd),'d')
def initializeGlobalExteriorElementBoundaryQuadrature(self,t,cebqe):
self.materialTypes_ebqe = self.exteriorElementBoundaryTypes
self.ebqe_shape = cebqe[('u',0)].shape
for ci in range(self.nc):
self.ebqe[('velocity',ci)] = numpy.zeros((cebqe['x'].shape[0],cebqe['x'].shape[1],self.nd),'d')
def attachModels(self,modelList):
self.vt = modelList[self.meModelId]
if self.flowModelId is not None:
self.flowModel = modelList[self.flowModelId]
for ci in range(self.nc):
self.q[('velocity',ci)] = self.flowModel.q[('velocity',ci)]
self.ebqe[('velocity',ci)] = self.flowModel.ebqe[('velocity',ci)]
if ('velocity',ci) in self.flowModel.ebq:
self.ebq[('velocity',ci)] = self.flowModel.ebq[('velocity',ci)]
if ('velocity',ci) in self.flowModel.ebq_global:
self.ebq_global[('velocity',ci)] = self.flowModel.ebq_global[('velocity',ci)]
def evaluateVelocity(self,t,c):
if self.velocityFunctions is not None:
for ci in range(self.nc):
if len(c['x'].shape) == 3:
for i in range(c['x'].shape[0]):
for j in range(c['x'].shape[1]):
c[('velocity',ci)][i,j,:] = self.velocityFunctions[ci](c['x'][i,j],t)
elif len(c['x'].shape) == 4:
for i in range(c['x'].shape[0]):
for j in range(c['x'].shape[1]):
for k in range(c['x'].shape[2]):
c[('velocity',ci)][i,j,k,:] = self.velocityFunctions[ci](c['x'][i,j,k],t)
def evaluate(self,t,c):
# TODO
# evaluate velocity is currently setting ebqe when c=q but need to make sure this is done
# before evaluate is called with c=ebqe
#mwf debug
#import pdb
#pdb.set_trace()
if self.velocityFunctions is not None:
self.evaluateVelocity(t,c)
#
for ci in range(self.nc):
if self.q[('velocity',ci)].shape == c[('df',ci,ci)].shape:
v = self.q[('velocity',ci)]
materialTypes = self.materialTypes_q
elif self.ebqe[('velocity',ci)].shape == c[('df',ci,ci)].shape:
v = self.ebqe[('velocity',ci)]
materialTypes = self.materialTypes_ebqe
elif self.ebq[('velocity',ci)].shape == c[('df',ci,ci)].shape:
v = self.ebq[('velocity',ci)]
materialTypes = self.materialTypes_ebq
            else:
                raise RuntimeError(
                    "no velocity array matches the shape %s for component %d"
                    % (c[('df',ci,ci)].shape, ci))
self.groundwaterTransportCoefficientsEvaluate_hetMat(self.d[ci],
materialTypes,
self.omega_types,
self.alpha_L_types,
self.alpha_T_types,
v,
c[('u',ci)],
c[('m',ci)],
c[('dm',ci,ci)],
c[('f',ci)],
c[('df',ci,ci)],
c[('a',ci,ci)])
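# Hedged usage sketch (hypothetical values): constructing the coefficients
# class above with an analytical velocity field instead of attaching a flow
# model via flowModelId.
def _example_groundwater_transport_setup():
    import numpy
    def uniform_velocity(x, t):
        # hypothetical steady, uniform Darcy velocity in 2D
        return numpy.array([1.0e-5, 0.0])
    return GroundwaterTransportCoefficients(
        nc=1, nd=2,
        omega_types=numpy.array([0.3]),    # porosity per material type
        alpha_L_types=numpy.array([1.0]),  # longitudinal dispersivity
        alpha_T_types=numpy.array([0.1]),  # transverse dispersivity
        d=numpy.array([1.3e-9]),           # molecular diffusivity
        velocityFunctions=[uniform_velocity])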
########################################
#multiphase species transport
########################################
class MultiphaseGroundwaterTransportCoefficients(TC_base):
    """Multiphase (variably saturated) groundwater advection-dispersion
    equation with coefficients varying by material type and a spatially
    variable velocity."""
    from proteus.ctransportCoefficients import variablySaturatedGroundwaterTransportCoefficientsEvaluate_hetMat
def __init__(self,nc=1,nd=2,
omega_types=numpy.array([0.3]),
alpha_L_types=numpy.array([1.0]),
alpha_T_types=numpy.array([0.1]),
d=numpy.array([1.3e-9]),
meModelId = 0,
flowModelId = None,
velocityFunctions = None):
self.alpha_L_types = alpha_L_types
self.alpha_T_types = alpha_T_types
self.omega_types = omega_types
self.d = d
self.nd = nd
self.flowModelId = flowModelId
self.flowModel = None
self.meModelId = meModelId
mass = {}
advection = {}
diffusion = {}
potential = {}
reaction = {}
hamiltonian = {}
for i in range(nc):
diffusion[i] = {i : {i:'constant'}}
advection[i] = {i : {i:'linear'}}
mass[i] = {i : {i:'linear'}}
reaction[i] = {i : {i:'constant'}}
potential[i] = {i : 'u'}
#end i
sparseDiffusionTensors = {}
for ci in range(nc):
sparseDiffusionTensors[(ci,ci)]=(numpy.arange(start=0,stop=nd**2+1,step=nd,dtype='i'),
numpy.array([list(range(nd)) for row in range(nd)],dtype='i'))
names = ['C_%s' % ci for ci in range(nc)]
TC_base.__init__(self,
nc,
mass,
advection,
diffusion,
potential,
reaction,
hamiltonian,
sparseDiffusionTensors = sparseDiffusionTensors,
useSparseDiffusion = True)
self.q={}; self.ebqe={}; self.ebq ={}; self.ebq_global = {}
self.velocityFunctions = velocityFunctions
def initializeMesh(self,mesh):
self.elementMaterialTypes,self.exteriorElementBoundaryTypes,self.elementBoundaryTypes = BlockHeterogeneousCoefficients(mesh).initializeMaterialTypes()
self.elementBoundariesArray = mesh.elementBoundariesArray
def initializeElementQuadrature(self,t,cq):
self.materialTypes_q = self.elementMaterialTypes
self.q_shape = cq[('u',0)].shape
for ci in range(self.nc):
self.q[('velocity',ci)] = numpy.zeros((cq['x'].shape[0],cq['x'].shape[1],self.nd),'d')
self.q[('vol_frac',ci)] = numpy.ones((cq['x'].shape[0],cq['x'].shape[1]),'d')
def initializeElementBoundaryQuadrature(self,t,cebq,cebq_global):
self.materialTypes_ebq = numpy.zeros(cebq[('u',0)].shape[0:2],'i')
self.ebq_shape = cebq[('u',0)].shape
for ebN_local in range(self.ebq_shape[1]):
self.materialTypes_ebq[:,ebN_local] = self.elementMaterialTypes
for ci in range(self.nc):
self.ebq[('velocity',ci)] = numpy.zeros((cebq['x'].shape[0],cebq['x'].shape[1],cebq['x'].shape[2],self.nd),'d')
self.ebq_global[('velocity',ci)] = numpy.zeros((cebq_global['x'].shape[0],cebq_global['x'].shape[1],self.nd),'d')
self.ebq[('vol_frac',ci)] = numpy.ones((cebq['x'].shape[0],cebq['x'].shape[1],cebq['x'].shape[2]),'d')
self.ebq_global[('vol_frac',ci)] = numpy.ones((cebq_global['x'].shape[0],cebq_global['x'].shape[1]),'d')
def initializeGlobalExteriorElementBoundaryQuadrature(self,t,cebqe):
self.materialTypes_ebqe = self.exteriorElementBoundaryTypes
self.ebqe_shape = cebqe[('u',0)].shape
for ci in range(self.nc):
self.ebqe[('velocity',ci)] = numpy.zeros((cebqe['x'].shape[0],cebqe['x'].shape[1],self.nd),'d')
self.ebqe[('vol_frac',ci)] = numpy.ones((cebqe['x'].shape[0],cebqe['x'].shape[1]),'d')
def attachModels(self,modelList):
self.vt = modelList[self.meModelId]
if self.flowModelId is not None:
self.flowModel = modelList[self.flowModelId]
for ci in range(self.nc):
self.q[('velocity',ci)] = self.flowModel.q[('velocity',ci)]
self.ebqe[('velocity',ci)] = self.flowModel.ebqe[('velocity',ci)]
if ('velocity',ci) in self.flowModel.ebq:
self.ebq[('velocity',ci)] = self.flowModel.ebq[('velocity',ci)]
if ('velocity',ci) in self.flowModel.ebq_global:
self.ebq_global[('velocity',ci)] = self.flowModel.ebq_global[('velocity',ci)]
self.q[('vol_frac',ci)] = self.flowModel.coefficients.q[('vol_frac',ci)]
self.ebqe[('vol_frac',ci)] = self.flowModel.coefficients.ebqe[('vol_frac',ci)]
def evaluateVelocity(self,t,c):
if self.velocityFunctions is not None:
for ci in range(self.nc):
if len(c['x'].shape) == 3:
for i in range(c['x'].shape[0]):
for j in range(c['x'].shape[1]):
c[('velocity',ci)][i,j,:] = self.velocityFunctions[ci](c['x'][i,j],t)
elif len(c['x'].shape) == 4:
for i in range(c['x'].shape[0]):
for j in range(c['x'].shape[1]):
for k in range(c['x'].shape[2]):
c[('velocity',ci)][i,j,k,:] = self.velocityFunctions[ci](c['x'][i,j,k],t)
def evaluate(self,t,c):
# TODO
# evaluate velocity is currently setting ebqe when c=q but need to make sure this is done
# before evaluate is called with c=ebqe
#mwf debug
#import pdb
#pdb.set_trace()
if self.velocityFunctions is not None:
self.evaluateVelocity(t,c)
#
for ci in range(self.nc):
if self.q[('velocity',ci)].shape == c[('df',ci,ci)].shape:
v = self.q[('velocity',ci)]
materialTypes = self.materialTypes_q
vol_frac = self.q[('vol_frac',ci)]
elif self.ebqe[('velocity',ci)].shape == c[('df',ci,ci)].shape:
v = self.ebqe[('velocity',ci)]
materialTypes = self.materialTypes_ebqe
vol_frac = self.ebqe[('vol_frac',ci)]
elif self.ebq[('velocity',ci)].shape == c[('df',ci,ci)].shape:
v = self.ebq[('velocity',ci)]
materialTypes = self.materialTypes_ebq
vol_frac = self.ebq[('vol_frac',ci)]
            else:
                raise RuntimeError(
                    "no velocity array matches the shape %s for component %d"
                    % (c[('df',ci,ci)].shape, ci))
self.variablySaturatedGroundwaterTransportCoefficientsEvaluate_hetMat(self.d[ci],
materialTypes,
vol_frac,
self.alpha_L_types,
self.alpha_T_types,
v,
c[('u',ci)],
c[('m',ci)],
c[('dm',ci,ci)],
c[('f',ci)],
c[('df',ci,ci)],
c[('a',ci,ci)])
class VariablySaturatedGroundwaterEnergyTransportCoefficients(MultiphaseGroundwaterTransportCoefficients):
    """Variably saturated groundwater heat-transport equation with
    coefficients varying by material type and a spatially variable
    velocity."""
    from proteus.ctransportCoefficients import variablySaturatedGroundwaterEnergyTransportCoefficientsEvaluate_hetMat
def __init__(self,nc=1,nd=2,
density_w=998.2, #kg/m^3
density_n=1.205, #kg/m^3
specificHeat_w=0.04882, #W d / (kg K), 0.001172 W hr/g-C
specificHeat_n=0.01446, #W d / (kg K), 0.000347 W hr/g-C need to convert
omega_types=numpy.array([0.3]),
alpha_L_types=numpy.array([1.0]),
alpha_T_types=numpy.array([0.1]),
d=numpy.array([1.3e-9]),
density_s_types=numpy.array([2.73*998.2]), #kg/m^3
specificHeat_s_types=numpy.array([0.004167]), #W d / (kg K) 1.0e-4, W hr/g-C need to convert
lambda_sat_types=numpy.array([0.58]),#W/m-K need to convert
lambda_dry_types=numpy.array([0.3]),#W/m-K need to convert
lambda_ani_types=numpy.array([1.0,1.0,1.0]),
meModelId = 0,
flowModelId = None,
velocityFunctions = None):
MultiphaseGroundwaterTransportCoefficients.__init__(self,nc=nc,nd=nd,
omega_types=omega_types,
alpha_L_types=alpha_L_types,
alpha_T_types=alpha_T_types,
d=d,
meModelId = meModelId,
flowModelId = flowModelId,
velocityFunctions = velocityFunctions)
self.density_w = density_w
self.density_n = density_n
self.specificHeat_w = specificHeat_w
self.specificHeat_n = specificHeat_n
self.density_s_types = density_s_types
self.specificHeat_s_types = specificHeat_s_types
self.lambda_sat_types = lambda_sat_types
self.lambda_dry_types = lambda_dry_types
self.lambda_ani_types = lambda_ani_types
self.nd = nd
def evaluate(self,t,c):
# TODO
# evaluate velocity is currently setting ebqe when c=q but need to make sure this is done
# before evaluate is called with c=ebqe
#mwf debug
#import pdb
#pdb.set_trace()
if self.velocityFunctions is not None:
self.evaluateVelocity(t,c)
#
for ci in range(self.nc):
if self.q[('velocity',ci)].shape == c[('df',ci,ci)].shape:
v = self.q[('velocity',ci)]
materialTypes = self.materialTypes_q
vol_frac = self.q[('vol_frac',ci)]
elif self.ebqe[('velocity',ci)].shape == c[('df',ci,ci)].shape:
v = self.ebqe[('velocity',ci)]
materialTypes = self.materialTypes_ebqe
vol_frac = self.ebqe[('vol_frac',ci)]
elif self.ebq[('velocity',ci)].shape == c[('df',ci,ci)].shape:
v = self.ebq[('velocity',ci)]
materialTypes = self.materialTypes_ebq
vol_frac = self.ebq[('vol_frac',ci)]
            else:
                raise RuntimeError(
                    "no velocity array matches the shape %s for component %d"
                    % (c[('df',ci,ci)].shape, ci))
self.variablySaturatedGroundwaterEnergyTransportCoefficientsEvaluate_hetMat(self.density_w,
self.density_n,
self.specificHeat_w,
self.specificHeat_n,
materialTypes,
vol_frac,
self.omega_types,
self.alpha_L_types,
self.alpha_T_types,
self.density_s_types,
self.specificHeat_s_types,
self.lambda_sat_types,
self.lambda_dry_types,
self.lambda_ani_types,
v,
c[('u',ci)],
c[('m',ci)],
c[('dm',ci,ci)],
c[('f',ci)],
c[('df',ci,ci)],
c[('a',ci,ci)])
|
from __future__ import division
import os
import subprocess
from tempfile import TemporaryFile, NamedTemporaryFile
import wave
import sys
try:
from StringIO import StringIO
except ImportError:
from io import StringIO, BytesIO
from .utils import (
_fd_or_path_or_tempfile,
db_to_float,
ratio_to_db,
get_encoder_name,
audioop,
)
from .exceptions import (
TooManyMissingFrames,
InvalidDuration,
InvalidID3TagVersion,
InvalidTag,
)
if sys.version_info >= (3, 0):
basestring = str
xrange = range
StringIO = BytesIO
AUDIO_FILE_EXT_ALIASES = {
"m4a": "mp4"
}
class AudioSegment(object):
"""
AudioSegments are *immutable* objects representing segments of audio
that can be manipulated using python code.
AudioSegments are slicable using milliseconds.
for example:
a = AudioSegment.from_mp3(mp3file)
first_second = a[:1000] # get the first second of an mp3
slice = a[5000:10000] # get a slice from 5 to 10 seconds of an mp3
"""
converter = get_encoder_name() # either ffmpeg or avconv
# TODO: remove in 1.0 release
# maintain backwards compatibility for ffmpeg attr (now called converter)
ffmpeg = property(lambda s: s.converter,
lambda s, v: setattr(s, 'converter', v))
DEFAULT_CODECS = {
"ogg": "libvorbis"
}
def __init__(self, data=None, *args, **kwargs):
if kwargs.get('metadata', False):
# internal use only
self._data = data
for attr, val in kwargs.pop('metadata').items():
setattr(self, attr, val)
else:
# normal construction
data = data if isinstance(data, basestring) else data.read()
raw = wave.open(StringIO(data), 'rb')
raw.rewind()
self.channels = raw.getnchannels()
self.sample_width = raw.getsampwidth()
self.frame_rate = raw.getframerate()
self.frame_width = self.channels * self.sample_width
            raw.rewind()
            # read every frame; wave.readframes expects an integer frame count
            self._data = raw.readframes(raw.getnframes())
super(AudioSegment, self).__init__(*args, **kwargs)
def __len__(self):
"""
returns the length of this audio segment in milliseconds
"""
return round(1000 * (self.frame_count() / self.frame_rate))
def __eq__(self, other):
try:
return self._data == other._data
except:
return False
def __ne__(self, other):
return not (self == other)
def __iter__(self):
return (self[i] for i in xrange(len(self)))
def __getitem__(self, millisecond):
if isinstance(millisecond, slice):
start = millisecond.start if millisecond.start is not None else 0
end = millisecond.stop if millisecond.stop is not None \
else len(self)
start = min(start, len(self))
end = min(end, len(self))
else:
start = millisecond
end = millisecond + 1
start = self._parse_position(start) * self.frame_width
end = self._parse_position(end) * self.frame_width
data = self._data[start:end]
# ensure the output is as long as the requester is expecting
expected_length = end - start
missing_frames = (expected_length - len(data)) // self.frame_width
if missing_frames:
            if missing_frames > self.frame_count(ms=2):
                raise TooManyMissingFrames(
                    "You should never be filling in more than 2 ms "
                    "with silence here, missing frames: %s" % missing_frames)
silence = audioop.mul(data[:self.frame_width],
self.sample_width, 0)
data += (silence * missing_frames)
return self._spawn(data)
def get_sample_slice(self, start_sample=None, end_sample=None):
"""
Get a section of the audio segment by sample index.
        NOTE: Negative indices do *not* address samples backward
from the end of the audio segment like a python list.
This is intentional.
"""
max_val = self.frame_count()
def bounded(val, default):
if val is None:
return default
if val < 0:
return 0
if val > max_val:
return max_val
return val
start_i = bounded(start_sample, 0) * self.frame_width
end_i = bounded(end_sample, max_val) * self.frame_width
data = self._data[start_i:end_i]
return self._spawn(data)
def __add__(self, arg):
if isinstance(arg, AudioSegment):
return self.append(arg, crossfade=0)
else:
return self.apply_gain(arg)
def __sub__(self, arg):
if isinstance(arg, AudioSegment):
raise TypeError("AudioSegment objects can't be subtracted from "
"each other")
else:
return self.apply_gain(-arg)
def __mul__(self, arg):
"""
If the argument is an AudioSegment, overlay the multiplied audio
segment.
If it's a number, just use the string multiply operation to repeat the
audio.
The following would return an AudioSegment that contains the
audio of audio_seg eight times
`audio_seg * 8`
"""
if isinstance(arg, AudioSegment):
return self.overlay(arg, position=0, loop=True)
else:
return self._spawn(data=self._data * arg)
def _spawn(self, data, overrides={}):
"""
        Creates a new audio segment using the metadata from the current one
        and the data passed in. Should be used whenever an AudioSegment is
        being returned by an operation that would alter the current one,
        since AudioSegment objects are immutable.
"""
# accept lists of data chunks
if isinstance(data, list):
data = b''.join(data)
# accept file-like objects
if hasattr(data, 'read'):
if hasattr(data, 'seek'):
data.seek(0)
data = data.read()
metadata = {
'sample_width': self.sample_width,
'frame_rate': self.frame_rate,
'frame_width': self.frame_width,
'channels': self.channels
}
metadata.update(overrides)
return AudioSegment(data=data, metadata=metadata)
@classmethod
def _sync(cls, seg1, seg2):
s1_len, s2_len = len(seg1), len(seg2)
channels = max(seg1.channels, seg2.channels)
seg1 = seg1.set_channels(channels)
seg2 = seg2.set_channels(channels)
frame_rate = max(seg1.frame_rate, seg2.frame_rate)
seg1 = seg1.set_frame_rate(frame_rate)
seg2 = seg2.set_frame_rate(frame_rate)
sample_width = max(seg1.sample_width, seg2.sample_width)
seg1 = seg1.set_sample_width(sample_width)
seg2 = seg2.set_sample_width(sample_width)
assert(len(seg1) == s1_len)
assert(len(seg2) == s2_len)
return seg1, seg2
def _parse_position(self, val):
if val < 0:
val = len(self) - abs(val)
val = self.frame_count(ms=len(self)) if val == float("inf") else \
self.frame_count(ms=val)
return int(val)
@classmethod
def empty(cls):
return cls(b'', metadata={
"channels": 1,
"sample_width": 1,
"frame_rate": 1,
"frame_width": 1
})
@classmethod
def silent(cls, duration=1000):
"""
Generate a silent audio segment.
duration specified in milliseconds (default: 1000ms).
"""
# lowest frame rate I've seen in actual use
frame_rate = 11025
frames = int(frame_rate * (duration / 1000.0))
data = b"\0\0" * frames
return cls(data, metadata={"channels": 1,
"sample_width": 2,
"frame_rate": frame_rate,
"frame_width": 2})
@classmethod
def from_file(cls, file, format=None):
file = _fd_or_path_or_tempfile(file, 'rb', tempfile=False)
if format:
format = AUDIO_FILE_EXT_ALIASES.get(format, format)
if format == 'wav':
return cls.from_wav(file)
input_file = NamedTemporaryFile(mode='wb', delete=False)
input_file.write(file.read())
input_file.flush()
output = NamedTemporaryFile(mode="rb", delete=False)
convertion_command = [cls.converter,
'-y', # always overwrite existing files
]
# If format is not defined
# ffmpeg/avconv will detect it automatically
if format:
convertion_command += ["-f", format]
convertion_command += [
"-i", input_file.name, # input_file options (filename last)
"-vn", # Drop any video streams if there are any
"-f", "wav", # output options (filename last)
output.name
]
        subprocess.call(convertion_command, stderr=open(os.devnull, 'wb'))
obj = cls.from_wav(output)
input_file.close()
output.close()
os.unlink(input_file.name)
os.unlink(output.name)
return obj
@classmethod
def from_mp3(cls, file):
return cls.from_file(file, 'mp3')
@classmethod
def from_flv(cls, file):
return cls.from_file(file, 'flv')
@classmethod
def from_ogg(cls, file):
return cls.from_file(file, 'ogg')
@classmethod
def from_wav(cls, file):
file = _fd_or_path_or_tempfile(file, 'rb', tempfile=False)
file.seek(0)
return cls(data=file)
def export(self, out_f=None, format='mp3', codec=None, bitrate=None, parameters=None, tags=None, id3v2_version='4'):
"""
Export an AudioSegment to a file with given options
out_f (string):
Path to destination audio file
format (string)
Format for destination audio file.
('mp3', 'wav', 'ogg' or other ffmpeg/avconv supported files)
        codec (string)
            Codec used for encoding the destination file.
        bitrate (string)
            Bitrate used when encoding the destination file. ('128k', '256k', '320k'...)
        parameters (list of strings)
            Additional ffmpeg/avconv parameters
tags (dict)
Set metadata information to destination files
usually used as tags. ({title='Song Title', artist='Song Artist'})
id3v2_version (string)
Set ID3v2 version for tags. (default: '4')
"""
id3v2_allowed_versions = ['3', '4']
out_f = _fd_or_path_or_tempfile(out_f, 'wb+')
out_f.seek(0)
# for wav output we can just write the data directly to out_f
if format == "wav":
data = out_f
else:
data = NamedTemporaryFile(mode="wb", delete=False)
wave_data = wave.open(data, 'wb')
wave_data.setnchannels(self.channels)
wave_data.setsampwidth(self.sample_width)
wave_data.setframerate(self.frame_rate)
# For some reason packing the wave header struct with
# a float in python 2 doesn't throw an exception
wave_data.setnframes(int(self.frame_count()))
wave_data.writeframesraw(self._data)
wave_data.close()
# for wav files, we're done (wav data is written directly to out_f)
if format == 'wav':
return out_f
output = NamedTemporaryFile(mode="w+b", delete=False)
# build converter command to export
convertion_command = [
self.converter,
'-y', # always overwrite existing files
"-f", "wav", "-i", data.name, # input options (filename last)
]
if codec is None:
codec = self.DEFAULT_CODECS.get(format, None)
if codec is not None:
# force audio encoder
convertion_command.extend(["-acodec", codec])
if bitrate is not None:
convertion_command.extend(["-b:a", bitrate])
if parameters is not None:
# extend arguments with arbitrary set
convertion_command.extend(parameters)
if tags is not None:
if not isinstance(tags, dict):
raise InvalidTag("Tags must be a dictionary.")
else:
# Extend converter command with tags
# print(tags)
for key, value in tags.items():
convertion_command.extend(
['-metadata', '{0}={1}'.format(key, value)])
if format == 'mp3':
# set id3v2 tag version
if id3v2_version not in id3v2_allowed_versions:
raise InvalidID3TagVersion(
"id3v2_version not allowed, allowed versions: %s" % id3v2_allowed_versions)
convertion_command.extend([
"-id3v2_version", id3v2_version
])
convertion_command.extend([
"-f", format, output.name, # output options (filename last)
])
        # run the conversion, discarding the converter's stderr chatter
        subprocess.call(convertion_command, stderr=open(os.devnull, 'wb'))
output.seek(0)
out_f.write(output.read())
data.close()
output.close()
os.unlink(data.name)
os.unlink(output.name)
out_f.seek(0)
return out_f
def get_frame(self, index):
frame_start = index * self.frame_width
frame_end = frame_start + self.frame_width
return self._data[frame_start:frame_end]
def frame_count(self, ms=None):
"""
returns the number of frames for the given number of milliseconds, or
if not specified, the number of frames in the whole AudioSegment
"""
if ms is not None:
return ms * (self.frame_rate / 1000.0)
else:
return float(len(self._data) // self.frame_width)
def set_sample_width(self, sample_width):
if sample_width == self.sample_width:
return self
data = self._data
if self.sample_width == 1:
data = audioop.bias(data, 1, -128)
if data:
data = audioop.lin2lin(data, self.sample_width, sample_width)
if sample_width == 1:
data = audioop.bias(data, 1, 128)
frame_width = self.channels * sample_width
return self._spawn(data, overrides={'sample_width': sample_width,
'frame_width': frame_width})
def set_frame_rate(self, frame_rate):
if frame_rate == self.frame_rate:
return self
if self._data:
converted, _ = audioop.ratecv(self._data, self.sample_width,
self.channels, self.frame_rate,
frame_rate, None)
else:
converted = self._data
return self._spawn(data=converted,
overrides={'frame_rate': frame_rate})
def set_channels(self, channels):
if channels == self.channels:
return self
if channels == 2 and self.channels == 1:
fn = audioop.tostereo
frame_width = self.frame_width * 2
        elif channels == 1 and self.channels == 2:
            fn = audioop.tomono
            frame_width = self.frame_width // 2
        else:
            raise ValueError(
                "Only mono <-> stereo conversions are supported "
                "(got %d -> %d channels)" % (self.channels, channels))
        converted = fn(self._data, self.sample_width, 1, 1)
return self._spawn(data=converted,
overrides={
'channels': channels,
'frame_width': frame_width})
@property
def rms(self):
if self.sample_width == 1:
return self.set_sample_width(2).rms
else:
return audioop.rms(self._data, self.sample_width)
@property
    def dBFS(self):
        rms = self.rms
        if not rms:
            return -float("infinity")
        return ratio_to_db(rms / self.max_possible_amplitude)
@property
def max(self):
return audioop.max(self._data, self.sample_width)
@property
def max_possible_amplitude(self):
bits = self.sample_width * 8
max_possible_val = (2 ** bits)
        # half the range is above 0 and half below, so the max amplitude is half the total range
return max_possible_val / 2
@property
def duration_seconds(self):
return self.frame_rate and self.frame_count() / self.frame_rate or 0.0
def apply_gain(self, volume_change):
return self._spawn(data=audioop.mul(self._data, self.sample_width,
db_to_float(float(volume_change))))
def overlay(self, seg, position=0, loop=False):
output = TemporaryFile()
seg1, seg2 = AudioSegment._sync(self, seg)
sample_width = seg1.sample_width
spawn = seg1._spawn
output.write(seg1[:position]._data)
# drop down to the raw data
seg1 = seg1[position:]._data
seg2 = seg2._data
pos = 0
seg1_len = len(seg1)
seg2_len = len(seg2)
while True:
remaining = max(0, seg1_len - pos)
if seg2_len >= remaining:
seg2 = seg2[:remaining]
seg2_len = remaining
loop = False
output.write(audioop.add(seg1[pos:pos + seg2_len], seg2,
sample_width))
pos += seg2_len
if not loop:
break
output.write(seg1[pos:])
return spawn(data=output)
def append(self, seg, crossfade=100):
output = TemporaryFile()
seg1, seg2 = AudioSegment._sync(self, seg)
if not crossfade:
return seg1._spawn(seg1._data + seg2._data)
xf = seg1[-crossfade:].fade(to_gain=-120, start=0, end=float('inf'))
xf *= seg2[:crossfade].fade(from_gain=-120, start=0, end=float('inf'))
output.write(seg1[:-crossfade]._data)
output.write(xf._data)
output.write(seg2[crossfade:]._data)
output.seek(0)
return seg1._spawn(data=output)
def fade(self, to_gain=0, from_gain=0, start=None, end=None,
duration=None):
"""
Fade the volume of this audio segment.
        to_gain (float):
            resulting volume change, in dB
        from_gain (float):
            starting volume change, in dB (default: 0)
        start (int):
            default = beginning of the segment
            when in this segment to start fading, in milliseconds
        end (int):
            default = end of the segment
            when in this segment to stop fading, in milliseconds
        duration (int):
            default = until the end of the audio segment
            the duration of the fade, in milliseconds
        """
if None not in [duration, end, start]:
raise TypeError('Only two of the three arguments, "start", '
'"end", and "duration" may be specified')
# no fade == the same audio
if to_gain == 0 and from_gain == 0:
return self
start = min(len(self), start) if start is not None else None
end = min(len(self), end) if end is not None else None
if start is not None and start < 0:
start += len(self)
if end is not None and end < 0:
end += len(self)
if duration is not None and duration < 0:
raise InvalidDuration("duration must be a positive integer")
if duration:
if start is not None:
end = start + duration
elif end is not None:
start = end - duration
else:
duration = end - start
from_power = db_to_float(from_gain)
output = []
# original data - up until the crossfade portion, as is
before_fade = self[:start]._data
if from_gain != 0:
before_fade = audioop.mul(before_fade,
self.sample_width,
from_power)
output.append(before_fade)
gain_delta = db_to_float(to_gain) - from_power
        # fades longer than 100ms can use coarse fading (one gain step per ms),
        # shorter fades will have audible clicks so they use precise fading
        # (one gain step per sample)
if duration > 100:
scale_step = gain_delta / duration
for i in range(duration):
volume_change = from_power + (scale_step * i)
chunk = self[start + i]
chunk = audioop.mul(chunk._data,
self.sample_width,
volume_change)
output.append(chunk)
else:
start_frame = self.frame_count(ms=start)
end_frame = self.frame_count(ms=end)
fade_frames = end_frame - start_frame
scale_step = gain_delta / fade_frames
for i in range(int(fade_frames)):
volume_change = from_power + (scale_step * i)
sample = self.get_frame(int(start_frame + i))
sample = audioop.mul(sample, self.sample_width, volume_change)
output.append(sample)
# original data after the crossfade portion, at the new volume
after_fade = self[end:]._data
if to_gain != 0:
after_fade = audioop.mul(after_fade,
self.sample_width,
db_to_float(to_gain))
output.append(after_fade)
return self._spawn(data=output)
def fade_out(self, duration):
return self.fade(to_gain=-120, duration=duration, end=float('inf'))
def fade_in(self, duration):
return self.fade(from_gain=-120, duration=duration, start=0)
def reverse(self):
return self._spawn(
data=audioop.reverse(self._data, self.sample_width)
)
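# Hedged usage sketch (hypothetical file paths): ties together the slicing,
# gain, crossfade, and fade APIs defined on AudioSegment above.
def _example_audio_segment_usage():
    intro = AudioSegment.from_mp3("intro.mp3")   # hypothetical input file
    outro = AudioSegment.from_mp3("outro.mp3")   # hypothetical input file
    first_ten = intro[:10000]                    # first 10 seconds
    quieter = first_ten - 6                      # reduce gain by 6 dB
    combined = quieter.append(outro, crossfade=500)
    return combined.fade_out(duration=2000)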
from . import effects
|
from __future__ import absolute_import
import responses
from six.moves.urllib.parse import parse_qs
from sentry.utils import json
from sentry.models import Integration
from sentry.testutils.cases import RuleTestCase
from sentry.integrations.slack import SlackNotifyServiceAction
class SlackNotifyActionTest(RuleTestCase):
rule_cls = SlackNotifyServiceAction
def setUp(self):
event = self.get_event()
self.integration = Integration.objects.create(
provider="slack",
name="Awesome Team",
external_id="TXXXXXXX1",
metadata={"access_token": "xoxp-xxxxxxxxx-xxxxxxxxxx-xxxxxxxxxxxx"},
)
self.integration.add_organization(event.project.organization, self.user)
def assert_form_valid(self, form, expected_channel_id, expected_channel):
assert form.is_valid()
assert form.cleaned_data["channel_id"] == expected_channel_id
assert form.cleaned_data["channel"] == expected_channel
@responses.activate
def test_upgrade_notice_workspace_app(self):
event = self.get_event()
rule = self.get_rule(data={"workspace": self.integration.id, "channel": "#my-channel"})
results = list(rule.after(event=event, state=self.get_state()))
assert len(results) == 1
responses.add(
method=responses.POST,
url="https://slack.com/api/chat.postMessage",
body='{"ok": true}',
status=200,
content_type="application/json",
)
# Trigger rule callback
results[0].callback(event, futures=[])
data = parse_qs(responses.calls[0].request.body)
assert "attachments" in data
attachments = json.loads(data["attachments"][0])
        # all workspace apps should have the upgrade notice
assert len(attachments) == 2
assert attachments[1]["title"] == event.title
@responses.activate
def test_no_upgrade_notice_bot_app(self):
self.integration.metadata.update(
{
"installation_type": "born_as_bot",
"access_token": "xoxb-xxxxxxxxx-xxxxxxxxxx-xxxxxxxxxxxx",
}
)
self.integration.save()
event = self.get_event()
rule = self.get_rule(data={"workspace": self.integration.id, "channel": "#my-channel"})
results = list(rule.after(event=event, state=self.get_state()))
assert len(results) == 1
responses.add(
method=responses.POST,
url="https://slack.com/api/chat.postMessage",
body='{"ok": true}',
status=200,
content_type="application/json",
)
# Trigger rule callback
results[0].callback(event, futures=[])
data = parse_qs(responses.calls[0].request.body)
assert "attachments" in data
attachments = json.loads(data["attachments"][0])
assert len(attachments) == 1
assert attachments[0]["title"] == event.title
def test_render_label(self):
rule = self.get_rule(
data={"workspace": self.integration.id, "channel": "#my-channel", "tags": "one, two"}
)
assert (
rule.render_label()
== "Send a notification to the Awesome Team Slack workspace to #my-channel and show tags [one, two] in notification"
)
def test_render_label_without_integration(self):
self.integration.delete()
rule = self.get_rule(
data={"workspace": self.integration.id, "channel": "#my-channel", "tags": ""}
)
label = rule.render_label()
assert (
label
== "Send a notification to the [removed] Slack workspace to #my-channel and show tags [] in notification"
)
@responses.activate
def test_valid_channel_selected(self):
rule = self.get_rule(
data={"workspace": self.integration.id, "channel": "#my-channel", "tags": ""}
)
channels = {
"ok": "true",
"channels": [
{"name": "my-channel", "id": "chan-id"},
{"name": "other-chann", "id": "chan-id"},
],
}
responses.add(
method=responses.GET,
url="https://slack.com/api/channels.list",
status=200,
content_type="application/json",
body=json.dumps(channels),
)
form = rule.get_form_instance()
assert form.is_valid()
self.assert_form_valid(form, "chan-id", "#my-channel")
@responses.activate
def test_valid_private_channel_selected(self):
rule = self.get_rule(
data={"workspace": self.integration.id, "channel": "#my-private-channel", "tags": ""}
)
channels = {
"ok": "true",
"channels": [
{"name": "my-channel", "id": "chan-id"},
{"name": "other-chann", "id": "chan-id"},
],
}
responses.add(
method=responses.GET,
url="https://slack.com/api/channels.list",
status=200,
content_type="application/json",
body=json.dumps(channels),
)
groups = {"ok": "true", "groups": [{"name": "my-private-channel", "id": "chan-id"}]}
responses.add(
method=responses.GET,
url="https://slack.com/api/groups.list",
status=200,
content_type="application/json",
body=json.dumps(groups),
)
form = rule.get_form_instance()
self.assert_form_valid(form, "chan-id", "#my-private-channel")
@responses.activate
def test_valid_member_selected(self):
rule = self.get_rule(
data={"workspace": self.integration.id, "channel": "@morty", "tags": ""}
)
channels = {
"ok": "true",
"channels": [
{"name": "my-channel", "id": "chan-id"},
{"name": "other-chann", "id": "chan-id"},
],
}
responses.add(
method=responses.GET,
url="https://slack.com/api/channels.list",
status=200,
content_type="application/json",
body=json.dumps(channels),
)
groups = {"ok": "true", "groups": [{"name": "my-private-channel", "id": "chan-id"}]}
responses.add(
method=responses.GET,
url="https://slack.com/api/groups.list",
status=200,
content_type="application/json",
body=json.dumps(groups),
)
members = {
"ok": "true",
"members": [
{"name": "morty", "id": "morty-id"},
{"name": "other-user", "id": "user-id"},
],
}
responses.add(
method=responses.GET,
url="https://slack.com/api/users.list",
status=200,
content_type="application/json",
body=json.dumps(members),
)
form = rule.get_form_instance()
self.assert_form_valid(form, "morty-id", "@morty")
@responses.activate
def test_invalid_channel_selected(self):
rule = self.get_rule(
data={"workspace": self.integration.id, "channel": "#my-channel", "tags": ""}
)
channels = {"ok": "true", "channels": [{"name": "other-chann", "id": "chan-id"}]}
responses.add(
method=responses.GET,
url="https://slack.com/api/channels.list",
status=200,
content_type="application/json",
body=json.dumps(channels),
)
groups = {"ok": "true", "groups": [{"name": "my-private-channel", "id": "chan-id"}]}
responses.add(
method=responses.GET,
url="https://slack.com/api/groups.list",
status=200,
content_type="application/json",
body=json.dumps(groups),
)
members = {"ok": "true", "members": [{"name": "other-member", "id": "member-id"}]}
responses.add(
method=responses.GET,
url="https://slack.com/api/users.list",
status=200,
content_type="application/json",
body=json.dumps(members),
)
form = rule.get_form_instance()
assert not form.is_valid()
assert len(form.errors) == 1
def test_channel_id_provided(self):
rule = self.get_rule(
data={
"workspace": self.integration.id,
"channel": "#my-channel",
"input_channel_id": "C2349874",
"tags": "",
}
)
form = rule.get_form_instance()
assert form.is_valid()
def test_invalid_workspace(self):
# the workspace _should_ be the integration id
rule = self.get_rule(data={"workspace": "unknown", "channel": "#my-channel", "tags": ""})
form = rule.get_form_instance()
assert not form.is_valid()
assert [u"Slack workspace is a required field."] in form.errors.values()
|
from decimal import Decimal
from dateutil.parser import parse
LITERAL = {'false': False, 'true': True}
try:
    string_types = basestring  # Python 2
except NameError:  # Python 3
    string_types = str
def compatible_bool(value):
"""it's used to reqparser.RequestParser
Example:
>>> from flask_restful import reqparse
>>> parser = reqparse.RequestParser()
>>> parser.add_argument('marriaged', type=compatible_bool, help='accept false and true string')
:param value:
:return:
"""
    if isinstance(value, string_types):
json_value = value.lower()
if json_value in LITERAL:
return LITERAL.get(json_value)
return bool(value)
def compatible_datetime(value, name):
return parse(value)
def compatible_decimal(value):
try:
return Decimal(value)
except Exception:
return None
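# Hedged behavior sketch: what the coercers above return for typical inputs.
def _example_coercions():
    assert compatible_bool("True") is True      # case-insensitive literal
    assert compatible_bool("false") is False
    assert compatible_bool(0) is False          # falls back to bool()
    assert compatible_decimal("1.25") == Decimal("1.25")
    assert compatible_decimal("not a number") is None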
|
def atan2_backward(grad_inputs, inputs, input_shapes, outputs, output_shapes):
"""
Args:
grad_inputs (list of :obj:`nnabla.Variable`): Propagated grads to this backward function.
inputs (list of :obj:`nnabla.Variable` and None): Input Variables of the forward function
if this backward function depends on it. Otherwise, None is set instead.
input_shapes (list of tuple of :obj:`int`): Input shapes of the forward function.
The shapes of the inputs in which None is set can be passed.
outputs (list of :obj:`nnabla.Variable` and None): Output Variables of the forward function
if this backward function depends on it. Otherwise, None is set instead.
output_shapes (list of tuple of :obj:`int`): Output shapes of the forward function.
The shapes of the outputs in which None is set can be passed.
kwargs (dict of arguments): Dictionary of the corresponding function arguments.
Return:
list of Variable: Return the gradients wrt inputs of the corresponding function.
"""
dz = grad_inputs[0]
y = inputs[0]
x = inputs[1]
r2 = x ** 2 + y ** 2
dy = dz * x / r2
    dx = -dz * y / r2
return dy, dx
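# Hedged numeric sanity check (not part of the original module): verifies the
# closed-form gradients above, d/dy atan2(y, x) = x / (x^2 + y^2) and
# d/dx atan2(y, x) = -y / (x^2 + y^2), against central finite differences.
def _check_atan2_grads(y=0.7, x=1.3, eps=1.0e-6):
    import math
    r2 = x ** 2 + y ** 2
    dy_analytic = x / r2
    dx_analytic = -y / r2
    dy_numeric = (math.atan2(y + eps, x) - math.atan2(y - eps, x)) / (2 * eps)
    dx_numeric = (math.atan2(y, x + eps) - math.atan2(y, x - eps)) / (2 * eps)
    assert abs(dy_analytic - dy_numeric) < 1.0e-6
    assert abs(dx_analytic - dx_numeric) < 1.0e-6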
|
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Card.price_low'
db.add_column(u'cards_card', 'price_low',
self.gf('django.db.models.fields.DecimalField')(default=0.0, max_digits=10, decimal_places=2),
keep_default=False)
# Adding field 'Card.price_med'
db.add_column(u'cards_card', 'price_med',
self.gf('django.db.models.fields.DecimalField')(default=0.0, max_digits=10, decimal_places=2),
keep_default=False)
# Adding field 'Card.price_high'
db.add_column(u'cards_card', 'price_high',
self.gf('django.db.models.fields.DecimalField')(default=0.0, max_digits=10, decimal_places=2),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Card.price_low'
db.delete_column(u'cards_card', 'price_low')
# Deleting field 'Card.price_med'
db.delete_column(u'cards_card', 'price_med')
# Deleting field 'Card.price_high'
db.delete_column(u'cards_card', 'price_high')
models = {
u'cards.card': {
'Meta': {'object_name': 'Card'},
'condition': ('django.db.models.fields.CharField', [], {'default': "'M'", 'max_length': '2'}),
'edition': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['cards.Edition']"}),
'foil': ('django.db.models.fields.BooleanField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'multiverse_id': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'price_high': ('django.db.models.fields.DecimalField', [], {'default': '0.0', 'max_digits': '10', 'decimal_places': '2'}),
'price_low': ('django.db.models.fields.DecimalField', [], {'default': '0.0', 'max_digits': '10', 'decimal_places': '2'}),
'price_med': ('django.db.models.fields.DecimalField', [], {'default': '0.0', 'max_digits': '10', 'decimal_places': '2'}),
'quantity': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'})
},
u'cards.edition': {
'Meta': {'object_name': 'Edition'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'set_id': ('django.db.models.fields.CharField', [], {'max_length': '6'})
}
}
complete_apps = ['cards']
|
"""
Accessors for related objects.
When a field defines a relation between two models, each model class provides
an attribute to access related instances of the other model class (unless the
reverse accessor has been disabled with related_name='+').
Accessors are implemented as descriptors in order to customize access and
assignment. This module defines the descriptor classes.
Forward accessors follow foreign keys. Reverse accessors trace them back. For
example, with the following models::
class Parent(Model):
pass
class Child(Model):
parent = ForeignKey(Parent, related_name='children')
``child.parent`` is a forward many-to-one relation. ``parent.children`` is a
reverse many-to-one relation.
There are three types of relations (many-to-one, one-to-one, and many-to-many)
and two directions (forward and reverse) for a total of six combinations.
1. Related instance on the forward side of a many-to-one relation:
``ForwardManyToOneDescriptor``.
Uniqueness of foreign key values is irrelevant to accessing the related
instance, making the many-to-one and one-to-one cases identical as far as
the descriptor is concerned. The constraint is checked upstream (unicity
validation in forms) or downstream (unique indexes in the database).
2. Related instance on the forward side of a one-to-one
relation: ``ForwardOneToOneDescriptor``.
It avoids querying the database when accessing the parent link field in
a multi-table inheritance scenario.
3. Related instance on the reverse side of a one-to-one relation:
``ReverseOneToOneDescriptor``.
One-to-one relations are asymmetrical, despite the apparent symmetry of the
name, because they're implemented in the database with a foreign key from
one table to another. As a consequence ``ReverseOneToOneDescriptor`` is
slightly different from ``ForwardManyToOneDescriptor``.
4. Related objects manager for related instances on the reverse side of a
many-to-one relation: ``ReverseManyToOneDescriptor``.
Unlike the previous two classes, this one provides access to a collection
of objects. It returns a manager rather than an instance.
5. Related objects manager for related instances on the forward or reverse
sides of a many-to-many relation: ``ManyToManyDescriptor``.
Many-to-many relations are symmetrical. The syntax of Django models
requires declaring them on one side but that's an implementation detail.
They could be declared on the other side without any change in behavior.
Therefore the forward and reverse descriptors can be the same.
If you're looking for ``ForwardManyToManyDescriptor`` or
``ReverseManyToManyDescriptor``, use ``ManyToManyDescriptor`` instead.
"""
from __future__ import unicode_literals
import warnings
from operator import attrgetter
from django.db import connections, router, transaction
from django.db.models import Q, signals
from django.db.models.query import QuerySet
from django.utils.deprecation import RemovedInDjango20Warning
from django.utils.functional import cached_property
class ForwardManyToOneDescriptor(object):
"""
Accessor to the related object on the forward side of a many-to-one or
one-to-one (via ForwardOneToOneDescriptor subclass) relation.
In the example::
class Child(Model):
parent = ForeignKey(Parent, related_name='children')
``child.parent`` is a ``ForwardManyToOneDescriptor`` instance.
"""
def __init__(self, field_with_rel):
self.field = field_with_rel
self.cache_name = self.field.get_cache_name()
@cached_property
def RelatedObjectDoesNotExist(self):
# The exception can't be created at initialization time since the
# related model might not be resolved yet; `rel.model` might still be
# a string model reference.
return type(
str('RelatedObjectDoesNotExist'),
(self.field.remote_field.model.DoesNotExist, AttributeError),
{}
)
def is_cached(self, instance):
return hasattr(instance, self.cache_name)
def get_queryset(self, **hints):
related_model = self.field.remote_field.model
if getattr(related_model._default_manager, 'use_for_related_fields', False):
if not getattr(related_model._default_manager, 'silence_use_for_related_fields_deprecation', False):
warnings.warn(
"use_for_related_fields is deprecated, instead "
"set Meta.base_manager_name on '{}'.".format(related_model._meta.label),
RemovedInDjango20Warning, 2
)
manager = related_model._default_manager
else:
manager = related_model._base_manager
return manager.db_manager(hints=hints).all()
def get_prefetch_queryset(self, instances, queryset=None):
if queryset is None:
queryset = self.get_queryset()
queryset._add_hints(instance=instances[0])
rel_obj_attr = self.field.get_foreign_related_value
instance_attr = self.field.get_local_related_value
instances_dict = {instance_attr(inst): inst for inst in instances}
related_field = self.field.foreign_related_fields[0]
# FIXME: This will need to be revisited when we introduce support for
# composite fields. In the meantime we take this practical approach to
        # solve a regression on 1.6 when the reverse manager is hidden
# (related_name ends with a '+'). Refs #21410.
# The check for len(...) == 1 is a special case that allows the query
# to be join-less and smaller. Refs #21760.
if self.field.remote_field.is_hidden() or len(self.field.foreign_related_fields) == 1:
query = {'%s__in' % related_field.name: set(instance_attr(inst)[0] for inst in instances)}
else:
query = {'%s__in' % self.field.related_query_name(): instances}
queryset = queryset.filter(**query)
# Since we're going to assign directly in the cache,
# we must manage the reverse relation cache manually.
if not self.field.remote_field.multiple:
rel_obj_cache_name = self.field.remote_field.get_cache_name()
for rel_obj in queryset:
instance = instances_dict[rel_obj_attr(rel_obj)]
setattr(rel_obj, rel_obj_cache_name, instance)
return queryset, rel_obj_attr, instance_attr, True, self.cache_name
def get_object(self, instance):
qs = self.get_queryset(instance=instance)
# Assuming the database enforces foreign keys, this won't fail.
return qs.get(self.field.get_reverse_related_filter(instance))
def __get__(self, instance, cls=None):
"""
Get the related instance through the forward relation.
With the example above, when getting ``child.parent``:
- ``self`` is the descriptor managing the ``parent`` attribute
- ``instance`` is the ``child`` instance
- ``cls`` is the ``Child`` class (we don't need it)
"""
if instance is None:
return self
# The related instance is loaded from the database and then cached in
# the attribute defined in self.cache_name. It can also be pre-cached
# by the reverse accessor (ReverseOneToOneDescriptor).
try:
rel_obj = getattr(instance, self.cache_name)
except AttributeError:
val = self.field.get_local_related_value(instance)
if None in val:
rel_obj = None
else:
rel_obj = self.get_object(instance)
# If this is a one-to-one relation, set the reverse accessor
# cache on the related object to the current instance to avoid
# an extra SQL query if it's accessed later on.
if not self.field.remote_field.multiple:
setattr(rel_obj, self.field.remote_field.get_cache_name(), instance)
setattr(instance, self.cache_name, rel_obj)
if rel_obj is None and not self.field.null:
raise self.RelatedObjectDoesNotExist(
"%s has no %s." % (self.field.model.__name__, self.field.name)
)
else:
return rel_obj
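    # Hedged usage sketch (not part of Django): with the Child/Parent models
    # from the class docstring, forward access is cached per instance:
    #
    #   child = Child.objects.get(pk=1)
    #   child.parent   # first access: one SQL query, result cached
    #   child.parent   # second access: served from the cache, no query
    #
    # If the ForeignKey is non-nullable and unset, access raises
    # Child.parent.RelatedObjectDoesNotExist, which is also an
    # AttributeError, so hasattr(child, 'parent') remains usable.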
def __set__(self, instance, value):
"""
Set the related instance through the forward relation.
With the example above, when setting ``child.parent = parent``:
- ``self`` is the descriptor managing the ``parent`` attribute
- ``instance`` is the ``child`` instance
- ``value`` is the ``parent`` instance on the right of the equal sign
"""
# An object must be an instance of the related class.
if value is not None and not isinstance(value, self.field.remote_field.model._meta.concrete_model):
raise ValueError(
'Cannot assign "%r": "%s.%s" must be a "%s" instance.' % (
value,
instance._meta.object_name,
self.field.name,
self.field.remote_field.model._meta.object_name,
)
)
elif value is not None:
if instance._state.db is None:
instance._state.db = router.db_for_write(instance.__class__, instance=value)
elif value._state.db is None:
value._state.db = router.db_for_write(value.__class__, instance=instance)
elif value._state.db is not None and instance._state.db is not None:
if not router.allow_relation(value, instance):
raise ValueError('Cannot assign "%r": the current database router prevents this relation.' % value)
# If we're setting the value of a OneToOneField to None, we need to clear
# out the cache on any old related object. Otherwise, deleting the
# previously-related object will also cause this object to be deleted,
# which is wrong.
if value is None:
# Look up the previously-related object, which may still be available
# since we've not yet cleared out the related field.
# Use the cache directly, instead of the accessor; if we haven't
# populated the cache, then we don't care - we're only accessing
# the object to invalidate the accessor cache, so there's no
# need to populate the cache just to expire it again.
related = getattr(instance, self.cache_name, None)
# If we've got an old related object, we need to clear out its
# cache. This cache also might not exist if the related object
# hasn't been accessed yet.
if related is not None:
setattr(related, self.field.remote_field.get_cache_name(), None)
for lh_field, rh_field in self.field.related_fields:
setattr(instance, lh_field.attname, None)
# Set the values of the related field.
else:
for lh_field, rh_field in self.field.related_fields:
setattr(instance, lh_field.attname, getattr(value, rh_field.attname))
# Set the related instance cache used by __get__ to avoid an SQL query
# when accessing the attribute we just set.
setattr(instance, self.cache_name, value)
# If this is a one-to-one relation, set the reverse accessor cache on
# the related object to the current instance to avoid an extra SQL
# query if it's accessed later on.
if value is not None and not self.field.remote_field.multiple:
setattr(value, self.field.remote_field.get_cache_name(), instance)
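# Hedged usage sketch (not part of Django): assignment only mutates the
# in-memory instance; persisting the new FK still requires save():
#
#   child.parent = parent   # sets child.parent_id = parent.pk, caches both ways
#   child.save()            # writes the new FK value to the database
#   child.parent = None     # clears parent_id (only valid if null=True)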
class ForwardOneToOneDescriptor(ForwardManyToOneDescriptor):
"""
Accessor to the related object on the forward side of a one-to-one relation.
In the example::
class Restaurant(Model):
place = OneToOneField(Place, related_name='restaurant')
``restaurant.place`` is a ``ForwardOneToOneDescriptor`` instance.
"""
def get_object(self, instance):
if self.field.remote_field.parent_link:
deferred = instance.get_deferred_fields()
# Because it's a parent link, all the data is available in the
# instance, so populate the parent model with this data.
rel_model = self.field.remote_field.model
fields = [field.attname for field in rel_model._meta.concrete_fields]
# If any of the related model's fields are deferred, fallback to
# fetching all fields from the related model. This avoids a query
# on the related model for every deferred field.
if not any(field in fields for field in deferred):
kwargs = {field: getattr(instance, field) for field in fields}
return rel_model(**kwargs)
return super(ForwardOneToOneDescriptor, self).get_object(instance)
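# Hedged sketch (not part of Django): the parent_link branch above matters for
# multi-table inheritance, where a hypothetical ``class Restaurant(Place)``
# child row already carries all of Place's concrete fields:
#
#   restaurant = Restaurant.objects.get(pk=1)
#   restaurant.place_ptr   # built from in-memory data, no extra query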
class ReverseOneToOneDescriptor(object):
"""
Accessor to the related object on the reverse side of a one-to-one
relation.
In the example::
class Restaurant(Model):
place = OneToOneField(Place, related_name='restaurant')
``place.restaurant`` is a ``ReverseOneToOneDescriptor`` instance.
"""
def __init__(self, related):
self.related = related
self.cache_name = related.get_cache_name()
@cached_property
def RelatedObjectDoesNotExist(self):
# The exception isn't created at initialization time for the sake of
# consistency with `ForwardManyToOneDescriptor`.
return type(
str('RelatedObjectDoesNotExist'),
(self.related.related_model.DoesNotExist, AttributeError),
{}
)
def is_cached(self, instance):
return hasattr(instance, self.cache_name)
def get_queryset(self, **hints):
related_model = self.related.related_model
if getattr(related_model._default_manager, 'use_for_related_fields', False):
if not getattr(related_model._default_manager, 'silence_use_for_related_fields_deprecation', False):
warnings.warn(
"use_for_related_fields is deprecated, instead "
"set Meta.base_manager_name on '{}'.".format(related_model._meta.label),
RemovedInDjango20Warning, 2
)
manager = related_model._default_manager
else:
manager = related_model._base_manager
return manager.db_manager(hints=hints).all()
def get_prefetch_queryset(self, instances, queryset=None):
if queryset is None:
queryset = self.get_queryset()
queryset._add_hints(instance=instances[0])
rel_obj_attr = attrgetter(self.related.field.attname)
def instance_attr(obj):
return obj._get_pk_val()
instances_dict = {instance_attr(inst): inst for inst in instances}
query = {'%s__in' % self.related.field.name: instances}
queryset = queryset.filter(**query)
# Since we're going to assign directly in the cache,
# we must manage the reverse relation cache manually.
rel_obj_cache_name = self.related.field.get_cache_name()
for rel_obj in queryset:
instance = instances_dict[rel_obj_attr(rel_obj)]
setattr(rel_obj, rel_obj_cache_name, instance)
return queryset, rel_obj_attr, instance_attr, True, self.cache_name
def __get__(self, instance, cls=None):
"""
Get the related instance through the reverse relation.
With the example above, when getting ``place.restaurant``:
- ``self`` is the descriptor managing the ``restaurant`` attribute
- ``instance`` is the ``place`` instance
- ``cls`` is the ``Place`` class (unused)
Keep in mind that ``Restaurant`` holds the foreign key to ``Place``.
"""
if instance is None:
return self
# The related instance is loaded from the database and then cached in
# the attribute defined in self.cache_name. It can also be pre-cached
# by the forward accessor (ForwardManyToOneDescriptor).
try:
rel_obj = getattr(instance, self.cache_name)
except AttributeError:
related_pk = instance._get_pk_val()
if related_pk is None:
rel_obj = None
else:
filter_args = self.related.field.get_forward_related_filter(instance)
try:
rel_obj = self.get_queryset(instance=instance).get(**filter_args)
except self.related.related_model.DoesNotExist:
rel_obj = None
else:
# Set the forward accessor cache on the related object to
# the current instance to avoid an extra SQL query if it's
# accessed later on.
setattr(rel_obj, self.related.field.get_cache_name(), instance)
setattr(instance, self.cache_name, rel_obj)
if rel_obj is None:
raise self.RelatedObjectDoesNotExist(
"%s has no %s." % (
instance.__class__.__name__,
self.related.get_accessor_name()
)
)
else:
return rel_obj
def __set__(self, instance, value):
"""
Set the related instance through the reverse relation.
With the example above, when setting ``place.restaurant = restaurant``:
- ``self`` is the descriptor managing the ``restaurant`` attribute
- ``instance`` is the ``place`` instance
- ``value`` is the ``restaurant`` instance on the right of the equal sign
Keep in mind that ``Restaurant`` holds the foreign key to ``Place``.
"""
# The similarity of the code below to the code in
# ForwardManyToOneDescriptor is annoying, but there's a bunch
# of small differences that would make a common base class convoluted.
if value is None:
# Update the cached related instance (if any) & clear the cache.
try:
rel_obj = getattr(instance, self.cache_name)
except AttributeError:
pass
else:
delattr(instance, self.cache_name)
setattr(rel_obj, self.related.field.name, None)
elif not isinstance(value, self.related.related_model):
# An object must be an instance of the related class.
raise ValueError(
'Cannot assign "%r": "%s.%s" must be a "%s" instance.' % (
value,
instance._meta.object_name,
self.related.get_accessor_name(),
self.related.related_model._meta.object_name,
)
)
else:
if instance._state.db is None:
instance._state.db = router.db_for_write(instance.__class__, instance=value)
elif value._state.db is None:
value._state.db = router.db_for_write(value.__class__, instance=instance)
elif value._state.db is not None and instance._state.db is not None:
if not router.allow_relation(value, instance):
raise ValueError('Cannot assign "%r": the current database router prevents this relation.' % value)
related_pk = tuple(getattr(instance, field.attname) for field in self.related.field.foreign_related_fields)
# Set the value of the related field to the value of the related object's related field
for index, field in enumerate(self.related.field.local_related_fields):
setattr(value, field.attname, related_pk[index])
# Set the related instance cache used by __get__ to avoid an SQL query
# when accessing the attribute we just set.
setattr(instance, self.cache_name, value)
# Set the forward accessor cache on the related object to the current
# instance to avoid an extra SQL query if it's accessed later on.
setattr(value, self.related.field.get_cache_name(), instance)
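# Hedged usage sketch (not part of Django): the reverse side mirrors the
# forward side but mutates the *related* object, which holds the FK:
#
#   place.restaurant = restaurant   # sets restaurant.place_id = place.pk
#   restaurant.save()               # the FK lives on Restaurant, so save it
#   place.restaurant = None         # clears the cached relation in memory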
class ReverseManyToOneDescriptor(object):
"""
Accessor to the related objects manager on the reverse side of a
many-to-one relation.
In the example::
class Child(Model):
parent = ForeignKey(Parent, related_name='children')
``parent.children`` is a ``ReverseManyToOneDescriptor`` instance.
    Most of the implementation is delegated to a dynamically defined manager
    class built by ``create_reverse_many_to_one_manager()`` defined below.
"""
def __init__(self, rel):
self.rel = rel
self.field = rel.field
@cached_property
def related_manager_cls(self):
related_model = self.rel.related_model
return create_reverse_many_to_one_manager(
related_model._default_manager.__class__,
self.rel,
)
def __get__(self, instance, cls=None):
"""
Get the related objects through the reverse relation.
With the example above, when getting ``parent.children``:
- ``self`` is the descriptor managing the ``children`` attribute
- ``instance`` is the ``parent`` instance
- ``cls`` is the ``Parent`` class (unused)
"""
if instance is None:
return self
return self.related_manager_cls(instance)
def _get_set_deprecation_msg_params(self):
return ( # RemovedInDjango20Warning
'reverse side of a related set',
self.rel.get_accessor_name(),
)
def __set__(self, instance, value):
"""
Set the related objects through the reverse relation.
With the example above, when setting ``parent.children = children``:
- ``self`` is the descriptor managing the ``children`` attribute
- ``instance`` is the ``parent`` instance
- ``value`` is the ``children`` sequence on the right of the equal sign
"""
warnings.warn(
'Direct assignment to the %s is deprecated due to the implicit '
'save() that happens. Use %s.set() instead.' % self._get_set_deprecation_msg_params(),
RemovedInDjango20Warning, stacklevel=2,
)
manager = self.__get__(instance)
manager.set(value)
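# Hedged usage sketch (not part of Django): on the reverse side of a
# ForeignKey the descriptor returns a manager, and direct assignment is
# deprecated in favor of set():
#
#   parent.children.all()           # RelatedManager filtered to this parent
#   parent.children.set(children)   # replaces ``parent.children = children``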
def create_reverse_many_to_one_manager(superclass, rel):
"""
Create a manager for the reverse side of a many-to-one relation.
This manager subclasses another manager, generally the default manager of
the related model, and adds behaviors specific to many-to-one relations.
"""
class RelatedManager(superclass):
def __init__(self, instance):
super(RelatedManager, self).__init__()
self.instance = instance
self.model = rel.related_model
self.field = rel.field
self.core_filters = {self.field.name: instance}
def __call__(self, **kwargs):
# We use **kwargs rather than a kwarg argument to enforce the
# `manager='manager_name'` syntax.
manager = getattr(self.model, kwargs.pop('manager'))
manager_class = create_reverse_many_to_one_manager(manager.__class__, rel)
return manager_class(self.instance)
do_not_call_in_templates = True
def _apply_rel_filters(self, queryset):
"""
Filter the queryset for the instance this manager is bound to.
"""
db = self._db or router.db_for_read(self.model, instance=self.instance)
empty_strings_as_null = connections[db].features.interprets_empty_strings_as_nulls
queryset._add_hints(instance=self.instance)
if self._db:
queryset = queryset.using(self._db)
queryset = queryset.filter(**self.core_filters)
for field in self.field.foreign_related_fields:
val = getattr(self.instance, field.attname)
if val is None or (val == '' and empty_strings_as_null):
return queryset.none()
queryset._known_related_objects = {self.field: {self.instance.pk: self.instance}}
return queryset
def _remove_prefetched_objects(self):
try:
self.instance._prefetched_objects_cache.pop(self.field.related_query_name())
except (AttributeError, KeyError):
pass # nothing to clear from cache
def get_queryset(self):
try:
return self.instance._prefetched_objects_cache[self.field.related_query_name()]
except (AttributeError, KeyError):
queryset = super(RelatedManager, self).get_queryset()
return self._apply_rel_filters(queryset)
def get_prefetch_queryset(self, instances, queryset=None):
if queryset is None:
queryset = super(RelatedManager, self).get_queryset()
queryset._add_hints(instance=instances[0])
queryset = queryset.using(queryset._db or self._db)
rel_obj_attr = self.field.get_local_related_value
instance_attr = self.field.get_foreign_related_value
instances_dict = {instance_attr(inst): inst for inst in instances}
query = {'%s__in' % self.field.name: instances}
queryset = queryset.filter(**query)
# Since we just bypassed this class' get_queryset(), we must manage
# the reverse relation manually.
for rel_obj in queryset:
instance = instances_dict[rel_obj_attr(rel_obj)]
setattr(rel_obj, self.field.name, instance)
cache_name = self.field.related_query_name()
return queryset, rel_obj_attr, instance_attr, False, cache_name
def add(self, *objs, **kwargs):
self._remove_prefetched_objects()
bulk = kwargs.pop('bulk', True)
objs = list(objs)
db = router.db_for_write(self.model, instance=self.instance)
def check_and_update_obj(obj):
if not isinstance(obj, self.model):
raise TypeError("'%s' instance expected, got %r" % (
self.model._meta.object_name, obj,
))
setattr(obj, self.field.name, self.instance)
if bulk:
pks = []
for obj in objs:
check_and_update_obj(obj)
if obj._state.adding or obj._state.db != db:
raise ValueError(
"%r instance isn't saved. Use bulk=False or save "
"the object first." % obj
)
pks.append(obj.pk)
self.model._base_manager.using(db).filter(pk__in=pks).update(**{
self.field.name: self.instance,
})
else:
with transaction.atomic(using=db, savepoint=False):
for obj in objs:
check_and_update_obj(obj)
obj.save()
add.alters_data = True
def create(self, **kwargs):
kwargs[self.field.name] = self.instance
db = router.db_for_write(self.model, instance=self.instance)
return super(RelatedManager, self.db_manager(db)).create(**kwargs)
create.alters_data = True
def get_or_create(self, **kwargs):
kwargs[self.field.name] = self.instance
db = router.db_for_write(self.model, instance=self.instance)
return super(RelatedManager, self.db_manager(db)).get_or_create(**kwargs)
get_or_create.alters_data = True
def update_or_create(self, **kwargs):
kwargs[self.field.name] = self.instance
db = router.db_for_write(self.model, instance=self.instance)
return super(RelatedManager, self.db_manager(db)).update_or_create(**kwargs)
update_or_create.alters_data = True
# remove() and clear() are only provided if the ForeignKey can have a value of null.
if rel.field.null:
def remove(self, *objs, **kwargs):
if not objs:
return
bulk = kwargs.pop('bulk', True)
val = self.field.get_foreign_related_value(self.instance)
old_ids = set()
for obj in objs:
# Is obj actually part of this descriptor set?
if self.field.get_local_related_value(obj) == val:
old_ids.add(obj.pk)
else:
raise self.field.remote_field.model.DoesNotExist(
"%r is not related to %r." % (obj, self.instance)
)
self._clear(self.filter(pk__in=old_ids), bulk)
remove.alters_data = True
def clear(self, **kwargs):
bulk = kwargs.pop('bulk', True)
self._clear(self, bulk)
clear.alters_data = True
def _clear(self, queryset, bulk):
self._remove_prefetched_objects()
db = router.db_for_write(self.model, instance=self.instance)
queryset = queryset.using(db)
if bulk:
# `QuerySet.update()` is intrinsically atomic.
queryset.update(**{self.field.name: None})
else:
with transaction.atomic(using=db, savepoint=False):
for obj in queryset:
setattr(obj, self.field.name, None)
obj.save(update_fields=[self.field.name])
_clear.alters_data = True
def set(self, objs, **kwargs):
# Force evaluation of `objs` in case it's a queryset whose value
# could be affected by `manager.clear()`. Refs #19816.
objs = tuple(objs)
bulk = kwargs.pop('bulk', True)
clear = kwargs.pop('clear', False)
if self.field.null:
db = router.db_for_write(self.model, instance=self.instance)
with transaction.atomic(using=db, savepoint=False):
if clear:
self.clear()
self.add(*objs, bulk=bulk)
else:
old_objs = set(self.using(db).all())
new_objs = []
for obj in objs:
if obj in old_objs:
old_objs.remove(obj)
else:
new_objs.append(obj)
self.remove(*old_objs, bulk=bulk)
self.add(*new_objs, bulk=bulk)
else:
self.add(*objs, bulk=bulk)
set.alters_data = True
return RelatedManager
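# Hedged usage sketch (not part of Django): the generated RelatedManager adds
# FK-aware behaviors on top of the related model's default manager:
#
#   parent.children.add(child)              # bulk UPDATE of child.parent_id
#   parent.children.add(child, bulk=False)  # per-object save() instead
#   parent.children.create(name='x')        # 'name' is a hypothetical field
#   parent.children.clear()                 # available only if the FK is nullable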
class ManyToManyDescriptor(ReverseManyToOneDescriptor):
"""
Accessor to the related objects manager on the forward and reverse sides of
a many-to-many relation.
In the example::
class Pizza(Model):
toppings = ManyToManyField(Topping, related_name='pizzas')
``pizza.toppings`` and ``topping.pizzas`` are ``ManyToManyDescriptor``
instances.
Most of the implementation is delegated to a dynamically defined manager
class built by ``create_forward_many_to_many_manager()`` defined below.
"""
def __init__(self, rel, reverse=False):
super(ManyToManyDescriptor, self).__init__(rel)
self.reverse = reverse
@property
def through(self):
# through is provided so that you have easy access to the through
# model (Book.authors.through) for inlines, etc. This is done as
# a property to ensure that the fully resolved value is returned.
return self.rel.through
@cached_property
def related_manager_cls(self):
related_model = self.rel.related_model if self.reverse else self.rel.model
return create_forward_many_to_many_manager(
related_model._default_manager.__class__,
self.rel,
reverse=self.reverse,
)
def _get_set_deprecation_msg_params(self):
return ( # RemovedInDjango20Warning
'%s side of a many-to-many set' % ('reverse' if self.reverse else 'forward'),
self.rel.get_accessor_name() if self.reverse else self.field.name,
)
def create_forward_many_to_many_manager(superclass, rel, reverse):
"""
    Create a manager for either side of a many-to-many relation.
This manager subclasses another manager, generally the default manager of
the related model, and adds behaviors specific to many-to-many relations.
"""
class ManyRelatedManager(superclass):
def __init__(self, instance=None):
super(ManyRelatedManager, self).__init__()
self.instance = instance
if not reverse:
self.model = rel.model
self.query_field_name = rel.field.related_query_name()
self.prefetch_cache_name = rel.field.name
self.source_field_name = rel.field.m2m_field_name()
self.target_field_name = rel.field.m2m_reverse_field_name()
self.symmetrical = rel.symmetrical
else:
self.model = rel.related_model
self.query_field_name = rel.field.name
self.prefetch_cache_name = rel.field.related_query_name()
self.source_field_name = rel.field.m2m_reverse_field_name()
self.target_field_name = rel.field.m2m_field_name()
self.symmetrical = False
self.through = rel.through
self.reverse = reverse
self.source_field = self.through._meta.get_field(self.source_field_name)
self.target_field = self.through._meta.get_field(self.target_field_name)
self.core_filters = {}
for lh_field, rh_field in self.source_field.related_fields:
core_filter_key = '%s__%s' % (self.query_field_name, rh_field.name)
self.core_filters[core_filter_key] = getattr(instance, rh_field.attname)
self.related_val = self.source_field.get_foreign_related_value(instance)
if None in self.related_val:
raise ValueError('"%r" needs to have a value for field "%s" before '
'this many-to-many relationship can be used.' %
(instance, self.source_field_name))
            # Even if this relation is not to pk, we still require a pk value.
            # Ideally the instance has already been saved to the DB, although
            # having a pk value isn't a guarantee of that.
if instance.pk is None:
raise ValueError("%r instance needs to have a primary key value before "
"a many-to-many relationship can be used." %
instance.__class__.__name__)
def __call__(self, **kwargs):
# We use **kwargs rather than a kwarg argument to enforce the
# `manager='manager_name'` syntax.
manager = getattr(self.model, kwargs.pop('manager'))
manager_class = create_forward_many_to_many_manager(manager.__class__, rel, reverse)
return manager_class(instance=self.instance)
do_not_call_in_templates = True
def _build_remove_filters(self, removed_vals):
filters = Q(**{self.source_field_name: self.related_val})
# No need to add a subquery condition if removed_vals is a QuerySet without
# filters.
removed_vals_filters = (not isinstance(removed_vals, QuerySet) or
removed_vals._has_filters())
if removed_vals_filters:
filters &= Q(**{'%s__in' % self.target_field_name: removed_vals})
if self.symmetrical:
symmetrical_filters = Q(**{self.target_field_name: self.related_val})
if removed_vals_filters:
symmetrical_filters &= Q(
**{'%s__in' % self.source_field_name: removed_vals})
filters |= symmetrical_filters
return filters
def _apply_rel_filters(self, queryset):
"""
Filter the queryset for the instance this manager is bound to.
"""
queryset._add_hints(instance=self.instance)
if self._db:
queryset = queryset.using(self._db)
return queryset._next_is_sticky().filter(**self.core_filters)
def _remove_prefetched_objects(self):
try:
self.instance._prefetched_objects_cache.pop(self.prefetch_cache_name)
except (AttributeError, KeyError):
pass # nothing to clear from cache
def get_queryset(self):
try:
return self.instance._prefetched_objects_cache[self.prefetch_cache_name]
except (AttributeError, KeyError):
queryset = super(ManyRelatedManager, self).get_queryset()
return self._apply_rel_filters(queryset)
def get_prefetch_queryset(self, instances, queryset=None):
if queryset is None:
queryset = super(ManyRelatedManager, self).get_queryset()
queryset._add_hints(instance=instances[0])
queryset = queryset.using(queryset._db or self._db)
query = {'%s__in' % self.query_field_name: instances}
queryset = queryset._next_is_sticky().filter(**query)
# M2M: need to annotate the query in order to get the primary model
# that the secondary model was actually related to. We know that
# there will already be a join on the join table, so we can just add
# the select.
# For non-autocreated 'through' models, can't assume we are
# dealing with PK values.
fk = self.through._meta.get_field(self.source_field_name)
join_table = fk.model._meta.db_table
connection = connections[queryset.db]
qn = connection.ops.quote_name
queryset = queryset.extra(select={
'_prefetch_related_val_%s' % f.attname:
'%s.%s' % (qn(join_table), qn(f.column)) for f in fk.local_related_fields})
return (
queryset,
lambda result: tuple(
getattr(result, '_prefetch_related_val_%s' % f.attname)
for f in fk.local_related_fields
),
lambda inst: tuple(
f.get_db_prep_value(getattr(inst, f.attname), connection)
for f in fk.foreign_related_fields
),
False,
self.prefetch_cache_name,
)
def add(self, *objs):
if not rel.through._meta.auto_created:
opts = self.through._meta
raise AttributeError(
"Cannot use add() on a ManyToManyField which specifies an "
"intermediary model. Use %s.%s's Manager instead." %
(opts.app_label, opts.object_name)
)
self._remove_prefetched_objects()
db = router.db_for_write(self.through, instance=self.instance)
with transaction.atomic(using=db, savepoint=False):
self._add_items(self.source_field_name, self.target_field_name, *objs)
# If this is a symmetrical m2m relation to self, add the mirror entry in the m2m table
if self.symmetrical:
self._add_items(self.target_field_name, self.source_field_name, *objs)
add.alters_data = True
def remove(self, *objs):
if not rel.through._meta.auto_created:
opts = self.through._meta
raise AttributeError(
"Cannot use remove() on a ManyToManyField which specifies "
"an intermediary model. Use %s.%s's Manager instead." %
(opts.app_label, opts.object_name)
)
self._remove_prefetched_objects()
self._remove_items(self.source_field_name, self.target_field_name, *objs)
remove.alters_data = True
def clear(self):
db = router.db_for_write(self.through, instance=self.instance)
with transaction.atomic(using=db, savepoint=False):
signals.m2m_changed.send(
sender=self.through, action="pre_clear",
instance=self.instance, reverse=self.reverse,
model=self.model, pk_set=None, using=db,
)
self._remove_prefetched_objects()
filters = self._build_remove_filters(super(ManyRelatedManager, self).get_queryset().using(db))
self.through._default_manager.using(db).filter(filters).delete()
signals.m2m_changed.send(
sender=self.through, action="post_clear",
instance=self.instance, reverse=self.reverse,
model=self.model, pk_set=None, using=db,
)
clear.alters_data = True
def set(self, objs, **kwargs):
if not rel.through._meta.auto_created:
opts = self.through._meta
raise AttributeError(
"Cannot set values on a ManyToManyField which specifies an "
"intermediary model. Use %s.%s's Manager instead." %
(opts.app_label, opts.object_name)
)
# Force evaluation of `objs` in case it's a queryset whose value
# could be affected by `manager.clear()`. Refs #19816.
objs = tuple(objs)
clear = kwargs.pop('clear', False)
db = router.db_for_write(self.through, instance=self.instance)
with transaction.atomic(using=db, savepoint=False):
if clear:
self.clear()
self.add(*objs)
else:
old_ids = set(self.using(db).values_list(self.target_field.target_field.attname, flat=True))
new_objs = []
for obj in objs:
fk_val = (
self.target_field.get_foreign_related_value(obj)[0]
if isinstance(obj, self.model) else obj
)
if fk_val in old_ids:
old_ids.remove(fk_val)
else:
new_objs.append(obj)
self.remove(*old_ids)
self.add(*new_objs)
set.alters_data = True
def create(self, **kwargs):
# This check needs to be done here, since we can't later remove this
# from the method lookup table, as we do with add and remove.
if not self.through._meta.auto_created:
opts = self.through._meta
raise AttributeError(
"Cannot use create() on a ManyToManyField which specifies "
"an intermediary model. Use %s.%s's Manager instead." %
(opts.app_label, opts.object_name)
)
db = router.db_for_write(self.instance.__class__, instance=self.instance)
new_obj = super(ManyRelatedManager, self.db_manager(db)).create(**kwargs)
self.add(new_obj)
return new_obj
create.alters_data = True
def get_or_create(self, **kwargs):
db = router.db_for_write(self.instance.__class__, instance=self.instance)
obj, created = super(ManyRelatedManager, self.db_manager(db)).get_or_create(**kwargs)
# We only need to add() if created because if we got an object back
# from get() then the relationship already exists.
if created:
self.add(obj)
return obj, created
get_or_create.alters_data = True
def update_or_create(self, **kwargs):
db = router.db_for_write(self.instance.__class__, instance=self.instance)
obj, created = super(ManyRelatedManager, self.db_manager(db)).update_or_create(**kwargs)
# We only need to add() if created because if we got an object back
# from get() then the relationship already exists.
if created:
self.add(obj)
return obj, created
update_or_create.alters_data = True
def _add_items(self, source_field_name, target_field_name, *objs):
# source_field_name: the PK fieldname in join table for the source object
# target_field_name: the PK fieldname in join table for the target object
# *objs - objects to add. Either object instances, or primary keys of object instances.
# If there aren't any objects, there is nothing to do.
from django.db.models import Model
if objs:
new_ids = set()
for obj in objs:
if isinstance(obj, self.model):
if not router.allow_relation(obj, self.instance):
raise ValueError(
'Cannot add "%r": instance is on database "%s", value is on database "%s"' %
(obj, self.instance._state.db, obj._state.db)
)
fk_val = self.through._meta.get_field(
target_field_name).get_foreign_related_value(obj)[0]
if fk_val is None:
raise ValueError(
'Cannot add "%r": the value for field "%s" is None' %
(obj, target_field_name)
)
new_ids.add(fk_val)
elif isinstance(obj, Model):
raise TypeError(
"'%s' instance expected, got %r" %
(self.model._meta.object_name, obj)
)
else:
new_ids.add(obj)
db = router.db_for_write(self.through, instance=self.instance)
vals = (self.through._default_manager.using(db)
.values_list(target_field_name, flat=True)
.filter(**{
source_field_name: self.related_val[0],
'%s__in' % target_field_name: new_ids,
}))
new_ids = new_ids - set(vals)
with transaction.atomic(using=db, savepoint=False):
if self.reverse or source_field_name == self.source_field_name:
# Don't send the signal when we are inserting the
# duplicate data row for symmetrical reverse entries.
signals.m2m_changed.send(
sender=self.through, action='pre_add',
instance=self.instance, reverse=self.reverse,
model=self.model, pk_set=new_ids, using=db,
)
# Add the ones that aren't there already
self.through._default_manager.using(db).bulk_create([
self.through(**{
'%s_id' % source_field_name: self.related_val[0],
'%s_id' % target_field_name: obj_id,
})
for obj_id in new_ids
])
if self.reverse or source_field_name == self.source_field_name:
# Don't send the signal when we are inserting the
# duplicate data row for symmetrical reverse entries.
signals.m2m_changed.send(
sender=self.through, action='post_add',
instance=self.instance, reverse=self.reverse,
model=self.model, pk_set=new_ids, using=db,
)
def _remove_items(self, source_field_name, target_field_name, *objs):
# source_field_name: the PK colname in join table for the source object
# target_field_name: the PK colname in join table for the target object
# *objs - objects to remove
if not objs:
return
# Check that all the objects are of the right type
old_ids = set()
for obj in objs:
if isinstance(obj, self.model):
fk_val = self.target_field.get_foreign_related_value(obj)[0]
old_ids.add(fk_val)
else:
old_ids.add(obj)
db = router.db_for_write(self.through, instance=self.instance)
with transaction.atomic(using=db, savepoint=False):
# Send a signal to the other end if need be.
signals.m2m_changed.send(
sender=self.through, action="pre_remove",
instance=self.instance, reverse=self.reverse,
model=self.model, pk_set=old_ids, using=db,
)
target_model_qs = super(ManyRelatedManager, self).get_queryset()
if target_model_qs._has_filters():
old_vals = target_model_qs.using(db).filter(**{
'%s__in' % self.target_field.target_field.attname: old_ids})
else:
old_vals = old_ids
filters = self._build_remove_filters(old_vals)
self.through._default_manager.using(db).filter(filters).delete()
signals.m2m_changed.send(
sender=self.through, action="post_remove",
instance=self.instance, reverse=self.reverse,
model=self.model, pk_set=old_ids, using=db,
)
return ManyRelatedManager
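# Hedged usage sketch (not part of Django): with the Pizza/Topping example
# from ManyToManyDescriptor, both sides expose the same manager API:
#
#   pizza.toppings.add(t1, t2)   # inserts rows into the through table
#   topping.pizzas.remove(p1)    # removal works from the reverse side too
#   pizza.toppings.set([t1])     # diff-based add/remove, see set() above
#
# add()/remove()/set() raise AttributeError when a custom (non-auto-created)
# through model is declared; rows must then go through that model's manager.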
|
{
"content_hash": "5711e175528f5582358a9a049ed94b8a",
"timestamp": "",
"source": "github",
"line_count": 1149,
"max_line_length": 119,
"avg_line_length": 44.15839860748477,
"alnum_prop": 0.5757617564744373,
"repo_name": "erikr/django",
"id": "9b693ed19606029d44871da71bccd70390420847",
"size": "50738",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "django/db/models/fields/related_descriptors.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "53169"
},
{
"name": "HTML",
"bytes": "173592"
},
{
"name": "JavaScript",
"bytes": "448151"
},
{
"name": "Makefile",
"bytes": "125"
},
{
"name": "Python",
"bytes": "12192494"
},
{
"name": "Shell",
"bytes": "809"
},
{
"name": "Smarty",
"bytes": "130"
}
],
"symlink_target": ""
}
|
r"""Worker Script for generating scenes from the Semistatic-KLEVR dataset."""
# pylint: disable=logging-format-interpolation
import collections
import json
import logging
from typing import Dict, List, Optional, Tuple
import kubric as kb
import kubric.renderer.blender
import kubric.simulator.pybullet
import numpy as np
import tensorflow as tf
import bpy
# --- Some configuration values
# the region in which to place objects [(min), (max)]
# TODO(atagliasacchi): SPAWN_REGION should be specified by flags?
SPAWN_REGION = [(-4, -4, 0), (4, 4, 3)]
# the names of the KuBasic assets to use (WARNING: excludes "Suzanne" and "Sponge")
OBJECT_TYPES = [
"cube",
"cylinder",
"sphere",
"cone",
"torus",
"gear",
"torusknot",
"spot",
"teapot",
]
# the set of colors to sample from
COLORS = collections.OrderedDict({
"blue": kb.Color(42 / 255, 75 / 255, 215 / 255),
"brown": kb.Color(129 / 255, 74 / 255, 25 / 255),
"cyan": kb.Color(41 / 255, 208 / 255, 208 / 255),
"gray": kb.Color(87 / 255, 87 / 255, 87 / 255),
"green": kb.Color(29 / 255, 105 / 255, 20 / 255),
"purple": kb.Color(129 / 255, 38 / 255, 192 / 255),
"red": kb.Color(173 / 255, 35 / 255, 35 / 255),
"yellow": kb.Color(255 / 255, 238 / 255, 5 / 255),
})
# Valid options when choosing colors by flags.
COLOR_STRATEGIES = ["clevr", "uniform", "black", "gray", "white"]
# the sizes of objects to sample from
SIZES = {
"small": 0.7,
"large": 1.4,
}
# Valid options when choosing sizes by flags.
SIZE_STRATEGIES = ["clevr", "uniform", "const"]
# Percentage of frames that fall into the training split.
_TRAIN_TEST_RATIO = 0.7
def parse_flags():
"""ArgumentParser flags parsing."""
# --- CLI arguments
parser = kb.ArgumentParser()
parser.add_argument("--num_objects_static", type=int, default=5)
parser.add_argument("--num_objects_dynamic", type=int, default=3)
parser.add_argument(
"--object_types",
nargs="+",
default=["cube", "cylinder", "sphere", "torus", "gear"],
choices=["ALL"] + OBJECT_TYPES)
parser.add_argument(
"--object_colors", choices=COLOR_STRATEGIES, default="clevr")
parser.add_argument(
"--object_sizes", choices=SIZE_STRATEGIES, default="clevr")
parser.add_argument("--floor_color", choices=COLOR_STRATEGIES, default="gray")
parser.add_argument("--floor_friction", type=float, default=0.3)
parser.add_argument(
"--background_color", choices=COLOR_STRATEGIES, default="white")
parser.add_argument("--norender", action="store_true")
parser.add_argument(
"--assets_path", type=str, default="gs://kubric-public/assets/KuBasic.json")
parser.add_argument(
"--jitter", type=bool, default=False)
parser.set_defaults(
# Default is 1 for optical flow (unused here)
frame_start=1,
frame_end=301, # Controls number of rendered frames per scene.
frame_rate=12,
resolution=(256, 256),
)
return parser.parse_args()
def _sample_color(
strategy: str,
rng: np.random.RandomState,
) -> Tuple[Optional[str], kb.Color]:
"""Samples an object's color.
Args:
strategy: Sampling strategy.
rng: Source of randomness.
Returns:
color_label: Human-readable description of "color".
color: Kubric color for this object.
"""
# Note: If you add an option here, update COLOR_STRATEGIES as well.
if strategy in ["black", "gray", "white"]:
return strategy, kb.get_color(strategy)
elif strategy == "clevr":
color_label = rng.choice(list(COLORS.keys()))
return color_label, COLORS[color_label]
elif strategy == "uniform":
return None, kb.random_hue_color(rng=rng)
else:
raise ValueError(f"Unknown color sampling strategy {strategy}")
def _sample_sizes(
strategy: str,
rng: np.random.RandomState,
) -> Tuple[Optional[str], float]:
"""Samples an object's size.
Args:
strategy: Sampling strategy.
rng: Source of randomness.
Returns:
size_label: Human-readable description of "size".
size: Size multiplier. Use as object scale.
"""
if strategy == "clevr":
size_label = rng.choice(list(SIZES.keys()))
size = SIZES[size_label]
return size_label, size
elif strategy == "uniform":
return None, rng.uniform(0.7, 1.4)
elif strategy == "const":
return None, 1
else:
raise ValueError(f"Unknown size sampling strategy {strategy}")
def _get_scene_boundaries(
floor: kb.Cube,
spawn_region: List[Tuple[float, float, float]],
) -> Dict[str, List[int]]:
"""Constructs bounding box for all scene-varying content.
This bounding box ostensibly includes the objects themselves and their
shadows. Due to complexity, a guarantee on this cannot be made. The logic
here is best-effort.
Args:
floor: Cube representing the scene's floor. The cube is axis-aligned with
its top surface lying below all objects in the scene.
spawn_region: Lower and upper boundaries of a 3D axis-aligned box, inside
which all foreground objects are guaranteed to lie within.
Returns:
Dict with two entries, "min" and "max". These entries are 3-element lists
containing the lower and upper boundaries of a 3D axis-aligned bounding
box.
"""
spawn_min, spawn_max = np.asarray(spawn_region)
if not np.allclose(floor.aabbox[1][2], spawn_min[2]):
raise ValueError("Top of floor and bottom of bounding box be identical.")
  # Add a small buffer volume to the scene's bounding box to encourage
  # shadows on the floor to lie within the scene bounding box.
boundary_epsilon = 0.1
scene_min = spawn_min - boundary_epsilon
scene_max = spawn_max + boundary_epsilon
return {
"min": list(scene_min),
"max": list(scene_max),
}
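# Worked example (hedged, assuming the default SPAWN_REGION above): with
# spawn_min = (-4, -4, 0), spawn_max = (4, 4, 3) and boundary_epsilon = 0.1,
# the result is {"min": [-4.1, -4.1, -0.1], "max": [4.1, 4.1, 3.1]}.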
def _get_floor_scale_position_kwargs(
spawn_region: List[Tuple[int, int, int]],
) -> Dict[str, Tuple[float, float, float]]:
"""Constructs scale, position of the cube representing the floor.
Cube's scale and position are chosen such that the cube's top surface lies
strictly inside of spawn_region.
Args:
spawn_region: Region of the floor.
Returns:
{'scale': <XYZ multipliers for [-1, 1] cube>,
'position': <XYZ center of floor cube>}
"""
spawn_min, spawn_max = np.array(spawn_region)
# Center of spawn position.
position = (spawn_max + spawn_min) / 2
# Set center of floor to be equal to bottom of spawn region.
position[-1] = spawn_min[-1]
  # The default cube spans [[-1, -1, -1], [1, 1, 1]], so each side has
  # length 2; halve the spawn extent to get the scale multipliers.
scale = (spawn_max - spawn_min) / 2
scale[-1] = 1e-8 # Floor height is minimal.
return dict(
position=tuple(position),
scale=tuple(scale),
)
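# Worked example (hedged, assuming the default SPAWN_REGION above): for
# spawn_region = [(-4, -4, 0), (4, 4, 3)] the center is (0, 0, 1.5), the z
# coordinate snaps to spawn_min's z, and the extent (8, 8, 3) is halved,
# yielding position = (0.0, 0.0, 0.0) and scale = (4.0, 4.0, 1e-08).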
def main(flags) -> None:
if "ALL" in flags.object_types:
objects_types = OBJECT_TYPES
else:
objects_types = flags.object_types
# --- Common setups & resources
kb.setup_logging(flags.logging_level)
kb.log_my_flags(flags)
scratch_dir, output_dir = kb.setup_directories(flags)
if flags.seed is None:
raise ValueError("You must specify --seed to proceed.")
seed = flags.seed
rng = np.random.RandomState(seed=seed)
scene = kb.Scene.from_flags(flags)
simulator = kubric.simulator.pybullet.PyBullet(scene, scratch_dir)
renderer = kubric.renderer.blender.Blender(
scene, scratch_dir,
adaptive_sampling=False) # Removes salt-and-pepper artifacts in shadows.
logging.info("Loading assets from %s", flags.assets_path)
kubasic = kb.AssetSource.from_manifest(flags.assets_path)
# --- Populate the scene
logging.info("Creating a large cube as the floor...")
# Create the "floor" of the scene. This is a large, flat cube of
# x_length=200, y_length=200 and z_length=2 centered at x=0, y=0, z=-1.
# This means that the floor's elevation is z + z_length/2 == -1 + 1 == 0.
_, floor_color = _sample_color(flags.floor_color, rng)
floor_material = kb.PrincipledBSDFMaterial(
color=floor_color, roughness=1., specular=0.)
floor = kb.Cube(
**_get_floor_scale_position_kwargs(SPAWN_REGION),
material=floor_material,
friction=flags.floor_friction,
static=True,
background=True)
scene.add(floor)
# Set background color.
_, background_color = _sample_color(flags.background_color, rng)
logging.info("Setting background color to %s...", background_color)
scene.background = background_color
logging.info("Adding several lights to the scene...")
sun = kb.DirectionalLight(
color=kb.get_color("white"),
shadow_softness=0.2,
intensity=3.,
position=(11.6608, -6.62799, 25.8232))
lamp_back = kb.RectAreaLight(
color=kb.get_color("white"),
intensity=50.,
position=(-1.1685, 2.64602, 5.81574))
lamp_key = kb.RectAreaLight(
color=kb.get_color(0xffedd0),
intensity=100.,
width=0.5,
height=0.5,
position=(6.44671, -2.90517, 4.2584))
lamp_fill = kb.RectAreaLight(
color=kb.get_color("#c2d0ff"),
intensity=30.,
width=0.5,
height=0.5,
position=(-4.67112, -4.0136, 3.01122))
lights = [sun, lamp_back, lamp_key, lamp_fill]
for light in lights:
light.look_at((0, 0, 0))
scene.add(light)
scene.ambient_illumination = kb.Color(0.05, 0.05, 0.05)
logging.info("Setting up the Camera...")
original_camera_position = (7.48113, -6.50764, 5.34367)
scene.camera = kb.PerspectiveCamera(
focal_length=35., sensor_width=32, position=original_camera_position)
scene.camera.look_at((0, 0, 0))
# TODO: Refactor this so that we can render both a "spherical view"
# and a "lattice view" through the scene.
# Render cameras at the same general distance from the origin, but at
# different positions.
#
# We will use spherical coordinates (r, theta, phi) to do this.
# x = r * cos(theta) * sin(phi)
# y = r * sin(theta) * sin(phi)
# z = r * cos(phi)
r = np.sqrt(sum(a * a for a in original_camera_position))
phi = np.arccos(original_camera_position[2] / r)
theta = np.arccos(original_camera_position[0] / (r * np.sin(phi)))
# At each frame, we will modify theta so that we move in a circle.
# At each given theta value, we will also render three values of phi so our
# data "matches" LLFF (i.e., we have motion in z as well as x and y).
num_frames_to_render = (scene.frame_end - scene.frame_start)
# TODO: Either parameterize this or turn it into a constant
# matching the style guide recommendation.
num_phi_values_per_theta = 4
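  # Worked example (hedged, approximate): for the original camera position
  # (7.48113, -6.50764, 5.34367), r = sqrt(x^2 + y^2 + z^2) ~= 11.26,
  # phi = arccos(z / r) ~= 1.08 rad and theta = arccos(x / (r * sin(phi)))
  # ~= 0.72 rad. Plugging (r, theta, phi) back into the equations above
  # recovers x and z exactly and y up to sign (arccos cannot distinguish
  # +theta from -theta).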
def add_random_objects(num_objects, objects_types, rng):
"""Returns the list of created objects."""
logging.info(f"Randomly placing {num_objects} objects")
objects = list()
for _ in range(num_objects):
shape_name = rng.choice(objects_types)
_, size = _sample_sizes("const", rng)
_, color = _sample_color(flags.object_colors, rng)
segmentation_id_val = objects_types.index(shape_name) + 1
obj = kubasic.create(
asset_id=shape_name, scale=size, segmentation_id=segmentation_id_val)
material_name = "Rubber"
obj.material = kb.PrincipledBSDFMaterial(
color=color, metallic=0., ior=1.25, roughness=0.7, specular=0.33)
scene.add(obj)
obj.metadata = {
"shape": shape_name.lower(),
"size": size,
"material": material_name.lower(),
"color": color.rgb,
}
# Place object randomly within SPAWN_REGION
kb.move_until_no_overlap(
obj, simulator, spawn_region=SPAWN_REGION, rng=rng)
objects += [obj]
logging.info("Added object %s", obj)
return objects
# --- Place random objects (static in time)
add_random_objects(
flags.num_objects_static, objects_types=objects_types, rng=rng)
# --- Place dynamic objects (will change in time)
dyn_objects = add_random_objects(
flags.num_objects_dynamic, objects_types=[
"suzanne",
], rng=rng)
# --- if in jitter mode shake the monkeys instead
if flags.jitter:
for dyn_object in dyn_objects:
dyn_object.orig_position = dyn_object.position
theta_change = (2 * np.pi) / (num_frames_to_render / num_phi_values_per_theta)
for frame in range(scene.frame_start, scene.frame_end + 1):
i = (frame - scene.frame_start)
theta_new = (i // num_phi_values_per_theta) * theta_change + theta
# These values of (x, y, z) will lie on the same sphere as the original
# camera.
x = r * np.cos(theta_new) * np.sin(phi)
y = r * np.sin(theta_new) * np.sin(phi)
z = r * np.cos(phi)
    # To ensure we have "roughly LLFF-style" data (multiple z values for the same
# x and y), we will also adjust z. The camera is no longer guaranteed to
# remain on the same sphere as the original camera.
z_shift_direction = (i % num_phi_values_per_theta) - 1
z = z + z_shift_direction * 1.2
scene.camera.position = (x, y, z)
scene.camera.look_at((0, 0, 0))
scene.camera.keyframe_insert("position", frame)
scene.camera.keyframe_insert("quaternion", frame)
# --- randomly change the position of the dynamic objects in each frame
for obj in dyn_objects:
# --- hide dynamic objects once every 30 frames (evaluation set)
if (i % 30) != 0:
if not flags.jitter:
kb.move_until_no_overlap(
obj, simulator, spawn_region=SPAWN_REGION, rng=rng)
else:
obj.position = obj.orig_position + .1*np.random.randn(3)
else:
random_dir = np.random.randn(3)
random_dir = random_dir / np.linalg.norm(random_dir)
large_offset = 10e3
position = (large_offset * random_dir).tolist()
obj.position = position
obj.keyframe_insert("position", frame)
# print(f"moved from {old_position} to {obj.position}")
# --- Rendering
logging.info(f'Saving {output_dir / "scene.blend"}')
renderer.save_state(output_dir / "scene.blend")
print(f"jitter mode? {flags.jitter}")
if flags.norender:
logging.warning("rendering not executed")
kb.done()
exit(0)
logging.info("Rendering the scene ...")
render_data = renderer.render()
# replace asset index (in scene.assets) with segmentation_id
render_data["segmentation"] = kb.adjust_segmentation_idxs(
render_data["segmentation"], scene.assets, scene.assets)
# export all rendered images / segmentations / depth / ... to output dir
kb.write_image_dict(render_data, output_dir)
# --- Metadata
logging.info("Collecting and storing metadata for each object.")
kb.write_pkl(
{
"metadata": kb.get_scene_metadata(scene, seed=seed),
"camera": kb.get_camera_info(scene.camera),
"instances": kb.get_instance_info(scene),
"background": {
"background_color": background_color,
"floor_color": floor_color.rgb,
"floor_friction": flags.floor_friction,
}
}, output_dir / "metadata.pkl")
# Save the scene camera in a JSON file.
scene_metadata = kb.get_scene_metadata(scene, seed=seed)
scene_camera = kb.get_camera_info(
scene.camera,
height=scene_metadata["resolution"][0],
width=scene_metadata["resolution"][1],
)
scene_camera["positions"] = scene_camera["positions"].tolist()
scene_camera["quaternions"] = scene_camera["quaternions"].tolist()
scene_camera["K"] = scene_camera["K"].tolist()
scene_camera["R"] = scene_camera["R"].tolist()
# Generate the train/test ids
train_ids, test_ids = _make_train_test_ids(rng, scene_metadata["num_frames"])
logging.info(
f"Split {num_frames_to_render} frames: {len(train_ids)} train frames, "
f"{len(test_ids)} test frames.")
metadata = {
"metadata": scene_metadata,
"camera": scene_camera,
"segmentation_labels": objects_types,
"scene_boundaries": _get_scene_boundaries(floor, SPAWN_REGION),
"split_ids": {
"train": train_ids,
"test": test_ids,
},
}
kb.write_json(metadata, output_dir / "metadata.json")
scene_gltf_file = str(scratch_dir / "scene.glb")
logging.info("Saving %s ", scene_gltf_file)
bpy.ops.export_scene.gltf(filepath=scene_gltf_file)
output_scene_gltf_file = str(output_dir / "scene.glb")
tf.io.gfile.copy(scene_gltf_file, output_scene_gltf_file, overwrite=True)
kb.done()
def _make_train_test_ids(
rng: np.random.RandomState,
num_frames: int,
) -> Tuple[List[int], List[int]]:
"""Randomly generate train / test ids."""
all_idx = rng.permutation(num_frames)
boundary = int(num_frames * _TRAIN_TEST_RATIO)
train_ids = all_idx[:boundary]
test_ids = all_idx[boundary:]
  # np.int64 is not JSON-serializable, so convert to int first.
return list(int(x) for x in train_ids), list(int(x) for x in test_ids)
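# Worked example (hedged, assuming num_frames counts frames 1..301 inclusive
# per the defaults in parse_flags): boundary = int(301 * 0.7) = 210, so one
# permutation yields 210 train ids and 91 test ids.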
if __name__ == "__main__":
main(flags=parse_flags())
|
{
"content_hash": "c72cf6fb45c34d499aa4941d2bf95ed5",
"timestamp": "",
"source": "github",
"line_count": 499,
"max_line_length": 82,
"avg_line_length": 33.86172344689379,
"alnum_prop": 0.6552642480913772,
"repo_name": "google-research/kubric",
"id": "bcf755c34489527f25a3725de63fffa160a1e978",
"size": "17480",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "challenges/robust_nerf/worker.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "8497"
},
{
"name": "Jupyter Notebook",
"bytes": "6178517"
},
{
"name": "Makefile",
"bytes": "9775"
},
{
"name": "Python",
"bytes": "804300"
},
{
"name": "Shell",
"bytes": "5970"
}
],
"symlink_target": ""
}
|
def __load():
import imp, os, sys
try:
dirname = os.path.dirname(__loader__.archive)
except NameError:
dirname = sys.prefix
path = os.path.join(dirname, 'win32file.pyd')
#print "py2exe extension module", __name__, "->", path
mod = imp.load_dynamic(__name__, path)
## mod.frozen = 1
__load()
del __load
|
{
"content_hash": "a9ad5ea4ccb2761dded7c39f9fbb67f4",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 58,
"avg_line_length": 28.75,
"alnum_prop": 0.5826086956521739,
"repo_name": "IronLanguages/ironpython2",
"id": "f43c9727f6ad456119b205849d6c3de4e7e99466",
"size": "346",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "Src/StdLib/Lib/site-packages/isapi/test/build/bdist.win32/winexe/temp/win32file.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "4080"
},
{
"name": "C",
"bytes": "20290"
},
{
"name": "C#",
"bytes": "12424667"
},
{
"name": "C++",
"bytes": "69156"
},
{
"name": "Classic ASP",
"bytes": "2117"
},
{
"name": "HTML",
"bytes": "13181412"
},
{
"name": "JavaScript",
"bytes": "1656"
},
{
"name": "Makefile",
"bytes": "332"
},
{
"name": "PLSQL",
"bytes": "22886"
},
{
"name": "PowerShell",
"bytes": "67035"
},
{
"name": "Python",
"bytes": "27860071"
},
{
"name": "Roff",
"bytes": "21"
},
{
"name": "Shell",
"bytes": "193"
},
{
"name": "Smalltalk",
"bytes": "3"
},
{
"name": "VBScript",
"bytes": "974"
},
{
"name": "XSLT",
"bytes": "2058"
}
],
"symlink_target": ""
}
|
"""Functional tests for shape inference helper classes."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.core.framework import tensor_shape_pb2
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.platform import googletest
class DimensionTest(test_util.TensorFlowTestCase):
def testDimension(self):
dim = tensor_shape.Dimension(12)
self.assertEqual(12, dim.value)
self.assertEqual(12, int(dim))
self.assertEqual(dim, tensor_shape.Dimension(12))
self.assertEqual(tensor_shape.Dimension(15),
dim + tensor_shape.Dimension(3))
self.assertEqual(tensor_shape.Dimension(15), dim + 3)
self.assertEqual(tensor_shape.Dimension(24),
dim * tensor_shape.Dimension(2))
self.assertEqual(tensor_shape.Dimension(24), dim * 2)
self.assertEqual(
tensor_shape.Dimension(6), dim // tensor_shape.Dimension(2))
self.assertEqual(tensor_shape.Dimension(6), dim // 2)
self.assertEqual(tensor_shape.Dimension(12),
dim.merge_with(tensor_shape.Dimension(12)))
self.assertEqual(tensor_shape.Dimension(12), dim.merge_with(12))
self.assertLess(tensor_shape.Dimension(12), tensor_shape.Dimension(13))
self.assertGreater(tensor_shape.Dimension(13), tensor_shape.Dimension(12))
self.assertLessEqual(tensor_shape.Dimension(12), tensor_shape.Dimension(12))
self.assertLessEqual(tensor_shape.Dimension(12), tensor_shape.Dimension(13))
self.assertGreater(tensor_shape.Dimension(13), tensor_shape.Dimension(12))
self.assertGreaterEqual(tensor_shape.Dimension(12),
tensor_shape.Dimension(12))
self.assertGreaterEqual(tensor_shape.Dimension(13),
tensor_shape.Dimension(12))
with self.assertRaises(ValueError):
dim.merge_with(tensor_shape.Dimension(13))
def testUnknownDimension(self):
dim = tensor_shape.Dimension(None)
self.assertIs(None, dim.value)
self.assertEqual(dim.value, tensor_shape.Dimension(None).value)
self.assertEqual(tensor_shape.Dimension(None).value,
(dim + tensor_shape.Dimension(None)).value)
self.assertEqual(tensor_shape.Dimension(None).value,
(dim * tensor_shape.Dimension(None)).value)
self.assertEqual(
tensor_shape.Dimension(None).value,
(dim // tensor_shape.Dimension(None)).value)
self.assertEqual(tensor_shape.Dimension(None).value,
dim.merge_with(tensor_shape.Dimension(None)).value)
self.assertIs(None,
tensor_shape.Dimension(None) < tensor_shape.Dimension(None))
self.assertIs(None,
tensor_shape.Dimension(None) <= tensor_shape.Dimension(None))
self.assertIs(None,
tensor_shape.Dimension(None) > tensor_shape.Dimension(None))
self.assertIs(None,
tensor_shape.Dimension(None) >= tensor_shape.Dimension(None))
def testKnownAndUnknownDimensions(self):
known = tensor_shape.Dimension(12)
unknown = tensor_shape.Dimension(None)
self.assertEqual(
tensor_shape.Dimension(None).value, (known + unknown).value)
self.assertEqual(
tensor_shape.Dimension(None).value, (unknown + known).value)
self.assertEqual(
tensor_shape.Dimension(None).value, (known * unknown).value)
self.assertEqual(
tensor_shape.Dimension(None).value, (unknown * known).value)
self.assertEqual(
tensor_shape.Dimension(None).value, (known // unknown).value)
self.assertEqual(
tensor_shape.Dimension(None).value, (unknown // known).value)
self.assertEqual(
tensor_shape.Dimension(12), known.merge_with(unknown))
self.assertEqual(
tensor_shape.Dimension(12), unknown.merge_with(known))
self.assertIs(None,
tensor_shape.Dimension(12) < tensor_shape.Dimension(None))
self.assertIs(None,
tensor_shape.Dimension(12) <= tensor_shape.Dimension(None))
self.assertIs(None,
tensor_shape.Dimension(12) > tensor_shape.Dimension(None))
self.assertIs(None,
tensor_shape.Dimension(12) >= tensor_shape.Dimension(None))
self.assertIs(None,
tensor_shape.Dimension(None) < tensor_shape.Dimension(12))
self.assertIs(None,
tensor_shape.Dimension(None) <= tensor_shape.Dimension(12))
self.assertIs(None,
tensor_shape.Dimension(None) > tensor_shape.Dimension(12))
self.assertIs(None,
tensor_shape.Dimension(None) >= tensor_shape.Dimension(12))
def testAsDimension(self):
self.assertEqual(tensor_shape.Dimension(12),
tensor_shape.as_dimension(tensor_shape.Dimension(12)))
self.assertEqual(tensor_shape.Dimension(12), tensor_shape.as_dimension(12))
self.assertEqual(
tensor_shape.Dimension(None).value,
tensor_shape.as_dimension(tensor_shape.Dimension(None)).value)
self.assertEqual(tensor_shape.Dimension(None).value,
tensor_shape.as_dimension(None).value)
def testEquality(self):
self.assertTrue(tensor_shape.Dimension(12) == tensor_shape.Dimension(12))
self.assertFalse(tensor_shape.Dimension(12) == tensor_shape.Dimension(13))
self.assertIs(None,
tensor_shape.Dimension(12) == tensor_shape.Dimension(None))
self.assertIs(None,
tensor_shape.Dimension(None) == tensor_shape.Dimension(12))
self.assertIs(None,
tensor_shape.Dimension(None) == tensor_shape.Dimension(None))
self.assertTrue(tensor_shape.Dimension(12) == "12")
self.assertTrue(tensor_shape.Dimension(12) == 24.0 / 2)
    # None indicates ambiguous comparison, but comparison vs the wrong type
    # is unambiguously False.
self.assertIsNotNone(tensor_shape.Dimension(12) == "_")
self.assertIsNotNone(tensor_shape.Dimension(None) == 12.99)
self.assertFalse(tensor_shape.Dimension(12) == "_")
self.assertFalse(tensor_shape.Dimension(None) == 12.99)
self.assertIs(None, tensor_shape.Dimension(None) == "13")
self.assertIs(None, tensor_shape.Dimension(None) == None) # pylint: disable=g-equals-none
self.assertFalse(tensor_shape.Dimension(12) == 12.99)
def testInequality(self):
self.assertTrue(tensor_shape.Dimension(12) != tensor_shape.Dimension(13))
self.assertFalse(tensor_shape.Dimension(12) != tensor_shape.Dimension(12))
self.assertIs(None,
tensor_shape.Dimension(12) != tensor_shape.Dimension(None))
self.assertIs(None,
tensor_shape.Dimension(None) != tensor_shape.Dimension(12))
self.assertIs(None,
tensor_shape.Dimension(None) != tensor_shape.Dimension(None))
# None indicates ambiguous comparison, but comparison vs the wrong type
    # is unambiguously False.
self.assertIsNotNone(tensor_shape.Dimension(12) != "_")
self.assertIsNotNone(tensor_shape.Dimension(None) != 12.99)
self.assertTrue(tensor_shape.Dimension(12) != "_")
self.assertTrue(tensor_shape.Dimension(None) != 12.99)
self.assertIs(None, tensor_shape.Dimension(None) != "13")
self.assertIs(None, tensor_shape.Dimension(None) != None) # pylint: disable=g-equals-none
self.assertTrue(tensor_shape.Dimension(12) != 12.99)
def testRepr(self):
self.assertEqual(repr(tensor_shape.Dimension(7)), "Dimension(7)")
self.assertEqual(repr(tensor_shape.Dimension(None)), "Dimension(None)")
def testStr(self):
self.assertEqual(str(tensor_shape.Dimension(7)), "7")
self.assertEqual(str(tensor_shape.Dimension(None)), "?")
class ShapeTest(test_util.TensorFlowTestCase):
def testUnknownShape(self):
s = tensor_shape.TensorShape(None)
with self.assertRaises(ValueError):
s.assert_is_fully_defined()
self.assertIs(None, s.ndims)
with self.assertRaises(ValueError):
len(s)
self.assertFalse(s)
self.assertIs(None, s.dims)
def testFullyDefinedShape(self):
s = tensor_shape.TensorShape([tensor_shape.Dimension(
3), tensor_shape.Dimension(4), tensor_shape.Dimension(7)])
s.assert_is_fully_defined()
self.assertEqual(3, s.ndims)
self.assertEqual(3, len(s))
self.assertTrue(s)
s.assert_has_rank(3)
self.assertEqual([tensor_shape.Dimension(3),
tensor_shape.Dimension(4),
tensor_shape.Dimension(7)], s.dims)
self.assertEqual(tensor_shape.Dimension(3), s[0])
self.assertEqual(tensor_shape.Dimension(4), s[1])
self.assertEqual(tensor_shape.Dimension(7), s[2])
self.assertEqual([3, 4, 7], s.as_list())
s.assert_is_compatible_with([3, 4, 7])
s.assert_same_rank([6, 3, 7])
def testPartiallyDefinedShape(self):
s = tensor_shape.TensorShape([tensor_shape.Dimension(
3), tensor_shape.Dimension(None), tensor_shape.Dimension(7)])
with self.assertRaises(ValueError):
s.assert_is_fully_defined()
self.assertEqual(3, s.ndims)
self.assertEqual(3, len(s))
self.assertTrue(s)
s.assert_has_rank(3)
self.assertEqual(tensor_shape.Dimension(3), s[0])
self.assertEqual(tensor_shape.Dimension(None).value, s[1].value)
self.assertEqual(tensor_shape.Dimension(7), s[2])
s.assert_same_rank([6, 3, 7])
def testMergeFullShapes(self):
self.assertEqual([3, 4, 7],
tensor_shape.TensorShape([3, 4, 7]).merge_with(
tensor_shape.TensorShape([3, 4, 7])).as_list())
with self.assertRaises(ValueError):
tensor_shape.TensorShape([3, 4, 7]).merge_with(
tensor_shape.TensorShape([6, 3, 7]))
def testMergePartialShapes(self):
s1 = tensor_shape.TensorShape([tensor_shape.Dimension(
3), tensor_shape.Dimension(None), tensor_shape.Dimension(7)])
s2 = tensor_shape.TensorShape([tensor_shape.Dimension(
None), tensor_shape.Dimension(4), tensor_shape.Dimension(7)])
self.assertEqual([3, 4, 7], s1.merge_with(s2).as_list())
def testMergeFullAndUnknownShape(self):
self.assertEqual([3, 4, 7],
tensor_shape.TensorShape([3, 4, 7]).merge_with(
tensor_shape.TensorShape(None)).as_list())
def testSlice(self):
known = tensor_shape.TensorShape([0, 1, 2, 3, 4])
self.assertEqual(tensor_shape.Dimension(2), known[2])
tensor_shape.TensorShape([1, 2, 3]).assert_is_compatible_with(known[1:4])
unknown = tensor_shape.TensorShape(None)
self.assertEqual(tensor_shape.Dimension(None).value, unknown[2].value)
tensor_shape.TensorShape(
[None, None, None]).assert_is_compatible_with(unknown[1:4])
def testConcatenate(self):
tensor_shape.TensorShape([1, 2, 3, 4]).assert_is_compatible_with(
tensor_shape.TensorShape([1, 2]).concatenate(
tensor_shape.TensorShape([3, 4])))
tensor_shape.TensorShape([1, 2, 3, 4]).assert_is_compatible_with(
tensor_shape.TensorShape([1, 2]).concatenate(
tensor_shape.TensorShape(None)))
tensor_shape.TensorShape([1, 2, 3, 4]).assert_is_compatible_with(
tensor_shape.TensorShape(None).concatenate(
tensor_shape.TensorShape([3, 4])))
tensor_shape.TensorShape([1, 2, 3, 4]).assert_is_compatible_with(
tensor_shape.TensorShape(None).concatenate(
tensor_shape.TensorShape(None)))
tensor_shape.TensorShape([1, 2, 3]).assert_is_compatible_with(
tensor_shape.TensorShape([1, 2]).concatenate(
tensor_shape.Dimension(3)))
def testHelpers(self):
tensor_shape.TensorShape([]).assert_is_compatible_with(
tensor_shape.scalar())
tensor_shape.TensorShape([37]).assert_is_compatible_with(
tensor_shape.vector(37))
tensor_shape.TensorShape(
[94, 43]).assert_is_compatible_with(tensor_shape.matrix(94, 43))
def testTruedivFails(self):
unknown = tensor_shape.Dimension(None)
self.assertEqual((unknown // unknown).value, None)
with self.assertRaisesRegexp(TypeError, r"unsupported operand type"):
unknown / unknown # pylint: disable=pointless-statement
def testConvertFromProto(self):
def make_tensor_shape_proto(shape):
return tensor_shape_pb2.TensorShapeProto(
dim=[tensor_shape_pb2.TensorShapeProto.Dim(size=x) for x in shape])
proto = make_tensor_shape_proto([])
self.assertEqual(tensor_shape.TensorShape([]),
tensor_shape.TensorShape(proto))
self.assertEqual(tensor_shape.TensorShape([]),
tensor_shape.as_shape(proto))
proto = make_tensor_shape_proto([1, 37, 42])
self.assertEqual(tensor_shape.TensorShape([1, 37, 42]),
tensor_shape.TensorShape(proto))
self.assertEqual(tensor_shape.TensorShape([1, 37, 42]),
tensor_shape.as_shape(proto))
partial_proto_shape = tensor_shape.as_shape(
make_tensor_shape_proto([-1, 37, 42]))
partial_shape = tensor_shape.TensorShape([None, 37, 42])
self.assertNotEqual(partial_proto_shape, partial_shape)
self.assertEqual(partial_proto_shape[0].value, None)
self.assertEqual(partial_proto_shape[1].value, 37)
self.assertEqual(partial_proto_shape[2].value, 42)
self.assertTrue(partial_shape.is_compatible_with(partial_proto_shape))
def testStr(self):
self.assertEqual("<unknown>", str(tensor_shape.unknown_shape()))
self.assertEqual("(?,)", str(tensor_shape.unknown_shape(ndims=1)))
self.assertEqual("(?, ?)", str(tensor_shape.unknown_shape(ndims=2)))
self.assertEqual("(?, ?, ?)", str(tensor_shape.unknown_shape(ndims=3)))
self.assertEqual("()", str(tensor_shape.scalar()))
self.assertEqual("(7,)", str(tensor_shape.vector(7)))
self.assertEqual("(3, 8)", str(tensor_shape.matrix(3, 8)))
self.assertEqual("(4, 5, 2)", str(tensor_shape.TensorShape([4, 5, 2])))
self.assertEqual("(32, ?, 1, 9)",
str(tensor_shape.TensorShape([32, None, 1, 9])))
def testAsProto(self):
self.assertTrue(tensor_shape.unknown_shape().as_proto().unknown_rank)
self.assertFalse(
tensor_shape.unknown_shape(ndims=3).as_proto().unknown_rank)
self.assertFalse(
tensor_shape.TensorShape([1, 2, 3]).as_proto().unknown_rank)
self.assertFalse(
tensor_shape.TensorShape([1, None, 3]).as_proto().unknown_rank)
def testEquality(self):
s1 = tensor_shape.TensorShape([tensor_shape.Dimension(
3), tensor_shape.Dimension(4), tensor_shape.Dimension(7)])
s2 = tensor_shape.TensorShape([tensor_shape.Dimension(
3), tensor_shape.Dimension(4), tensor_shape.Dimension(7)])
s3 = tensor_shape.TensorShape([tensor_shape.Dimension(3),
tensor_shape.Dimension(4), None])
self.assertTrue(s1 == s2)
self.assertFalse(s1 != s2)
self.assertFalse(s1 == "a string")
self.assertTrue(s1 != "a string")
self.assertNotEqual(s1, "347", "Should not equal an ambiguous string.")
self.assertEqual(s1, ["3", "4", "7"])
# Test with an unknown shape in s3
self.assertTrue(s1 != s3)
self.assertFalse(s3 == "a string")
self.assertTrue(s3 != "a string")
if __name__ == "__main__":
googletest.main()
|
{
"content_hash": "941532749ed9a6c847c89bb03486130a",
"timestamp": "",
"source": "github",
"line_count": 339,
"max_line_length": 94,
"avg_line_length": 45.08849557522124,
"alnum_prop": 0.6652927706902192,
"repo_name": "ninotoshi/tensorflow",
"id": "9aa55e47e3e87c50e7c70ac5d42972dc04056d5f",
"size": "15963",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tensorflow/python/framework/tensor_shape_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "151630"
},
{
"name": "C++",
"bytes": "6579490"
},
{
"name": "CMake",
"bytes": "29325"
},
{
"name": "CSS",
"bytes": "107"
},
{
"name": "HTML",
"bytes": "657597"
},
{
"name": "Java",
"bytes": "50361"
},
{
"name": "JavaScript",
"bytes": "16175"
},
{
"name": "Jupyter Notebook",
"bytes": "777942"
},
{
"name": "Objective-C",
"bytes": "1288"
},
{
"name": "Protocol Buffer",
"bytes": "66333"
},
{
"name": "Python",
"bytes": "3809695"
},
{
"name": "Shell",
"bytes": "66697"
},
{
"name": "TypeScript",
"bytes": "329009"
}
],
"symlink_target": ""
}
|
from condensation import flavor as flavors
class Vm(object):
""" This class is representation of OpenStack System"""
def __init__(self, node, vm_id, flavor):
self.vm_id = vm_id
self.node = None
self.flavor = None
if flavor is None:
flavor = flavors.default
self.link_node(node)
self.link_flavor(flavor)
def link_node(self, node):
"""
        This method links this Vm with the given Node.
        If a node is already linked, unlink this Vm from it first so the
        old node is notified.
"""
if self.node:
self.node.unlink_vm(self)
self.node = node
self.node.link_vm(self)
def link_flavor(self, flavor):
"""
This method links Vm with given Flavor
"""
self.flavor = flavor
self.flavor.link_vm(self)
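# A minimal usage sketch (not part of the original module): _StubNode and
# _StubFlavor below are hypothetical stand-ins implementing only the
# link/unlink hooks that Vm.link_node() and Vm.link_flavor() call.
if __name__ == '__main__':
    class _StubNode(object):
        def __init__(self):
            self.vms = []
        def link_vm(self, vm):
            self.vms.append(vm)
        def unlink_vm(self, vm):
            self.vms.remove(vm)
    class _StubFlavor(object):
        def __init__(self):
            self.vms = []
        def link_vm(self, vm):
            self.vms.append(vm)
    node, flavor = _StubNode(), _StubFlavor()
    vm = Vm(node, 'vm-1', flavor)
    assert vm in node.vms and vm in flavor.vms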
|
{
"content_hash": "3d7f65f20ed0dd603ecf5197b65586e9",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 70,
"avg_line_length": 26.3125,
"alnum_prop": 0.5581947743467933,
"repo_name": "archyufa/CloudFerry",
"id": "7acb265a0c8a9fbc7f5e1b7c88f7dcfe2855e7e3",
"size": "1417",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "condensation/vm.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "2615"
},
{
"name": "Python",
"bytes": "971902"
},
{
"name": "Ruby",
"bytes": "2695"
},
{
"name": "Shell",
"bytes": "25415"
}
],
"symlink_target": ""
}
|
class Morph(object):
def __init__(self, surface, base, pos, pos1):
self.surface = surface
self.base = base
self.pos = pos
self.pos1 = pos1
def print_all(self):
return self.surface + "\t" + self.base + ", " + self.pos + ", " + self.pos1
def read_morpheme(cabochafile):
sentences = []
sentence = []
for line in cabochafile:
if line == "EOS\n":
sentences.append(sentence)
sentence = []
elif line[0] == "*":
continue
else:
surface, other = line.split()
others = other.split(",")
base, pos, pos1 = others[6], others[0], others[1]
morph = Morph(surface, base, pos, pos1)
sentence.append(morph)
return sentences
if __name__ == "__main__":
f = open("neko.txt.cabocha", "r")
sentences = read_morpheme(f)
for morph in sentences[2]:
        print(morph.print_all())
f.close()
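# Illustration (hypothetical input line): a CaboCha morpheme line such as
#   "吾輩\t名詞,代名詞,一般,*,*,*,吾輩,ワガハイ,ワガハイ"
# splits into surface="吾輩" plus a feature string whose fields 6, 0 and 1
# supply base, pos and pos1, i.e. Morph("吾輩", "吾輩", "名詞", "代名詞").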
|
{
"content_hash": "00ddd9d588e9d2d78af8eb2d16df21ab",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 83,
"avg_line_length": 25.94871794871795,
"alnum_prop": 0.532608695652174,
"repo_name": "yasutaka/nlp_100",
"id": "ce9d6198831f8d9593e77e4c3439de548f191794",
"size": "1013",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "alex/40_cabocha_problem.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2330"
},
{
"name": "CoffeeScript",
"bytes": "422"
},
{
"name": "Go",
"bytes": "653"
},
{
"name": "HTML",
"bytes": "2286"
},
{
"name": "JavaScript",
"bytes": "1156"
},
{
"name": "Python",
"bytes": "96015"
},
{
"name": "R",
"bytes": "797"
},
{
"name": "Ruby",
"bytes": "31591"
},
{
"name": "Scala",
"bytes": "297"
},
{
"name": "Shell",
"bytes": "1091"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: azure_rm_sqlfirewallrule_facts
version_added: "2.8"
short_description: Get Azure SQL Firewall Rule facts.
description:
- Get facts of SQL Firewall Rule.
options:
resource_group:
description:
- The name of the resource group that contains the server.
required: True
server_name:
description:
- The name of the server.
required: True
name:
description:
- The name of the firewall rule.
extends_documentation_fragment:
- azure
author:
- "Zim Kalinowski (@zikalino)"
'''
EXAMPLES = '''
- name: Get instance of SQL Firewall Rule
azure_rm_sqlfirewallrule_facts:
resource_group: myResourceGroup
server_name: testserver
name: testrule
- name: List instances of SQL Firewall Rule
azure_rm_sqlfirewallrule_facts:
resource_group: myResourceGroup
server_name: testserver
'''
RETURN = '''
rules:
description: A list of dict results containing the facts for matching SQL firewall rules.
returned: always
type: complex
contains:
id:
description:
- Resource ID
returned: always
type: str
sample: "/subscriptions/xxxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Sql/servers/testser
ver/firewallRules/testrule"
resource_group:
description:
- Resource group name.
returned: always
type: str
sample: testgroup
server_name:
description:
- SQL server name.
returned: always
type: str
sample: testserver
name:
description:
- Firewall rule name.
returned: always
type: str
sample: testrule
start_ip_address:
description:
- The start IP address of the firewall rule.
returned: always
type: str
sample: 10.0.0.1
end_ip_address:
description:
                - The end IP address of the firewall rule.
returned: always
type: str
sample: 10.0.0.5
'''
from ansible.module_utils.azure_rm_common import AzureRMModuleBase
try:
from msrestazure.azure_exceptions import CloudError
from msrest.polling import LROPoller
from azure.mgmt.sql import SqlManagementClient
from msrest.serialization import Model
except ImportError:
# This is handled in azure_rm_common
pass
class AzureRMSqlFirewallRuleFacts(AzureRMModuleBase):
def __init__(self):
# define user inputs into argument
self.module_arg_spec = dict(
resource_group=dict(
type='str',
required=True
),
server_name=dict(
type='str',
required=True
),
name=dict(
type='str'
)
)
# store the results of the module operation
self.results = dict(
changed=False
)
self.resource_group = None
self.server_name = None
self.name = None
super(AzureRMSqlFirewallRuleFacts, self).__init__(self.module_arg_spec, supports_tags=False)
def exec_module(self, **kwargs):
for key in self.module_arg_spec:
setattr(self, key, kwargs[key])
if (self.name is not None):
self.results['rules'] = self.get()
else:
self.results['rules'] = self.list_by_server()
return self.results
def get(self):
'''
Gets facts of the specified SQL Firewall Rule.
        :return: deserialized SQL Firewall Rule instance state dictionary
'''
response = None
results = []
try:
response = self.sql_client.firewall_rules.get(resource_group_name=self.resource_group,
server_name=self.server_name,
firewall_rule_name=self.name)
self.log("Response : {0}".format(response))
except CloudError as e:
self.log('Could not get facts for FirewallRules.')
if response is not None:
results.append(self.format_item(response))
return results
def list_by_server(self):
'''
        Lists facts for the SQL Firewall Rules of the specified server.
        :return: list of deserialized SQL Firewall Rule instance state dictionaries
'''
response = None
results = []
try:
response = self.sql_client.firewall_rules.list_by_server(resource_group_name=self.resource_group,
server_name=self.server_name)
self.log("Response : {0}".format(response))
except CloudError as e:
self.log('Could not get facts for FirewallRules.')
if response is not None:
for item in response:
results.append(self.format_item(item))
return results
def format_item(self, item):
d = item.as_dict()
d = {
'id': d['id'],
'resource_group': self.resource_group,
'server_name': self.server_name,
'name': d['name'],
'start_ip_address': d['start_ip_address'],
'end_ip_address': d['end_ip_address']
}
return d
def main():
AzureRMSqlFirewallRuleFacts()
if __name__ == '__main__':
main()
|
{
"content_hash": "b8129aea751ffd1cf05f2303f07be251",
"timestamp": "",
"source": "github",
"line_count": 204,
"max_line_length": 144,
"avg_line_length": 28.86764705882353,
"alnum_prop": 0.5545933095601969,
"repo_name": "SergeyCherepanov/ansible",
"id": "be79e642eab07014823bcde8d558f33dca3725c6",
"size": "6067",
"binary": false,
"copies": "13",
"ref": "refs/heads/master",
"path": "ansible/ansible/modules/cloud/azure/azure_rm_sqlfirewallrule_facts.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Shell",
"bytes": "824"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
from .tracer import Tracer # noqa
|
{
"content_hash": "1e420bb8ab7c45b1fe0200e17704775a",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 38,
"avg_line_length": 25,
"alnum_prop": 0.7466666666666667,
"repo_name": "nornagon/xray-python-opentracing",
"id": "0ec145b06f3645322fbaea01f63bb218a69b6523",
"size": "75",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "xray_ot/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "16213"
}
],
"symlink_target": ""
}
|
from swift.common import utils as swift_utils
from swift.common.middleware import acl as swift_acl
from swift.common.swob import HTTPNotFound, HTTPForbidden, HTTPUnauthorized
class KeystoneAuth(object):
"""Swift middleware to Keystone authorization system.
In Swift's proxy-server.conf add this middleware to your pipeline::
[pipeline:main]
pipeline = catch_errors cache authtoken keystoneauth proxy-server
Make sure you have the authtoken middleware before the
keystoneauth middleware.
The authtoken middleware will take care of validating the user and
keystoneauth will authorize access.
    The authtoken middleware is shipped directly with keystone; it has
    no dependencies other than itself, so you can install it either by
    copying the file directly into your Python path or by installing
    keystone.
If support is required for unvalidated users (as with anonymous
access) or for tempurl/formpost middleware, authtoken will need
to be configured with ``delay_auth_decision`` set to 1. See the
Keystone documentation for more detail on how to configure the
authtoken middleware.
In proxy-server.conf you will need to have the setting account
auto creation to true::
[app:proxy-server]
account_autocreate = true
And add a swift authorization filter section, such as::
[filter:keystoneauth]
use = egg:swift#keystoneauth
operator_roles = admin, swiftoperator
    This maps tenants to accounts in Swift.
    The users able to grant ACLs and create containers are those
    holding one of the roles listed in the ``operator_roles``
    setting, which by default includes the admin and swiftoperator
    roles.
If you need to have a different reseller_prefix to be able to
mix different auth servers you can configure the option
``reseller_prefix`` in your keystoneauth entry like this::
reseller_prefix = NEWAUTH_
    Make sure you have an underscore at the end of your new
    ``reseller_prefix`` option.
:param app: The next WSGI app in the pipeline
:param conf: The dict of configuration values
"""
def __init__(self, app, conf):
self.app = app
self.conf = conf
self.logger = swift_utils.get_logger(conf, log_route='keystoneauth')
self.reseller_prefix = conf.get('reseller_prefix', 'AUTH_').strip()
self.operator_roles = conf.get('operator_roles',
'admin, swiftoperator')
self.reseller_admin_role = conf.get('reseller_admin_role',
'ResellerAdmin')
config_is_admin = conf.get('is_admin', "false").lower()
self.is_admin = swift_utils.config_true_value(config_is_admin)
config_overrides = conf.get('allow_overrides', 't').lower()
self.allow_overrides = swift_utils.config_true_value(config_overrides)
def __call__(self, environ, start_response):
identity = self._keystone_identity(environ)
# Check if one of the middleware like tempurl or formpost have
# set the swift.authorize_override environ and want to control the
# authentication
if (self.allow_overrides and
environ.get('swift.authorize_override', False)):
msg = 'Authorizing from an overriding middleware (i.e: tempurl)'
self.logger.debug(msg)
return self.app(environ, start_response)
if identity:
self.logger.debug('Using identity: %r' % (identity))
environ['keystone.identity'] = identity
environ['REMOTE_USER'] = identity.get('tenant')
environ['swift.authorize'] = self.authorize
if self.reseller_admin_role in identity.get('roles', []):
environ['reseller_request'] = True
else:
self.logger.debug('Authorizing as anonymous')
environ['swift.authorize'] = self.authorize_anonymous
environ['swift.clean_acl'] = swift_acl.clean_acl
return self.app(environ, start_response)
def _keystone_identity(self, environ):
"""Extract the identity from the Keystone auth component."""
if environ.get('HTTP_X_IDENTITY_STATUS') != 'Confirmed':
return
roles = []
if 'HTTP_X_ROLES' in environ:
roles = environ['HTTP_X_ROLES'].split(',')
identity = {'user': environ.get('HTTP_X_USER_NAME'),
'tenant': (environ.get('HTTP_X_TENANT_ID'),
environ.get('HTTP_X_TENANT_NAME')),
'roles': roles}
return identity
def _get_account_for_tenant(self, tenant_id):
return '%s%s' % (self.reseller_prefix, tenant_id)
def _reseller_check(self, account, tenant_id):
"""Check reseller prefix."""
return account == self._get_account_for_tenant(tenant_id)
def _authorize_cross_tenant(self, user, tenant_id, tenant_name, roles):
""" Check cross-tenant ACLs
Match tenant_id:user, tenant_name:user, and *:user.
:param user: The user name from the identity token.
:param tenant_id: The tenant ID from the identity token.
:param tenant_name: The tenant name from the identity token.
:param roles: The given container ACL.
:returns: True if tenant_id:user, tenant_name:user, or *:user matches
the given ACL. False otherwise.
"""
wildcard_tenant_match = '*:%s' % (user)
tenant_id_user_match = '%s:%s' % (tenant_id, user)
tenant_name_user_match = '%s:%s' % (tenant_name, user)
return (wildcard_tenant_match in roles
or tenant_id_user_match in roles
or tenant_name_user_match in roles)
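    # Illustration with hypothetical values: for user='alice',
    # tenant_id='t123' and tenant_name='acme', any of the ACL entries
    # 't123:alice', 'acme:alice' or '*:alice' grants access.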
def authorize(self, req):
env = req.environ
env_identity = env.get('keystone.identity', {})
tenant_id, tenant_name = env_identity.get('tenant')
user = env_identity.get('user', '')
referrers, roles = swift_acl.parse_acl(getattr(req, 'acl', None))
#allow OPTIONS requests to proceed as normal
if req.method == 'OPTIONS':
return
try:
part = req.split_path(1, 4, True)
version, account, container, obj = part
except ValueError:
return HTTPNotFound(request=req)
user_roles = env_identity.get('roles', [])
# Give unconditional access to a user with the reseller_admin
# role.
if self.reseller_admin_role in user_roles:
msg = 'User %s has reseller admin authorizing'
self.logger.debug(msg % tenant_id)
req.environ['swift_owner'] = True
return
# cross-tenant authorization
if self._authorize_cross_tenant(user, tenant_id, tenant_name, roles):
log_msg = 'user %s:%s, %s:%s, or *:%s allowed in ACL authorizing'
self.logger.debug(log_msg % (tenant_name, user,
tenant_id, user, user))
return
acl_authorized = self._authorize_unconfirmed_identity(req, obj,
referrers,
roles)
if acl_authorized:
return
# Check if a user tries to access an account that does not match their
# token
if not self._reseller_check(account, tenant_id):
log_msg = 'tenant mismatch: %s != %s' % (account, tenant_id)
self.logger.debug(log_msg)
return self.denied_response(req)
# Check the roles the user is belonging to. If the user is
# part of the role defined in the config variable
# operator_roles (like admin) then it will be
# promoted as an admin of the account/tenant.
for role in self.operator_roles.split(','):
role = role.strip()
if role in user_roles:
log_msg = 'allow user with role %s as account admin' % (role)
self.logger.debug(log_msg)
req.environ['swift_owner'] = True
return
# If user is of the same name of the tenant then make owner of it.
if self.is_admin and user == tenant_name:
self.logger.warning("the is_admin feature has been deprecated "
"and will be removed in the future "
"update your config file")
req.environ['swift_owner'] = True
return
if acl_authorized is not None:
return self.denied_response(req)
# Check if we have the role in the userroles and allow it
for user_role in user_roles:
if user_role in roles:
log_msg = 'user %s:%s allowed in ACL: %s authorizing'
self.logger.debug(log_msg % (tenant_name, user, user_role))
return
return self.denied_response(req)
def authorize_anonymous(self, req):
"""
Authorize an anonymous request.
:returns: None if authorization is granted, an error page otherwise.
"""
try:
part = req.split_path(1, 4, True)
version, account, container, obj = part
except ValueError:
return HTTPNotFound(request=req)
#allow OPTIONS requests to proceed as normal
if req.method == 'OPTIONS':
return
is_authoritative_authz = (account and
account.startswith(self.reseller_prefix))
if not is_authoritative_authz:
return self.denied_response(req)
referrers, roles = swift_acl.parse_acl(getattr(req, 'acl', None))
authorized = self._authorize_unconfirmed_identity(req, obj, referrers,
roles)
if not authorized:
return self.denied_response(req)
def _authorize_unconfirmed_identity(self, req, obj, referrers, roles):
""""
Perform authorization for access that does not require a
confirmed identity.
:returns: A boolean if authorization is granted or denied. None if
a determination could not be made.
"""
# Allow container sync.
if (req.environ.get('swift_sync_key')
and (req.environ['swift_sync_key'] ==
req.headers.get('x-container-sync-key', None))
and 'x-timestamp' in req.headers):
log_msg = 'allowing proxy %s for container-sync' % req.remote_addr
self.logger.debug(log_msg)
return True
# Check if referrer is allowed.
if swift_acl.referrer_allowed(req.referer, referrers):
if obj or '.rlistings' in roles:
log_msg = 'authorizing %s via referer ACL' % req.referrer
self.logger.debug(log_msg)
return True
return False
def denied_response(self, req):
"""Deny WSGI Response.
Returns a standard WSGI response callable with the status of 403 or 401
depending on whether the REMOTE_USER is set or not.
"""
if req.remote_user:
return HTTPForbidden(request=req)
else:
return HTTPUnauthorized(request=req)
def filter_factory(global_conf, **local_conf):
"""Returns a WSGI filter app for use with paste.deploy."""
conf = global_conf.copy()
conf.update(local_conf)
def auth_filter(app):
return KeystoneAuth(app, conf)
return auth_filter
|
{
"content_hash": "5bd86e8c38ad9df3009dae588090de51",
"timestamp": "",
"source": "github",
"line_count": 295,
"max_line_length": 79,
"avg_line_length": 39.552542372881355,
"alnum_prop": 0.598388755570792,
"repo_name": "Triv90/SwiftUml",
"id": "8c0b8bfb3161638ede704fc16039905bad61563d",
"size": "12292",
"binary": false,
"copies": "2",
"ref": "refs/heads/AddUml",
"path": "swift/common/middleware/keystoneauth.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "2678359"
},
{
"name": "Shell",
"bytes": "439"
}
],
"symlink_target": ""
}
|
from pythonds.basic import Deque
def palchecker(aString):
chardeque = Deque()
for ch in aString:
chardeque.addRear(ch)
stillEqual = True
while chardeque.size() > 1 and stillEqual:
first = chardeque.removeFront()
last = chardeque.removeRear()
if first != last:
stillEqual = False
return stillEqual
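# A small demo (not part of the original listing); assumes the pythonds
# package is installed:
if __name__ == "__main__":
    print(palchecker("radar"))   # True
    print(palchecker("python"))  # False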
|
{
"content_hash": "46d88d8c2f567ad68776c48d6853d44a",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 46,
"avg_line_length": 22.8125,
"alnum_prop": 0.6301369863013698,
"repo_name": "robin1885/algorithms-exercises-using-python",
"id": "ea8dbf8d2fc0d74219a47417052741a37c52ba70",
"size": "365",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "source-code-from-author-book/Listings-for-Second-Edition/listing_3_15.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "182896"
}
],
"symlink_target": ""
}
|
"""This code example creates new orders. To determine which orders exist, run
get_all_orders.py."""
__author__ = 'api.shamjeff@gmail.com (Jeff Sham)'
# Locate the client library. If module was installed via "setup.py" script, then
# the following two lines are not needed.
import os
import sys
sys.path.insert(0, os.path.join('..', '..', '..', '..'))
# Import appropriate classes from the client library.
from adspygoogle import DfpClient
from adspygoogle.common import Utils
# Initialize client object.
client = DfpClient(path=os.path.join('..', '..', '..', '..'))
# Initialize appropriate service.
order_service = client.GetService('OrderService', version='v201211')
# Set advertiser (company), salesperson, and trafficker to assign to each order.
advertiser_id = 'INSERT_ADVERTISER_COMPANY_ID_HERE'
salesperson_id = 'INSERT_SALESPERSON_ID_HERE'
trafficker_id = 'INSERT_TRAFFICKER_ID_HERE'
# Create order objects.
orders = []
for i in xrange(5):
order = {
'name': 'Order #%s' % Utils.GetUniqueName(),
'advertiserId': advertiser_id,
'salespersonId': salesperson_id,
'traffickerId': trafficker_id
}
orders.append(order)
# Add orders.
orders = order_service.CreateOrders(orders)
# Display results.
for order in orders:
print ('Order with id \'%s\' and name \'%s\' was created.'
% (order['id'], order['name']))
|
{
"content_hash": "18055d289e039e552b12aeb8dde020ad",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 80,
"avg_line_length": 30.22222222222222,
"alnum_prop": 0.6933823529411764,
"repo_name": "caioserra/apiAdwords",
"id": "f4bed2b8ffb6f619715d4609c051fcaf9c150b81",
"size": "1978",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "examples/adspygoogle/dfp/v201211/create_orders.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Perl",
"bytes": "47375"
},
{
"name": "Python",
"bytes": "3481410"
},
{
"name": "Shell",
"bytes": "14782"
}
],
"symlink_target": ""
}
|
class Line(object):
def __init__(self, start, end, width, layer):
self.start = start
self.end = end
self.width = width
self.layer = layer
class Rectangle(object):
def __init__(self, topleft, bottomright, layer):
self.topleft = topleft
self.bottomright = bottomright
self.layer = layer
class Polygon(object):
def __init__(self, vertices, width, layer):
self.vertices = vertices
self.width = width
self.layer = layer
class Circle(object):
def __init__(self, position, radius, width, layer):
self.position = position
self.radius = radius
self.width = width
self.layer = layer
class Text(object):
def __init__(self, s, position, size, ratio, layer):
self.s = s
self.position = position
self.size = size
self.ratio = ratio
self.layer = layer
|
{
"content_hash": "a37b6d92679f34f18d0258b5234d1bb9",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 56,
"avg_line_length": 24.783783783783782,
"alnum_prop": 0.5823336968375137,
"repo_name": "storborg/pypcb",
"id": "f6a761f8abf13e2df73d2825bf7034c91bdc350d",
"size": "917",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pypcb/drawing.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "2196"
}
],
"symlink_target": ""
}
|
try:
from setuptools import setup
except ImportError:
"""
I am still trying to make sense of this ez_setup stuff
commenting it out for now
from ez_setup import use_setuptools
use_setuptools()
from setuptools import setup
"""
setup(
name='nom',
version='0.1',
url="https://github.com/JamesMcMahon/nom",
license="MIT License",
author="James McMahon",
description="Simple app to store configuration files",
packages=['nom'],
requires=('GitPython (>=0.3.1)',),
entry_points={
'console_scripts': [
'nom = nom.main:main',
]
},
)
|
{
"content_hash": "f3b2aa9e076c0ee36e19fc5abfd6941f",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 55,
"avg_line_length": 20.40740740740741,
"alnum_prop": 0.6896551724137931,
"repo_name": "JamesMcMahon/nom",
"id": "484b29982149e7ebeee6dc01386696bfe7bd0bf9",
"size": "573",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "8155"
},
{
"name": "Shell",
"bytes": "58"
}
],
"symlink_target": ""
}
|
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from units.compat import unittest
from units.compat.mock import MagicMock
from ansible.template.vars import AnsibleJ2Vars
class TestVars(unittest.TestCase):
def setUp(self):
self.mock_templar = MagicMock(name='mock_templar')
def test(self):
ajvars = AnsibleJ2Vars(None, None)
print(ajvars)
def test_globals_empty_2_8(self):
ajvars = AnsibleJ2Vars(self.mock_templar, {})
res28 = self._dict_jinja28(ajvars)
self.assertIsInstance(res28, dict)
def test_globals_empty_2_9(self):
ajvars = AnsibleJ2Vars(self.mock_templar, {})
res29 = self._dict_jinja29(ajvars)
self.assertIsInstance(res29, dict)
def _assert_globals(self, res):
self.assertIsInstance(res, dict)
self.assertIn('foo', res)
self.assertEqual(res['foo'], 'bar')
def test_globals_2_8(self):
ajvars = AnsibleJ2Vars(self.mock_templar, {'foo': 'bar', 'blip': [1, 2, 3]})
res28 = self._dict_jinja28(ajvars)
self._assert_globals(res28)
def test_globals_2_9(self):
ajvars = AnsibleJ2Vars(self.mock_templar, {'foo': 'bar', 'blip': [1, 2, 3]})
res29 = self._dict_jinja29(ajvars)
self._assert_globals(res29)
def _dicts(self, ajvars):
print(ajvars)
res28 = self._dict_jinja28(ajvars)
res29 = self._dict_jinja29(ajvars)
# res28_other = self._dict_jinja28(ajvars, {'other_key': 'other_value'})
# other = {'other_key': 'other_value'}
# res29_other = self._dict_jinja29(ajvars, *other)
print('res28: %s' % res28)
print('res29: %s' % res29)
# print('res28_other: %s' % res28_other)
# print('res29_other: %s' % res29_other)
# return (res28, res29, res28_other, res29_other)
# assert ajvars == res28
# assert ajvars == res29
return (res28, res29)
def _dict_jinja28(self, *args, **kwargs):
return dict(*args, **kwargs)
def _dict_jinja29(self, the_vars):
return dict(the_vars)
|
{
"content_hash": "aa9887b9cb0c8a0a3df24035f82e5744",
"timestamp": "",
"source": "github",
"line_count": 63,
"max_line_length": 84,
"avg_line_length": 33.73015873015873,
"alnum_prop": 0.6103529411764705,
"repo_name": "thaim/ansible",
"id": "74e6783925b8aecf55a559cb78b687e81e2b423a",
"size": "2870",
"binary": false,
"copies": "94",
"ref": "refs/heads/fix-broken-link",
"path": "test/units/template/test_vars.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7"
},
{
"name": "Shell",
"bytes": "246"
}
],
"symlink_target": ""
}
|
import os.path as op
from mne.datasets import testing
from mne.preprocessing._fine_cal import (read_fine_calibration,
write_fine_calibration)
from mne.utils import _TempDir, object_hash, run_tests_if_main
# Define fine calibration filepaths
data_path = testing.data_path(download=False)
fine_cal_fname = op.join(data_path, 'SSS', 'sss_cal_3053.dat')
fine_cal_fname_3d = op.join(data_path, 'SSS', 'sss_cal_3053_3d.dat')
@testing.requires_testing_data
def test_read_write_fine_cal():
"""Test round trip reading/writing of fine calibration .dat file."""
temp_dir = _TempDir()
temp_fname = op.join(temp_dir, 'fine_cal_temp.dat')
for fname in [fine_cal_fname, fine_cal_fname_3d]:
# Load fine calibration file
fine_cal_dict = read_fine_calibration(fname)
# Save temp version of fine calibration file
write_fine_calibration(temp_fname, fine_cal_dict)
fine_cal_dict_reload = read_fine_calibration(temp_fname)
# Load temp version of fine calibration file and compare hashes
assert (object_hash(fine_cal_dict) ==
object_hash(fine_cal_dict_reload))
run_tests_if_main()
|
{
"content_hash": "495f6c6a5676fa5d8e0c4df9e30d49dc",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 72,
"avg_line_length": 36.24242424242424,
"alnum_prop": 0.6722408026755853,
"repo_name": "adykstra/mne-python",
"id": "2f3bbaf67edcad594cc91bb90c411835ef88b053",
"size": "1267",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "mne/preprocessing/tests/test_fine_cal.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Csound Document",
"bytes": "69806"
},
{
"name": "Makefile",
"bytes": "3928"
},
{
"name": "Python",
"bytes": "6001033"
},
{
"name": "Shell",
"bytes": "936"
}
],
"symlink_target": ""
}
|
from __future__ import print_function, division
import pandas as pd
from datetime import timedelta
from nilmtk.tests.testingtools import data_dir
from os.path import join
import itertools
from collections import OrderedDict
import numpy as np
from nilmtk.consts import JOULES_PER_KWH
from nilmtk.measurement import measurement_columns, AC_TYPES
from nilmtk.utils import flatten_2d_list
MAX_SAMPLE_PERIOD = 15
def power_data(simple=True):
"""
Returns
-------
DataFrame
"""
if simple:
STEP = 10
data = [0, 0, 0, 100, 100, 100, 150,
150, 200, 0, 0, 100, 5000, 0]
secs = np.arange(start=0, stop=len(data) * STEP, step=STEP)
else:
data = [0, 0, 0, 100, 100, 100, 150,
150, 200, 0, 0, 100, 5000, 0]
secs = [0, 10, 20, 30, 200, 210, 220,
230, 240, 249, 260, 270, 290, 1000]
data = np.array(data, dtype=np.float32)
active = data
reactive = data * 0.9
apparent = data * 1.1
index = [pd.Timestamp('2010-01-01') + timedelta(seconds=sec)
for sec in secs]
column_tuples = [('power', ac_type)
for ac_type in ['active', 'reactive', 'apparent']]
df = pd.DataFrame(np.array([active, reactive, apparent]).transpose(),
index=index, dtype=np.float32,
columns=measurement_columns(column_tuples))
# calculate energy
# this is not cumulative energy
timedelta_secs = np.diff(secs).clip(
0, MAX_SAMPLE_PERIOD).astype(np.float32)
for ac_type in AC_TYPES:
joules = timedelta_secs * df['power', ac_type].values[:-1]
joules = np.concatenate([joules, [0]])
kwh = joules / JOULES_PER_KWH
if ac_type == 'reactive':
df['energy', ac_type] = kwh
elif ac_type == 'apparent':
df['cumulative energy', ac_type] = kwh.cumsum()
return df
def create_random_df_hierarchical_column_index():
    N_PERIODS = int(1e4)
N_METERS = 5
N_MEASUREMENTS_PER_METER = 3
meters = ['meter{:d}'.format(i) for i in range(1, N_METERS + 1)]
meters = [[m] * N_MEASUREMENTS_PER_METER for m in meters]
meters = flatten_2d_list(meters)
level2 = ['power', 'power', 'voltage'][
:N_MEASUREMENTS_PER_METER] * N_METERS
level3 = ['active', 'reactive', ''][:N_MEASUREMENTS_PER_METER] * N_METERS
columns = [meters, level2, level3]
columns = pd.MultiIndex.from_arrays(columns)
rng = pd.date_range('2012-01-01', freq='S', periods=N_PERIODS)
data = np.random.randint(low=0, high=1000,
size=(N_PERIODS,
N_METERS * N_MEASUREMENTS_PER_METER))
return pd.DataFrame(data=data, index=rng, columns=columns, dtype=np.float32)
MEASUREMENTS = [('power', 'active'), ('energy', 'reactive'), ('voltage', '')]
def create_random_df():
    N_PERIODS = int(1e4)
rng = pd.date_range('2012-01-01', freq='S', periods=N_PERIODS)
data = np.random.randint(
low=0, high=1000, size=(N_PERIODS, len(MEASUREMENTS)))
return pd.DataFrame(data=data, index=rng, dtype=np.float32,
columns=measurement_columns(MEASUREMENTS))
TEST_METER = {'manufacturer': 'Test Manufacturer',
'model': 'Random Meter',
'sample_period': 10,
'max_sample_period': MAX_SAMPLE_PERIOD,
'measurements': []}
for col in MEASUREMENTS:
TEST_METER['measurements'].append({
'physical_quantity': col[0], 'type': col[1],
'lower_limit': 0, 'upper_limit': 6000})
def add_building_metadata(store, elec_meters, key='building1', appliances=[]):
node = store.get_node(key)
md = {
'instance': 1,
'elec_meters': elec_meters,
'appliances': appliances
}
node._f_setattr('metadata', md)
def create_co_test_hdf5():
FILENAME = join(data_dir(), 'co_test.h5')
N_METERS = 3
chunk = 1000
N_PERIODS = 4 * chunk
rng = pd.date_range('2012-01-01', freq='S', periods=N_PERIODS)
dfs = OrderedDict()
data = OrderedDict()
# mains meter data
data[1] = np.array([0, 200, 1000, 1200] * chunk)
# appliance 1 data
data[2] = np.array([0, 200, 0, 200] * chunk)
# appliance 2 data
data[3] = np.array([0, 0, 1000, 1000] * chunk)
for i in range(1, 4):
dfs[i] = pd.DataFrame(data=data[i], index=rng, dtype=np.float32,
columns=measurement_columns([('power', 'active')]))
store = pd.HDFStore(FILENAME, 'w', complevel=9, complib='zlib')
elec_meter_metadata = {}
for meter in range(1, N_METERS + 1):
key = 'building1/elec/meter{:d}'.format(meter)
print("Saving", key)
store.put(key, dfs[meter], format='table')
elec_meter_metadata[meter] = {
'device_model': TEST_METER['model'],
'submeter_of': 1,
'data_location': key
}
# For mains meter, we need to specify that it is a site meter
del elec_meter_metadata[1]['submeter_of']
elec_meter_metadata[1]['site_meter'] = True
# Save dataset-wide metadata
store.root._v_attrs.metadata = {
'meter_devices': {TEST_METER['model']: TEST_METER}}
print(store.root._v_attrs.metadata)
# Building metadata
add_building_metadata(store, elec_meter_metadata)
for key in store.keys():
print(store[key])
store.flush()
store.close()
def create_random_hdf5():
FILENAME = join(data_dir(), 'random.h5')
N_METERS = 5
store = pd.HDFStore(FILENAME, 'w', complevel=9, complib='zlib')
elec_meter_metadata = {}
for meter in range(1, N_METERS + 1):
key = 'building1/elec/meter{:d}'.format(meter)
print("Saving", key)
store.put(key, create_random_df(), format='table')
elec_meter_metadata[meter] = {
'device_model': TEST_METER['model'],
'submeter_of': 1,
'data_location': key
}
# Save dataset-wide metadata
store.root._v_attrs.metadata = {
'meter_devices': {TEST_METER['model']: TEST_METER}}
print(store.root._v_attrs.metadata)
# Building metadata
add_building_metadata(store, elec_meter_metadata)
store.flush()
store.close()
def create_energy_hdf5(simple=True):
fname = 'energy.h5' if simple else 'energy_complex.h5'
FILENAME = join(data_dir(), fname)
df = power_data(simple=simple)
meter_device = {
'manufacturer': 'Test Manufacturer',
'model': 'Energy Meter',
'sample_period': 10,
'max_sample_period': MAX_SAMPLE_PERIOD,
'measurements': []
}
for physical_quantity, ac_type in df.columns.tolist():
meter_device['measurements'].append({
'physical_quantity': physical_quantity, 'type': ac_type,
'lower_limit': 0, 'upper_limit': 6000})
store = pd.HDFStore(FILENAME, 'w', complevel=9, complib='zlib')
elec_meter_metadata = {}
# Save sensor data
for meter_i in [1, 2, 3]:
key = 'building1/elec/meter{:d}'.format(meter_i)
print("Saving", key)
store.put(key, df, format='table')
meta = {
'device_model': meter_device['model'],
'data_location': key
}
additional_meta = {
1: {'site_meter': True},
2: {'submeter_of': 1},
3: {'submeter_of': 2}
}
meta.update(additional_meta[meter_i])
elec_meter_metadata[meter_i] = meta
# Save dataset-wide metadata
store.root._v_attrs.metadata = {
'meter_devices': {meter_device['model']: meter_device}}
# Add building metadata
add_building_metadata(store, elec_meter_metadata)
store.flush()
store.close()
def create_all():
create_energy_hdf5()
create_energy_hdf5(simple=False)
create_random_hdf5()
create_co_test_hdf5()
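# Note: running create_all() regenerates all four HDF5 test fixtures
# (energy.h5, energy_complex.h5, random.h5 and co_test.h5) in data_dir().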
|
{
"content_hash": "9147e3097794706f790a53f2b1419fe5",
"timestamp": "",
"source": "github",
"line_count": 254,
"max_line_length": 81,
"avg_line_length": 31.098425196850393,
"alnum_prop": 0.5834915812128117,
"repo_name": "josemao/nilmtk",
"id": "7c2de876cff68d8c21303a6288984ff69fd93d44",
"size": "7899",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "nilmtk/tests/generate_data.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "2735"
},
{
"name": "Jupyter Notebook",
"bytes": "16159926"
},
{
"name": "Python",
"bytes": "470818"
}
],
"symlink_target": ""
}
|
import unittest
try:
import unittest.mock as mock
except ImportError:
import mock
from cloudbaseinit.plugins.common import base
from cloudbaseinit.plugins.common import trim
from cloudbaseinit.tests import testutils
class TrimPluginPluginTests(unittest.TestCase):
def setUp(self):
self._trim_plugin = trim.TrimConfigPlugin()
@mock.patch('cloudbaseinit.osutils.factory.get_os_utils')
def _test_trim(self, mock_get_os_utils, status):
shared_data = 'fake_shared_data'
mock_os_utils = mock.Mock()
mock_get_os_utils.return_value = mock_os_utils
with testutils.ConfPatcher('trim_enabled', status):
response = self._trim_plugin.execute(mock.Mock(), shared_data)
mock_os_utils.enable_trim.assert_called_once_with(status)
self.assertEqual(response, (base.PLUGIN_EXECUTION_DONE, False))
def test_trim_enable(self):
self._test_trim(status=True)
def test_trim_disable(self):
self._test_trim(status=False)
def test_get_os_requirements(self):
response = self._trim_plugin.get_os_requirements()
self.assertEqual(response, ('win32', (6, 1)))
|
{
"content_hash": "2cc59d5cd194287b6f34bee8d00d16e6",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 74,
"avg_line_length": 30,
"alnum_prop": 0.6871794871794872,
"repo_name": "ader1990/cloudbase-init",
"id": "488ea9f10f29523aab768e01484218a190fa9cee",
"size": "1786",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "cloudbaseinit/tests/plugins/common/test_trim.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1245243"
}
],
"symlink_target": ""
}
|
import os
import sys
import time
import datetime
import json
import jsonpath_rw
from datetime import datetime
def validate_input(helper, definition):
"""Implement your own validation logic to validate the input stanza configurations"""
# This example accesses the modular input variable
# server = definition.parameters.get('server', None)
# port = definition.parameters.get('port', None)
pass
def collect_events(helper, ew):
import datetime
import json
import jsonpath_rw
method = 'GET'
api_request = 'application/json'
api_token = helper.get_global_setting("token_")
server = helper.get_arg('server_')
port = helper.get_arg('port_')
pe_token = helper.get_arg('token_')
pe_link = helper.get_arg('puppet_enterprise_server_')
url = server + ":" + port + "/status/v1/services"
    if pe_link:
        input_source = pe_link
    else:
        # assumed fallback: use the server address when no PE link is set
        input_source = server
headers = {
'X-Authentication': pe_token,
'Content-type': api_request
}
response = helper.send_http_request(url,
method,
parameters=None,
payload=None,
headers=headers,
cookies=None,
verify=False,
cert=None,
timeout=None,
use_proxy=True)
r_status = response.status_code
response.raise_for_status()
helper.log_error (response.text)
r= response.json()
input_type = helper.get_input_type()
for stanza_name in helper.get_input_stanza_names():
data = json.dumps(r, sort_keys=False)
event = helper.new_event(source=input_source, index=helper.get_output_index(stanza_name), sourcetype=helper .get_sourcetype(stanza_name), data=data)
helper.log_error (response.text)
try:
ew.write_event(event)
helper.log_error (response.text)
except Exception as e:
raise e
    return
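# For reference (values hypothetical): with server "https://pe.example.com"
# and port "8143", the request above targets
# "https://pe.example.com:8143/status/v1/services", passing the PE token in
# the X-Authentication header.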
|
{
"content_hash": "f6c5b5109819f7f5ae852ed82954ec01",
"timestamp": "",
"source": "github",
"line_count": 82,
"max_line_length": 156,
"avg_line_length": 31.695121951219512,
"alnum_prop": 0.550596383224317,
"repo_name": "domeger/SplunkTAforPuppetEnterprise",
"id": "645f5833e91a352d0d286c6f804e7d87942453a7",
"size": "2620",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bin/input_module_puppet_enterprise_status_overview.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "5936"
},
{
"name": "CSS",
"bytes": "4504"
},
{
"name": "HTML",
"bytes": "5156"
},
{
"name": "Python",
"bytes": "5659367"
}
],
"symlink_target": ""
}
|
__title__ = "spacy"
__version__ = "3.2.1"
__download_url__ = "https://github.com/explosion/spacy-models/releases/download"
__compatibility__ = "https://raw.githubusercontent.com/explosion/spacy-models/master/compatibility.json"
__projects__ = "https://github.com/explosion/projects"
__projects_branch__ = "v3"
|
{
"content_hash": "db16a58666265356ae86d0d45621898f",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 104,
"avg_line_length": 51.666666666666664,
"alnum_prop": 0.7096774193548387,
"repo_name": "honnibal/spaCy",
"id": "c253d5052757e4dba5b9e138013ccc21eb47f082",
"size": "321",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "spacy/about.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Cython",
"bytes": "729544"
},
{
"name": "HTML",
"bytes": "26303"
},
{
"name": "JavaScript",
"bytes": "234039"
},
{
"name": "Jinja",
"bytes": "10482"
},
{
"name": "Makefile",
"bytes": "1576"
},
{
"name": "Python",
"bytes": "3361067"
},
{
"name": "Sass",
"bytes": "56639"
},
{
"name": "Shell",
"bytes": "984"
}
],
"symlink_target": ""
}
|
import json
import logging
from base64 import b64decode
from datetime import datetime
from urlparse import parse_qs, urlparse
from redash.query_runner import *
from redash.utils import JSONEncoder
logger = logging.getLogger(__name__)
try:
from oauth2client.service_account import ServiceAccountCredentials
from apiclient.discovery import build
from apiclient.errors import HttpError
import httplib2
enabled = True
except ImportError as e:
enabled = False
types_conv = dict(
STRING=TYPE_STRING,
INTEGER=TYPE_INTEGER,
FLOAT=TYPE_FLOAT,
DATE=TYPE_DATE,
DATETIME=TYPE_DATETIME
)
def parse_ga_response(response):
columns = []
for h in response['columnHeaders']:
if h['name'] in ('ga:date', 'mcf:conversionDate'):
h['dataType'] = 'DATE'
elif h['name'] == 'ga:dateHour':
h['dataType'] = 'DATETIME'
columns.append({
'name': h['name'],
'friendly_name': h['name'].split(':', 1)[1],
'type': types_conv.get(h['dataType'], 'string')
})
rows = []
for r in response['rows']:
d = {}
for c, value in enumerate(r):
column_name = response['columnHeaders'][c]['name']
column_type = filter(lambda col: col['name'] == column_name, columns)[0]['type']
# mcf results come a bit different than ga results:
if isinstance(value, dict):
if 'primitiveValue' in value:
value = value['primitiveValue']
elif 'conversionPathValue' in value:
steps = []
for step in value['conversionPathValue']:
steps.append('{}:{}'.format(step['interactionType'], step['nodeValue']))
value = ', '.join(steps)
else:
raise Exception("Results format not supported")
if column_type == TYPE_DATE:
value = datetime.strptime(value, '%Y%m%d')
elif column_type == TYPE_DATETIME:
if len(value) == 10:
value = datetime.strptime(value, '%Y%m%d%H')
elif len(value) == 12:
value = datetime.strptime(value, '%Y%m%d%H%M')
else:
raise Exception("Unknown date/time format in results: '{}'".format(value))
d[column_name] = value
rows.append(d)
return {'columns': columns, 'rows': rows}
class GoogleAnalytics(BaseSQLQueryRunner):
@classmethod
def annotate_query(cls):
return False
@classmethod
def type(cls):
return "google_analytics"
@classmethod
def name(cls):
return "Google Analytics"
@classmethod
def enabled(cls):
return enabled
@classmethod
def configuration_schema(cls):
return {
'type': 'object',
'properties': {
'jsonKeyFile': {
"type": "string",
'title': 'JSON Key File'
}
},
'required': ['jsonKeyFile'],
'secret': ['jsonKeyFile']
}
def __init__(self, configuration):
super(GoogleAnalytics, self).__init__(configuration)
self.syntax = 'json'
def _get_analytics_service(self):
scope = ['https://www.googleapis.com/auth/analytics.readonly']
key = json.loads(b64decode(self.configuration['jsonKeyFile']))
creds = ServiceAccountCredentials.from_json_keyfile_dict(key, scope)
return build('analytics', 'v3', http=creds.authorize(httplib2.Http()))
def _get_tables(self, schema):
accounts = self._get_analytics_service().management().accounts().list().execute().get('items')
if accounts is None:
raise Exception("Failed getting accounts.")
else:
for account in accounts:
schema[account['name']] = {'name': account['name'], 'columns': []}
properties = self._get_analytics_service().management().webproperties().list(
accountId=account['id']).execute().get('items', [])
for property_ in properties:
if 'defaultProfileId' in property_ and 'name' in property_:
schema[account['name']]['columns'].append(
u'{0} (ga:{1})'.format(property_['name'], property_['defaultProfileId'])
)
return schema.values()
def test_connection(self):
try:
service = self._get_analytics_service()
service.management().accounts().list().execute()
except HttpError as e:
# Make sure we return a more readable error to the end user
raise Exception(e._get_reason())
def run_query(self, query, user):
logger.debug("Analytics is about to execute query: %s", query)
try:
params = json.loads(query)
except:
params = parse_qs(urlparse(query).query, keep_blank_values=True)
for key in params.keys():
params[key] = ','.join(params[key])
if '-' in key:
params[key.replace('-', '_')] = params.pop(key)
if 'mcf:' in params['metrics'] and 'ga:' in params['metrics']:
raise Exception("Can't mix mcf: and ga: metrics.")
if 'mcf:' in params.get('dimensions', '') and 'ga:' in params.get('dimensions', ''):
raise Exception("Can't mix mcf: and ga: dimensions.")
if 'mcf:' in params['metrics']:
api = self._get_analytics_service().data().mcf()
else:
api = self._get_analytics_service().data().ga()
if len(params) > 0:
try:
response = api.get(**params).execute()
data = parse_ga_response(response)
error = None
json_data = json.dumps(data, cls=JSONEncoder)
except HttpError as e:
# Make sure we return a more readable error to the end user
error = e._get_reason()
json_data = None
else:
error = 'Wrong query format.'
json_data = None
return json_data, error
register(GoogleAnalytics)
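# Sketch of the two query formats run_query() accepts (identifiers are
# hypothetical). Either a JSON object:
#   {"ids": "ga:12345", "start_date": "2015-01-01",
#    "end_date": "2015-01-31", "metrics": "ga:sessions"}
# or the query string of a GA API URL such as
#   .../data/ga?ids=ga:12345&start-date=2015-01-01&end-date=2015-01-31
# in which case dashed keys are rewritten to underscores before the call.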
|
{
"content_hash": "daab76d6f4167ecadd7df105af2bac0d",
"timestamp": "",
"source": "github",
"line_count": 183,
"max_line_length": 102,
"avg_line_length": 34.46448087431694,
"alnum_prop": 0.5438401775804661,
"repo_name": "moritz9/redash",
"id": "cd14724b664a86ffeafcf61b52f1925e9028763e",
"size": "6332",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "redash/query_runner/google_analytics.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "169117"
},
{
"name": "Dockerfile",
"bytes": "431"
},
{
"name": "HTML",
"bytes": "199781"
},
{
"name": "JavaScript",
"bytes": "403365"
},
{
"name": "Mako",
"bytes": "494"
},
{
"name": "Python",
"bytes": "816069"
},
{
"name": "Shell",
"bytes": "10557"
}
],
"symlink_target": ""
}
|
from django.contrib import admin
from django.core.exceptions import ImproperlyConfigured
from django.db import models
from django.db.models import get_model
from reversion.admin import VersionAdmin
from seamoss.models import *
from seamoss.forms import PageForm
try:
    tinymce = models.get_app('tinymce')
except ImproperlyConfigured:
    tinymce = False
class PageAdmin(VersionAdmin):
ordering = ('title',)
list_display = ('title', 'published', 'created_on')
list_filter = ('published',)
prepopulated_fields = {'slug': ('title',)}
save_on_top = True
if tinymce:
form = PageForm
class MenuAdmin(VersionAdmin):
ordering = ('title',)
list_display = ('title', 'published', 'created_on')
list_filter = ('published',)
prepopulated_fields = {'slug': ('title',)}
save_on_top = True
class BlockAdmin(VersionAdmin):
ordering=('name',)
list_display = ('name', 'slug', 'created_on')
list_filter = ('published',)
prepopulated_fields = {'slug': ('name',)}
save_on_top = True
class MenuItemAdmin(admin.ModelAdmin):
list_display = ('name', 'created_on',)
class SettingAdmin(admin.ModelAdmin):
ordering = ('setting_key',)
list_display = ('setting_key', 'setting_value',)
admin.site.register(get_model('seamoss', 'page'), PageAdmin)
admin.site.register(get_model('seamoss', 'block'), BlockAdmin)
admin.site.register(get_model('seamoss', 'menu'), MenuAdmin)
admin.site.register(get_model('seamoss', 'menuitem'), MenuItemAdmin)
admin.site.register(get_model('seamoss', 'cmssetting'), SettingAdmin)
|
{
"content_hash": "8df248b07e66f847626c5c03b867c4dd",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 69,
"avg_line_length": 32.369565217391305,
"alnum_prop": 0.6736064472800537,
"repo_name": "gregnewman/django-seamoss",
"id": "c497ebb0f9d8123e0807e405d5eee4570e7c0e73",
"size": "1489",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "seamoss/admin.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "13166"
}
],
"symlink_target": ""
}
|
import unittest2
from neutron import context
class TestBase(unittest2.TestCase):
'''Class to decide which unit test class to inherit from uniformly.'''
def setUp(self):
super(TestBase, self).setUp()
self.context = context.Context('fake', 'fake', is_admin=False)
class FakeContext(object):
def __new__(cls, *args, **kwargs):
return super(FakeContext, cls).__new__(cls)
def __enter__(*args, **kwargs):
pass
def __exit__(*args, **kwargs):
pass
self.context.session.begin = FakeContext
|
{
"content_hash": "0348b4bbe34f8c8a59243f3bffa9cc13",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 74,
"avg_line_length": 26.695652173913043,
"alnum_prop": 0.5700325732899023,
"repo_name": "jkoelker/quark",
"id": "d944cf2e165fa48e50e9458dcde97c93e5f4876a",
"size": "1204",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "quark/tests/test_base.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "466498"
}
],
"symlink_target": ""
}
|
import commands
import math
class MemInfo:
def __init__(self, options):
pass
def report(self):
output = commands.getoutput("vm_stat")
splitted = output.split("\n")
pageSize = int(splitted[0].split()[7])
free = int(splitted[1].split()[2][:-1])
active = int(splitted[2].split()[2][:-1])
inactive = int(splitted[3].split()[2][:-1])
speculative = int(splitted[4].split()[2][:-1])
wired = int(splitted[5].split()[3][:-1])
        # Derive how far to shift page counts so the results come out in MB:
        # for the common 4 KiB page size, pageSize >> 10 == 4 and sqrt(4) == 2,
        # so counts below are shifted by (10 - 2) == 8, i.e. divided by 256.
        pageSizeShift = int(math.sqrt(pageSize >> 10))
        # Speculative pages are effectively reclaimable, so count them as free.
        free += speculative
        total = active + inactive + wired + free
        # The shifts below convert raw page counts to megabytes.
return {
"free": free >> (10 - pageSizeShift),
"total": total >> (10 - pageSizeShift),
"active": active >> (10 - pageSizeShift),
"inactive": inactive >> (10 - pageSizeShift),
"wired": wired >> (10 - pageSizeShift),
}
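if __name__ == '__main__':
    # Minimal manual check (an illustrative addition, not part of the original
    # probe): assumes a macOS host where `vm_stat` is available and prints the
    # report, whose values are megabyte counts.
    import pprint
    pprint.pprint(MemInfo(None).report())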
|
{
"content_hash": "ba3554a75793d80c37fe95a6b9f745b8",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 57,
"avg_line_length": 29.65625,
"alnum_prop": 0.5226554267650158,
"repo_name": "diego351/monster",
"id": "f3a2be847c3a75f6fcb7151d6bf87348ee9405a6",
"size": "968",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "lib/probes/osx/MemInfo.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "1601"
},
{
"name": "JavaScript",
"bytes": "18613"
},
{
"name": "Python",
"bytes": "43400"
}
],
"symlink_target": ""
}
|
import pyreally
import logging
import time
import pprint
import random
logging.basicConfig(level=logging.DEBUG)
if __name__ == '__main__':
try:
pp = pprint.PrettyPrinter(depth=6)
really = pyreally.Really()
really.login_anonymous()
res1 = really.create(pyreally.R("/users"), {"firstName": "Kareem", "lastName": "Sameer", "age": 55})
print(res1.result())
time.sleep(2)
f = really.get(res1.result().r)
print("The result is: %s" % f.result().get_raw_response())
print("QUERY BEFORE DELETE")
ff = really.query("/users", "_r = $r", {"r": str(res1.result().r)}, include_total=True)
print(ff.result().get_raw_response())
f3 = really.delete(f.result().r)
print(" I got : %s" % f3.result().get_raw_response())
time.sleep(2)
# f = really.get(res1.result().r)
# print("I got a result: %s" % f.result().get_raw_response())
print("QUERY AFTER DELETE")
ff = really.query("/users", "_r = $r", {"r": str(f.result().r)}, include_total=True)
print(ff.result().get_raw_response())
# query = "firstName = $name"
# query_args = {"name": "Ahmed"}
# f2 = really.query("/users")
# print("The QUERY result:")
# result = f2.result()
# print(result)
# pp.pprint(map(lambda obj: obj.body, result.items))
# nextToken = result.next_token
# print("NEXT TOKEN: %s" % nextToken)
# f2 = really.query("/users", limit=2, include_total=True, pagination_token=nextToken)
# print("The QUERY result:")
# pp.pprint(f2.result().items)
# print("Total: %s" % f2.result().count)
#
# result = f2.result()
# pp.pprint(result.get_raw_response())
# nextToken = result.next_token
# print("NEXT TOKEN: %s" % nextToken)
# f2 = really.query("/users", limit=2, include_total=True, pagination_token=nextToken)
# print("The QUERY result:")
# pp.pprint(f2.result().items)
# f4 = really.delete(res.r)
# print("DELETE result: %s" % f4.result())
# names = ["Ahmed", "khaled", "ibrahim", "hussein", "zaki", "Ismail", "Sinar", "Refaey", "Amal" , "Ihab"]
# results = []
# for i in range(500):
# results.append(really.create(pyreally.R("/users"), {"firstName": random.choice(names), "lastName": random.choice(names), "age": 55}))
#
# for result in results:
# print("The QUERY result: %s" % result.result())
# currentHatem = result.result()
# pp.pprint(currentHatem.body['fullName'])
# sub_result = really.subscribe(currentHatem.r, None, currentHatem.rev, ['firstName'])
# print("I got %s" % sub_result.result().get_raw_response())
# # hatem = really.get('/users/5967264813380931584/')
#
# ops = [pyreally.Update('set', 'lastName', 'Medhat')]
# resp = really.update(currentHatem.r, ops, currentHatem.rev)
# print(resp.result().get_raw_response())
# hatem2 = really.get(currentHatem.r)
# print("Hatem is now %s:" % hatem2.result().body)
#
#
# ops = [pyreally.Update('set', 'firstName', 'Mika')]
# logging.info("Hatem now is: %s" % currentHatem.body)
# resp = really.update(currentHatem.r, ops, hatem2.result().rev)
# print(resp.result().get_raw_response())
# hatem2 = really.get(currentHatem.r)
# print("Hatem is now %s:" % hatem2.result().body)
time.sleep(5)
except KeyboardInterrupt:
print("KEYBOARD INTERRUPT, Closing REALLY")
really.close()
except pyreally.OperationError as e:
print("OPERATION FAILED %s" % e)
time.sleep(10)
really.close()
|
{
"content_hash": "9d8efa702dce8d6e7a6c24bb6e6e0e86",
"timestamp": "",
"source": "github",
"line_count": 93,
"max_line_length": 147,
"avg_line_length": 40.924731182795696,
"alnum_prop": 0.5641093011035208,
"repo_name": "reallylabs/pyreally",
"id": "e2460ecb8d75efb5310a95f2a9e43757144cba4f",
"size": "3806",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "runner.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "26939"
}
],
"symlink_target": ""
}
|
import unittest
import runner
class TestRunnerMethods(unittest.TestCase):
def test_count_syllables(self):
syllables = runner.countSyllables('hello')
self.assertEqual(syllables, 2)
def test_is_haiku(self):
trueHaiku = runner.isHaiku("I am a haiku trust me this is a haiku, I'm dead serious")
falseHaiku = runner.isHaiku("I'm a donut")
self.assertTrue(trueHaiku)
self.assertFalse(falseHaiku)
#def test_generate_random_haiku(self):
# haiku = runner.generateHaiku('the')
# self.assertIsInstance(haiku, basestring)
# self.assertEqual(haiku, 'Hello, World')
def test_pick_random_word(self):
word = runner.pickRandomWord(1)
from models import Unigram
queryWord = Unigram.query.filter(Unigram.word1 == word)[0].word1
self.assertIsInstance(word, basestring)
self.assertEqual(word, queryWord)
def test_generate_line(self):
line = runner.generateLine(4, "the")
lineSyllables = runner.countSyllables(line)
self.assertEqual(lineSyllables, 5)
def test_grab_possible_words(self):
from models import Unigram
unigrams = Unigram.query.filter(Unigram.word1 == 'the')
possibleWords = runner.grabPossibleWords("the", 1)
self.assertIsInstance(possibleWords, list)
self.assertEqual(len(possibleWords), 1)
if __name__ == '__main__':
unittest.main()
|
{
"content_hash": "37f1211286a87a62c8ad76e02c27d9fb",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 89,
"avg_line_length": 31.785714285714285,
"alnum_prop": 0.7131086142322097,
"repo_name": "hollabaq86/haikuna-matata",
"id": "28b9ce3677dc27d46bf0234d36065fca34f1926d",
"size": "1335",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "runner_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2628"
},
{
"name": "HTML",
"bytes": "6169"
},
{
"name": "JavaScript",
"bytes": "2841"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "19501"
}
],
"symlink_target": ""
}
|
import os
import yaml
from .storage import Storage
class LocalStorage(Storage):
def __init__(self, path):
if path is None:
path = '~/.spinbot/cache'
self.path = os.path.expanduser(path)
super().__init__()
    def store(self, key, val):
        # Read the existing properties before writing: the previous 'w+' mode
        # truncated the file on open, silently dropping all stored keys.
        try:
            with open(self.path, 'r') as f:
                props = yaml.safe_load(f)
        except FileNotFoundError:
            props = None
        if props is None:
            props = {}
        props[key] = val
        with open(self.path, 'w') as f:
            f.write(yaml.safe_dump(props))
def load(self, key):
try:
with open(self.path, 'r') as f:
props = yaml.safe_load(f)
if props is None:
props = {}
return props.get(key, None)
except FileNotFoundError:
return None
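# Minimal round-trip sketch (an illustrative addition; the cache path is an
# assumption). The package-relative import above means this file must be run
# as a module from the package root rather than as a bare script.
if __name__ == '__main__':
    storage = LocalStorage('/tmp/spinbot-cache-demo')
    storage.store('last_run', '2020-01-01')
    print(storage.load('last_run'))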
|
{
"content_hash": "45fb07fb1a1d5e30d3ef845f59d40fa5",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 44,
"avg_line_length": 26.862068965517242,
"alnum_prop": 0.4890885750962773,
"repo_name": "ewiseblatt/spinnaker",
"id": "eb43c9ded7190c1c17946d13e1a18ff3f3265f9b",
"size": "779",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "spinbot/storage/local_storage.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1832"
},
{
"name": "Go",
"bytes": "8690"
},
{
"name": "HTML",
"bytes": "614"
},
{
"name": "Jsonnet",
"bytes": "35034"
},
{
"name": "Makefile",
"bytes": "276"
},
{
"name": "Python",
"bytes": "1250457"
},
{
"name": "Shell",
"bytes": "185432"
},
{
"name": "Smarty",
"bytes": "2087"
}
],
"symlink_target": ""
}
|
import copy
import itertools
import re
from abc import ABC, abstractmethod
from typing import Iterable, Type
from morelia.exceptions import MissingStepError
from morelia.i18n import TRANSLATIONS
PLACEHOLDER_RE = re.compile(r"\<(\w+)\>")
class Visitor(ABC): # pragma: nocover
@abstractmethod
def visit_feature(self, node: "Feature", children: Iterable["Node"]) -> None:
pass
@abstractmethod
def visit_scenario(self, node: "Scenario", children: Iterable["Node"]) -> None:
pass
@abstractmethod
def visit_step(self, node: "Step", children: Iterable["Node"]) -> None:
pass
@abstractmethod
def visit_background(self, node: "Background") -> None:
pass
@abstractmethod
def visit_row(self, node: "Row") -> None:
pass
@abstractmethod
def visit_examples(self, node: "Examples", children: Iterable["Node"]) -> None:
pass
@abstractmethod
def visit_comment(self, node: "Comment") -> None:
pass
def visit_children(self, children: Iterable["Node"]) -> None:
for child in children:
child.accept(self)
class Node(ABC):
allowed_parents = () # type: Iterable[Type[Node]]
def __init__(
        self, source="", line_number=0, language="en", labels=None, predecessors=()
):
self.__source = source
self.__line_number = line_number
self.__language = language
self.__labels = labels if labels is not None else []
self.steps = []
self.parent = None
self.__predicate = self.__extract_predicate()
self.parent = self.__find_parent(predecessors)
self.__connect_to_parent()
self._validate_predicate()
def __connect_to_parent(self):
if self.parent:
self.parent.add_child(self)
def __find_parent(self, predecessors):
allowed_parents = self.allowed_parents
if not allowed_parents and predecessors:
self.enforce(False, "Only one Feature per file")
for step in predecessors[::-1]:
if isinstance(step, allowed_parents):
return step
return None
def __extract_predicate(self):
node_re = self.__get_compiled_pattern(self.__language)
return node_re.sub("", self.source).strip()
@classmethod
def match(cls, line, language):
node_re = cls.__get_compiled_pattern(language)
return node_re.match(line)
@classmethod
    def __get_compiled_pattern(cls, language, __memo={}):
        # The mutable default argument is deliberate: it acts as a per-process
        # cache of compiled patterns, keyed by (class, language).
try:
return __memo[cls, language]
except KeyError:
pattern = cls._get_pattern(language)
node_re = __memo[cls, language] = re.compile(pattern)
return node_re
@classmethod
def _get_pattern(cls, language):
class_name = cls.__name__
name = class_name.lower()
name = TRANSLATIONS[language].get(name, class_name)
return r"^\s*({name}):?(\s+|$)".format(name=name)
@property
def source(self):
return self.__source
@property
def line_number(self):
return self.__line_number
@property
def predicate(self):
return self.__predicate
def append_line(self, line):
self.__source += "\n" + line
self.__predicate = (self.__predicate + "\n" + line.strip()).strip()
self._validate_predicate()
def _validate_predicate(self):
return # looks good! (-:
def get_labels(self):
labels = self.__labels[:]
if self.parent:
labels.extend(self.parent.get_labels())
return labels
def get_all_steps(self):
return itertools.chain.from_iterable(
child.get_all_steps() for child in self.steps
)
def add_child(self, child):
self.steps.append(child)
def enforce(self, condition, diagnostic):
if not condition:
text = ""
offset = 1
if self.parent:
text = self.parent.source
offset = 5
text += self.source
text = text.replace("\n\n", "\n").replace("\n", "\n\t")
raise SyntaxError(
diagnostic, (self.get_filename(), self.line_number, offset, text)
)
def get_filename(self):
try:
return self.parent.get_filename() if self.parent else self.filename
except AttributeError:
return None
def interpolated_source(self):
return self.source + "\n"
def format_fault(self, diagnostic):
parent_reconstruction = ""
if self.parent:
parent_reconstruction = self.parent.source.strip("\n")
args = (
self.get_filename(),
self.line_number,
parent_reconstruction,
self.source,
diagnostic,
)
return '\n File "%s", line %s, in %s\n %s\n%s' % args
@abstractmethod
def accept(self, visitor: Visitor) -> None: # pragma: nocover
pass
class Feature(Node):
def accept(self, visitor: Visitor) -> None:
visitor.visit_feature(self, self.steps)
def prepend_steps(self, scenario):
background = self.steps[0]
try:
background.prepend_steps(scenario)
except AttributeError:
pass
class Scenario(Node):
allowed_parents = (Feature,)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.row_indices = [0]
def accept(self, visitor: Visitor) -> None:
self.parent.prepend_steps(self)
self.enforce(
0 < len(self.steps),
"Scenario without step(s) - Step, Given, When, Then, And, or #",
)
schedule = self.permute_schedule()
old_row_indices = self.row_indices
try:
for indices in schedule:
self.row_indices = indices
visitor.visit_scenario(self, self.steps)
finally:
self.row_indices = old_row_indices
def permute_schedule(self):
dims = self.count_Row_dimensions()
return _permute_indices(dims)
def count_Row_dimensions(self):
return [step.rows_number for step in self.steps if isinstance(step, RowParent)]
def get_all_steps(self):
return (step for step in self.steps if isinstance(step, Step))
class Background(Node):
allowed_parents = (Feature,)
def accept(self, visitor: Visitor) -> None:
visitor.visit_background(self)
def prepend_steps(self, scenario):
try:
return scenario.background_steps
except AttributeError:
background_steps = []
for step in self.steps:
new_step = copy.copy(step)
new_step.parent = scenario
background_steps.append(new_step)
scenario.steps = background_steps + scenario.steps
scenario.background_steps = background_steps
return background_steps
def count_Row_dimensions(self):
return [0]
def get_all_steps(self):
return (step for step in self.steps if isinstance(step, Step))
class RowParent(Node):
@property
def rows_number(self):
rows_number = len(self.get_rows()) - 1 # do not count header
return max(0, rows_number)
def get_rows(self):
return [step for step in self.steps if isinstance(step, Row)]
class Step(RowParent):
allowed_parents = (Scenario, Background)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.payload = ""
def accept(self, visitor: Visitor) -> None:
visitor.visit_step(self, self.steps)
def find_method(self, matcher):
"""Find method matching step.
:param IStepMatcher matcher: object matching methods by given predicate
:returns: (method, args, kwargs) tuple
:rtype: tuple
        :raises MissingStepError: if method matching step not found
"""
predicate = self.predicate
augmented_predicate = self.__get_interpolated_predicate()
method, args, kwargs = matcher.find(predicate, augmented_predicate)
if method:
return method, args, kwargs
suggest, method_name, docstring = matcher.suggest(predicate)
raise MissingStepError(predicate, suggest, method_name, docstring)
def interpolated_source(self):
augmented_predicate = self.__get_interpolated_predicate()
return self.source.replace(self.predicate, augmented_predicate)
def __get_interpolated_predicate(self):
if self.parent is None:
return self.predicate
if self.__parent_has_no_rows():
return self.predicate
placeholders = PLACEHOLDER_RE.findall(self.predicate)
if not placeholders:
return self.predicate
return self.__replace_placeholders_in_predicate(placeholders)
def __parent_has_no_rows(self):
dims = self.parent.count_Row_dimensions()
return not any(dims)
def __replace_placeholders_in_predicate(self, placeholders):
copy = self.predicate[:]
row_indices = self.parent.row_indices
siblings = self.parent.steps
for step_idx, row_idx in enumerate(row_indices):
step = siblings[step_idx]
table = step.get_rows()
if len(table) > 1:
header = table[0]
body = table[1:]
for column_idx, column_title in enumerate(header.values):
value = body[row_idx][column_idx]
copy = self.__replace_placeholders(
column_title, value, placeholders, copy
)
return copy
def __replace_placeholders(self, column_title, table_value, placeholders, copy):
for placeholder in placeholders:
if column_title == placeholder:
return self.__replace_placeholder(copy, placeholder, table_value)
return copy
def __replace_placeholder(self, copy, placeholder, table_value):
table_value = table_value.replace("\n", "\\n")
return copy.replace(
"<{placeholder}>".format(placeholder=placeholder), table_value
)
class Given(Step):
pass
class When(Step):
pass
class Then(Step):
pass
class And(Step):
pass
class But(And):
pass
class Examples(RowParent):
allowed_parents = (Scenario,)
def accept(self, visitor: Visitor) -> None:
visitor.visit_examples(self, self.steps)
class Row(Node):
allowed_parents = (Step, Examples)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._validate_predicate()
def _validate_predicate(self):
# TODO: validate that grandparent is not Background
row = re.split(r" \|", re.sub(r"\|$", "", self.predicate))
self.__values = [s.strip() for s in row]
def __getitem__(self, column_idx):
return self.__values[column_idx]
@property
def values(self):
return self.__values
def accept(self, visitor: Visitor) -> None:
visitor.visit_row(self)
@classmethod
def _get_pattern(cls, language):
return r"^\s*(\|):?\s+"
class Comment(Node):
allowed_parents = (Node,)
def accept(self, visitor: Visitor) -> None:
visitor.visit_comment(self)
@classmethod
def _get_pattern(cls, language):
return r"\s*(\#)"
def _validate_predicate(self):
self.enforce("\n" not in self.predicate, "linefeed in comment")
def _permute_indices(arr):
product_args = list(_imap(arr))
result = list(itertools.product(*product_args))
return result
# tx to Chris Rebert, et al, on the Python newsgroup for curing my brainlock here!!
def _imap(*iterables):
iterables = [iter(i) for i in iterables]
while True:
try:
args = [next(i) for i in iterables]
yield _special_range(*args)
except StopIteration:
return
def _special_range(n):
return range(n) if n else [0]
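# Worked example (an illustrative addition, not part of the module): a
# scenario whose steps carry tables with 2 and 3 body rows is scheduled as
# every (row, row) combination:
#
#     >>> _permute_indices([2, 3])
#     [(0, 0), (0, 1), (0, 2), (1, 0), (1, 1), (1, 2)]
#
# A dimension of 0 (no table) collapses to the single index 0 via
# _special_range, so table-less steps do not multiply the schedule.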
|
{
"content_hash": "6fd1c059fa8404623bb2b54fadebcf11",
"timestamp": "",
"source": "github",
"line_count": 419,
"max_line_length": 88,
"avg_line_length": 28.92124105011933,
"alnum_prop": 0.5921769268856247,
"repo_name": "kidosoft/Morelia",
"id": "3c8b4e33c7f729530cec657d5090e4829a534b3f",
"size": "12118",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "morelia/grammar.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Gherkin",
"bytes": "11565"
},
{
"name": "Makefile",
"bytes": "1540"
},
{
"name": "Python",
"bytes": "180369"
}
],
"symlink_target": ""
}
|
DOWNLOAD_PATH = '/tmp/backgrounds/'
CUSTOM_FOLDER = 'nasa-apod-backgrounds'
RESOLUTION_TYPE = 'stretch'
RESOLUTION_X = 1024
RESOLUTION_Y = 768
NASA_APOD_SITE = 'http://apod.nasa.gov/apod/'
IMAGE_SCROLL = True
IMAGE_DURATION = 1200
SEED_IMAGES = 10
SHOW_DEBUG = False
import glib
import subprocess
import commands
import urllib
import urllib2
import re
import os
import random
import glob
from PIL import Image
from sys import stdout
from sys import exit
from lxml import etree
from datetime import datetime, timedelta
# Use XRandR to grab the desktop resolution. If the scaling method is set to 'largest',
# we will attempt to grab it from the largest connected device. If the scaling method
# is set to 'stretch' we will grab it from the current value. Default will simply use
# what was set for the default resolutions.
def find_resolution():
if RESOLUTION_TYPE == 'default':
if SHOW_DEBUG:
print "Using default resolution of %sx%s" % (RESOLUTION_X, RESOLUTION_Y)
return RESOLUTION_X, RESOLUTION_Y
res_x = 0
res_y = 0
if SHOW_DEBUG:
print "Attempting to determine the current resolution."
if RESOLUTION_TYPE == 'largest':
regex_search = 'connected'
else:
regex_search = 'current'
p1 = subprocess.Popen(["xrandr"], stdout=subprocess.PIPE)
p2 = subprocess.Popen(["grep", regex_search], stdin=p1.stdout, stdout=subprocess.PIPE)
p1.stdout.close()
output = p2.communicate()[0]
if RESOLUTION_TYPE == 'largest':
# We are going to go through the connected devices and get the X/Y from the largest
matches = re.finditer(" connected ([0-9]+)x([0-9]+)+", output)
if matches:
largest = 0
for match in matches:
                if int(match.group(1)) * int(match.group(2)) > largest:
                    # Remember the winning area, otherwise a smaller screen
                    # seen later would overwrite the result.
                    largest = int(match.group(1)) * int(match.group(2))
                    res_x = match.group(1)
                    res_y = match.group(2)
elif SHOW_DEBUG:
print "Could not determine largest screen resolution."
else:
reg = re.search(".* current (.*?) x (.*?),.*", output)
if reg:
res_x = reg.group(1)
res_y = reg.group(2)
elif SHOW_DEBUG:
print "Could not determine current screen resolution."
# If we couldn't find anything automatically use what was set for the defaults
if res_x == 0 or res_y == 0:
res_x = RESOLUTION_X
res_y = RESOLUTION_Y
if SHOW_DEBUG:
print "Could not determine resolution automatically. Using defaults."
if SHOW_DEBUG:
print "Using detected resolution of %sx%s" % (res_x, res_y)
return int(res_x), int(res_y)
# Uses GLib to find the localized "Downloads" folder
# See: http://askubuntu.com/questions/137896/how-to-get-the-user-downloads-folder-location-with-python
def set_download_folder():
downloads_dir = glib.get_user_special_dir(glib.USER_DIRECTORY_DOWNLOAD)
if downloads_dir:
# Add any custom folder
new_path = os.path.join(downloads_dir, CUSTOM_FOLDER)
if SHOW_DEBUG:
print "Using automatically detected path:", new_path
else:
new_path = DOWNLOAD_PATH
if SHOW_DEBUG:
print "Could not determine download folder with GLib. Using default."
return new_path
# Download HTML of the site
def download_site(url):
if SHOW_DEBUG:
print "Downloading contents of the site to find the image name"
opener = urllib2.build_opener()
req = urllib2.Request(url)
try:
response = opener.open(req)
reply = response.read()
except urllib2.HTTPError, error:
if SHOW_DEBUG:
print "Error downloading " + url + " - " + str(error.code)
reply = "Error: " + str(error.code)
return reply
# Finds the image URL and saves it
def get_image(text):
if SHOW_DEBUG:
print "Grabbing the image URL"
file_url, filename, file_size = get_image_info('a href', text)
    # If file_url is None, today's picture might be a video
if file_url is None:
return None
if SHOW_DEBUG:
print "Found name of image:", filename
save_to = os.path.join(DOWNLOAD_PATH, os.path.splitext(filename)[0] + '.png')
if not os.path.isfile(save_to):
# If the response body is less than 500 bytes, something went wrong
if file_size < 500:
print "Response less than 500 bytes, probably an error\nAttempting to just grab image source"
file_url, filename, file_size = get_image_info('img src', text)
            # If file_url is None, today's picture might be a video
if file_url is None:
return None
print "Found name of image:", filename
if file_size < 500:
# Give up
if SHOW_DEBUG:
print "Could not find image to download"
exit()
if SHOW_DEBUG:
print "Retrieving image"
urllib.urlretrieve(file_url, save_to, print_download_status)
            # Pad the line with spaces so the final message fully overwrites
            # the progress counter.
if SHOW_DEBUG:
print "\rDone downloading", human_readable_size(file_size), " "
else:
urllib.urlretrieve(file_url, save_to)
elif SHOW_DEBUG:
print "File exists, moving on"
return save_to
# Resizes the image to the provided dimensions
def resize_image(filename):
if SHOW_DEBUG:
print "Opening local image"
image = Image.open(filename)
current_x, current_y = image.size
if (current_x, current_y) == (RESOLUTION_X, RESOLUTION_Y):
if SHOW_DEBUG:
print "Images are currently equal in size. No need to scale."
else:
if SHOW_DEBUG:
print "Resizing the image from", image.size[0], "x", image.size[1], "to", RESOLUTION_X, "x", RESOLUTION_Y
image = image.resize((RESOLUTION_X, RESOLUTION_Y), Image.ANTIALIAS)
if SHOW_DEBUG:
print "Saving the image to", filename
fhandle = open(filename, 'w')
image.save(fhandle, 'PNG')
# Sets the new image as the wallpaper
def set_gnome_wallpaper(file_path):
if SHOW_DEBUG:
print "Setting the wallpaper"
command = "gsettings set org.gnome.desktop.background picture-uri file://" + file_path
status, output = commands.getstatusoutput(command)
return status
def print_download_status(block_count, block_size, total_size):
written_size = human_readable_size(block_count * block_size)
total_size = human_readable_size(total_size)
# Adding space padding at the end to ensure we overwrite the whole line
stdout.write("\r%s bytes of %s " % (written_size, total_size))
stdout.flush()
def human_readable_size(number_bytes):
    # Fall through to GB/TB so sizes of 1 GB or more still return a string;
    # the original list stopped at MB and returned None for larger values.
    for x in ['bytes', 'KB', 'MB', 'GB']:
        if number_bytes < 1024.0:
            return "%3.2f%s" % (number_bytes, x)
        number_bytes /= 1024.0
    return "%3.2f%s" % (number_bytes, 'TB')
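# e.g. (illustrative) human_readable_size(2048) -> "2.00KB";
# human_readable_size(3 * 1024 ** 2) -> "3.00MB"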
# Creates the necessary XML so background images will scroll through
def create_desktop_background_scroll(filename):
if not IMAGE_SCROLL:
return filename
if SHOW_DEBUG:
print "Creating XML file for desktop background switching."
filename = DOWNLOAD_PATH + '/nasa_apod_desktop_backgrounds.xml'
# Create our base, background element
background = etree.Element("background")
# Grab our PNGs we have downloaded
images = glob.glob(DOWNLOAD_PATH + "/*.png")
num_images = len(images)
if num_images < SEED_IMAGES:
# Let's seed some images
# Start with yesterday and continue going back until we have enough
if SHOW_DEBUG:
print "Downloading some seed images as well"
days_back = 0
seed_images_left = SEED_IMAGES
while seed_images_left > 0:
days_back+=1
if SHOW_DEBUG:
print "Downloading seed image (" + str(seed_images_left) + " left):"
day_to_try = datetime.now() - timedelta(days=days_back)
# Filenames look like /apYYMMDD.html
seed_filename = NASA_APOD_SITE + "ap" + day_to_try.strftime("%y%m%d") + ".html"
seed_site_contents = download_site(seed_filename)
# Make sure we didn't encounter an error for some reason
        if seed_site_contents.startswith("Error:"):
continue
seed_filename = get_image(seed_site_contents)
            # If the content was a video or some other error occurred,
            # skip the rest.
if seed_filename is None:
continue
resize_image(seed_filename)
# Add this to our list of images
images.append(seed_filename)
seed_images_left-=1
if SHOW_DEBUG:
print "Done downloading seed images"
# Get our images in a random order so we get a new order every time we get a new file
random.shuffle(images)
# Recalculate the number of pictures
num_images = len(images)
for i, image in enumerate(images):
# Create a static entry for keeping this image here for IMAGE_DURATION
static = etree.SubElement(background, "static")
# Length of time the background stays
duration = etree.SubElement(static, "duration")
duration.text = str(IMAGE_DURATION)
# Assign the name of the file for our static entry
static_file = etree.SubElement(static, "file")
static_file.text = images[i]
# Create a transition for the animation with a from and to
transition = etree.SubElement(background, "transition")
# Length of time for the switch animation
transition_duration = etree.SubElement(transition, "duration")
transition_duration.text = "5"
# We are always transitioning from the current file
transition_from = etree.SubElement(transition, "from")
transition_from.text = images[i]
        # Create our transition "to" element
transition_to = etree.SubElement(transition, "to")
        # Check to see if we're at the end; if so, wrap around and use the
        # first image as the one to transition to
if i + 1 == num_images:
transition_to.text = images[0]
else:
transition_to.text = images[i + 1]
xml_tree = etree.ElementTree(background)
xml_tree.write(filename, pretty_print=True)
return filename
def get_image_info(element, text):
# Grabs information about the image
regex = '<' + element + '="(image.*?)"'
reg = re.search(regex, text, re.IGNORECASE)
if reg:
if 'http' in reg.group(1):
# Actual URL
file_url = reg.group(1)
else:
# Relative path, handle it
file_url = NASA_APOD_SITE + reg.group(1)
else:
if SHOW_DEBUG:
print "Could not find an image. May be a video today."
return None, None, None
# Create our handle for our remote file
if SHOW_DEBUG:
print "Opening remote URL"
remote_file = urllib.urlopen(file_url)
filename = os.path.basename(file_url)
file_size = float(remote_file.headers.get("content-length"))
return file_url, filename, file_size
if __name__ == '__main__':
# Our program
if SHOW_DEBUG:
print "Starting"
# Find desktop resolution
RESOLUTION_X, RESOLUTION_Y = find_resolution()
# Set a localized download folder
DOWNLOAD_PATH = set_download_folder()
# Create the download path if it doesn't exist
if not os.path.exists(os.path.expanduser(DOWNLOAD_PATH)):
os.makedirs(os.path.expanduser(DOWNLOAD_PATH))
# Grab the HTML contents of the file
site_contents = download_site(NASA_APOD_SITE)
    if site_contents.startswith("Error:"):
if SHOW_DEBUG:
print "Could not contact site."
exit()
# Download the image
filename = get_image(site_contents)
if filename is not None:
# Resize the image
resize_image(filename)
# Create the desktop switching xml
        filename = create_desktop_background_scroll(filename)
    # If the script was unable to download today's image and IMAGE_SCROLL is
    # set to False, the script exits
if filename is None:
if SHOW_DEBUG:
print "Today's image could not be downloaded."
exit()
# Set the wallpaper
status = set_gnome_wallpaper(filename)
if SHOW_DEBUG:
print "Finished!"
|
{
"content_hash": "b723a7ae2887fd33dbf4a6ba2668dd3d",
"timestamp": "",
"source": "github",
"line_count": 360,
"max_line_length": 117,
"avg_line_length": 34.375,
"alnum_prop": 0.6243232323232323,
"repo_name": "mikuyves/nasa-apod-desktop",
"id": "dc609af7a4af710aa48295d9a61d47fe54dd3baf",
"size": "15553",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "nasa_apod_desktop.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "15553"
}
],
"symlink_target": ""
}
|
import os
import sys
import json
import time
import threading
from requests_futures.sessions import FuturesSession
extensions = ["php", "txt", "pdf", "zip", "tgz", "asp", "inc", "tpl"]
session = FuturesSession(max_workers=100)
randomseed = "1enjfn3i"
dumb_code = 404
dumb_length = 0
url_cache = {}
current_dir = ""
lock = threading.Lock()
difficult_extensions = []
tested_extensions = 0
def ext_callback(sess, resp):
    # Track extensions that do not 404 on a random filename: these give
    # unreliable signals for brute-forcing. The original `++tested_extensions`
    # was a no-op in Python; a global increment is what was intended.
    global tested_extensions
    print "Ext URL: %s" %resp.url
    print "Extension callback %d" %resp.status_code
    test_ext = resp.url.split(".")[-1]
    ext_code = resp.status_code
    if resp.status_code != 404:
        lock.acquire()
        tested_extensions += 1
        difficult_extensions.append(test_ext)
        lock.release()
def dumb_callback(sess, resp):
    # Record the baseline response for a random URL; without the `global`
    # declaration the original only rebound local names, so bg_cb never saw
    # the real "not found" signature.
    global dumb_code, dumb_length
    lock.acquire()
    print "Dumb callback called %s" %resp.status_code
    dumb_code = resp.status_code
    dumb_length = len(resp.text)
    lock.release()
def bg_cb(sess, resp):
if (resp.status_code != 404
and resp.status_code != dumb_code
and bool(filter(None, resp.text.split("\n"))) == True
and resp.url not in url_cache.keys()
and resp.url.count("-") <= 2
and resp.url.count("_") <= 2):
lock.acquire()
url_cache[resp.url] = 1
lock.release()
x = {"%s" %resp.url: resp.status_code}
print json.dumps(x)
def main():
minimal = open("minimal.wl", "r").read()
words = [line for line in minimal.split("\n")[:-1]]
url = "%s%s" %(sys.argv[1], randomseed)
future = session.get(url, background_callback=dumb_callback)
future.result()
for ext in extensions:
future = session.get("%s%s%s.%s" %(url, current_dir,
randomseed, ext), background_callback=ext_callback)
for word in words:
url = "%s%s%s" %(sys.argv[1], word, ext)
future = session.get(url, background_callback=bg_cb)
if __name__ == "__main__":
if len(sys.argv) < 2:
print "Usage: ./pixel.py [url with trailing /]"
else:
main()
|
{
"content_hash": "48c157668dbaf69b3fd3099016eabd05",
"timestamp": "",
"source": "github",
"line_count": 70,
"max_line_length": 69,
"avg_line_length": 28.02857142857143,
"alnum_prop": 0.6223241590214067,
"repo_name": "zulla/pixel",
"id": "907e4a2aabbe54062fc943b8b1e7fd204b2c5e98",
"size": "2125",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pixel.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Mathematica",
"bytes": "411760"
},
{
"name": "Python",
"bytes": "2125"
}
],
"symlink_target": ""
}
|
import argparse
import json
import os
import shutil
import sys
import tempfile
import traceback
import merge_api
MISSING_SHARDS_MSG = r"""Missing results from the following shard(s): %s
This can happen in following cases:
* Test failed to start (missing *.dll/*.so dependency for example)
* Test crashed or hung
* Task expired because there are not enough bots available and are all used
* Swarming service experienced problems
Please examine logs to figure out what happened.
"""
def emit_warning(title, log=None):
print '@@@STEP_WARNINGS@@@'
print title
if log:
title = title.rstrip()
for line in log.splitlines():
print '@@@STEP_LOG_LINE@%s@%s@@@' % (title, line.rstrip())
print '@@@STEP_LOG_END@%s@@@' % title
def merge_shard_results(summary_json, jsons_to_merge):
"""Reads JSON test output from all shards and combines them into one.
Returns dict with merged test output on success or None on failure. Emits
annotations.
"""
# summary.json is produced by swarming client itself. We are mostly interested
# in the number of shards.
try:
with open(summary_json) as f:
summary = json.load(f)
except (IOError, ValueError):
emit_warning(
'summary.json is missing or can not be read',
'Something is seriously wrong with swarming client or the bot.')
return None
# Merge all JSON files together. Keep track of missing shards.
merged = {
'all_tests': set(),
'disabled_tests': set(),
'global_tags': set(),
'missing_shards': [],
'per_iteration_data': [],
'swarming_summary': summary,
'test_locations': {},
}
for index, result in enumerate(summary['shards']):
if result is None:
merged['missing_shards'].append(index)
continue
# Author note: this code path doesn't trigger convert_to_old_format() in
# client/swarming.py, which means the state enum is saved in its string
# name form, not in the number form.
state = result.get('state')
if state == u'BOT_DIED':
emit_warning('Shard #%d had a Swarming internal failure' % index)
elif state == u'EXPIRED':
emit_warning('There wasn\'t enough capacity to run your test')
elif state == u'TIMED_OUT':
emit_warning(
'Test runtime exceeded allocated time',
'Either it ran for too long (hard timeout) or it didn\'t produce '
'I/O for an extended period of time (I/O timeout)')
elif state != u'COMPLETED':
emit_warning('Invalid Swarming task state: %s' % state)
json_data, err_msg = load_shard_json(index, result.get('task_id'),
jsons_to_merge)
if json_data:
# Set-like fields.
for key in ('all_tests', 'disabled_tests', 'global_tags'):
        merged[key].update(json_data.get(key, []))
# Dict-like fields.
for key in ('test_locations',):
merged[key].update(json_data.get(key, {}))
# 'per_iteration_data' is a list of dicts. Dicts should be merged
# together, not the 'per_iteration_data' list itself.
merged['per_iteration_data'] = merge_list_of_dicts(
merged['per_iteration_data'],
json_data.get('per_iteration_data', []))
else:
merged['missing_shards'].append(index)
emit_warning('No result was found: %s' % err_msg)
# If some shards are missing, make it known. Continue parsing anyway. Step
# should be red anyway, since swarming.py return non-zero exit code in that
# case.
if merged['missing_shards']:
as_str = ', '.join(map(str, merged['missing_shards']))
emit_warning(
'some shards did not complete: %s' % as_str,
MISSING_SHARDS_MSG % as_str)
# Not all tests run, combined JSON summary can not be trusted.
merged['global_tags'].add('UNRELIABLE_RESULTS')
# Convert to jsonish dict.
for key in ('all_tests', 'disabled_tests', 'global_tags'):
merged[key] = sorted(merged[key])
return merged
OUTPUT_JSON_SIZE_LIMIT = 100 * 1024 * 1024 # 100 MB
def load_shard_json(index, task_id, jsons_to_merge):
"""Reads JSON output of the specified shard.
Args:
output_dir: The directory in which to look for the JSON output to load.
index: The index of the shard to load data for, this is for old api.
task_id: The directory of the shard to load data for, this is for new api.
Returns: A tuple containing:
* The contents of path, deserialized into a python object.
* An error string.
(exactly one of the tuple elements will be non-None).
"""
# 'output.json' is set in swarming/api.py, gtest_task method.
matching_json_files = [
j for j in jsons_to_merge
if (os.path.basename(j) == 'output.json' and
(os.path.basename(os.path.dirname(j)) == str(index) or
os.path.basename(os.path.dirname(j)) == task_id))]
if not matching_json_files:
print >> sys.stderr, 'shard %s test output missing' % index
return (None, 'shard %s test output was missing' % index)
elif len(matching_json_files) > 1:
print >> sys.stderr, 'duplicate test output for shard %s' % index
return (None, 'shard %s test output was duplicated' % index)
path = matching_json_files[0]
try:
filesize = os.stat(path).st_size
if filesize > OUTPUT_JSON_SIZE_LIMIT:
print >> sys.stderr, 'output.json is %d bytes. Max size is %d' % (
filesize, OUTPUT_JSON_SIZE_LIMIT)
return (None, 'shard %s test output exceeded the size limit' % index)
with open(path) as f:
return (json.load(f), None)
except (IOError, ValueError, OSError) as e:
print >> sys.stderr, 'Missing or invalid gtest JSON file: %s' % path
print >> sys.stderr, '%s: %s' % (type(e).__name__, e)
return (None, 'shard %s test output was missing or invalid' % index)
def merge_list_of_dicts(left, right):
"""Merges dicts left[0] with right[0], left[1] with right[1], etc."""
output = []
for i in xrange(max(len(left), len(right))):
left_dict = left[i] if i < len(left) else {}
right_dict = right[i] if i < len(right) else {}
merged_dict = left_dict.copy()
merged_dict.update(right_dict)
output.append(merged_dict)
return output
def standard_gtest_merge(
output_json, summary_json, jsons_to_merge):
output = merge_shard_results(summary_json, jsons_to_merge)
with open(output_json, 'wb') as f:
json.dump(output, f)
return 0
def main(raw_args):
parser = merge_api.ArgumentParser()
args = parser.parse_args(raw_args)
return standard_gtest_merge(
args.output_json, args.summary_json, args.jsons_to_merge)
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
|
{
"content_hash": "cecb14a9bd5829994c25ba3dbb69c3a4",
"timestamp": "",
"source": "github",
"line_count": 199,
"max_line_length": 80,
"avg_line_length": 33.37185929648241,
"alnum_prop": 0.6494503839783166,
"repo_name": "endlessm/chromium-browser",
"id": "99adf7bc7e961bebbc1b889c0fa81212d1d535bd",
"size": "6826",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "testing/merge_scripts/standard_gtest_merge.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
from typing import Callable, NamedTuple, Tuple
import jax
import jax.numpy as jnp
from blackjax.types import PyTree
class SMCInfo(NamedTuple):
"""Additional information on the tempered SMC step.
weights: jnp.ndarray
The weights after the MCMC pass.
proposals: PyTree
The particles that were proposed by the MCMC pass.
ancestors: jnp.ndarray
The index of the particles proposed by the MCMC pass that were selected
by the resampling step.
log_likelihood_increment: float
The log-likelihood increment due to the current step of the SMC algorithm.
"""
weights: jnp.ndarray
proposals: PyTree
ancestors: jnp.ndarray
log_likelihood_increment: float
def kernel(
mcmc_kernel_factory: Callable,
mcmc_state_generator: Callable,
resampling_fn: Callable,
num_mcmc_iterations: int,
):
"""Build a generic SMC kernel.
In Feynman-Kac equivalent terms, the algo goes roughly as follows:
```
M_t = mcmc_kernel_factory(potential_fn)
for i in range(num_mcmc_iterations):
x_t^i = M_t(..., x_t^i)
G_t = log_weights_fn
log_weights = G_t(x_t)
idx = resample(log_weights)
x_t = x_t[idx]
```
Parameters
----------
mcmc_kernel_factory: Callable
A function of the Markov potential that returns a mcmc_kernel.
mcmc_state_generator: Callable
A function that creates a new mcmc state from a position and a potential.
resampling_fn: Callable
A function that resamples the particles generated by the MCMC kernel,
        based on previously computed weights.
num_mcmc_iterations: int
Number of iterations of the MCMC kernel
Returns
-------
    A kernel that takes a PRNGKey, a set of particles, the log-likelihood of the
distribution and the Feynman-Kac potential at time `t`. The kernel returns
a new set of particles.
"""
def one_step(
rng_key: jnp.ndarray,
particles: PyTree,
logprob_fn: Callable,
log_weight_fn: Callable,
) -> Tuple[PyTree, SMCInfo]:
"""
Parameters
----------
rng_key: DeviceArray[int],
JAX PRNGKey for randomness.
particles: PyTree
Current particles sample of the SMC algorithm.
logprob_fn: Callable
Log probability function we wish to sample from.
log_weight_fn: Callable
A function that represents the Feynman-Kac log potential at time t.
Returns
-------
particles: PyTree,
The updated set of particles.
info: SMCInfo,
Additional information on the SMC step
"""
num_particles = jax.tree_util.tree_flatten(particles)[0][0].shape[0]
scan_key, resampling_key = jax.random.split(rng_key, 2)
# First advance the particles using the MCMC kernel
mcmc_kernel = mcmc_kernel_factory(logprob_fn)
def mcmc_body_fn(curr_particles, curr_key):
keys = jax.random.split(curr_key, num_particles)
new_particles, _ = jax.vmap(mcmc_kernel, in_axes=(0, 0))(
keys, curr_particles
)
return new_particles, None
mcmc_state = jax.vmap(mcmc_state_generator, in_axes=(0, None))(
particles, logprob_fn
)
keys = jax.random.split(scan_key, num_mcmc_iterations)
proposed_states, _ = jax.lax.scan(mcmc_body_fn, mcmc_state, keys)
proposed_particles = proposed_states.position
# Resample the particles depending on their respective weights
log_weights = jax.vmap(log_weight_fn, in_axes=(0,))(proposed_particles)
weights, log_likelihood_increment = _normalize(log_weights)
resampling_index = resampling_fn(weights, resampling_key)
particles = jax.tree_map(lambda x: x[resampling_index], proposed_particles)
info = SMCInfo(
weights, proposed_particles, resampling_index, log_likelihood_increment
)
return particles, info
return one_step
def _normalize(log_weights):
"""Normalize log-weights into weights and return resulting weights and log-likelihood increment."""
n = log_weights.shape[0]
max_logw = jnp.max(log_weights)
w = jnp.exp(log_weights - max_logw)
w_mean = w.mean()
log_likelihood_increment = jnp.log(w_mean) + max_logw
w = w / (n * w_mean)
return w, log_likelihood_increment
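# Quick numeric check of _normalize (an illustrative addition, not part of
# the module): with log_weights = jnp.log(jnp.array([1., 2., 3.])) the
# normalized weights come out as [1/6, 2/6, 3/6] (summing to one) and the
# log-likelihood increment equals log((1 + 2 + 3) / 3), i.e.
# logsumexp(log_weights) - log(n).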
|
{
"content_hash": "7de7b093734230d8e903c1f6a398e3f9",
"timestamp": "",
"source": "github",
"line_count": 141,
"max_line_length": 103,
"avg_line_length": 31.836879432624112,
"alnum_prop": 0.6331031410113611,
"repo_name": "blackjax-devs/blackjax",
"id": "22509de4efeab0799471e45d6160880a9618f8fd",
"size": "5075",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "blackjax/smc/base.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "480"
},
{
"name": "Python",
"bytes": "462048"
}
],
"symlink_target": ""
}
|
"""Benchmark for KPL implementation of vocabulary columns + indicator from lists with dense inputs."""
from tensorflow.python import keras
from tensorflow.python.compat import v2_compat
from tensorflow.python.eager.def_function import function as tf_function
from tensorflow.python.feature_column import feature_column_v2 as fcv2
from tensorflow.python.framework import dtypes as dt
from tensorflow.python.keras.layers.preprocessing import category_encoding
from tensorflow.python.keras.layers.preprocessing import string_lookup
from tensorflow.python.keras.layers.preprocessing.benchmarks import feature_column_benchmark as fc_bm
from tensorflow.python.platform import test as tf_test
# This is required as of 3/2021 because otherwise we drop into graph mode.
v2_compat.enable_v2_behavior()
NUM_REPEATS = 10
BATCH_SIZES = [32, 256]
def embedding_varlen(batch_size, max_length):
"""Benchmark a variable-length embedding."""
# Data and constants.
vocab_size = 32768
vocab = fc_bm.create_vocabulary(vocab_size)
data = fc_bm.create_string_data(
max_length, batch_size * NUM_REPEATS, vocab, pct_oov=0.15)
# Keras implementation
model = keras.Sequential()
model.add(keras.Input(shape=(max_length,), name="data", dtype=dt.string))
model.add(string_lookup.StringLookup(vocabulary=vocab, mask_token=None))
model.add(
category_encoding.CategoryEncoding(
num_tokens=vocab_size + 1, output_mode="count"))
# FC implementation
fc = fcv2.indicator_column(
fcv2.categorical_column_with_vocabulary_list(
key="data", vocabulary_list=vocab, num_oov_buckets=1))
# Wrap the FC implementation in a tf.function for a fair comparison
@tf_function()
def fc_fn(tensors):
fc.transform_feature(fcv2.FeatureTransformationCache(tensors), None)
# Benchmark runs
keras_data = {
"data": data.to_tensor(default_value="", shape=(batch_size, max_length))
}
k_avg_time = fc_bm.run_keras(keras_data, model, batch_size, NUM_REPEATS)
fc_data = {
"data": data.to_tensor(default_value="", shape=(batch_size, max_length))
}
fc_avg_time = fc_bm.run_fc(fc_data, fc_fn, batch_size, NUM_REPEATS)
return k_avg_time, fc_avg_time
class BenchmarkLayer(fc_bm.LayerBenchmark):
"""Benchmark the layer forward pass."""
def benchmark_layer(self):
for batch in BATCH_SIZES:
name = "vocab_list_indicator|dense|batch_%s" % batch
k_time, f_time = embedding_varlen(batch_size=batch, max_length=256)
self.report(name, k_time, f_time, NUM_REPEATS)
if __name__ == "__main__":
tf_test.main()
|
{
"content_hash": "404465a8ec92f458d45e653963f7d136",
"timestamp": "",
"source": "github",
"line_count": 71,
"max_line_length": 102,
"avg_line_length": 36.225352112676056,
"alnum_prop": 0.7270606531881804,
"repo_name": "petewarden/tensorflow",
"id": "2c7f2f68f6bc45e30642888ebec451e33269bf84",
"size": "3261",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "tensorflow/python/keras/layers/preprocessing/benchmarks/category_vocab_list_indicator_dense_benchmark.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "31796"
},
{
"name": "Batchfile",
"bytes": "55269"
},
{
"name": "C",
"bytes": "895451"
},
{
"name": "C#",
"bytes": "8562"
},
{
"name": "C++",
"bytes": "82100676"
},
{
"name": "CMake",
"bytes": "6500"
},
{
"name": "Dockerfile",
"bytes": "112853"
},
{
"name": "Go",
"bytes": "1867248"
},
{
"name": "HTML",
"bytes": "4686483"
},
{
"name": "Java",
"bytes": "984477"
},
{
"name": "Jupyter Notebook",
"bytes": "550862"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "MLIR",
"bytes": "1982867"
},
{
"name": "Makefile",
"bytes": "66496"
},
{
"name": "Objective-C",
"bytes": "116558"
},
{
"name": "Objective-C++",
"bytes": "317461"
},
{
"name": "PHP",
"bytes": "4236"
},
{
"name": "Pascal",
"bytes": "318"
},
{
"name": "Pawn",
"bytes": "20422"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "Python",
"bytes": "37425809"
},
{
"name": "RobotFramework",
"bytes": "1779"
},
{
"name": "Roff",
"bytes": "2705"
},
{
"name": "Ruby",
"bytes": "7464"
},
{
"name": "SWIG",
"bytes": "8992"
},
{
"name": "Shell",
"bytes": "700106"
},
{
"name": "Smarty",
"bytes": "35725"
},
{
"name": "Starlark",
"bytes": "3613406"
},
{
"name": "Swift",
"bytes": "62814"
},
{
"name": "Vim Snippet",
"bytes": "58"
}
],
"symlink_target": ""
}
|
import os
import numpy as np
import pandas as pd
from sklearn.metrics import roc_auc_score
try:
import torch
except ImportError:
torch = None
### Evaluator for node property prediction
class Evaluator:
def __init__(self, name):
self.name = name
meta_info = pd.read_csv(os.path.join(os.path.dirname(__file__), 'master.csv'), index_col = 0)
if not self.name in meta_info:
print(self.name)
error_mssg = 'Invalid dataset name {}.\n'.format(self.name)
error_mssg += 'Available datasets are as follows:\n'
error_mssg += '\n'.join(meta_info.keys())
raise ValueError(error_mssg)
self.num_tasks = int(meta_info[self.name]['num tasks'])
self.eval_metric = meta_info[self.name]['eval metric']
def _parse_and_check_input(self, input_dict):
if self.eval_metric == 'rocauc' or self.eval_metric == 'acc':
if not 'y_true' in input_dict:
raise RuntimeError('Missing key of y_true')
if not 'y_pred' in input_dict:
raise RuntimeError('Missing key of y_pred')
y_true, y_pred = input_dict['y_true'], input_dict['y_pred']
'''
y_true: numpy ndarray or torch tensor of shape (num_nodes num_tasks)
y_pred: numpy ndarray or torch tensor of shape (num_nodes num_tasks)
'''
            # convert torch tensors to numpy arrays on the CPU
if torch is not None and isinstance(y_true, torch.Tensor):
y_true = y_true.detach().cpu().numpy()
if torch is not None and isinstance(y_pred, torch.Tensor):
y_pred = y_pred.detach().cpu().numpy()
## check type
            if not (isinstance(y_true, np.ndarray) and isinstance(y_pred, np.ndarray)):
raise RuntimeError('Arguments to Evaluator need to be either numpy ndarray or torch tensor')
if not y_true.shape == y_pred.shape:
raise RuntimeError('Shape of y_true and y_pred must be the same')
if not y_true.ndim == 2:
                raise RuntimeError('y_true and y_pred must be 2-dim arrays, {}-dim array given'.format(y_true.ndim))
if not y_true.shape[1] == self.num_tasks:
raise RuntimeError('Number of tasks for {} should be {} but {} given'.format(self.name, self.num_tasks, y_true.shape[1]))
return y_true, y_pred
else:
raise ValueError('Undefined eval metric %s ' % (self.eval_metric))
def eval(self, input_dict):
if self.eval_metric == 'rocauc':
y_true, y_pred = self._parse_and_check_input(input_dict)
return self._eval_rocauc(y_true, y_pred)
elif self.eval_metric == 'acc':
y_true, y_pred = self._parse_and_check_input(input_dict)
return self._eval_acc(y_true, y_pred)
else:
raise ValueError('Undefined eval metric %s ' % (self.eval_metric))
@property
def expected_input_format(self):
desc = '==== Expected input format of Evaluator for {}\n'.format(self.name)
if self.eval_metric == 'rocauc':
desc += '{\'y_true\': y_true, \'y_pred\': y_pred}\n'
desc += '- y_true: numpy ndarray or torch tensor of shape (num_nodes num_tasks)\n'
desc += '- y_pred: numpy ndarray or torch tensor of shape (num_nodes num_tasks)\n'
desc += 'where y_pred stores score values (for computing ROC-AUC),\n'
desc += 'num_task is {}, and '.format(self.num_tasks)
desc += 'each row corresponds to one node.\n'
elif self.eval_metric == 'acc':
desc += '{\'y_true\': y_true, \'y_pred\': y_pred}\n'
desc += '- y_true: numpy ndarray or torch tensor of shape (num_nodes num_tasks)\n'
desc += '- y_pred: numpy ndarray or torch tensor of shape (num_nodes num_tasks)\n'
desc += 'where y_pred stores predicted class label (integer),\n'
desc += 'num_task is {}, and '.format(self.num_tasks)
desc += 'each row corresponds to one node.\n'
else:
raise ValueError('Undefined eval metric %s ' % (self.eval_metric))
return desc
@property
def expected_output_format(self):
desc = '==== Expected output format of Evaluator for {}\n'.format(self.name)
if self.eval_metric == 'rocauc':
desc += '{\'rocauc\': rocauc}\n'
desc += '- rocauc (float): ROC-AUC score averaged across {} task(s)\n'.format(self.num_tasks)
elif self.eval_metric == 'acc':
desc += '{\'acc\': acc}\n'
desc += '- acc (float): Accuracy score averaged across {} task(s)\n'.format(self.num_tasks)
else:
raise ValueError('Undefined eval metric %s ' % (self.eval_metric))
return desc
def _eval_rocauc(self, y_true, y_pred):
'''
            compute ROC-AUC averaged across tasks
'''
rocauc_list = []
for i in range(y_true.shape[1]):
            # AUC is only defined when both positive and negative labels are present.
            if np.sum(y_true[:,i] == 1) > 0 and np.sum(y_true[:,i] == 0) > 0:
                # NaN != NaN, so this masks out entries whose label is NaN.
                is_labeled = y_true[:,i] == y_true[:,i]
rocauc_list.append(roc_auc_score(y_true[is_labeled,i], y_pred[is_labeled,i]))
if len(rocauc_list) == 0:
raise RuntimeError('No positively labeled data available. Cannot compute ROC-AUC.')
return {'rocauc': sum(rocauc_list)/len(rocauc_list)}
def _eval_acc(self, y_true, y_pred):
acc_list = []
for i in range(y_true.shape[1]):
            # NaN != NaN: mask out unlabeled (NaN) entries
            is_labeled = y_true[:,i] == y_true[:,i]
correct = y_true[is_labeled,i] == y_pred[is_labeled,i]
acc_list.append(float(np.sum(correct))/len(correct))
return {'acc': sum(acc_list)/len(acc_list)}
if __name__ == '__main__':
### rocauc case
evaluator = Evaluator('ogbn-proteins')
print(evaluator.expected_input_format)
print(evaluator.expected_output_format)
y_true = torch.tensor(np.random.randint(2, size = (100,112)))
y_pred = torch.tensor(np.random.randn(100,112))
input_dict = {'y_true': y_true, 'y_pred': y_pred}
result = evaluator.eval(input_dict)
print(result)
### acc case
evaluator = Evaluator('ogbn-products')
print(evaluator.expected_input_format)
print(evaluator.expected_output_format)
y_true = np.random.randint(5, size = (100,1))
y_pred = np.random.randint(5, size = (100,1))
input_dict = {'y_true': y_true, 'y_pred': y_pred}
result = evaluator.eval(input_dict)
print(result)
### acc case
evaluator = Evaluator('ogbn-arxiv')
print(evaluator.expected_input_format)
print(evaluator.expected_output_format)
y_true = np.random.randint(5, size = (100,1))
y_pred = np.random.randint(5, size = (100,1))
input_dict = {'y_true': y_true, 'y_pred': y_pred}
result = evaluator.eval(input_dict)
print(result)
|
{
"content_hash": "18d0c1915fdf7a5017909b0fa6e0327a",
"timestamp": "",
"source": "github",
"line_count": 179,
"max_line_length": 137,
"avg_line_length": 39.340782122905026,
"alnum_prop": 0.5785288270377733,
"repo_name": "snap-stanford/ogb",
"id": "d5c88ff535430f2751d8688fee5f47cdb40d1f1c",
"size": "7042",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ogb/nodeproppred/evaluate.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "271886"
}
],
"symlink_target": ""
}
|
import io
import unittest
from unittest.mock import patch
from kattis import k_modulararithmetic
###############################################################################
class SampleInput(unittest.TestCase):
'''Problem statement sample inputs and outputs'''
def test_sample_input(self):
'''Run and assert problem statement sample input and output.'''
inputs = []
inputs.append('1000 3')
inputs.append('1 / 999')
inputs.append('1 / 998')
inputs.append('578 * 178')
inputs.append('13 4')
inputs.append('7 / 9')
inputs.append('9 * 3')
inputs.append('0 - 9')
inputs.append('10 + 10')
inputs.append('0 0')
inputs = '\n'.join(inputs) + '\n'
outputs = []
outputs.append('999')
outputs.append('-1')
outputs.append('884')
outputs.append('8')
outputs.append('1')
outputs.append('4')
outputs.append('7')
outputs = '\n'.join(outputs) + '\n'
with patch('sys.stdin', io.StringIO(inputs)) as stdin,\
patch('sys.stdout', new_callable=io.StringIO) as stdout:
k_modulararithmetic.main()
self.assertEqual(stdout.getvalue(), outputs)
self.assertEqual(stdin.read(), '')
###############################################################################
if __name__ == '__main__':
unittest.main()
|
{
"content_hash": "a5cf992e0fffae76721944f72f7a4cbf",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 79,
"avg_line_length": 31.8,
"alnum_prop": 0.5010482180293501,
"repo_name": "ivanlyon/exercises",
"id": "8e3208995df570e944fa84baaf8fcec6ce2bf9cd",
"size": "1431",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/test_k_modulararithmetic.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1283"
},
{
"name": "HTML",
"bytes": "9068"
},
{
"name": "Python",
"bytes": "96419"
}
],
"symlink_target": ""
}
|
from unittest import TestCase
from django.template import Context, Template
from ddcz.html import (
encode_valid_html,
unsafe_encode_valid_creation_html,
unsafe_encode_any_creation_html,
)
class TestInsecureHtmlRender(TestCase):
def assert_output(self, entity_string, expected):
output = unsafe_encode_any_creation_html(entity_string)
self.assertEqual(expected, output)
def test_span_table(self):
s = """<table>
<tr><th colspan="5">Tabulka poznání nestvůry </th></tr>
<tr><th>známost tvora</th><th>pravděpodobnost</th><th>použitá vlastnost</th><th>připravenost</th><th>obvyklost prostředí</th></tr>
<tr><td>zapomenutý</td><td>-30%</td><td rowspan="6">úroveň + INT%</td><td rowspan="6">Postava se může připravit, dopředu se seznámit s tvory, které jsou pro daný kraj typičtí atd. Za toto získává bonus +10%</td><td rowspan="6">Pokud se tvor nachází ve svém obvyklém prostředí, má učenec větší šanci si ho vybavit a naopak. Postava má bonus či postih 10%.</td></tr>
<tr><td>téměř neznámý</td><td>0%</td></tr>
<tr><td>neobvyklý</td><td>15%</td></tr>
<tr><td>běžný</td><td>30%</td></tr>
<tr><td>velmi známý</td><td>45%</td></tr>
<tr><td>slavný</td><td>60%</td></tr>
</table>"""
exp = """<table>
<tr><th colspan="5">Tabulka poznání nestvůry </th></tr>
<tr><th>známost tvora</th><th>pravděpodobnost</th><th>použitá vlastnost</th><th>připravenost</th><th>obvyklost prostředí</th></tr>
<tr><td>zapomenutý</td><td>-30%</td><td rowspan="6">úroveň + INT%</td><td rowspan="6">Postava se může připravit, dopředu se seznámit s tvory, které jsou pro daný kraj typičtí atd. Za toto získává bonus +10%</td><td rowspan="6">Pokud se tvor nachází ve svém obvyklém prostředí, má učenec větší šanci si ho vybavit a naopak. Postava má bonus či postih 10%.</td></tr>
<tr><td>téměř neznámý</td><td>0%</td></tr>
<tr><td>neobvyklý</td><td>15%</td></tr>
<tr><td>běžný</td><td>30%</td></tr>
<tr><td>velmi známý</td><td>45%</td></tr>
<tr><td>slavný</td><td>60%</td></tr>
</table>"""
self.assert_output(s, exp)
class TestUnsafeHtmlRenderTemplate(TestCase):
def assert_output(self, entity_string, expected, template_str):
template = Template(template_str)
output = template.render(Context({"entity_string": entity_string}))
self.assertEqual(expected, output)
def test_easy_template(self):
entity_string = "<h2><a>Mnich</a></h2>"
exp = "<h2><a>Mnich</a></h2>"
t = "{% load html %}{{ entity_string|render_html_insecurely|safe }}"
self.assert_output(entity_string, exp, t)
class TestUserHtmlRender(TestCase):
def assert_output(self, entity_string, expected):
output = encode_valid_html(entity_string)
self.assertEqual(expected, output)
def test_text_returned(self):
s = "Simple plain text"
self.assert_output(s, s)
def test_pair_tag_uppercase_accepted(self):
s = "simple <B>bold</B> text"
exp = "simple <b>bold</b> text"
self.assert_output(s, exp)
    def test_pair_tag_lowercase_accepted(self):
s = "simple <b>bold</b> text"
exp = "simple <b>bold</b> text"
self.assert_output(s, exp)
def test_attributes_unsupported(self):
s = "simple <B onclick="javascript:script(alert)">bold</B> text"
exp = "simple <B onclick="javascript:script(alert)">bold</B> text"
self.assert_output(s, exp)
def test_nonpair_accepted(self):
s = "simple <HR> line"
exp = "simple <hr> line"
self.assert_output(s, exp)
def test_break_variant_3(self):
s = "text that has been <BR>breaked"
exp = "text that has been <br>breaked"
self.assert_output(s, exp)
def test_break_variant_2(self):
s = "text that has been <BR/>breaked"
exp = "text that has been <br>breaked"
self.assert_output(s, exp)
def test_break_variant_1(self):
s = "text that has been <BR />breaked"
exp = "text that has been <br>breaked"
self.assert_output(s, exp)
def test_bad_trailing_arrow(self):
s = "<h2>Skřetí zabijáci</h2><"
exp = "<h2>Skřetí zabijáci</h2><"
self.assert_output(s, exp)
def test_proper_tag_not_encoded(self):
# Verifies <https://github.com/dracidoupe/graveyard/issues/182>
s = "<p><i>"A quote intended to be in italic."</p>"
exp = "<p><i>"A quote intended to be in italic."</p>"
self.assert_output(s, exp)
class TestDeprecatedUnsafeHtmlRender(TestCase):
def assert_output(self, entity_string, expected):
output = unsafe_encode_valid_creation_html(entity_string)
self.assertEqual(expected, output)
def test_text_returned(self):
s = "Simple plain text"
self.assert_output(s, s)
def test_pair_tag_uppercase_accepted(self):
s = "simple <B>bold</B> text"
exp = "simple <b>bold</b> text"
self.assert_output(s, exp)
    def test_pair_tag_lowercase_accepted(self):
s = "simple <b>bold</b> text"
exp = "simple <b>bold</b> text"
self.assert_output(s, exp)
def test_attributes_unsupported_without_pair_check(self):
s = "simple <B onclick="javascript:script(alert)">bold</B> text"
exp = (
"simple <B onclick="javascript:script(alert)">bold</b> text"
)
self.assert_output(s, exp)
def test_nonpair_accepted(self):
s = "simple <HR> line"
exp = "simple <hr> line"
self.assert_output(s, exp)
def test_break_variant_3(self):
s = "text that has been <BR>breaked"
exp = "text that has been <br>breaked"
self.assert_output(s, exp)
def test_break_variant_2(self):
s = "text that has been <BR/>breaked"
exp = "text that has been <br>breaked"
self.assert_output(s, exp)
def test_break_variant_1(self):
s = "text that has been <BR />breaked"
exp = "text that has been <br>breaked"
self.assert_output(s, exp)
def test_empty_link(self):
s = "text with <a>empty link</a>"
exp = "text with <a>empty link</a>"
self.assert_output(s, exp)
def test_empty_link_in_heading(self):
s = "<h2><a>Mnich</a></h2>"
exp = "<h2><a>Mnich</a></h2>"
self.assert_output(s, exp)
|
{
"content_hash": "79ca9862bbe3334995ca5a1b17b61c04",
"timestamp": "",
"source": "github",
"line_count": 165,
"max_line_length": 466,
"avg_line_length": 42.49090909090909,
"alnum_prop": 0.6280131222364855,
"repo_name": "dracidoupe/graveyard",
"id": "8972ce066700c2f68f9caf987d9bbb15d63bcc5b",
"size": "7119",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ddcz/tests/test_html.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "API Blueprint",
"bytes": "4273"
},
{
"name": "CSS",
"bytes": "37578"
},
{
"name": "Dockerfile",
"bytes": "208"
},
{
"name": "HTML",
"bytes": "101149"
},
{
"name": "JavaScript",
"bytes": "2417"
},
{
"name": "Python",
"bytes": "766548"
},
{
"name": "Shell",
"bytes": "5103"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('api', '0056_repopulate_series_head'),
]
operations = [
migrations.RemoveField(
model_name='message',
name='is_series_head',
),
]
|
{
"content_hash": "382af4c3cf4ce5de42881e69598628fa",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 47,
"avg_line_length": 19.352941176470587,
"alnum_prop": 0.5896656534954408,
"repo_name": "patchew-project/patchew",
"id": "c0dc4760e127585edc3b9c4cccd1776e499c53be",
"size": "403",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "api/migrations/0057_remove_message_is_series_head.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "57256"
},
{
"name": "HTML",
"bytes": "37890"
},
{
"name": "JavaScript",
"bytes": "40258"
},
{
"name": "Jinja",
"bytes": "1238"
},
{
"name": "Python",
"bytes": "500595"
},
{
"name": "Shell",
"bytes": "5477"
}
],
"symlink_target": ""
}
|
from swgpy.object import *
def create(kernel):
result = Tangible()
result.template = "object/tangible/ship/crafted/reactor/shared_base_reactor_subcomponent_mk2.iff"
result.attribute_template_id = 8
result.stfName("space_crafting_n","base_reactor_subcomponent_mk2")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
|
{
"content_hash": "abb1e6d7ef7f5fc2148b762b350f8691",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 98,
"avg_line_length": 27.23076923076923,
"alnum_prop": 0.7231638418079096,
"repo_name": "obi-two/Rebelion",
"id": "a8f1a0396849f87fbc8e3e9f558332612a3027f1",
"size": "499",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "data/scripts/templates/object/tangible/ship/crafted/reactor/shared_base_reactor_subcomponent_mk2.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "11818"
},
{
"name": "C",
"bytes": "7699"
},
{
"name": "C++",
"bytes": "2293610"
},
{
"name": "CMake",
"bytes": "39727"
},
{
"name": "PLSQL",
"bytes": "42065"
},
{
"name": "Python",
"bytes": "7499185"
},
{
"name": "SQLPL",
"bytes": "41864"
}
],
"symlink_target": ""
}
|
from __future__ import print_function
import errno
import os
import tempfile
import fixtures
import six
from essential import processutils
from essential import test
class UtilsTest(test.BaseTestCase):
# NOTE(jkoelker) Moar tests from nova need to be ported. But they
    # need to be mock'd out. Currently they require actually
# running code.
def test_execute_unknown_kwargs(self):
self.assertRaises(processutils.UnknownArgumentError,
processutils.execute,
hozer=True)
class ProcessExecutionErrorTest(test.BaseTestCase):
def test_defaults(self):
err = processutils.ProcessExecutionError()
self.assertTrue('None\n' in six.text_type(err))
self.assertTrue('code: -\n' in six.text_type(err))
def test_with_description(self):
description = 'The Narwhal Bacons at Midnight'
err = processutils.ProcessExecutionError(description=description)
self.assertTrue(description in six.text_type(err))
def test_with_exit_code(self):
exit_code = 0
err = processutils.ProcessExecutionError(exit_code=exit_code)
self.assertTrue(str(exit_code) in six.text_type(err))
def test_with_cmd(self):
cmd = 'telinit'
err = processutils.ProcessExecutionError(cmd=cmd)
self.assertTrue(cmd in six.text_type(err))
def test_with_stdout(self):
stdout = """
Lo, praise of the prowess of people-kings
of spear-armed Danes, in days long sped,
we have heard, and what honor the athelings won!
Oft Scyld the Scefing from squadroned foes,
from many a tribe, the mead-bench tore,
awing the earls. Since erst he lay
friendless, a foundling, fate repaid him:
for he waxed under welkin, in wealth he throve,
till before him the folk, both far and near,
who house by the whale-path, heard his mandate,
gave him gifts: a good king he!
To him an heir was afterward born,
a son in his halls, whom heaven sent
to favor the folk, feeling their woe
that erst they had lacked an earl for leader
so long a while; the Lord endowed him,
the Wielder of Wonder, with world's renown.
""".strip()
err = processutils.ProcessExecutionError(stdout=stdout)
print(six.text_type(err))
self.assertTrue('people-kings' in six.text_type(err))
def test_with_stderr(self):
stderr = 'Cottonian library'
err = processutils.ProcessExecutionError(stderr=stderr)
self.assertTrue(stderr in six.text_type(err))
def test_retry_on_failure(self):
fd, tmpfilename = tempfile.mkstemp()
_, tmpfilename2 = tempfile.mkstemp()
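        # tmpfilename holds the generated shell script; tmpfilename2 is
        # passed to it as "$1" and doubles as run counter / failure flag.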
try:
fp = os.fdopen(fd, 'w+')
fp.write('''#!/bin/sh
# If stdin fails to get passed during one of the runs, make a note.
if ! grep -q foo
then
echo 'failure' > "$1"
fi
# If stdin has failed to get passed during this or a previous run, exit early.
if grep failure "$1"
then
exit 1
fi
runs="$(cat $1)"
if [ -z "$runs" ]
then
runs=0
fi
runs=$(($runs + 1))
echo $runs > "$1"
exit 1
''')
fp.close()
os.chmod(tmpfilename, 0o755)
self.assertRaises(processutils.ProcessExecutionError,
processutils.execute,
tmpfilename, tmpfilename2, attempts=10,
process_input='foo',
delay_on_retry=False)
fp = open(tmpfilename2, 'r')
runs = fp.read()
fp.close()
self.assertNotEqual(runs.strip(), 'failure', 'stdin did not '
'always get passed '
'correctly')
runs = int(runs.strip())
self.assertEqual(runs, 10, 'Ran %d times instead of 10.' % (runs,))
finally:
os.unlink(tmpfilename)
os.unlink(tmpfilename2)
def test_unknown_kwargs_raises_error(self):
self.assertRaises(processutils.UnknownArgumentError,
processutils.execute,
'/usr/bin/env', 'true',
this_is_not_a_valid_kwarg=True)
def test_check_exit_code_boolean(self):
processutils.execute('/usr/bin/env', 'false', check_exit_code=False)
self.assertRaises(processutils.ProcessExecutionError,
processutils.execute,
'/usr/bin/env', 'false', check_exit_code=True)
def test_check_exit_code_list(self):
processutils.execute('/usr/bin/env', 'sh', '-c', 'exit 101',
check_exit_code=(101, 102))
processutils.execute('/usr/bin/env', 'sh', '-c', 'exit 102',
check_exit_code=(101, 102))
self.assertRaises(processutils.ProcessExecutionError,
processutils.execute,
'/usr/bin/env', 'sh', '-c', 'exit 103',
check_exit_code=(101, 102))
self.assertRaises(processutils.ProcessExecutionError,
processutils.execute,
'/usr/bin/env', 'sh', '-c', 'exit 0',
check_exit_code=(101, 102))
def test_no_retry_on_success(self):
fd, tmpfilename = tempfile.mkstemp()
_, tmpfilename2 = tempfile.mkstemp()
try:
fp = os.fdopen(fd, 'w+')
fp.write("""#!/bin/sh
# If we've already run, bail out.
grep -q foo "$1" && exit 1
# Mark that we've run before.
echo foo > "$1"
# Check that stdin gets passed correctly.
grep foo
""")
fp.close()
os.chmod(tmpfilename, 0o755)
processutils.execute(tmpfilename,
tmpfilename2,
process_input='foo',
attempts=2)
finally:
os.unlink(tmpfilename)
os.unlink(tmpfilename2)
def test_retry_on_communicate_error(self):
self.called = False
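        # fake_communicate fails with EAGAIN on the first call and succeeds
        # on the second, so this passes only if execute() retries the
        # transient error.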
def fake_communicate(*args, **kwargs):
if self.called:
return ('', '')
self.called = True
e = OSError('foo')
e.errno = errno.EAGAIN
raise e
self.useFixture(fixtures.MonkeyPatch(
'subprocess.Popen.communicate', fake_communicate))
processutils.execute('/usr/bin/env', 'true', check_exit_code=False)
self.assertTrue(self.called)
def fake_execute(*cmd, **kwargs):
return 'stdout', 'stderr'
def fake_execute_raises(*cmd, **kwargs):
raise processutils.ProcessExecutionError(exit_code=42,
stdout='stdout',
stderr='stderr',
cmd=['this', 'is', 'a',
'command'])
class TryCmdTestCase(test.BaseTestCase):
def test_keep_warnings(self):
self.useFixture(fixtures.MonkeyPatch(
'essential.processutils.execute', fake_execute))
o, e = processutils.trycmd('this is a command'.split(' '))
self.assertNotEqual('', o)
self.assertNotEqual('', e)
def test_keep_warnings_from_raise(self):
self.useFixture(fixtures.MonkeyPatch(
'essential.processutils.execute', fake_execute_raises))
o, e = processutils.trycmd('this is a command'.split(' '),
discard_warnings=True)
self.assertIsNotNone(o)
self.assertNotEqual('', e)
def test_discard_warnings(self):
self.useFixture(fixtures.MonkeyPatch(
'essential.processutils.execute', fake_execute))
o, e = processutils.trycmd('this is a command'.split(' '),
discard_warnings=True)
self.assertIsNotNone(o)
self.assertEqual('', e)
class FakeSshChannel(object):
def __init__(self, rc):
self.rc = rc
def recv_exit_status(self):
return self.rc
class FakeSshStream(six.StringIO):
def setup_channel(self, rc):
self.channel = FakeSshChannel(rc)
class FakeSshConnection(object):
def __init__(self, rc):
self.rc = rc
def exec_command(self, cmd):
stdout = FakeSshStream('stdout')
stdout.setup_channel(self.rc)
return (six.StringIO(),
stdout,
six.StringIO('stderr'))
class SshExecuteTestCase(test.BaseTestCase):
def test_invalid_addl_env(self):
self.assertRaises(processutils.InvalidArgumentError,
processutils.ssh_execute,
None, 'ls', addl_env='important')
def test_invalid_process_input(self):
self.assertRaises(processutils.InvalidArgumentError,
processutils.ssh_execute,
None, 'ls', process_input='important')
def test_works(self):
o, e = processutils.ssh_execute(FakeSshConnection(0), 'ls')
self.assertEqual('stdout', o)
self.assertEqual('stderr', e)
def test_fails(self):
self.assertRaises(processutils.ProcessExecutionError,
processutils.ssh_execute, FakeSshConnection(1), 'ls')
|
{
"content_hash": "1fd79c8a0598540ed4467ba7f4f90b27",
"timestamp": "",
"source": "github",
"line_count": 267,
"max_line_length": 79,
"avg_line_length": 35.40823970037453,
"alnum_prop": 0.5645229532473027,
"repo_name": "zhangxiaolins/python_base",
"id": "fadc4f22970da28eb551f25156719e502d67378d",
"size": "10091",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/test_processutils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1440663"
}
],
"symlink_target": ""
}
|
from oslo_serialization import jsonutils as json
from six.moves.urllib import parse as urllib
from tempest.lib.api_schema.response.volume import group_types as schema
from tempest.lib.common import rest_client
from tempest.lib.services.volume import base_client
class GroupTypesClient(base_client.BaseClient):
"""Client class to send CRUD Volume V3 Group Types API requests"""
@property
def resource_type(self):
"""Returns the primary type of resource this client works with."""
return 'group-type'
def create_group_type(self, **kwargs):
"""Create group_type.
For a full list of available parameters, please refer to the official
API reference:
https://docs.openstack.org/api-ref/block-storage/v3/#create-group-type
"""
post_body = json.dumps({'group_type': kwargs})
resp, body = self.post('group_types', post_body)
body = json.loads(body)
self.validate_response(schema.create_group_type, resp, body)
return rest_client.ResponseBody(resp, body)
def delete_group_type(self, group_type_id):
"""Deletes the specified group_type."""
resp, body = self.delete("group_types/%s" % group_type_id)
self.validate_response(schema.delete_group_type, resp, body)
return rest_client.ResponseBody(resp, body)
def list_group_types(self, **params):
"""List all the group_types created.
For a full list of available parameters, please refer to the official
API reference:
https://docs.openstack.org/api-ref/block-storage/v3/#list-group-types
"""
url = 'group_types'
if params:
url += '?%s' % urllib.urlencode(params)
resp, body = self.get(url)
body = json.loads(body)
self.validate_response(schema.list_group_types, resp, body)
return rest_client.ResponseBody(resp, body)
def show_default_group_type(self):
"""Returns the details of default group_type.
For more information, please refer to the official API reference:
https://docs.openstack.org/api-ref/block-storage/v3/#show-default-group-type-details
"""
url = 'group_types/default'
resp, body = self.get(url)
body = json.loads(body)
self.expected_success(200, resp.status)
return rest_client.ResponseBody(resp, body)
def show_group_type(self, group_type_id):
"""Returns the details of a single group_type.
For more information, please refer to the official API reference:
https://docs.openstack.org/api-ref/block-storage/v3/#show-group-type-details
"""
url = "group_types/%s" % group_type_id
resp, body = self.get(url)
body = json.loads(body)
self.validate_response(schema.show_group_type, resp, body)
return rest_client.ResponseBody(resp, body)
def update_group_type(self, group_type_id, **kwargs):
"""Updates a group type.
For a full list of available parameters, please refer to the official
API reference:
https://docs.openstack.org/api-ref/block-storage/v3/#update-group-type
"""
post_body = json.dumps({'group_type': kwargs})
resp, body = self.put('group_types/%s' % group_type_id, post_body)
body = json.loads(body)
self.validate_response(schema.update_group_type, resp, body)
return rest_client.ResponseBody(resp, body)
def create_or_update_group_type_specs(self, group_type_id, group_specs):
"""Creates new group specs or updates existing group specs.
For a full list of available parameters, please refer to the official
API reference:
https://docs.openstack.org/api-ref/block-storage/v3/#create-or-update-group-specs-for-a-group-type
"""
url = "group_types/%s/group_specs" % group_type_id
post_body = json.dumps({'group_specs': group_specs})
resp, body = self.post(url, post_body)
body = json.loads(body)
self.validate_response(
schema.create_or_update_group_type_specs, resp, body)
return rest_client.ResponseBody(resp, body)
def list_group_type_specs(self, group_type_id):
"""Lists all group specs for a given group type."""
url = 'group_types/%s/group_specs' % group_type_id
resp, body = self.get(url)
body = json.loads(body)
self.validate_response(schema.list_group_type_specs, resp, body)
return rest_client.ResponseBody(resp, body)
def show_group_type_specs_item(self, group_type_id, spec_id):
"""Shows specified item of group specs for a given group type."""
url = "group_types/%s/group_specs/%s" % (group_type_id, spec_id)
resp, body = self.get(url)
body = json.loads(body)
self.validate_response(schema.show_group_type_specs_item, resp, body)
return rest_client.ResponseBody(resp, body)
def update_group_type_specs_item(self, group_type_id, spec_id, spec):
"""Updates specified item of group specs for a given group type.
For a full list of available parameters, please refer to the official
API reference:
https://docs.openstack.org/api-ref/block-storage/v3/#update-one-specific-group-spec-for-a-group-type
"""
url = "group_types/%s/group_specs/%s" % (group_type_id, spec_id)
put_body = json.dumps(spec)
resp, body = self.put(url, put_body)
body = json.loads(body)
self.validate_response(schema.update_group_type_specs_item, resp, body)
return rest_client.ResponseBody(resp, body)
def delete_group_type_specs_item(self, group_type_id, spec_id):
"""Deletes specified item of group specs for a given group type."""
resp, body = self.delete("group_types/%s/group_specs/%s" % (
group_type_id, spec_id))
self.validate_response(schema.delete_group_type_specs_item, resp, body)
return rest_client.ResponseBody(resp, body)
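# Usage sketch (hypothetical wiring; in a real tempest run the client is
# built from an auth provider and the service catalog, so the arguments
# below are placeholders):
#
#   client = GroupTypesClient(auth_provider, 'volumev3', 'RegionOne')
#   created = client.create_group_type(name='tempest-group-type')
#   type_id = created['group_type']['id']
#   client.create_or_update_group_type_specs(type_id, {'key1': 'value1'})
#   client.delete_group_type(type_id)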
|
{
"content_hash": "d49e96bbce641b12f04c5b485c9808aa",
"timestamp": "",
"source": "github",
"line_count": 139,
"max_line_length": 108,
"avg_line_length": 43.381294964028775,
"alnum_prop": 0.6490878938640132,
"repo_name": "cisco-openstack/tempest",
"id": "e0bf5e2ae6cf14fea3fbbb5767ac760abdf381dc",
"size": "6680",
"binary": false,
"copies": "1",
"ref": "refs/heads/proposed",
"path": "tempest/lib/services/volume/v3/group_types_client.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "4431271"
},
{
"name": "Shell",
"bytes": "7435"
}
],
"symlink_target": ""
}
|
from docker import Client
from docker.utils import kwargs_from_env
from subprocess import call
cli = Client(**kwargs_from_env(assert_hostname=False))
def scale(container, cpu_usage, scale_dict):
scale_num = scale_dict[container]
    if cpu_usage > 30:
        print("Will scale [%s] to %d instances." % (container, scale_num + 1))
        call(["docker-compose", "scale", "%s=%s" % (container, str(scale_num + 1))])
        return "scaled"
    elif cpu_usage < 5 and scale_num > 1:
        print("Will reduce [%s] to %d instances." % (container, scale_num - 1))
        call(["docker-compose", "scale", "%s=%s" % (container, str(scale_num - 1))])
        return "scaled"
    return "unscaled"
while True:
# ls = [x for x in lst if (("a" in x) or ("b" in x))]
# get names of all containers in an array
container_list = [str(x["Names"][0]) for x in cli.containers()]
    # keep only the result-app and worker containers
short_cont_list = [x[1:] for x in container_list if (("result-app" in x) or ("worker" in x))]
# keep the number of instances for each category to define the number to scale
scale_dict = {}
scale_dict["result-app"] = len([x for x in short_cont_list if ("result-app" in x)])
scale_dict["worker"] = len([x for x in short_cont_list if ("worker" in x)])
for container in short_cont_list:
# gather cpu stats
stats_obj = cli.stats(container, stream=False)
cpuDelta = stats_obj['cpu_stats']['cpu_usage']['total_usage'] - stats_obj['precpu_stats']['cpu_usage']['total_usage']
systemDelta = float(stats_obj['cpu_stats']['system_cpu_usage']) - float(stats_obj['precpu_stats']['system_cpu_usage'])
cpu_usage = (cpuDelta/systemDelta) * float(len(stats_obj['cpu_stats']['cpu_usage']['percpu_usage'])) * 100.0
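        # cpuDelta/systemDelta is this container's share of CPU time since
        # the previous sample; scaling by the CPU count and 100 yields the
        # same percentage that `docker stats` reports.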
# decide whether to scale
scaled = ""
if "result-app" in container:
scaled = scale("result-app", cpu_usage, scale_dict)
elif "worker" in container:
scaled = scale("worker", cpu_usage, scale_dict)
# if scaled break the loop to avoid scaling multiple times
if scaled == "scaled":
break
|
{
"content_hash": "f3b80f5dc0b9e85f4f64cc8c15749612",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 122,
"avg_line_length": 43.61702127659574,
"alnum_prop": 0.6478048780487805,
"repo_name": "mwellner/voting",
"id": "ff41235d2665125b5123587800a222b4a1f3a8e7",
"size": "2050",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "autoscale.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "400"
},
{
"name": "HTML",
"bytes": "4009"
},
{
"name": "JavaScript",
"bytes": "8727"
},
{
"name": "Nginx",
"bytes": "387"
},
{
"name": "Python",
"bytes": "2123"
},
{
"name": "Shell",
"bytes": "202"
}
],
"symlink_target": ""
}
|
from django.db import models
from django.template import Context, Template
from django.test import SimpleTestCase, TestCase, override_settings
from django.test.utils import isolate_apps
from .models import (
AbstractBase1, AbstractBase2, AbstractBase3, Child1, Child2, Child3,
Child4, Child5, Child6, Child7, RelatedModel, RelationModel,
)
class ManagersRegressionTests(TestCase):
def test_managers(self):
a1 = Child1.objects.create(name='fred', data='a1')
a2 = Child1.objects.create(name='barney', data='a2')
b1 = Child2.objects.create(name='fred', data='b1', value=1)
b2 = Child2.objects.create(name='barney', data='b2', value=42)
c1 = Child3.objects.create(name='fred', data='c1', comment='yes')
c2 = Child3.objects.create(name='barney', data='c2', comment='no')
d1 = Child4.objects.create(name='fred', data='d1')
d2 = Child4.objects.create(name='barney', data='d2')
fred1 = Child5.objects.create(name='fred', comment='yes')
Child5.objects.create(name='barney', comment='no')
f1 = Child6.objects.create(name='fred', data='f1', value=42)
f2 = Child6.objects.create(name='barney', data='f2', value=42)
fred2 = Child7.objects.create(name='fred')
barney = Child7.objects.create(name='barney')
self.assertSequenceEqual(Child1.manager1.all(), [a1])
self.assertSequenceEqual(Child1.manager2.all(), [a2])
self.assertSequenceEqual(Child1._default_manager.all(), [a1])
self.assertSequenceEqual(Child2._default_manager.all(), [b1])
self.assertSequenceEqual(Child2.restricted.all(), [b2])
self.assertSequenceEqual(Child3._default_manager.all(), [c1])
self.assertSequenceEqual(Child3.manager1.all(), [c1])
self.assertSequenceEqual(Child3.manager2.all(), [c2])
# Since Child6 inherits from Child4, the corresponding rows from f1 and
# f2 also appear here. This is the expected result.
self.assertSequenceEqual(
Child4._default_manager.order_by('data'),
[d1, d2, f1.child4_ptr, f2.child4_ptr],
)
self.assertCountEqual(Child4.manager1.all(), [d1, f1.child4_ptr])
self.assertCountEqual(Child5._default_manager.all(), [fred1])
self.assertCountEqual(Child6._default_manager.all(), [f1, f2])
self.assertSequenceEqual(
Child7._default_manager.order_by('name'),
[barney, fred2],
)
def test_abstract_manager(self):
# Accessing the manager on an abstract model should
# raise an attribute error with an appropriate message.
        # This error message isn't ideal, but when the model is abstract,
        # much of the class instantiation logic isn't invoked; if the
        # manager is implied, we don't get a hook to install the
        # error-raising manager.
msg = "type object 'AbstractBase3' has no attribute 'objects'"
with self.assertRaisesMessage(AttributeError, msg):
AbstractBase3.objects.all()
def test_custom_abstract_manager(self):
# Accessing the manager on an abstract model with a custom
# manager should raise an attribute error with an appropriate
# message.
msg = "Manager isn't available; AbstractBase2 is abstract"
with self.assertRaisesMessage(AttributeError, msg):
AbstractBase2.restricted.all()
def test_explicit_abstract_manager(self):
# Accessing the manager on an abstract model with an explicit
# manager should raise an attribute error with an appropriate
# message.
msg = "Manager isn't available; AbstractBase1 is abstract"
with self.assertRaisesMessage(AttributeError, msg):
AbstractBase1.objects.all()
@override_settings(TEST_SWAPPABLE_MODEL='managers_regress.Parent')
@isolate_apps('managers_regress')
def test_swappable_manager(self):
class SwappableModel(models.Model):
class Meta:
swappable = 'TEST_SWAPPABLE_MODEL'
# Accessing the manager on a swappable model should
# raise an attribute error with a helpful message
msg = (
"Manager isn't available; 'managers_regress.SwappableModel' "
"has been swapped for 'managers_regress.Parent'"
)
with self.assertRaisesMessage(AttributeError, msg):
SwappableModel.objects.all()
@override_settings(TEST_SWAPPABLE_MODEL='managers_regress.Parent')
@isolate_apps('managers_regress')
def test_custom_swappable_manager(self):
class SwappableModel(models.Model):
stuff = models.Manager()
class Meta:
swappable = 'TEST_SWAPPABLE_MODEL'
# Accessing the manager on a swappable model with an
# explicit manager should raise an attribute error with a
# helpful message
msg = (
"Manager isn't available; 'managers_regress.SwappableModel' "
"has been swapped for 'managers_regress.Parent'"
)
with self.assertRaisesMessage(AttributeError, msg):
SwappableModel.stuff.all()
@override_settings(TEST_SWAPPABLE_MODEL='managers_regress.Parent')
@isolate_apps('managers_regress')
def test_explicit_swappable_manager(self):
class SwappableModel(models.Model):
objects = models.Manager()
class Meta:
swappable = 'TEST_SWAPPABLE_MODEL'
# Accessing the manager on a swappable model with an
# explicit manager should raise an attribute error with a
# helpful message
msg = (
"Manager isn't available; 'managers_regress.SwappableModel' "
"has been swapped for 'managers_regress.Parent'"
)
with self.assertRaisesMessage(AttributeError, msg):
SwappableModel.objects.all()
def test_regress_3871(self):
related = RelatedModel.objects.create()
relation = RelationModel()
relation.fk = related
relation.gfk = related
relation.save()
relation.m2m.add(related)
t = Template('{{ related.test_fk.all.0 }}{{ related.test_gfk.all.0 }}{{ related.test_m2m.all.0 }}')
self.assertEqual(
t.render(Context({'related': related})),
''.join([str(relation.pk)] * 3),
)
def test_field_can_be_called_exact(self):
# Make sure related managers core filters don't include an
# explicit `__exact` lookup that could be interpreted as a
# reference to a foreign `exact` field. refs #23940.
related = RelatedModel.objects.create(exact=False)
relation = related.test_fk.create()
self.assertEqual(related.test_fk.get(), relation)
@isolate_apps('managers_regress')
class TestManagerInheritance(SimpleTestCase):
def test_implicit_inheritance(self):
class CustomManager(models.Manager):
pass
class AbstractModel(models.Model):
custom_manager = CustomManager()
class Meta:
abstract = True
class PlainModel(models.Model):
custom_manager = CustomManager()
self.assertIsInstance(PlainModel._base_manager, models.Manager)
self.assertIsInstance(PlainModel._default_manager, CustomManager)
class ModelWithAbstractParent(AbstractModel):
pass
self.assertIsInstance(ModelWithAbstractParent._base_manager, models.Manager)
self.assertIsInstance(ModelWithAbstractParent._default_manager, CustomManager)
class ProxyModel(PlainModel):
class Meta:
proxy = True
self.assertIsInstance(ProxyModel._base_manager, models.Manager)
self.assertIsInstance(ProxyModel._default_manager, CustomManager)
class MTIModel(PlainModel):
pass
self.assertIsInstance(MTIModel._base_manager, models.Manager)
self.assertIsInstance(MTIModel._default_manager, CustomManager)
def test_default_manager_inheritance(self):
class CustomManager(models.Manager):
pass
class AbstractModel(models.Model):
another_manager = models.Manager()
custom_manager = CustomManager()
class Meta:
default_manager_name = 'custom_manager'
abstract = True
class PlainModel(models.Model):
another_manager = models.Manager()
custom_manager = CustomManager()
class Meta:
default_manager_name = 'custom_manager'
self.assertIsInstance(PlainModel._default_manager, CustomManager)
class ModelWithAbstractParent(AbstractModel):
pass
self.assertIsInstance(ModelWithAbstractParent._default_manager, CustomManager)
class ProxyModel(PlainModel):
class Meta:
proxy = True
self.assertIsInstance(ProxyModel._default_manager, CustomManager)
class MTIModel(PlainModel):
pass
self.assertIsInstance(MTIModel._default_manager, CustomManager)
def test_base_manager_inheritance(self):
class CustomManager(models.Manager):
pass
class AbstractModel(models.Model):
another_manager = models.Manager()
custom_manager = CustomManager()
class Meta:
base_manager_name = 'custom_manager'
abstract = True
class PlainModel(models.Model):
another_manager = models.Manager()
custom_manager = CustomManager()
class Meta:
base_manager_name = 'custom_manager'
self.assertIsInstance(PlainModel._base_manager, CustomManager)
class ModelWithAbstractParent(AbstractModel):
pass
self.assertIsInstance(ModelWithAbstractParent._base_manager, CustomManager)
class ProxyModel(PlainModel):
class Meta:
proxy = True
self.assertIsInstance(ProxyModel._base_manager, CustomManager)
class MTIModel(PlainModel):
pass
self.assertIsInstance(MTIModel._base_manager, CustomManager)
def test_manager_no_duplicates(self):
class CustomManager(models.Manager):
pass
class AbstractModel(models.Model):
custom_manager = models.Manager()
class Meta:
abstract = True
class TestModel(AbstractModel):
custom_manager = CustomManager()
self.assertEqual(TestModel._meta.managers, (TestModel.custom_manager,))
self.assertEqual(TestModel._meta.managers_map, {'custom_manager': TestModel.custom_manager})
def test_manager_class_getitem(self):
self.assertIs(models.Manager[Child1], models.Manager)
|
{
"content_hash": "7ed52183a95ab8e887b9facd72537514",
"timestamp": "",
"source": "github",
"line_count": 288,
"max_line_length": 107,
"avg_line_length": 37.666666666666664,
"alnum_prop": 0.6428834808259587,
"repo_name": "adamchainz/django",
"id": "c18cd8adda13a49d91a95baf2e1cdbcf98aed442",
"size": "10848",
"binary": false,
"copies": "11",
"ref": "refs/heads/master",
"path": "tests/managers_regress/tests.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "84602"
},
{
"name": "HTML",
"bytes": "223742"
},
{
"name": "JavaScript",
"bytes": "138208"
},
{
"name": "Makefile",
"bytes": "125"
},
{
"name": "Python",
"bytes": "14372826"
},
{
"name": "Shell",
"bytes": "809"
},
{
"name": "Smarty",
"bytes": "142"
}
],
"symlink_target": ""
}
|
import os
import sys
import codecs
from collections import defaultdict
import unittest
from models import *
class Dump(object):
def __init__(self, downloaded_dir=None):
self.downloaded_dir = downloaded_dir
self.iso_languages_3 = {}
self.iso_languages_2 = {}
self.iso_languages_1 = {}
self.feature_classes = {}
self.feature_types_bg = {}
self.feature_types_en = {}
self.feature_types_nb = {}
self.feature_types_nn = {}
self.feature_types_no = {}
self.feature_types_ru = {}
self.feature_types_sv = {}
self.countries = {}
self.admin1_codes = {}
self.admin2_codes = {}
self.timezones = {}
self.alternate_names = defaultdict(list)
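        # alternate_names maps a geoname_id to the list of AlternateName
        # records for that place (one place can carry many localized names).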
def fill(self):
self.fill_iso_languages()
self.fill_feature_classes()
self.fill_feature_codes_bg()
self.fill_feature_codes_en()
self.fill_feature_codes_nb()
self.fill_feature_codes_nn()
self.fill_feature_codes_no()
self.fill_feature_codes_ru()
self.fill_feature_codes_sv()
self.fill_country_info()
self.fill_admin1_codes()
self.fill_admin2_codes()
self.fill_timezones()
self.fill_alternate_names()
def fill_iso_languages(self):
filename = u'alternateNames/iso-languagecodes.txt'
if self.downloaded_dir:
filename = os.path.join(self.downloaded_dir, filename)
with codecs.open(filename, encoding='utf8') as file:
for line in file.readlines():
line = line.split(u'\t')
line = map(str.strip, line)
iso_language = ISOLanguage(*line)
self.iso_languages_3[iso_language.iso_639_3] = iso_language
self.iso_languages_2[iso_language.iso_639_2] = iso_language
self.iso_languages_1[iso_language.iso_639_1] = iso_language
def fill_feature_classes(self):
self.feature_classes = {
'A': FeatureClass('A', 'country, state, region'),
'H': FeatureClass('H', 'stream, lake'),
'L': FeatureClass('L', 'parks, area'),
'P': FeatureClass('P', 'city, village'),
'R': FeatureClass('R', 'road, railroad'),
'S': FeatureClass('S', 'spot, building, farm'),
'T': FeatureClass('T', 'mountain, hill, rock'),
'U': FeatureClass('U', 'undersea'),
'V': FeatureClass('V', 'forest, heath'),
}
def fill_feature_codes_bg(self):
filename = u'featureCodes_bg.txt'
if self.downloaded_dir:
filename = os.path.join(self.downloaded_dir, filename)
with codecs.open(filename, encoding='utf8') as file:
for line in file.readlines():
line = line.split(u'\t')
line = map(str.strip, line)
feature_code = FeatureCode(*line)
self.feature_types_bg[feature_code.name] = feature_code
def fill_feature_codes_en(self):
filename = u'featureCodes_en.txt'
if self.downloaded_dir:
filename = os.path.join(self.downloaded_dir, filename)
with codecs.open(filename, encoding='utf8') as file:
for line in file.readlines():
line = line.split(u'\t')
line = map(str.strip, line)
feature_code = FeatureCode(*line)
self.feature_types_en[feature_code.name] = feature_code
def fill_feature_codes_nb(self):
filename = u'featureCodes_nb.txt'
if self.downloaded_dir:
filename = os.path.join(self.downloaded_dir, filename)
with codecs.open(filename, encoding='utf8') as file:
for line in file.readlines():
line = line.split(u'\t')
line = map(str.strip, line)
feature_code = FeatureCode(*line)
self.feature_types_nb[feature_code.name] = feature_code
def fill_feature_codes_nn(self):
filename = u'featureCodes_nn.txt'
if self.downloaded_dir:
filename = os.path.join(self.downloaded_dir, filename)
with codecs.open(filename, encoding='utf8') as file:
for line in file.readlines():
line = line.split(u'\t')
line = map(str.strip, line)
feature_code = FeatureCode(*line)
self.feature_types_nn[feature_code.name] = feature_code
def fill_feature_codes_no(self):
filename = u'featureCodes_no.txt'
if self.downloaded_dir:
filename = os.path.join(self.downloaded_dir, filename)
with codecs.open(filename, encoding='utf8') as file:
for line in file.readlines():
line = line.split(u'\t')
line = map(str.strip, line)
feature_code = FeatureCode(*line)
self.feature_types_no[feature_code.name] = feature_code
def fill_feature_codes_ru(self):
filename = u'featureCodes_ru.txt'
if self.downloaded_dir:
filename = os.path.join(self.downloaded_dir, filename)
with codecs.open(filename, encoding='utf8') as file:
for line in file.readlines():
line = line.split(u'\t')
line = map(str.strip, line)
feature_code = FeatureCode(*line)
self.feature_types_ru[feature_code.name] = feature_code
def fill_feature_codes_sv(self):
filename = u'featureCodes_sv.txt'
if self.downloaded_dir:
filename = os.path.join(self.downloaded_dir, filename)
with codecs.open(filename, encoding='utf8') as file:
for line in file.readlines():
line = line.split(u'\t')
line = map(str.strip, line)
feature_code = FeatureCode(*line)
self.feature_types_sv[feature_code.name] = feature_code
def fill_country_info(self):
filename = u'countryInfo.txt'
if self.downloaded_dir:
filename = os.path.join(self.downloaded_dir, filename)
with codecs.open(filename, encoding='utf8') as file:
for line in file.readlines():
if line.startswith(u'#'):
continue
line = line.split(u'\t')
line = map(str.strip, line)
country = CountryInfo(*line)
self.countries[country.ISO] = country
def fill_admin1_codes(self):
filename = u'admin1CodesASCII.txt'
if self.downloaded_dir:
filename = os.path.join(self.downloaded_dir, filename)
with codecs.open(filename, encoding='utf8') as file:
for line in file.readlines():
line = line.split(u'\t')
line = map(str.strip, line)
admin = AdminCode(*line)
self.admin1_codes[admin.code] = admin
def fill_admin2_codes(self):
filename = u'admin2Codes.txt'
if self.downloaded_dir:
filename = os.path.join(self.downloaded_dir, filename)
with codecs.open(filename, encoding='utf8') as file:
for line in file.readlines():
line = line.split(u'\t')
line = map(str.strip, line)
admin = AdminCode(*line)
self.admin2_codes[admin.code] = admin
def fill_timezones(self):
filename = u'timeZones.txt'
if self.downloaded_dir:
filename = os.path.join(self.downloaded_dir, filename)
with codecs.open(filename, encoding='utf8') as file:
for line in file.readlines()[1:]:
line = line.split(u'\t')
line = map(str.strip, line)
timezone = TimeZone(*line)
self.timezones[timezone.time_zone_id] = timezone
def fill_alternate_names(self):
filename = u'alternateNames/alternateNames.txt'
if self.downloaded_dir:
filename = os.path.join(self.downloaded_dir, filename)
with codecs.open(filename, encoding='utf8') as file:
for line in file.readlines():
line = line.split(u'\t')
line = map(str.strip, line)
alternate_name = AlternateName(*line)
self.alternate_names[alternate_name.geoname_id].append(alternate_name)
def get_geoname(self):
filename = u'allCountries/allCountries.txt'
if self.downloaded_dir:
filename = os.path.join(self.downloaded_dir, filename)
with codecs.open(filename, encoding='utf8') as file:
for line in file:
line = line.split(u'\t')
line = map(str.strip, line)
geoname = Geoname(*line)
yield geoname
class TestDump(unittest.TestCase):
def test_exist_iso_languages(self):
self.assertEqual(
dump.iso_languages_3['rus'],
ISOLanguage(iso_639_3=u'rus', iso_639_2=u'rus', iso_639_1=u'ru', language_name=u'Russian')
)
self.assertEqual(
dump.iso_languages_2['rus'],
ISOLanguage(iso_639_3=u'rus', iso_639_2=u'rus', iso_639_1=u'ru', language_name=u'Russian')
)
self.assertEqual(
dump.iso_languages_1['ru'],
ISOLanguage(iso_639_3=u'rus', iso_639_2=u'rus', iso_639_1=u'ru', language_name=u'Russian')
)
def test_exist_feature_classes(self):
self.assertEqual(
dump.feature_classes['L'],
FeatureClass(
name='L',
description='parks, area'
)
)
def test_exist_feature_codes_bg(self):
self.assertEqual(
dump.feature_types_bg['H.CNL'],
FeatureCode(
name=u'H.CNL',
short_description=u'канал',
long_description=u'an artificial watercourse'
)
)
def test_exist_feature_codes_en(self):
self.assertEqual(
dump.feature_types_en['H.CNL'],
FeatureCode(
name=u'H.CNL',
short_description=u'canal',
long_description=u'an artificial watercourse'
)
)
def test_exist_feature_codes_nb(self):
self.assertEqual(
dump.feature_types_nb['H.CNL'],
FeatureCode(
name=u'H.CNL',
short_description=u'kanal',
long_description=u'an artificial watercourse'
)
)
def test_exist_feature_codes_nn(self):
self.assertEqual(
dump.feature_types_nn['H.CNL'],
FeatureCode(
name=u'H.CNL',
short_description=u'kanal',
long_description=u'an artificial watercourse'
)
)
def test_exist_feature_codes_no(self):
self.assertEqual(
dump.feature_types_no['H.CNL'],
FeatureCode(
name=u'H.CNL',
short_description=u'kanal',
long_description=u'an artificial watercourse'
)
)
def test_exist_feature_codes_ru(self):
self.assertEqual(
dump.feature_types_ru['H.CNL'],
FeatureCode(
name=u'H.CNL',
short_description=u'канал',
long_description=u'искусственная река'
)
)
def test_exist_feature_codes_sv(self):
self.assertEqual(
dump.feature_types_sv['H.CNL'],
FeatureCode(
name=u'H.CNL',
short_description=u'kanal',
long_description=u'an artificial watercourse'
)
)
def test_exist_countries(self):
self.assertEqual(
dump.countries['RU'],
CountryInfo(
ISO=u'RU',
ISO3=u'RUS',
ISO_numeric=u'643',
fips=u'RS',
country=u'Russia',
capital=u'Moscow',
area=u'17100000',
population=u'140702000',
continent=u'EU',
tld=u'.ru',
currency_code=u'RUB',
currency_name=u'Ruble',
phone=u'7',
postal_code_format=u'######',
postal_code_regex=u'^(\\d{6})$',
Languages=u'ru,tt,xal,cau,ady,kv,ce,tyv,cv,udm,tut,mns,bua,myv,mdf,chm,ba,inh,tut,kbd,krc,ava,sah,nog',
geonameid=u'2017370',
neighbours=u'GE,CN,BY,UA,KZ,LV,PL,EE,LT,FI,MN,NO,AZ,KP',
equivalent_fips_code=u''
)
)
def test_exist_admin1_codes(self):
self.assertEqual(
dump.admin1_codes['SI.L1'],
AdminCode(
code=u'SI.L1',
name=u'Ribnica',
ascii_name=u'Ribnica',
geoname_id=u'3191679'
)
)
def test_exist_admin2_codes(self):
self.assertEqual(
dump.admin2_codes['PE.15.1506'],
AdminCode(
code=u'PE.15.1506',
name=u'Provincia de Huaral',
ascii_name=u'Provincia de Huaral',
geoname_id=u'3939284'
)
)
def test_exist_timezones(self):
self.assertEqual(
dump.timezones['Europe/Moscow'],
TimeZone(
country_code=u'RU',
time_zone_id=u'Europe/Moscow',
GMT_offset=u'4.0',
DST_offset=u'4.0',
raw_offset=u'4.0'
)
)
def test_exist_alternate_names(self):
self.assertEqual(
dump.alternate_names['17'][1],
AlternateName(
alternate_name_id=u'3556011',
geoname_id=u'17',
iso_language=u'fa',
alternate_name=u'Kūh-e Shotor Khvāb',
is_preferred_name=u'',
is_short_name=u'',
is_colloquial=u'',
is_historic=u''
)
)
def test_whitespace(self):
        for _dict in (dump.iso_languages_3, dump.feature_classes,
                      dump.feature_types_bg, dump.feature_types_en,
                      dump.feature_types_nb, dump.feature_types_nn,
                      dump.feature_types_no, dump.feature_types_ru,
                      dump.feature_types_sv, dump.countries,
                      dump.admin1_codes, dump.admin2_codes, dump.timezones):
for element in _dict.values():
for field in element._fields:
self.assertNotRegex(getattr(element, field), r'^\s+|\s+$', msg=element)
for _list in dump.alternate_names.values():
for element in _list:
for field in element._fields:
self.assertNotRegex(getattr(element, field), r'^\s+|\s+$', msg=element)
dump = Dump('downloaded')
if __name__ == '__main__':
dump.fill()
unittest.main()
|
{
"content_hash": "fbf392e2b800bb383483124ae25e93dd",
"timestamp": "",
"source": "github",
"line_count": 400,
"max_line_length": 297,
"avg_line_length": 37.34,
"alnum_prop": 0.535083020889127,
"repo_name": "DenisKuzin/geonames-dump",
"id": "375876ca70ec4b74e2f383487582e281cf1ec348",
"size": "14990",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dump.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "17978"
},
{
"name": "Shell",
"bytes": "783"
}
],
"symlink_target": ""
}
|
"""
Tweepy streaming API StreamListener message recognizers.
These recognize various types of incoming data based on predefined rules.
TODO: Probably don't need classes. Consider refactoring.
"""
import json
import tweepy
class MessageRecognizer(object):
def __init__(self, handler_method):
self.handler = handler_method
def match(self, stream_data):
"""Return True if this recognizer matches the incoming stream data."""
return False
def handle_message(self, stream_data):
"Handle the incoming message. Return False to stop stream processing."
return self.handler(stream_data)
class MatchAnyRecognizer(MessageRecognizer):
"""Match any stream_data."""
def match(self, stream_data):
return True
class DataStartsWithRecognizer(MessageRecognizer):
def __init__(self, handler_method, starts_with):
self.starts_with = starts_with
super(DataStartsWithRecognizer, self).__init__(handler_method)
def match(self, stream_data):
return stream_data.startswith(self.starts_with)
class DataContainsRecognizer(MessageRecognizer):
"""A simple string-in-message recognizer. Matches if a string is anywhere
in the stream_data body."""
def __init__(self, handler_method, match_string):
self.contains_string = match_string
super(DataContainsRecognizer, self).__init__(handler_method)
def match(self, stream_data):
return self.contains_string in stream_data
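if __name__ == '__main__':
    # Minimal demo (a sketch, not part of the streamer package): recognizers
    # are checked in order and the first matching recognizer's handler gets
    # the raw payload; a handler returning False stops stream processing.
    def print_handler(stream_data):
        print('matched: %r' % stream_data)
        return True
    sample = '{"text": "hello"}'
    recognizers = [
        DataStartsWithRecognizer(print_handler, '{'),
        MatchAnyRecognizer(print_handler),
    ]
    for recognizer in recognizers:
        if recognizer.match(sample):
            recognizer.handle_message(sample)
            break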
|
{
"content_hash": "0d7af084ccb6778aa7e5e6d93bd128e1",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 79,
"avg_line_length": 29.58823529411765,
"alnum_prop": 0.6984758117958914,
"repo_name": "inactivist/twitter-streamer",
"id": "8e4742a3fa27af1bc6d705df7ad1a45051372ea1",
"size": "1509",
"binary": false,
"copies": "1",
"ref": "refs/heads/development",
"path": "streamer/message_recognizers.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "27925"
},
{
"name": "Shell",
"bytes": "213"
}
],
"symlink_target": ""
}
|
from node import *
class Bst:
def __init__(self, key_cmp = None):
self.cnt = 0
self.head = Node()
self.head.l = self.head.r = self.head.p = self.head
self.head.data = -1
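        # head is a sentinel: head.p points at the root (or back at head
        # for an empty tree) and leaves use head in place of None children.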
def node_cmp(x, y):
if not isinstance(x, Node) or not isinstance(y, Node):
raise TypeError("Must be Node type")
else:
#print '%s cmp %s' % (x, y)
return x.data < y.data
self.key_cmp = key_cmp
if key_cmp is None:
self.key_cmp = node_cmp
def size(self):
return self.cnt
def root(self):
return parent(self.head)
def left_most(self):
return bst_left_most(self.root(), self.head)
def right_most(self):
return bst_right_most(self.root(), self.head)
def right_rotation(self, node):
bst_right_rotation(node, self.head)
def left_rotation(self, node):
bst_left_rotation(node, self.head)
def is_root(self, node):
return parent(node) is self.head
def empty(self):
return self.size() == 0
def node_str(self, node):
return str(node)
    def node_shape_recursive(self, subtree, table_cnt = 0):
        delimiter = table_cnt * '\t'
        if subtree is self.head:
            print delimiter + "[None]"
        else:
            self.node_shape_recursive(right(subtree), table_cnt + 1)
            print delimiter + self.node_str(subtree)
            self.node_shape_recursive(left(subtree), table_cnt + 1)
    def tree_shape(self):
        self.node_shape_recursive(self.root())
def next(self, node):
return bst_successor(node, self.head)
def prev(self, node):
return bst_predecessor(node, self.head)
    def minimum(self):
if not self.empty():
return self.left_most()
else:
return self.head
def maximum(self):
if not self.empty():
return self.right_most()
else:
return self.head
    def find_insert_position(self, node):
        node.l = node.r = self.head
        return bst_find_insert_position(self.root(), node, self.head, self.key_cmp)
    def insert_one_node(self, node, unique = False):
        if not isinstance(node, Node):
            raise TypeError('Node type required')
        p = self.find_insert_position(node)
# print 'newnode: %s, pos: %s' % (node, p)
if p is self.head:
self.head.p = node
node.p = self.head
self.cnt += 1
return node
else:
if unique:
pre_node = p
if self.key_cmp(node, p):
pre_node = self.prev(p)
if pre_node is not self.head and not self.key_cmp(node, pre_node) and not self.key_cmp(pre_node, node):
return None
node.p = p
if self.key_cmp(node, p):
p.l = node
else:
p.r = node
self.cnt += 1
return node
def delete_one_node(self, node):
d = delete = node
if left(delete) is not self.head and right(delete) is not self.head:
d = bst_successor(delete, self.head)
delete.copy_data_from(d)
n = left(d) if self.head is right(d) else right(d)
p = parent(d)
if d is left(p):
p.l = n
elif d is right(p):
p.r = n
else:
p.p = n
if n is not self.head:
n.p = p
del d
self.cnt -= 1
    def __iter__(self):
        if self.empty():
            return
        each = self.left_most()
        while each is not self.head:
            yield each
            each = self.next(each)
    def insert_data_list(self, data_list, unique = False):
        for data in data_list:
            node = Node(data = data)
            self.insert_one_node(node, unique = unique)
    def insert_data(self, data, unique = False):
        node = Node(data = data)
        self.insert_one_node(node, unique = unique)
def dot_node_option(self, node):
return {}
def render(self, dot_file_name, dot_format = 'png'):
if self.empty():
return
from graphviz import Digraph
dot = Digraph()
root = self.root()
dot.format = dot_format
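        # Emit one graphviz node per BST node; child links are drawn in
        # both directions to visualize parent pointers, and absent children
        # get fresh 'null' placeholder nodes.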
def all_nodes_and_edges(subtree):
dot.node(subtree.name, label = subtree.label, **self.dot_node_option(subtree))
if subtree.l is not self.head:
dot.edge(subtree.name, subtree.l.name)
dot.edge(subtree.l.name, subtree.name)
all_nodes_and_edges(subtree.l)
else:
null = Node()
dot.node(null.name, label = 'null')
dot.edge(subtree.name, null.name)
if subtree.r is not self.head:
dot.edge(subtree.name, subtree.r.name)
dot.edge(subtree.r.name, subtree.name)
all_nodes_and_edges(subtree.r)
else:
null = Node()
dot.node(null.name, label = 'null')
dot.edge(subtree.name, null.name)
all_nodes_and_edges(root)
dot.render(dot_file_name)
# def nodes_and_links(self):
# root = self.root()
# nodes = []
# links = {}
# def all_nodes_and_links(subtree):
# if subtree is not self.head:
# nodes.append(subtree)
if __name__ == '__main__':
tree = Bst(key_cmp = lambda x, y: x.data > y.data)
for i in range(3):
node1 = tree.insert_one_node(Node(data = 1))
node2 = tree.insert_one_node(Node(data = 2))
node3 = tree.insert_one_node(Node(data = 3))
tree.tree_shape()
for i in tree:
print i
# tree.delete_one_node(node1)
l = [1, 2, 3, 4, 5, 6]
tree.insert_data_list(l)
tree.tree_shape()
cnt = tree.size()
for i in range(cnt):
tree.delete_one_node(tree.root())
tree.tree_shape()
#tree.render()
raw_input()
|
{
"content_hash": "d3474fa5fc300304ae38d54a6ffb00ef",
"timestamp": "",
"source": "github",
"line_count": 210,
"max_line_length": 119,
"avg_line_length": 29.104761904761904,
"alnum_prop": 0.5166884816753927,
"repo_name": "wfyaibaib/py-algorithms",
"id": "72426050aa3daceede1877867d8c5c636ca30554",
"size": "6112",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bst.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "24456"
}
],
"symlink_target": ""
}
|
__author__ = 'lachlan'
from Flask_App import db
# Create mapping from db objects to named objects for simplicity
Model = db.Model
Column = db.Column
Enum = db.Enum
String = db.String
Integer = db.Integer
DateTime = db.DateTime
Boolean = db.Boolean
Table = db.Table
ForeignKey = db.ForeignKey
Index = db.Index
Metadata = db.MetaData
relationship = db.relationship
Float = db.Float
Text = db.Text
class Request(Model):
__tablename__ = 'request'
id = Column(Integer, primary_key=True, index=True)
uuid = Column(String, unique=True, index=True)
time = Column(DateTime)
person = Column(String)
class Thing(Model):
__tablename__ = 'thing'
id = Column(Integer, primary_key=True, index=True)
uuid = Column(String, unique=True, index=True)
name = Column(String)
image_url = Column(String)
description = Column(Text)
description_url = Column(String)
request_id = Column(Integer, ForeignKey('request.id'))
request = relationship('Request', backref="things")
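# Usage sketch (assumes an app context and an initialized Flask-SQLAlchemy
# `db`; the field values below are illustrative only):
#
#   req = Request(uuid='ab12', time=datetime.utcnow(), person='alice')
#   thing = Thing(name='Kookaburra', request=req)
#   db.session.add(req)
#   db.session.commit()
#   req.things  # -> [thing], via the backref declared on Thing.request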
|
{
"content_hash": "3df068d0745a3a5a8783e420cbb16c46",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 64,
"avg_line_length": 24.658536585365855,
"alnum_prop": 0.6973293768545994,
"repo_name": "QuicklyRainbow/FieldGuideAU",
"id": "4eaf78ecf5c8d76aa502fce93209563e7d17a782",
"size": "1011",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Flask_App/models/db_models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "127330"
},
{
"name": "HTML",
"bytes": "16697"
},
{
"name": "JavaScript",
"bytes": "42528"
},
{
"name": "Python",
"bytes": "21984"
}
],
"symlink_target": ""
}
|
"""Language model hyper-parameters."""
# Import ModelParams to ensure that they are added to the global registry.
# pylint: disable=unused-import
import lingvo.jax.tasks.lm.params.bert
import lingvo.jax.tasks.lm.params.lm_cloud
# pylint: enable=unused-import
|
{
"content_hash": "bd0ac864a72e52034fa24993d1b3c975",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 74,
"avg_line_length": 32.625,
"alnum_prop": 0.7816091954022989,
"repo_name": "tensorflow/lingvo",
"id": "6520e9e68bca0445829e0c1e3035a52778631f3b",
"size": "950",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lingvo/jax/tasks/lm/params/params.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "5163"
},
{
"name": "C++",
"bytes": "556344"
},
{
"name": "Dockerfile",
"bytes": "8484"
},
{
"name": "Jupyter Notebook",
"bytes": "36721"
},
{
"name": "Python",
"bytes": "9574124"
},
{
"name": "Shell",
"bytes": "50408"
},
{
"name": "Starlark",
"bytes": "182688"
},
{
"name": "TeX",
"bytes": "37275"
}
],
"symlink_target": ""
}
|
"""TABLE BERT utility functions."""
import dataclasses
import functools
from typing import Any, Optional
from tapas.models.bert import modeling
from tapas.utils import attention_utils
from tapas.utils import tableformer_utils
import tensorflow.compat.v1 as tf
from tensorflow.compat.v1 import estimator as tf_estimator
_AttentionMode = attention_utils.RestrictAttentionMode
@dataclasses.dataclass
class CustomAttention:
attention_mask: Optional[tf.Tensor] = None
custom_attention_layer: Optional[Any] = None
@dataclasses.dataclass(frozen=True)
class CustomAttentionConfig:
"""Helper config class to generate custom attention."""
restrict_attention_mode: _AttentionMode = _AttentionMode.FULL
restrict_attention_bucket_size: int = 0
restrict_attention_header_size: int = 0
restrict_attention_row_heads_ratio: float = 0.5
restrict_attention_sort_after_projection: bool = True
attention_bias_disabled: int = 0
attention_bias_use_relative_scalar_only: bool = True
def get_token_type_features():
return [
"segment_ids", "column_ids", "row_ids", "prev_label_ids", "column_ranks",
"inv_column_ranks", "numeric_relations"
]
def get_token_type_ids(features, token_type_features, disabled_features):
token_type_ids = []
disabled_ids = []
for i, key in enumerate(token_type_features):
if disabled_features is not None and key in disabled_features:
tf.logging.info("Disable %s", key)
disabled_ids.append(i)
token_type_ids.append(features[key])
return token_type_ids, disabled_ids
def run_custom_attention(
is_training,
num_attention_heads,
config,
features,
):
"""Extracts the custumized attention."""
attention_mask = None
custom_attention_layer = None
num_row_heads = int(num_attention_heads *
config.restrict_attention_row_heads_ratio)
num_column_heads = num_attention_heads - num_row_heads
restrict_attention_mode = config.restrict_attention_mode
bucket_size = config.restrict_attention_bucket_size
header_size = config.restrict_attention_header_size
if restrict_attention_mode == _AttentionMode.HEADWISE_SAME_COLUMN_OR_ROW:
attention_mask = attention_utils.compute_headwise_sparse_attention_mask(
num_row_heads=num_row_heads,
num_column_heads=num_column_heads,
bucket_size=bucket_size,
header_size=header_size,
**features)
elif restrict_attention_mode == _AttentionMode.SAME_COLUMN_OR_ROW:
attention_mask = attention_utils.compute_sparse_attention_mask(**features)
elif restrict_attention_mode == _AttentionMode.HEADWISE_EFFICIENT:
custom_attention_layer = attention_utils.create_bucketed_attention_layer(
input_mask=features["input_mask"],
input_header=tf.math.equal(features["segment_ids"], 0),
bucket_size=bucket_size,
header_size=header_size,
sort_after_projection=config.restrict_attention_sort_after_projection,
token_type_ids=[(num_row_heads, True, features["row_ids"]),
(num_column_heads, False, features["column_ids"])])
elif restrict_attention_mode == _AttentionMode.TABLE_ATTENTION:
custom_attention_layer = functools.partial(
modeling.attention_layer,
relative_relation_ids=tableformer_utils.get_relative_relation_ids(
features, disabled_attention_bias=config.attention_bias_disabled),
use_relative_scalar_only=config.attention_bias_use_relative_scalar_only,
relative_relation_ids_vocab_size=tableformer_utils
.RELATIVE_RELATION_IDS_VOCAB_SIZE,
)
elif restrict_attention_mode == _AttentionMode.FULL:
pass
else:
raise ValueError(f"Unknown attention mode: {restrict_attention_mode}")
return CustomAttention(
attention_mask=attention_mask,
custom_attention_layer=custom_attention_layer,
)
def create_model(
features,
mode,
bert_config,
restrict_attention_mode=_AttentionMode.FULL,
restrict_attention_bucket_size=0,
restrict_attention_header_size=None,
restrict_attention_row_heads_ratio=0.5,
restrict_attention_sort_after_projection=True,
token_weights=None,
disabled_features=None,
disable_position_embeddings=False,
reset_position_index_per_cell=False,
proj_value_length=None,
attention_bias_disabled=0,
attention_bias_use_relative_scalar_only=True,
):
"""Creates a TABLE BERT model."""
is_training = (mode == tf_estimator.ModeKeys.TRAIN)
token_type_features = get_token_type_features()
token_type_ids, disabled_ids = get_token_type_ids(
features=features,
token_type_features=token_type_features,
disabled_features=disabled_features)
custom_attention = run_custom_attention(
is_training=is_training,
num_attention_heads=bert_config.num_attention_heads,
config=CustomAttentionConfig(
restrict_attention_mode=restrict_attention_mode,
restrict_attention_row_heads_ratio=restrict_attention_row_heads_ratio,
restrict_attention_bucket_size=restrict_attention_bucket_size,
restrict_attention_header_size=restrict_attention_header_size,
restrict_attention_sort_after_projection=restrict_attention_sort_after_projection,
attention_bias_disabled=attention_bias_disabled,
attention_bias_use_relative_scalar_only=attention_bias_use_relative_scalar_only,
),
features=features,
)
return modeling.BertModel(
config=bert_config,
is_training=is_training,
input_ids=features["input_ids"],
input_mask=features["input_mask"],
attention_mask=custom_attention.attention_mask,
custom_attention_layer=custom_attention.custom_attention_layer,
token_weights=token_weights,
token_type_ids=token_type_ids,
disabled_ids=disabled_ids,
use_position_embeddings=not disable_position_embeddings,
reset_position_index_per_cell=reset_position_index_per_cell,
proj_value_length=proj_value_length,
)
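# A minimal usage sketch (not part of the original table_bert.py): how
# create_model might be invoked from an Estimator model_fn. `bert_config`
# and the `features` dict are assumed to come from the TAPAS input
# pipeline, the mode and bucket size are placeholders, and
# get_sequence_output() is the standard BERT accessor assumed here.
#
#   model = create_model(
#       features=features,
#       mode=tf_estimator.ModeKeys.TRAIN,
#       bert_config=bert_config,
#       restrict_attention_mode=_AttentionMode.HEADWISE_SAME_COLUMN_OR_ROW,
#       restrict_attention_bucket_size=16,
#   )
#   sequence_output = model.get_sequence_output()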
|
{
"content_hash": "831ddd082c763919733b4c01b510d28f",
"timestamp": "",
"source": "github",
"line_count": 155,
"max_line_length": 92,
"avg_line_length": 38.61290322580645,
"alnum_prop": 0.7197994987468672,
"repo_name": "google-research/tapas",
"id": "91e0cf6e5e886d4daf0753afe6a95753d8a51259",
"size": "6600",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tapas/models/bert/table_bert.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "58484"
},
{
"name": "Python",
"bytes": "1182377"
}
],
"symlink_target": ""
}
|
import os
from setuptools import setup
def read(fname):
"""Utility function to read the README file.
Used for long_description. It's nice, because now (1) we have a top level
README file and (2) it's easier to type in the README file than to put a raw
string in below."""
return open(os.path.join(os.path.dirname(__file__), fname)).read()
setup(
name="ipycache",
version="0.1.5dev",
author="Cyrille Rossant",
author_email="rossant@github",
description=("Defines a %%cache cell magic in the IPython notebook to "
"cache results of long-lasting computations in a persistent "
"pickle file."),
license="BSD-3-Clause",
keywords="ipython notebook cache",
url="https://github.com/rossant/ipycache",
py_modules=['ipycache'],
long_description=read('README.md'),
long_description_content_type='text/markdown',
classifiers=[
"Development Status :: 3 - Alpha",
"Topic :: Utilities",
"Framework :: IPython",
"License :: OSI Approved :: BSD License",
],
)
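# A minimal usage sketch of the %%cache magic described above, assuming
# the package is installed and running inside an IPython notebook; the
# cache filename and variable name are placeholders.
#
#   %load_ext ipycache
#
#   %%cache mycache.pkl var1
#   var1 = expensive_computation()
#
# On the first run the cell executes and pickles var1 into mycache.pkl;
# on later runs var1 is loaded from the file instead of being recomputed.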
|
{
"content_hash": "f3719c3f41adf1c8ec549ca83de32a9a",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 80,
"avg_line_length": 31.941176470588236,
"alnum_prop": 0.6353591160220995,
"repo_name": "rossant/ipycache",
"id": "f957e13c26031e0f9a3a6f60c0a869fd310b6dd5",
"size": "1086",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "24946"
}
],
"symlink_target": ""
}
|
read_file = open("text.txt", "r")
# Use a second file handler to open the file for writing
write_file = open("text.txt", "w")
# Write to the file
write_file.write("Not closing files is VERY BAD.")
write_file.close()
# Try to read from the file
print read_file.read()
read_file.close()
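# A safer sketch of the same exercise using context managers, which close
# the files automatically even if an error occurs:
#
#   with open("text.txt", "w") as write_file:
#       write_file.write("Not closing files is VERY BAD.")
#   with open("text.txt", "r") as read_file:
#       print read_file.read()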
|
{
"content_hash": "72eb8ba1bf707080c59c652236a7d9fc",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 56,
"avg_line_length": 22.153846153846153,
"alnum_prop": 0.6979166666666666,
"repo_name": "haozai309/hello_python",
"id": "ae4327ae1cdd3d291a453f04ead9739bc241c2b3",
"size": "316",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "codecademy/io/read_write.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "74317"
}
],
"symlink_target": ""
}
|
from __future__ import print_function
import unittest2
from lldbsuite.test.decorators import *
from lldbsuite.test.concurrent_base import ConcurrentEventsBase
from lldbsuite.test.lldbtest import TestBase
@skipIfWindows
class ConcurrentCrashWithWatchpoint(ConcurrentEventsBase):
mydir = ConcurrentEventsBase.compute_mydir(__file__)
@skipIfFreeBSD # timing out on buildbot
# Atomic sequences are not supported yet for MIPS in LLDB.
@skipIf(triple='^mips')
@expectedFailureNetBSD
@add_test_categories(["watchpoint"])
def test(self):
""" Test a thread that crashes while another thread hits a watchpoint."""
self.build(dictionary=self.getBuildFlags())
self.do_thread_actions(num_crash_threads=1, num_watchpoint_threads=1)
|
{
"content_hash": "8707f0d528e3a85631d075bc704d2c63",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 81,
"avg_line_length": 33.78260869565217,
"alnum_prop": 0.7477477477477478,
"repo_name": "apple/swift-lldb",
"id": "bf92488babc0218b8fb5c8dd201e80767fba13c4",
"size": "777",
"binary": false,
"copies": "5",
"ref": "refs/heads/stable",
"path": "packages/Python/lldbsuite/test/functionalities/thread/concurrent_events/TestConcurrentCrashWithWatchpoint.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "130449"
},
{
"name": "C",
"bytes": "198536"
},
{
"name": "C++",
"bytes": "27687071"
},
{
"name": "CMake",
"bytes": "172176"
},
{
"name": "DTrace",
"bytes": "334"
},
{
"name": "LLVM",
"bytes": "6106"
},
{
"name": "Makefile",
"bytes": "106804"
},
{
"name": "Objective-C",
"bytes": "106821"
},
{
"name": "Objective-C++",
"bytes": "25658"
},
{
"name": "Perl",
"bytes": "72175"
},
{
"name": "Python",
"bytes": "4680483"
},
{
"name": "Shell",
"bytes": "6573"
},
{
"name": "Swift",
"bytes": "260786"
},
{
"name": "Vim script",
"bytes": "8434"
}
],
"symlink_target": ""
}
|
"""
Connectivity
~~~~~~~~~~~~
Use the connectivity filter to remove noisy isosurfaces.
This example is very similar to `this VTK example <https://lorensen.github.io/VTKExamples/site/Python/VisualizationAlgorithms/PineRootConnectivity/>`__
"""
# sphinx_gallery_thumbnail_number = 2
import pyvista as pv
from pyvista import examples
###############################################################################
# Load a dataset that has noisy isosurfaces
mesh = examples.download_pine_roots()
cpos = [(40.6018, -280.533, 47.0172),
(40.6018, 37.2813, 50.1953),
(0.0, 0.0, 1.0)]
# Plot the raw data
p = pv.Plotter()
p.add_mesh(mesh, color='#965434')
p.add_mesh(mesh.outline())
p.show(cpos=cpos)
###############################################################################
# The mesh plotted above is very noisy. We can extract the largest connected
# isosurface in that mesh either by passing ``largest=True`` to the
# :func:`pyvista.DataSetFilters.connectivity` filter or by using the
# :func:`pyvista.DataSetFilters.extract_largest` filter (the two are
# equivalent).
# Grab the largest connected volume present
largest = mesh.connectivity(largest=True)
# or: largest = mesh.extract_largest()
p = pv.Plotter()
p.add_mesh(largest, color='#965434')
p.add_mesh(mesh.outline())
p.camera_position = cpos
p.show()
|
{
"content_hash": "3e37095e3bebfa8fcc481d752d8e5945",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 151,
"avg_line_length": 32.38095238095238,
"alnum_prop": 0.6404411764705882,
"repo_name": "akaszynski/vtkInterface",
"id": "210e8ea0bb00400ccded4733c069e1a4e5526025",
"size": "1360",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/01-filter/connectivity.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "184915"
}
],
"symlink_target": ""
}
|
import logging
import os
try:
import telescope.telescope.utils as telescope_utils
except ImportError:
raise Exception(('Could not find Telescope library. '
'Please verify all submodules are checked out.'))
import site_metadata
import telescope_data_parser
def _ensure_dir_exists(dir_path):
"""Ensures that a given directory path exists (creating it if necessary).
  Creates the given directory path if it does not already exist. For
  example, if dir_path='foo/bar/baz/' and only directory 'foo' exists,
  this function will create 'foo/bar/baz'.
Args:
dir_path: (str) Directory path to create.
"""
if not os.path.exists(dir_path):
os.makedirs(dir_path)
def _generate_output_path(group_key, output_dir, output_type):
"""Generates the output path for an output file.
Generates the output path (including output directory and filename),
given a group key and type of output data to be written to the file.
Args:
group_key: (str) The key that identifies this dataset.
output_dir: (str) The directory to which this file will be written.
output_type: (str) The type of data to be written (either 'daily' or
'hourly').
Returns:
(str) A generated path for the output file (stripped of illegal filename
characters).
"""
filename = '%s_%s.csv' % (group_key, output_type)
filename = telescope_utils.strip_special_chars(filename)
return os.path.join(output_dir, filename)
def _write_valid_keys_file(valid_keys, valid_keys_file):
"""Writes the valid result group keys to a file.
Writes the valid keys file, indicating the keys for which we generated
output data. The keys are written in plaintext with one key per line in
alphabetically sorted order.
Args:
valid_keys: (list) A list of strings indicating the valid keys.
valid_keys_file: (file) File to which to write the keys.
"""
keys_sorted = sorted(valid_keys)
valid_keys_file.write(os.linesep.join(keys_sorted))
class ResultConverter(object):
"""Converts Telescope data into Observatory format."""
def __init__(self, result_grouper, result_reducer, observatory_file_writer,
output_dir, valid_keys_path):
"""Creates a converter from Telescope data to Observatory data.
Args:
result_grouper: Result grouper, which groups Telescope results according
to their metadata.
result_reducer: Result reducer, which reduces sets of raw results into
aggregate values compatible with Observatory.
observatory_file_writer: File writer to write processed results into a
file format that Observatory can read from.
output_dir: (str) The directory to which to write converted results.
valid_keys_path: (str) The file path to which to write the valid group
keys created during the convert operation.
"""
self._logger = logging.getLogger('telescope-convert')
self._result_grouper = result_grouper
self._result_reducer = result_reducer
self._observatory_file_writer = observatory_file_writer
self._output_dir = output_dir
self._valid_keys_path = valid_keys_path
def convert_to_observatory_format(self, input_filenames):
"""Converts a list of files in Telescope format into Observatory format.
Parses a list of files output from Telescope and converts them to files
that Observatory can read, placing the results into self._output_dir.
Args:
input_filenames: (list) A list of files created by Telescope.
"""
result_readers = []
for filename in input_filenames:
result_readers.append(telescope_data_parser.SingleTelescopeResultReader(
filename))
result_groups = self._result_grouper.group_results(result_readers)
self._convert_result_groups(result_groups)
def _convert_result_groups(self, result_groups):
"""Converts Telescope result groups into Observatory format.
Args:
result_groups: (dict) A dictionary of raw Telescope results, keyed by
group key, then by metric name, for example:
{
'lga01_comcast': {
'download_throughput': [
(<datetime-2014-10-22@12:35:01>, 24.5),
(<datetime-2014-10-01@04:42:23>, 14.3),
(<datetime-2014-10-02@06:19:22>, 21.3),
...
],
'upload_throughput': ...,
},
'sea01_verizon': {
'download_throughput': ...,
'upload_throughput': ...,
},
'mia02_twc': ...,
...
}
"""
group_keys = sorted(result_groups.keys())
for index, key in enumerate(group_keys):
self._logger.info('Converting result group %s (%u/%u)',
key, index + 1, len(group_keys))
metro = key[:3]
result_group_local = self._adjust_result_group_timezone(
metro, result_groups[key])
self._convert_result_group_by_day(key, result_group_local)
self._convert_result_group_by_hour(key, result_group_local)
_ensure_dir_exists(os.path.dirname(self._valid_keys_path))
with open(self._valid_keys_path, 'w') as valid_keys_file:
_write_valid_keys_file(group_keys, valid_keys_file)
def _adjust_result_group_timezone(self, metro, metric_results):
"""Converts the timestamps on a result group to local time.
Given a result group associated with a particular metro, creates a new
result group where all timestamps are local to the given metro.
Args:
metro: (str) Name of a metropolitan region associated with these results
(e.g. 'lga' or 'lax').
metric_results: (dict) A dictionary of raw Telescope results, keyed by
metric name, for example:
{
'download_throughput': [
(<datetime-2014-10-22@12:35:01>, 24.5),
(<datetime-2014-10-01@04:42:23>, 14.3),
(<datetime-2014-10-02@06:19:22>, 21.3),
...
],
'upload_throughput': [
(<datetime-2014-10-22@12:35:01>, 4.1),
(<datetime-2014-10-01@04:42:23>, 6.2),
(<datetime-2014-10-02@06:19:22>, 8.9),
...
]
}
Returns:
(dict) A dictionary in the same form as metric_results, but with the
timestamps converted to the local timezone.
"""
timezone = site_metadata.get_metro_timezone(metro)
metric_results_local = {}
for metric, values in metric_results.iteritems():
metric_results_local[metric] = []
for timestamp_utc, value in values:
timestamp_local = timestamp_utc.astimezone(timezone)
metric_results_local[metric].append((timestamp_local, value))
return metric_results_local
def _convert_result_group_by_day(self, group_key, metric_results):
self._convert_result_group(
group_key, metric_results, 'daily',
self._result_reducer.reduce_by_day,
self._observatory_file_writer.write_daily_datafile)
def _convert_result_group_by_hour(self, group_key, metric_results):
self._convert_result_group(
group_key, metric_results, 'hourly',
self._result_reducer.reduce_by_hour_of_day_per_month,
self._observatory_file_writer.write_hourly_datafile)
def _convert_result_group(self, group_key, metric_results, output_type,
reducer_func, writer_func):
"""Converts a group of Telescope results into Observatory files.
Args:
group_key: (str) The key that identifies this result group (e.g.
lga01_comcast).
metric_results: (dict) A dictionary of raw Telescope results, keyed by
metric name, for example:
{
'download_throughput': [
(<datetime-2014-10-22@12:35:01>, 24.5),
(<datetime-2014-10-01@04:42:23>, 14.3),
(<datetime-2014-10-02@06:19:22>, 21.3),
...
],
'upload_throughput': [
(<datetime-2014-10-22@12:35:01>, 4.1),
(<datetime-2014-10-01@04:42:23>, 6.2),
(<datetime-2014-10-02@06:19:22>, 8.9),
...
]
}
output_type: (str) The type of data to be written (either 'daily' or
'hourly').
reducer_func: (function) Function to reduce sets of raw results into
aggregate metrics that Observatory can display.
writer_func: (function) Function to write results to an Observatory-
compatible file.
"""
results_reduced = reducer_func(metric_results)
_ensure_dir_exists(self._output_dir)
output_path = _generate_output_path(group_key, self._output_dir,
output_type)
with open(output_path, 'w') as output_file:
writer_func(results_reduced, output_file)
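# A hypothetical wiring sketch for ResultConverter: the grouper, reducer
# and writer objects are defined elsewhere in the repo and are assumed
# here, and all paths are placeholders.
#
#   converter = ResultConverter(result_grouper=grouper,
#                               result_reducer=reducer,
#                               observatory_file_writer=writer,
#                               output_dir='observatory/data',
#                               valid_keys_path='observatory/valid_keys.txt')
#   converter.convert_to_observatory_format(['telescope-output-1.json'])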
|
{
"content_hash": "658a0a4a020365a3cfcea201c6f30b32",
"timestamp": "",
"source": "github",
"line_count": 232,
"max_line_length": 78,
"avg_line_length": 37.75,
"alnum_prop": 0.6416990180406485,
"repo_name": "mtlynch/mlab-observatory",
"id": "c73cd2458900e945238c772a5237686990de6ca6",
"size": "9386",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "convert_from_telescope/convert.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "14095"
},
{
"name": "HTML",
"bytes": "2034"
},
{
"name": "JavaScript",
"bytes": "92167"
},
{
"name": "Python",
"bytes": "101099"
}
],
"symlink_target": ""
}
|
from flask import Flask, request, make_response
from gevent import monkey; monkey.patch_socket()
from gevent.wsgi import WSGIServer
import redis
import json
app = Flask(__name__)
app.config.from_object('settings')
r = redis.StrictRedis(host=app.config.get('PUBSUB_HOST', '127.0.0.1'),
port=app.config.get('PUBSUB_PORT', 6379))
auth = app.config.get('AUTH', '')
if auth:
r.execute_command('auth', auth)
@app.route('/', methods=['POST'])
def generic():
message = request.form['message']
channel = request.form['channel']
notified = r.publish(channel, message)
return str(notified)
@app.route('/github', methods=['POST'])
def github():
payload = json.loads(request.form['payload'])
owner = payload['repository']['owner']['name']
repo_name = owner + '/' + payload['repository']['name']
headcommit = payload['commits'][0]
commit_message = headcommit['message']
author = headcommit['author']['name']
message = '%s commited to %s: %s' % (author, repo_name, commit_message)
notified = r.publish(repo_name, message)
return str(notified)
@app.route('/twilio', methods=['POST'])
def twilio():
channel = request.form['To']
title = 'Message from ' + request.form['From']
message = title + ':' + request.form['Body']
notified = r.publish(channel, message)
response = make_response(channel + " " + str(notified), 200)
response.headers['Content-Type'] = 'text/plain'
return response
def start_server():
port = app.config.get('WEB_PORT', 8080)
WSGIServer(('0.0.0.0', port), app).serve_forever()
if __name__ == '__main__':
start_server()
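# Example requests against the endpoints above, assuming the server is
# running on the default WEB_PORT of 8080; channel and message values are
# placeholders. The response body is the subscriber count returned by
# Redis PUBLISH.
#
#   curl -d 'channel=mychannel' -d 'message=hello' http://localhost:8080/
#   curl -d 'To=mychannel' -d 'From=+15551234567' -d 'Body=hi' \
#       http://localhost:8080/twilio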
|
{
"content_hash": "b22d37f9cefc3dbd6e54104f0cfda0f3",
"timestamp": "",
"source": "github",
"line_count": 62,
"max_line_length": 75,
"avg_line_length": 27.29032258064516,
"alnum_prop": 0.6353427895981087,
"repo_name": "zhemao/lerner",
"id": "18640054256be982cb41b44e372f6deb8d47ea2b",
"size": "1715",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "server/webserver.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "13452"
},
{
"name": "Shell",
"bytes": "509"
}
],
"symlink_target": ""
}
|
from minibus import MiniBusTwistedClient
import sys
sys.dont_write_bytecode = True
class ServiceClient(MiniBusTwistedClient):
def __init__(self):
MiniBusTwistedClient.__init__(self, name="ServiceClient")
self.echoback = self.service_client("echoback", { }, { }, self.echoback_reply, self.echoback_error)
def echoback_reply(self, reqstid, data):
print "Received Reply:", reqstid, data
self.exit_()
def echoback_error(self, reqstid, data):
print "Received Error:", reqstid, data
self.exit_()
def run(self):
idstr = self.echoback("This is a test")
print "waiting to hear on", idstr
if __name__ == "__main__":
client = ServiceClient()
client.exec_()
|
{
"content_hash": "ceb141869a9f9770104d31644cef8b6a",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 107,
"avg_line_length": 28.5,
"alnum_prop": 0.6383265856950068,
"repo_name": "allohakdan/minibus",
"id": "af95fb3761649fb1cc6f00571aa854747c8cbcc4",
"size": "763",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "examples/twisted_basic_service_client2.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "654"
},
{
"name": "Python",
"bytes": "80764"
},
{
"name": "Shell",
"bytes": "128"
}
],
"symlink_target": ""
}
|
MAX_STATS_AGE = 60
"""
#
# dump1090-to-statsd.py
#
# Requires the statsd python package, there are a bazillion ways to install it. I use:
# sudo apt-get install python-pip
# sudo pip install statsd
#
# Reads dump1090's JSON stats file (usually located at /run/dump1090-mutability/stats.json)
# and sends the 1 minute stats numbers to your statsd daemon.
#
# The stats.json file only writes once every minute, so just run this script once a minute from
# cron. Add the following to your crontab:
#
# */1 * * * * python /path/to/dump1090-to-statsd.py myadsb 127.0.0.1 /run/dump1090-mutability/stats.json >/dev/null 2>&1
#
# Once that is done, you should be able to login to your graphite installation to make some nice graphs.
#
"""
import sys
import os
import stat
import time
import json
from statsd import StatsClient
# Use pprint to debug the json you get from dump1090
from pprint import pprint
# Return the number of seconds since a file was last modified
def file_age_in_secs(path):
return time.time() - os.stat(path)[stat.ST_MTIME]
STATSD_HOST = "127.0.0.1"
RADIO_NAME = "localhost"
# Send data to statsd
def send_radio_stats(n, s):
client = StatsClient(STATSD_HOST)
pipe = client.pipeline()
pipe.gauge("radios.%s.accepted" % n, s["local"]["accepted"][0])
pipe.gauge("radios.%s.accepted_corrected" % n, s["local"]["accepted"][1])
# If you use the "aggressive" setting in dump1090-mutability, there may
# be a third entry in the accepted set. Maybe you want to do something with that data?
#pipe.gauge("radios.%s.accepted_corrected_2bit" % n, s["local"]["accepted"][2])
pipe.gauge("radios.%s.bad" % n, s["local"]["bad"])
pipe.gauge("radios.%s.blocks_dropped" % n, s["local"]["blocks_dropped"])
pipe.gauge("radios.%s.blocks_processed" % n, s["local"]["blocks_processed"])
pipe.gauge("radios.%s.modeac" % n, s["local"]["modeac"])
pipe.gauge("radios.%s.modes" % n, s["local"]["modes"])
pipe.gauge("radios.%s.strong_signals" % n, s["local"]["strong_signals"])
pipe.gauge("radios.%s.unknown_icao" % n, s["local"]["unknown_icao"])
pipe.gauge("radios.%s.cpr.airborne" % n, s["cpr"]["airborne"])
pipe.gauge("radios.%s.cpr.filtered" % n, s["cpr"]["filtered"])
pipe.send()
pipe.gauge("radios.%s.cpr.global_bad" % n, s["cpr"]["global_bad"])
pipe.gauge("radios.%s.cpr.global_ok" % n, s["cpr"]["global_ok"])
pipe.gauge("radios.%s.cpr.global_range" % n, s["cpr"]["global_range"])
pipe.gauge("radios.%s.cpr.global_skipped" % n, s["cpr"]["global_skipped"])
pipe.gauge("radios.%s.cpr.global_speed" % n, s["cpr"]["global_speed"])
pipe.gauge("radios.%s.cpr.local_aircraft_relative" % n, s["cpr"]["local_aircraft_relative"])
pipe.gauge("radios.%s.cpr.local_ok" % n, s["cpr"]["local_ok"])
pipe.gauge("radios.%s.cpr.local_range" % n, s["cpr"]["local_range"])
pipe.gauge("radios.%s.cpr.local_receiver_relative" % n, s["cpr"]["local_receiver_relative"])
pipe.gauge("radios.%s.cpr.local_skipped" % n, s["cpr"]["local_skipped"])
pipe.send()
pipe.gauge("radios.%s.cpr.local_speed" % n, s["cpr"]["local_speed"])
pipe.gauge("radios.%s.cpr.surface" % n, s["cpr"]["surface"])
pipe.gauge("radios.%s.messages" % n, s["messages"])
pipe.gauge("radios.%s.tracks_all" % n, s["tracks"]["all"])
pipe.gauge("radios.%s.tracks_single_message" % n, s["tracks"]["single_message"])
pipe.timing("radios.%s.cpu.background" % n, s["cpu"]["background"])
pipe.timing("radios.%s.cpu.demodulation" % n, s["cpu"]["demod"])
pipe.timing("radios.%s.cpu.usb" % n, s["cpu"]["reader"])
pipe.send()
if __name__ == "__main__":
try:
json_file = sys.argv[3]
except IndexError:
print "Usage: dump1090-to-statsd.py <statsd radio name prefix> <statsd host> <path to dump1090 stats.json file>"
sys.exit(1)
if not os.path.isfile(json_file):
print "File does not appear to exist: %s" % json_file
sys.exit(1)
if not os.access(json_file, os.R_OK):
print "Unable to read file, permission denied: %s" % json_file
sys.exit(1)
RADIO_NAME = sys.argv[1]
STATSD_HOST = sys.argv[2]
print "Parsing dump1090 stats.json file for changes: %s" % json_file
age = file_age_in_secs(json_file)
if age >= MAX_STATS_AGE:
print "File is %d seconds old, not sending data" % age
sys.exit(0)
else:
print "File is %d seconds old, looks like fresh data" % age
with open(json_file) as data_file:
data = json.load(data_file)
d = data["last1min"]
send_radio_stats(RADIO_NAME, d)
sys.exit(0)
|
{
"content_hash": "6852995420d272cb7cfd0efafe446d96",
"timestamp": "",
"source": "github",
"line_count": 115,
"max_line_length": 120,
"avg_line_length": 38.2695652173913,
"alnum_prop": 0.6757555101113384,
"repo_name": "dsilvers/dump1090-statsd",
"id": "5541224e4a9617802c03ec5cc68af6ac4fb16781",
"size": "4513",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dump1090-to-statsd.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "8064"
}
],
"symlink_target": ""
}
|
import sys
#from distutils.core import setup, Extension
from setuptools import setup,Extension
version = sys.version_info
module1 = None
other_sources = [
"depends/libmilk/def.cpp",
"depends/libmilk/dict.cpp",
"depends/libmilk/log.cpp",
"depends/libmilk/thread.cpp",
"depends/libmilk/netio.cpp",
"depends/libmilk/netaio.cpp",
"depends/libmilk/aio.cpp",
"depends/libmilk/testing.cpp",
"depends/libmilk/var.cpp",
"depends/libmilk/json.cpp",
"depends/libmilk/codes.cpp",
"depends/libmilk/datawrapper.cpp",
"depends/libmilk/exception.cpp",
"depends/cavedb/slave_redis.cpp",
"depends/cavedb/slave_ssdb.cpp",
"depends/cavedb/rdb.cpp",
"depends/cavedb/redis_assert.c",
"depends/cavedb/redis/endianconv.c",
"depends/cavedb/redis/intset.c",
"depends/cavedb/redis/lzf_c.c",
"depends/cavedb/redis/lzf_d.c",
"depends/cavedb/redis/quicklist.c",
"depends/cavedb/redis/sds.c",
"depends/cavedb/redis/sha1.c",
"depends/cavedb/redis/util.c",
"depends/cavedb/redis/ziplist.c",
"depends/cavedb/redis/zipmap.c",
"depends/cavedb/redis/zmalloc.c",
]
if version >= (2,0) and version < (3,0):
module1 = Extension('cavedb',sources = ['cavedb_python_mutiplethread.cpp'] + other_sources,libraries = ['rt'],include_dirs=['./depends'],extra_compile_args=["-std=c99"])
elif version >= (3,0) and version < (4,0):
module1 = Extension('cavedb',sources = ['cavedb_python_mutiplethread.cpp'] + other_sources,libraries = ['rt'],include_dirs=['./depends'],extra_compile_args=["-std=c99"])
with open("README.md", "r") as fh:
long_description = fh.read()
setup (name = 'pycavedb',
version = '1.2',
description = 'cavedb-python',
long_description=long_description,
long_description_content_type="text/markdown",
author = "lyramilk",
ext_modules = [module1],
author_email='lyramilk@qq.com',
license='Apache License 2.0',
url='https://github.com/lyramilk/cavedb',
classifiers=[
"Intended Audience :: Developers",
"Operating System :: OS Independent",
"Natural Language :: Chinese (Simplified)",
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.5',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Topic :: Utilities'
],
keywords = 'cavedb,ssdb,redis',
)
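# Standard build/install invocations for a setup.py with a C/C++ extension
# module like the one above (run from this directory):
#
#   python setup.py build_ext
#   python setup.py install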
|
{
"content_hash": "12d63a8716fd4beeeaf91328eba5d503",
"timestamp": "",
"source": "github",
"line_count": 75,
"max_line_length": 170,
"avg_line_length": 33.17333333333333,
"alnum_prop": 0.6981511254019293,
"repo_name": "lyramilk/cavedb",
"id": "73b3d625d555a16325013e1f28d93ed6856fe8db",
"size": "2488",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cavedb/pycavedb/setup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "755066"
},
{
"name": "C++",
"bytes": "1429232"
},
{
"name": "CMake",
"bytes": "8174"
},
{
"name": "HTML",
"bytes": "20970"
},
{
"name": "Java",
"bytes": "2773"
},
{
"name": "Makefile",
"bytes": "16365"
},
{
"name": "Python",
"bytes": "3045"
},
{
"name": "Shell",
"bytes": "8552"
}
],
"symlink_target": ""
}
|
from distutils.core import setup
from catkin_pkg.python_setup import generate_distutils_setup
setup_args = generate_distutils_setup(
packages=['rodi_robot'],
package_dir={'': 'src'}
)
setup(**setup_args)
|
{
"content_hash": "290220a688e591842507c329a0961627",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 60,
"avg_line_length": 25.77777777777778,
"alnum_prop": 0.6681034482758621,
"repo_name": "rodibot/rodi_robot",
"id": "d90da04ecb4ff0269019b7cdcaccc2525b9b7d3b",
"size": "255",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CMake",
"bytes": "546"
},
{
"name": "Python",
"bytes": "3985"
}
],
"symlink_target": ""
}
|
from django.conf.urls import patterns, url
# Uncomment the next two lines to enable the admin:
# from django.contrib import admin
# admin.autodiscover()
urlpatterns = patterns('pyforum.views',
# Examples:
# url(r'^$', 'mysite.views.home', name='home'),
# url(r'^mysite/', include('mysite.foo.urls')),
# Uncomment the admin/doc line below to enable admin documentation:
# url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
# Uncomment the next line to enable the admin:
# url(r'^admin/', include(admin.site.urls)),
url(r'^$', 'forum_list', name='forum_list'),
url(r'^forum/(?P<forum_id>\d+)/$', 'forum_detail', name='forum_detail'),
url(r'^forum/(?P<forum_id>\d+)/compose/$', 'compose_thread',
name='compose_thread'),
url(r'^thread/(?P<thread_id>\d+)/$', 'thread_detail', name='thread_detail'),
url(r'^thread/(?P<thread_id>\d+)/compose/$', 'compose_post',
name='compose_post'),
url(r'^save_post/$', 'save_post', name='save_post'),
url(r'^post/(?P<post_id>\d+)/edit/$', 'edit_post', name='edit_post'),
url(r'^post/(?P<post_id>\d+)/delete/$', 'delete_post', name="delete_post"),
url(r'^user/(?P<user_id>\d+)/$', 'user_detail', name='user_detail'),
url(r'^sign_up/$', 'sign_up', name='sign_up'),
url(r'^save_user/$', 'save_user', name='save_user'),
url(r'^sign_in/$', 'sign_in', name='sign_in'),
url(r'^auth_user/$', 'auth_user', name='auth_user'),
url(r'^sign_out/$', 'sign_out', name='sign_out'),
)
|
{
"content_hash": "8512fe2c3c991675cb47f29ef6633227",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 80,
"avg_line_length": 44.529411764705884,
"alnum_prop": 0.6010568031704095,
"repo_name": "pombredanne/PyForum",
"id": "3c19bb835788d4a6dcb1de414f546e3023a9f08a",
"size": "1514",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyforum/urls.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
}
|
import socket
class Rprint:
name = 'user'
host = '127.0.0.1'
port = 10000
ver = '0.0.8'
dbg = False
def config(name='user', host='localhost', port=10000):
Rprint.name = name
Rprint.host = host
Rprint.port = port
print(Rprint.name, Rprint.host, Rprint.port)
def send(message):
    # Create the socket outside the try block so sock is always bound
    # when the finally clause runs.
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        server_address = (Rprint.host, Rprint.port)
        sock.sendto(message.encode('utf-8'), server_address)
    finally:
        sock.close()
    if Rprint.dbg:
        print(message)
def send_from(message):
send("{}:{}".format(Rprint.name, message))
def version():
return Rprint.ver
def debug(change=None):
    # Explicit None check so debug(False) can switch debugging off.
    if change is not None:
        Rprint.dbg = change
    return Rprint.dbg
def server(host='localhost', port=10000):
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    # Bind to the host/port arguments rather than the Rprint defaults.
    server_address = (host, port)
sock.bind(server_address)
print('Server udp listen')
while True:
data, address = sock.recvfrom(4096)
print(address[0], ':', data.decode('utf-8'))
def help():
info = """
server example:
(case 1)
import rprint
rprint.server()
(case 2)
import rprint
rprint.server(host='xxx.xxx.xxx.xxx', port=nnnn)
client example:
(case 1)
import rprint
rprint.send('ops')
(case 2)
import rprint
rprint.config(name, host='xxx.xxx.xxx.xxx', port=nnnn):
rprint.send('ops')
"""
print(info)
|
{
"content_hash": "f4f9884dc80a071bcf00f19594848105",
"timestamp": "",
"source": "github",
"line_count": 78,
"max_line_length": 63,
"avg_line_length": 19.78205128205128,
"alnum_prop": 0.594296824368114,
"repo_name": "cletrix/rprint",
"id": "f3a87c2ad8e3ff25208584cc12a457b4d999f846",
"size": "1543",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rprint/rprint.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "2003"
}
],
"symlink_target": ""
}
|
import botocore.session
from botocore.handlers import SERVICE_NAME_ALIASES
def test_can_use_service_alias():
session = botocore.session.get_session()
for (alias, name) in SERVICE_NAME_ALIASES.items():
yield _instantiates_the_same_client, session, name, alias
def _instantiates_the_same_client(session, service_name, service_alias):
client_kwargs = {
'region_name': 'us-east-1',
'aws_access_key_id': 'foo',
'aws_secret_access_key': 'bar',
}
original_client = session.create_client(service_name, **client_kwargs)
aliased_client = session.create_client(service_alias, **client_kwargs)
original_model_name = original_client.meta.service_model.service_name
aliased_model_name = aliased_client.meta.service_model.service_name
assert original_model_name == aliased_model_name
|
{
"content_hash": "be3836cfe78cc0815674405a4c9db9ba",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 74,
"avg_line_length": 40.04761904761905,
"alnum_prop": 0.7074910820451843,
"repo_name": "pplu/botocore",
"id": "d82cfbcf6aa432dff592574a4cce437a9d421f32",
"size": "1402",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "tests/functional/test_service_alias.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Gherkin",
"bytes": "23824"
},
{
"name": "Python",
"bytes": "2691062"
}
],
"symlink_target": ""
}
|
"""
px_generate_uorb_topic_headers.py
Generates c/cpp header files for uorb topics from .msg (ROS syntax)
message files
"""
from __future__ import print_function
import os
import shutil
import filecmp
import argparse
try:
import genmsg.template_tools
except ImportError as e:
print("python import error: ", e)
print('''
Required python packages not installed.
On a Debian/Ubuntu system please run:
sudo apt-get install python-empy
sudo pip install catkin_pkg
On MacOS please run:
sudo pip install empy catkin_pkg
On Windows please run:
easy_install empy catkin_pkg
''')
exit(1)
__author__ = "Thomas Gubler"
__copyright__ = "Copyright (C) 2013-2014 PX4 Development Team."
__license__ = "BSD"
__email__ = "thomasgubler@gmail.com"
msg_template_map = {'msg.h.template': '@NAME@.h'}
srv_template_map = {}
incl_default = ['std_msgs:./msg/std_msgs']
package = 'px4'
def convert_file(filename, outputdir, templatedir, includepath):
"""
Converts a single .msg file to a uorb header
"""
print("Generating headers from {0}".format(filename))
genmsg.template_tools.generate_from_file(filename,
package,
outputdir,
templatedir,
includepath,
msg_template_map,
srv_template_map)
def convert_dir(inputdir, outputdir, templatedir):
"""
Converts all .msg files in inputdir to uORB header files
"""
includepath = incl_default + [':'.join([package, inputdir])]
for f in os.listdir(inputdir):
fn = os.path.join(inputdir, f)
if os.path.isfile(fn):
convert_file(
fn,
outputdir,
templatedir,
includepath)
def copy_changed(inputdir, outputdir, prefix=''):
"""
Copies files from inputdir to outputdir if they don't exist in
    outputdir or if their content changed.
"""
# Make sure output directory exists:
if not os.path.isdir(outputdir):
os.makedirs(outputdir)
for f in os.listdir(inputdir):
fni = os.path.join(inputdir, f)
if os.path.isfile(fni):
            # Check if f exists in outputdir; copy the file if not
fno = os.path.join(outputdir, prefix + f)
if not os.path.isfile(fno):
shutil.copy(fni, fno)
print("{0}: new header file".format(f))
continue
# The file exists in inputdir and outputdir
# only copy if contents do not match
if not filecmp.cmp(fni, fno):
shutil.copy(fni, fno)
print("{0}: updated".format(f))
continue
print("{0}: unchanged".format(f))
def convert_dir_save(inputdir, outputdir, templatedir, temporarydir, prefix):
"""
Converts all .msg files in inputdir to uORB header files
Unchanged existing files are not overwritten.
"""
# Create new headers in temporary output directory
convert_dir(inputdir, temporarydir, templatedir)
# Copy changed headers from temporary dir to output dir
copy_changed(temporarydir, outputdir, prefix)
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description='Convert msg files to uorb headers')
parser.add_argument('-d', dest='dir', help='directory with msg files')
parser.add_argument('-f', dest='file',
help="files to convert (use only without -d)",
nargs="+")
parser.add_argument('-e', dest='templatedir',
help='directory with template files',)
parser.add_argument('-o', dest='outputdir',
help='output directory for header files')
parser.add_argument('-t', dest='temporarydir',
help='temporary directory')
parser.add_argument('-p', dest='prefix', default='',
help='string added as prefix to the output file '
' name when converting directories')
args = parser.parse_args()
if args.file is not None:
for f in args.file:
convert_file(
f,
args.outputdir,
args.templatedir,
incl_default)
elif args.dir is not None:
convert_dir_save(
args.dir,
args.outputdir,
args.templatedir,
args.temporarydir,
args.prefix)
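# Example invocations matching the argparse options above (all paths are
# placeholders):
#
#   python px_generate_uorb_topic_headers.py -f msg/foo.msg \
#       -o build/uorb_headers -e msg/templates
#   python px_generate_uorb_topic_headers.py -d msg \
#       -o build/uorb_headers -e msg/templates -t /tmp/uorb_tmp -p uorb_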
|
{
"content_hash": "44710d3fbd48e97f8b1b07e2a6be4089",
"timestamp": "",
"source": "github",
"line_count": 144,
"max_line_length": 79,
"avg_line_length": 36.3125,
"alnum_prop": 0.5037292025243832,
"repo_name": "Yndal/ArduPilot-SensorPlatform",
"id": "c7ed72d582871e5e845d378a13a1f3f53d8d0905",
"size": "6958",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "PX4Firmware/Tools/px_generate_uorb_topic_headers.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Arduino",
"bytes": "12462"
},
{
"name": "Assembly",
"bytes": "799628"
},
{
"name": "Batchfile",
"bytes": "68199"
},
{
"name": "C",
"bytes": "55034159"
},
{
"name": "C#",
"bytes": "9917"
},
{
"name": "C++",
"bytes": "13663242"
},
{
"name": "CMake",
"bytes": "13681"
},
{
"name": "CSS",
"bytes": "6280"
},
{
"name": "EmberScript",
"bytes": "19928"
},
{
"name": "GDB",
"bytes": "744"
},
{
"name": "Groff",
"bytes": "43610"
},
{
"name": "HTML",
"bytes": "9849"
},
{
"name": "Io",
"bytes": "286"
},
{
"name": "Java",
"bytes": "4394945"
},
{
"name": "Lex",
"bytes": "13878"
},
{
"name": "Lua",
"bytes": "87871"
},
{
"name": "M4",
"bytes": "15467"
},
{
"name": "Makefile",
"bytes": "8807880"
},
{
"name": "Matlab",
"bytes": "185473"
},
{
"name": "Objective-C",
"bytes": "24203"
},
{
"name": "OpenEdge ABL",
"bytes": "12712"
},
{
"name": "PHP",
"bytes": "484"
},
{
"name": "Pascal",
"bytes": "253102"
},
{
"name": "Perl",
"bytes": "17902"
},
{
"name": "Processing",
"bytes": "168008"
},
{
"name": "Python",
"bytes": "1785059"
},
{
"name": "Ruby",
"bytes": "7108"
},
{
"name": "Scilab",
"bytes": "1502"
},
{
"name": "Shell",
"bytes": "1276765"
},
{
"name": "Yacc",
"bytes": "30289"
}
],
"symlink_target": ""
}
|
# Copyright (c) 2011 Citrix Systems, Inc.
# Copyright 2011 OpenStack LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Stubouts for the test suite
"""
from nova.virt import vmwareapi_conn
from nova.virt.vmwareapi import fake
from nova.virt.vmwareapi import vmware_images
def fake_get_vim_object(arg):
"""Stubs out the VMWareAPISession's get_vim_object method."""
return fake.FakeVim()
def fake_is_vim_object(arg, module):
"""Stubs out the VMWareAPISession's is_vim_object method."""
return isinstance(module, fake.FakeVim)
def set_stubs(stubs):
"""Set the stubs."""
stubs.Set(vmware_images, 'fetch_image', fake.fake_fetch_image)
stubs.Set(vmware_images, 'get_vmdk_size_and_properties',
fake.fake_get_vmdk_size_and_properties)
stubs.Set(vmware_images, 'upload_image', fake.fake_upload_image)
stubs.Set(vmwareapi_conn.VMWareAPISession, "_get_vim_object",
fake_get_vim_object)
stubs.Set(vmwareapi_conn.VMWareAPISession, "_is_vim_object",
fake_is_vim_object)
|
{
"content_hash": "0597214b07d21ff73c1a49f384a7697a",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 78,
"avg_line_length": 36.08888888888889,
"alnum_prop": 0.6859605911330049,
"repo_name": "superstack/nova",
"id": "a648efb160d2c8d77fad607d03fecc3c03340046",
"size": "1669",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "nova/tests/vmwareapi/stubs.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "47238"
},
{
"name": "Python",
"bytes": "2491049"
},
{
"name": "Shell",
"bytes": "31698"
}
],
"symlink_target": ""
}
|
"""Sum of Digits
Given a positive integer, find the sum of its constituent digits.
"""
import sys
def main():
with open(sys.argv[1]) as f:
lines = (l.strip() for l in f)
nums = (map(int, l) for l in lines)
sums = (sum(digits) for digits in nums)
for sum_digits in sums:
print sum_digits
if __name__ == '__main__':
main()
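# Worked example: for an input line "2338", map(int, "2338") yields
# [2, 3, 3, 8], so the script prints 16.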
|
{
"content_hash": "054bdc2c299e1231fc033e362c21d14f",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 65,
"avg_line_length": 19.94736842105263,
"alnum_prop": 0.5620052770448549,
"repo_name": "foxdan/codeeval",
"id": "124c2f5f26a482602b911c4ddc9bb666849a9185",
"size": "401",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "easy/sum_digits.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "2429"
}
],
"symlink_target": ""
}
|
from .feed_summary import *
|
{
"content_hash": "59ad6e9ae8d870fc6d5d6d9f20a2bd0b",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 27,
"avg_line_length": 28,
"alnum_prop": 0.75,
"repo_name": "jarrodmillman/stat133-fall2014",
"id": "93db3bed80a887720df701e98a371b4498cd1104",
"size": "28",
"binary": false,
"copies": "74",
"ref": "refs/heads/master",
"path": "_plugins/feed_summary/__init__.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "109276"
},
{
"name": "JavaScript",
"bytes": "3952"
},
{
"name": "Python",
"bytes": "187477"
},
{
"name": "Shell",
"bytes": "2178"
}
],
"symlink_target": ""
}
|
"""
Bitstamp API client.
<https://www.bitstamp.net/api/>
The client uses `tornado.httpclient` and can be used
either synchronous or asynchronous fashion.
-----
Based on:
kmadac/bitstamp-python-client
Copyright (c) 2013 Kamil Madac
<https://github.com/kmadac/bitstamp-python-client/blob/master/LICENSE.txt>
-----
"""
from __future__ import division
import json
import time
import hmac
import hashlib
import logging
import datetime
from decimal import Decimal
from urllib.parse import urlencode
import pytz
from tornado.httpclient import AsyncHTTPClient, HTTPClient, HTTPRequest
log = logging.getLogger(__name__)
NOT_PROVIDED = object()
def parse_datetime(s):
    naive = datetime.datetime.strptime(
        s.rsplit('.', 1)[0], '%Y-%m-%d %H:%M:%S')
    # pytz timezones must be attached with localize(); replace() would
    # pick up the zone's raw LMT offset.
    return pytz.timezone('Europe/London').localize(naive)
def parse_timestamp(n):
naive = datetime.datetime.fromtimestamp(int(n))
# FIXME: what timezone is it really?
from django.utils import timezone
tz = timezone.get_current_timezone()
local = tz.localize(naive)
return local
def maybe(type_):
"""Schema field type whose value can be `None` or `type_`."""
def func(val):
if val is not None:
return type_(val)
return func
class Model(dict):
schema = {}
def __init__(self, response):
super().__init__()
for key, value in response.items():
func = self.schema.get(key, NOT_PROVIDED)
if func is NOT_PROVIDED:
raise ValueError('%s unknown field: %r' % (type(self), key))
if func:
value = func(value)
self[key] = value
def __getattr__(self, k):
return self[k]
class Ticker(Model):
schema = {
'vwap': Decimal,
'last': Decimal,
'high': Decimal,
'low': Decimal,
'bid': Decimal,
'ask': Decimal,
'volume': Decimal,
'timestamp': parse_timestamp,
'open': Decimal,
}
class Order(Model):
schema = {
'id': int,
'price': Decimal,
'amount': Decimal,
'type': int,
'datetime': parse_datetime,
}
class Transaction(Model):
schema = {
'id': int,
'datetime': parse_datetime,
'type': int,
'fee': Decimal,
'usd': Decimal,
'btc': Decimal,
'btc_usd': Decimal,
'order_id': maybe(int),
}
class Balance(Model):
schema = {
'fee': Decimal,
'usd_balance': Decimal,
'btc_balance': Decimal,
'usd_reserved': Decimal,
'btc_reserved': Decimal,
'btc_available': Decimal,
'usd_available': Decimal,
'eur_balance': Decimal,
'xrp_balance': Decimal,
'eur_reserved': Decimal,
'xrp_reserved': Decimal,
'eur_available': Decimal,
'xrp_available': Decimal,
}
class BitstampError(Exception):
pass
class BitstampClientError(BitstampError):
pass
class InvalidNonceError(BitstampClientError):
pass
class BitstampClient:
_root = 'https://www.bitstamp.net/api'
def __init__(self, username=None, key=None, secret=None):
credentials = [username, key, secret]
assert all(credentials) or not any(credentials)
self._set_auth(*credentials)
self._requests = []
def _set_auth(self, username, key, secret):
self._username = str(username)
self._key = str(key)
self._secret = str(secret)
self._nonce = int(time.time())
def _get_auth_params(self):
msg = str(self._nonce) + self._username + self._key
signature = hmac.new(
key=self._secret.encode('utf8'),
msg=msg.encode('utf8'),
digestmod=hashlib.sha256
).hexdigest().upper()
params = {
'key': self._key,
'signature': signature,
'nonce': self._nonce
}
self._nonce += 1
return params
def _get(self, path, callback=None, params=None, model_class=None):
if params:
path += '?' + urlencode(params)
return self._request('GET',
path=path,
callback=callback,
model_class=model_class)
def _post(self, path, callback=None, params=None, model_class=None):
params = params or {}
params.update(self._get_auth_params())
body = urlencode(params)
return self._request('POST',
path=path,
callback=callback,
body=body,
model_class=model_class)
def _request(self, method, path, callback=None, body=None,
model_class=None):
now = datetime.datetime.utcnow()
period_start = now - datetime.timedelta(minutes=10)
self._requests = [dt for dt in self._requests if dt >= period_start]
self._requests.append(now)
log.debug('%d requests in last %d seconds',
len(self._requests),
(now - self._requests[0]).seconds)
client_class = AsyncHTTPClient if callback else HTTPClient
log.debug('%s > %s %s %r', client_class.__name__, method, path, body)
request = HTTPRequest(
url=self._root + path,
method=method,
body=body,
)
client = client_class()
if callback:
return client.fetch(
request=request,
callback=lambda resp: callback(self._process_response(
resp, model_class))
)
else:
return self._process_response(client.fetch(request), model_class)
def _process_response(self, response, model_class=None):
"""
:type response: tornado.httpclient.HTTPResponse
"""
response.rethrow()
model_class = model_class or Model
log.debug('< %s %s', response.headers, response.body)
content_type = response.headers['Content-Type']
if 'json' not in content_type:
raise BitstampError(
'not JSON response (%s)' % content_type,
response.headers,
response.body
)
try:
data = json.loads(response.body.decode('utf8'))
except (ValueError, UnicodeError) as e:
raise BitstampError(
'could not decode response json',
response.headers,
response.body) from e
if 'error' in data:
if data['error'] == 'Invalid nonce':
raise InvalidNonceError
raise BitstampClientError(data)
if isinstance(data, list):
data = list(map(model_class, data))
else:
data = model_class(data)
return data
### Public REST API methods ###
def ticker(self, callback=None):
"""
        Return the current ticker as a Ticker model.
"""
return self._get('/ticker/', callback=callback, model_class=Ticker)
def order_book(self, group=True, callback=None):
"""
Returns JSON dictionary with "bids" and "asks".
Each is a list of open orders and each order is represented
as a list of price and amount.
"""
return self._get('/order_book/',
params={
'group': group
},
callback=callback)
def transactions(self, timedelta_secs=86400, callback=None):
"""Return transactions for the last 'timedelta' seconds."""
return self._get('/transactions/',
params={
'timedelta': timedelta_secs
},
callback=callback, model_class=Transaction)
def conversion_rate_usd_eur(self, callback=None):
"""
Returns simple dictionary
{'buy': 'buy conversion rate', 'sell': 'sell conversion rate'}
"""
return self._get('/eur_usd/', callback=callback)
### Private REST API methods ###
def account_balance(self, callback=None):
"""
Returns dictionary:
{u'btc_reserved': u'0',
u'fee': u'0.5000',
u'btc_available': u'2.30856098',
u'usd_reserved': u'0',
u'btc_balance': u'2.30856098',
u'usd_balance': u'114.64',
u'usd_available': u'114.64'}
"""
return self._post('/balance/', callback=callback, model_class=Balance)
def user_transactions(self, offset=0, limit=100,
descending=True, callback=None):
"""
Returns descending list of transactions.
Every transaction (dictionary) contains
{u'usd': u'-39.25',
u'datetime': u'2013-03-26 18:49:13',
u'fee': u'0.20', u'btc': u'0.50000000',
u'type': 2,
u'id': 213642}
"""
return self._post(
'/user_transactions/',
callback=callback,
model_class=Transaction,
params={
'offset': offset,
'limit': limit,
'sort': 'desc' if descending else 'asc'
}
)
def open_orders(self, callback=None):
"""
Returns JSON list of open orders.
        Each order is represented as an Order model.
"""
return self._post('/open_orders/',
callback=callback,
model_class=Order)
def cancel_order(self, order_id, callback=None):
"""
Cancel the order specified by order_id
Returns True if order was successfully canceled,
otherwise tuple (False, msg) like (False, u'Order not found')
"""
return self._post('/cancel_order/', callback=callback, params={
'id': order_id
})
def buy_limit_order(self, amount, price, callback=None):
"""
Order to buy amount of bitcoins for specified price
"""
return self._post(
'/buy/',
callback=callback,
model_class=Order,
params={
'amount': amount,
'price': price
}
)
def sell_limit_order(self, amount, price, callback=None):
"""
Order to sell amount of bitcoins for specified price
"""
return self._post(
'/sell/',
callback=callback,
model_class=Order,
params={
'amount': amount,
'price': price
}
)
def withdrawal_requests(self, callback=None):
"""
Returns list of withdrawal requests.
Each request is represented as dictionary
"""
return self._post('/withdrawal_requests/', callback=callback)
def bitcoin_withdrawal(self, amount, address, callback=None):
"""
Send bitcoins to another bitcoin wallet specified by address
"""
return self._post('/bitcoin_withdrawal/', callback=callback, params={
'amount': amount,
'address': address
})
def bitcoin_deposit_address(self, callback=None):
"""
Returns bitcoin deposit address as unicode string
"""
return self._post('/bitcoin_deposit_address/', callback=callback)
def unconfirmed_bitcoin_deposits(self, callback=None):
"""
Returns JSON list of unconfirmed bitcoin transactions.
Each transaction is represented as dictionary:
amount - bitcoin amount
address - deposit address used
confirmations - number of confirmations
"""
return self._post('/unconfirmed_btc/', callback=callback)
def ripple_withdrawal(self, amount, address, currency, callback=None):
"""
Returns true if successful
"""
return self._post('/ripple_withdrawal/', callback=callback, params={
'amount': amount,
'address': address,
'currency': currency
})
def ripple_deposit_address(self, callback=None):
"""
Returns ripple deposit address as unicode string
"""
return self._post('/ripple_address/', callback=callback)
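# A minimal usage sketch covering both calling styles, assuming valid
# credentials and, for the async case, a running tornado IOLoop; the
# values below are placeholders.
#
#   client = BitstampClient(username='123456', key='KEY', secret='SECRET')
#
#   # Synchronous: blocks and returns a Ticker model.
#   ticker = client.ticker()
#   print(ticker.last, ticker.volume)
#
#   # Asynchronous: pass a callback and the request goes through
#   # tornado's AsyncHTTPClient instead.
#   client.ticker(callback=lambda t: print(t.last))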
|
{
"content_hash": "259682f240eb00f15bd0aab4eb3de3a8",
"timestamp": "",
"source": "github",
"line_count": 434,
"max_line_length": 78,
"avg_line_length": 28.596774193548388,
"alnum_prop": 0.5429860607525582,
"repo_name": "jakubroztocil/cointrol",
"id": "156574606ef1405203e7a138eb9bf802e62c3fc4",
"size": "12411",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "cointrol/trader/bitstamp.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "649"
},
{
"name": "CoffeeScript",
"bytes": "15368"
},
{
"name": "HTML",
"bytes": "6690"
},
{
"name": "Python",
"bytes": "80372"
}
],
"symlink_target": ""
}
|
"""DB2 storage backend
"""
from __future__ import division
import pymongo
from ceilometer.alarm.storage import pymongo_base
from ceilometer.openstack.common import log
from ceilometer import storage
from ceilometer.storage.mongo import utils as pymongo_utils
LOG = log.getLogger(__name__)
class Connection(pymongo_base.Connection):
"""The db2 alarm storage for Ceilometer."""
CONNECTION_POOL = pymongo_utils.ConnectionPool()
def __init__(self, url):
        # Since we are using pymongo, even though we are connecting to DB2
        # we still have to replace the scheme that distinguishes the db2
        # driver from the mongodb driver, so that pymongo does not raise
        # an exception on an unknown scheme.
url = url.replace('db2:', 'mongodb:', 1)
self.conn = self.CONNECTION_POOL.connect(url)
        # aggregate() requires MongoDB 2.2. Since mongodb is used as the
        # backend for tests, the following check makes sure the tests
        # won't attempt aggregate() against an older mongodb. For db2,
        # versionArray is not part of the server_info, so no exception
        # is raised when a real db2 backend is used.
server_info = self.conn.server_info()
if server_info.get('sysInfo'):
self._using_mongodb = True
else:
self._using_mongodb = False
if self._using_mongodb and server_info.get('versionArray') < [2, 2]:
raise storage.StorageBadVersion("Need at least MongoDB 2.2")
connection_options = pymongo.uri_parser.parse_uri(url)
self.db = getattr(self.conn, connection_options['database'])
if connection_options.get('username'):
self.db.authenticate(connection_options['username'],
connection_options['password'])
self.upgrade()
def clear(self):
# drop_database command does nothing on db2 database since this has
# not been implemented. However calling this method is important for
# removal of all the empty dbs created during the test runs since
# test run is against mongodb on Jenkins
self.conn.drop_database(self.db)
self.conn.close()
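# For reference, the scheme rewrite in __init__ turns a connection URL
# such as 'db2://user:pass@host:27017/ceilometer' into
# 'mongodb://user:pass@host:27017/ceilometer' before pymongo parses it.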
|
{
"content_hash": "bd5dd9e9e8e376519306e15e64afee70",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 78,
"avg_line_length": 38.93103448275862,
"alnum_prop": 0.6647475642161205,
"repo_name": "luogangyi/Ceilometer-oVirt",
"id": "9ca37f203cdbde8029666852ecd1846d423e2215",
"size": "3050",
"binary": false,
"copies": "5",
"ref": "refs/heads/stable/juno",
"path": "ceilometer/alarm/storage/impl_db2.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "5438675"
},
{
"name": "Shell",
"bytes": "1304"
}
],
"symlink_target": ""
}
|
from typing import Any, Callable, Dict, Iterable, Optional, TypeVar
from msrest import Serializer
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
from .._vendor import _convert_request, _format_url_section
T = TypeVar('T')
JSONType = Any
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_list_by_resource_group_request(
subscription_id: str,
resource_group_name: str,
**kwargs: Any
) -> HttpRequest:
api_version = kwargs.pop('api_version', "2019-10-17-preview") # type: str
accept = "application/json"
# Construct URL
_url = kwargs.pop("template_url", "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/microsoft.insights/workbooktemplates") # pylint: disable=line-too-long
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str', min_length=1),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
}
_url = _format_url_section(_url, **path_format_arguments)
# Construct parameters
_query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
_query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
_header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
_header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=_url,
params=_query_parameters,
headers=_header_parameters,
**kwargs
)
def build_get_request(
subscription_id: str,
resource_group_name: str,
resource_name: str,
**kwargs: Any
) -> HttpRequest:
api_version = kwargs.pop('api_version', "2019-10-17-preview") # type: str
accept = "application/json"
# Construct URL
_url = kwargs.pop("template_url", "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/microsoft.insights/workbooktemplates/{resourceName}") # pylint: disable=line-too-long
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str', min_length=1),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
"resourceName": _SERIALIZER.url("resource_name", resource_name, 'str'),
}
_url = _format_url_section(_url, **path_format_arguments)
# Construct parameters
_query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
_query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
_header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
_header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=_url,
params=_query_parameters,
headers=_header_parameters,
**kwargs
)
def build_delete_request(
subscription_id: str,
resource_group_name: str,
resource_name: str,
**kwargs: Any
) -> HttpRequest:
api_version = kwargs.pop('api_version', "2019-10-17-preview") # type: str
accept = "application/json"
# Construct URL
_url = kwargs.pop("template_url", "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/microsoft.insights/workbooktemplates/{resourceName}") # pylint: disable=line-too-long
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str', min_length=1),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
"resourceName": _SERIALIZER.url("resource_name", resource_name, 'str'),
}
_url = _format_url_section(_url, **path_format_arguments)
# Construct parameters
_query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
_query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
_header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
_header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="DELETE",
url=_url,
params=_query_parameters,
headers=_header_parameters,
**kwargs
)
def build_create_or_update_request(
subscription_id: str,
resource_group_name: str,
resource_name: str,
*,
json: JSONType = None,
content: Any = None,
**kwargs: Any
) -> HttpRequest:
api_version = kwargs.pop('api_version', "2019-10-17-preview") # type: str
content_type = kwargs.pop('content_type', None) # type: Optional[str]
accept = "application/json"
# Construct URL
_url = kwargs.pop("template_url", "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/microsoft.insights/workbooktemplates/{resourceName}") # pylint: disable=line-too-long
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str', min_length=1),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
"resourceName": _SERIALIZER.url("resource_name", resource_name, 'str'),
}
_url = _format_url_section(_url, **path_format_arguments)
# Construct parameters
_query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
_query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
_header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if content_type is not None:
_header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
_header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="PUT",
url=_url,
params=_query_parameters,
headers=_header_parameters,
json=json,
content=content,
**kwargs
)
def build_update_request(
subscription_id: str,
resource_group_name: str,
resource_name: str,
*,
json: JSONType = None,
content: Any = None,
**kwargs: Any
) -> HttpRequest:
api_version = kwargs.pop('api_version', "2019-10-17-preview") # type: str
content_type = kwargs.pop('content_type', None) # type: Optional[str]
accept = "application/json"
# Construct URL
_url = kwargs.pop("template_url", "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/microsoft.insights/workbooktemplates/{resourceName}") # pylint: disable=line-too-long
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str', min_length=1),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
"resourceName": _SERIALIZER.url("resource_name", resource_name, 'str'),
}
_url = _format_url_section(_url, **path_format_arguments)
# Construct parameters
_query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
_query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
_header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if content_type is not None:
_header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
_header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="PATCH",
url=_url,
params=_query_parameters,
headers=_header_parameters,
json=json,
content=content,
**kwargs
)
class WorkbookTemplatesOperations(object):
"""WorkbookTemplatesOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.applicationinsights.v2019_10_17_preview.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
@distributed_trace
def list_by_resource_group(
self,
resource_group_name: str,
**kwargs: Any
) -> Iterable["_models.WorkbookTemplatesListResult"]:
"""Get all Workbook templates defined within a specified resource group.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either WorkbookTemplatesListResult or the result of
cls(response)
:rtype:
~azure.core.paging.ItemPaged[~azure.mgmt.applicationinsights.v2019_10_17_preview.models.WorkbookTemplatesListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
api_version = kwargs.pop('api_version', "2019-10-17-preview") # type: str
cls = kwargs.pop('cls', None) # type: ClsType["_models.WorkbookTemplatesListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_by_resource_group_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
api_version=api_version,
template_url=self.list_by_resource_group.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_by_resource_group_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
api_version=api_version,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
def extract_data(pipeline_response):
deserialized = self._deserialize("WorkbookTemplatesListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.WorkbookError, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_by_resource_group.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/microsoft.insights/workbooktemplates"} # type: ignore
@distributed_trace
def get(
self,
resource_group_name: str,
resource_name: str,
**kwargs: Any
) -> "_models.WorkbookTemplate":
"""Get a single workbook template by its resourceName.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param resource_name: The name of the Application Insights component resource.
:type resource_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: WorkbookTemplate, or the result of cls(response)
:rtype: ~azure.mgmt.applicationinsights.v2019_10_17_preview.models.WorkbookTemplate
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.WorkbookTemplate"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2019-10-17-preview") # type: str
request = build_get_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
resource_name=resource_name,
api_version=api_version,
template_url=self.get.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.WorkbookError, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('WorkbookTemplate', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/microsoft.insights/workbooktemplates/{resourceName}"} # type: ignore
@distributed_trace
def delete( # pylint: disable=inconsistent-return-statements
self,
resource_group_name: str,
resource_name: str,
**kwargs: Any
) -> None:
"""Delete a workbook template.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param resource_name: The name of the Application Insights component resource.
:type resource_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2019-10-17-preview") # type: str
request = build_delete_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
resource_name=resource_name,
api_version=api_version,
template_url=self.delete.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.WorkbookError, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
delete.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/microsoft.insights/workbooktemplates/{resourceName}"} # type: ignore
@distributed_trace
def create_or_update(
self,
resource_group_name: str,
resource_name: str,
workbook_template_properties: "_models.WorkbookTemplate",
**kwargs: Any
) -> "_models.WorkbookTemplate":
"""Create a new workbook template.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param resource_name: The name of the Application Insights component resource.
:type resource_name: str
        :param workbook_template_properties: Properties that need to be specified to create a new
         workbook template.
:type workbook_template_properties:
~azure.mgmt.applicationinsights.v2019_10_17_preview.models.WorkbookTemplate
:keyword callable cls: A custom type or function that will be passed the direct response
:return: WorkbookTemplate, or the result of cls(response)
:rtype: ~azure.mgmt.applicationinsights.v2019_10_17_preview.models.WorkbookTemplate
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.WorkbookTemplate"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2019-10-17-preview") # type: str
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(workbook_template_properties, 'WorkbookTemplate')
request = build_create_or_update_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
resource_name=resource_name,
api_version=api_version,
content_type=content_type,
json=_json,
template_url=self.create_or_update.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.WorkbookError, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('WorkbookTemplate', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('WorkbookTemplate', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create_or_update.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/microsoft.insights/workbooktemplates/{resourceName}"} # type: ignore
@distributed_trace
def update(
self,
resource_group_name: str,
resource_name: str,
workbook_template_update_parameters: Optional["_models.WorkbookTemplateUpdateParameters"] = None,
**kwargs: Any
) -> "_models.WorkbookTemplate":
"""Updates a workbook template that has already been added.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param resource_name: The name of the Application Insights component resource.
:type resource_name: str
:param workbook_template_update_parameters: Properties that need to be specified to patch a
workbook template. Default value is None.
:type workbook_template_update_parameters:
~azure.mgmt.applicationinsights.v2019_10_17_preview.models.WorkbookTemplateUpdateParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:return: WorkbookTemplate, or the result of cls(response)
:rtype: ~azure.mgmt.applicationinsights.v2019_10_17_preview.models.WorkbookTemplate
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.WorkbookTemplate"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2019-10-17-preview") # type: str
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
if workbook_template_update_parameters is not None:
_json = self._serialize.body(workbook_template_update_parameters, 'WorkbookTemplateUpdateParameters')
else:
_json = None
request = build_update_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
resource_name=resource_name,
api_version=api_version,
content_type=content_type,
json=_json,
template_url=self.update.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.WorkbookError, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('WorkbookTemplate', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
update.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/microsoft.insights/workbooktemplates/{resourceName}"} # type: ignore
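# A hedged usage sketch, separate from this module: how this operation group is
# typically reached through the generated management client. The client class,
# credential type, subscription id and resource names below are assumptions.
from azure.identity import DefaultAzureCredential
from azure.mgmt.applicationinsights import ApplicationInsightsManagementClient
client = ApplicationInsightsManagementClient(
    credential=DefaultAzureCredential(),
    subscription_id="00000000-0000-0000-0000-000000000000",  # placeholder
)
# list_by_resource_group returns an ItemPaged that can be iterated directly.
for template in client.workbook_templates.list_by_resource_group("my-rg"):
    print(template.name)
# get fetches a single template by its resourceName.
template = client.workbook_templates.get("my-rg", "my-template")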
|
{
"content_hash": "372afc37caa9ed109ca462f566516343",
"timestamp": "",
"source": "github",
"line_count": 571,
"max_line_length": 202,
"avg_line_length": 42.25919439579685,
"alnum_prop": 0.6535018648984666,
"repo_name": "Azure/azure-sdk-for-python",
"id": "435c3b4a010e7e9c37f34d14dee73724fcb5e1d1",
"size": "24630",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/applicationinsights/azure-mgmt-applicationinsights/azure/mgmt/applicationinsights/v2019_10_17_preview/operations/_workbook_templates_operations.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
}
|
import subprocess
import sys
# pip.main() was removed from pip's public API in pip 10; invoke pip through
# the interpreter instead. json and datetime are standard-library modules, so
# they are not installed here, and the scikit-learn distribution is published
# on PyPI as 'scikit-learn', not 'sklearn'.
for package in ['requests', 'pandas', 'numpy', 'scikit-learn']:
    subprocess.check_call([sys.executable, '-m', 'pip', 'install', package])
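# A hedged refinement of the script above: skip distributions whose module is
# already importable before shelling out to pip. importlib.util.find_spec and
# the "python -m pip" invocation are standard; the distribution-to-module name
# mapping is an assumption for these particular packages.
import importlib.util
packages = {'requests': 'requests', 'pandas': 'pandas',
            'numpy': 'numpy', 'scikit-learn': 'sklearn'}
for dist, module in packages.items():
    if importlib.util.find_spec(module) is None:
        subprocess.check_call([sys.executable, '-m', 'pip', 'install', dist])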
|
{
"content_hash": "2c43f878b5a5883da2c304bc3e51ce1c",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 33,
"avg_line_length": 29.142857142857142,
"alnum_prop": 0.6421568627450981,
"repo_name": "andurilhuang/Movie_Income_Prediction",
"id": "c097f92b1c33b4959de46c3539df8f7dcc1e39cd",
"size": "204",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Main.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "7150"
},
{
"name": "Jupyter Notebook",
"bytes": "1286755"
},
{
"name": "Python",
"bytes": "901375"
},
{
"name": "Shell",
"bytes": "5368"
}
],
"symlink_target": ""
}
|
from ark.scheduler import Scheduler
from ark.cli import *
from ark.server_control import ServerControl
from ark.event_handler import EventHandler
class Task_CheckForUpdates(Scheduler):
@staticmethod
def run():
if ServerControl.new_version() is True:
EventHandler.triggerEvent(EventHandler.E_NEW_ARK_VERSION)
else:
debug_out('No server update available',level=3)
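# A self-contained sketch of the same task pattern, assuming nothing from
# pyarc's API: a scheduler base class that registers task subclasses and
# invokes their static run() hooks. All names here are illustrative.
class SchedulerBase:
    tasks = []
    @classmethod
    def register(cls, task_cls):
        cls.tasks.append(task_cls)
        return task_cls
@SchedulerBase.register
class CheckForUpdates(SchedulerBase):
    @staticmethod
    def run():
        print('No server update available')
for task in SchedulerBase.tasks:
    task.run()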
|
{
"content_hash": "70a75f84ade020977e33323fc2976df4",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 69,
"avg_line_length": 34.166666666666664,
"alnum_prop": 0.7219512195121951,
"repo_name": "f4ble/pyarc",
"id": "8a28e6aa3ed196745635761eab85e22ee0eb2bc6",
"size": "410",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "ark/tasks/task_check_for_update.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "133175"
}
],
"symlink_target": ""
}
|
import json
f = open("test.json", "r")
f_out = open("analized.json", "w")
myData = json.load(f)
'''
# Parsing for specific values of an attribute
for line in myData:
    if line["context"]["org_id"] != "":
        #print(json.dumps(line, sort_keys=True, indent=4, separators=(',', ': ')))
        json.dump(line, f_out)
# Parsing when an object exists (context in this case)
for line in myData:
    if "username" in line:
        print(json.dumps(line, sort_keys=True, indent=4, separators=(',', ': ')))
        json.dump(line, f_out)
# Parsing for the existence of attributes inside objects
for line in myData:
    try:
        if line["context"]["path"]:
            # print(json.dumps(line, sort_keys=True, indent=4, separators=(',', ': ')))
            json.dump(line, f_out)
            #print("json with value")
    except KeyError:
        print("1")
'''
# Parsing for the existence of objects within objects
for line in myData:
    if line["context"]["course_user_tag"] != {}:
        # print(json.dumps(line, sort_keys=True, indent=4, separators=(',', ': ')))
        json.dump(line, f_out)
        #print("json with value")
|
{
"content_hash": "ba27328232d29fb885112ba4b2751920",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 75,
"avg_line_length": 21.875,
"alnum_prop": 0.6447619047619048,
"repo_name": "Farmijo/TFG-Dashboard",
"id": "61e67c9a062238cc016bea5566af3483869d04e9",
"size": "1050",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "1eros ejemplos/ex.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "6817"
},
{
"name": "HTML",
"bytes": "1265744"
},
{
"name": "JavaScript",
"bytes": "92362"
},
{
"name": "Python",
"bytes": "26432"
}
],
"symlink_target": ""
}
|
from typing import Any, Callable, Dict, IO, Iterable, Optional, TypeVar, Union, cast, overload
import urllib.parse
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
from ..._serialization import Serializer
from .._vendor import _convert_request, _format_url_section
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_create_or_update_request(
resource_group_name: str, image_name: str, subscription_id: str, **kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2018-04-01")) # type: str
content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None)) # type: Optional[str]
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop(
"template_url",
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/images/{imageName}",
) # pylint: disable=line-too-long
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, "str"),
"imageName": _SERIALIZER.url("image_name", image_name, "str"),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"),
}
_url = _format_url_section(_url, **path_format_arguments)
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
# Construct headers
if content_type is not None:
_headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str")
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs)
def build_update_request(resource_group_name: str, image_name: str, subscription_id: str, **kwargs: Any) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2018-04-01")) # type: str
content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None)) # type: Optional[str]
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop(
"template_url",
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/images/{imageName}",
) # pylint: disable=line-too-long
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, "str"),
"imageName": _SERIALIZER.url("image_name", image_name, "str"),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"),
}
_url = _format_url_section(_url, **path_format_arguments)
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
# Construct headers
if content_type is not None:
_headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str")
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="PATCH", url=_url, params=_params, headers=_headers, **kwargs)
def build_delete_request(resource_group_name: str, image_name: str, subscription_id: str, **kwargs: Any) -> HttpRequest:
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2018-04-01")) # type: str
# Construct URL
_url = kwargs.pop(
"template_url",
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/images/{imageName}",
) # pylint: disable=line-too-long
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, "str"),
"imageName": _SERIALIZER.url("image_name", image_name, "str"),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"),
}
_url = _format_url_section(_url, **path_format_arguments)
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
return HttpRequest(method="DELETE", url=_url, params=_params, **kwargs)
def build_get_request(
resource_group_name: str, image_name: str, subscription_id: str, *, expand: Optional[str] = None, **kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2018-04-01")) # type: str
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop(
"template_url",
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/images/{imageName}",
) # pylint: disable=line-too-long
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, "str"),
"imageName": _SERIALIZER.url("image_name", image_name, "str"),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"),
}
_url = _format_url_section(_url, **path_format_arguments)
# Construct parameters
if expand is not None:
_params["$expand"] = _SERIALIZER.query("expand", expand, "str")
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
# Construct headers
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
def build_list_by_resource_group_request(resource_group_name: str, subscription_id: str, **kwargs: Any) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2018-04-01")) # type: str
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop(
"template_url",
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/images",
) # pylint: disable=line-too-long
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, "str"),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"),
}
_url = _format_url_section(_url, **path_format_arguments)
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
# Construct headers
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
def build_list_request(subscription_id: str, **kwargs: Any) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2018-04-01")) # type: str
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop("template_url", "/subscriptions/{subscriptionId}/providers/Microsoft.Compute/images")
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"),
}
_url = _format_url_section(_url, **path_format_arguments)
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
# Construct headers
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
class ImagesOperations:
"""
.. warning::
**DO NOT** instantiate this class directly.
Instead, you should access the following operations through
:class:`~azure.mgmt.compute.v2018_04_01.ComputeManagementClient`'s
:attr:`images` attribute.
"""
models = _models
def __init__(self, *args, **kwargs):
input_args = list(args)
self._client = input_args.pop(0) if input_args else kwargs.pop("client")
self._config = input_args.pop(0) if input_args else kwargs.pop("config")
self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
def _create_or_update_initial(
self, resource_group_name: str, image_name: str, parameters: Union[_models.Image, IO], **kwargs: Any
) -> _models.Image:
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2018-04-01")) # type: str
content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None)) # type: Optional[str]
cls = kwargs.pop("cls", None) # type: ClsType[_models.Image]
content_type = content_type or "application/json"
_json = None
_content = None
if isinstance(parameters, (IO, bytes)):
_content = parameters
else:
_json = self._serialize.body(parameters, "Image")
request = build_create_or_update_request(
resource_group_name=resource_group_name,
image_name=image_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
content_type=content_type,
json=_json,
content=_content,
template_url=self._create_or_update_initial.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize("Image", pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize("Image", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/images/{imageName}"} # type: ignore
@overload
def begin_create_or_update(
self,
resource_group_name: str,
image_name: str,
parameters: _models.Image,
*,
content_type: str = "application/json",
**kwargs: Any
) -> LROPoller[_models.Image]:
"""Create or update an image.
:param resource_group_name: The name of the resource group. Required.
:type resource_group_name: str
:param image_name: The name of the image. Required.
:type image_name: str
:param parameters: Parameters supplied to the Create Image operation. Required.
:type parameters: ~azure.mgmt.compute.v2018_04_01.models.Image
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either Image or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.compute.v2018_04_01.models.Image]
:raises ~azure.core.exceptions.HttpResponseError:
"""
@overload
def begin_create_or_update(
self,
resource_group_name: str,
image_name: str,
parameters: IO,
*,
content_type: str = "application/json",
**kwargs: Any
) -> LROPoller[_models.Image]:
"""Create or update an image.
:param resource_group_name: The name of the resource group. Required.
:type resource_group_name: str
:param image_name: The name of the image. Required.
:type image_name: str
:param parameters: Parameters supplied to the Create Image operation. Required.
:type parameters: IO
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either Image or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.compute.v2018_04_01.models.Image]
:raises ~azure.core.exceptions.HttpResponseError:
"""
@distributed_trace
def begin_create_or_update(
self, resource_group_name: str, image_name: str, parameters: Union[_models.Image, IO], **kwargs: Any
) -> LROPoller[_models.Image]:
"""Create or update an image.
:param resource_group_name: The name of the resource group. Required.
:type resource_group_name: str
:param image_name: The name of the image. Required.
:type image_name: str
        :param parameters: Parameters supplied to the Create Image operation. Is either a model type or
         an IO type. Required.
:type parameters: ~azure.mgmt.compute.v2018_04_01.models.Image or IO
:keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
Default value is None.
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either Image or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.compute.v2018_04_01.models.Image]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2018-04-01")) # type: str
content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None)) # type: Optional[str]
cls = kwargs.pop("cls", None) # type: ClsType[_models.Image]
polling = kwargs.pop("polling", True) # type: Union[bool, PollingMethod]
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token = kwargs.pop("continuation_token", None) # type: Optional[str]
if cont_token is None:
raw_result = self._create_or_update_initial( # type: ignore
resource_group_name=resource_group_name,
image_name=image_name,
parameters=parameters,
api_version=api_version,
content_type=content_type,
cls=lambda x, y, z: x,
headers=_headers,
params=_params,
**kwargs
)
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize("Image", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True:
polling_method = cast(PollingMethod, ARMPolling(lro_delay, **kwargs)) # type: PollingMethod
elif polling is False:
polling_method = cast(PollingMethod, NoPolling())
else:
polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/images/{imageName}"} # type: ignore
def _update_initial(
self, resource_group_name: str, image_name: str, parameters: Union[_models.ImageUpdate, IO], **kwargs: Any
) -> _models.Image:
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2018-04-01")) # type: str
content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None)) # type: Optional[str]
cls = kwargs.pop("cls", None) # type: ClsType[_models.Image]
content_type = content_type or "application/json"
_json = None
_content = None
if isinstance(parameters, (IO, bytes)):
_content = parameters
else:
_json = self._serialize.body(parameters, "ImageUpdate")
request = build_update_request(
resource_group_name=resource_group_name,
image_name=image_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
content_type=content_type,
json=_json,
content=_content,
template_url=self._update_initial.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize("Image", pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize("Image", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_update_initial.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/images/{imageName}"} # type: ignore
@overload
def begin_update(
self,
resource_group_name: str,
image_name: str,
parameters: _models.ImageUpdate,
*,
content_type: str = "application/json",
**kwargs: Any
) -> LROPoller[_models.Image]:
"""Update an image.
:param resource_group_name: The name of the resource group. Required.
:type resource_group_name: str
:param image_name: The name of the image. Required.
:type image_name: str
:param parameters: Parameters supplied to the Update Image operation. Required.
:type parameters: ~azure.mgmt.compute.v2018_04_01.models.ImageUpdate
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either Image or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.compute.v2018_04_01.models.Image]
:raises ~azure.core.exceptions.HttpResponseError:
"""
@overload
def begin_update(
self,
resource_group_name: str,
image_name: str,
parameters: IO,
*,
content_type: str = "application/json",
**kwargs: Any
) -> LROPoller[_models.Image]:
"""Update an image.
:param resource_group_name: The name of the resource group. Required.
:type resource_group_name: str
:param image_name: The name of the image. Required.
:type image_name: str
:param parameters: Parameters supplied to the Update Image operation. Required.
:type parameters: IO
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either Image or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.compute.v2018_04_01.models.Image]
:raises ~azure.core.exceptions.HttpResponseError:
"""
@distributed_trace
def begin_update(
self, resource_group_name: str, image_name: str, parameters: Union[_models.ImageUpdate, IO], **kwargs: Any
) -> LROPoller[_models.Image]:
"""Update an image.
:param resource_group_name: The name of the resource group. Required.
:type resource_group_name: str
:param image_name: The name of the image. Required.
:type image_name: str
        :param parameters: Parameters supplied to the Update Image operation. Is either a model type or
         an IO type. Required.
:type parameters: ~azure.mgmt.compute.v2018_04_01.models.ImageUpdate or IO
:keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
Default value is None.
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either Image or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.compute.v2018_04_01.models.Image]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2018-04-01")) # type: str
content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None)) # type: Optional[str]
cls = kwargs.pop("cls", None) # type: ClsType[_models.Image]
polling = kwargs.pop("polling", True) # type: Union[bool, PollingMethod]
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token = kwargs.pop("continuation_token", None) # type: Optional[str]
if cont_token is None:
raw_result = self._update_initial( # type: ignore
resource_group_name=resource_group_name,
image_name=image_name,
parameters=parameters,
api_version=api_version,
content_type=content_type,
cls=lambda x, y, z: x,
headers=_headers,
params=_params,
**kwargs
)
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize("Image", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True:
polling_method = cast(PollingMethod, ARMPolling(lro_delay, **kwargs)) # type: PollingMethod
elif polling is False:
polling_method = cast(PollingMethod, NoPolling())
else:
polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_update.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/images/{imageName}"} # type: ignore
def _delete_initial( # pylint: disable=inconsistent-return-statements
self, resource_group_name: str, image_name: str, **kwargs: Any
) -> None:
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2018-04-01")) # type: str
cls = kwargs.pop("cls", None) # type: ClsType[None]
request = build_delete_request(
resource_group_name=resource_group_name,
image_name=image_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self._delete_initial.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/images/{imageName}"} # type: ignore
@distributed_trace
def begin_delete(self, resource_group_name: str, image_name: str, **kwargs: Any) -> LROPoller[None]:
"""Deletes an Image.
:param resource_group_name: The name of the resource group. Required.
:type resource_group_name: str
:param image_name: The name of the image. Required.
:type image_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2018-04-01")) # type: str
cls = kwargs.pop("cls", None) # type: ClsType[None]
polling = kwargs.pop("polling", True) # type: Union[bool, PollingMethod]
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token = kwargs.pop("continuation_token", None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_initial( # type: ignore
resource_group_name=resource_group_name,
image_name=image_name,
api_version=api_version,
cls=lambda x, y, z: x,
headers=_headers,
params=_params,
**kwargs
)
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements
if cls:
return cls(pipeline_response, None, {})
if polling is True:
polling_method = cast(PollingMethod, ARMPolling(lro_delay, **kwargs)) # type: PollingMethod
elif polling is False:
polling_method = cast(PollingMethod, NoPolling())
else:
polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/images/{imageName}"} # type: ignore
@distributed_trace
def get(
self, resource_group_name: str, image_name: str, expand: Optional[str] = None, **kwargs: Any
) -> _models.Image:
"""Gets an image.
:param resource_group_name: The name of the resource group. Required.
:type resource_group_name: str
:param image_name: The name of the image. Required.
:type image_name: str
:param expand: The expand expression to apply on the operation. Default value is None.
:type expand: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Image or the result of cls(response)
:rtype: ~azure.mgmt.compute.v2018_04_01.models.Image
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2018-04-01")) # type: str
cls = kwargs.pop("cls", None) # type: ClsType[_models.Image]
request = build_get_request(
resource_group_name=resource_group_name,
image_name=image_name,
subscription_id=self._config.subscription_id,
expand=expand,
api_version=api_version,
template_url=self.get.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize("Image", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/images/{imageName}"} # type: ignore
@distributed_trace
def list_by_resource_group(self, resource_group_name: str, **kwargs: Any) -> Iterable["_models.Image"]:
"""Gets the list of images under a resource group.
:param resource_group_name: The name of the resource group. Required.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either Image or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.compute.v2018_04_01.models.Image]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2018-04-01")) # type: str
cls = kwargs.pop("cls", None) # type: ClsType[_models.ImageListResult]
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
def prepare_request(next_link=None):
if not next_link:
request = build_list_by_resource_group_request(
resource_group_name=resource_group_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.list_by_resource_group.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
else:
# make call to next link with the client's api-version
_parsed_next_link = urllib.parse.urlparse(next_link)
_next_request_params = case_insensitive_dict(
{
key: [urllib.parse.quote(v) for v in value]
for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
}
)
_next_request_params["api-version"] = self._config.api_version
request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
request.method = "GET"
return request
def extract_data(pipeline_response):
deserialized = self._deserialize("ImageListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(get_next, extract_data)
list_by_resource_group.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/images"} # type: ignore
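# Illustrative only -- not part of this generated module. A minimal sketch of
# consuming the pager returned above; the client construction and the
# resource group name are assumptions for the example:
#
#     from azure.identity import DefaultAzureCredential
#     from azure.mgmt.compute import ComputeManagementClient
#
#     client = ComputeManagementClient(DefaultAzureCredential(), "<subscription-id>")
#     for image in client.images.list_by_resource_group("my-resource-group"):
#         print(image.name, image.location)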
@distributed_trace
def list(self, **kwargs: Any) -> Iterable["_models.Image"]:
"""Gets the list of Images in the subscription. Use nextLink property in the response to get the
next page of Images. Do this till nextLink is null to fetch all the Images.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either Image or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.compute.v2018_04_01.models.Image]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2018-04-01")) # type: str
cls = kwargs.pop("cls", None) # type: ClsType[_models.ImageListResult]
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
def prepare_request(next_link=None):
if not next_link:
request = build_list_request(
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.list.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
else:
# make call to next link with the client's api-version
_parsed_next_link = urllib.parse.urlparse(next_link)
_next_request_params = case_insensitive_dict(
{
key: [urllib.parse.quote(v) for v in value]
for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
}
)
_next_request_params["api-version"] = self._config.api_version
request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
request.method = "GET"
return request
def extract_data(pipeline_response):
deserialized = self._deserialize("ImageListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(get_next, extract_data)
list.metadata = {"url": "/subscriptions/{subscriptionId}/providers/Microsoft.Compute/images"} # type: ignore
"""Adadelta for TensorFlow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.keras import backend_config
from tensorflow.python.keras.optimizer_v2 import optimizer_v2
from tensorflow.python.training import training_ops
from tensorflow.python.util.tf_export import keras_export
@keras_export('keras.optimizers.Adadelta')
class Adadelta(optimizer_v2.OptimizerV2):
r"""Optimizer that implements the Adadelta algorithm.
Adadelta optimization is a stochastic gradient descent method that is based on
an adaptive learning rate per dimension to address two drawbacks:
1) the continual decay of learning rates throughout training
2) the need for a manually selected global learning rate
Two accumulation steps are required:
1) the accumulation of gradients squared,
2) the accumulation of updates squared.
Initialization:
$$E[g^2]_0 := 0 \text{(Initialize gradient 2nd order moment vector)}$$
$$E[\Delta x^2]_0 := 0 \text{(Initialize 2nd order variable update)}$$
$$t := t + 1$$
$$E[g^2]_t := \rho * E[g^2]_{t-1} + (1 - \rho) * g^2$$
$$\Delta x_t = -RMS[\Delta x]_{t-1} * g_t / RMS[g]_t$$
$$E[\Delta x^2]_t := \rho * E[\Delta x^2]_{t-1} + (1 - \rho) * \Delta x_t^2$$
$$x_t := x_{t-1} + \Delta x_{t}$$
References
See [M. D. Zeiler](http://arxiv.org/abs/1212.5701)
([pdf](http://arxiv.org/pdf/1212.5701v1.pdf))
"""
def __init__(self,
learning_rate=0.001,
rho=0.95,
epsilon=1e-7,
name='Adadelta',
**kwargs):
"""Construct a new Adadelta optimizer.
Adadelta is a more robust extension of Adagrad that adapts learning rates
based on a moving window of gradient updates, instead of accumulating all
past gradients. This way, Adadelta continues learning even when many updates
have been done. Compared to Adagrad, the original version of Adadelta does not
require an initial learning rate to be set. In this version, the initial
learning rate can be set, as in most other Keras optimizers.
Args:
learning_rate: A `Tensor` or a floating point value. The learning rate.
To match the exact form in the original paper use 1.0.
rho: A `Tensor` or a floating point value. The decay rate.
epsilon: A `Tensor` or a floating point value. A constant epsilon used
to better condition the grad update.
name: Optional name prefix for the operations created when applying
gradients. Defaults to "Adadelta".
**kwargs: keyword arguments. Allowed to be {`clipnorm`, `clipvalue`, `lr`,
`decay`}. `clipnorm` clips gradients by norm; `clipvalue` clips gradients
by value; `decay` is included for backward compatibility to allow time
inverse decay of the learning rate; `lr` is included for backward
compatibility, and `learning_rate` is recommended instead.
@compatibility(eager)
When eager execution is enabled, `learning_rate`, `rho`, and `epsilon` can
each be a callable that takes no arguments and returns the actual value to
use. This can be useful for changing these values across different
invocations of optimizer functions.
@end_compatibility
"""
if epsilon is None:
epsilon = backend_config.epsilon()
super(Adadelta, self).__init__(name, **kwargs)
self._set_hyper('learning_rate', kwargs.get('lr', learning_rate))
self._set_hyper('decay', self._initial_decay)
self._set_hyper('rho', rho)
self._set_hyper('epsilon', epsilon)
def _create_slots(self, var_list):
# Separate for-loops to respect the ordering of slot variables from v1.
for v in var_list:
self.add_slot(v, 'accum_grad')
for v in var_list:
self.add_slot(v, 'accum_var')
def set_weights(self, weights):
params = self.weights
# Override set_weights for backward compatibility of Keras V1 optimizer
# since it does not include iteration at head of the weight list. Set
# iteration to 0.
if len(params) == len(weights) + 1:
weights = [np.array(0)] + weights
super(Adadelta, self).set_weights(weights)
def _resource_apply_dense(self, grad, var):
var_dtype = var.dtype.base_dtype
lr_t = self._decayed_lr(var_dtype)
accum_grad = self.get_slot(var, 'accum_grad')
accum_var = self.get_slot(var, 'accum_var')
return training_ops.resource_apply_adadelta(
var.handle,
accum_grad.handle,
accum_var.handle,
lr_t,
self._get_hyper('rho', var_dtype),
self._get_hyper('epsilon', var_dtype),
grad,
use_locking=self._use_locking)
def _resource_apply_sparse(self, grad, var, indices):
var_dtype = var.dtype.base_dtype
lr_t = self._decayed_lr(var_dtype)
accum_grad = self.get_slot(var, 'accum_grad')
accum_var = self.get_slot(var, 'accum_var')
return training_ops.resource_sparse_apply_adadelta(
var.handle,
accum_grad.handle,
accum_var.handle,
lr_t,
self._get_hyper('rho', var_dtype),
self._get_hyper('epsilon', var_dtype),
grad,
indices,
use_locking=self._use_locking)
def get_config(self):
config = super(Adadelta, self).get_config()
config.update({
'learning_rate': self._serialize_hyperparameter('learning_rate'),
'decay': self._serialize_hyperparameter('decay'),
'rho': self._serialize_hyperparameter('rho'),
'epsilon': self._serialize_hyperparameter('epsilon'),
})
return config
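# Illustrative only -- not part of the original module. A NumPy sketch of one
# Adadelta step from the class docstring, for a single parameter tensor; the
# helper name below is hypothetical.
def _adadelta_step_sketch(x, grad, accum_grad, accum_delta, rho=0.95, epsilon=1e-7):
    """Return updated (x, accum_grad, accum_delta) after one Adadelta step."""
    accum_grad = rho * accum_grad + (1 - rho) * grad ** 2            # E[g^2]_t
    delta = -np.sqrt(accum_delta + epsilon) / np.sqrt(accum_grad + epsilon) * grad
    accum_delta = rho * accum_delta + (1 - rho) * delta ** 2         # E[dx^2]_t
    return x + delta, accum_grad, accum_delta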
from collections import namedtuple
import itertools
import functools
import operator
import ctypes
import numpy as np
from numba import _helperlib
from numba.core import config
Extent = namedtuple("Extent", ["begin", "end"])
attempt_nocopy_reshape = ctypes.CFUNCTYPE(
ctypes.c_int,
ctypes.c_long, # nd
np.ctypeslib.ndpointer(np.ctypeslib.c_intp, ndim=1), # dims
np.ctypeslib.ndpointer(np.ctypeslib.c_intp, ndim=1), # strides
ctypes.c_long, # newnd
np.ctypeslib.ndpointer(np.ctypeslib.c_intp, ndim=1), # newdims
np.ctypeslib.ndpointer(np.ctypeslib.c_intp, ndim=1), # newstrides
ctypes.c_long, # itemsize
ctypes.c_int, # is_f_order
)(_helperlib.c_helpers['attempt_nocopy_reshape'])
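# Illustrative only: the wrapper above takes index-sized NumPy arrays and
# returns nonzero when the reshape is expressible without a copy (see
# Array.reshape below for the real call site). A sketch:
#
#     olddims = np.array([6], dtype=np.ctypeslib.c_intp)
#     oldstrides = np.array([8], dtype=np.ctypeslib.c_intp)
#     newdims = np.array([2, 3], dtype=np.ctypeslib.c_intp)
#     newstrides = np.empty(2, np.ctypeslib.c_intp)
#     ok = attempt_nocopy_reshape(1, olddims, oldstrides, 2, newdims,
#                                 newstrides, 8, 0)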
class Dim(object):
"""A single dimension of the array
Attributes
----------
start:
start offset
stop:
stop offset
size:
number of items
stride:
item stride
"""
__slots__ = 'start', 'stop', 'size', 'stride', 'single'
def __init__(self, start, stop, size, stride, single):
self.start = start
self.stop = stop
self.size = size
self.stride = stride
self.single = single
assert not single or size == 1
def __getitem__(self, item):
if isinstance(item, slice):
start, stop, step = item.indices(self.size)
stride = step * self.stride
start = self.start + start * abs(self.stride)
stop = self.start + stop * abs(self.stride)
if stride == 0:
size = 1
else:
size = _compute_size(start, stop, stride)
ret = Dim(
start=start,
stop=stop,
size=size,
stride=stride,
single=False
)
return ret
else:
sliced = self[item:item + 1] if item != -1 else self[-1:]
if sliced.size != 1:
raise IndexError
return Dim(
start=sliced.start,
stop=sliced.stop,
size=sliced.size,
stride=sliced.stride,
single=True,
)
def get_offset(self, idx):
return self.start + idx * self.stride
def __repr__(self):
strfmt = "Dim(start=%s, stop=%s, size=%s, stride=%s)"
return strfmt % (self.start, self.stop, self.size, self.stride)
def normalize(self, base):
return Dim(start=self.start - base, stop=self.stop - base,
size=self.size, stride=self.stride, single=self.single)
def copy(self, start=None, stop=None, size=None, stride=None, single=None):
if start is None:
start = self.start
if stop is None:
stop = self.stop
if size is None:
size = self.size
if stride is None:
stride = self.stride
if single is None:
single = self.single
return Dim(start, stop, size, stride, single)
def is_contiguous(self, itemsize):
return self.stride == itemsize
def compute_index(indices, dims):
return sum(d.get_offset(i) for i, d in zip(indices, dims))
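# Illustrative only: for dims built from shape (2, 3) with strides (24, 8)
# (both starting at offset 0), compute_index((1, 2), dims) -> 1*24 + 2*8 = 40.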
class Element(object):
is_array = False
def __init__(self, extent):
self.extent = extent
def iter_contiguous_extent(self):
yield self.extent
class Array(object):
"""A dummy numpy array-like object. Consider it an array without the
actual data, but offset from the base data pointer.
Attributes
----------
dims: tuple of Dim
describing each dimension of the array
ndim: int
number of dimension
shape: tuple of int
size of each dimension
strides: tuple of int
stride of each dimension
itemsize: int
itemsize
extent: (start, end)
start and end offset containing the memory region
"""
is_array = True
@classmethod
def from_desc(cls, offset, shape, strides, itemsize):
dims = []
for ashape, astride in zip(shape, strides):
dim = Dim(offset, offset + ashape * astride, ashape, astride,
single=False)
dims.append(dim)
offset = 0 # offset only applies to first dimension
return cls(dims, itemsize)
def __init__(self, dims, itemsize):
self.dims = tuple(dims)
self.ndim = len(self.dims)
self.shape = tuple(dim.size for dim in self.dims)
self.strides = tuple(dim.stride for dim in self.dims)
self.itemsize = itemsize
self.size = functools.reduce(operator.mul, self.shape, 1)
self.extent = self._compute_extent()
if config.NPY_RELAXED_STRIDES_CHECKING:
self.flags = self._relaxed_compute_layout()
else:
self.flags = self._compute_layout()
def _relaxed_compute_layout(self):
# The logic here is based on that in _UpdateContiguousFlags from
# numpy/core/src/multiarray/flagsobject.c in NumPy v1.19.1 (commit
# 13661ac70).
# https://github.com/numpy/numpy/blob/maintenance/1.19.x/numpy/core/src/multiarray/flagsobject.c#L123-L191
# Records have no dims, and we can treat them as contiguous
if not self.dims:
return {'C_CONTIGUOUS': True, 'F_CONTIGUOUS': True}
# If this is a broadcast array then it is not contiguous
if any([dim.stride == 0 for dim in self.dims]):
return {'C_CONTIGUOUS': False, 'F_CONTIGUOUS': False}
flags = {'C_CONTIGUOUS': True, 'F_CONTIGUOUS': True}
# Check C contiguity
sd = self.itemsize
for dim in reversed(self.dims):
if dim.size == 0:
# Contiguous by definition
return {'C_CONTIGUOUS': True, 'F_CONTIGUOUS': True}
if dim.size != 1:
if dim.stride != sd:
flags['C_CONTIGUOUS'] = False
sd *= dim.size
# Check F contiguity
sd = self.itemsize
for dim in self.dims:
if dim.size != 1:
if dim.stride != sd:
flags['F_CONTIGUOUS'] = False
return flags
sd *= dim.size
return flags
def _compute_layout(self):
flags = {}
if not self.dims:
# Records have no dims, and we can treat them as contiguous
flags['F_CONTIGUOUS'] = True
flags['C_CONTIGUOUS'] = True
return flags
leftmost = self.dims[0].is_contiguous(self.itemsize)
rightmost = self.dims[-1].is_contiguous(self.itemsize)
def is_contig(traverse):
last = next(traverse)
for dim in traverse:
if last.size != 0 and last.size * last.stride != dim.stride:
return False
last = dim
return True
flags['F_CONTIGUOUS'] = leftmost and is_contig(iter(self.dims))
flags['C_CONTIGUOUS'] = rightmost and is_contig(reversed(self.dims))
return flags
def _compute_extent(self):
firstidx = [0] * self.ndim
lastidx = [s - 1 for s in self.shape]
start = compute_index(firstidx, self.dims)
stop = compute_index(lastidx, self.dims) + self.itemsize
stop = max(stop, start) # ensure positive extent
return Extent(start, stop)
def __repr__(self):
return '<Array dims=%s itemsize=%s>' % (self.dims, self.itemsize)
def __getitem__(self, item):
if not isinstance(item, tuple):
item = [item]
else:
item = list(item)
nitem = len(item)
ndim = len(self.dims)
if nitem > ndim:
raise IndexError("%d extra indices given" % (nitem - ndim,))
# Add empty slices for missing indices
while len(item) < ndim:
item.append(slice(None, None))
dims = [dim.__getitem__(it) for dim, it in zip(self.dims, item)]
newshape = [d.size for d in dims if not d.single]
arr = Array(dims, self.itemsize)
if newshape:
return arr.reshape(*newshape)[0]
else:
return Element(arr.extent)
@property
def is_c_contig(self):
return self.flags['C_CONTIGUOUS']
@property
def is_f_contig(self):
return self.flags['F_CONTIGUOUS']
def iter_contiguous_extent(self):
""" Generates extents
"""
if self.is_c_contig or self.is_f_contig:
yield self.extent
else:
if self.dims[0].stride < self.dims[-1].stride:
innerdim = self.dims[0]
outerdims = self.dims[1:]
outershape = self.shape[1:]
else:
innerdim = self.dims[-1]
outerdims = self.dims[:-1]
outershape = self.shape[:-1]
if innerdim.is_contiguous(self.itemsize):
oslen = [range(s) for s in outershape]
for indices in itertools.product(*oslen):
base = compute_index(indices, outerdims)
yield base + innerdim.start, base + innerdim.stop
else:
oslen = [range(s) for s in self.shape]
for indices in itertools.product(*oslen):
offset = compute_index(indices, self.dims)
yield offset, offset + self.itemsize
def reshape(self, *newdims, **kws):
oldnd = self.ndim
newnd = len(newdims)
if newdims == self.shape:
return self, None
order = kws.pop('order', 'C')
if kws:
raise TypeError('unknown keyword arguments %s' % kws.keys())
if order not in 'CFA':
raise ValueError('order not C|F|A')
# check for exactly one instance of -1 in newdims
# https://github.com/numpy/numpy/blob/623bc1fae1d47df24e7f1e29321d0c0ba2771ce0/numpy/core/src/multiarray/shape.c#L470-L515 # noqa: E501
unknownidx = -1
knownsize = 1
for i, dim in enumerate(newdims):
if dim < 0:
if unknownidx == -1:
unknownidx = i
else:
raise ValueError("can only specify one unknown dimension")
else:
knownsize *= dim
# compute the missing dimension
if unknownidx >= 0:
if knownsize == 0 or self.size % knownsize != 0:
raise ValueError("cannot infer valid shape for unknown dimension")
else:
newdims = newdims[0:unknownidx] \
+ (self.size // knownsize,) \
+ newdims[unknownidx + 1:]
newsize = functools.reduce(operator.mul, newdims, 1)
if order == 'A':
order = 'F' if self.is_f_contig else 'C'
if newsize != self.size:
raise ValueError("reshape changes the size of the array")
if self.is_c_contig or self.is_f_contig:
if order == 'C':
newstrides = list(iter_strides_c_contig(self, newdims))
elif order == 'F':
newstrides = list(iter_strides_f_contig(self, newdims))
else:
raise AssertionError("unreachable")
else:
newstrides = np.empty(newnd, np.ctypeslib.c_intp)
# need to keep these around in variables, not temporaries, so they
# don't get GC'ed before we call into the C code
olddims = np.array(self.shape, dtype=np.ctypeslib.c_intp)
oldstrides = np.array(self.strides, dtype=np.ctypeslib.c_intp)
newdims = np.array(newdims, dtype=np.ctypeslib.c_intp)
if not attempt_nocopy_reshape(
oldnd,
olddims,
oldstrides,
newnd,
newdims,
newstrides,
self.itemsize,
order == 'F',
):
raise NotImplementedError('reshape would require copy')
ret = self.from_desc(self.extent.begin, shape=newdims,
strides=newstrides, itemsize=self.itemsize)
return ret, list(self.iter_contiguous_extent())
def squeeze(self, axis=None):
newshape, newstrides = [], []
if axis is None:
for length, stride in zip(self.shape, self.strides):
if length != 1:
newshape.append(length)
newstrides.append(stride)
else:
if not isinstance(axis, tuple):
axis = (axis,)
for ax in axis:
if self.shape[ax] != 1:
raise ValueError(
"cannot select an axis to squeeze out which has size not equal "
"to one"
)
for i, (length, stride) in enumerate(zip(self.shape, self.strides)):
if i not in axis:
newshape.append(length)
newstrides.append(stride)
newarr = self.from_desc(
self.extent.begin,
shape=newshape,
strides=newstrides,
itemsize=self.itemsize,
)
return newarr, list(self.iter_contiguous_extent())
def ravel(self, order='C'):
if order not in 'CFA':
raise ValueError('order not C|F|A')
if self.ndim <= 1:
return self
elif (order in 'CA' and self.is_c_contig or
order in 'FA' and self.is_f_contig):
newshape = (self.size,)
newstrides = (self.itemsize,)
arr = self.from_desc(self.extent.begin, newshape, newstrides,
self.itemsize)
return arr, list(self.iter_contiguous_extent())
else:
raise NotImplementedError("ravel on non-contiguous array")
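# Illustrative only -- not part of the original module. A sketch of how
# Array.from_desc() derives extent and contiguity for a 2x3 C-contiguous
# array of 8-byte items:
#
#     arr = Array.from_desc(offset=0, shape=(2, 3), strides=(24, 8), itemsize=8)
#     arr.shape        -> (2, 3)
#     arr.extent       -> Extent(begin=0, end=48)
#     arr.is_c_contig  -> True
#     arr.is_f_contig  -> False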
def iter_strides_f_contig(arr, shape=None):
"""yields the f-contiguous strides
"""
shape = arr.shape if shape is None else shape
itemsize = arr.itemsize
yield itemsize
sum = 1
for s in shape[:-1]:
sum *= s
yield sum * itemsize
def iter_strides_c_contig(arr, shape=None):
"""yields the c-contiguous strides
"""
shape = arr.shape if shape is None else shape
itemsize = arr.itemsize
def gen():
yield itemsize
sum = 1
for s in reversed(shape[1:]):
sum *= s
yield sum * itemsize
for i in reversed(list(gen())):
yield i
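# Illustrative only: for shape (2, 3, 4) and itemsize 8, the generators above
# yield F-order strides (8, 16, 48) and C-order strides (96, 32, 8).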
def is_element_indexing(item, ndim):
if isinstance(item, slice):
return False
elif isinstance(item, tuple):
if len(item) == ndim:
if not any(isinstance(it, slice) for it in item):
return True
else:
return True
return False
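# Illustrative only: is_element_indexing((1, 2), ndim=2) -> True (one scalar
# index per axis), while is_element_indexing((1, slice(None)), ndim=2) and
# is_element_indexing(slice(None), ndim=1) -> False.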
def _compute_size(start, stop, step):
"""Algorithm adapted from cpython rangeobject.c
"""
if step > 0:
lo = start
hi = stop
else:
lo = stop
hi = start
step = -step
if lo >= hi:
return 0
return (hi - lo - 1) // step + 1
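# Illustrative only: _compute_size(0, 10, 3) -> 4 (indices 0, 3, 6, 9), and
# _compute_size(10, 0, -3) -> 4 via the negative-step branch.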
"""Test Home Assistant template helper methods."""
from datetime import datetime, timedelta
import logging
import math
import random
from unittest.mock import patch
from freezegun import freeze_time
import pytest
import voluptuous as vol
from homeassistant.components import group
from homeassistant.config import async_process_ha_core_config
from homeassistant.const import (
ATTR_UNIT_OF_MEASUREMENT,
LENGTH_METERS,
LENGTH_MILLIMETERS,
MASS_GRAMS,
PRESSURE_PA,
SPEED_KILOMETERS_PER_HOUR,
TEMP_CELSIUS,
VOLUME_LITERS,
)
from homeassistant.exceptions import TemplateError
from homeassistant.helpers import device_registry as dr, entity, template
from homeassistant.helpers.entity_platform import EntityPlatform
from homeassistant.setup import async_setup_component
import homeassistant.util.dt as dt_util
from homeassistant.util.unit_system import UnitSystem
from tests.common import (
MockConfigEntry,
mock_area_registry,
mock_device_registry,
mock_registry,
)
def _set_up_units(hass):
"""Set up the tests."""
hass.config.units = UnitSystem(
"custom",
TEMP_CELSIUS,
LENGTH_METERS,
SPEED_KILOMETERS_PER_HOUR,
VOLUME_LITERS,
MASS_GRAMS,
PRESSURE_PA,
LENGTH_MILLIMETERS,
)
def render(hass, template_str, variables=None):
"""Create render info from template."""
tmp = template.Template(template_str, hass)
return tmp.async_render(variables)
def render_to_info(hass, template_str, variables=None):
"""Create render info from template."""
tmp = template.Template(template_str, hass)
return tmp.async_render_to_info(variables)
def extract_entities(hass, template_str, variables=None):
"""Extract entities from a template."""
info = render_to_info(hass, template_str, variables)
return info.entities
def assert_result_info(info, result, entities=None, domains=None, all_states=False):
"""Check result info."""
assert info.result() == result
assert info.all_states == all_states
assert info.filter("invalid_entity_name.somewhere") == all_states
if entities is not None:
assert info.entities == frozenset(entities)
assert all([info.filter(entity) for entity in entities])
if not all_states:
assert not info.filter("invalid_entity_name.somewhere")
else:
assert not info.entities
if domains is not None:
assert info.domains == frozenset(domains)
assert all([info.filter(domain + ".entity") for domain in domains])
else:
assert not hasattr(info, "_domains")
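# Illustrative only -- not an original test. A sketch of how the helpers above
# combine; the entity id is hypothetical:
#
#     info = render_to_info(hass, "{{ states('light.kitchen') }}")
#     assert_result_info(info, "unknown", entities=["light.kitchen"])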
def test_template_equality():
"""Test template comparison and hashing."""
template_one = template.Template("{{ template_one }}")
template_one_1 = template.Template("{{ template_one }}")
template_two = template.Template("{{ template_two }}")
assert template_one == template_one_1
assert template_one != template_two
assert hash(template_one) == hash(template_one_1)
assert hash(template_one) != hash(template_two)
assert str(template_one_1) == 'Template("{{ template_one }}")'
with pytest.raises(TypeError):
template.Template(["{{ template_one }}"])
def test_invalid_template(hass):
"""Invalid template raises error."""
tmpl = template.Template("{{", hass)
with pytest.raises(TemplateError):
tmpl.ensure_valid()
with pytest.raises(TemplateError):
tmpl.async_render()
info = tmpl.async_render_to_info()
with pytest.raises(TemplateError):
assert info.result() == "impossible"
tmpl = template.Template("{{states(keyword)}}", hass)
tmpl.ensure_valid()
with pytest.raises(TemplateError):
tmpl.async_render()
def test_referring_states_by_entity_id(hass):
"""Test referring states by entity id."""
hass.states.async_set("test.object", "happy")
assert (
template.Template("{{ states.test.object.state }}", hass).async_render()
== "happy"
)
assert (
template.Template('{{ states["test.object"].state }}', hass).async_render()
== "happy"
)
assert (
template.Template('{{ states("test.object") }}', hass).async_render() == "happy"
)
def test_invalid_entity_id(hass):
"""Test referring states by entity id."""
with pytest.raises(TemplateError):
template.Template('{{ states["big.fat..."] }}', hass).async_render()
with pytest.raises(TemplateError):
template.Template('{{ states.test["big.fat..."] }}', hass).async_render()
with pytest.raises(TemplateError):
template.Template('{{ states["invalid/domain"] }}', hass).async_render()
def test_raise_exception_on_error(hass):
"""Test raising an exception on error."""
with pytest.raises(TemplateError):
template.Template("{{ invalid_syntax").ensure_valid()
def test_iterating_all_states(hass):
"""Test iterating all states."""
tmpl_str = "{% for state in states %}{{ state.state }}{% endfor %}"
info = render_to_info(hass, tmpl_str)
assert_result_info(info, "", all_states=True)
assert info.rate_limit == template.ALL_STATES_RATE_LIMIT
hass.states.async_set("test.object", "happy")
hass.states.async_set("sensor.temperature", 10)
info = render_to_info(hass, tmpl_str)
assert_result_info(info, "10happy", entities=[], all_states=True)
def test_iterating_all_states_unavailable(hass):
"""Test iterating all states unavailable."""
hass.states.async_set("test.object", "on")
tmpl_str = "{{ states | selectattr('state', 'in', ['unavailable', 'unknown', 'none']) | list | count }}"
info = render_to_info(hass, tmpl_str)
assert info.all_states is True
assert info.rate_limit == template.ALL_STATES_RATE_LIMIT
hass.states.async_set("test.object", "unknown")
hass.states.async_set("sensor.temperature", 10)
info = render_to_info(hass, tmpl_str)
assert_result_info(info, 1, entities=[], all_states=True)
def test_iterating_domain_states(hass):
"""Test iterating domain states."""
tmpl_str = "{% for state in states.sensor %}{{ state.state }}{% endfor %}"
info = render_to_info(hass, tmpl_str)
assert_result_info(info, "", domains=["sensor"])
assert info.rate_limit == template.DOMAIN_STATES_RATE_LIMIT
hass.states.async_set("test.object", "happy")
hass.states.async_set("sensor.back_door", "open")
hass.states.async_set("sensor.temperature", 10)
info = render_to_info(hass, tmpl_str)
assert_result_info(
info,
"open10",
entities=[],
domains=["sensor"],
)
def test_float_function(hass):
"""Test float function."""
hass.states.async_set("sensor.temperature", "12")
assert (
template.Template(
"{{ float(states.sensor.temperature.state) }}", hass
).async_render()
== 12.0
)
assert (
template.Template(
"{{ float(states.sensor.temperature.state) > 11 }}", hass
).async_render()
is True
)
assert (
template.Template("{{ float('forgiving') }}", hass).async_render()
== "forgiving"
)
assert render(hass, "{{ float('bad', 1) }}") == 1
assert render(hass, "{{ float('bad', default=1) }}") == 1
def test_float_filter(hass):
"""Test float filter."""
hass.states.async_set("sensor.temperature", "12")
assert render(hass, "{{ states.sensor.temperature.state | float }}") == 12.0
assert render(hass, "{{ states.sensor.temperature.state | float > 11 }}") is True
assert render(hass, "{{ 'bad' | float }}") == 0
assert render(hass, "{{ 'bad' | float(1) }}") == 1
assert render(hass, "{{ 'bad' | float(default=1) }}") == 1
def test_int_filter(hass):
"""Test int filter."""
hass.states.async_set("sensor.temperature", "12.2")
assert render(hass, "{{ states.sensor.temperature.state | int }}") == 12
assert render(hass, "{{ states.sensor.temperature.state | int > 11 }}") is True
hass.states.async_set("sensor.temperature", "0x10")
assert render(hass, "{{ states.sensor.temperature.state | int(base=16) }}") == 16
assert render(hass, "{{ 'bad' | int }}") == 0
assert render(hass, "{{ 'bad' | int(1) }}") == 1
assert render(hass, "{{ 'bad' | int(default=1) }}") == 1
def test_int_function(hass):
"""Test int filter."""
hass.states.async_set("sensor.temperature", "12.2")
assert render(hass, "{{ int(states.sensor.temperature.state) }}") == 12
assert render(hass, "{{ int(states.sensor.temperature.state) > 11 }}") is True
hass.states.async_set("sensor.temperature", "0x10")
assert render(hass, "{{ int(states.sensor.temperature.state, base=16) }}") == 16
assert render(hass, "{{ int('bad') }}") == "bad"
assert render(hass, "{{ int('bad', 1) }}") == 1
assert render(hass, "{{ int('bad', default=1) }}") == 1
@pytest.mark.parametrize(
"value, expected",
[
(0, True),
(0.0, True),
("0", True),
("0.0", True),
(True, True),
(False, True),
("True", False),
("False", False),
(None, False),
("None", False),
("horse", False),
(math.pi, True),
(math.nan, False),
(math.inf, False),
("nan", False),
("inf", False),
],
)
def test_isnumber(hass, value, expected):
"""Test is_number."""
assert (
template.Template("{{ is_number(value) }}", hass).async_render({"value": value})
== expected
)
assert (
template.Template("{{ value | is_number }}", hass).async_render(
{"value": value}
)
== expected
)
def test_rounding_value(hass):
"""Test rounding value."""
hass.states.async_set("sensor.temperature", 12.78)
assert (
template.Template(
"{{ states.sensor.temperature.state | round(1) }}", hass
).async_render()
== 12.8
)
assert (
template.Template(
"{{ states.sensor.temperature.state | multiply(10) | round }}", hass
).async_render()
== 128
)
assert (
template.Template(
'{{ states.sensor.temperature.state | round(1, "floor") }}', hass
).async_render()
== 12.7
)
assert (
template.Template(
'{{ states.sensor.temperature.state | round(1, "ceil") }}', hass
).async_render()
== 12.8
)
assert (
template.Template(
'{{ states.sensor.temperature.state | round(1, "half") }}', hass
).async_render()
== 13.0
)
def test_rounding_value_on_error(hass):
"""Test rounding value handling of error."""
assert template.Template("{{ None | round }}", hass).async_render() is None
assert (
template.Template('{{ "no_number" | round }}', hass).async_render()
== "no_number"
)
# Test handling of default return value
assert render(hass, "{{ 'no_number' | round(default=1) }}") == 1
def test_multiply(hass):
"""Test multiply."""
tests = {None: None, 10: 100, '"abcd"': "abcd"}
for inp, out in tests.items():
assert (
template.Template(
"{{ %s | multiply(10) | round }}" % inp, hass
).async_render()
== out
)
# Test handling of default return value
assert render(hass, "{{ 'no_number' | multiply(10, 1) }}") == 1
assert render(hass, "{{ 'no_number' | multiply(10, default=1) }}") == 1
def test_logarithm(hass):
"""Test logarithm."""
tests = [
(4, 2, 2.0),
(1000, 10, 3.0),
(math.e, "", 1.0),
('"invalid"', "_", "invalid"),
(10, '"invalid"', 10.0),
]
for value, base, expected in tests:
assert (
template.Template(
f"{{{{ {value} | log({base}) | round(1) }}}}", hass
).async_render()
== expected
)
assert (
template.Template(
f"{{{{ log({value}, {base}) | round(1) }}}}", hass
).async_render()
== expected
)
# Test handling of default return value
assert render(hass, "{{ 'no_number' | log(10, 1) }}") == 1
assert render(hass, "{{ 'no_number' | log(10, default=1) }}") == 1
assert render(hass, "{{ log('no_number', 10, 1) }}") == 1
assert render(hass, "{{ log('no_number', 10, default=1) }}") == 1
def test_sine(hass):
"""Test sine."""
tests = [
(0, 0.0),
(math.pi / 2, 1.0),
(math.pi, 0.0),
(math.pi * 1.5, -1.0),
(math.pi / 10, 0.309),
('"duck"', "duck"),
]
for value, expected in tests:
assert (
template.Template("{{ %s | sin | round(3) }}" % value, hass).async_render()
== expected
)
assert render(hass, f"{{{{ sin({value}) | round(3) }}}}") == expected
# Test handling of default return value
assert render(hass, "{{ 'no_number' | sin(1) }}") == 1
assert render(hass, "{{ 'no_number' | sin(default=1) }}") == 1
assert render(hass, "{{ sin('no_number', 1) }}") == 1
assert render(hass, "{{ sin('no_number', default=1) }}") == 1
def test_cos(hass):
"""Test cosine."""
tests = [
(0, 1.0),
(math.pi / 2, 0.0),
(math.pi, -1.0),
(math.pi * 1.5, -0.0),
(math.pi / 10, 0.951),
("'error'", "error"),
]
for value, expected in tests:
assert (
template.Template("{{ %s | cos | round(3) }}" % value, hass).async_render()
== expected
)
assert render(hass, f"{{{{ cos({value}) | round(3) }}}}") == expected
# Test handling of default return value
assert render(hass, "{{ 'no_number' | sin(1) }}") == 1
assert render(hass, "{{ 'no_number' | sin(default=1) }}") == 1
assert render(hass, "{{ sin('no_number', 1) }}") == 1
assert render(hass, "{{ sin('no_number', default=1) }}") == 1
def test_tan(hass):
"""Test tangent."""
tests = [
(0, 0.0),
(math.pi, -0.0),
(math.pi / 180 * 45, 1.0),
(math.pi / 180 * 90, "1.633123935319537e+16"),
(math.pi / 180 * 135, -1.0),
("'error'", "error"),
]
for value, expected in tests:
assert (
template.Template("{{ %s | tan | round(3) }}" % value, hass).async_render()
== expected
)
assert render(hass, f"{{{{ tan({value}) | round(3) }}}}") == expected
# Test handling of default return value
assert render(hass, "{{ 'no_number' | tan(1) }}") == 1
assert render(hass, "{{ 'no_number' | tan(default=1) }}") == 1
assert render(hass, "{{ tan('no_number', 1) }}") == 1
assert render(hass, "{{ tan('no_number', default=1) }}") == 1
def test_sqrt(hass):
"""Test square root."""
tests = [
(0, 0.0),
(1, 1.0),
(2, 1.414),
(10, 3.162),
(100, 10.0),
("'error'", "error"),
]
for value, expected in tests:
assert (
template.Template("{{ %s | sqrt | round(3) }}" % value, hass).async_render()
== expected
)
assert render(hass, f"{{{{ sqrt({value}) | round(3) }}}}") == expected
# Test handling of default return value
assert render(hass, "{{ 'no_number' | sqrt(1) }}") == 1
assert render(hass, "{{ 'no_number' | sqrt(default=1) }}") == 1
assert render(hass, "{{ sqrt('no_number', 1) }}") == 1
assert render(hass, "{{ sqrt('no_number', default=1) }}") == 1
def test_arc_sine(hass):
"""Test arcus sine."""
tests = [
(-2.0, -2.0), # value error
(-1.0, -1.571),
(-0.5, -0.524),
(0.0, 0.0),
(0.5, 0.524),
(1.0, 1.571),
(2.0, 2.0), # value error
('"error"', "error"),
]
for value, expected in tests:
assert (
template.Template("{{ %s | asin | round(3) }}" % value, hass).async_render()
== expected
)
assert render(hass, f"{{{{ asin({value}) | round(3) }}}}") == expected
# Test handling of default return value
assert render(hass, "{{ 'no_number' | asin(1) }}") == 1
assert render(hass, "{{ 'no_number' | asin(default=1) }}") == 1
assert render(hass, "{{ asin('no_number', 1) }}") == 1
assert render(hass, "{{ asin('no_number', default=1) }}") == 1
def test_arc_cos(hass):
"""Test arcus cosine."""
tests = [
(-2.0, -2.0), # value error
(-1.0, 3.142),
(-0.5, 2.094),
(0.0, 1.571),
(0.5, 1.047),
(1.0, 0.0),
(2.0, 2.0), # value error
('"error"', "error"),
]
for value, expected in tests:
assert (
template.Template("{{ %s | acos | round(3) }}" % value, hass).async_render()
== expected
)
assert render(hass, f"{{{{ acos({value}) | round(3) }}}}") == expected
# Test handling of default return value
assert render(hass, "{{ 'no_number' | acos(1) }}") == 1
assert render(hass, "{{ 'no_number' | acos(default=1) }}") == 1
assert render(hass, "{{ acos('no_number', 1) }}") == 1
assert render(hass, "{{ acos('no_number', default=1) }}") == 1
def test_arc_tan(hass):
"""Test arcus tangent."""
tests = [
(-10.0, -1.471),
(-2.0, -1.107),
(-1.0, -0.785),
(-0.5, -0.464),
(0.0, 0.0),
(0.5, 0.464),
(1.0, 0.785),
(2.0, 1.107),
(10.0, 1.471),
('"error"', "error"),
]
for value, expected in tests:
assert (
template.Template("{{ %s | atan | round(3) }}" % value, hass).async_render()
== expected
)
assert render(hass, f"{{{{ atan({value}) | round(3) }}}}") == expected
# Test handling of default return value
assert render(hass, "{{ 'no_number' | atan(1) }}") == 1
assert render(hass, "{{ 'no_number' | atan(default=1) }}") == 1
assert render(hass, "{{ atan('no_number', 1) }}") == 1
assert render(hass, "{{ atan('no_number', default=1) }}") == 1
def test_arc_tan2(hass):
"""Test two parameter version of arcus tangent."""
tests = [
(-10.0, -10.0, -2.356),
(-10.0, 0.0, -1.571),
(-10.0, 10.0, -0.785),
(0.0, -10.0, 3.142),
(0.0, 0.0, 0.0),
(0.0, 10.0, 0.0),
(10.0, -10.0, 2.356),
(10.0, 0.0, 1.571),
(10.0, 10.0, 0.785),
(-4.0, 3.0, -0.927),
(-1.0, 2.0, -0.464),
(2.0, 1.0, 1.107),
('"duck"', '"goose"', ("duck", "goose")),
]
for y, x, expected in tests:
assert (
template.Template(
f"{{{{ ({y}, {x}) | atan2 | round(3) }}}}", hass
).async_render()
== expected
)
assert (
template.Template(
f"{{{{ atan2({y}, {x}) | round(3) }}}}", hass
).async_render()
== expected
)
# Test handling of default return value
assert render(hass, "{{ ('duck', 'goose') | atan2(1) }}") == 1
assert render(hass, "{{ ('duck', 'goose') | atan2(default=1) }}") == 1
assert render(hass, "{{ atan2('duck', 'goose', 1) }}") == 1
assert render(hass, "{{ atan2('duck', 'goose', default=1) }}") == 1
def test_strptime(hass):
"""Test the parse timestamp method."""
tests = [
("2016-10-19 15:22:05.588122 UTC", "%Y-%m-%d %H:%M:%S.%f %Z", None),
("2016-10-19 15:22:05.588122+0100", "%Y-%m-%d %H:%M:%S.%f%z", None),
("2016-10-19 15:22:05.588122", "%Y-%m-%d %H:%M:%S.%f", None),
("2016-10-19", "%Y-%m-%d", None),
("2016", "%Y", None),
("15:22:05", "%H:%M:%S", None),
("1469119144", "%Y", 1469119144),
("invalid", "%Y", "invalid"),
]
for inp, fmt, expected in tests:
if expected is None:
expected = str(datetime.strptime(inp, fmt))
temp = f"{{{{ strptime('{inp}', '{fmt}') }}}}"
assert template.Template(temp, hass).async_render() == expected
# Test handling of default return value
assert render(hass, "{{ strptime('invalid', '%Y', 1) }}") == 1
assert render(hass, "{{ strptime('invalid', '%Y', default=1) }}") == 1
def test_timestamp_custom(hass):
"""Test the timestamps to custom filter."""
now = dt_util.utcnow()
tests = [
(None, None, None, None),
(1469119144, None, True, "2016-07-21 16:39:04"),
(1469119144, "%Y", True, 2016),
(1469119144, "invalid", True, "invalid"),
(dt_util.as_timestamp(now), None, False, now.strftime("%Y-%m-%d %H:%M:%S")),
]
for inp, fmt, local, out in tests:
if fmt and local is not None:
fil = f"timestamp_custom('{fmt}', {local})"
elif fmt:
fil = f"timestamp_custom('{fmt}')"
else:
fil = "timestamp_custom"
assert template.Template(f"{{{{ {inp} | {fil} }}}}", hass).async_render() == out
# Test handling of default return value
assert render(hass, "{{ None | timestamp_custom('invalid', True, 1) }}") == 1
assert render(hass, "{{ None | timestamp_custom(default=1) }}") == 1
def test_timestamp_local(hass):
"""Test the timestamps to local filter."""
tests = {None: None, 1469119144: "2016-07-21T16:39:04+00:00"}
for inp, out in tests.items():
assert (
template.Template("{{ %s | timestamp_local }}" % inp, hass).async_render()
== out
)
# Test handling of default return value
assert render(hass, "{{ None | timestamp_local(1) }}") == 1
assert render(hass, "{{ None | timestamp_local(default=1) }}") == 1
@pytest.mark.parametrize(
"input",
(
"2021-06-03 13:00:00.000000+00:00",
"1986-07-09T12:00:00Z",
"2016-10-19 15:22:05.588122+0100",
"2016-10-19",
"2021-01-01 00:00:01",
"invalid",
),
)
def test_as_datetime(hass, input):
"""Test converting a timestamp string to a date object."""
expected = dt_util.parse_datetime(input)
if expected is not None:
expected = str(expected)
assert (
template.Template(f"{{{{ as_datetime('{input}') }}}}", hass).async_render()
== expected
)
assert (
template.Template(f"{{{{ '{input}' | as_datetime }}}}", hass).async_render()
== expected
)
def test_as_datetime_from_timestamp(hass):
"""Test converting a UNIX timestamp to a date object."""
tests = [
(1469119144, "2016-07-21 16:39:04+00:00"),
(1469119144.0, "2016-07-21 16:39:04+00:00"),
(-1, "1969-12-31 23:59:59+00:00"),
]
for input, output in tests:
if output is not None:
output = str(output)
assert (
template.Template(f"{{{{ as_datetime({input}) }}}}", hass).async_render()
== output
)
assert (
template.Template(f"{{{{ {input} | as_datetime }}}}", hass).async_render()
== output
)
assert (
template.Template(f"{{{{ as_datetime('{input}') }}}}", hass).async_render()
== output
)
assert (
template.Template(f"{{{{ '{input}' | as_datetime }}}}", hass).async_render()
== output
)
def test_as_local(hass):
"""Test converting time to local."""
hass.states.async_set("test.object", "available")
last_updated = hass.states.get("test.object").last_updated
assert template.Template(
"{{ as_local(states.test.object.last_updated) }}", hass
).async_render() == str(dt_util.as_local(last_updated))
assert template.Template(
"{{ states.test.object.last_updated | as_local }}", hass
).async_render() == str(dt_util.as_local(last_updated))
def test_to_json(hass):
"""Test the object to JSON string filter."""
# Note that we're not testing the actual json.loads and json.dumps methods,
# only the filters, so we don't need to be exhaustive with our sample JSON.
expected_result = {"Foo": "Bar"}
actual_result = template.Template(
"{{ {'Foo': 'Bar'} | to_json }}", hass
).async_render()
assert actual_result == expected_result
def test_to_json_string(hass):
"""Test the object to JSON string filter."""
# Note that we're not testing the actual json.loads and json.dumps methods,
# only the filters, so we don't need to be exhaustive with our sample JSON.
actual_value_ascii = template.Template(
"{{ 'Bar ҝ éèà' | to_json }}", hass
).async_render()
assert actual_value_ascii == '"Bar \\u049d \\u00e9\\u00e8\\u00e0"'
actual_value = template.Template(
"{{ 'Bar ҝ éèà' | to_json(ensure_ascii=False) }}", hass
).async_render()
assert actual_value == '"Bar ҝ éèà"'
def test_from_json(hass):
"""Test the JSON string to object filter."""
# Note that we're not testing the actual json.loads and json.dumps methods,
# only the filters, so we don't need to be exhaustive with our sample JSON.
expected_result = "Bar"
actual_result = template.Template(
'{{ (\'{"Foo": "Bar"}\' | from_json).Foo }}', hass
).async_render()
assert actual_result == expected_result
def test_average(hass):
"""Test the average filter."""
assert template.Template("{{ [1, 2, 3] | average }}", hass).async_render() == 2
assert template.Template("{{ average([1, 2, 3]) }}", hass).async_render() == 2
assert template.Template("{{ average(1, 2, 3) }}", hass).async_render() == 2
with pytest.raises(TemplateError):
template.Template("{{ 1 | average }}", hass).async_render()
with pytest.raises(TemplateError):
template.Template("{{ average() }}", hass).async_render()
def test_min(hass):
"""Test the min filter."""
assert template.Template("{{ [1, 2, 3] | min }}", hass).async_render() == 1
assert template.Template("{{ min([1, 2, 3]) }}", hass).async_render() == 1
assert template.Template("{{ min(1, 2, 3) }}", hass).async_render() == 1
def test_max(hass):
"""Test the max filter."""
assert template.Template("{{ [1, 2, 3] | max }}", hass).async_render() == 3
assert template.Template("{{ max([1, 2, 3]) }}", hass).async_render() == 3
assert template.Template("{{ max(1, 2, 3) }}", hass).async_render() == 3
def test_ord(hass):
"""Test the ord filter."""
assert template.Template('{{ "d" | ord }}', hass).async_render() == 100
def test_base64_encode(hass):
"""Test the base64_encode filter."""
assert (
template.Template('{{ "homeassistant" | base64_encode }}', hass).async_render()
== "aG9tZWFzc2lzdGFudA=="
)
def test_base64_decode(hass):
"""Test the base64_decode filter."""
assert (
template.Template(
'{{ "aG9tZWFzc2lzdGFudA==" | base64_decode }}', hass
).async_render()
== "homeassistant"
)
def test_ordinal(hass):
"""Test the ordinal filter."""
tests = [
(1, "1st"),
(2, "2nd"),
(3, "3rd"),
(4, "4th"),
(5, "5th"),
(12, "12th"),
(100, "100th"),
(101, "101st"),
]
for value, expected in tests:
assert (
template.Template("{{ %s | ordinal }}" % value, hass).async_render()
== expected
)
def test_timestamp_utc(hass):
"""Test the timestamps to local filter."""
now = dt_util.utcnow()
tests = {
None: None,
1469119144: "2016-07-21T16:39:04+00:00",
dt_util.as_timestamp(now): now.isoformat(),
}
for inp, out in tests.items():
assert (
template.Template("{{ %s | timestamp_utc }}" % inp, hass).async_render()
== out
)
# Test handling of default return value
assert render(hass, "{{ None | timestamp_utc(1) }}") == 1
assert render(hass, "{{ None | timestamp_utc(default=1) }}") == 1
def test_as_timestamp(hass):
"""Test the as_timestamp function."""
assert (
template.Template('{{ as_timestamp("invalid") }}', hass).async_render() is None
)
hass.mock = None
assert (
template.Template("{{ as_timestamp(states.mock) }}", hass).async_render()
is None
)
tpl = (
'{{ as_timestamp(strptime("2024-02-03T09:10:24+0000", '
'"%Y-%m-%dT%H:%M:%S%z")) }}'
)
assert template.Template(tpl, hass).async_render() == 1706951424.0
# Test handling of default return value
assert render(hass, "{{ 'invalid' | as_timestamp(1) }}") == 1
assert render(hass, "{{ 'invalid' | as_timestamp(default=1) }}") == 1
assert render(hass, "{{ as_timestamp('invalid', 1) }}") == 1
assert render(hass, "{{ as_timestamp('invalid', default=1) }}") == 1
@patch.object(random, "choice")
def test_random_every_time(test_choice, hass):
"""Ensure the random filter runs every time, not just once."""
tpl = template.Template("{{ [1,2] | random }}", hass)
test_choice.return_value = "foo"
assert tpl.async_render() == "foo"
test_choice.return_value = "bar"
assert tpl.async_render() == "bar"
def test_passing_vars_as_keywords(hass):
"""Test passing variables as keywords."""
assert template.Template("{{ hello }}", hass).async_render(hello=127) == 127
def test_passing_vars_as_vars(hass):
"""Test passing variables as variables."""
assert template.Template("{{ hello }}", hass).async_render({"hello": 127}) == 127
def test_passing_vars_as_list(hass):
"""Test passing variables as list."""
assert template.render_complex(
template.Template("{{ hello }}", hass), {"hello": ["foo", "bar"]}
) == ["foo", "bar"]
def test_passing_vars_as_list_element(hass):
"""Test passing variables as list."""
assert (
template.render_complex(
template.Template("{{ hello[1] }}", hass), {"hello": ["foo", "bar"]}
)
== "bar"
)
def test_passing_vars_as_dict_element(hass):
"""Test passing variables as list."""
assert (
template.render_complex(
template.Template("{{ hello.foo }}", hass), {"hello": {"foo": "bar"}}
)
== "bar"
)
def test_passing_vars_as_dict(hass):
"""Test passing variables as list."""
assert template.render_complex(
template.Template("{{ hello }}", hass), {"hello": {"foo": "bar"}}
) == {"foo": "bar"}
def test_render_with_possible_json_value_with_valid_json(hass):
"""Render with possible JSON value with valid JSON."""
tpl = template.Template("{{ value_json.hello }}", hass)
assert tpl.async_render_with_possible_json_value('{"hello": "world"}') == "world"
def test_render_with_possible_json_value_with_invalid_json(hass):
"""Render with possible JSON value with invalid JSON."""
tpl = template.Template("{{ value_json }}", hass)
assert tpl.async_render_with_possible_json_value("{ I AM NOT JSON }") == ""
def test_render_with_possible_json_value_with_template_error_value(hass):
"""Render with possible JSON value with template error value."""
tpl = template.Template("{{ non_existing.variable }}", hass)
assert tpl.async_render_with_possible_json_value("hello", "-") == "-"
def test_render_with_possible_json_value_with_missing_json_value(hass):
"""Render with possible JSON value with unknown JSON object."""
tpl = template.Template("{{ value_json.goodbye }}", hass)
assert tpl.async_render_with_possible_json_value('{"hello": "world"}') == ""
def test_render_with_possible_json_value_valid_with_is_defined(hass):
"""Render with possible JSON value with known JSON object."""
tpl = template.Template("{{ value_json.hello|is_defined }}", hass)
assert tpl.async_render_with_possible_json_value('{"hello": "world"}') == "world"
def test_render_with_possible_json_value_undefined_json(hass):
"""Render with possible JSON value with unknown JSON object."""
tpl = template.Template("{{ value_json.bye|is_defined }}", hass)
assert (
tpl.async_render_with_possible_json_value('{"hello": "world"}')
== '{"hello": "world"}'
)
def test_render_with_possible_json_value_undefined_json_error_value(hass):
"""Render with possible JSON value with unknown JSON object."""
tpl = template.Template("{{ value_json.bye|is_defined }}", hass)
assert tpl.async_render_with_possible_json_value('{"hello": "world"}', "") == ""
def test_render_with_possible_json_value_non_string_value(hass):
"""Render with possible JSON value with non-string value."""
tpl = template.Template(
"""
{{ strptime(value~'+0000', '%Y-%m-%d %H:%M:%S%z') }}
""",
hass,
)
value = datetime(2019, 1, 18, 12, 13, 14)
expected = str(value.replace(tzinfo=dt_util.UTC))
assert tpl.async_render_with_possible_json_value(value) == expected
def test_if_state_exists(hass):
"""Test if state exists works."""
hass.states.async_set("test.object", "available")
tpl = template.Template(
"{% if states.test.object %}exists{% else %}not exists{% endif %}", hass
)
assert tpl.async_render() == "exists"
def test_is_state(hass):
"""Test is_state method."""
hass.states.async_set("test.object", "available")
tpl = template.Template(
"""
{% if is_state("test.object", "available") %}yes{% else %}no{% endif %}
""",
hass,
)
assert tpl.async_render() == "yes"
tpl = template.Template(
"""
{{ is_state("test.noobject", "available") }}
""",
hass,
)
assert tpl.async_render() is False
def test_is_state_attr(hass):
"""Test is_state_attr method."""
hass.states.async_set("test.object", "available", {"mode": "on"})
tpl = template.Template(
"""
{% if is_state_attr("test.object", "mode", "on") %}yes{% else %}no{% endif %}
""",
hass,
)
assert tpl.async_render() == "yes"
tpl = template.Template(
"""
{{ is_state_attr("test.noobject", "mode", "on") }}
""",
hass,
)
assert tpl.async_render() is False
def test_state_attr(hass):
"""Test state_attr method."""
hass.states.async_set("test.object", "available", {"mode": "on"})
tpl = template.Template(
"""
{% if state_attr("test.object", "mode") == "on" %}yes{% else %}no{% endif %}
""",
hass,
)
assert tpl.async_render() == "yes"
tpl = template.Template(
"""
{{ state_attr("test.noobject", "mode") == None }}
""",
hass,
)
assert tpl.async_render() is True
def test_states_function(hass):
"""Test using states as a function."""
hass.states.async_set("test.object", "available")
tpl = template.Template('{{ states("test.object") }}', hass)
assert tpl.async_render() == "available"
tpl2 = template.Template('{{ states("test.object2") }}', hass)
assert tpl2.async_render() == "unknown"
@patch(
"homeassistant.helpers.template.TemplateEnvironment.is_safe_callable",
return_value=True,
)
def test_now(mock_is_safe, hass):
"""Test now method."""
now = dt_util.now()
with patch("homeassistant.util.dt.now", return_value=now):
info = template.Template("{{ now().isoformat() }}", hass).async_render_to_info()
assert now.isoformat() == info.result()
assert info.has_time is True
@patch(
"homeassistant.helpers.template.TemplateEnvironment.is_safe_callable",
return_value=True,
)
def test_utcnow(mock_is_safe, hass):
"""Test now method."""
utcnow = dt_util.utcnow()
with patch("homeassistant.util.dt.utcnow", return_value=utcnow):
info = template.Template(
"{{ utcnow().isoformat() }}", hass
).async_render_to_info()
assert utcnow.isoformat() == info.result()
assert info.has_time is True
@pytest.mark.parametrize(
"now, expected, expected_midnight, timezone_str",
[
# Host clock in UTC
(
"2021-11-24 03:00:00+00:00",
"2021-11-23T10:00:00-08:00",
"2021-11-23T00:00:00-08:00",
"America/Los_Angeles",
),
# Host clock in local time
(
"2021-11-23 19:00:00-08:00",
"2021-11-23T10:00:00-08:00",
"2021-11-23T00:00:00-08:00",
"America/Los_Angeles",
),
],
)
@patch(
"homeassistant.helpers.template.TemplateEnvironment.is_safe_callable",
return_value=True,
)
def test_today_at(mock_is_safe, hass, now, expected, expected_midnight, timezone_str):
"""Test today_at method."""
freezer = freeze_time(now)
freezer.start()
original_tz = dt_util.DEFAULT_TIME_ZONE
timezone = dt_util.get_time_zone(timezone_str)
dt_util.set_default_time_zone(timezone)
result = template.Template(
"{{ today_at('10:00').isoformat() }}",
hass,
).async_render()
assert result == expected
result = template.Template(
"{{ today_at('10:00:00').isoformat() }}",
hass,
).async_render()
assert result == expected
result = template.Template(
"{{ ('10:00:00' | today_at).isoformat() }}",
hass,
).async_render()
assert result == expected
result = template.Template(
"{{ today_at().isoformat() }}",
hass,
).async_render()
assert result == expected_midnight
with pytest.raises(TemplateError):
template.Template("{{ today_at('bad') }}", hass).async_render()
freezer.stop()
dt_util.set_default_time_zone(original_tz)
@patch(
"homeassistant.helpers.template.TemplateEnvironment.is_safe_callable",
return_value=True,
)
def test_relative_time(mock_is_safe, hass):
"""Test relative_time method."""
now = datetime.strptime("2000-01-01 10:00:00 +00:00", "%Y-%m-%d %H:%M:%S %z")
with patch("homeassistant.util.dt.now", return_value=now):
result = template.Template(
'{{relative_time(strptime("2000-01-01 09:00:00", "%Y-%m-%d %H:%M:%S"))}}',
hass,
).async_render()
assert result == "1 hour"
result = template.Template(
'{{relative_time(strptime("2000-01-01 09:00:00 +01:00", "%Y-%m-%d %H:%M:%S %z"))}}',
hass,
).async_render()
assert result == "2 hours"
result = template.Template(
'{{relative_time(strptime("2000-01-01 03:00:00 -06:00", "%Y-%m-%d %H:%M:%S %z"))}}',
hass,
).async_render()
assert result == "1 hour"
result1 = str(
template.strptime("2000-01-01 11:00:00 +00:00", "%Y-%m-%d %H:%M:%S %z")
)
result2 = template.Template(
'{{relative_time(strptime("2000-01-01 11:00:00 +00:00", "%Y-%m-%d %H:%M:%S %z"))}}',
hass,
).async_render()
assert result1 == result2
result = template.Template(
'{{relative_time("string")}}',
hass,
).async_render()
assert result == "string"
@patch(
"homeassistant.helpers.template.TemplateEnvironment.is_safe_callable",
return_value=True,
)
def test_timedelta(mock_is_safe, hass):
"""Test relative_time method."""
now = datetime.strptime("2000-01-01 10:00:00 +00:00", "%Y-%m-%d %H:%M:%S %z")
with patch("homeassistant.util.dt.now", return_value=now):
result = template.Template(
"{{timedelta(seconds=120)}}",
hass,
).async_render()
assert result == "0:02:00"
result = template.Template(
"{{timedelta(seconds=86400)}}",
hass,
).async_render()
assert result == "1 day, 0:00:00"
result = template.Template(
"{{timedelta(days=1, hours=4)}}", hass
).async_render()
assert result == "1 day, 4:00:00"
result = template.Template(
"{{relative_time(now() - timedelta(seconds=3600))}}",
hass,
).async_render()
assert result == "1 hour"
result = template.Template(
"{{relative_time(now() - timedelta(seconds=86400))}}",
hass,
).async_render()
assert result == "1 day"
result = template.Template(
"{{relative_time(now() - timedelta(seconds=86401))}}",
hass,
).async_render()
assert result == "1 day"
result = template.Template(
"{{relative_time(now() - timedelta(weeks=2, days=1))}}",
hass,
).async_render()
assert result == "15 days"
def test_regex_match(hass):
"""Test regex_match method."""
tpl = template.Template(
r"""
{{ '123-456-7890' | regex_match('(\\d{3})-(\\d{3})-(\\d{4})') }}
""",
hass,
)
assert tpl.async_render() is True
tpl = template.Template(
"""
{{ 'Home Assistant test' | regex_match('home', True) }}
""",
hass,
)
assert tpl.async_render() is True
tpl = template.Template(
"""
{{ 'Another Home Assistant test' | regex_match('Home') }}
""",
hass,
)
assert tpl.async_render() is False
tpl = template.Template(
"""
{{ ['Home Assistant test'] | regex_match('.*Assist') }}
""",
hass,
)
assert tpl.async_render() is True
def test_match_test(hass):
"""Test match test."""
tpl = template.Template(
r"""
{{ '123-456-7890' is match('(\\d{3})-(\\d{3})-(\\d{4})') }}
""",
hass,
)
assert tpl.async_render() is True
def test_regex_search(hass):
"""Test regex_search method."""
tpl = template.Template(
r"""
{{ '123-456-7890' | regex_search('(\\d{3})-(\\d{3})-(\\d{4})') }}
""",
hass,
)
assert tpl.async_render() is True
tpl = template.Template(
"""
{{ 'Home Assistant test' | regex_search('home', True) }}
""",
hass,
)
assert tpl.async_render() is True
tpl = template.Template(
"""
{{ 'Another Home Assistant test' | regex_search('Home') }}
""",
hass,
)
assert tpl.async_render() is True
tpl = template.Template(
"""
{{ ['Home Assistant test'] | regex_search('Assist') }}
""",
hass,
)
assert tpl.async_render() is True
def test_search_test(hass):
"""Test search test."""
tpl = template.Template(
r"""
{{ '123-456-7890' is search('(\\d{3})-(\\d{3})-(\\d{4})') }}
""",
hass,
)
assert tpl.async_render() is True
def test_regex_replace(hass):
"""Test regex_replace method."""
tpl = template.Template(
r"""
{{ 'Hello World' | regex_replace('(Hello\\s)',) }}
""",
hass,
)
assert tpl.async_render() == "World"
tpl = template.Template(
"""
{{ ['Home hinderant test'] | regex_replace('hinder', 'Assist') }}
""",
hass,
)
assert tpl.async_render() == ["Home Assistant test"]
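# Illustrative sketch, not part of the original suite: regex_replace follows
# re.sub semantics, so group backreferences work in the replacement string.
def test_regex_replace_backreference_sketch(hass):
    """Sketch: swap two words using a group backreference."""
    tpl = template.Template(
        r"{{ 'Hello World' | regex_replace('(\\w+) (\\w+)', '\\2 \\1') }}",
        hass,
    )
    assert tpl.async_render() == "World Hello"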
def test_regex_findall(hass):
"""Test regex_findall method."""
tpl = template.Template(
"""
{{ 'Flight from JFK to LHR' | regex_findall('([A-Z]{3})') }}
""",
hass,
)
assert tpl.async_render() == ["JFK", "LHR"]
def test_regex_findall_index(hass):
"""Test regex_findall_index method."""
tpl = template.Template(
"""
{{ 'Flight from JFK to LHR' | regex_findall_index('([A-Z]{3})', 0) }}
""",
hass,
)
assert tpl.async_render() == "JFK"
tpl = template.Template(
"""
{{ 'Flight from JFK to LHR' | regex_findall_index('([A-Z]{3})', 1) }}
""",
hass,
)
assert tpl.async_render() == "LHR"
tpl = template.Template(
"""
{{ ['JFK', 'LHR'] | regex_findall_index('([A-Z]{3})', 1) }}
""",
hass,
)
assert tpl.async_render() == "LHR"
def test_bitwise_and(hass):
"""Test bitwise_and method."""
tpl = template.Template(
"""
{{ 8 | bitwise_and(8) }}
""",
hass,
)
assert tpl.async_render() == 8 & 8
tpl = template.Template(
"""
{{ 10 | bitwise_and(2) }}
""",
hass,
)
assert tpl.async_render() == 10 & 2
tpl = template.Template(
"""
{{ 8 | bitwise_and(2) }}
""",
hass,
)
assert tpl.async_render() == 8 & 2
def test_bitwise_or(hass):
"""Test bitwise_or method."""
tpl = template.Template(
"""
{{ 8 | bitwise_or(8) }}
""",
hass,
)
assert tpl.async_render() == 8 | 8
tpl = template.Template(
"""
{{ 10 | bitwise_or(2) }}
""",
hass,
)
assert tpl.async_render() == 10 | 2
tpl = template.Template(
"""
{{ 8 | bitwise_or(2) }}
""",
hass,
)
assert tpl.async_render() == 8 | 2
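# Illustrative sketch, not part of the original suite: a common use of
# bitwise_and is flag testing, comparing the masked value against zero.
def test_bitwise_flag_check_sketch(hass):
    """Sketch: check whether a status bit is set with bitwise_and."""
    tpl = template.Template("{{ (value | bitwise_and(4)) > 0 }}", hass)
    assert tpl.async_render(variables={"value": 5}) is True
    assert tpl.async_render(variables={"value": 2}) is False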
def test_pack(hass, caplog):
"""Test struct pack method."""
# render as filter
tpl = template.Template(
"""
{{ value | pack('>I') }}
""",
hass,
)
variables = {
"value": 0xDEADBEEF,
}
assert tpl.async_render(variables=variables) == b"\xde\xad\xbe\xef"
# render as function
tpl = template.Template(
"""
{{ pack(value, '>I') }}
""",
hass,
)
variables = {
"value": 0xDEADBEEF,
}
assert tpl.async_render(variables=variables) == b"\xde\xad\xbe\xef"
# test with None value
tpl = template.Template(
"""
{{ pack(value, '>I') }}
""",
hass,
)
variables = {
"value": None,
}
# "Template warning: 'pack' unable to pack object with type '%s' and format_string '%s' see https://docs.python.org/3/library/struct.html for more information"
assert tpl.async_render(variables=variables) is None
assert (
"Template warning: 'pack' unable to pack object 'None' with type 'NoneType' and format_string '>I' see https://docs.python.org/3/library/struct.html for more information"
in caplog.text
)
    # test with an invalid format string
tpl = template.Template(
"""
{{ pack(value, 'invalid filter') }}
""",
hass,
)
variables = {
"value": 0xDEADBEEF,
}
# "Template warning: 'pack' unable to pack object with type '%s' and format_string '%s' see https://docs.python.org/3/library/struct.html for more information"
assert tpl.async_render(variables=variables) is None
assert (
"Template warning: 'pack' unable to pack object '3735928559' with type 'int' and format_string 'invalid filter' see https://docs.python.org/3/library/struct.html for more information"
in caplog.text
)
def test_unpack(hass, caplog):
"""Test struct unpack method."""
# render as filter
tpl = template.Template(
"""
{{ value | unpack('>I') }}
""",
hass,
)
variables = {
"value": b"\xde\xad\xbe\xef",
}
assert tpl.async_render(variables=variables) == 0xDEADBEEF
# render as function
tpl = template.Template(
"""
{{ unpack(value, '>I') }}
""",
hass,
)
variables = {
"value": b"\xde\xad\xbe\xef",
}
assert tpl.async_render(variables=variables) == 0xDEADBEEF
# unpack with offset
tpl = template.Template(
"""
{{ unpack(value, '>H', offset=2) }}
""",
hass,
)
variables = {
"value": b"\xde\xad\xbe\xef",
}
assert tpl.async_render(variables=variables) == 0xBEEF
# test with an empty bytes object
tpl = template.Template(
"""
{{ unpack(value, '>I') }}
""",
hass,
)
variables = {
"value": b"",
}
assert tpl.async_render(variables=variables) is None
assert (
"Template warning: 'unpack' unable to unpack object 'b''' with format_string '>I' and offset 0 see https://docs.python.org/3/library/struct.html for more information"
in caplog.text
)
    # test with an invalid format string
tpl = template.Template(
"""
{{ unpack(value, 'invalid filter') }}
""",
hass,
)
variables = {
"value": b"",
}
assert tpl.async_render(variables=variables) is None
assert (
"Template warning: 'unpack' unable to unpack object 'b''' with format_string 'invalid filter' and offset 0 see https://docs.python.org/3/library/struct.html for more information"
in caplog.text
)
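# Illustrative sketch, not part of the original suite: pack and unpack are
# inverses for a given format string, so a round-trip returns the input.
def test_pack_unpack_roundtrip_sketch(hass):
    """Sketch: round-trip a value through pack and unpack."""
    tpl = template.Template("{{ value | pack('>H') | unpack('>H') }}", hass)
    assert tpl.async_render(variables={"value": 0xBEEF}) == 0xBEEF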
def test_distance_function_with_1_state(hass):
"""Test distance function with 1 state."""
_set_up_units(hass)
hass.states.async_set(
"test.object", "happy", {"latitude": 32.87336, "longitude": -117.22943}
)
tpl = template.Template("{{ distance(states.test.object) | round }}", hass)
assert tpl.async_render() == 187
def test_distance_function_with_2_states(hass):
"""Test distance function with 2 states."""
_set_up_units(hass)
hass.states.async_set(
"test.object", "happy", {"latitude": 32.87336, "longitude": -117.22943}
)
hass.states.async_set(
"test.object_2",
"happy",
{"latitude": hass.config.latitude, "longitude": hass.config.longitude},
)
tpl = template.Template(
"{{ distance(states.test.object, states.test.object_2) | round }}", hass
)
assert tpl.async_render() == 187
def test_distance_function_with_1_coord(hass):
"""Test distance function with 1 coord."""
_set_up_units(hass)
tpl = template.Template('{{ distance("32.87336", "-117.22943") | round }}', hass)
assert tpl.async_render() == 187
def test_distance_function_with_2_coords(hass):
"""Test distance function with 2 coords."""
_set_up_units(hass)
assert (
template.Template(
'{{ distance("32.87336", "-117.22943", %s, %s) | round }}'
% (hass.config.latitude, hass.config.longitude),
hass,
).async_render()
== 187
)
def test_distance_function_with_1_state_1_coord(hass):
"""Test distance function with 1 state 1 coord."""
_set_up_units(hass)
hass.states.async_set(
"test.object_2",
"happy",
{"latitude": hass.config.latitude, "longitude": hass.config.longitude},
)
tpl = template.Template(
'{{ distance("32.87336", "-117.22943", states.test.object_2) ' "| round }}",
hass,
)
assert tpl.async_render() == 187
tpl2 = template.Template(
        '{{ distance(states.test.object_2, "32.87336", "-117.22943") | round }}',
hass,
)
assert tpl2.async_render() == 187
def test_distance_function_return_none_if_invalid_state(hass):
"""Test distance function return None if invalid state."""
hass.states.async_set("test.object_2", "happy", {"latitude": 10})
tpl = template.Template("{{ distance(states.test.object_2) | round }}", hass)
assert tpl.async_render() is None
def test_distance_function_return_none_if_invalid_coord(hass):
"""Test distance function return None if invalid coord."""
assert (
template.Template('{{ distance("123", "abc") }}', hass).async_render() is None
)
assert template.Template('{{ distance("123") }}', hass).async_render() is None
hass.states.async_set(
"test.object_2",
"happy",
{"latitude": hass.config.latitude, "longitude": hass.config.longitude},
)
tpl = template.Template('{{ distance("123", states.test_object_2) }}', hass)
assert tpl.async_render() is None
def test_distance_function_with_2_entity_ids(hass):
"""Test distance function with 2 entity ids."""
_set_up_units(hass)
hass.states.async_set(
"test.object", "happy", {"latitude": 32.87336, "longitude": -117.22943}
)
hass.states.async_set(
"test.object_2",
"happy",
{"latitude": hass.config.latitude, "longitude": hass.config.longitude},
)
tpl = template.Template(
'{{ distance("test.object", "test.object_2") | round }}', hass
)
assert tpl.async_render() == 187
def test_distance_function_with_1_entity_1_coord(hass):
"""Test distance function with 1 entity_id and 1 coord."""
_set_up_units(hass)
hass.states.async_set(
"test.object",
"happy",
{"latitude": hass.config.latitude, "longitude": hass.config.longitude},
)
tpl = template.Template(
'{{ distance("test.object", "32.87336", "-117.22943") | round }}', hass
)
assert tpl.async_render() == 187
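# Illustrative sketch, not part of the original suite: the zero-distance case.
# Measuring from the home coordinates to themselves rounds to 0 regardless of
# the configured unit system.
def test_distance_function_zero_sketch(hass):
    """Sketch: distance from home to home rounds to zero."""
    _set_up_units(hass)
    tpl = template.Template(
        "{{ distance(%s, %s) | round }}"
        % (hass.config.latitude, hass.config.longitude),
        hass,
    )
    assert tpl.async_render() == 0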
def test_closest_function_home_vs_domain(hass):
"""Test closest function home vs domain."""
hass.states.async_set(
"test_domain.object",
"happy",
{
"latitude": hass.config.latitude + 0.1,
"longitude": hass.config.longitude + 0.1,
},
)
hass.states.async_set(
"not_test_domain.but_closer",
"happy",
{"latitude": hass.config.latitude, "longitude": hass.config.longitude},
)
assert (
template.Template(
"{{ closest(states.test_domain).entity_id }}", hass
).async_render()
== "test_domain.object"
)
assert (
template.Template(
"{{ (states.test_domain | closest).entity_id }}", hass
).async_render()
== "test_domain.object"
)
def test_closest_function_home_vs_all_states(hass):
"""Test closest function home vs all states."""
hass.states.async_set(
"test_domain.object",
"happy",
{
"latitude": hass.config.latitude + 0.1,
"longitude": hass.config.longitude + 0.1,
},
)
hass.states.async_set(
"test_domain_2.and_closer",
"happy",
{"latitude": hass.config.latitude, "longitude": hass.config.longitude},
)
assert (
template.Template("{{ closest(states).entity_id }}", hass).async_render()
== "test_domain_2.and_closer"
)
assert (
template.Template("{{ (states | closest).entity_id }}", hass).async_render()
== "test_domain_2.and_closer"
)
async def test_closest_function_home_vs_group_entity_id(hass):
"""Test closest function home vs group entity id."""
hass.states.async_set(
"test_domain.object",
"happy",
{
"latitude": hass.config.latitude + 0.1,
"longitude": hass.config.longitude + 0.1,
},
)
hass.states.async_set(
"not_in_group.but_closer",
"happy",
{"latitude": hass.config.latitude, "longitude": hass.config.longitude},
)
assert await async_setup_component(hass, "group", {})
await hass.async_block_till_done()
await group.Group.async_create_group(hass, "location group", ["test_domain.object"])
info = render_to_info(hass, '{{ closest("group.location_group").entity_id }}')
assert_result_info(
info, "test_domain.object", {"group.location_group", "test_domain.object"}
)
assert info.rate_limit is None
async def test_closest_function_home_vs_group_state(hass):
"""Test closest function home vs group state."""
hass.states.async_set(
"test_domain.object",
"happy",
{
"latitude": hass.config.latitude + 0.1,
"longitude": hass.config.longitude + 0.1,
},
)
hass.states.async_set(
"not_in_group.but_closer",
"happy",
{"latitude": hass.config.latitude, "longitude": hass.config.longitude},
)
assert await async_setup_component(hass, "group", {})
await hass.async_block_till_done()
await group.Group.async_create_group(hass, "location group", ["test_domain.object"])
info = render_to_info(hass, '{{ closest("group.location_group").entity_id }}')
assert_result_info(
info, "test_domain.object", {"group.location_group", "test_domain.object"}
)
assert info.rate_limit is None
info = render_to_info(hass, "{{ closest(states.group.location_group).entity_id }}")
assert_result_info(
info, "test_domain.object", {"test_domain.object", "group.location_group"}
)
assert info.rate_limit is None
async def test_expand(hass):
"""Test expand function."""
info = render_to_info(hass, "{{ expand('test.object') }}")
assert_result_info(info, [], ["test.object"])
assert info.rate_limit is None
info = render_to_info(hass, "{{ expand(56) }}")
assert_result_info(info, [])
assert info.rate_limit is None
hass.states.async_set("test.object", "happy")
info = render_to_info(
hass, "{{ expand('test.object') | map(attribute='entity_id') | join(', ') }}"
)
assert_result_info(info, "test.object", ["test.object"])
assert info.rate_limit is None
info = render_to_info(
hass,
"{{ expand('group.new_group') | map(attribute='entity_id') | join(', ') }}",
)
assert_result_info(info, "", ["group.new_group"])
assert info.rate_limit is None
info = render_to_info(
hass, "{{ expand(states.group) | map(attribute='entity_id') | join(', ') }}"
)
assert_result_info(info, "", [], ["group"])
assert info.rate_limit == template.DOMAIN_STATES_RATE_LIMIT
assert await async_setup_component(hass, "group", {})
await hass.async_block_till_done()
await group.Group.async_create_group(hass, "new group", ["test.object"])
info = render_to_info(
hass,
"{{ expand('group.new_group') | map(attribute='entity_id') | join(', ') }}",
)
assert_result_info(info, "test.object", {"group.new_group", "test.object"})
assert info.rate_limit is None
info = render_to_info(
hass, "{{ expand(states.group) | map(attribute='entity_id') | join(', ') }}"
)
assert_result_info(info, "test.object", {"test.object"}, ["group"])
assert info.rate_limit == template.DOMAIN_STATES_RATE_LIMIT
info = render_to_info(
hass,
"{{ expand('group.new_group', 'test.object')"
" | map(attribute='entity_id') | join(', ') }}",
)
assert_result_info(info, "test.object", {"test.object", "group.new_group"})
info = render_to_info(
hass,
"{{ ['group.new_group', 'test.object'] | expand"
" | map(attribute='entity_id') | join(', ') }}",
)
assert_result_info(info, "test.object", {"test.object", "group.new_group"})
assert info.rate_limit is None
hass.states.async_set("sensor.power_1", 0)
hass.states.async_set("sensor.power_2", 200.2)
hass.states.async_set("sensor.power_3", 400.4)
assert await async_setup_component(hass, "group", {})
await hass.async_block_till_done()
await group.Group.async_create_group(
hass, "power sensors", ["sensor.power_1", "sensor.power_2", "sensor.power_3"]
)
info = render_to_info(
hass,
"{{ states.group.power_sensors.attributes.entity_id | expand | map(attribute='state')|map('float')|sum }}",
)
assert_result_info(
info,
200.2 + 400.4,
{"group.power_sensors", "sensor.power_1", "sensor.power_2", "sensor.power_3"},
)
assert info.rate_limit is None
async def test_device_entities(hass):
"""Test device_entities function."""
config_entry = MockConfigEntry(domain="light")
device_registry = mock_device_registry(hass)
entity_registry = mock_registry(hass)
# Test non existing device ids
info = render_to_info(hass, "{{ device_entities('abc123') }}")
assert_result_info(info, [])
assert info.rate_limit is None
info = render_to_info(hass, "{{ device_entities(56) }}")
assert_result_info(info, [])
assert info.rate_limit is None
# Test device without entities
device_entry = device_registry.async_get_or_create(
config_entry_id=config_entry.entry_id,
connections={(dr.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
)
info = render_to_info(hass, f"{{{{ device_entities('{device_entry.id}') }}}}")
assert_result_info(info, [])
assert info.rate_limit is None
# Test device with single entity, which has no state
entity_registry.async_get_or_create(
"light",
"hue",
"5678",
config_entry=config_entry,
device_id=device_entry.id,
)
info = render_to_info(hass, f"{{{{ device_entities('{device_entry.id}') }}}}")
assert_result_info(info, ["light.hue_5678"], [])
assert info.rate_limit is None
info = render_to_info(
hass,
f"{{{{ device_entities('{device_entry.id}') | expand | map(attribute='entity_id') | join(', ') }}}}",
)
assert_result_info(info, "", ["light.hue_5678"])
assert info.rate_limit is None
# Test device with single entity, with state
hass.states.async_set("light.hue_5678", "happy")
info = render_to_info(
hass,
f"{{{{ device_entities('{device_entry.id}') | expand | map(attribute='entity_id') | join(', ') }}}}",
)
assert_result_info(info, "light.hue_5678", ["light.hue_5678"])
assert info.rate_limit is None
# Test device with multiple entities, which have a state
entity_registry.async_get_or_create(
"light",
"hue",
"ABCD",
config_entry=config_entry,
device_id=device_entry.id,
)
hass.states.async_set("light.hue_abcd", "camper")
info = render_to_info(hass, f"{{{{ device_entities('{device_entry.id}') }}}}")
assert_result_info(info, ["light.hue_5678", "light.hue_abcd"], [])
assert info.rate_limit is None
info = render_to_info(
hass,
f"{{{{ device_entities('{device_entry.id}') | expand | map(attribute='entity_id') | join(', ') }}}}",
)
assert_result_info(
info, "light.hue_5678, light.hue_abcd", ["light.hue_5678", "light.hue_abcd"]
)
assert info.rate_limit is None
async def test_integration_entities(hass):
"""Test integration_entities function."""
entity_registry = mock_registry(hass)
# test entities for given config entry title
config_entry = MockConfigEntry(domain="mock", title="Mock bridge 2")
config_entry.add_to_hass(hass)
entity_entry = entity_registry.async_get_or_create(
"sensor", "mock", "test", config_entry=config_entry
)
info = render_to_info(hass, "{{ integration_entities('Mock bridge 2') }}")
assert_result_info(info, [entity_entry.entity_id])
assert info.rate_limit is None
# test integration entities not in entity registry
mock_entity = entity.Entity()
mock_entity.hass = hass
mock_entity.entity_id = "light.test_entity"
mock_entity.platform = EntityPlatform(
hass=hass,
logger=logging.getLogger(__name__),
domain="light",
platform_name="entryless_integration",
platform=None,
scan_interval=timedelta(seconds=30),
entity_namespace=None,
)
await mock_entity.async_internal_added_to_hass()
info = render_to_info(hass, "{{ integration_entities('entryless_integration') }}")
assert_result_info(info, ["light.test_entity"])
assert info.rate_limit is None
# Test non existing integration/entry title
info = render_to_info(hass, "{{ integration_entities('abc123') }}")
assert_result_info(info, [])
assert info.rate_limit is None
async def test_device_id(hass):
"""Test device_id function."""
config_entry = MockConfigEntry(domain="light")
device_registry = mock_device_registry(hass)
entity_registry = mock_registry(hass)
device_entry = device_registry.async_get_or_create(
config_entry_id=config_entry.entry_id,
connections={(dr.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
model="test",
name="test",
)
entity_entry = entity_registry.async_get_or_create(
"sensor", "test", "test", suggested_object_id="test", device_id=device_entry.id
)
entity_entry_no_device = entity_registry.async_get_or_create(
"sensor", "test", "test_no_device", suggested_object_id="test"
)
info = render_to_info(hass, "{{ 'sensor.fail' | device_id }}")
assert_result_info(info, None)
assert info.rate_limit is None
info = render_to_info(hass, "{{ 56 | device_id }}")
assert_result_info(info, None)
info = render_to_info(hass, "{{ 'not_a_real_entity_id' | device_id }}")
assert_result_info(info, None)
info = render_to_info(
hass, f"{{{{ device_id('{entity_entry_no_device.entity_id}') }}}}"
)
assert_result_info(info, None)
assert info.rate_limit is None
info = render_to_info(hass, f"{{{{ device_id('{entity_entry.entity_id}') }}}}")
assert_result_info(info, device_entry.id)
assert info.rate_limit is None
info = render_to_info(hass, "{{ device_id('test') }}")
assert_result_info(info, device_entry.id)
assert info.rate_limit is None
async def test_device_attr(hass):
"""Test device_attr and is_device_attr functions."""
config_entry = MockConfigEntry(domain="light")
device_registry = mock_device_registry(hass)
entity_registry = mock_registry(hass)
# Test non existing device ids (device_attr)
info = render_to_info(hass, "{{ device_attr('abc123', 'id') }}")
assert_result_info(info, None)
assert info.rate_limit is None
with pytest.raises(TemplateError):
info = render_to_info(hass, "{{ device_attr(56, 'id') }}")
assert_result_info(info, None)
# Test non existing device ids (is_device_attr)
info = render_to_info(hass, "{{ is_device_attr('abc123', 'id', 'test') }}")
assert_result_info(info, False)
assert info.rate_limit is None
with pytest.raises(TemplateError):
info = render_to_info(hass, "{{ is_device_attr(56, 'id', 'test') }}")
assert_result_info(info, False)
# Test non existing entity id (device_attr)
info = render_to_info(hass, "{{ device_attr('entity.test', 'id') }}")
assert_result_info(info, None)
assert info.rate_limit is None
# Test non existing entity id (is_device_attr)
info = render_to_info(hass, "{{ is_device_attr('entity.test', 'id', 'test') }}")
assert_result_info(info, False)
assert info.rate_limit is None
device_entry = device_registry.async_get_or_create(
config_entry_id=config_entry.entry_id,
connections={(dr.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
model="test",
)
entity_entry = entity_registry.async_get_or_create(
"sensor", "test", "test", suggested_object_id="test", device_id=device_entry.id
)
# Test non existent device attribute (device_attr)
info = render_to_info(
hass, f"{{{{ device_attr('{device_entry.id}', 'invalid_attr') }}}}"
)
assert_result_info(info, None)
assert info.rate_limit is None
# Test non existent device attribute (is_device_attr)
info = render_to_info(
hass, f"{{{{ is_device_attr('{device_entry.id}', 'invalid_attr', 'test') }}}}"
)
assert_result_info(info, False)
assert info.rate_limit is None
# Test None device attribute (device_attr)
info = render_to_info(
hass, f"{{{{ device_attr('{device_entry.id}', 'manufacturer') }}}}"
)
assert_result_info(info, None)
assert info.rate_limit is None
# Test None device attribute mismatch (is_device_attr)
info = render_to_info(
hass, f"{{{{ is_device_attr('{device_entry.id}', 'manufacturer', 'test') }}}}"
)
assert_result_info(info, False)
assert info.rate_limit is None
# Test None device attribute match (is_device_attr)
info = render_to_info(
hass, f"{{{{ is_device_attr('{device_entry.id}', 'manufacturer', None) }}}}"
)
assert_result_info(info, True)
assert info.rate_limit is None
# Test valid device attribute match (device_attr)
info = render_to_info(hass, f"{{{{ device_attr('{device_entry.id}', 'model') }}}}")
assert_result_info(info, "test")
assert info.rate_limit is None
# Test valid device attribute match (device_attr)
info = render_to_info(
hass, f"{{{{ device_attr('{entity_entry.entity_id}', 'model') }}}}"
)
assert_result_info(info, "test")
assert info.rate_limit is None
# Test valid device attribute mismatch (is_device_attr)
info = render_to_info(
hass, f"{{{{ is_device_attr('{device_entry.id}', 'model', 'fail') }}}}"
)
assert_result_info(info, False)
assert info.rate_limit is None
# Test valid device attribute match (is_device_attr)
info = render_to_info(
hass, f"{{{{ is_device_attr('{device_entry.id}', 'model', 'test') }}}}"
)
assert_result_info(info, True)
assert info.rate_limit is None
async def test_area_id(hass):
"""Test area_id function."""
config_entry = MockConfigEntry(domain="light")
device_registry = mock_device_registry(hass)
entity_registry = mock_registry(hass)
area_registry = mock_area_registry(hass)
# Test non existing entity id
info = render_to_info(hass, "{{ area_id('sensor.fake') }}")
assert_result_info(info, None)
assert info.rate_limit is None
# Test non existing device id (hex value)
info = render_to_info(hass, "{{ area_id('123abc') }}")
assert_result_info(info, None)
assert info.rate_limit is None
# Test non existing area name
info = render_to_info(hass, "{{ area_id('fake area name') }}")
assert_result_info(info, None)
assert info.rate_limit is None
# Test wrong value type
info = render_to_info(hass, "{{ area_id(56) }}")
assert_result_info(info, None)
assert info.rate_limit is None
area_entry_entity_id = area_registry.async_get_or_create("sensor.fake")
# Test device with single entity, which has no area
device_entry = device_registry.async_get_or_create(
config_entry_id=config_entry.entry_id,
connections={(dr.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
)
entity_entry = entity_registry.async_get_or_create(
"light",
"hue",
"5678",
config_entry=config_entry,
device_id=device_entry.id,
)
info = render_to_info(hass, f"{{{{ area_id('{device_entry.id}') }}}}")
assert_result_info(info, None)
assert info.rate_limit is None
info = render_to_info(hass, f"{{{{ area_id('{entity_entry.entity_id}') }}}}")
assert_result_info(info, None)
assert info.rate_limit is None
# Test device ID, entity ID and area name as input with area name that looks like
# a device ID. Try a filter too
area_entry_hex = area_registry.async_get_or_create("123abc")
device_entry = device_registry.async_update_device(
device_entry.id, area_id=area_entry_hex.id
)
entity_entry = entity_registry.async_update_entity(
entity_entry.entity_id, area_id=area_entry_hex.id
)
info = render_to_info(hass, f"{{{{ '{device_entry.id}' | area_id }}}}")
assert_result_info(info, area_entry_hex.id)
assert info.rate_limit is None
info = render_to_info(hass, f"{{{{ area_id('{entity_entry.entity_id}') }}}}")
assert_result_info(info, area_entry_hex.id)
assert info.rate_limit is None
info = render_to_info(hass, f"{{{{ area_id('{area_entry_hex.name}') }}}}")
assert_result_info(info, area_entry_hex.id)
assert info.rate_limit is None
# Test device ID, entity ID and area name as input with area name that looks like an
# entity ID
area_entry_entity_id = area_registry.async_get_or_create("sensor.fake")
device_entry = device_registry.async_update_device(
device_entry.id, area_id=area_entry_entity_id.id
)
entity_entry = entity_registry.async_update_entity(
entity_entry.entity_id, area_id=area_entry_entity_id.id
)
info = render_to_info(hass, f"{{{{ area_id('{device_entry.id}') }}}}")
assert_result_info(info, area_entry_entity_id.id)
assert info.rate_limit is None
info = render_to_info(hass, f"{{{{ area_id('{entity_entry.entity_id}') }}}}")
assert_result_info(info, area_entry_entity_id.id)
assert info.rate_limit is None
info = render_to_info(hass, f"{{{{ area_id('{area_entry_entity_id.name}') }}}}")
assert_result_info(info, area_entry_entity_id.id)
assert info.rate_limit is None
# Make sure that when entity doesn't have an area but its device does, that's what
# gets returned
    entity_entry = entity_registry.async_update_entity(
        entity_entry.entity_id, area_id=None
    )
info = render_to_info(hass, f"{{{{ area_id('{entity_entry.entity_id}') }}}}")
assert_result_info(info, area_entry_entity_id.id)
assert info.rate_limit is None
async def test_area_name(hass):
"""Test area_name function."""
config_entry = MockConfigEntry(domain="light")
device_registry = mock_device_registry(hass)
entity_registry = mock_registry(hass)
area_registry = mock_area_registry(hass)
# Test non existing entity id
info = render_to_info(hass, "{{ area_name('sensor.fake') }}")
assert_result_info(info, None)
assert info.rate_limit is None
# Test non existing device id (hex value)
info = render_to_info(hass, "{{ area_name('123abc') }}")
assert_result_info(info, None)
assert info.rate_limit is None
# Test non existing area id
info = render_to_info(hass, "{{ area_name('1234567890') }}")
assert_result_info(info, None)
assert info.rate_limit is None
# Test wrong value type
info = render_to_info(hass, "{{ area_name(56) }}")
assert_result_info(info, None)
assert info.rate_limit is None
# Test device with single entity, which has no area
device_entry = device_registry.async_get_or_create(
config_entry_id=config_entry.entry_id,
connections={(dr.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
)
entity_entry = entity_registry.async_get_or_create(
"light",
"hue",
"5678",
config_entry=config_entry,
device_id=device_entry.id,
)
info = render_to_info(hass, f"{{{{ area_name('{device_entry.id}') }}}}")
assert_result_info(info, None)
assert info.rate_limit is None
info = render_to_info(hass, f"{{{{ area_name('{entity_entry.entity_id}') }}}}")
assert_result_info(info, None)
assert info.rate_limit is None
# Test device ID, entity ID and area id as input. Try a filter too
area_entry = area_registry.async_get_or_create("123abc")
device_entry = device_registry.async_update_device(
device_entry.id, area_id=area_entry.id
)
entity_entry = entity_registry.async_update_entity(
entity_entry.entity_id, area_id=area_entry.id
)
info = render_to_info(hass, f"{{{{ '{device_entry.id}' | area_name }}}}")
assert_result_info(info, area_entry.name)
assert info.rate_limit is None
info = render_to_info(hass, f"{{{{ area_name('{entity_entry.entity_id}') }}}}")
assert_result_info(info, area_entry.name)
assert info.rate_limit is None
info = render_to_info(hass, f"{{{{ area_name('{area_entry.id}') }}}}")
assert_result_info(info, area_entry.name)
assert info.rate_limit is None
# Make sure that when entity doesn't have an area but its device does, that's what
# gets returned
entity_entry = entity_registry.async_update_entity(
entity_entry.entity_id, area_id=None
)
info = render_to_info(hass, f"{{{{ area_name('{entity_entry.entity_id}') }}}}")
assert_result_info(info, area_entry.name)
assert info.rate_limit is None
async def test_area_entities(hass):
"""Test area_entities function."""
config_entry = MockConfigEntry(domain="light")
entity_registry = mock_registry(hass)
device_registry = mock_device_registry(hass)
area_registry = mock_area_registry(hass)
# Test non existing device id
info = render_to_info(hass, "{{ area_entities('deadbeef') }}")
assert_result_info(info, [])
assert info.rate_limit is None
# Test wrong value type
info = render_to_info(hass, "{{ area_entities(56) }}")
assert_result_info(info, [])
assert info.rate_limit is None
area_entry = area_registry.async_get_or_create("sensor.fake")
entity_registry.async_get_or_create(
"light",
"hue",
"5678",
config_entry=config_entry,
area_id=area_entry.id,
)
info = render_to_info(hass, f"{{{{ area_entities('{area_entry.id}') }}}}")
assert_result_info(info, ["light.hue_5678"])
assert info.rate_limit is None
info = render_to_info(hass, f"{{{{ '{area_entry.name}' | area_entities }}}}")
assert_result_info(info, ["light.hue_5678"])
assert info.rate_limit is None
# Test for entities that inherit area from device
device_entry = device_registry.async_get_or_create(
connections={(dr.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
config_entry_id=config_entry.entry_id,
suggested_area="sensor.fake",
)
entity_registry.async_get_or_create(
"light",
"hue_light",
"5678",
config_entry=config_entry,
device_id=device_entry.id,
)
info = render_to_info(hass, f"{{{{ '{area_entry.name}' | area_entities }}}}")
assert_result_info(info, ["light.hue_5678", "light.hue_light_5678"])
assert info.rate_limit is None
async def test_area_devices(hass):
"""Test area_devices function."""
config_entry = MockConfigEntry(domain="light")
device_registry = mock_device_registry(hass)
area_registry = mock_area_registry(hass)
# Test non existing device id
info = render_to_info(hass, "{{ area_devices('deadbeef') }}")
assert_result_info(info, [])
assert info.rate_limit is None
# Test wrong value type
info = render_to_info(hass, "{{ area_devices(56) }}")
assert_result_info(info, [])
assert info.rate_limit is None
area_entry = area_registry.async_get_or_create("sensor.fake")
device_entry = device_registry.async_get_or_create(
config_entry_id=config_entry.entry_id,
connections={(dr.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
suggested_area=area_entry.name,
)
info = render_to_info(hass, f"{{{{ area_devices('{area_entry.id}') }}}}")
assert_result_info(info, [device_entry.id])
assert info.rate_limit is None
info = render_to_info(hass, f"{{{{ '{area_entry.name}' | area_devices }}}}")
assert_result_info(info, [device_entry.id])
assert info.rate_limit is None
def test_closest_function_to_coord(hass):
"""Test closest function to coord."""
hass.states.async_set(
"test_domain.closest_home",
"happy",
{
"latitude": hass.config.latitude + 0.1,
"longitude": hass.config.longitude + 0.1,
},
)
hass.states.async_set(
"test_domain.closest_zone",
"happy",
{
"latitude": hass.config.latitude + 0.2,
"longitude": hass.config.longitude + 0.2,
},
)
hass.states.async_set(
"zone.far_away",
"zoning",
{
"latitude": hass.config.latitude + 0.3,
"longitude": hass.config.longitude + 0.3,
},
)
tpl = template.Template(
'{{ closest("%s", %s, states.test_domain).entity_id }}'
% (hass.config.latitude + 0.3, hass.config.longitude + 0.3),
hass,
)
assert tpl.async_render() == "test_domain.closest_zone"
tpl = template.Template(
'{{ (states.test_domain | closest("%s", %s)).entity_id }}'
% (hass.config.latitude + 0.3, hass.config.longitude + 0.3),
hass,
)
assert tpl.async_render() == "test_domain.closest_zone"
def test_async_render_to_info_with_branching(hass):
"""Test async_render_to_info function by domain."""
hass.states.async_set("light.a", "off")
hass.states.async_set("light.b", "on")
hass.states.async_set("light.c", "off")
info = render_to_info(
hass,
"""
{% if states.light.a == "on" %}
{{ states.light.b.state }}
{% else %}
{{ states.light.c.state }}
{% endif %}
""",
)
assert_result_info(info, "off", {"light.a", "light.c"})
assert info.rate_limit is None
info = render_to_info(
hass,
"""
{% if states.light.a.state == "off" %}
{% set domain = "light" %}
{{ states[domain].b.state }}
{% endif %}
""",
)
assert_result_info(info, "on", {"light.a", "light.b"})
assert info.rate_limit is None
def test_async_render_to_info_with_complex_branching(hass):
"""Test async_render_to_info function by domain."""
hass.states.async_set("light.a", "off")
hass.states.async_set("light.b", "on")
hass.states.async_set("light.c", "off")
hass.states.async_set("vacuum.a", "off")
hass.states.async_set("device_tracker.a", "off")
hass.states.async_set("device_tracker.b", "off")
hass.states.async_set("lock.a", "off")
hass.states.async_set("sensor.a", "off")
hass.states.async_set("binary_sensor.a", "off")
info = render_to_info(
hass,
"""
{% set domain = "vacuum" %}
{% if states.light.a == "on" %}
{{ states.light.b.state }}
{% elif states.light.a == "on" %}
{{ states.device_tracker }}
{% elif states.light.a == "on" %}
{{ states[domain] | list }}
{% elif states('light.b') == "on" %}
{{ states[otherdomain] | map(attribute='entity_id') | list }}
{% elif states.light.a == "on" %}
{{ states["nonexist"] | list }}
{% else %}
else
{% endif %}
""",
{"otherdomain": "sensor"},
)
assert_result_info(info, ["sensor.a"], {"light.a", "light.b"}, {"sensor"})
assert info.rate_limit == template.DOMAIN_STATES_RATE_LIMIT
async def test_async_render_to_info_with_wildcard_matching_entity_id(hass):
"""Test tracking template with a wildcard."""
template_complex_str = r"""
{% for state in states.cover %}
{% if state.entity_id | regex_match('.*\.office_') %}
{{ state.entity_id }}={{ state.state }}
{% endif %}
{% endfor %}
"""
hass.states.async_set("cover.office_drapes", "closed")
hass.states.async_set("cover.office_window", "closed")
hass.states.async_set("cover.office_skylight", "open")
info = render_to_info(hass, template_complex_str)
assert info.domains == {"cover"}
assert info.entities == set()
assert info.all_states is False
assert info.rate_limit == template.DOMAIN_STATES_RATE_LIMIT
async def test_async_render_to_info_with_wildcard_matching_state(hass):
"""Test tracking template with a wildcard."""
template_complex_str = """
{% for state in states %}
{% if state.state | regex_match('ope.*') %}
{{ state.entity_id }}={{ state.state }}
{% endif %}
{% endfor %}
"""
hass.states.async_set("cover.office_drapes", "closed")
hass.states.async_set("cover.office_window", "closed")
hass.states.async_set("cover.office_skylight", "open")
hass.states.async_set("cover.x_skylight", "open")
hass.states.async_set("binary_sensor.door", "open")
await hass.async_block_till_done()
info = render_to_info(hass, template_complex_str)
assert not info.domains
assert info.entities == set()
assert info.all_states is True
assert info.rate_limit == template.ALL_STATES_RATE_LIMIT
hass.states.async_set("binary_sensor.door", "closed")
info = render_to_info(hass, template_complex_str)
assert not info.domains
assert info.entities == set()
assert info.all_states is True
assert info.rate_limit == template.ALL_STATES_RATE_LIMIT
template_cover_str = """
{% for state in states.cover %}
{% if state.state | regex_match('ope.*') %}
{{ state.entity_id }}={{ state.state }}
{% endif %}
{% endfor %}
"""
hass.states.async_set("cover.x_skylight", "closed")
info = render_to_info(hass, template_cover_str)
assert info.domains == {"cover"}
assert info.entities == set()
assert info.all_states is False
assert info.rate_limit == template.DOMAIN_STATES_RATE_LIMIT
def test_nested_async_render_to_info_case(hass):
"""Test a deeply nested state with async_render_to_info."""
hass.states.async_set("input_select.picker", "vacuum.a")
hass.states.async_set("vacuum.a", "off")
info = render_to_info(
hass, "{{ states[states['input_select.picker'].state].state }}", {}
)
assert_result_info(info, "off", {"input_select.picker", "vacuum.a"})
assert info.rate_limit is None
def test_result_as_boolean(hass):
"""Test converting a template result to a boolean."""
assert template.result_as_boolean(True) is True
assert template.result_as_boolean(" 1 ") is True
assert template.result_as_boolean(" true ") is True
assert template.result_as_boolean(" TrUE ") is True
assert template.result_as_boolean(" YeS ") is True
assert template.result_as_boolean(" On ") is True
assert template.result_as_boolean(" Enable ") is True
assert template.result_as_boolean(1) is True
assert template.result_as_boolean(-1) is True
assert template.result_as_boolean(500) is True
assert template.result_as_boolean(0.5) is True
assert template.result_as_boolean(0.389) is True
assert template.result_as_boolean(35) is True
assert template.result_as_boolean(False) is False
assert template.result_as_boolean(" 0 ") is False
assert template.result_as_boolean(" false ") is False
assert template.result_as_boolean(" FaLsE ") is False
assert template.result_as_boolean(" no ") is False
assert template.result_as_boolean(" off ") is False
assert template.result_as_boolean(" disable ") is False
assert template.result_as_boolean(0) is False
assert template.result_as_boolean(0.0) is False
assert template.result_as_boolean("0.00") is False
assert template.result_as_boolean(None) is False
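# Illustrative sketch, not part of the original suite: on/off-style template
# platforms coerce rendered output this way, so the two-step
# render-then-coerce flow is worth pinning down.
def test_result_as_boolean_after_render_sketch(hass):
    """Sketch: feed a rendered template result through result_as_boolean."""
    result = template.Template("{{ 1 == 1 }}", hass).async_render()
    assert template.result_as_boolean(result) is True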
def test_closest_function_to_entity_id(hass):
"""Test closest function to entity id."""
hass.states.async_set(
"test_domain.closest_home",
"happy",
{
"latitude": hass.config.latitude + 0.1,
"longitude": hass.config.longitude + 0.1,
},
)
hass.states.async_set(
"test_domain.closest_zone",
"happy",
{
"latitude": hass.config.latitude + 0.2,
"longitude": hass.config.longitude + 0.2,
},
)
hass.states.async_set(
"zone.far_away",
"zoning",
{
"latitude": hass.config.latitude + 0.3,
"longitude": hass.config.longitude + 0.3,
},
)
info = render_to_info(
hass,
"{{ closest(zone, states.test_domain).entity_id }}",
{"zone": "zone.far_away"},
)
assert_result_info(
info,
"test_domain.closest_zone",
["test_domain.closest_home", "test_domain.closest_zone", "zone.far_away"],
["test_domain"],
)
info = render_to_info(
hass,
"{{ ([states.test_domain, 'test_domain.closest_zone'] "
"| closest(zone)).entity_id }}",
{"zone": "zone.far_away"},
)
assert_result_info(
info,
"test_domain.closest_zone",
["test_domain.closest_home", "test_domain.closest_zone", "zone.far_away"],
["test_domain"],
)
def test_closest_function_to_state(hass):
"""Test closest function to state."""
hass.states.async_set(
"test_domain.closest_home",
"happy",
{
"latitude": hass.config.latitude + 0.1,
"longitude": hass.config.longitude + 0.1,
},
)
hass.states.async_set(
"test_domain.closest_zone",
"happy",
{
"latitude": hass.config.latitude + 0.2,
"longitude": hass.config.longitude + 0.2,
},
)
hass.states.async_set(
"zone.far_away",
"zoning",
{
"latitude": hass.config.latitude + 0.3,
"longitude": hass.config.longitude + 0.3,
},
)
assert (
template.Template(
"{{ closest(states.zone.far_away, states.test_domain).entity_id }}", hass
).async_render()
== "test_domain.closest_zone"
)
def test_closest_function_invalid_state(hass):
"""Test closest function invalid state."""
hass.states.async_set(
"test_domain.closest_home",
"happy",
{
"latitude": hass.config.latitude + 0.1,
"longitude": hass.config.longitude + 0.1,
},
)
for state in ("states.zone.non_existing", '"zone.non_existing"'):
assert (
template.Template("{{ closest(%s, states) }}" % state, hass).async_render()
is None
)
def test_closest_function_state_with_invalid_location(hass):
"""Test closest function state with invalid location."""
hass.states.async_set(
"test_domain.closest_home",
"happy",
{"latitude": "invalid latitude", "longitude": hass.config.longitude + 0.1},
)
assert (
template.Template(
"{{ closest(states.test_domain.closest_home, states) }}", hass
).async_render()
is None
)
def test_closest_function_invalid_coordinates(hass):
"""Test closest function invalid coordinates."""
hass.states.async_set(
"test_domain.closest_home",
"happy",
{
"latitude": hass.config.latitude + 0.1,
"longitude": hass.config.longitude + 0.1,
},
)
assert (
template.Template(
'{{ closest("invalid", "coord", states) }}', hass
).async_render()
is None
)
assert (
template.Template(
'{{ states | closest("invalid", "coord") }}', hass
).async_render()
is None
)
def test_closest_function_no_location_states(hass):
"""Test closest function without location states."""
assert (
template.Template("{{ closest(states).entity_id }}", hass).async_render() == ""
)
def test_generate_filter_iterators(hass):
"""Test extract entities function with none entities stuff."""
info = render_to_info(
hass,
"""
{% for state in states %}
{{ state.entity_id }}
{% endfor %}
""",
)
assert_result_info(info, "", all_states=True)
info = render_to_info(
hass,
"""
{% for state in states.sensor %}
{{ state.entity_id }}
{% endfor %}
""",
)
assert_result_info(info, "", domains=["sensor"])
hass.states.async_set("sensor.test_sensor", "off", {"attr": "value"})
# Don't need the entity because the state is not accessed
info = render_to_info(
hass,
"""
{% for state in states.sensor %}
{{ state.entity_id }}
{% endfor %}
""",
)
assert_result_info(info, "sensor.test_sensor", domains=["sensor"])
# But we do here because the state gets accessed
info = render_to_info(
hass,
"""
{% for state in states.sensor %}
{{ state.entity_id }}={{ state.state }},
{% endfor %}
""",
)
assert_result_info(info, "sensor.test_sensor=off,", [], ["sensor"])
info = render_to_info(
hass,
"""
{% for state in states.sensor %}
{{ state.entity_id }}={{ state.attributes.attr }},
{% endfor %}
""",
)
assert_result_info(info, "sensor.test_sensor=value,", [], ["sensor"])
def test_generate_select(hass):
"""Test extract entities function with none entities stuff."""
template_str = """
{{ states.sensor|selectattr("state","equalto","off")
|join(",", attribute="entity_id") }}
"""
tmp = template.Template(template_str, hass)
info = tmp.async_render_to_info()
assert_result_info(info, "", [], [])
assert info.domains_lifecycle == {"sensor"}
hass.states.async_set("sensor.test_sensor", "off", {"attr": "value"})
hass.states.async_set("sensor.test_sensor_on", "on")
info = tmp.async_render_to_info()
assert_result_info(
info,
"sensor.test_sensor",
[],
["sensor"],
)
assert info.domains_lifecycle == {"sensor"}
async def test_async_render_to_info_in_conditional(hass):
"""Test extract entities function with none entities stuff."""
template_str = """
{{ states("sensor.xyz") == "dog" }}
"""
tmp = template.Template(template_str, hass)
info = tmp.async_render_to_info()
assert_result_info(info, False, ["sensor.xyz"], [])
hass.states.async_set("sensor.xyz", "dog")
hass.states.async_set("sensor.cow", "True")
await hass.async_block_till_done()
template_str = """
{% if states("sensor.xyz") == "dog" %}
{{ states("sensor.cow") }}
{% else %}
{{ states("sensor.pig") }}
{% endif %}
"""
tmp = template.Template(template_str, hass)
info = tmp.async_render_to_info()
assert_result_info(info, True, ["sensor.xyz", "sensor.cow"], [])
hass.states.async_set("sensor.xyz", "sheep")
hass.states.async_set("sensor.pig", "oink")
await hass.async_block_till_done()
tmp = template.Template(template_str, hass)
info = tmp.async_render_to_info()
assert_result_info(info, "oink", ["sensor.xyz", "sensor.pig"], [])
def test_jinja_namespace(hass):
"""Test Jinja's namespace command can be used."""
test_template = template.Template(
(
"{% set ns = namespace(a_key='') %}"
"{% set ns.a_key = states.sensor.dummy.state %}"
"{{ ns.a_key }}"
),
hass,
)
hass.states.async_set("sensor.dummy", "a value")
assert test_template.async_render() == "a value"
hass.states.async_set("sensor.dummy", "another value")
assert test_template.async_render() == "another value"
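# Illustrative sketch, not part of the original suite: namespace() is the
# standard Jinja way to mutate a value across loop iterations, which a plain
# {% set %} inside the loop cannot do.
def test_jinja_namespace_loop_counter_sketch(hass):
    """Sketch: accumulate a total across a loop with namespace()."""
    tpl = template.Template(
        "{% set ns = namespace(total=0) %}"
        "{% for i in [1, 2, 3] %}{% set ns.total = ns.total + i %}{% endfor %}"
        "{{ ns.total }}",
        hass,
    )
    assert tpl.async_render() == 6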
def test_state_with_unit(hass):
"""Test the state_with_unit property helper."""
hass.states.async_set("sensor.test", "23", {ATTR_UNIT_OF_MEASUREMENT: "beers"})
hass.states.async_set("sensor.test2", "wow")
tpl = template.Template("{{ states.sensor.test.state_with_unit }}", hass)
assert tpl.async_render() == "23 beers"
tpl = template.Template("{{ states.sensor.test2.state_with_unit }}", hass)
assert tpl.async_render() == "wow"
tpl = template.Template(
"{% for state in states %}{{ state.state_with_unit }} {% endfor %}", hass
)
assert tpl.async_render() == "23 beers wow"
tpl = template.Template("{{ states.sensor.non_existing.state_with_unit }}", hass)
assert tpl.async_render() == ""
def test_length_of_states(hass):
"""Test fetching the length of states."""
hass.states.async_set("sensor.test", "23")
hass.states.async_set("sensor.test2", "wow")
hass.states.async_set("climate.test2", "cooling")
tpl = template.Template("{{ states | length }}", hass)
assert tpl.async_render() == 3
tpl = template.Template("{{ states.sensor | length }}", hass)
assert tpl.async_render() == 2
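# Illustrative sketch, not part of the original suite: length composes with
# selectattr, so filtered state counts work the same way as the domain views
# above.
def test_length_of_filtered_states_sketch(hass):
    """Sketch: count only the states matching a filter."""
    hass.states.async_set("sensor.a", "on")
    hass.states.async_set("sensor.b", "off")
    tpl = template.Template(
        "{{ states.sensor | selectattr('state', 'eq', 'on') | list | length }}",
        hass,
    )
    assert tpl.async_render() == 1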
def test_render_complex_handling_non_template_values(hass):
"""Test that we can render non-template fields."""
assert template.render_complex(
{True: 1, False: template.Template("{{ hello }}", hass)}, {"hello": 2}
) == {True: 1, False: 2}
def test_urlencode(hass):
"""Test the urlencode method."""
tpl = template.Template(
("{% set dict = {'foo': 'x&y', 'bar': 42} %}" "{{ dict | urlencode }}"),
hass,
)
assert tpl.async_render() == "foo=x%26y&bar=42"
tpl = template.Template(
("{% set string = 'the quick brown fox = true' %}" "{{ string | urlencode }}"),
hass,
)
assert tpl.async_render() == "the%20quick%20brown%20fox%20%3D%20true"
async def test_cache_garbage_collection():
"""Test caching a template."""
template_string = (
"{% set dict = {'foo': 'x&y', 'bar': 42} %} {{ dict | urlencode }}"
)
    tpl = template.Template(template_string)
tpl.ensure_valid()
assert template._NO_HASS_ENV.template_cache.get(
template_string
) # pylint: disable=protected-access
    tpl2 = template.Template(template_string)
tpl2.ensure_valid()
assert template._NO_HASS_ENV.template_cache.get(
template_string
) # pylint: disable=protected-access
del tpl
assert template._NO_HASS_ENV.template_cache.get(
template_string
) # pylint: disable=protected-access
del tpl2
assert not template._NO_HASS_ENV.template_cache.get(
template_string
) # pylint: disable=protected-access
def test_is_template_string():
"""Test is template string."""
assert template.is_template_string("{{ x }}") is True
assert template.is_template_string("{% if x == 2 %}1{% else %}0{%end if %}") is True
assert template.is_template_string("{# a comment #} Hey") is True
assert template.is_template_string("1") is False
assert template.is_template_string("Some Text") is False
async def test_protected_blocked(hass):
"""Test accessing __getattr__ produces a template error."""
tmp = template.Template('{{ states.__getattr__("any") }}', hass)
with pytest.raises(TemplateError):
tmp.async_render()
tmp = template.Template('{{ states.sensor.__getattr__("any") }}', hass)
with pytest.raises(TemplateError):
tmp.async_render()
tmp = template.Template('{{ states.sensor.any.__getattr__("any") }}', hass)
with pytest.raises(TemplateError):
tmp.async_render()
async def test_demo_template(hass):
"""Test the demo template works as expected."""
hass.states.async_set("sun.sun", "above", {"elevation": 50, "next_rising": "later"})
for i in range(2):
hass.states.async_set(f"sensor.sensor{i}", "on")
demo_template_str = """
{## Imitate available variables: ##}
{% set my_test_json = {
"temperature": 25,
"unit": "°C"
} %}
The temperature is {{ my_test_json.temperature }} {{ my_test_json.unit }}.
{% if is_state("sun.sun", "above_horizon") -%}
The sun rose {{ relative_time(states.sun.sun.last_changed) }} ago.
{%- else -%}
The sun will rise at {{ as_timestamp(strptime(state_attr("sun.sun", "next_rising"), "")) | timestamp_local }}.
{%- endif %}
For loop example getting 3 entity values:
{% for states in states | slice(3) -%}
{% set state = states | first %}
{%- if loop.first %}The {% elif loop.last %} and the {% else %}, the {% endif -%}
{{ state.name | lower }} is {{state.state_with_unit}}
{%- endfor %}.
"""
tmp = template.Template(demo_template_str, hass)
result = tmp.async_render()
assert "The temperature is 25" in result
assert "is on" in result
assert "sensor0" in result
assert "sensor1" in result
assert "sun" in result
async def test_slice_states(hass):
"""Test iterating states with a slice."""
hass.states.async_set("sensor.test", "23")
tpl = template.Template(
"{% for states in states | slice(1) -%}{% set state = states | first %}{{ state.entity_id }}{%- endfor %}",
hass,
)
assert tpl.async_render() == "sensor.test"
async def test_lifecycle(hass):
"""Test that we limit template render info for lifecycle events."""
hass.states.async_set("sun.sun", "above", {"elevation": 50, "next_rising": "later"})
for i in range(2):
hass.states.async_set(f"sensor.sensor{i}", "on")
hass.states.async_set("sensor.removed", "off")
await hass.async_block_till_done()
hass.states.async_set("sun.sun", "below", {"elevation": 60, "next_rising": "later"})
for i in range(2):
hass.states.async_set(f"sensor.sensor{i}", "off")
hass.states.async_set("sensor.new", "off")
hass.states.async_remove("sensor.removed")
await hass.async_block_till_done()
tmp = template.Template("{{ states | count }}", hass)
info = tmp.async_render_to_info()
assert info.all_states is False
assert info.all_states_lifecycle is True
assert info.rate_limit is None
assert info.has_time is False
assert info.entities == set()
assert info.domains == set()
assert info.domains_lifecycle == set()
assert info.filter("sun.sun") is False
assert info.filter("sensor.sensor1") is False
assert info.filter_lifecycle("sensor.new") is True
assert info.filter_lifecycle("sensor.removed") is True
async def test_template_timeout(hass):
"""Test to see if a template will timeout."""
for i in range(2):
hass.states.async_set(f"sensor.sensor{i}", "on")
tmp = template.Template("{{ states | count }}", hass)
assert await tmp.async_render_will_timeout(3) is False
tmp3 = template.Template("static", hass)
assert await tmp3.async_render_will_timeout(3) is False
tmp4 = template.Template("{{ var1 }}", hass)
assert await tmp4.async_render_will_timeout(3, {"var1": "ok"}) is False
slow_template_str = """
{% for var in range(1000) -%}
{% for var in range(1000) -%}
{{ var }}
{%- endfor %}
{%- endfor %}
"""
tmp5 = template.Template(slow_template_str, hass)
assert await tmp5.async_render_will_timeout(0.000001) is True
async def test_template_timeout_raise(hass):
"""Test we can raise from."""
tmp2 = template.Template("{{ error_invalid + 1 }}", hass)
with pytest.raises(TemplateError):
assert await tmp2.async_render_will_timeout(3) is False
async def test_lights(hass):
"""Test we can sort lights."""
tmpl = """
{% set lights_on = states.light|selectattr('state','eq','on')|map(attribute='name')|list %}
{% if lights_on|length == 0 %}
No lights on. Sleep well..
{% elif lights_on|length == 1 %}
The {{lights_on[0]}} light is on.
{% elif lights_on|length == 2 %}
The {{lights_on[0]}} and {{lights_on[1]}} lights are on.
{% else %}
The {{lights_on[:-1]|join(', ')}}, and {{lights_on[-1]}} lights are on.
{% endif %}
"""
states = []
for i in range(10):
states.append(f"light.sensor{i}")
hass.states.async_set(f"light.sensor{i}", "on")
tmp = template.Template(tmpl, hass)
info = tmp.async_render_to_info()
assert info.entities == set()
assert info.domains == {"light"}
assert "lights are on" in info.result()
for i in range(10):
assert f"sensor{i}" in info.result()
async def test_template_errors(hass):
"""Test template rendering wraps exceptions with TemplateError."""
with pytest.raises(TemplateError):
template.Template("{{ now() | rando }}", hass).async_render()
with pytest.raises(TemplateError):
template.Template("{{ utcnow() | rando }}", hass).async_render()
with pytest.raises(TemplateError):
template.Template("{{ now() | random }}", hass).async_render()
with pytest.raises(TemplateError):
template.Template("{{ utcnow() | random }}", hass).async_render()
async def test_state_attributes(hass):
"""Test state attributes."""
hass.states.async_set("sensor.test", "23")
tpl = template.Template(
"{{ states.sensor.test.last_changed }}",
hass,
)
assert tpl.async_render() == str(hass.states.get("sensor.test").last_changed)
tpl = template.Template(
"{{ states.sensor.test.object_id }}",
hass,
)
assert tpl.async_render() == hass.states.get("sensor.test").object_id
tpl = template.Template(
"{{ states.sensor.test.domain }}",
hass,
)
assert tpl.async_render() == hass.states.get("sensor.test").domain
tpl = template.Template(
"{{ states.sensor.test.context.id }}",
hass,
)
assert tpl.async_render() == hass.states.get("sensor.test").context.id
tpl = template.Template(
"{{ states.sensor.test.state_with_unit }}",
hass,
)
assert tpl.async_render() == 23
tpl = template.Template(
"{{ states.sensor.test.invalid_prop }}",
hass,
)
assert tpl.async_render() == ""
tpl = template.Template(
"{{ states.sensor.test.invalid_prop.xx }}",
hass,
)
with pytest.raises(TemplateError):
tpl.async_render()
async def test_unavailable_states(hass):
"""Test watching unavailable states."""
for i in range(10):
hass.states.async_set(f"light.sensor{i}", "on")
hass.states.async_set("light.unavailable", "unavailable")
hass.states.async_set("light.unknown", "unknown")
hass.states.async_set("light.none", "none")
tpl = template.Template(
"{{ states | selectattr('state', 'in', ['unavailable','unknown','none']) | map(attribute='entity_id') | list | join(', ') }}",
hass,
)
assert tpl.async_render() == "light.none, light.unavailable, light.unknown"
tpl = template.Template(
"{{ states.light | selectattr('state', 'in', ['unavailable','unknown','none']) | map(attribute='entity_id') | list | join(', ') }}",
hass,
)
assert tpl.async_render() == "light.none, light.unavailable, light.unknown"
async def test_legacy_templates(hass):
"""Test if old template behavior works when legacy templates are enabled."""
hass.states.async_set("sensor.temperature", "12")
assert (
template.Template("{{ states.sensor.temperature.state }}", hass).async_render()
== 12
)
await async_process_ha_core_config(hass, {"legacy_templates": True})
assert (
template.Template("{{ states.sensor.temperature.state }}", hass).async_render()
== "12"
)
async def test_no_result_parsing(hass):
"""Test if templates results are not parsed."""
hass.states.async_set("sensor.temperature", "12")
assert (
template.Template("{{ states.sensor.temperature.state }}", hass).async_render(
parse_result=False
)
== "12"
)
assert (
template.Template("{{ false }}", hass).async_render(parse_result=False)
== "False"
)
assert (
template.Template("{{ [1, 2, 3] }}", hass).async_render(parse_result=False)
== "[1, 2, 3]"
)
async def test_is_static_still_ast_evals(hass):
"""Test is_static still converts to native type."""
tpl = template.Template("[1, 2]", hass)
assert tpl.is_static
assert tpl.async_render() == [1, 2]
async def test_result_wrappers(hass):
"""Test result wrappers."""
for text, native, orig_type, schema in (
("[1, 2]", [1, 2], list, vol.Schema([int])),
("{1, 2}", {1, 2}, set, vol.Schema({int})),
("(1, 2)", (1, 2), tuple, vol.ExactSequence([int, int])),
('{"hello": True}', {"hello": True}, dict, vol.Schema({"hello": bool})),
):
tpl = template.Template(text, hass)
result = tpl.async_render()
assert isinstance(result, orig_type)
assert isinstance(result, template.ResultWrapper)
assert result == native
assert result.render_result == text
schema(result) # should not raise
# Result with render text stringifies to original text
assert str(result) == text
# Result without render text stringifies same as original type
assert str(template.RESULT_WRAPPERS[orig_type](native)) == str(
orig_type(native)
)
async def test_parse_result(hass):
"""Test parse result."""
for tpl, result in (
('{{ "{{}}" }}', "{{}}"),
("not-something", "not-something"),
("2a", "2a"),
("123E5", "123E5"),
("1j", "1j"),
("1e+100", "1e+100"),
("0xface", "0xface"),
("123", 123),
("10", 10),
("123.0", 123.0),
(".5", 0.5),
("0.5", 0.5),
("-1", -1),
("-1.0", -1.0),
("+1", 1),
("5.", 5.0),
("123_123_123", "123_123_123"),
# ("+48100200300", "+48100200300"), # phone number
("010", "010"),
("0011101.00100001010001", "0011101.00100001010001"),
):
assert template.Template(tpl, hass).async_render() == result
async def test_undefined_variable(hass, caplog):
"""Test a warning is logged on undefined variables."""
tpl = template.Template("{{ no_such_variable }}", hass)
assert tpl.async_render() == ""
assert (
"Template variable warning: 'no_such_variable' is undefined when rendering '{{ no_such_variable }}'"
in caplog.text
)
|
{
"content_hash": "783204d6f03e1938c69e6b9418eee6c2",
"timestamp": "",
"source": "github",
"line_count": 3476,
"max_line_length": 191,
"avg_line_length": 31.204257767548906,
"alnum_prop": 0.5807073184223628,
"repo_name": "jawilson/home-assistant",
"id": "4a97b99d05d46d2494f6608d24283fc1522b2f1d",
"size": "108479",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/helpers/test_template.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2782"
},
{
"name": "Python",
"bytes": "40129467"
},
{
"name": "Shell",
"bytes": "4910"
}
],
"symlink_target": ""
}
|
from Classes import Status
from runner import run
from test_data import tc1, cpp_source_code_add_two_numbers, c_source_code_add_two_numbers_compile_error
def test_cpp_add_two_numbers_code():
out = run(cpp_source_code_add_two_numbers, "cpp", ["g++", "-o", "program"], ["./program"], [tc1])
assert 1 == len(out)
assert '57' == out[0].stdout
assert Status.OK == out[0].status
|
{
"content_hash": "a9d7e5f3da0df65eebf78512c4eb9c35",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 103,
"avg_line_length": 35.45454545454545,
"alnum_prop": 0.6641025641025641,
"repo_name": "jbuddha/OpenRank",
"id": "21115a4ed5ff4727cf7a4a10ba55703a8ba653e0",
"size": "390",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "framework/test_runner_cpp.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "196"
},
{
"name": "JavaScript",
"bytes": "17243"
},
{
"name": "Python",
"bytes": "20138"
},
{
"name": "Vue",
"bytes": "2282"
}
],
"symlink_target": ""
}
|
from PyQt4 import QtGui, QtCore
from Views import ui_dialog_splitcolumn as dsplitcolumn
from poplerGUI import ui_logic_preview as tprev
from poplerGUI import class_modelviewpandas as view
from poplerGUI.logiclayer import class_helpers as hlp
from poplerGUI import class_inputhandler as ini
class SplitColumnDialog(QtGui.QDialog, dsplitcolumn.Ui_Dialog):
    '''
    User logic to split one column into two based on a user-supplied
    separator (currently regex separators do not work).
    '''
update_data = QtCore.pyqtSignal(object)
def __init__(self, parent=None):
super().__init__(parent)
self.setupUi(self)
self.previous_click = False
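        # Note: self.facade (used throughout submit_change) is expected to be
        # attached by the calling window before this dialog is used.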
# Place holders for user inputs
self.splitcolumnlned = {}
# Place holder: Data Model/ Data model view
self.splitcolumnmodel = None
self.viewEdit = view.PandasTableModelEdit(None)
# Placeholders: Data tables
self.splitcolumntable = None
# Actions
self.btnPreview.clicked.connect(self.submit_change)
self.btnSaveClose.clicked.connect(self.submit_change)
self.btnCancel.clicked.connect(self.close)
# Update boxes/preview box
self.message = QtGui.QMessageBox
self.error = QtGui.QErrorMessage()
self.preview = tprev.TablePreview()
def submit_change(self):
sender = self.sender()
self.splitcolumnlned = {
'column_name':
self.lnedColumnname.text().strip(),
'split_column_by':
self.lnedSplitcolumnby.text()
}
print('split inputs: ', self.splitcolumnlned)
self.splitcolumnini = ini.InputHandler(
name='splitcolumn',
lnedentry=self.splitcolumnlned
)
self.facade.input_register(self.splitcolumnini)
self.facade.create_log_record('splitcolumn')
self._log = self.facade._tablelog['splitcolumn']
if self.previous_click is True:
self.viewEdit = view.PandasTableModelEdit(None)
else:
pass
try:
self.splitcolumntable = hlp.split_column(
self.facade._data,
self.splitcolumnlned['column_name'],
self.splitcolumnlned['split_column_by']
)
self.previous_click = True
hlp.write_column_to_log(
self.splitcolumnlned, self._log, 'splitcolumn')
except Exception as e:
print(str(e))
self.error.showMessage(
'Could not split column: ' + str(e))
if sender is self.btnPreview:
self.viewEdit.set_data(
self.splitcolumntable)
self.preview.tabviewPreview.setModel(
self.viewEdit)
self.preview.show()
elif sender is self.btnSaveClose:
self.facade._data = self.splitcolumntable
self.update_data.emit('update')
self.close()
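# A minimal sketch (assumption: illustrative only, mirroring how
# hlp.split_column is invoked above) of splitting one pandas column into two
# on a plain-text separator:
def _example_split_column():
    import pandas as pd
    df = pd.DataFrame({'taxon': ['Genus species', 'Another epithet']})
    df[['genus', 'species']] = df['taxon'].str.split(' ', n=1, expand=True)
    return df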
|
{
"content_hash": "d224af2486d9c00d09e845e8d44054ec",
"timestamp": "",
"source": "github",
"line_count": 81,
"max_line_length": 66,
"avg_line_length": 36.629629629629626,
"alnum_prop": 0.6164475901584092,
"repo_name": "bibsian/database-development",
"id": "541aa67dab055812783e036ecb82800b7a480806",
"size": "2989",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "poplerGUI/ui_logic_splitcolumn.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1241917"
}
],
"symlink_target": ""
}
|
"""Python part of the warnings subsystem."""
import sys
__all__ = ["warn", "warn_explicit", "showwarning",
"formatwarning", "filterwarnings", "simplefilter",
"resetwarnings", "catch_warnings"]
def showwarning(message, category, filename, lineno, file=None, line=None):
"""Hook to write a warning to a file; replace if you like."""
msg = WarningMessage(message, category, filename, lineno, file, line)
_showwarnmsg_impl(msg)
def formatwarning(message, category, filename, lineno, line=None):
"""Function to format a warning the standard way."""
msg = WarningMessage(message, category, filename, lineno, None, line)
return _formatwarnmsg_impl(msg)
def _showwarnmsg_impl(msg):
file = msg.file
if file is None:
file = sys.stderr
if file is None:
# sys.stderr is None when run with pythonw.exe:
# warnings get lost
return
text = _formatwarnmsg(msg)
try:
file.write(text)
except OSError:
# the file (probably stderr) is invalid - this warning gets lost.
pass
def _formatwarnmsg_impl(msg):
s = ("%s:%s: %s: %s\n"
% (msg.filename, msg.lineno, msg.category.__name__,
msg.message))
if msg.line is None:
try:
import linecache
line = linecache.getline(msg.filename, msg.lineno)
except Exception:
# When a warning is logged during Python shutdown, linecache
# and the import machinery don't work anymore
line = None
linecache = None
else:
line = msg.line
if line:
line = line.strip()
s += " %s\n" % line
if msg.source is not None:
try:
import tracemalloc
tb = tracemalloc.get_object_traceback(msg.source)
except Exception:
# When a warning is logged during Python shutdown, tracemalloc
# and the import machinery don't work anymore
tb = None
if tb is not None:
s += 'Object allocated at (most recent call last):\n'
for frame in tb:
s += (' File "%s", lineno %s\n'
% (frame.filename, frame.lineno))
try:
if linecache is not None:
line = linecache.getline(frame.filename, frame.lineno)
else:
line = None
except Exception:
line = None
if line:
line = line.strip()
s += ' %s\n' % line
return s
# Keep a reference to check if the function was replaced
_showwarning_orig = showwarning
def _showwarnmsg(msg):
"""Hook to write a warning to a file; replace if you like."""
try:
sw = showwarning
except NameError:
pass
else:
if sw is not _showwarning_orig:
# warnings.showwarning() was replaced
if not callable(sw):
raise TypeError("warnings.showwarning() must be set to a "
"function or method")
sw(msg.message, msg.category, msg.filename, msg.lineno,
msg.file, msg.line)
return
_showwarnmsg_impl(msg)
# Keep a reference to check if the function was replaced
_formatwarning_orig = formatwarning
def _formatwarnmsg(msg):
"""Function to format a warning the standard way."""
try:
fw = formatwarning
except NameError:
pass
else:
if fw is not _formatwarning_orig:
# warnings.formatwarning() was replaced
return fw(msg.message, msg.category,
msg.filename, msg.lineno, line=msg.line)
return _formatwarnmsg_impl(msg)
def filterwarnings(action, message="", category=Warning, module="", lineno=0,
append=False):
"""Insert an entry into the list of warnings filters (at the front).
'action' -- one of "error", "ignore", "always", "default", "module",
or "once"
'message' -- a regex that the warning message must match
'category' -- a class that the warning must be a subclass of
'module' -- a regex that the module name must match
'lineno' -- an integer line number, 0 matches all warnings
'append' -- if true, append to the list of filters
"""
assert action in ("error", "ignore", "always", "default", "module",
"once"), "invalid action: %r" % (action,)
assert isinstance(message, str), "message must be a string"
assert isinstance(category, type), "category must be a class"
assert issubclass(category, Warning), "category must be a Warning subclass"
assert isinstance(module, str), "module must be a string"
assert isinstance(lineno, int) and lineno >= 0, \
"lineno must be an int >= 0"
if message or module:
import re
if message:
message = re.compile(message, re.I)
else:
message = None
if module:
module = re.compile(module)
else:
module = None
_add_filter(action, message, category, module, lineno, append=append)
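# Illustrative example (a sketch, not part of this module): escalate
# DeprecationWarning raised from modules matching "mypkg" into exceptions,
# per the docstring above.
def _example_filterwarnings():
    filterwarnings("error", category=DeprecationWarning, module=r"mypkg")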
def simplefilter(action, category=Warning, lineno=0, append=False):
"""Insert a simple entry into the list of warnings filters (at the front).
A simple filter matches all modules and messages.
'action' -- one of "error", "ignore", "always", "default", "module",
or "once"
'category' -- a class that the warning must be a subclass of
'lineno' -- an integer line number, 0 matches all warnings
'append' -- if true, append to the list of filters
"""
assert action in ("error", "ignore", "always", "default", "module",
"once"), "invalid action: %r" % (action,)
assert isinstance(lineno, int) and lineno >= 0, \
"lineno must be an int >= 0"
_add_filter(action, None, category, None, lineno, append=append)
def _add_filter(*item, append):
    # Remove possible duplicate filters, so the new one will be placed
    # in the correct place. If append=True and a duplicate exists, do nothing.
if not append:
try:
filters.remove(item)
except ValueError:
pass
filters.insert(0, item)
else:
if item not in filters:
filters.append(item)
_filters_mutated()
def resetwarnings():
"""Clear the list of warning filters, so that no filters are active."""
filters[:] = []
_filters_mutated()
class _OptionError(Exception):
"""Exception used by option processing helpers."""
pass
# Helper to process -W options passed via sys.warnoptions
def _processoptions(args):
for arg in args:
try:
_setoption(arg)
except _OptionError as msg:
print("Invalid -W option ignored:", msg, file=sys.stderr)
# Helper for _processoptions()
def _setoption(arg):
import re
parts = arg.split(':')
if len(parts) > 5:
raise _OptionError("too many fields (max 5): %r" % (arg,))
while len(parts) < 5:
parts.append('')
action, message, category, module, lineno = [s.strip()
for s in parts]
action = _getaction(action)
message = re.escape(message)
category = _getcategory(category)
module = re.escape(module)
if module:
module = module + '$'
if lineno:
try:
lineno = int(lineno)
if lineno < 0:
raise ValueError
except (ValueError, OverflowError):
raise _OptionError("invalid lineno %r" % (lineno,)) from None
else:
lineno = 0
filterwarnings(action, message, category, module, lineno)
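# Illustrative mapping (comment only): the option string
# "error::DeprecationWarning:mypkg:10" splits into five fields and is
# equivalent to
#   filterwarnings("error", "", DeprecationWarning, "mypkg$", 10)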
# Helper for _setoption()
def _getaction(action):
if not action:
return "default"
if action == "all": return "always" # Alias
for a in ('default', 'always', 'ignore', 'module', 'once', 'error'):
if a.startswith(action):
return a
raise _OptionError("invalid action: %r" % (action,))
# Helper for _setoption()
def _getcategory(category):
import re
if not category:
return Warning
if re.match("^[a-zA-Z0-9_]+$", category):
try:
cat = eval(category)
except NameError:
raise _OptionError("unknown warning category: %r" % (category,)) from None
else:
i = category.rfind(".")
module = category[:i]
klass = category[i+1:]
try:
m = __import__(module, None, None, [klass])
except ImportError:
raise _OptionError("invalid module name: %r" % (module,)) from None
try:
cat = getattr(m, klass)
except AttributeError:
raise _OptionError("unknown warning category: %r" % (category,)) from None
if not issubclass(cat, Warning):
raise _OptionError("invalid warning category: %r" % (category,))
return cat
def _is_internal_frame(frame):
"""Signal whether the frame is an internal CPython implementation detail."""
filename = frame.f_code.co_filename
return 'importlib' in filename and '_bootstrap' in filename
def _next_external_frame(frame):
"""Find the next frame that doesn't involve CPython internals."""
frame = frame.f_back
while frame is not None and _is_internal_frame(frame):
frame = frame.f_back
return frame
# Code typically replaced by _warnings
def warn(message, category=None, stacklevel=1, source=None):
"""Issue a warning, or maybe ignore it or raise an exception."""
# Check if message is already a Warning object
if isinstance(message, Warning):
category = message.__class__
# Check category argument
if category is None:
category = UserWarning
if not (isinstance(category, type) and issubclass(category, Warning)):
raise TypeError("category must be a Warning subclass, "
"not '{:s}'".format(type(category).__name__))
# Get context information
try:
if stacklevel <= 1 or _is_internal_frame(sys._getframe(1)):
# If frame is too small to care or if the warning originated in
# internal code, then do not try to hide any frames.
frame = sys._getframe(stacklevel)
else:
frame = sys._getframe(1)
# Look for one frame less since the above line starts us off.
for x in range(stacklevel-1):
frame = _next_external_frame(frame)
if frame is None:
raise ValueError
except ValueError:
globals = sys.__dict__
lineno = 1
else:
globals = frame.f_globals
lineno = frame.f_lineno
if '__name__' in globals:
module = globals['__name__']
else:
module = "<string>"
filename = globals.get('__file__')
if filename:
fnl = filename.lower()
if fnl.endswith(".pyc"):
filename = filename[:-1]
else:
if module == "__main__":
try:
filename = sys.argv[0]
except AttributeError:
# embedded interpreters don't have sys.argv, see bug #839151
filename = '__main__'
if not filename:
filename = module
registry = globals.setdefault("__warningregistry__", {})
warn_explicit(message, category, filename, lineno, module, registry,
globals, source)
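# Illustrative example (a sketch, not part of this module): stacklevel=2
# attributes the warning to the caller of the deprecated helper rather than
# to the helper itself.
def _example_deprecated_helper():
    warn("old_api() is deprecated; use new_api()", DeprecationWarning,
         stacklevel=2)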
def warn_explicit(message, category, filename, lineno,
module=None, registry=None, module_globals=None,
source=None):
lineno = int(lineno)
if module is None:
module = filename or "<unknown>"
if module[-3:].lower() == ".py":
module = module[:-3] # XXX What about leading pathname?
if registry is None:
registry = {}
if registry.get('version', 0) != _filters_version:
registry.clear()
registry['version'] = _filters_version
if isinstance(message, Warning):
text = str(message)
category = message.__class__
else:
text = message
message = category(message)
key = (text, category, lineno)
# Quick test for common case
if registry.get(key):
return
# Search the filters
for item in filters:
action, msg, cat, mod, ln = item
if ((msg is None or msg.match(text)) and
issubclass(category, cat) and
(mod is None or mod.match(module)) and
(ln == 0 or lineno == ln)):
break
else:
action = defaultaction
# Early exit actions
if action == "ignore":
return
# Prime the linecache for formatting, in case the
# "file" is actually in a zipfile or something.
import linecache
linecache.getlines(filename, module_globals)
if action == "error":
raise message
# Other actions
if action == "once":
registry[key] = 1
oncekey = (text, category)
if onceregistry.get(oncekey):
return
onceregistry[oncekey] = 1
elif action == "always":
pass
elif action == "module":
registry[key] = 1
altkey = (text, category, 0)
if registry.get(altkey):
return
registry[altkey] = 1
elif action == "default":
registry[key] = 1
else:
# Unrecognized actions are errors
raise RuntimeError(
"Unrecognized action (%r) in warnings.filters:\n %s" %
(action, item))
# Print message and context
msg = WarningMessage(message, category, filename, lineno, source)
_showwarnmsg(msg)
class WarningMessage(object):
_WARNING_DETAILS = ("message", "category", "filename", "lineno", "file",
"line", "source")
def __init__(self, message, category, filename, lineno, file=None,
line=None, source=None):
self.message = message
self.category = category
self.filename = filename
self.lineno = lineno
self.file = file
self.line = line
self.source = source
self._category_name = category.__name__ if category else None
def __str__(self):
return ("{message : %r, category : %r, filename : %r, lineno : %s, "
"line : %r}" % (self.message, self._category_name,
self.filename, self.lineno, self.line))
class catch_warnings(object):
"""A context manager that copies and restores the warnings filter upon
exiting the context.
The 'record' argument specifies whether warnings should be captured by a
custom implementation of warnings.showwarning() and be appended to a list
returned by the context manager. Otherwise None is returned by the context
manager. The objects appended to the list are arguments whose attributes
mirror the arguments to showwarning().
The 'module' argument is to specify an alternative module to the module
named 'warnings' and imported under that name. This argument is only useful
when testing the warnings module itself.
"""
def __init__(self, *, record=False, module=None):
"""Specify whether to record warnings and if an alternative module
should be used other than sys.modules['warnings'].
For compatibility with Python 3.0, please consider all arguments to be
keyword-only.
"""
self._record = record
self._module = sys.modules['warnings'] if module is None else module
self._entered = False
def __repr__(self):
args = []
if self._record:
args.append("record=True")
if self._module is not sys.modules['warnings']:
args.append("module=%r" % self._module)
name = type(self).__name__
return "%s(%s)" % (name, ", ".join(args))
def __enter__(self):
if self._entered:
raise RuntimeError("Cannot enter %r twice" % self)
self._entered = True
self._filters = self._module.filters
self._module.filters = self._filters[:]
self._module._filters_mutated()
self._showwarning = self._module.showwarning
self._showwarnmsg_impl = self._module._showwarnmsg_impl
if self._record:
log = []
self._module._showwarnmsg_impl = log.append
# Reset showwarning() to the default implementation to make sure
# that _showwarnmsg() calls _showwarnmsg_impl()
self._module.showwarning = self._module._showwarning_orig
return log
else:
return None
def __exit__(self, *exc_info):
if not self._entered:
raise RuntimeError("Cannot exit %r without entering first" % self)
self._module.filters = self._filters
self._module._filters_mutated()
self._module.showwarning = self._showwarning
self._module._showwarnmsg_impl = self._showwarnmsg_impl
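# Illustrative example (a sketch, not part of this module): record warnings
# in-process instead of printing them, then inspect what was caught.
def _example_record_warnings():
    with catch_warnings(record=True) as caught:
        simplefilter("always")
        warn("spam", UserWarning)
    assert caught[0].category is UserWarning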
# Private utility function called by _PyErr_WarnUnawaitedCoroutine
def _warn_unawaited_coroutine(coro):
msg_lines = [
f"coroutine '{coro.__qualname__}' was never awaited\n"
]
if coro.cr_origin is not None:
import linecache, traceback
def extract():
for filename, lineno, funcname in reversed(coro.cr_origin):
line = linecache.getline(filename, lineno)
yield (filename, lineno, funcname, line)
msg_lines.append("Coroutine created at (most recent call last)\n")
msg_lines += traceback.format_list(list(extract()))
msg = "".join(msg_lines).rstrip("\n")
# Passing source= here means that if the user happens to have tracemalloc
# enabled and tracking where the coroutine was created, the warning will
# contain that traceback. This does mean that if they have *both*
# coroutine origin tracking *and* tracemalloc enabled, they'll get two
# partially-redundant tracebacks. If we wanted to be clever we could
# probably detect this case and avoid it, but for now we don't bother.
warn(msg, category=RuntimeWarning, stacklevel=2, source=coro)
# filters contains a sequence of filter 5-tuples
# The components of the 5-tuple are:
# - an action: error, ignore, always, default, module, or once
# - a compiled regex that must match the warning message
# - a class representing the warning category
# - a compiled regex that must match the module that is being warned
# - a line number for the line being warned, or 0 to mean any line
# If either of the compiled regexes is None, match anything.
try:
from _warnings import (filters, _defaultaction, _onceregistry,
warn, warn_explicit, _filters_mutated)
defaultaction = _defaultaction
onceregistry = _onceregistry
_warnings_defaults = True
except ImportError:
filters = []
defaultaction = "default"
onceregistry = {}
_filters_version = 1
def _filters_mutated():
global _filters_version
_filters_version += 1
_warnings_defaults = False
# Module initialization
_processoptions(sys.warnoptions)
if not _warnings_defaults:
# Several warning categories are ignored by default in regular builds
if not hasattr(sys, 'gettotalrefcount'):
filterwarnings("default", category=DeprecationWarning,
module="__main__", append=1)
simplefilter("ignore", category=DeprecationWarning, append=1)
simplefilter("ignore", category=PendingDeprecationWarning, append=1)
simplefilter("ignore", category=ImportWarning, append=1)
simplefilter("ignore", category=ResourceWarning, append=1)
del _warnings_defaults
|
{
"content_hash": "f915d1a7c865e690a6345e8288a6198b",
"timestamp": "",
"source": "github",
"line_count": 554,
"max_line_length": 86,
"avg_line_length": 35.63357400722022,
"alnum_prop": 0.6008307583202472,
"repo_name": "Microsoft/PTVS",
"id": "81f98647786d2c7b37eb51b04aa0fcd51c44e400",
"size": "19741",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "Python/Product/Miniconda/Miniconda3-x64/Lib/warnings.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ASP",
"bytes": "109"
},
{
"name": "Batchfile",
"bytes": "10898"
},
{
"name": "C",
"bytes": "23236"
},
{
"name": "C#",
"bytes": "12235396"
},
{
"name": "C++",
"bytes": "212001"
},
{
"name": "CSS",
"bytes": "7025"
},
{
"name": "HTML",
"bytes": "34251"
},
{
"name": "JavaScript",
"bytes": "87257"
},
{
"name": "PowerShell",
"bytes": "44322"
},
{
"name": "Python",
"bytes": "847130"
},
{
"name": "Rich Text Format",
"bytes": "260880"
},
{
"name": "Smarty",
"bytes": "8156"
},
{
"name": "Tcl",
"bytes": "24968"
}
],
"symlink_target": ""
}
|
import os
import json
import boto
from boto.utils import get_instance_metadata
from boto.s3.key import Key
import boto3
import click
import logging
from copy import copy
from collections import OrderedDict
from datetime import date, timedelta
from elasticsearch import Elasticsearch, RequestError, RequestsHttpConnection
from requests_aws4auth import AWS4Auth
import requests
from reader import csv_reader
logger = logging.getLogger('landsat8.meta')
bucket_name = os.getenv('BUCKETNAME', 'landsat8-meta')
thumbs_bucket_name = os.getenv('THUMBS_BUCKETNAME', 'ad-thumbnails')
s3 = boto3.resource('s3')
es_index = 'sat-api'
es_type = 'landsat8'
def get_credentials():
obj = get_instance_metadata()
return obj['iam']['security-credentials'].values()[0]
def connection_to_es(es_host, es_port, aws=False):
args = {}
if aws:
cred = get_credentials()
access_key = cred['AccessKeyId']
secret_access = cred['SecretAccessKey']
token = cred['Token']
region = os.getenv('AWS_DEFAULT_REGION', 'us-east-1')
awsauth = AWS4Auth(access_key, secret_access, region, 'es',
session_token=token)
args = {
'http_auth': awsauth,
'use_ssl': True,
'verify_certs': True,
'connection_class': RequestsHttpConnection
}
es = Elasticsearch(hosts=[{
'host': es_host,
'port': es_port
}], **args)
return es
def create_index(index_name, doc_type, es_host, es_port, **kwargs):
body = {
doc_type: {
'properties': {
'scene_id': {'type': 'string', 'index': 'not_analyzed'},
'product_id': {'type': 'string'},
'satellite_name': {'type': 'string'},
'cloud_coverage': {'type': 'float'},
'date': {'type': 'date'},
'data_geometry': {
'type': 'geo_shape',
'tree': 'quadtree',
'precision': '5mi'}
}
}
}
es = connection_to_es(es_host, es_port, kwargs['aws'])
es.indices.create(index=index_name, ignore=400)
es.indices.put_mapping(
doc_type=doc_type,
body=body,
index=index_name
)
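# Illustrative sketch (comment only, not part of this script): the geo_shape
# mapping above supports spatial filters such as
#   es.search(index=index_name, body={'query': {'geo_shape': {'data_geometry': {
#       'shape': {'type': 'point', 'coordinates': [-47.0, -15.0]},
#       'relation': 'intersects'}}}})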
def meta_constructor(metadata):
internal_meta = copy(metadata)
data_geometry = {
'type': 'Polygon',
'crs': {
'type': 'name',
'properties': {
'name': 'urn:ogc:def:crs:EPSG:8.9:4326'
}
},
'coordinates': [[
[metadata.get('upperRightCornerLongitude'), metadata.get('upperRightCornerLatitude')],
[metadata.get('upperLeftCornerLongitude'), metadata.get('upperLeftCornerLatitude')],
[metadata.get('lowerLeftCornerLongitude'), metadata.get('lowerLeftCornerLatitude')],
[metadata.get('lowerRightCornerLongitude'), metadata.get('lowerRightCornerLatitude')],
[metadata.get('upperRightCornerLongitude'), metadata.get('upperRightCornerLatitude')]
]]
}
body = OrderedDict([
('scene_id', metadata.get('sceneID')),
('product_id', metadata.get('LANDSAT_PRODUCT_ID')),
('satellite_name', 'landsat-8'),
('cloud_coverage', metadata.get('cloudCoverFull', 100)),
('date', metadata.get('acquisitionDate')),
('thumbnail', metadata.get('browseURL')),
('data_geometry', data_geometry)
])
body.update(internal_meta)
return body
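# Shape of the document produced above (comment only; field values are
# illustrative):
#   {'scene_id': 'LC80010012017001LGN00', 'product_id': '...',
#    'satellite_name': 'landsat-8', 'cloud_coverage': 12.3,
#    'date': '2017-01-01', 'thumbnail': 'https://...',
#    'data_geometry': {'type': 'Polygon', ...}}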
def elasticsearch_updater(product_dir, metadata, **kwargs):
try:
es = connection_to_es(kwargs['es_host'], kwargs['es_port'], kwargs['aws'])
body = meta_constructor(metadata)
logger.info('Pushing to Elasticsearch')
try:
es.index(index=es_index, doc_type=es_type, id=body['scene_id'],
body=body)
except RequestError as e:
body['data_geometry'] = None
es.index(index=es_index, doc_type=es_type, id=body['scene_id'],
body=body)
except Exception as e:
        logger.error('Unhandled error occurred while writing to elasticsearch')
logger.error('Details: %s' % e.__str__())
def dynamodb_updater(product_dir, metadata, **kwargs):
client = boto3.client('dynamodb', region_name='us-east-1')
client.put_item(
TableName='landsat',
Item={
'scene_id': {
'S': metadata['sceneID']
},
'body': {
'S': json.dumps(meta_constructor(metadata))
}
},
ReturnValues='NONE',
ReturnConsumedCapacity='NONE',
ReturnItemCollectionMetrics='NONE'
)
print('Posted %s to DynamoDB' % metadata['sceneID'])
def thumbnail_writer(product_dir, metadata, **kwargs):
"""
Extra function to download images from USGS, then upload to S3 and call
the ES metadata writer afterwards.
"""
# Download original thumbnail
output_file = metadata['sceneID'] + '.jpg'
thumbnail = 'https://' + thumbs_bucket_name + '.s3.amazonaws.com/' + output_file
new_thumb = requests.get(thumbnail)
if new_thumb.status_code != 200:
# Upload thumbnail to S3
thumbs_bucket_name2 = os.getenv('THUMBS_BUCKETNAME', 'ad-thumbnails')
try:
print('uploading %s' % output_file)
c = boto.connect_s3()
b = c.get_bucket(thumbs_bucket_name2)
k = Key(b, name=output_file)
k.set_metadata('Content-Type', 'image/jpeg')
r = requests.get(metadata['browseURL'])
k.set_contents_from_string(r.content, policy='public-read')
except Exception as e:
print(e)
# Update metadata record
metadata['thumbnail'] = thumbnail
dynamodb_updater(product_dir, meta_constructor(metadata), **kwargs)
elasticsearch_updater(product_dir, metadata, **kwargs)
return
def file_writer(product_dir, metadata):
body = meta_constructor(metadata)
if not os.path.exists(product_dir):
os.makedirs(product_dir)
f = open(os.path.join(product_dir, body['scene_id'] + '.json'), 'w')
f.write(json.dumps(body))
logger.info('saving to disk at %s' % product_dir)
f.close()
def s3_writer(product_dir, metadata):
# make sure product_dir doesn't start with slash (/) or dot (.)
if product_dir.startswith('.'):
product_dir = product_dir[1:]
if product_dir.startswith('/'):
product_dir = product_dir[1:]
body = meta_constructor(metadata)
key = os.path.join(product_dir, body['scene_id'] + '.json')
s3.Object(bucket_name, key).put(Body=json.dumps(body), ACL='public-read', ContentType='application/json')
    logger.info('saving to s3 at %s' % key)
def last_updated(today):
""" Gets the latest time a product added to Elasticsearch """
bucket = s3.Bucket(bucket_name)
start_day = today.day
start_month = today.month
yr_counter = 0
while True:
m_counter = 0
year = today.year - yr_counter
if year < 2015:
break
while True:
month = start_month - m_counter
if month == 0:
start_month = 12
break
d_counter = 0
while True:
day = start_day - d_counter
if day == 0:
start_day = 31
break
path = os.path.join(str(year), str(month), str(day))
logger.info('checking %s' % path)
objs = bucket.objects.filter(Prefix=path).limit(1)
if list(objs):
return date(year, month, day)
d_counter += 1
m_counter += 1
yr_counter += 1
return None
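# Illustrative key layout assumed by the walk above (comment only): objects
# are stored under "YYYY/M/D/" prefixes, e.g. "2017/1/9/<scene_id>.json", so
# the newest non-empty day prefix marks the most recent update.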
@click.command()
@click.argument('ops', metavar='<operations: choices: s3 | es | disk | thumbs>', nargs=-1)
@click.option('--start', default=None, help='Start Date. Format: YYYY-MM-DD')
@click.option('--end', default=None, help='End Date. Format: YYYY-MM-DD')
@click.option('--es-host', default='localhost', help='Elasticsearch host address')
@click.option('--es-port', default=9200, type=int, help='Elasticsearch port number')
@click.option('--folder', default='.', help='Destination folder if is written to disk')
@click.option('--download', is_flag=True,
help='Sets the updater to download the metadata file first instead of streaming it')
@click.option('--aws', is_flag=True, default=False,
help='Uses AWS STS to obtain aws credentials for accessing the various services')
@click.option('--download-folder', default=None,
help='The folder to save the downloaded metadata to. Defaults to a temp folder')
@click.option('-v', '--verbose', is_flag=True)
@click.option('--concurrency', default=20, type=int, help='Process concurrency. Default=20')
def main(ops, start, end, es_host, es_port, folder, download,
aws, download_folder, verbose, concurrency):
if not ops:
raise click.UsageError('No Argument provided. Use --help if you need help')
accepted_args = {
'es': elasticsearch_updater,
's3': s3_writer,
'disk': file_writer,
'thumbs': thumbnail_writer,
'db': dynamodb_updater
}
writers = []
for op in ops:
if op in accepted_args.keys():
writers.append(accepted_args[op])
else:
raise click.UsageError('Operation (%s) is not supported' % op)
logger.setLevel(logging.DEBUG)
ch = logging.StreamHandler()
if verbose:
ch.setLevel(logging.INFO)
else:
ch.setLevel(logging.ERROR)
formatter = logging.Formatter('%(message)s')
ch.setFormatter(formatter)
logger.addHandler(ch)
if 'es' in ops or 'thumbs' in ops:
create_index(es_index, es_type, es_host, es_port, aws=aws)
if not start and not end:
delta = timedelta(days=3)
start = date.today() - delta
start = '{0}-{1}-{2}'.format(start.year, start.month, start.day)
csv_reader(folder, writers, start_date=start, end_date=end, download=download, download_path=download_folder,
num_worker_threads=concurrency, es_host=es_host, es_port=es_port, aws=aws)
if __name__ == '__main__':
main()
|
{
"content_hash": "082517dc0aaf7ae5f9a05b05df8ab7c7",
"timestamp": "",
"source": "github",
"line_count": 324,
"max_line_length": 113,
"avg_line_length": 31.734567901234566,
"alnum_prop": 0.589768527523828,
"repo_name": "sat-utils/landsat8-metadata",
"id": "55c1d4a841e9f878b2cbf40b92bd8bd6e5d0ef61",
"size": "10282",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "main.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "17095"
}
],
"symlink_target": ""
}
|
from flask import Flask, render_template
app = Flask(__name__)
@app.route('/')
@app.route('/<name>')
def hello(name=None):
return render_template('main.html', name=name)
if __name__ == '__main__':
app.run()
|
{
"content_hash": "31a79b277b3a416029a3fefc2fec1d30",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 50,
"avg_line_length": 16.923076923076923,
"alnum_prop": 0.6090909090909091,
"repo_name": "mkhuthir/learnPython",
"id": "089328fc0638cec476d994177eaf6c4561919c7f",
"size": "244",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Book_learning-python-r1.1/ch10/flask/main.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7706"
}
],
"symlink_target": ""
}
|
import sys
import requests
import logging
import ssl
import json
import pkg_resources
from requests.adapters import HTTPAdapter
try:
from requests.packages.urllib3.poolmanager import PoolManager
except ImportError:
from urllib3.poolmanager import PoolManager
from qds_sdk.retry import retry
from qds_sdk.exception import *
log = logging.getLogger("qds_connection")
"""
see http://stackoverflow.com/questions/14102416/python-requests-requests-exceptions-sslerror-errno-8-ssl-c504-eof-occurred
"""
class MyAdapter(HTTPAdapter):
def init_poolmanager(self, connections, maxsize,
block=False):
self.poolmanager = PoolManager(num_pools=connections,
maxsize=maxsize,
block=block,
ssl_version=ssl.PROTOCOL_TLSv1)
class Connection:
def __init__(self, auth, base_url, skip_ssl_cert_check, reuse=True):
self.auth = auth
self.base_url = base_url
self.skip_ssl_cert_check = skip_ssl_cert_check
self._headers = {'User-Agent': 'qds-sdk-py-%s' % pkg_resources.get_distribution("qds-sdk").version,
'Content-Type': 'application/json'}
self.reuse = reuse
if reuse:
self.session = requests.Session()
self.session.mount('https://', MyAdapter())
@retry((RetryWithDelay, requests.Timeout), tries=6, delay=30, backoff=2)
def get_raw(self, path, params=None):
return self._api_call_raw("GET", path, params=params)
@retry((RetryWithDelay, requests.Timeout), tries=6, delay=30, backoff=2)
def get(self, path, params=None):
return self._api_call("GET", path, params=params)
def put(self, path, data=None):
return self._api_call("PUT", path, data)
def post(self, path, data=None):
return self._api_call("POST", path, data)
def delete(self, path, data=None):
return self._api_call("DELETE", path, data)
def _api_call_raw(self, req_type, path, data=None, params=None):
url = self.base_url.rstrip('/') + '/' + path
if self.reuse:
x = self.session
else:
x = requests
kwargs = {'headers': self._headers, 'auth': self.auth, 'verify': not self.skip_ssl_cert_check}
if data:
kwargs['data'] = json.dumps(data)
if params:
kwargs['params'] = params
log.info("[%s] %s" % (req_type, url))
log.info("Payload: %s" % json.dumps(data, indent=4))
log.info("Params: %s" % params)
if req_type == 'GET':
r = x.get(url, timeout=300, **kwargs)
elif req_type == 'POST':
r = x.post(url, timeout=300, **kwargs)
elif req_type == 'PUT':
r = x.put(url, timeout=300, **kwargs)
elif req_type == 'DELETE':
r = x.delete(url, timeout=300, **kwargs)
else:
            raise NotImplementedError(req_type)
self._handle_error(r)
return r
def _api_call(self, req_type, path, data=None, params=None):
return self._api_call_raw(req_type, path, data=data, params=params).json()
def _handle_error(self, response):
"""Raise exceptions in response to any http errors
Args:
response: A Response object
Raises:
BadRequest: if HTTP error code 400 returned.
UnauthorizedAccess: if HTTP error code 401 returned.
ForbiddenAccess: if HTTP error code 403 returned.
ResourceNotFound: if HTTP error code 404 is returned.
MethodNotAllowed: if HTTP error code 405 is returned.
ResourceConflict: if HTTP error code 409 is returned.
ResourceInvalid: if HTTP error code 422 is returned.
ClientError: if HTTP error code falls in 401 - 499.
ServerError: if HTTP error code falls in 500 - 599.
ConnectionError: if unknown HTTP error code returned.
"""
code = response.status_code
if 200 <= code < 400:
return
if code == 400:
sys.stderr.write(response.text + "\n")
raise BadRequest(response)
elif code == 401:
sys.stderr.write(response.text + "\n")
raise UnauthorizedAccess(response)
elif code == 403:
sys.stderr.write(response.text + "\n")
raise ForbiddenAccess(response)
elif code == 404:
sys.stderr.write(response.text + "\n")
raise ResourceNotFound(response)
elif code == 405:
sys.stderr.write(response.text + "\n")
raise MethodNotAllowed(response)
elif code == 409:
sys.stderr.write(response.text + "\n")
raise ResourceConflict(response)
elif code == 422:
sys.stderr.write(response.text + "\n")
raise ResourceInvalid(response)
elif code in (449, 502, 503, 504):
sys.stderr.write(response.text + "\n")
raise RetryWithDelay(response)
elif 401 <= code < 500:
sys.stderr.write(response.text + "\n")
raise ClientError(response)
elif 500 <= code < 600:
sys.stderr.write(response.text + "\n")
raise ServerError(response)
else:
raise ConnectionError(response)
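# A minimal usage sketch (illustrative only; the auth object and endpoint are
# placeholders, not values this module defines):
def _example_connection():
    auth = ('user', 'api-token')   # hypothetical requests-style auth tuple
    conn = Connection(auth, 'https://api.example.com/api/v1.2',
                      skip_ssl_cert_check=False)
    return conn.get('clusters')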
|
{
"content_hash": "462632e013e4ef866e524c7b54743148",
"timestamp": "",
"source": "github",
"line_count": 153,
"max_line_length": 122,
"avg_line_length": 35.18300653594771,
"alnum_prop": 0.582203232398291,
"repo_name": "tanishgupta1/qds-sdk-py",
"id": "bd0848c71d8f169217eda2683a268171875471a2",
"size": "5383",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "build/lib/qds_sdk/connection.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "495330"
}
],
"symlink_target": ""
}
|
from __future__ import with_statement
from cms.test_utils.compat import skipIf
from cms.test_utils.testcases import CMSTestCase
from cms.test_utils.util.context_managers import TemporaryDirectory
from sphinx.application import Sphinx
import cms
import os
import socket
from cms.utils.compat.string_io import StringIO
ROOT_DIR = os.path.dirname(cms.__file__)
DOCS_DIR = os.path.abspath(os.path.join(ROOT_DIR, u'..', u'docs'))
def has_no_internet():
try:
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(('4.4.4.2', 80))
s.send(b"hello")
except socket.error: # no internet
return True
return False
class DocsTestCase(CMSTestCase):
"""
Test docs building correctly for HTML
"""
@skipIf(has_no_internet(), "No internet")
def test_html(self):
nullout = StringIO()
with TemporaryDirectory() as OUT_DIR:
app = Sphinx(
DOCS_DIR,
DOCS_DIR,
OUT_DIR,
OUT_DIR,
"html",
warningiserror=True,
status=nullout,
)
try:
app.build()
except:
print(nullout.getvalue())
raise
|
{
"content_hash": "010a46ade15da355ce796775c6d5989f",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 67,
"avg_line_length": 26.829787234042552,
"alnum_prop": 0.578112609040444,
"repo_name": "SurfasJones/icecream-info",
"id": "b59c093da00465df420f47fe9d95222bd6202193",
"size": "1285",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "icecream/lib/python2.7/site-packages/cms/tests/docs.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "5939"
},
{
"name": "CSS",
"bytes": "288937"
},
{
"name": "JavaScript",
"bytes": "589933"
},
{
"name": "PHP",
"bytes": "2274"
},
{
"name": "Python",
"bytes": "18137514"
},
{
"name": "Ruby",
"bytes": "990"
},
{
"name": "Shell",
"bytes": "10274"
},
{
"name": "TeX",
"bytes": "56626"
},
{
"name": "XSLT",
"bytes": "5122"
}
],
"symlink_target": ""
}
|
class DominoesPool():
def __init__(self):
#self.known_bricks_in_pool = []
#self.possible_bricks_in_pool = []
#self.number_of_bricks_in_pool = 0
pass
|
{
"content_hash": "80f28ed4547c1956edf7bbecd9cab92c",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 38,
"avg_line_length": 27.666666666666668,
"alnum_prop": 0.6144578313253012,
"repo_name": "theysconator/DominoesAI",
"id": "eea8f098399dc2aa3880183e1ce9a2b70aa8bb9d",
"size": "166",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dominoes/pool.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "3540"
}
],
"symlink_target": ""
}
|
from enaml.widgets.raw_widget import ProxyRawWidget
from .qt_control import QtControl
class QtRawWidget(QtControl, ProxyRawWidget):
""" A Qt implementation of an Enaml ProxyRawWidget.
"""
def create_widget(self):
""" Create the underlying widget for the control.
"""
self.widget = self.declaration.create_widget(self.parent_widget())
#--------------------------------------------------------------------------
# ProxyRawWidget API
#--------------------------------------------------------------------------
def get_widget(self):
""" Retrieve the underlying toolkit widget.
"""
return self.widget
|
{
"content_hash": "12bc754c8bda80addd9eb5115781a56c",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 79,
"avg_line_length": 29.52173913043478,
"alnum_prop": 0.508100147275405,
"repo_name": "ContinuumIO/ashiba",
"id": "a1dc104cd6953d2c08caa32b2cad44d6e23c0a23",
"size": "1029",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "enaml/enaml/qt/qt_raw_widget.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "4560"
},
{
"name": "C",
"bytes": "738"
},
{
"name": "C++",
"bytes": "77464"
},
{
"name": "CSS",
"bytes": "2286"
},
{
"name": "Emacs Lisp",
"bytes": "1210"
},
{
"name": "HTML",
"bytes": "4891"
},
{
"name": "JavaScript",
"bytes": "17243"
},
{
"name": "Makefile",
"bytes": "4590"
},
{
"name": "Python",
"bytes": "3241535"
},
{
"name": "Shell",
"bytes": "119"
},
{
"name": "VimL",
"bytes": "1821"
}
],
"symlink_target": ""
}
|
from setuptools import setup, find_packages
setup(
name = "liboozie",
version = "3.6.0",
url = 'http://github.com/cloudera/hue',
description = "Oozie Libraries",
packages = find_packages('src'),
package_dir = {'': 'src' },
install_requires = ['setuptools', 'desktop'],
# Even libraries need to be registered as desktop_apps,
# if they have configuration, like this one.
entry_points = { 'desktop.sdk.lib': 'liboozie=liboozie' },
)
|
{
"content_hash": "53991b279f023fbc89c625312020c9ca",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 64,
"avg_line_length": 35.142857142857146,
"alnum_prop": 0.6138211382113821,
"repo_name": "vitan/hue",
"id": "3c04d2a67b20438d8c9a0a250554fc7304ec9a45",
"size": "1262",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "desktop/libs/liboozie/setup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "207947"
},
{
"name": "C",
"bytes": "2350097"
},
{
"name": "C++",
"bytes": "178518"
},
{
"name": "CSS",
"bytes": "502213"
},
{
"name": "Emacs Lisp",
"bytes": "14875"
},
{
"name": "GAP",
"bytes": "11337"
},
{
"name": "Genshi",
"bytes": "946"
},
{
"name": "Groff",
"bytes": "14877"
},
{
"name": "HTML",
"bytes": "21550731"
},
{
"name": "Java",
"bytes": "3080564"
},
{
"name": "JavaScript",
"bytes": "2677283"
},
{
"name": "Makefile",
"bytes": "86291"
},
{
"name": "Mako",
"bytes": "2038826"
},
{
"name": "Myghty",
"bytes": "936"
},
{
"name": "PLSQL",
"bytes": "13774"
},
{
"name": "Perl",
"bytes": "161801"
},
{
"name": "PigLatin",
"bytes": "328"
},
{
"name": "Prolog",
"bytes": "4590"
},
{
"name": "Python",
"bytes": "31475669"
},
{
"name": "Scala",
"bytes": "64604"
},
{
"name": "Shell",
"bytes": "48346"
},
{
"name": "Smarty",
"bytes": "130"
},
{
"name": "TeX",
"bytes": "129526"
},
{
"name": "Thrift",
"bytes": "100994"
},
{
"name": "XSLT",
"bytes": "342237"
}
],
"symlink_target": ""
}
|
"""
This API can be used to interact with and modify Motorola S-Record files.
.. moduleauthor:: Yves-Noel Weweler <y.weweler@gmail.com>
"""
__author__ = 'Yves-Noel Weweler <y.weweler@gmail.com>'
__status__ = 'Development'
__version__ = '0.0.1'
__all__ = ('SRECError', 'SRecordParsingError', 'NotSRecFileError')
class SRECError(Exception):
"""
General error of this module. All other exceptions are derived from
this class.
"""
pass
class SRecordParsingError(SRECError):
"""
Parsing a single S-Record failed.
"""
pass
class NotSRecFileError(SRECError):
"""
Given file is not a Motorola S-Record file.
"""
pass
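# A minimal usage sketch (comment only; parse_srec_file is a hypothetical
# helper, not part of this package): catch the base class to handle any
# module error, or a subclass for finer-grained handling.
#   try:
#       records = parse_srec_file('firmware.srec')
#   except NotSRecFileError:
#       ...  # not an S-Record file at all
#   except SRecordParsingError:
#       ...  # a single record failed to parse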
|
{
"content_hash": "82463e35dc81a663045e26be7143281b",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 73,
"avg_line_length": 20.875,
"alnum_prop": 0.6541916167664671,
"repo_name": "yweweler/pysrec",
"id": "b3bb3d717225caf804453038f5d5578fc48c23a0",
"size": "668",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "zwuenf/pysrec/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "16755"
}
],
"symlink_target": ""
}
|
__author__ = "Christian Kongsgaard"
__license__ = 'MIT'
# -------------------------------------------------------------------------------------------------------------------- #
# IMPORTS
# Modules
import pytest
import numpy as np
# RiBuild Modules
from delphin_6_automation.delphin_setup import weather_modeling
from delphin_6_automation.file_parsing import weather_parser
# -------------------------------------------------------------------------------------------------------------------- #
# TEST WEATHER MODELS
@pytest.mark.skip('Catch ration model is not implemented correctly')
def test_rain_model_1(test_folder):
rain = weather_parser.ccd_to_list(test_folder + '/weather/vertical_rain.ccd')
wind_speed = weather_parser.ccd_to_list(test_folder + '/weather/wind_speed.ccd')
wind_direction = weather_parser.ccd_to_list(test_folder + '/weather/wind_direction.ccd')
wall_location = {'height': 5.0, 'width': 5.0}
wdr = weather_modeling.driving_rain(rain, wind_direction, wind_speed, wall_location, 90, 0)
assert rain == wdr
def test_rain_model_2(test_folder):
rain = weather_parser.ccd_to_list(test_folder + '/weather/vertical_rain.ccd')
wind_speed = weather_parser.ccd_to_list(test_folder + '/weather/wind_speed.ccd')
wind_direction = weather_parser.ccd_to_list(test_folder + '/weather/wind_direction.ccd')
wall_location = {'height': 5.0, 'width': 5.0}
wdr = weather_modeling.driving_rain(rain, wind_direction, wind_speed, wall_location, 90, 0, 1)
assert rain == wdr
def test_solar_radiation(test_folder):
    # TODO - Create some assertions
diff_rad = np.array(weather_parser.ccd_to_list(test_folder + '/weather/diffuse_radiation.ccd'))
dir_rad = np.array(weather_parser.ccd_to_list(test_folder + '/weather/direct_radiation.ccd'))
radiation = diff_rad + dir_rad
short_wave = weather_modeling.short_wave_radiation(radiation, -2.083, 57.167, 0, 230)
|
{
"content_hash": "142bc70af0261b62ca4178144116ad6c",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 120,
"avg_line_length": 40.25,
"alnum_prop": 0.6211180124223602,
"repo_name": "thp44/delphin_6_automation",
"id": "cdc0ebf72373852bab673813f035f387c5e96632",
"size": "1932",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pytest/test_weather_models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "137563"
}
],
"symlink_target": ""
}
|
"""
Test the RR Lyrae helper functions.
"""
from __future__ import absolute_import, unicode_literals, division, print_function
__author__ = "adrn <adrn@astro.columbia.edu>"
# Standard library
import os, sys
# Third-party
import astropy.units as u
import numpy as np
import pytest
from ..core import *
from ..rrlyrae import *
def test_gaia_rv_error():
d = np.linspace(1.,50.,100)*u.kpc
rv_errs = gaia_rv_error(d)
def test_gaia_pm_error():
d = np.linspace(1.,50.,100)*u.kpc
pm_errs = gaia_pm_error(d)
vtan_errs = pm_errs.to(u.rad/u.yr).value/u.yr*d
vtan_errs = vtan_errs.to(u.km/u.s)
|
{
"content_hash": "923f6d5dcbecea85ae3568d0cd39bb3a",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 82,
"avg_line_length": 20,
"alnum_prop": 0.6596774193548387,
"repo_name": "abonaca/gary",
"id": "a9c9527edaef60bdfa2ae02e2db16bf0d8bb2651",
"size": "636",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gary/observation/tests/test_rrlyrae.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "67332"
},
{
"name": "C++",
"bytes": "7004"
},
{
"name": "Python",
"bytes": "490956"
}
],
"symlink_target": ""
}
|
from gftools import fonts_public_pb2
import enum
import re
class Udhr():
def __init__(self, key, iso639_3, iso15924, bcp47, direction, ohchr, stage, loc, name):
self.key = key
self.iso639_3 = iso639_3
self.iso15924 = iso15924
self.bcp47 = bcp47
self.direction = direction
self.ohchr = ohchr
self.stage = stage
self.loc = loc
self.name = name
self.title = None
self.preamble = None
self.articles = []
def Parse(self, translation_data):
if translation_data is None or self.stage < 2:
return
if translation_data.find('./{*}title') is not None:
self.title = translation_data.find('./{*}title').text
preamble_data = translation_data.find('./{*}preamble')
if preamble_data is not None:
if preamble_data.find('./{*}title') is not None:
self.preamble = {
'title':
preamble_data.find('./{*}title').text,
'content': [
para.text for para in preamble_data.findall('./{*}para')
],
}
articles_data = translation_data.findall('./{*}article')
for article_data in articles_data:
title_data = article_data.find('./{*}title')
article = {
'id':
int(article_data.get('number')),
'title': None if title_data is None else title_data.text,
'content': [
para.text for para in article_data.findall('./{*}para')
],
}
self.articles.append(article)
def LoadArticleOne(self, article_one):
self.articles.append({'id': 0, 'title': None, 'content': [article_one]})
def GetSampleTexts(self):
extractor = SampleTextExtractor(self)
return extractor.GetSampleTexts()
class SampleTextExtractor():
class TextType(enum.Enum):
GLYPHS = 1
WORD = 2
PHRASE = 3
SENTENCE = 4
PARAGRAPH = 5
PASSAGE = 6
def __init__(self, udhr):
self._udhr = udhr
self._glyphs = iter(self._GetGlyphs())
self._words = iter(self._GetWords())
self._paragraphs = iter(self._GetParagraphs())
self._phrase_history = set()
self._non_word_regex = re.compile(r'[^\w]+')
self._space_regex = re.compile(r'\s+')
self._non_space_regex = re.compile(r'[^\s]+')
self._non_word_space_regex = re.compile(r'[^\w\s]+')
self._any_regex = re.compile(r'.')
def _DisplayLength(self, s):
"""Returns length of given string. Omits combining characters.
Some entire scripts will not be counted; in those cases, the raw length of
the string is returned.
"""
word_space_length = len(self._non_word_space_regex.sub('', s))
space_length = len(self._non_space_regex.sub('', s))
if word_space_length == space_length:
return len(s)
return word_space_length
def _GetGlyphs(self):
seen = set()
for article in self._udhr.articles:
for para in article['content']:
for ch in (self._non_word_regex.sub('', para) or self._space_regex.sub('', para)):
ch = ch.lower()
if ch not in seen:
seen.add(ch)
yield ch
def _GetWords(self):
if self._space_regex.search(self._udhr.articles[0]['content'][0]) is not None:
splitter = self._space_regex
else:
splitter = self._non_word_regex
seen = set()
for article in self._udhr.articles:
for para in article['content']:
for s in splitter.split(para):
if s not in seen:
seen.add(s)
yield s
def _GetParagraphs(self):
if self._udhr.preamble is not None:
for para in self._udhr.preamble['content']:
yield para
for article in self._udhr.articles:
for para in article['content']:
yield para
def _ExtractGlyphs(self, min_chars, max_chars):
s = ''
for ch in self._glyphs:
s += ch.upper()
if len(s) >= min_chars:
break
if ch != ch.upper():
s += ch
if len(s) >= min_chars:
break
return s
def _ExtractWord(self, min_chars, max_chars):
for iterator in [self._words, self._GetWords()]:
for w in iterator:
if w is None:
continue
if min_chars <= self._DisplayLength(w) <= max_chars:
return w
# Fallback to using multiple words for languages with very small words
return self._ExtractPhrase(min_chars, max_chars)
def _ExtractPhrase(self, min_chars, max_chars):
for iterator in [self._paragraphs, self._GetParagraphs()]:
for para in iterator:
if para is None:
continue
for regex in [self._any_regex, self._space_regex, self._non_word_regex]:
breaks = [-1]
for match in regex.finditer(para, min_chars):
breaks.append(match.start())
phrase = para[breaks[0]+1:breaks[len(breaks)-1]]
p_size = self._DisplayLength(phrase)
while p_size > max_chars and len(breaks) > 1:
breaks.pop()
phrase = para[breaks[0]+1:breaks[len(breaks)-1]]
p_size = self._DisplayLength(phrase)
if min_chars <= p_size and phrase not in self._phrase_history:
self._phrase_history.add(phrase)
return phrase
return self._ExtractParagraph(min_chars, max_chars)
def _ExtractSentence(self, min_chars, max_chars):
    # Sentence delimitation may differ between scripts, so tokenizing on
    # spaces would be unreliable. Prefer to use _ExtractPhrase.
return self._ExtractPhrase(min_chars, max_chars)
def _ExtractParagraph(self, min_chars, max_chars):
for iterator in [self._paragraphs, self._GetParagraphs()]:
for para in iterator:
if para is None:
continue
if min_chars <= self._DisplayLength(para) <= max_chars:
return para
# Paragraphs likely insufficient length; try combining into passages
return self._ExtractPassage(min_chars, max_chars)
def _ExtractPassage(self, min_chars, max_chars):
p = []
p_size = 0
while p_size < min_chars:
for iterator in [self._paragraphs, self._GetParagraphs()]:
for para in iterator:
if para is None:
continue
p.append(para)
p_size = self._DisplayLength(' '.join(p))
if max_chars < p_size:
p.pop()
elif min_chars <= p_size:
return '\n'.join(p)
assert len(p) > 0, 'Unable to extract passage: ' + self._udhr.key
if len(p) == 0:
p.append([p for p in self._GetParagraphs()][0])
return '\n'.join(p)
def _Get(self, text_type, **kwargs):
if 'char_count' in kwargs:
min_chars = kwargs['char_count']
max_chars = kwargs['char_count']
else:
min_chars = kwargs['min_chars']
max_chars = kwargs['max_chars']
if text_type == self.TextType.GLYPHS:
return self._ExtractGlyphs(min_chars, max_chars)
if text_type == self.TextType.WORD:
return self._ExtractWord(min_chars, max_chars)
if text_type == self.TextType.PHRASE:
return self._ExtractPhrase(min_chars, max_chars)
if text_type == self.TextType.SENTENCE:
return self._ExtractSentence(min_chars, max_chars)
if text_type == self.TextType.PARAGRAPH:
return self._ExtractParagraph(min_chars, max_chars)
if text_type == self.TextType.PASSAGE:
return self._ExtractPassage(min_chars, max_chars)
    raise Exception('Unsupported text type: %s' % text_type)
def GetSampleTexts(self):
sample_text = fonts_public_pb2.SampleTextProto()
sample_text.masthead_full = self._Get(self.TextType.GLYPHS, char_count = 4)
sample_text.masthead_partial = self._Get(self.TextType.GLYPHS, char_count = 2)
sample_text.styles = self._Get(self.TextType.PHRASE, min_chars = 40, max_chars = 60)
sample_text.tester = self._Get(self.TextType.PHRASE, min_chars = 60, max_chars = 90)
sample_text.poster_sm = self._Get(self.TextType.PHRASE, min_chars = 10, max_chars = 17)
sample_text.poster_md = self._Get(self.TextType.PHRASE, min_chars = 6, max_chars = 12)
sample_text.poster_lg = self._Get(self.TextType.WORD, min_chars = 3, max_chars = 8)
sample_text.specimen_48 = self._Get(self.TextType.SENTENCE, min_chars = 50, max_chars = 80)
sample_text.specimen_36 = self._Get(self.TextType.PARAGRAPH, min_chars = 100, max_chars = 120)
sample_text.specimen_32 = self._Get(self.TextType.PARAGRAPH, min_chars = 140, max_chars = 180)
sample_text.specimen_21 = self._Get(self.TextType.PASSAGE, min_chars = 300, max_chars = 500)
sample_text.specimen_16 = self._Get(self.TextType.PASSAGE, min_chars = 550, max_chars = 750)
return sample_text
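# A minimal usage sketch (illustrative only; field values are placeholders):
def _example_sample_texts():
    udhr = Udhr(key='eng', iso639_3='eng', iso15924='Latn', bcp47='en',
                direction='ltr', ohchr='eng', stage=4, loc='en', name='English')
    udhr.LoadArticleOne('All human beings are born free and equal in dignity '
                        'and rights.')
    return udhr.GetSampleTexts()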
|
{
"content_hash": "d656fcd9c9b8e7e6f678ed3d401c7c08",
"timestamp": "",
"source": "github",
"line_count": 240,
"max_line_length": 98,
"avg_line_length": 35.9125,
"alnum_prop": 0.6139923424991298,
"repo_name": "googlefonts/gftools",
"id": "991e47396cb1c3d1e512bc8418a79fbe48d4297c",
"size": "8619",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "Lib/gftools/util/udhr.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "19620"
},
{
"name": "PureBasic",
"bytes": "432"
},
{
"name": "Python",
"bytes": "663599"
}
],
"symlink_target": ""
}
|
try:
from collections import OrderedDict # NOQA
except ImportError: # Python < 2.7
from ordereddict import OrderedDict # NOQA
# Import simplejson if we have it (Python 2.6), and use json from the
# standard library otherwise.
#
# Note: Python 2.6 does have a JSON library, but it lacks `object_pairs_hook`
# as a keyword argument to `json.loads`, so we still need simplejson on
# Python 2.6.
import sys
if sys.version_info < (2, 7):
import simplejson as json # NOQA
else:
import json # NOQA
|
{
"content_hash": "3009c22661439d6bb374d6459b713bfb",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 77,
"avg_line_length": 30.235294117647058,
"alnum_prop": 0.7101167315175098,
"repo_name": "ansible/tower-cli",
"id": "9e922e429bd689190580fd828c6525764ab943c0",
"size": "1268",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tower_cli/compat.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "2029"
},
{
"name": "Python",
"bytes": "770993"
},
{
"name": "Shell",
"bytes": "1890"
}
],
"symlink_target": ""
}
|