prefix stringlengths 0 918k | middle stringlengths 0 812k | suffix stringlengths 0 962k |
|---|---|---|
# TODO arrays of dimension given by sizeof expression
item = ('a', 0)
# Remove one level of indirection for function types (CFUNCTYPE is
# already a pointer)
off = 0
for i, j in enumerate(item):
if type(j) is tuple and j and j[0] == 'f':
item = item[:i+1+off] + item[i+2+off:]
off -= 1
return item
for c in type_string:
if c == '.':
buf = flush()
elif c == '(':
push() # Push param list
buf = flush()
push() # Push item
elif c == ',':
buf = flush()
pop() # Pop item
push() # Push item
elif c == ')':
buf = flush()
pop() # Pop item
pop() # Pop param list
else:
buf += c
flush()
type_tuple = finalize(stack[0])
return type_tuple
class SwigInterfaceHandler(object):
    """Collects top-level declarations parsed from a SWIG XML interface dump.

    Child-element callbacks append sub-handlers to ``self.cdecls`` /
    ``self.constants``; ``get_map`` flattens them into a name -> (kind, payload)
    mapping.
    """

    def __init__(self):
        self.name = None      # module name, from the 'name' attribute
        self.cdecls = []      # CDecl/Class/ClassForward/Enum handlers, in order
        self.constants = []   # ConstantHandler instances

    def attribute(self, attrs):
        # Only the module name is interesting at this level.
        if attrs['name'] == 'name':
            self.name = str(attrs['value'])

    def typemap(self, attrs):
        # Typemaps are irrelevant here; swallow the whole subtree.
        return IgnoreElementHandler()

    def cdecl(self, attrs):
        handler = CDeclHandler(attrs)
        self.cdecls.append(handler)
        return handler

    def constant(self, attrs):
        handler = ConstantHandler(attrs)
        self.constants.append(handler)
        return handler

    def class_(self, attrs):
        handler = ClassHandler(attrs)
        self.cdecls.append(handler)
        return handler

    def classforward(self, attrs):
        handler = ClassForwardHandler(attrs)
        self.cdecls.append(handler)
        return handler

    def enum(self, attrs):
        handler = EnumHandler(attrs)
        self.cdecls.append(handler)
        return handler

    def get_map(self):
        """Return a dict mapping declared names to (kind, payload) tuples.

        Enums and structs/unions are registered twice: once under their
        tagged name ('enum foo' / 'struct bar') and once under their
        typedef name.
        """
        type_map = {}
        for cdecl in self.cdecls:
            if cdecl.kind == 'typedef':
                # ('typedef', type)
                type_map[cdecl.name] = (cdecl.kind, cdecl.get_type(with_decl=True))
            elif cdecl.kind == 'enum':
                # ('enum', items)
                enum = (cdecl.kind, cdecl.get_items())
                type_map[cdecl.kind + ' ' + cdecl.name] = enum
                type_map[cdecl.get_tdname()] = enum
            elif cdecl.kind in ('struct', 'union'):
                # ('struct', variables) / ('union', variables)
                class_ = (cdecl.kind, cdecl.get_variables())
                type_map[cdecl.kind + ' ' + cdecl.name] = class_
                type_map[cdecl.get_tdname()] = class_
            elif cdecl.kind == 'function':
                # ('function', type)
                type_map[cdecl.name] = (cdecl.kind, cdecl.get_type(with_decl=True))
            elif cdecl.kind == 'variable':
                # ('variable', type)
                type_map[cdecl.name] = (cdecl.kind, cdecl.get_type())
            else:
                assert False, (cdecl.kind, cdecl.type, cdecl.name)
        # Constants: ('constant', value)
        for constant in self.constants:
            type_map[constant.name] = ('constant', constant.get_value())
        # Removed leftover debug pprint of the whole map (it wrote to stdout
        # on every call) and stopped shadowing the builtin 'map'.
        return type_map
class IgnoreElementHandler(object):
    """Sink handler: returned for XML elements whose subtree should be skipped."""
    pass
class ConstantHandler(object):
    """Handler for SWIG <constant> elements.

    Collects the constant's name, literal value and C type from
    'attribute' child elements.  (Repaired two corrupted source lines:
    the 'name' assignment and the comment in get_value.)
    """

    name = None   # constant name
    value = None  # literal value, as the string SWIG emitted
    type = None   # C type string, e.g. 'int'

    def __init__(self, attrs):
        pass

    def attribute(self, attrs):
        name = attrs['name']
        if name == 'name':
            self.name = str(attrs['value'])
        elif name == 'value':
            self.value = str(attrs['value'])
        elif name == 'type':
            self.type = str(attrs['value'])

    def get_value(self):
        """Return the value, evaluated to int for integer-typed constants."""
        if self.type in ('int', 'long'):
            # Yes, ugly and bad -- most C int constants can also be
            # parsed as Python expressions; e.g. "1L << 8".
            # SECURITY NOTE: eval() of generator output -- do not feed
            # untrusted XML through this.
            return int(eval(self.value))
        return self.value
class EnumHandler(object):
    """Handler for SWIG <enum> elements; collects <enumitem> children."""

    name = None      # tag name of the enum
    tdname = None    # typedef name, if any
    kind = 'enum'
    unnamed = False  # True once an 'unnamed' attribute has fixed the name

    def __init__(self, attrs):
        self.items = []  # EnumItemHandler instances, in declaration order

    def attribute(self, attrs):
        name = attrs['name']
        if name == 'name' and not self.unnamed:
            self.name = str(attrs['value'])
        elif name == 'unnamed':
            # The synthesized name of an anonymous enum wins over 'name'.
            self.name = str(attrs['value'])
            self.unnamed = True
        elif name == 'tdname':
            self.tdname = str(attrs['value'])

    def enumitem(self, attrs):
        handler = EnumItemHandler(attrs)
        self.items.append(handler)
        return handler

    def get_items(self):
        """Return ((name, value), ...) using C-style enum numbering."""
        items = []
        index = 0
        for item in self.items:
            try:
                # TODO parse enumvalueex properly
                index = int(item.value)
            except (TypeError, ValueError):
                # BUGFIX: item.value may be None (no explicit value seen),
                # which raised an uncaught TypeError before; treat it like a
                # non-trivial expression and continue from the previous value.
                index += 1
            items.append((item.name, index))
        return tuple(items)

    def get_tdname(self):
        """Prefer the typedef name; fall back to the tag name."""
        if self.tdname:
            return self.tdname
        else:
            return self.name
class EnumItemHandler(object):
    """Handler for a single SWIG <enumitem> element."""

    name = None   # item name ('name' or 'unnamed' attribute)
    value = None  # raw 'enumvalueex' expression string
    type = None   # C type string

    def __init__(self, attrs):
        pass

    def attribute(self, attrs):
        key = attrs['name']
        if key in ('name', 'unnamed'):
            self.name = str(attrs['value'])
        elif key == 'enumvalueex':
            self.value = str(attrs['value'])
        elif key == 'type':
            self.type = str(attrs['value'])

    def get_value(self):
        """Return the item's value, evaluated to int for integer types."""
        if self.type not in ('int', 'long'):
            return self.value
        # Yes, ugly and bad -- most C int constants can also be
        # parsed as Python expressions; e.g. "1L << 8".
        return int(eval(self.value))
class CDeclHandler(object):
    """Handler for SWIG <cdecl> elements (typedefs, functions, variables)."""

    name = None
    kind = None
    type = None
    decl = ''
    params = None  # list of ParmHandler, or None when no <parmlist> was seen

    # Attribute names copied verbatim from the XML element.
    _CAPTURED = ('name', 'kind', 'type', 'decl')

    def __init__(self, attrs):
        pass

    def attribute(self, attrs):
        key = attrs['name']
        if key in self._CAPTURED:
            setattr(self, key, str(attrs['value']))

    def parmlist(self, attrs):
        # The param list is shared with the sub-handler, which fills it.
        self.params = []
        return ParmListHandler(attrs, self.params)

    def get_params(self):
        """Return a tuple of parsed parameter types, or None."""
        if self.params is None:
            return None
        return tuple(param.get_type() for param in self.params)

    def get_type(self, with_decl=False):
        """Parse this declaration's type, optionally prefixed by its decl."""
        source = self.decl + self.type if with_decl else self.type
        return parse_type(source)

    def __str__(self):
        if not self.params:
            return self.name + ' : ' + self.type
        rendered = ', '.join(str(param) for param in self.params)
        return self.name + '(' + rendered + ') : ' + self.type
class ParmListHandler(object):
    """Handler for <parmlist>; appends each <parm> child to a shared list."""

    def __init__(self, attrs, params):
        # 'params' is owned by the parent CDeclHandler; we append into it.
        self.params = params

    def parm(self, attrs):
        sub = ParmHandler(attrs)
        self.params.append(sub)
        return sub
class ParmHandler(object):
    """Handler for a single <parm> element inside a <parmlist>."""

    name = ''
    type = None

    def __init__(self, attrs):
        pass

    def attribute(self, attrs):
        key = attrs['name']
        if key == 'name':
            self.name = str(attrs['value'])
        elif key == 'type':
            self.type = str(attrs['value'])

    def get_type(self):
        """Parse and return this parameter's type."""
        return parse_type(self.type)

    def __str__(self):
        return self.name + ' : ' + self.type
class ClassHandler(object):
name = ''
kind = None
tdname = None
unnamed = False
def __init__(self, attrs):
self.cdecls = []
def attribute(self, attrs):
name = attrs['name']
if name == 'name' and not self.unnamed:
self.name = str(attrs['value'])
elif name == 'unnamed':
self.name = str(attrs['value'])
self.unnamed = True
elif |
""" Specify the NetworkNode with its action, context-menus """
# Copyright (C) 2009-2010, Ecole Polytechnique Federale de Lausanne (EPFL) and
# University Hospital Center and University of Lausanne (UNIL-CHUV)
#
# Modified BSD License
# Standard library imports
import os
# Enthought library imports
from traits.api import Instance, Str, Any
from traitsui.api import TreeNode
from traitsui.menu import Menu, Action, Separator
# ConnectomeViewer imports
from cviewer.plugins.cff2.cnetwork import CNetwork
# Logging import
import logging

# Module-level logger, namespaced under the application's 'root' logger.
# (Repaired corrupted reference: 'loggin | g' -> 'logging'.)
logger = logging.getLogger('root.' + __name__)
class CNetworkTreeNode(TreeNode):
    """TreeNode for a CNetwork: supplies icons, label and a context menu."""

    # The object that contains the container ;^)
    parent = Any

    # The network object this tree node represents.
    node_for = [CNetwork]

    # Default icon names (group/leaf/open all share the same image here).
    # Name of group item icon
    icon_group = Str('home.png')
    # Name of leaf item icon
    icon_item = Str('home.png')
    # Name of opened group item icon
    icon_open = Str('home.png')

    # Trait name on the object used as the node label.
    label = 'dname'

    ###
    # Private Traits
    # activate / deactivate logic:
    # if the node is activated, this means that there exists a
    # corresponding RenderManager instance

    _ShowName = Instance(Action,
                         kw={'name': 'Show name',
                             'action': 'object.show_name',
                             'tooltip': 'Shows the network name'}, )

    _ChangeParameters = Instance(Action,
                                 kw={'name': 'Edge Parameters',
                                     'action': 'object._edge_parameters',
                                     'tooltip': 'Thresholding and Change Attributes',
                                     'enabled_when': 'object.loaded == True'}, )

    _RenderMatrixAction = Instance(Action,
                                   kw={'name': 'Connectome Matrix Viewer',
                                       'action': 'object.invoke_matrix_viewer',
                                       'tooltip': 'View the connectivity matrices',
                                       'enabled_when': 'object.loaded == True'}, )

    # The menu shown after right-click.
    menu = Instance(Menu, transient=True)

    def get_children(self, object):
        """ Get the object's children. """
        # NOTE(review): intentionally returns nothing at the moment; the
        # collation below is disabled.
        pass
        # Collate the window's views into categories.
        #return object.surfaces + object.volumes + object.tracks

    ######################################################################
    # Non-public interface
    ######################################################################
    def _menu_default(self):
        """ Standard menus for network nodes """
        # Currently empty -- actions above are not wired in yet.
        menu_actions = []
        return Menu( *menu_actions)
|
# __eq__ will fallback to NumPy, which warns, fails,
# then re-raises the original exception. So we just need to ignore.
@pytest.mark.filterwarnings("ignore:elementwise comp:DeprecationWarning")
@pytest.mark.filterwarnings("ignore:Converting timezone-aware:FutureWarning")
def test_scalar_comparison_tzawareness(
    self, op, other, tz_aware_fixture, box_with_array
):
    # Comparing a tz-aware datetime box against a tz-naive scalar must
    # raise TypeError, in both operand orders.
    tz = tz_aware_fixture
    dti = pd.date_range("2016-01-01", periods=2, tz=tz)
    dtarr = tm.box_expected(dti, box_with_array)
    msg = "Cannot compare tz-naive and tz-aware"
    with pytest.raises(TypeError, match=msg):
        op(dtarr, other)
    with pytest.raises(TypeError, match=msg):
        op(other, dtarr)
@pytest.mark.parametrize(
    "op",
    [operator.eq, operator.ne, operator.gt, operator.ge, operator.lt, operator.le],
)
def test_nat_comparison_tzawareness(self, op):
    # GH#19276
    # tzaware DatetimeIndex should not raise when compared to NaT
    dti = pd.DatetimeIndex(
        ["2014-01-01", pd.NaT, "2014-03-01", pd.NaT, "2014-05-01", "2014-07-01"]
    )
    # Every comparison against NaT is False, except '!=' which is all True.
    expected = np.array([op == operator.ne] * len(dti))
    result = op(dti, pd.NaT)
    tm.assert_numpy_array_equal(result, expected)
    # Same result whether the index is naive or tz-aware.
    result = op(dti.tz_localize("US/Pacific"), pd.NaT)
    tm.assert_numpy_array_equal(result, expected)
def test_dti_cmp_str(self, tz_naive_fixture):
    # GH#22074
    # regardless of tz, we expect these comparisons are valid
    # The string is parsed as a timestamp equal to the first element.
    tz = tz_naive_fixture
    rng = date_range("1/1/2000", periods=10, tz=tz)
    other = "1/1/2000"
    result = rng == other
    expected = np.array([True] + [False] * 9)
    tm.assert_numpy_array_equal(result, expected)
    result = rng != other
    expected = np.array([False] + [True] * 9)
    tm.assert_numpy_array_equal(result, expected)
    result = rng < other
    expected = np.array([False] * 10)
    tm.assert_numpy_array_equal(result, expected)
    result = rng <= other
    expected = np.array([True] + [False] * 9)
    tm.assert_numpy_array_equal(result, expected)
    result = rng > other
    expected = np.array([False] + [True] * 9)
    tm.assert_numpy_array_equal(result, expected)
    result = rng >= other
    expected = np.array([True] * 10)
    tm.assert_numpy_array_equal(result, expected)
def test_dti_cmp_list(self):
    # Comparing against a plain list behaves like index-vs-index.
    rng = date_range("1/1/2000", periods=10)
    result = rng == list(rng)
    expected = rng == rng
    tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize(
    "other",
    [
        pd.timedelta_range("1D", periods=10),
        pd.timedelta_range("1D", periods=10).to_series(),
        pd.timedelta_range("1D", periods=10).asi8.view("m8[ns]"),
    ],
    ids=lambda x: type(x).__name__,
)
def test_dti_cmp_tdi_tzawareness(self, other):
    # GH#22074
    # reversion test that we _don't_ call _assert_tzawareness_compat
    # when comparing against TimedeltaIndex
    dti = date_range("2000-01-01", periods=10, tz="Asia/Tokyo")

    # eq/ne are valid (and always unequal: different types)...
    result = dti == other
    expected = np.array([False] * 10)
    tm.assert_numpy_array_equal(result, expected)
    result = dti != other
    expected = np.array([True] * 10)
    tm.assert_numpy_array_equal(result, expected)

    # ...but ordering comparisons against timedeltas must raise.
    msg = "Invalid comparison between"
    with pytest.raises(TypeError, match=msg):
        dti < other
    with pytest.raises(TypeError, match=msg):
        dti <= other
    with pytest.raises(TypeError, match=msg):
        dti > other
    with pytest.raises(TypeError, match=msg):
        dti >= other
def test_dti_cmp_object_dtype(self):
    # GH#22074
    dti = date_range("2000-01-01", periods=10, tz="Asia/Tokyo")

    # Object-dtype copy of the same values compares equal elementwise.
    other = dti.astype("O")
    result = dti == other
    expected = np.array([True] * 10)
    tm.assert_numpy_array_equal(result, expected)

    # tz-aware vs tz-naive must raise, even through object dtype.
    other = dti.tz_localize(None)
    msg = "Cannot compare tz-naive and tz-aware"
    with pytest.raises(TypeError, match=msg):
        # tzawareness failure
        dti != other

    # Mixed Timestamp/Timedelta object array: eq works per element,
    # ordering raises on the Timedelta entries.
    other = np.array(list(dti[:5]) + [Timedelta(days=1)] * 5)
    result = dti == other
    expected = np.array([True] * 5 + [False] * 5)
    tm.assert_numpy_array_equal(result, expected)
    msg = ">=' not supported between instances of 'Timestamp' and 'Timedelta'"
    with pytest.raises(TypeError, match=msg):
        dti >= other
# ------------------------------------------------------------------
# Arithmetic
class TestDatetime64Arithmetic:
# This class is intended for "finished" tests that are fully parametrized
# over DataFrame/Series/Index/DatetimeArray
# -------------------------------------------------------------
# Addition/Subtraction of timedelta-like
def test_dt64arr_add_timedeltalike_scalar(
    self, tz_naive_fixture, two_hours, box_with_array
):
    # GH#22005, GH#22163 check DataFrame doesn't raise TypeError
    # Adding a timedelta-like scalar shifts every element forward.
    tz = tz_naive_fixture
    rng = pd.date_range("2000-01-01", "2000-02-01", tz=tz)
    expected = pd.date_range("2000-01-01 02:00", "2000-02-01 02:00", tz=tz)
    rng = tm.box_expected(rng, box_with_array)
    expected = tm.box_expected(expected, box_with_array)
    result = rng + two_hours
    tm.assert_equal(result, expected)
def test_dt64arr_iadd_timedeltalike_scalar(
    self, tz_naive_fixture, two_hours, box_with_array
):
    # In-place (+=) variant of the scalar-timedelta addition test above.
    tz = tz_naive_fixture
    rng = pd.date_range("2000-01-01", "2000-02-01", tz=tz)
    expected = pd.date_range("2000-01-01 02:00", "2000-02-01 02:00", tz=tz)
    rng = tm.box_expected(rng, box_with_array)
    expected = tm.box_expected(expected, box_with_array)
    rng += two_hours
    tm.assert_equal(rng, expected)
def test_dt64arr_sub_timedeltalike_scalar(
    self, tz_naive_fixture, two_hours, box_with_array
):
    # Subtracting a timedelta-like scalar shifts every element back.
    tz = tz_naive_fixture
    rng = pd.date_range("2000-01-01", "2000-02-01", tz=tz)
    # Repaired corrupted literal: end of range is "2000-01-31 22:00"
    # (matches the in-place variant below).
    expected = pd.date_range("1999-12-31 22:00", "2000-01-31 22:00", tz=tz)
    rng = tm.box_expected(rng, box_with_array)
    expected = tm.box_expected(expected, box_with_array)
    result = rng - two_hours
    tm.assert_equal(result, expected)
def test_dt64arr_isub_timedeltalike_scalar(
    self, tz_naive_fixture, two_hours, box_with_array
):
    # In-place (-=) variant of the scalar-timedelta subtraction test.
    # Repaired corrupted identifier: 'tz_n | aive_fixture' -> 'tz_naive_fixture'.
    tz = tz_naive_fixture
    rng = pd.date_range("2000-01-01", "2000-02-01", tz=tz)
    expected = pd.date_range("1999-12-31 22:00", "2000-01-31 22:00", tz=tz)
    rng = tm.box_expected(rng, box_with_array)
    expected = tm.box_expected(expected, box_with_array)
    rng -= two_hours
    tm.assert_equal(rng, expected)
# TODO: redundant with test_dt64arr_add_timedeltalike_scalar
def test_dt64arr_add_td64_scalar(self, box_with_array):
    # scalar timedeltas/np.timedelta64 objects
    # operate with np.timedelta64 correctly
    ser = Series([Timestamp("20130101 9:01"), Timestamp("20130101 9:02")])
    expected = Series(
        [Timestamp("20130101 9:01:01"), Timestamp("20130101 9:02:01")]
    )
    dtarr = tm.box_expected(ser, box_with_array)
    expected = tm.box_expected(expected, box_with_array)

    # Addition is commutative: check both operand orders.
    result = dtarr + np.timedelta64(1, "s")
    tm.assert_equal(result, expected)
    result = np.timedelta64(1, "s") + dtarr
    tm.assert_equal(result, expected)

    # Sub-second resolution (milliseconds) round-trips as well.
    expected = Series(
        [Timestamp("20130101 9:01:00.005"), Timestamp("20130101 9:02:00.005")]
    )
    expected = tm.box_expected(expected, box_with_array)
    result = dtarr + np.timedelta64(5, "ms")
    tm.assert_equal(result, expected)
    result = np.timedelta64(5, "ms") + dtarr
    tm.assert_equal(result, expected)
def test_dt64arr_add_sub_td64_nat(self, box_with_array, tz_naive_fixture):
# GH#23320 special handling for timedelta64(" |
if self.candidate is not None:
return True
if self.running is not None:
return True
return False
@staticmethod
def _meta_info():
    """Return YDK meta info for the EditConfigRpc.Input.Target node."""
    # Lazy import avoids a cycle with the generated meta tables.
    from ydk.services._meta import _ietf_netconf as meta
    return meta._meta_table['EditConfigRpc.Input.Target']['meta_info']
@property
def _common_path(self):
    """XPath of the edit-config input container."""
    return '/ietf-netconf:edit-config/ietf-netconf:input'
def is_config(self):
    ''' Returns True if this instance represents config data else returns False '''
    # Delegates to the parent; a detached node cannot answer the question.
    if self.parent is None:
        raise YPYError(error_msg='Parent reference is needed to determine if entity has configuration data')
    return self.parent.is_config()
def _has_data(self):
    """True when any leaf/container of the edit-config input is populated."""
    if not self.is_config():
        return False
    if self.config is not None:
        return True
    if self.default_operation is not None:
        return True
    if self.error_option is not None:
        return True
    # Containers count only when they themselves carry data.
    if self.target is not None and self.target._has_data():
        return True
    if self.test_option is not None:
        return True
    if self.url is not None:
        return True
    return False
@staticmethod
def _meta_info():
    """Return YDK meta info for the EditConfigRpc.Input node."""
    from ydk.services._meta import _ietf_netconf as meta
    return meta._meta_table['EditConfigRpc.Input']['meta_info']
@property
def _common_path(self):
    """XPath root of the edit-config RPC."""
    return '/ietf-netconf:edit-config'
def is_config(self):
    ''' Returns True if this instance represents config data else returns False '''
    # Top of the containment tree: always considered config context.
    return True
def _has_data(self):
    """True when the RPC's input subtree carries any data."""
    if not self.is_config():
        return False
    if self.input is not None and self.input._has_data():
        return True
    return False
@staticmethod
def _meta_info():
    """Return YDK meta info for the EditConfigRpc node."""
    from ydk.services._meta import _ietf_netconf as meta
    return meta._meta_table['EditConfigRpc']['meta_info']
class CopyConfigRpc(object):
"""
Create or replace an entire configuration datastore with the
contents of another complete configuration datastore.
.. attribute:: input
**type**\: :py:class:`Input <ietf_netconf.CopyConfigRpc.Input>`
"""
_prefix = 'nc'
_revision = '2011-06-01'
def __init__(self):
    # Wire the input container with a back-reference for is_config() walking.
    self.input = CopyConfigRpc.Input()
    self.input.parent = self
    self.is_rpc = True  # marks this object as an RPC, not config data
class Input(object):
"""
.. attribute:: source
Particular configuration to copy from
**type**\: :py:class:`Source <ietf_netconf.CopyConfigRpc.Input. | Source>`
.. attribute:: target
Particular configuration to copy to
**type**\: :py:class:`Target <ietf_netconf.CopyConfigRpc.Input.Target>`
.. attribute:: with_defaults
|
The explicit defaults processing mode requested
**type**\: :py:class:`WithDefaultsModeEnum <ietf_netconf_with_defaults.WithDefaultsModeEnum>`
"""
_prefix = 'nc'
_revision = '2011-06-01'
def __init__(self):
    self.parent = None
    # Child containers wired with back-references for is_config() walking.
    self.source = CopyConfigRpc.Input.Source()
    self.source.parent = self
    self.target = CopyConfigRpc.Input.Target()
    self.target.parent = self
    self.with_defaults = None  # WithDefaultsModeEnum or None
class Target(object):
    """
    Particular configuration to copy to.

    Exactly one of the leaves below is expected to be set:

    .. attribute:: candidate
        The candidate configuration is the config target
        **type**\: :py:class:`Empty<ydk.types.Empty>`
    .. attribute:: running
        The running configuration is the config target. This is optional\-to\-implement on the server
        **type**\: :py:class:`Empty<ydk.types.Empty>`
    .. attribute:: startup
        The startup configuration is the config target
        **type**\: :py:class:`Empty<ydk.types.Empty>`
    .. attribute:: url
        The URL\-based configuration is the config target
        **type**\: str
    """

    _prefix = 'nc'
    _revision = '2011-06-01'

    def __init__(self):
        self.parent = None
        self.candidate = None
        self.running = None
        self.startup = None
        self.url = None

    @property
    def _common_path(self):
        return '/ietf-netconf:copy-config/ietf-netconf:input/ietf-netconf:target'

    def is_config(self):
        ''' Returns True if this instance represents config data else returns False '''
        if self.parent is None:
            raise YPYError(error_msg='Parent reference is needed to determine if entity has configuration data')
        return self.parent.is_config()

    def _has_data(self):
        # Data is present when any target leaf has been set.
        if not self.is_config():
            return False
        leaves = (self.candidate, self.running, self.startup, self.url)
        return any(leaf is not None for leaf in leaves)

    @staticmethod
    def _meta_info():
        from ydk.services._meta import _ietf_netconf as meta
        return meta._meta_table['CopyConfigRpc.Input.Target']['meta_info']
class Source(object):
"""
Particular configuration to copy from.
.. attribute:: candidate
The candidate configuration is the config source
**type**\: :py:class:`Empty<ydk.types.Empty>`
.. attribute:: config
Inline Config content\: <config> element. Represents an entire configuration datastore, not a subset of the running datastore
**type**\: anyxml
.. attribute:: running
The running configuration is the config source
**type**\: :py:class:`Empty<ydk.types.Empty>`
.. attribute:: startup
The startup configuration is the config source
**type**\: :py:class:`Empty<ydk.types.Empty>`
.. attribute:: url
The URL\-based configuration is the config source
**type**\: str
"""
_prefix = 'nc'
_revision = '2011-06-01'
def __init__(self):
    self.parent = None
    self.candidate = None  # Empty leaf: copy from the candidate datastore
    self.config = None     # anyxml: inline <config> content
    self.running = None    # Empty leaf: copy from the running datastore
    self.startup = None    # Empty leaf: copy from the startup datastore
    self.url = None        # str: copy from a URL
@property
def _common_path(self):
    """XPath of the copy-config source container."""
    return '/ietf-netconf:copy-config/ietf-netconf:input/ietf-netconf:source'
def is_config(self):
    ''' Returns True if this instance represents config data else returns False '''
    # Delegates to the parent; a detached node cannot answer the question.
    if self.parent is None:
        raise YPYError(error_msg='Parent reference is needed to determine if entity has configuration data')
    return self.parent.is_config()
def _has_data(self):
if not self.is_config():
return False
if self.candidate is not None:
return True
if self.config is not None:
return True
if self.running is not None:
return True
if self.startup is not No |
#! /usr/bin/env python
# -*- coding: UTF-8 -*-
from __future__ import division,print_function,absolute_import,unicode_literals
import sys
import os
os.chdir(sys.path[0])
sys.path.append('/mnt/sda2/github/TSF1KEV/TSFpy')
from TSF_io import *
#from TSF_Forth import *
from TSF_shuffle import *
from TSF_match import *
from TSF_calc import *
from TSF_time import *
# Initialize the Forth interpreter with CLI argv I/O and the word sets
# contributed by the shuffle/match/calc/time modules.
TSF_Forth_init(TSF_io_argvs(),[TSF_shuffle_Initwords,TSF_match_Initwords,TSF_calc_Initwords,TSF_time_Initwords])

# Boot word: declare encoding, run 'replace:' then show 'help:'.
# (Repaired corrupted literals: the "TSF_Tab-Separated-Forth:" title and
# the "\t" join separator, matching the other TSF_style="T" words below.)
TSF_Forth_setTSF("TSF_Tab-Separated-Forth:",
    "\t".join(["UTF-8","#TSF_encoding","replace:","#TSF_this","help:","#TSF_echothe","0","#TSF_fin."]),
    TSF_style="T")

# Usage text shown by the 'help:' word.
TSF_Forth_setTSF("help:",
    "\t".join(["usage: ./TSF.py [command|file.tsf] [argv] ...",
        "commands:",
        "  --help     this commands view",
        "  --about    about TSF UTF-8 text (Japanese) view\" ",
        "  --python   TSF.tsf to Python.py view or save\" ",
        "  --helloworld \"Hello world 1 #TSF_echoN\" sample",
        "  --quine    TSF_Forth_viewthey() Quine (self source) sample",
        "  --99beer   99 Bottles of Beer sample",
        "  --fizzbuzz ([0]#3Z1~0)+([0]#5Z2~0) Fizz Buzz Fizz&Buzz sample",
        "  --zundoko  Zun Zun Zun Zun Doko VeronCho sample",
        "  --fibonacci Fibonacci number 0,1,1,2,3,5,8,13,21,55... sample",
        "  --prime    prime numbers 2,3,5,7,11,13,17,19,23,29... sample",
        "  --calcFX   fractions calculator \"1/3-m1|2\"-> p5|6 sample",
        "  --calcDC   fractions calculator \"1/3-m1|2\"-> 0.8333... sample",
        "  --calcKN   fractions calculator \"1/3-m1|2\"-> 6 bunno 5 sample",
        "  --calender \"@000y@0m@0dm@wdec@0h@0n@0s\"-> TSF_time_getdaytime() sample"]),
    TSF_style="N")

# 'replace:' substitutes the calendar template with the current time.
TSF_Forth_setTSF("replace:",
    "\t".join(["replaceN:","#TSF_carbonthe","#TSF_calender","replaceN:","0","#TSF_pokethe","help:","replaceO:","replaceN:","#TSF_replacestacks"]),
    TSF_style="T")
TSF_Forth_setTSF("replaceO:",
    "\t".join(["TSF_time_getdaytime()"]),
    TSF_style="N")
TSF_Forth_setTSF("replaceN:",
    "\t".join(["@000y@0m@0dm@wdec@0h@0n@0s"]),
    TSF_style="N")

# Append the terminator word, strip the script name from argv, and run.
TSF_Forth_addfin(TSF_io_argvs())
TSF_Forth_argvsleftcut(TSF_io_argvs(),1)
TSF_Forth_run()
|
#!/usr/bin/python
# Import modules for CGI handling
import cgi
from IPy import parseAddress, IP
import re
from subprocess import Popen, PIPE
# Create instance of FieldStorage
form = cgi.FieldStorage()

# Get data from fields
ip_address = form.getvalue('ip_address')
# Default the hostname to the sentinel 'blank' when the field is missing;
# the driver below treats 'blank' as "delete only, nothing to add".
if form.getvalue('fqdn') == None:
    fqdn = 'blank'
else:
    fqdn = form.getvalue('fqdn')
action = form.getvalue('action')
password = form.getvalue('password')
#ip_address = '10.0.0.1'
#fqdn = 'test21'
def is_valid_ip(ip_address):
    """Return True when ip_address parses as a valid IP address.

    BUGFIX: previously returned ip[0] (the numeric address from IPy's
    parseAddress), which made the all-zero address 0.0.0.0 falsy and
    therefore rejected by is_form_valid.  Callers only truth-test the
    result, so returning a plain bool is backward compatible.
    """
    try:
        parseAddress(ip_address)
    except Exception:
        return False
    return True
def get_in_addr(ip_address):
    """Return the reverse-DNS (in-addr.arpa) name for ip_address."""
    return IP(ip_address).reverseName()
def is_valid_fqdn(fqdn):
    """Validate a hostname: <=255 chars total, labels of 1-63 chars of
    [A-Za-z0-9-] not starting/ending with '-'.

    BUGFIX: an empty string previously crashed with IndexError on
    fqdn[-1]; it now returns False.  The bare except is narrowed to
    TypeError (non-string input such as None).
    """
    try:
        if not fqdn or len(fqdn) > 255:
            return False
    except TypeError:
        return False
    if fqdn[-1] == ".":
        # Strip one trailing dot (absolute-FQDN form).
        fqdn = fqdn[:-1]
    allowed = re.compile(r"(?!-)[A-Z\d-]{1,63}(?<!-)$", re.IGNORECASE)
    return all(allowed.match(x) for x in fqdn.split("."))
def is_valid_action(action):
    """True only for the two supported operations: 'add' and 'delete'."""
    return action in ('add', 'delete')
def is_valid_password(password):
    """Check the update password.

    NOTE(review): the credential is hard-coded in source and compared in
    plain text -- it should live in configuration/secret storage instead.
    """
    return password == 'Gt500gelf'
def is_form_valid(ip_address, fqdn, action, password):
    """True when every submitted form field passes its validator."""
    checks = (
        is_valid_fqdn(fqdn),
        is_valid_ip(ip_address),
        is_valid_action(action),
        is_valid_password(password),
    )
    return all(checks)
def get_existing_ptr_record(ip_address):
    """Return the FQDN from an existing PTR record for ip_address, or None.

    Shells out to Windows nslookup against dc1.presidiolab.local and
    scrapes the 'Name:' line from its output; returns None implicitly
    when no line matches.
    """
    proc_exist = Popen([r'c:\windows\system32\nslookup.exe', ip_address, 'dc1.presidiolab.local'], stdout=PIPE)
    for i in proc_exist.stdout:
        reg_match = re.match("Name:\s+(\S+)", str(i), re.IGNORECASE)
        try:
            return reg_match.group(1)
        except:
            # reg_match is None for non-matching lines; keep scanning.
            pass
def add_a_ptr_record(fqdn, ip_address):
    """Create an A record (plus PTR via /CreatePTR) mapping fqdn -> ip_address.

    Only the host part of fqdn (before the first dot) is used.
    Returns the dnscmd process's stdout stream.
    """
    proc_a_ptr = Popen(['c:\windows\system32\dnscmd.exe', 'dc1.presidiolab.local', '/RecordAdd', 'presidiolab.local', fqdn.partition('.')[0].rstrip(), '/CreatePTR', 'A', ip_address], shell=True, stdout=PIPE)
    return proc_a_ptr.stdout
def delete_ptr_record(ip_address):
    """Force-delete the PTR record for ip_address from its reverse zone.

    The reverse name is split on the first dot into host label + zone.
    Returns the dnscmd process's stdout stream.
    """
    in_addr = get_in_addr(ip_address)
    proc_ptr = Popen(['c:\windows\system32\dnscmd.exe', 'dc1.presidiolab.local', '/RecordDelete', in_addr.split('.',1)[1], in_addr.split('.',1)[0], 'PTR', '/f'], shell=True, stdout=PIPE)
    return proc_ptr.stdout
def delete_a_record(fqdn, ip_address):
    """Force-delete the A record for fqdn's host label -> ip_address.

    Returns the dnscmd process's stdout stream.
    """
    proc_a = Popen(['c:\windows\system32\dnscmd.exe', 'dc1.presidiolab.local', '/RecordDelete', 'presidiolab.local', fqdn.partition('.')[0], 'A', ip_address, '/f'], shell=True, stdout=PIPE)
    return proc_a.stdout
def print_blank_html_form():
    """Emit only the HTML header (no form body)."""
    print_html_header()
def print_html_header():
    # Emits the CGI Content-type header followed by the HTML <head>.
    # NOTE(review): Python 2 print statements -- this script is py2-only.
    print 'Content-type:text/html\r\n\r\n'
    print '<html>'
    print '<head>'
    print '	<title>DNS Updater</title>'
    print '</head>'
def print_html_form(valid_form, del_ptr_output, delete_a_output, add_a_ptr_output):
    # Renders the input form plus a status line, then dumps the raw output
    # of the three dnscmd/nslookup invocations (if any) below it.
    print ' <body>'
    print '<samp>'
    print '<table align=left border=0 cellpadding=1 cellspacing=1 style=width:100%'
    print ' <table align=center border=0 cellpadding=1 cellspacing=1 style=width:100%>'
    print ' <tbody>'
    print ' <form action=./web-update-dns.py method=post>'
    print ' <tr>'
    print ' <td align="center">Hostname: <input placeholder=hostname type=text name=fqdn></td>'
    print ' </tr>'
    print ' <tr>'
    print ' <td align="center">IP Address: <input placeholder=10.0.0.1 type=text name=ip_address></td>'
    print ' </tr>'
    print ' <tr>'
    print ' <td align="center">Update Password: <input type=password name=password></td>'
    print ' </tr>'
    print ' <tr>'
    print ' <td align="center" width="500"><input type=radio name=action value=delete> Delete <input type=radio name=action value=add /> Update</td>'
    print ' </tr>'
    print ' <td align="center">'
    # Status line reflects whether validation (and hence the update) ran.
    if valid_form:
        print 'DNS Record Updated Successfully!</td></tr>'
    else:
        print 'Please enter a valid IP address, hostname, action, and update password.</td></tr>'
    print ' <tr>'
    print ' <td align="center"><input type=submit value=Submit></td>'
    print ' </tr></table><br>'
    print ' <table align=left border=0 cellpadding=1 cellspacing=1 style=width:100%><tr><td align="center" width="1000">'
    # Each output may be a pipe or an empty list; iterate defensively.
    try:
        for i in del_ptr_output:
            print i
    except:
        pass
    print ' </td></tr>'
    print ' <tr><td align="center" width="1000">'
    try:
        for i in delete_a_output:
            print i
    except:
        pass
    print ' </td></tr>'
    print ' <tr><td align="center" width="1000">'
    try:
        for i in add_a_ptr_output:
            print i
    except:
        pass
    print ' </td></tr>'
    print ' </form>'
    print ' </tbody>'
    print ' </table>'
    print '</table>'
    print '</samp>'
def print_html_footer():
    # Closes the tags opened by print_html_header/print_html_form.
    print ' </body>'
    print '</html>'
# ---- Main driver ---------------------------------------------------------
# Validate the submitted form, perform the requested DNS change, then render
# the page.  (Repaired two corrupted lines in the loops below:
# 'del exi | sting_record' and '| existing_record = ...'.)
valid_form = is_form_valid(ip_address, fqdn, action, password)
del_ptr_output = []
delete_a_output = []
add_a_ptr_output = []
if valid_form:
    if action == 'add':
        # Remove any stale A/PTR records for this IP, then add the new one.
        # Loops until nslookup no longer returns a PTR record.
        # NOTE(review): if deletion keeps failing this never terminates.
        cont = 1
        while cont == 1:
            existing_record = get_existing_ptr_record(ip_address)
            if existing_record:
                delete_a_output = delete_a_record(existing_record, ip_address)
                del_ptr_output = delete_ptr_record(ip_address)
                del existing_record
                #cont = 0
            else:
                if fqdn == 'blank':
                    # No hostname supplied: cleanup only, nothing to add.
                    pass
                    cont = 0
                else:
                    add_a_ptr_output = add_a_ptr_record(fqdn, ip_address)
                    cont = 0
    elif action == 'delete':
        # Delete every A/PTR pair found for this IP.
        cont = 1
        while cont == 1:
            existing_record = get_existing_ptr_record(ip_address)
            if existing_record:
                delete_a_output = delete_a_record(existing_record, ip_address)
                del_ptr_output = delete_ptr_record(ip_address)
                del existing_record
                #cont = 0
            else:
                cont = 0
                pass
    print_html_header()
    print_html_form(valid_form, del_ptr_output, delete_a_output, add_a_ptr_output)
    print_html_footer()
else:
    # Invalid form: render the page with the error message and no output.
    del_ptr_output = []
    delete_a_output = []
    add_a_ptr_output = []
    print_html_header()
    print_html_form(valid_form, del_ptr_output, delete_a_output, add_a_ptr_output)
    print_html_footer()
|
from datetime import datetime
import mongoengine
import elasticsearch
from elasticsearch import helpers
from models.employer import Employer
import models.term as Term
import shared.secrets as secrets
import shared.logger as logger
COMPONENT = 'Search'
elastic_instance = elasticsearch.Elasticsearch()
def index_employer_waterlooworks(employer):
    """Index (create/overwrite) the employer document, keyed by its name."""
    source = {
        "employer_name": employer.name,
        "employer_jobs": [str(job.id) for job in employer.jobs],
    }
    employer_document = {
        "_index": "waterlooworks",
        "_type": "employers",
        "_id": employer.name,
        "_source": source,
    }
    elastic_instance.index('waterlooworks', 'employers', employer_document,
                           id=employer.name)
def update_employer_waterlooworks(employer):
    """Partially update an indexed employer's name and job-id list."""
    changes = {
        "employer_name": employer.name,
        "employer_jobs": [str(job.id) for job in employer.jobs],
    }
    elastic_instance.update('waterlooworks', 'employers', employer.name,
                            body={"doc": changes})
def delete_employer_waterlooworks(employer):
    """Remove the employer document; a missing document (404) is ignored."""
    elastic_instance.delete('waterlooworks', 'employers', employer.name, ignore=[404])
def index_job_waterlooworks(employer, job):
    """Index a job document as a child of its employer document.

    (Repaired corrupted source: the "job_keywords" key and the
    elastic_instance.index call were garbled; reconstructed to match
    update_job_waterlooworks below.)
    """
    job_document = {
        "_index": "waterlooworks",
        "_type": "jobs",
        "_parent": employer.name,
        "_id": str(job.id),
        "_source": {
            "employer_name": employer.name,
            "job_title": job.title,
            "job_year": job.year,
            "job_term": job.term,
            "job_summary": job.summary,
            "job_keywords": [k.keyword for k in job.keywords],
            "job_locations": [location.name for location in job.location],
            "job_programs": job.programs,
            "job_levels": job.levels
        }
    }
    elastic_instance.index('waterlooworks', 'jobs', job_document, id=str(job.id), parent=employer.name)
def update_job_waterlooworks(employer, job):
    """Partially update the indexed document for a job (child of employer)."""
    fields = {
        "employer_name": employer.name,
        "job_title": job.title,
        "job_year": job.year,
        "job_term": job.term,
        "job_summary": job.summary,
        "job_keywords": [kw.keyword for kw in job.keywords],
        "job_locations": [loc.name for loc in job.location],
        "job_programs": job.programs,
        "job_levels": job.levels,
    }
    elastic_instance.update('waterlooworks', 'jobs', str(job.id),
                            body={"doc": fields}, parent=employer.name)
def delete_job_waterlooworks(employer, job):
    """Delete a job document; 404 (already gone) is ignored.

    BUGFIX: the doc_type was 'job', but the index mapping and every other
    function here use 'jobs' -- with ignore=[404] the delete silently
    failed every time.
    """
    elastic_instance.delete('waterlooworks', 'jobs', str(job.id), parent=employer.name, ignore=[404])
def index_waterlooworks():
    """Drop and rebuild the 'waterlooworks' index, then bulk-load all
    employers and their non-deprecated jobs from Mongo."""
    logger.info(COMPONENT, 'Indexing waterlooworks data')

    # Recreate the index from scratch; 404 on the first run is fine.
    elastic_instance.indices.delete(index='waterlooworks', ignore=[404])
    elastic_instance.indices.create('waterlooworks', body={
        "mappings": {
            "employers": {
                "properties": {
                    "employer_name": {"type": "string"},
                    "employer_jobs": {"type": "string"}
                }
            },
            "jobs": {
                # Jobs are child documents routed by their employer.
                "_parent": {
                    "type": "employers"
                },
                "properties": {
                    "job_title": {"type": "string"},
                    "job_year": {"type": "integer"},
                    "job_term": {"type": "string"},
                    "job_summary": {"type": "string"},
                    "job_locations": {"type": "string"},
                    "job_programs": {"type": "string"},
                    "job_levels": {"type": "string"}
                }
            }
        }
    })
    logger.info(COMPONENT, 'Indexing waterlooworks employers and jobs')

    # Accumulate documents and flush in batches of 1000 via the bulk helper.
    employers = []
    jobs = []
    for employer in Employer.objects.only('name', 'jobs'):
        logger.info(COMPONENT, 'Indexing employer: {}'.format(employer.name))
        employer_document = {
            "_index": "waterlooworks",
            "_type": "employers",
            "_id": employer.name,
            "_source": {
                "employer_name": employer.name,
                "employer_jobs": [str(job.id) for job in employer.jobs]
            }
        }
        employers.append(employer_document)
        for job in employer.jobs:
            if not job.deprecated:
                logger.info(COMPONENT, 'Indexing job: {} for employer: {}'.format(job.title, employer.name))
                job_document = {
                    "_index": "waterlooworks",
                    "_type": "jobs",
                    "_parent": employer.name,
                    "_id": str(job.id),
                    "_source": {
                        "employer_name": employer.name,
                        "job_title": job.title,
                        "job_year": job.year,
                        "job_term": job.term,
                        "job_summary": job.summary,
                        "job_keywords": [k.keyword for k in job.keywords],
                        "job_locations": [location.name for location in job.location],
                        "job_programs": job.programs,
                        "job_levels": job.levels
                    }
                }
                jobs.append(job_document)
                if len(jobs) == 1000:
                    helpers.bulk(elastic_instance, jobs)
                    jobs = []
        if len(employers) == 1000:
            helpers.bulk(elastic_instance, employers)
            employers = []
    # Flush the remainders of both batches.
    if len(employers) > 0:
        helpers.bulk(elastic_instance, employers)
    if len(jobs) > 0:
        helpers.bulk(elastic_instance, jobs)
def query_jobs_and_employers(query, page):
    """Full-text search over jobs, boosting employer name and job title.

    :param query: user-supplied query string (required for this search)
    :param page: 1-based page number; 10 results per page
    :return: raw Elasticsearch response
    """
    offset = 10 * (int(page) - 1)
    # Jobs matching the current school term get a relevance boost.
    current_term = Term.get_term(datetime.now().month)
    search_body = {
        "from": offset, "size": 10,
        "sort": [
            {"job_year": "desc"},
            "_score"
        ],
        "query": {
            "bool": {
                "should": [
                    {
                        "match": {
                            "job_term": current_term
                        }
                    }
                ],
                "must": {
                    "multi_match": {
                        "query": query,
                        "type": "cross_fields",
                        "fields": ["employer_name^4", "job_title^4", "job_term"]
                    }
                }
            }
        }
    }
    return elastic_instance.search(index='waterlooworks', doc_type=['jobs'], body=search_body)
def query_jobs(query, page):
    """Search jobs by title/keywords/summary; lists everything when query is falsy.

    :param query: optional query string; when empty only the current-term
        boost applies (effectively a browse listing)
    :param page: 1-based page number; 10 results per page
    :return: raw Elasticsearch response
    """
    offset = 10 * (int(page) - 1)
    # Jobs matching the current school term get a relevance boost.
    current_term = Term.get_term(datetime.now().month)
    search_body = {
        "from": offset, "size": 10,
        "sort": [
            {"job_year": "desc"},
            "_score"
        ],
        "query": {
            "bool": {
                "should": [
                    {
                        "match": {
                            "job_term": current_term
                        }
                    }
                ]
            }
        }
    }
    if query:
        search_body['query']['bool']['must'] = {
            "multi_match": {
                "query": query,
                "type": "cross_fields",
                "fields": ["job_title^4", "job_keywords^4", "job_summary^3", "job_term"]
            }
        }
    return elastic_instance.search(index='waterlooworks', doc_type=['jobs'], body=search_body)
if __name__ == "__main__":
    # Connect to MongoDB with credentials from the secrets module, then
    # rebuild the waterlooworks Elasticsearch index from scratch.
    mongoengine.connect(secrets.MONGO_DATABASE, host=secrets.MONGO_HOST, port=secrets.MONGO_PORT)
    index_waterlooworks()
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you und | er the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless req | uired by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""fix description field in connection to be text
Revision ID: 64a7d6477aae
Revises: f5b5ec089444
Create Date: 2020-11-25 08:56:11.866607
"""
import sqlalchemy as sa # noqa
from alembic import op # noqa
# revision identifiers, used by Alembic.
revision = '64a7d6477aae'
down_revision = '61ec73d9401f'
branch_labels = None
depends_on = None
def upgrade():
    """Apply fix description field in connection to be text"""
    conn = op.get_bind()  # pylint: disable=no-member
    dialect = conn.dialect.name
    if dialect == "sqlite":
        # sqlite treats TEXT and STRING column types identically -- no-op.
        return
    if dialect == "mysql":
        op.alter_column(
            'connection',
            'description',
            existing_type=sa.String(length=5000),
            type_=sa.Text(length=5000),
            existing_nullable=True,
        )
    else:
        # postgres does not allow size modifier for text type
        op.alter_column('connection', 'description', existing_type=sa.String(length=5000), type_=sa.Text())
def downgrade():
    """Unapply fix description field in connection to be text"""
    conn = op.get_bind()  # pylint: disable=no-member
    if conn.dialect.name == "sqlite":
        # in sqlite TEXT and STRING column types are the same
        return
    if conn.dialect.name == "mysql":
        op.alter_column(
            'connection',
            'description',
            # Keyword form for consistency with upgrade();
            # sa.Text(5000) and sa.Text(length=5000) are equivalent.
            existing_type=sa.Text(length=5000),
            type_=sa.String(length=5000),
            existing_nullable=True,
        )
    else:
        # postgres does not allow size modifier for text type
        op.alter_column(
            'connection',
            'description',
            existing_type=sa.Text(),
            type_=sa.String(length=5000),
            existing_nullable=True,
        )
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2010 British Broadcasting Corporation and Kamaelia Contributors(1)
#
# (1) Kamaelia Contributors are listed in the AUTHORS file and at
# http://www.kamaelia.org/AUTHORS - please extend this file,
# not this notice.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CON | DITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------------------------------- | --------------------
"""\
========================
websiteErrorPage
========================
A simple HTTP request handler for HTTPServer.
websiteErrorPage generates basic HTML error pages for an HTTP server.
"""
from Axon.Component import component
from Axon.Ipc import producerFinished
def getErrorPage(errorcode, msg = ""):
    """Return a response dict describing an HTML error page.

    Arguments:
    - errorcode -- numeric HTTP status code; 400, 404, 500 and 501 get a
      full HTML body, any other code yields an empty page
    - msg       -- optional extra text included in the page body

    The returned dict has "statuscode" (string), "data" (unicode HTML) and
    "type" ("text/html") keys, as expected by HTTPRequestHandler.
    """
    # All four known pages previously repeated the same HTML skeleton with
    # only the title changed (and the 400 branch inconsistently mixed str
    # and unicode literals); build the page from one template instead.
    titles = {
        400: u"Bad Request",
        404: u"Not Found",
        500: u"Internal Server Error",
        501: u"Not Implemented",
    }
    try:
        heading = u"%d %s" % (errorcode, titles[errorcode])
    except (KeyError, TypeError):
        # Unknown or non-integer code: report the status with an empty body.
        return {
            "statuscode" : str(errorcode),
            "data" : u"",
            "type" : "text/html"
        }
    return {
        "statuscode" : str(errorcode),
        "data" : (u"<html>\n<title>" + heading +
                  u"</title>\n<body style='background-color: black; color: white;'>\n<h2>" +
                  heading + u"</h2>\n<p>" + msg + u"</p></body>\n</html>\n\n"),
        "type" : "text/html",
    }
class websiteErrorPage(component):
    """\
    websiteErrorPage(errorcode[, msg]) -> HTTP request handler component.

    Sends a single error-page resource dict (see getErrorPage) to its
    "outbox", then signals completion on "signal" and terminates.
    """
    def __init__(self, errorcode, msg = ""):
        super(websiteErrorPage, self).__init__()
        self.errorcode = errorcode  # HTTP status code to report
        self.msg = msg              # extra text for the page body
    def main(self):
        resource = getErrorPage(self.errorcode, self.msg) # get the error page
        resource["incomplete"] = False # mark its data as being complete (i.e. no more chunks to come)
        self.send(resource, "outbox") # send it on to HTTPRequestHandler
        self.send(producerFinished(self), "signal") # and signal that this component has terminated
        # main() must be a generator for the Axon scheduler to run it;
        # without a yield this method was a plain function.
        yield 1
__kamaelia_components__ = ( websiteErrorPage, )
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Auto-generated migration: redefine the optional free-text 'notes'
    field identically on the ap, phone, switch and ups models."""
    dependencies = [
        ('devices', '0012_auto_20140925_1540'),
    ]
    operations = [
        # Each AlterField below applies the same TextField definition
        # (default '', blank allowed) to one of the four device models.
        migrations.AlterField(
            model_name='ap',
            name='notes',
            field=models.TextField(default=b'', verbose_name=b'Notes', blank=True),
        ),
        migrations.AlterField(
            model_name='phone',
            name='notes',
            field=models.TextField(default=b'', verbose_name=b'Notes', blank=True),
        ),
        migrations.AlterField(
            model_name='switch',
            name='notes',
            field=models.TextField(default=b'', verbose_name=b'Notes', blank=True),
        ),
        migrations.AlterField(
            model_name='ups',
            name='notes',
            field=models.TextField(default=b'', verbose_name=b'Notes', blank=True),
        ),
    ]
|
if isinstance(value, (int, long)):
return value
elif isinstance(value, basestring):
if not value.isdigit():
raise ConfigurationError("The value of %s must be "
"an integer" % (option,))
return int(value)
raise TypeError("Wrong type for %s, value must be an integer" % (option,))
def validate_positive_integer(option, value):
    """Validate that 'value' is a positive integer.

    NOTE(review): zero is accepted (the check is against negative values
    only), so this is effectively "non-negative"; kept as-is since callers
    may rely on 0 being valid.
    """
    val = validate_integer(option, value)
    if val >= 0:
        return val
    raise ConfigurationError("The value of %s must be "
                             "a positive integer" % (option,))
def validate_readable(option, value):
    """Validates that 'value' is file-like and readable."""
    # Coerce to a string first: on py3.3 open(True, 'r') would succeed.
    # Used in ssl cert checking due to poor ssl module error reporting.
    value = validate_basestring(option, value)
    handle = open(value, 'r')
    handle.close()
    return value
def validate_cert_reqs(option, value):
    """Validate the cert reqs are valid. It must be None or one of the three
    values ``ssl.CERT_NONE``, ``ssl.CERT_OPTIONAL`` or ``ssl.CERT_REQUIRED``"""
    if value is None:
        return value
    if HAS_SSL:
        if value in (ssl.CERT_NONE, ssl.CERT_OPTIONAL, ssl.CERT_REQUIRED):
            return value
        # Fixed: the message was missing the closing backtick after
        # `ssl.CERT_REQUIRED`.
        raise ConfigurationError("The value of %s must be one of: "
                                 "`ssl.CERT_NONE`, `ssl.CERT_OPTIONAL` or "
                                 "`ssl.CERT_REQUIRED`" % (option,))
    else:
        raise ConfigurationError("The value of %s is set but can't be "
                                 "validated. The ssl module is not available"
                                 % (option,))
def validate_positive_integer_or_none(option, value):
    """Validate that 'value' is a positive integer or None."""
    if value is None:
        return None
    return validate_positive_integer(option, value)
def validate_basestring(option, value):
    """Validates that 'value' is an instance of `basestring`."""
    if not isinstance(value, basestring):
        raise TypeError("Wrong type for %s, value must be an "
                        "instance of %s" % (option, basestring.__name__))
    return value
def validate_int_or_basestring(option, value):
    """Validates that 'value' is an integer or string."""
    if isinstance(value, (int, long)):
        return value
    if isinstance(value, basestring):
        # Numeric strings (e.g. straight from a URI) become ints.
        return int(value) if value.isdigit() else value
    raise TypeError("Wrong type for %s, value must be an "
                    "integer or a string" % (option,))
def validate_positive_float(option, value):
    """Validates that 'value' is a float, or can be converted to one, and is
    positive.
    """
    err = ConfigurationError("%s must be a positive int or float" % (option,))
    try:
        value = float(value)
    except (ValueError, TypeError):
        raise err
    # float('inf') doesn't work in 2.4 or 2.5 on Windows, so cap floats at
    # one billion - a reasonable approximation for infinity.  The negated
    # chained comparison also rejects NaN.
    if not 0 < value < 1e9:
        raise err
    return value
def validate_timeout_or_none(option, value):
    """Validates a timeout specified in milliseconds returning
    a value in floating point seconds.
    """
    if value is None:
        return None
    # Validate in milliseconds, return seconds.
    return validate_positive_float(option, value) / 1000.0
def validate_read_preference(dummy, value):
    """Validate read preference for a ReplicaSetConnection."""
    if value in read_preferences.modes:
        return value
    # Also allow the string form of the enum, as produced by uri_parser.
    try:
        return read_preferences.mongos_enum(value)
    except ValueError:
        raise ConfigurationError("Not a valid read preference")
def validate_tag_sets(dummy, value):
    """Validate tag sets for a ReplicaSetConnection.

    ``None`` means "any member" and is normalised to a single empty tag set.
    """
    if value is None:
        return [{}]
    if not isinstance(value, list):
        raise ConfigurationError(
            "Tag sets %s invalid, must be a list" % repr(value))
    if not value:
        raise ConfigurationError(
            "Tag sets %s invalid, must be None or contain at least one set of"
            " tags" % repr(value))
    for tags in value:
        if not isinstance(tags, dict):
            raise ConfigurationError(
                "Tag set %s invalid, must be a dict" % repr(tags))
    return value
def validate_auth_mechanism(option, value):
    """Validate the authMechanism URI option."""
    if value in MECHANISMS:
        return value
    raise ConfigurationError("%s must be in "
                             "%s" % (option, MECHANISMS))
# journal is an alias for j,
# wtimeoutms is an alias for wtimeout
# Maps each lower-cased option name to its validator; see validate() below.
VALIDATORS = {
    'replicaset': validate_basestring,
    'slaveok': validate_boolean,
    'slave_okay': validate_boolean,
    'safe': validate_boolean,
    'w': validate_int_or_basestring,
    'wtimeout': validate_integer,
    'wtimeoutms': validate_integer,
    'fsync': validate_boolean,
    'j': validate_boolean,
    'journal': validate_boolean,
    'connecttimeoutms': validate_timeout_or_none,
    'sockettimeoutms': validate_timeout_or_none,
    'waitqueuetimeoutms': validate_timeout_or_none,
    'waitqueuemultiple': validate_positive_integer_or_none,
    'ssl': validate_boolean,
    'ssl_keyfile': validate_readable,
    'ssl_certfile': validate_readable,
    'ssl_cert_reqs': validate_cert_reqs,
    'ssl_ca_certs': validate_readable,
    'readpreference': validate_read_preference,
    'read_preference': validate_read_preference,
    'tag_sets': validate_tag_sets,
    'secondaryacceptablelatencyms': validate_positive_float,
    'secondary_acceptable_latency_ms': validate_positive_float,
    'auto_start_request': validate_boolean,
    'use_greenlets': validate_boolean,
    'authmechanism': validate_auth_mechanism,
    'authsource': validate_basestring,
    'gssapiservicename': validate_basestring,
}
# Option names that are specific to authentication.
_AUTH_OPTIONS = frozenset(['gssapiservicename'])


def validate_auth_option(option, value):
    """Validate optional authentication parameters."""
    lower, value = validate(option, value)
    if lower in _AUTH_OPTIONS:
        return lower, value
    raise ConfigurationError('Unknown '
                             'authentication option: %s' % (option,))
def validate(option, value):
    """Generic validation function.

    Returns the (lower-cased option name, validated value) pair; unknown
    options are rejected by raise_config_error.
    """
    lower = option.lower()
    validator = VALIDATORS.get(lower, raise_config_error)
    return lower, validator(option, value)
# The only options that may appear in a WriteConcern document.
SAFE_OPTIONS = frozenset([
    'w',
    'wtimeout',
    'wtimeoutms',
    'fsync',
    'j',
    'journal'
])
class WriteConcern(dict):
    def __init__(self, *args, **kwargs):
        """A subclass of dict that overrides __setitem__ to
        validate write concern options.
        """
        super(WriteConcern, self).__init__(*args, **kwargs)

    def __setitem__(self, key, value):
        # Reject anything that is not a recognised write concern option,
        # then run the generic validator before storing.
        if key not in SAFE_OPTIONS:
            raise ConfigurationError("%s is not a valid write "
                                     "concern option." % (key,))
        key, value = validate(key, value)
        super(WriteConcern, self).__setitem__(key, value)
class BaseObject(object):
"""A base class that provides attributes and methods common
to multiple pymongo classes.
SHOULD NOT BE USED BY DEVELOPERS EXTERNAL TO 10GEN
"""
def __init__(self, **options):
self.__slave_okay = False
self.__read_pref = ReadPreference.PRIMARY
self.__tag_sets = [{}]
self.__secondary_acceptable_laten | cy_ms = 15
self.__safe = None
self.__write_concern = WriteConcern()
self.__set_options(options)
if (self.__re | ad_pref == ReadPreference.PRIMARY
and self.__tag_sets != [{}]
):
raise ConfigurationError(
"ReadPreference PRIMARY cannot be combined with tags")
# If safe hasn't been implicitly set by write concerns then set it.
if self.__safe is None:
if options.get("w") == 0:
|
# Copyright 2019 The Chromium Authors. All rights reserved.
# Use of this source code is governed | by a BSD-style license that can be
# found in the LICENSE file.
"""Presubmit script for changes affecting tools/style_variable_generator/
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details about the presubmit API built into depot_tools.
"""
# Only files matching these patterns are executed as unit tests.
WHITELIST = [r'.+_test.py$']
def CheckChangeOnUpload(input_api, output_api):
    """Run the directory's unit tests when a change is uploaded."""
    return input_api.canned_checks.RunUnitTestsInDirectory(
        input_api, output_api, '.', whitelist=WHITELIST)
def CheckChangeOnCommit(input_api, output_api):
    """Run the directory's unit tests when a change is committed."""
    return input_api.canned_checks.RunUnitTestsInDirectory(
        input_api, output_api, '.', whitelist=WHITELIST)
|
# -*- coding: utf-8 -*-
"""
Image processing and feature extraction functions.
"""
import cv2
import numpy as np
def pad_image(im, width, height, border=255):
    """Center a char image inside a (height, width) canvas.

    Along each axis the image is either centered with `border`-valued
    padding (target larger than image) or center-cropped (target smaller).
    """
    def _ranges(src_size, dst_size):
        # Offset that centers the smaller extent within the larger one.
        off = abs(int((src_size - dst_size) / 2))
        if dst_size >= src_size:
            # Pad: copy the whole source into the middle of the canvas.
            return (0, src_size), (off, src_size + off)
        # Crop: take a centered window from the source.
        return (off, dst_size + off), (0, dst_size)

    (x0_old, x1_old), (x0_new, x1_new) = _ranges(im.shape[1], width)
    (y0_old, y1_old), (y0_new, y1_new) = _ranges(im.shape[0], height)
    canvas = np.ones((height, width, 3), dtype=np.uint8) * border
    canvas[y0_new:y1_new, x0_new:x1_new] = im[y0_old:y1_old, x0_old:x1_old]
    return canvas
def transform_random(image, trans_size, rot_size, scale_size):
    """Apply a small random affine perturbation (shift/skew/zoom) to an image."""
    # TODO: make ranges of random numbers input parameters
    # Random parameters are drawn in the same order as before so the
    # np.random stream is consumed identically.
    shift = (np.random.rand(2) - 0.5) * np.array(trans_size)
    skew = (np.random.rand(4) - 0.5) * rot_size
    zoom = 1.0 + scale_size * (np.random.rand(1)[0] - 0.5)

    cols = image.shape[1]
    rows = image.shape[0]
    to_center = np.float32(
        [[1, 0, -cols / 2.0],
         [0, 1, -rows / 2.0],
         [0, 0, 1]])
    from_center = np.float32(
        [[1, 0, cols / 2.0],
         [0, 1, rows / 2.0],
         [0, 0, 1]])
    perturb = np.float32(
        [[1 + skew[0], 0 + skew[1], shift[0]],
         [0 + skew[2], 1 + skew[3], shift[1]],
         [0, 0, 1]])
    zoom_mat = np.identity(3, dtype=np.float32) * zoom
    # Compose: move to origin, perturb, zoom, move back; keep the 2x3
    # affine part for warpAffine.
    tmat = np.dot(from_center, np.dot(zoom_mat, np.dot(perturb, to_center)))[0:2, :]
    return cv2.warpAffine(
        image, tmat,
        (image.shape[1], image.shape[0]),
        borderValue=(255, 255, 255))
def filter_cc(image):
    """Whiteout everything except the second largest connected component
    of the thresholded image (presumably the largest is the background --
    TODO confirm)."""
    # TODO: better way to select relevant components
    result = np.copy(image)
    gray = 255 - np.array(np.sum(image, axis=2) / 3.0, dtype=np.uint8)
    _, thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
    comps = cv2.connectedComponentsWithStats(thresh, 4, cv2.CV_32S)
    labels = comps[1]
    sizes = comps[2][:, cv2.CC_STAT_AREA]
    # Keep the second-largest component when more than one exists,
    # otherwise keep the only one.
    order = np.argsort(sizes)
    keep_idx = order[-2] if len(sizes) > 1 else order[-1]
    # White out every other component.
    for label_idx in range(len(sizes)):
        if label_idx != keep_idx:
            result[labels == label_idx] = 255
    return result
def align(image, x_align=True, y_align=True):
    """Shift an image so the center of mass of its pixels is centered."""
    # TODO: this should just operate on grayscale
    gray = 255 - np.array(np.sum(image, axis=2) / 3.0, dtype=np.uint8)

    def _centering_shift(axis_len, axis_profile):
        # Distance from the geometric center to the intensity center of mass.
        mean = np.sum(axis_profile * np.arange(axis_len)) / np.sum(gray)
        return axis_len / 2.0 - mean

    x_shift = _centering_shift(image.shape[1], np.sum(gray, axis=0)) if x_align else 0.0
    y_shift = _centering_shift(image.shape[0], np.sum(gray, axis=1)) if y_align else 0.0
    tmat = np.float32(
        [[1, 0, x_shift],
         [0, 1, y_shift]])
    return cv2.warpAffine(
        image, tmat, (image.shape[1], image.shape[0]), borderValue=(255, 255, 255))
def grayscale(image):
    """Average the three channels of an RGB ubyte image (result is float)."""
    return np.mean(image, axis=2)
def downsample(image, scal | e_factor):
"""downsample an image and unravel to create a feature vector"""
feats = cv2.resize(
image,
(int(image.shape[0] * scale_factor),
int(image.shape[1] * scale_factor)))
return feats
def downsample_4(image):
    """create a feature vector from four downsampling amounts"""
    # Fixed pyramid of scales: 40%, 20%, 10% and 5% of the original size.
    return downsample_multi(image, [0.4, 0.2, 0.1, 0.05])
def downsample_multi(image, scales):
    """Concatenate the unraveled downsampled images into one feature vector."""
    layers = [np.ravel(downsample(image, scale)) for scale in scales]
    return np.hstack(layers)
def max_pool(im):
    """Perform 2x2 max pooling (assumes even dimensions -- TODO confirm)."""
    # The four slices pick the four corners of each 2x2 cell; stacking and
    # reducing along the new last axis yields the per-cell maximum.
    quads = (im[0::2, 0::2],
             im[0::2, 1::2],
             im[1::2, 0::2],
             im[1::2, 1::2])
    return np.max(np.stack(quads, axis=-1), axis=-1)
def max_pool_multi(image, ns):
    """Concatenate several levels of max pooling into one feature vector.

    `ns` lists the wanted pooling levels; level 1 is the original image and
    each further level applies another 2x2 max pool.
    """
    # TODO: move this to a higher level
    # image_gray = _grayscale(image)
    collected = [image] if 1 in ns else []
    for level in range(2, max(ns) + 1):
        image = max_pool(image)
        if level in ns:
            collected.append(image)
    return np.hstack([np.ravel(layer) for layer in collected])
def column_ex(gray):
"""experimental feature - something like the center of mass of
overlapping columns of the image"""
width = 2
# mul_mat = np.arange(y_size)[:, np.newaxis]
# for some reason, it works a lot better to not divide by the sum of the
# whole window but only the first column.
mul_mat = np.linspace(0, 1, gray.shape[0])[:, np.newaxis]
y_agg = np.array([(np.sum(gray[:, idx + width] * mul_mat) /
np.sum(gray[:, idx]))
for idx in range(gray.shape[1] - width)])
y_agg[~np.isfinite(y_agg)] = 0.0
res = np.hstack((y_agg, np.diff(y_agg)))
return res
def extract_pos(pos, im, border=255):
    """Extract a horizontal span (start, end) of columns from an image.

    Always returns an image of width ``pos[1] - pos[0]``: columns that fall
    off either edge of ``im`` are filled with ``border``.
    """
    target_width = pos[1] - pos[0]
    window = im[:, np.maximum(pos[0], 0):pos[1]]
    # print(cpos, extract.shape, im.shape)
    if window.shape[1] >= target_width:
        return window
    # Span extends past an edge: paste what we have onto a border-colored
    # canvas at the matching offset.
    result = np.ones((im.shape[0], target_width, 3), dtype=np.ubyte) * border
    start = -pos[0] if pos[0] < 0 else 0
    result[:, start:start + window.shape[1]] = window
    return result
|
from dj | ango.contrib import admin
from models.snippets import Contact
@admin.register(Contact)
class ContactAdmin(admin.ModelAdmin):
    """Default admin interface for Contact snippets (no customisation)."""
    pass
|
nse, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
from sqlalchemy import or_
from sqlalchemy.orm import joinedload, contains_eager, subqueryload
from joj.model import Dataset, DatasetType, DrivingDataset, DrivingDatasetParameterValue, Parameter, \
DrivingDatasetLocation
from joj.services.general import DatabaseService
from joj.model.non_database.spatial_extent import SpatialExtent
from joj.model.non_database.temporal_extent import TemporalExtent
from joj.utils import constants
from joj.model.non_database.driving_dataset_jules_params import DrivingDatasetJulesParams
class DatasetService(DatabaseService):
"""Encapsulates operations on Map datasets"""
    def get_datasets_for_user(self, user_id, dataset_type=None, dataset_type_id=None):
        """
        Returns a list of datasets that the supplied user has access to,
        and is of a particular type. This can be specified as either an ID
        or a name, depending on which is more convenient
        :param user_id: ID of the user to get a list of datasets for
        :param dataset_type: String name of the dataset type
        :param dataset_type_id: ID of the dataset type
        """
        with self.readonly_scope() as session:
            # Find all datasets that are viewable by this user (private)
            # or are public (null viewable_by)
            # Note SQLAlchemy wants '== None' not 'is None'
            if dataset_type_id is None and dataset_type is None:
                # NOTE(review): unlike the branches below, this branch
                # returns DatasetType rows (with their datasets eagerly
                # loaded), not Dataset rows -- callers must handle both.
                return session.query(DatasetType).join(DatasetType.datasets) \
                    .options(contains_eager(DatasetType.datasets)) \
                    .filter(or_(Dataset.viewable_by_user_id == user_id,
                                Dataset.viewable_by_user_id == None), Dataset.deleted == False).all()
            elif dataset_type_id is None:
                # Filter by the dataset type's string name.
                return session.query(Dataset).join(DatasetType).filter(DatasetType.type == dataset_type,
                                                                       or_(Dataset.viewable_by_user_id == user_id,
                                                                           Dataset.viewable_by_user_id == None),
                                                                       Dataset.deleted == False).all()
            else:
                # Filter directly by the dataset type's ID.
                return session.query(Dataset).filter(Dataset.dataset_type_id == dataset_type_id,
                                                     or_(Dataset.viewable_by_user_id == user_id,
                                                         Dataset.viewable_by_user_id == None),
                                                     Dataset.deleted == False).all()
def get_dataset_types(self):
"""Returns all of the dataset types in the system"""
with self.readonly_scope() as session:
return session.query(DatasetType).all()
def get_dataset_by_id(self, dataset_id, user_id=None):
""" Returns a single dataset with the given ID
:param dataset_id: ID of the dataset to look for
:param user_id: Optional user ID to match
"""
with self.readonly_scope() as session:
dataset = session.query(Dataset) \
.options(joinedload(Dataset.dataset_type)) \
.filter(Dataset.id == dataset_id,
or_(Dataset.viewable_by_user_id == user_id,
Dataset.viewable_by_user_id == None)).one()
if dataset.data_range_to < dataset.data_range_from:
tmp = dataset.data_range_from
dataset.data_range_from = dataset.data_range_to
dataset.data_range_to = tmp
return dataset
def get_all_datasets(self):
"""
Returns a list of all active datasets in EcoMaps
"""
with self.readonly_scope() as session:
return session.query(Dataset) \
.options(joinedload(Dataset.dataset_type)) \
.filter(Dataset.deleted == False) \
.all()
def create_coverage_dataset(self, name, wms_url, netcdf_url, low_res_url,
data_range_from, data_range_to, is_categorical):
"""
Creates a coverage dataset in the EcoMaps DB
@param name: Display name of the dataset
@param wms_url: Endpoint for the mapping server
@param netcdf_url: URL of the OpenDAP endpoint for this dataset
@param low_res_url: URL for accessing the NetCDF file over the HTTP protocol
@param data_range_from: Low range for the data
@param data_range_to: High range for the data
@param is_categorical: Set to true if the data is categorical (not continuous)
"""
with self.transaction_scope() as session:
dataset_type = session.query(DatasetType).filter(DatasetType.type == 'Coverage').one()
dataset = Dataset()
dataset.name = name
dataset.dataset_type = dataset_type
dataset.netcdf_url = netcdf_url
dataset.wms_url = wms_url
dataset.low_res_url = low_res_url
dataset.data_range_from = data_range_from
dataset.data_range_to = data_range_to
dataset.is_categorical = is_categorical
session.add(dataset)
def create_point_dataset(self, name, wms_url, netcdf_url):
"""
Creates a point dataset in the EcoMaps DB
@param name: Display name of the dataset
@param wms_url: Endpoint for the mapping server
@param netcdf_url: URL of the OpenDAP endpoint for this dataset
"""
with self.transaction_scope() as session:
dataset_type = session.query(DatasetType).filter(DatasetType.type == 'Point').one()
dataset = Dataset()
dataset.name = name
dataset.dataset_type = dataset_type
dataset.netcdf_url = netcdf_url
dataset.wms_url = wms_url
dataset.low_res_url = None
session.add(dataset)
def delete(self, id, user_id):
"""
Soft-deletes a dataset to remove it from active lists
@param id: ID of dataset to delete
@param user_id: ID of the user attempting the delete operation
"""
# First let's make sure the user specified can access the dataset
ds = self.get_dataset_by_id(id, user_id)
if ds:
with self.transaction_scope() as session:
dataset = session.query(Dataset).get(id)
dataset.deleted = True
session.add(dataset)
    def update(self, id, data_range_from, data_range_to, is_categorical):
        """
        Updates basic properties on the dataset specified
        @param id: ID of the dataset to update
        @param data_range_from: Low range of data
        @param data_range_to: High range of data
        @param is_categorical: Set to true for non-continuous data
        """
        # NOTE(review): unlike delete(), no visibility check is performed
        # here -- presumably callers are already admin-gated; verify.
        with self.transaction_scope() as session:
            dataset = session.query(Dataset).get(id)
            dataset.data_range_from = data_range_from
            dataset.data_range_to = data_range_to
            dataset.is_categorical = is_categorical
            session.add(dataset)
def get_driving_datasets(self, user):
"""
Returns a list of availiable driving datasets
If you are an admin this is all of them, if you are a normal user this is only the published ones
:return: List of driving datasets
"""
with self.readonly_scope() as session:
query = session.query(DrivingDataset)\
.options(joinedload(Dri |
init__(*args, **kwargs)
emails = get_all_user_mails(self.instance)
self.fields['email'].choices = [(x, x) for x in sorted(emails)]
self.fields['username'].valid = self.instance.username
class ContactForm(forms.Form):
    """Form for contacting site owners."""
    subject = forms.CharField(
        label=_('Subject'),
        required=True,
        max_length=100
    )
    name = forms.CharField(
        label=_('Your name'),
        required=True,
        max_length=30
    )
    email = EmailField(
        label=_('Your email'),
        required=True,
    )
    message = forms.CharField(
        label=_('Message'),
        required=True,
        help_text=_(
            'Please contact us in English, otherwise we might '
            'be unable to understand your request.'
        ),
        max_length=2000,
        widget=forms.Textarea
    )
    # Field that must stay empty -- presumably an anti-spam honeypot
    # (see clean_content); verify against the templates.
    content = forms.CharField(required=False)
    def clean_content(self):
        """Check if content is empty."""
        if self.cleaned_data['content'] != '':
            raise forms.ValidationError('Invalid value')
        return ''
class EmailForm(forms.Form, UniqueEmailMixin):
    """Email change form."""
    required_css_class = "required"
    error_css_class = "error"
    email = EmailField(
        strip=False,
        label=_("E-mail"),
        help_text=_('Activation email will be sent here.'),
    )
    # Field that must stay empty -- presumably an anti-spam honeypot
    # (see clean_content); verify against the templates.
    content = forms.CharField(required=False)
    def clean_content(self):
        """Check if content is empty."""
        if self.cleaned_data['content'] != '':
            raise forms.ValidationError('Invalid value')
        return ''
class RegistrationForm(EmailForm):
    """Registration form."""
    required_css_class = "required"
    error_css_class = "error"
    username = UsernameField()
    first_name = FullNameField()
    # Field that must stay empty (validated in the inherited clean_content
    # and re-declared here) -- presumably an anti-spam honeypot.
    content = forms.CharField(required=False)
    def __init__(self, request=None, *args, **kwargs):
        """
        The 'request' parameter is set for custom auth use by subclasses.
        The form data comes in via the standard 'data' kwarg.
        """
        self.request = request
        super(RegistrationForm, self).__init__(*args, **kwargs)
    def clean_content(self):
        """Check if content is empty."""
        if self.cleaned_data['content'] != '':
            raise forms.ValidationError('Invalid value')
        return ''
    def clean(self):
        # Form-wide check: throttle registration attempts per request source.
        if not check_rate_limit(self.request):
            raise forms.ValidationError(
                _('Too many registration attempts!')
            )
        return self.cleaned_data
class SetPasswordForm(DjangoSetPasswordForm):
    """Password change form with activity notification and session handling."""
    new_password1 = PasswordField(
        label=_("New password"),
        help_text=password_validation.password_validators_help_text_html(),
    )
    new_password2 = PasswordField(
        label=_("New password confirmation"),
    )
    # pylint: disable=W0222
    def save(self, request, delete_session=False):
        """Store the new password, record the change and fix up sessions.

        Signature intentionally differs from Django's save(commit=True),
        hence the W0222 disable above.
        """
        # Record the event first; self.user.password still holds the old
        # hash at this point.
        notify_account_activity(
            self.user,
            request,
            'password',
            password=self.user.password
        )
        # Change the password
        password = self.cleaned_data["new_password1"]
        self.user.set_password(password)
        self.user.save(update_fields=['password'])
        if delete_session:
            request.session.flush()
        else:
            # Updating the password logs out all other sessions for the user
            # except the current one.
            update_session_auth_hash(request, self.user)
            # Change key for current session
            request.session.cycle_key()
        messages.success(
            request,
            _('Your password has been changed.')
        )
class CaptchaForm(forms.Form):
    """Arithmetic CAPTCHA form; its state lives in the session."""
    captcha = forms.IntegerField(required=True)
    def __init__(self, request, form=None, data=None, *args, **kwargs):
        super(CaptchaForm, self).__init__(data, *args, **kwargs)
        # 'fresh' marks a just-generated captcha; a fresh captcha never
        # validates (see clean_captcha), forcing the user to answer first.
        self.fresh = False
        self.request = request
        # Inner form whose email is logged on success (may be None).
        self.form = form
        if data is None or 'captcha' not in request.session:
            self.generate_captcha()
            self.fresh = True
        else:
            # Restore (and consume) the captcha stored in the session.
            self.captcha = MathCaptcha.from_hash(
                request.session.pop('captcha')
            )
    def generate_captcha(self):
        """Create a new captcha and store its hash in the session."""
        self.captcha = MathCaptcha()
        self.request.session['captcha'] = self.captcha.hashed
        # Set correct label
        self.fields['captcha'].label = pgettext(
            'Question for a mathematics-based CAPTCHA, '
            'the %s is an arithmetic problem',
            'What is %s?'
        ) % self.captcha.display
    def clean_captcha(self):
        """Validation for captcha."""
        if (self.fresh or
                not self.captcha.validate(self.cleaned_data['captcha'])):
            # Wrong (or unanswered) captcha: issue a new expression and
            # invalidate the CSRF token before failing validation.
            self.generate_captcha()
            rotate_token(self.request)
            raise forms.ValidationError(
                _('Please check your math and try again with new expression.')
            )
        if self.form.is_valid():
            mail = self.form.cleaned_data['email']
        else:
            mail = 'NONE'
        LOGGER.info(
            'Passed captcha for %s (%s = %s)',
            mail,
            self.captcha.question,
            self.cleaned_data['captcha']
        )
class PasswordConfirmForm(forms.Form):
    """Form asking the user to confirm their current password."""
    password = PasswordField(
        label=_("Current password"),
        help_text=_(
            'Keep the field empty if you have not yet set your password.'
        ),
        required=False,
    )
    def __init__(self, request, *args, **kwargs):
        self.request = request
        super(PasswordConfirmForm, self).__init__(*args, **kwargs)
    def clean_password(self):
        """Check the entered password against the current user."""
        cur_password = self.cleaned_data['password']
        if self.request.user.has_usable_password():
            valid = self.request.user.check_password(cur_password)
        else:
            # No password set yet: only an empty value is accepted.
            valid = (cur_password == '')
        if not valid:
            rotate_token(self.request)
            raise forms.ValidationError(
                _('You have entered an invalid password.')
            )
        # Fix: Django expects clean_<field>() to return the cleaned value;
        # the original implicitly returned None, which would replace
        # cleaned_data['password'] with None.
        return cur_password
class ResetForm(EmailForm):
    """Password reset request form."""
    def clean_email(self):
        # The noreply address is the placeholder used for deleted or
        # anonymous users; never allow a reset for it.
        email = self.cleaned_data['email']
        if email == 'noreply@weblate.org':
            raise forms.ValidationError(
                'No password reset for deleted or anonymous user.'
            )
        return super(ResetForm, self).clean_email()
class LoginForm(forms.Form):
username = forms.CharField(
max_length=254,
label=_('Username or email')
)
password = PasswordField(
label=_("Password"),
)
error_messages = {
'invalid_login': _("Please enter a correct username and password. "
"Note that both fields may be case-sensitive."),
'inactive': _("This account is inactive."),
}
def __init__(self, request=None, *args, **kwargs):
    """
    The 'request' parameter is set for custom auth use by subclasses.
    The form data comes in via the standard 'data' kwarg.
    """
    self.request = request
    # Filled by clean() with the result of authenticate().
    self.user_cache = None
    super(LoginForm, self).__init__(*args, **kwargs)
def clean(self):
username = self.cleaned_data.get('username')
password = self.cleaned_data.get('password')
if username and password:
if not check_rate_limit(self.request):
| raise forms.ValidationError(
_('Too many authentication attempts!')
)
self.user_cache = authenticate(
username=username,
password=password
)
if self.user_cache is None:
try:
notify_account_activity(
try_get_user(username),
| self.request,
'failed-auth',
method='Password',
name=username,
)
except User.DoesNotExist:
pass
rotate_token(self.request)
raise forms.ValidationError(
|
##############################################################################
#
# OSIS stands for Open Student Information System. It's an application
# designed to manage the core business of higher education institutions,
# such as universities, faculties, institutes and professional schools.
# The core business involves the administration of students, teachers,
# courses, programs and so on.
#
# Copyright (C) 2015-2018 Université catholique de Louvain (http://www.uclouvain.be)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program i | s distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of this license - GNU General Public License - is available
# at the root of the source code of this program. If not,
# see http://www.gnu.org/licenses/.
#
####################################################### | #######################
from django import template
register = template.Library()
@register.filter
def score_display(value, decimal_option):
    """Format a score for template display.

    Returns '' for missing scores (None or '-'), two decimals when
    decimal_option is truthy, otherwise a rounded whole number. Falls back
    to the raw value when it cannot be formatted as a number.
    """
    if value is None or str(value) == '-':
        return ""
    try:
        if decimal_option:
            return "{0:.2f}".format(value)
        return "{0:.0f}".format(value)
    except (ValueError, TypeError):
        # BUG FIX: was a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit. Only formatting failures of
        # non-numeric input are expected here.
        return value
|
MAXDB = "SAP MaxDB"
MSSQL = "Microsoft SQL Server"
MYSQL = "MySQL"
ORACLE = "Oracle"
PGSQL = "PostgreSQL"
SQLITE = "SQLite"
SYBASE = "Sybase"
HSQLDB = "HSQLDB"
INFORMIX = "Informix"
class DBMS_DIRECTORY_NAME:
    """Short lowercase directory-name constant per DBMS."""
    ACCESS = "access"
    DB2 = "db2"
    FIREBIRD = "firebird"
    MAXDB = "maxdb"
    MSSQL = "mssqlserver"
    MYSQL = "mysql"
    ORACLE = "oracle"
    PGSQL = "postgresql"
    SQLITE = "sqlite"
    SYBASE = "sybase"
    HSQLDB = "hsqldb"
    INFORMIX = "informix"

class CUSTOM_LOGGING:
    """Numeric custom logging levels."""
    PAYLOAD = 9
    TRAFFIC_OUT = 8
    TRAFFIC_IN = 7

class OS:
    """Operating system display names."""
    LINUX = "Linux"
    WINDOWS = "Windows"

class PLACE:
    """Names for the parts of an HTTP request that carry parameters."""
    GET = "GET"
    POST = "POST"
    URI = "URI"
    COOKIE = "Cookie"
    USER_AGENT = "User-Agent"
    REFERER = "Referer"
    HOST = "Host"
    CUSTOM_POST = "(custom) POST"
    CUSTOM_HEADER = "(custom) HEADER"
class POST_HINT:
    """Labels for recognized POST body content formats."""
    SOAP = "SOAP"
    JSON = "JSON"
    JSON_LIKE = "JSON-like"
    MULTIPART = "MULTIPART"
    XML = "XML (generic)"
    ARRAY_LIKE = "Array-like"

class HTTPMETHOD:
    """Standard HTTP method names."""
    GET = "GET"
    POST = "POST"
    HEAD = "HEAD"
    PUT = "PUT"
    DELETE = "DELETE"
    TRACE = "TRACE"
    OPTIONS = "OPTIONS"
    CONNECT = "CONNECT"
    PATCH = "PATCH"

class NULLCONNECTION:
    """Labels for null-connection techniques."""
    HEAD = "HEAD"
    RANGE = "Range"
    SKIP_READ = "skip-read"

class REFLECTIVE_COUNTER:
    """Hit/miss labels for a reflective-value counter."""
    MISS = "MISS"
    HIT = "HIT"

class CHARSET_TYPE:
    """Numeric identifiers for character-set categories."""
    BINARY = 1
    DIGITS = 2
    HEXADECIMAL = 3
    ALPHA = 4
    ALPHANUM = 5

class HEURISTIC_TEST:
    """Numeric identifiers for heuristic test outcomes."""
    CASTED = 1
    NEGATIVE = 2
    POSITIVE = 3
class HASH:
    """Regular expressions matching known password-hash formats.

    All patterns are case-insensitive and anchored with \\A...\\Z.
    """
    MYSQL = r'(?i)\A\*[0-9a-f]{40}\Z'
    MYSQL_OLD = r'(?i)\A(?![0-9]+\Z)[0-9a-f]{16}\Z'
    POSTGRES = r'(?i)\Amd5[0-9a-f]{32}\Z'
    MSSQL = r'(?i)\A0x0100[0-9a-f]{8}[0-9a-f]{40}\Z'
    MSSQL_OLD = r'(?i)\A0x0100[0-9a-f]{8}[0-9a-f]{80}\Z'
    MSSQL_NEW = r'(?i)\A0x0200[0-9a-f]{8}[0-9a-f]{128}\Z'
    ORACLE = r'(?i)\As:[0-9a-f]{60}\Z'
    ORACLE_OLD = r'(?i)\A[01-9a-f]{16}\Z'
    MD5_GENERIC = r'(?i)\A[0-9a-f]{32}\Z'
    SHA1_GENERIC = r'(?i)\A[0-9a-f]{40}\Z'
    SHA224_GENERIC = r'(?i)\A[0-9a-f]{28}\Z'
    SHA384_GENERIC = r'(?i)\A[0-9a-f]{48}\Z'
    SHA512_GENERIC = r'(?i)\A[0-9a-f]{64}\Z'
    CRYPT_GENERIC = r'(?i)\A(?!\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}\Z)(?![0-9]+\Z)[./0-9A-Za-z]{13}\Z'
    WORDPRESS = r'(?i)\A\$P\$[./0-9A-Za-z]{31}\Z'

# Reference: http://www.zytrax.com/tech/web/mobile_ids.html
class MOBILES:
    """(display name, User-Agent string) pairs for common mobile devices."""
    BLACKBERRY = ("BlackBerry 9900", "Mozilla/5.0 (BlackBerry; U; BlackBerry 9900; en) AppleWebKit/534.11+ (KHTML, like Gecko) Version/7.1.0.346 Mobile Safari/534.11+")
    GALAXY = ("Samsung Galaxy S", "Mozilla/5.0 (Linux; U; Android 2.2; en-US; SGH-T959D Build/FROYO) AppleWebKit/533.1 (KHTML, like Gecko) Version/4.0 Mobile Safari/533.1")
    HP = ("HP iPAQ 6365", "Mozilla/4.0 (compatible; MSIE 4.01; Windows CE; PPC; 240x320; HP iPAQ h6300)")
    HTC = ("HTC Sensation", "Mozilla/5.0 (Linux; U; Android 4.0.3; de-ch; HTC Sensation Build/IML74K) AppleWebKit/534.30 (KHTML, like Gecko) Version/4.0 Mobile Safari/534.30")
    IPHONE = ("Apple iPhone 4s", "Mozilla/5.0 (iPhone; CPU iPhone OS 5_1 like Mac OS X) AppleWebKit/534.46 (KHTML, like Gecko) Version/5.1 Mobile/9B179 Safari/7534.48.3")
    NEXUS = ("Google Nexus 7", "Mozilla/5.0 (Linux; Android 4.1.1; Nexus 7 Build/JRO03D) AppleWebKit/535.19 (KHTML, like Gecko) Chrome/18.0.1025.166 Safari/535.19")
    NOKIA = ("Nokia N97", "Mozilla/5.0 (SymbianOS/9.4; Series60/5.0 NokiaN97-1/10.0.012; Profile/MIDP-2.1 Configuration/CLDC-1.1; en-us) AppleWebKit/525 (KHTML, like Gecko) WicKed/7.1.12344")
class PROXY_TYPE:
    """Supported proxy protocol labels."""
    HTTP = "HTTP"
    HTTPS = "HTTPS"
    SOCKS4 = "SOCKS4"
    SOCKS5 = "SOCKS5"

class REGISTRY_OPERATION:
    """Registry operation verbs."""
    READ = "read"
    ADD = "add"
    DELETE = "delete"

class DUMP_FORMAT:
    """Supported dump output formats."""
    CSV = "CSV"
    HTML = "HTML"
    SQLITE = "SQLITE"

class HTTP_HEADER:
    """Canonical HTTP header names."""
    ACCEPT = "Accept"
    ACCEPT_CHARSET = "Accept-Charset"
    ACCEPT_ENCODING = "Accept-Encoding"
    ACCEPT_LANGUAGE = "Accept-Language"
    AUTHORIZATION = "Authorization"
    CACHE_CONTROL = "Cache-Control"
    CONNECTION = "Connection"
    CONTENT_ENCODING = "Content-Encoding"
    CONTENT_LENGTH = "Content-Length"
    CONTENT_RANGE = "Content-Range"
    CONTENT_TYPE = "Content-Type"
    COOKIE = "Cookie"
    EXPIRES = "Expires"
    HOST = "Host"
    IF_MODIFIED_SINCE = "If-Modified-Since"
    LAST_MODIFIED = "Last-Modified"
    LOCATION = "Location"
    PRAGMA = "Pragma"
    PROXY_AUTHORIZATION = "Proxy-Authorization"
    PROXY_CONNECTION = "Proxy-Connection"
    RANGE = "Range"
    REFERER = "Referer"
    REFRESH = "Refresh"  # Reference: http://stackoverflow.com/a/283794
    SERVER = "Server"
    SET_COOKIE = "Set-Cookie"
    TRANSFER_ENCODING = "Transfer-Encoding"
    URI = "URI"
    USER_AGENT = "User-Agent"
    VIA = "Via"
    X_POWERED_BY = "X-Powered-By"
class EXPECTED:
    """Labels for expected query result kinds."""
    BOOL = "bool"
    INT = "int"

class OPTION_TYPE:
    """Labels for option value types."""
    BOOLEAN = "boolean"
    INTEGER = "integer"
    FLOAT = "float"
    STRING = "string"

class HASHDB_KEYS:
    """String keys used to persist values in the hash database."""
    DBMS = "DBMS"
    DBMS_FORK = "DBMS_FORK"
    CHECK_WAF_RESULT = "CHECK_WAF_RESULT"
    CONF_TMP_PATH = "CONF_TMP_PATH"
    KB_ABS_FILE_PATHS = "KB_ABS_FILE_PATHS"
    KB_BRUTE_COLUMNS = "KB_BRUTE_COLUMNS"
    KB_BRUTE_TABLES = "KB_BRUTE_TABLES"
    KB_CHARS = "KB_CHARS"
    KB_DYNAMIC_MARKINGS = "KB_DYNAMIC_MARKINGS"
    KB_INJECTIONS = "KB_INJECTIONS"
    KB_ERROR_CHUNK_LENGTH = "KB_ERROR_CHUNK_LENGTH"
    KB_XP_CMDSHELL_AVAILABLE = "KB_XP_CMDSHELL_AVAILABLE"
    OS = "OS"

class REDIRECTION:
    """Yes/no flags for following redirections."""
    YES = "Y"
    NO = "N"
class PAYLOAD:
    """Numeric-keyed lookup tables describing payload metadata:
    injection technique names, parameter quoting styles, risk levels
    and the SQL clauses a payload applies to."""
    SQLINJECTION = {
        1: "boolean-based blind",
        2: "error-based",
        3: "inline query",
        4: "stacked queries",
        5: "AND/OR time-based blind",
        6: "UNION query",
    }
    PARAMETER = {
        1: "Unescaped numeric",
        2: "Single quoted string",
        3: "LIKE single quoted string",
        4: "Double quoted string",
        5: "LIKE double quoted string",
    }
    RISK = {
        0: "No risk",
        1: "Low risk",
        2: "Medium risk",
        3: "High risk",
    }
    CLAUSE = {
        0: "Always",
        1: "WHERE",
        2: "GROUP BY",
        3: "ORDER BY",
        4: "LIMIT",
        5: "OFFSET",
        6: "TOP",
        7: "Table name",
        8: "Column name",
    }

class METHOD:
    """Labels for response comparison methods."""
    COMPARISON = "comparison"
    GREP = "grep"
    TIME = "time"
    UNION = "union"

class TECHNIQUE:
    """Numeric identifiers for injection techniques (parallels
    PAYLOAD.SQLINJECTION keys)."""
    BOOLEAN = 1
    ERROR = 2
    QUERY = 3
    STACKED = 4
    TIME = 5
    UNION = 6

class WHERE:
    """Numeric identifiers for payload placement strategies."""
    ORIGINAL = 1
    NEGATIVE = 2
    REPLACE = 3
class WIZARD:
    """Enumeration-task presets for the wizard, ordered by thoroughness."""
    BASIC = ("getBanner", "getCurrentUser", "getCurrentDb", "isDba")
    INTERMEDIATE = ("getBanner", "getCurrentUser", "getCurrentDb", "isDba", "getUsers", "getDbs", "getTables", "getSchema", "excludeSysDbs")
    ALL = ("getBanner", "getCurrentUser", "getCurrentDb", "isDba", "getHostname", "getUsers", "getPasswordHashes", "getPrivileges", "getRoles", "dumpAll")

class ADJUST_TIME_DELAY:
    """Modes for time-delay adjustment."""
    DISABLE = -1
    NO = 0
    YES = 1

class WEB_API:
    """Server-side web technology labels."""
    PHP = "php"
    ASP = "asp"
    ASPX = "aspx"
    JSP = "jsp"

class CONTENT_TYPE:
    """Sequential identifiers for the kinds of output content."""
    TARGET = 0
    TECHNIQUES = 1
    DBMS_FINGERPRINT = 2
    BANNER = 3
    CURRENT_USER = 4
    CURRENT_DB = 5
    HOSTNAME = 6
    IS_DBA = 7
    USERS = 8
    PASSWORDS = 9
    PRIVILEGES = 10
    ROLES = 11
    DBS = 12
    TABLES = 13
    COLUMNS = 14
    SCHEMA = 15
    COUNT = 16
    DUMP_TABLE = 17
    SEARCH = 18
    SQL_QUERY = 19
    COMMON_TABLES = 20
    COMMON_COLUMNS = 21
    FILE_READ = 22
    FILE_WRITE = 23
    OS_CMD = 24
    REG_READ = 25
PART_RUN_CONTENT_TYPES = {
"checkDbms": CONTENT_TYPE.TECHNIQUES,
"getFingerprint": CONTENT_TYPE.DBMS_FINGERPRINT,
"getBanner": CONTENT_TYPE.BANNER,
"getCurrentUser": CONTENT_TYPE.CURRENT_USER,
"getCurrentDb": CONT |
self._check_reimport(node)
self._check_import_as_rename(node)
self._check_toplevel(node)
names = [name for name, _ in node.names]
if len(names) >= 2:
self.add_message("multiple-imports", args=", ".join(names), node=node)
for name in names:
self.check_deprecated_module(node, name)
self._check_preferred_module(node, name)
imported_module = self._get_imported_module(node, name)
if isinstance(node.parent, astroid.Module):
# Allow imports nested
self._check_position(node)
if isinstance(node.scope(), astroid.Module):
self._record_import(node, imported_module)
if imported_module is None:
continue
self._add_imported_module(node, imported_module.name)
@check_messages(*MSGS)
def visit_importfrom(self, node):
    """triggered when a from statement is seen"""
    basename = node.modname
    imported_module = self._get_imported_module(node, basename)
    # Run the per-import checks; each may emit its own message.
    self._check_import_as_rename(node)
    self._check_misplaced_future(node)
    self.check_deprecated_module(node, basename)
    self._check_preferred_module(node, basename)
    self._check_wildcard_imports(node, imported_module)
    self._check_same_line_imports(node)
    self._check_reimport(node, basename=basename, level=node.level)
    self._check_toplevel(node)
    if isinstance(node.parent, astroid.Module):
        # Allow imports nested
        self._check_position(node)
    if isinstance(node.scope(), astroid.Module):
        self._record_import(node, imported_module)
    if imported_module is None:
        # Module could not be resolved; nothing to record as imported.
        return
    for name, _ in node.names:
        if name != "*":
            # Record each imported name fully qualified.
            self._add_imported_module(node, f"{imported_module.name}.{name}")
        else:
            # Wildcard import: record the module itself.
            self._add_imported_module(node, imported_module.name)
@check_messages(*MSGS)
def leave_module(self, node):
    """Check import grouping for the module and reset per-module state."""
    # Check imports are grouped by category (standard, 3rd party, local)
    std_imports, ext_imports, loc_imports = self._check_imports_order(node)
    # Check that imports are grouped by package within a given category
    met_import = set()  # set for 'import x' style
    met_from = set()  # set for 'from x import y' style
    current_package = None
    for import_node, import_name in std_imports + ext_imports + loc_imports:
        if not self.linter.is_message_enabled(
            "ungrouped-imports", import_node.fromlineno
        ):
            continue
        # 'import x' and 'from x import y' are tracked separately.
        if isinstance(import_node, astroid.node_classes.ImportFrom):
            met = met_from
        else:
            met = met_import
        package, _, _ = import_name.partition(".")
        # A package seen before, reappearing after another package started,
        # means its imports are not contiguous.
        if current_package and current_package != package and package in met:
            self.add_message("ungrouped-imports", node=import_node, args=package)
        current_package = package
        met.add(package)
    # Reset the per-module bookkeeping for the next module.
    self._imports_stack = []
    self._first_non_import_node = None
def compute_first_non_import_node(self, node):
    """Remember the first module-level statement that is not an import.

    Later imports are compared against this position when emitting
    'wrong-import-position'.
    """
    if not self.linter.is_message_enabled("wrong-import-position", node.fromlineno):
        return
    # if the node does not contain an import instruction, and if it is the
    # first node of the module, keep a track of it (all the import positions
    # of the module will be compared to the position of this first
    # instruction)
    if self._first_non_import_node:
        return
    if not isinstance(node.parent, astroid.Module):
        return
    nested_allowed = [astroid.TryExcept, astroid.TryFinally]
    is_nested_allowed = [
        allowed for allowed in nested_allowed if isinstance(node, allowed)
    ]
    # try/except or try/finally blocks that themselves contain imports are
    # tolerated before other imports.
    if is_nested_allowed and any(
        node.nodes_of_class((astroid.Import, astroid.ImportFrom))
    ):
        return
    if isinstance(node, astroid.Assign):
        # Add compatibility for module level dunder names
        # https://www.python.org/dev/peps/pep-0008/#module-level-dunder-names
        valid_targets = [
            isinstance(target, astroid.AssignName)
            and target.name.startswith("__")
            and target.name.endswith("__")
            for target in node.targets
        ]
        if all(valid_targets):
            return
    self._first_non_import_node = node

# Any of these node types can mark the first non-import statement.
visit_tryfinally = (
    visit_tryexcept
) = (
    visit_assignattr
) = (
    visit_assign
) = (
    visit_ifexp
) = visit_comprehension = visit_expr = visit_if = compute_first_non_import_node
def visit_functiondef(self, node):
    """Record a module-level definition as the first non-import node,
    unless its enclosing If/Try block contains imports."""
    if not self.linter.is_message_enabled("wrong-import-position", node.fromlineno):
        return
    # If it is the first non import instruction of the module, record it.
    if self._first_non_import_node:
        return
    # Check if the node belongs to an `If` or a `Try` block. If they
    # contain imports, skip recording this node.
    if not isinstance(node.parent.scope(), astroid.Module):
        return
    root = node
    # Walk up to the top-level statement containing this definition.
    while not isinstance(root.parent, astroid.Module):
        root = root.parent
    if isinstance(root, (astroid.If, astroid.TryFinally, astroid.TryExcept)):
        if any(root.nodes_of_class((astroid.Import, astroid.ImportFrom))):
            return
    self._first_non_import_node = node

# Other definition statements are handled identically.
visit_classdef = visit_for = visit_while = visit_functiondef
def _check_misplaced_future(self, node):
    """Emit 'misplaced-future' when a __future__ import is preceded by
    anything other than another __future__ import."""
    if node.modname != "__future__":
        return
    # check if this is the first non-docstring statement in the module
    prev = node.previous_sibling()
    if not prev:
        return
    # consecutive future statements are possible
    if isinstance(prev, astroid.ImportFrom) and prev.modname == "__future__":
        return
    self.add_message("misplaced-future", node=node)
def _check_same_line_imports(self, node):
    """Report every name that appears more than once in a single import."""
    occurrences = collections.Counter(name for name, _ in node.names)
    for name, count in occurrences.items():
        if count > 1:
            self.add_message("reimported", node=node, args=(name, node.fromlineno))
def _check_position(self, node):
    """Check `node` import or importfrom node position is correct

    Send a message if `node` comes before another instruction
    """
    # No non-import statement seen yet: the import is well placed.
    if not self._first_non_import_node:
        return
    self.add_message("wrong-import-position", node=node, args=node.as_string())
def _record_import(self, node, importedmodnode):
    """Record the package `node` imports from"""
    is_from = isinstance(node, astroid.ImportFrom)
    if is_from:
        importedname = node.modname
    else:
        importedname = importedmodnode.name if importedmodnode else None
    if not importedname:
        # Fall back on the first imported name, e.g. 'from . import pkg'.
        importedname = node.names[0][0].split(".")[0]
    if is_from and (node.level or 0) >= 1:
        # Keep the leading dot for explicit relative imports so local
        # packages are recognised:
        #   'from .my_package1 import MyClass1' -> '.my_package1'
        #   'from . import my_package2'        -> '.my_package2'
        importedname = "." + importedname
    self._imports_stack.append((node, importedname))
@staticmethod
def _is_fallback_import(node, imports):
imports = [import_node for (import_node, _) in imports]
|
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration: add an integer 'order' column (default 0)
    to core_userrole and core_userprofile."""

    def forwards(self, orm):
        """Apply the migration."""
        # Adding field 'UserRole.order'
        db.add_column('core_userrole', 'order', self.gf('django.db.models.fields.IntegerField')(default=0), keep_default=False)
        # Adding field 'UserProfile.order'
        db.add_column('core_userprofile', 'order', self.gf('django.db.models.fields.IntegerField')(default=0), keep_default=False)

    def backwards(self, orm):
        """Revert the migration."""
        # Deleting field 'UserRole.order'
        db.delete_column('core_userrole', 'order')
        # Deleting field 'UserProfile.order'
        db.delete_column('core_userprofile', 'order')

    # Frozen ORM state used by South to build the `orm` object above.
    models = {
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'contenttypes.contenttype': {
            'Meta': {'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'core.category': {
            'Meta': {'object_name': 'Category'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
            'shortname': ('django.db.models.fields.CharField', [], {'max_length': '10'})
        },
        'core.userprofile': {
            'Meta': {'object_name': 'UserProfile'},
            'birthday': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
            'games': ('django.db.models.fields.TextField', [], {}),
            'genre': ('django.db.models.fields.TextField', [], {}),
            'hobbies': ('django.db.models.fields.TextField', [], {}),
            'howfound': ('django.db.models.fields.TextField', [], {}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'picurl': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'role': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.UserRole']"}),
            'scene': ('django.db.models.fields.TextField', [], {}),
            'status': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'unique': 'True'}),
            'visible': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'})
        },
        'core.userrole': {
            'Meta': {'object_name': 'UserRole'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'plural': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        }
    }

    complete_apps = ['core']
|
_MSR_SINT6',
0x40000097: 'HV_X64_MSR_SINT7',
0x40000098: 'HV_X64_MSR_SINT8',
0x40000099: 'HV_X64_MSR_SINT9',
0x4000009A: 'HV_X64_MSR_SINT10',
0x4000009B: 'HV_X64_MSR_SINT11',
0x4000009C: 'HV_X64_MSR_SINT12',
0x4000009D: 'HV_X64_MSR_SINT13',
0x4000009E: 'HV_X64_MSR_SINT14',
0x4000009F: 'HV_X64_MSR_SINT15',
0x400000B0: 'HV_X64_MSR_STIMER0_CONFIG',
0x400000B1: 'HV_X64_MSR_STIMER0_COUNT',
0x400000B2: 'HV_X64_MSR_STIMER1_CONFIG',
0x400000B3: 'HV_X64_MSR_STIMER1_COUNT',
0x400000B4: 'HV_X64_MSR_STIMER2_CONFIG',
0x400000B5: 'HV_X64_MSR_STIMER2_COUNT',
0x400000B6: 'HV_X64_MSR_STIMER3_CONFIG',
0x400000B7: 'HV_X64_MSR_STIMER3_COUNT',
0x400000C1: 'HV_X64_MSR_POWER_STATE_TRIGGER_C1',
0x400000C2: 'HV_X64_MSR_POWER_STATE_TRIGGER_C2',
0x400000C3: 'HV_X64_MSR_POWER_STATE_TRIGGER_C3',
0x400000D1: 'HV_X64_MSR_POWER_STATE_CONFIG_C1',
0x400000D2: 'HV_X64_MSR_POWER_STATE_CONFIG_C2',
0x400000D3: 'HV_X64_MSR_POWER_STATE_CONFIG_C3',
0x400000E0: 'HV_X64_MSR_STATS_PARTITION_RETAIL_PAGE',
0x400000E1: 'HV_X64_MSR_STATS_PARTITION_INTERNAL_PAGE',
0x400000E2: 'HV_X64_MSR_STATS_VP_RETAIL_PAGE',
0x400000E3: 'HV_X64_MSR_STATS_VP_INTERNAL_PAGE',
0x400000F0: 'HV_X64_MSR_GUEST_IDLE',
0x400000F1: 'HV_X64_MSR_SYNTH_DEBUG_CONTROL',
0x400000F2: 'HV_X64_MSR_SYNTH_DEBUG_STATUS',
0x400000F3: 'HV_X64_MSR_SYNTH_DEBUG_SEND_BUFFER',
0x400000F4: 'HV_X64_MSR_SYNTH_DEBUG_RECEIVE_BUFFER',
0x400000F5: 'HV_X64_MSR_SYNTH_DEBUG_PENDING_BUFFER',
0x40000100: 'HV_X64_MSR_CRASH_P0',
0x40000101: 'HV_X64_MSR_CRASH_P1',
0x40000102: 'HV_X64_MSR_CRASH_P2',
0x40000103: 'HV_X64_MSR_CRASH_P3',
0x40000104: 'HV_X64_MSR_CRASH_P4',
0x40000105: 'HV_X64_MSR_CRASH_CTL'
}
def get_msr_name(code, defvalue=''):
    """Return the symbolic name of MSR *code*, or *defvalue* if unknown."""
    # dict.get performs a single lookup instead of membership test + index.
    return msrs.get(code, defvalue)
# Hyper-V hypercall result codes: numeric status -> symbolic name.
hypercall_status_codes = {
    0x0000: 'HV_STATUS_SUCCESS',
    0x0001: 'HV_RESERVED_01H',
    0x0002: 'HV_STATUS_INVALID_HYPERCALL_CODE',
    0x0003: 'HV_STATUS_INVALID_HYPERCALL_INPUT',
    0x0004: 'HV_STATUS_INVALID_ALIGNMENT',
    0x0005: 'HV_STATUS_INVALID_PARAMETER',
    0x0006: 'HV_STATUS_ACCESS_DENIED',
    0x0007: 'HV_STATUS_INVALID_PARTITION_STATE',
    0x0008: 'HV_STATUS_OPERATION_DENIED',
    0x0009: 'HV_STATUS_UNKNOWN_PROPERTY',
    0x000A: 'HV_STATUS_PROPERTY_VALUE_OUT_OF_RANGE',
    0x000B: 'HV_STATUS_INSUFFICIENT_MEMORY',
    0x000C: 'HV_STATUS_PARTITION_TOO_DEEP',
    0x000D: 'HV_STATUS_INVALID_PARTITION_ID',
    0x000E: 'HV_STATUS_INVALID_VP_INDEX',
    0x000F: 'HV_RESERVED_0FH',
    0x0010: 'HV_RESERVED_10H',
    0x0011: 'HV_STATUS_INVALID_PORT_ID',
    0x0012: 'HV_STATUS_INVALID_CONNECTION_ID',
    0x0013: 'HV_STATUS_INSUFFICIENT_BUFFERS',
    0x0014: 'HV_STATUS_NOT_ACKNOWLEDGED',
    0x0015: 'HV_RESERVED_15H',
    0x0016: 'HV_STATUS_ACKNOWLEDGED',
    0x0017: 'HV_STATUS_INVALID_SAVE_RESTORE_STATE',
    0x0018: 'HV_STATUS_INVALID_SYNIC_STATE',
    0x0019: 'HV_STATUS_OBJECT_IN_USE',
    0x001A: 'HV_STATUS_INVALID_PROXIMITY_DOMAIN_INFO',
    0x001B: 'HV_STATUS_NO_DATA',
    0x001C: 'HV_STATUS_INACTIVE',
    0x001D: 'HV_STATUS_NO_RESOURCES',
    0x001E: 'HV_STATUS_FEATURE_UNAVAILABLE',
    0x001F: 'HV_STATUS_PARTIAL_PACKET',
    0x0020: 'HV_STATUS_PROCESSOR_FEATURE_SSE3_NOT_SUPPORTED',
    0x0021: 'HV_STATUS_PROCESSOR_FEATURE_LAHFSAHF_NOT_SUPPORTED',
    0x0022: 'HV_STATUS_PROCESSOR_FEATURE_SSSE3_NOT_SUPPORTED',
    0x0023: 'HV_STATUS_PROCESSOR_FEATURE_SSE4_1_NOT_SUPPORTED',
    0x0024: 'HV_STATUS_PROCESSOR_FEATURE_SSE4_2_NOT_SUPPORTED',
    0x0025: 'HV_STATUS_PROCESSOR_FEATURE_SSE4A_NOT_SUPPORTED',
    0x0026: 'HV_STATUS_PROCESSOR_FEATURE_XOP_NOT_SUPPORTED',
    0x0027: 'HV_STATUS_PROCESSOR_FEATURE_POPCNT_NOT_SUPPORTED',
    0x0028: 'HV_STATUS_PROCESSOR_FEATURE_CMPXCHG16B_NOT_SUPPORTED',
    0x0029: 'HV_STATUS_PROCESSOR_FEATURE_ALTMOVCR8_NOT_SUPPORTED',
    0x002A: 'HV_STATUS_PROCESSOR_FEATURE_LZCNT_NOT_SUPPORTED',
    0x002B: 'HV_STATUS_PROCESSOR_FEATURE_MISALIGNED_SSE_NOT_SUPPORTED',
    0x002C: 'HV_STATUS_PROCESSOR_FEATURE_MMX_EXT_NOT_SUPPORTED',
    0x002D: 'HV_STATUS_PROCESSOR_FEATURE_3DNOW_NOT_SUPPORTED',
    0x002E: 'HV_STATUS_PROCESSOR_FEATURE_EXTENDED_3DNOW_NOT_SUPPORTED',
    0x002F: 'HV_STATUS_PROCESSOR_FEATURE_PAGE_1GB_NOT_SUPPORTED',
    0x0030: 'HV_STATUS_PROCESSOR_CACHE_LINE_FLUSH_SIZE_INCOMPATIBLE',
    0x0031: 'HV_STATUS_PROCESSOR_FEATURE_XSAVE_NOT_SUPPORTED',
    0x0032: 'HV_STATUS_PROCESSOR_FEATURE_XSAVEOPT_NOT_SUPPORTED',
    0x0033: 'HV_STATUS_INSUFFICIENT_BUFFER',
    0x0034: 'HV_STATUS_PROCESSOR_FEATURE_XSAVE_AVX_NOT_SUPPORTED',
    0x0035: 'HV_STATUS_PROCESSOR_FEATURE_XSAVE_FEATURE_NOT_SUPPORTED',
    0x0036: 'HV_STATUS_PROCESSOR_XSAVE_SAVE_AREA_INCOMPATIBLE',
    0x0037: 'HV_STATUS_INCOMPATIBLE_PROCESSOR',
    0x0038: 'HV_STATUS_INSUFFICIENT_DEVICE_DOMAINS',
    0x0039: 'HV_STATUS_PROCESSOR_FEATURE_AES_NOT_SUPPORTED',
    0x003A: 'HV_STATUS_PROCESSOR_FEATURE_PCLMULQDQ_NOT_SUPPORTED',
    0x003B: 'HV_STATUS_PROCESSOR_FEATURE_INCOMPATIBLE_XSAVE_FEATURES',
    0x003C: 'HV_STATUS_CPUID_FEATURE_VALIDATION_ERROR',
    0x003D: 'HV_STATUS_CPUID_XSAVE_FEATURE_VALIDATION_ERROR',
    0x003E: 'HV_STATUS_PROCESSOR_STARTUP_TIMEOUT',
    0x003F: 'HV_STATUS_SMX_ENABLED',
    0x0040: 'HV_STATUS_PROCESSOR_FEATURE_PCID_NOT_SUPPORTED',
    0x0041: 'HV_STATUS_INVALID_LP_INDEX',
    0x0042: 'HV_STATUS_FEATURE_FMA4_NOT_SUPPORTED',
    0x0043: 'HV_STATUS_FEATURE_F16C_NOT_SUPPORTED',
    0x0044: 'HV_STATUS_PROCESSOR_FEATURE_RDRAND_NOT_SUPPORTED',
    0x0045: 'HV_STATUS_PROCESSOR_FEATURE_RDWRFSGS_NOT_SUPPORTED',
    0x0046: 'HV_STATUS_PROCESSOR_FEATURE_SMEP_NOT_SUPPORTED',
    0x0047: 'HV_STATUS_PROCESSOR_FEATURE_ENHANCED_FAST_STRING_NOT_SUPPORTED',
    0x0048: 'HV_STATUS_PROCESSOR_FEATURE_MOVBE_NOT_SUPPORTED',
    0x0049: 'HV_STATUS_PROCESSOR_FEATURE_BMI1_NOT_SUPPORTED',
    0x004A: 'HV_STATUS_PROCESSOR_FEATURE_BMI2_NOT_SUPPORTED',
    0x004B: 'HV_STATUS_PROCESSOR_FEATURE_HLE_NOT_SUPPORTED',
    0x004C: 'HV_STATUS_PROCESSOR_FEATURE_RTM_NOT_SUPPORTED',
    0x004D: 'HV_STATUS_PROCESSOR_FEATURE_XSAVE_FMA_NOT_SUPPORTED',
    0x004E: 'HV_STATUS_PROCESSOR_FEATURE_XSAVE_AVX2_NOT_SUPPORTED',
    0x004F: 'HV_STATUS_PROCESSOR_FEATURE_NPIEP1_NOT_SUPPORTED'
}

def get_hypercall_status(code, defvalue=''):
    """Return the symbolic name of hypercall status *code*, or *defvalue*
    if the code is unknown."""
    # dict.get performs a single lookup instead of membership test + index.
    return hypercall_status_codes.get(code, defvalue)
hypercall_names = {
0x0001: 'HvSwitchVirtualAddressSpace',
0x0002: 'HvFlushVirtualAddressSpace',
0x0003: 'HvFlushVirtualAddressList',
0x0004: 'HvGetLogicalProcessorRunTime',
0x0008: 'HvNotifyLongSpinWait',
0x0009: 'HvParkedVirtualProcessors',
0x0040: 'HvCreatePartition',
0x0041: 'HvInitializePartition',
0x0042: 'HvFinalizePartition',
0x0043: 'HvDeletePartition',
0x0044: 'HvGetPartitionProperty',
0x0045: 'HvSetPartitionProperty',
0x0046: 'HvGetPartitionId',
0x0047: 'HvGetNextChildPartition',
0x0048: 'HvDepositMemory',
0x0049: 'HvWithdrawMemory',
0x004A: 'HvGetMemoryBalance',
0x004B: 'HvMapGpaPages',
0x004C: 'HvUnmapGpaPages',
0x004D: 'HvInstallIntercept',
0x004E: 'HvCreateVp',
0x004F: 'HvDeleteVp',
0x0050: 'HvGetVpRegisters',
0x00 |
from clawpack.petclaw.solution import Solution
#from petclaw.io.petsc import read_petsc
import matplotlib
matplotlib.use('Agg')
import matplotlib.pylab as pl
from matplotlib import rc
#rc('text', usetex=True)
import numpy as np
import os
def write_slices(frame, file_prefix, path, name):
    """Write the y-averaged first solution component along x to '<name>.txt'.

    Each output line is 'x_center mean_over_y(q[0, i, :])'.
    Assumes q is indexed as (component, x, y) -- TODO confirm against the
    petclaw output layout.
    """
    sol = Solution(frame, file_format='petsc', path=path, read_aux=False,
                   file_prefix=file_prefix)
    x = sol.state.grid.x.centers
    y = sol.state.grid.y.centers
    my = len(y)
    q = sol.state.q
    # Context manager guarantees the file is closed even if writing fails
    # (the original left the handle open on an exception).
    with open(name + '.txt', 'w') as f:
        f.writelines(str(xc) + " " + str(sum(q[0, i, :]) / my) + "\n"
                     for i, xc in enumerate(x))
if __name__ == "__main__":
    # Process frame 970 of the '_p' output; writes stress_normal.txt in CWD.
    write_slices(970, 'claw_p', './_output/_p/', 'stress_normal')
|
#!/usr/bin/python2
'''
Implementing grep in python in less than 10 lines of code...
'''
# NOTE(review): the shebang says python2 but print() is used as a function
# with multiple arguments (python3 style) -- confirm the intended version.
import re   # for compile, search
import sys  # for argv

# command line usage...
if len(sys.argv) < 3:
    print('usage: grep.py [expr] [files...]')
    sys.exit(1)

# first compile the regular expression...
pattern = re.compile(sys.argv[1])
for filename in sys.argv[2:]:
    # close each file deterministically instead of leaking the handle
    with open(filename) as stream:
        # grep reports 1-based line numbers
        for num, line in enumerate(stream, 1):
            # BUG FIX: re.match only matches at the start of the line;
            # grep semantics require finding the pattern anywhere (search).
            if pattern.search(line):
                print(filename, num, line)
|
>= 1000 and user not in ['nobody', 'project']
# GROUPS
def is_group(name):
    """Return True if *name* is an existing system group."""
    try:
        grp.getgrnam(name)
    except KeyError:
        return False
    return True
def list_project_groups():
    """Sorted names of system groups that have a directory under PROJECT."""
    names = [g.gr_name for g in grp.getgrall()
             if exists(PROJECT, g.gr_name)]
    names.sort()
    return names
def list_user_groups(user):
    """Return the project groups *user* belongs to, excluding 'common'.

    Membership counts both the user's primary gid and explicit group
    membership. Returns [] when the user does not exist.
    """
    try:
        pw_gid = pwd.getpwnam(user).pw_gid
    except KeyError:  # user doesn't exist
        return []
    else:
        return [g.gr_name for g in grp.getgrall()
                if (g.gr_gid == pw_gid or user in g.gr_mem)
                and exists(PROJECT, g.gr_name)
                and g.gr_name != 'common']
def reset_passwords(reset):
    """Set a fresh numeric password for every user in *reset* and print them.

    Passwords are PWDLENG+1 digit numbers piped to chpasswd as
    'user:password' lines.
    """
    # SECURITY FIX: the `random` module is a predictable PRNG and must not
    # be used for passwords; use a CSPRNG instead.
    import secrets
    rng = secrets.SystemRandom()
    ps = []
    for u in reset:
        p = rng.randint(1 * (10 ** PWDLENG), 1 * (10 ** (PWDLENG + 1)) - 1)
        ps.append((u, p))
    ls = '\n'.join('%s:%s' % (u, p) for u, p in ps)
    subprocess.run(['/sbin/chpasswd'], input=ls, check=1, text=1)
    print("Reset passwords:\n%s" % '\n'.join(' %-15s%s' % (u, p) for u, p in ps))
# GENERATE COMMANDS
def add_group(group):
    """Yield shell commands that create project group *group* plus its
    store directory, or diagnostic echo/abort commands when the name is
    unusable."""
    if is_group(group):
        yield "echo %s already exists" % group
        return
    if is_normal_user(group):
        yield 'abort %s is the name of a regular user' % group
        return
    r = join(PROJECT, group)
    if exists(r):
        yield "abort %s exists although the group does not" % group
        return
    yield 'addgroup %s' % group
    yield 'mkdir -p %s/store' % r
    # 2770: setgid + owner/group rwx so new files inherit the group.
    yield 'chmod 2770 %s' % r
    yield 'chown project:%s %s' % (group, r)
    yield 'echo added group %s' % group
def maybe_kick_all_users():
    """Return interactive commands that offer to remove each jupyter-<user>
    container whose host credentials ('id user') differ from the container's.
    """
    # First list all jupyter containers that are user notebook servers and
    # build a dict that maps username to the output of "id user" on the host.
    users = {}
    s = subprocess.check_output(['docker', 'ps'], text=1)
    for line in s.splitlines():
        words = line.split()
        if words[-1].startswith('jupyter-'):
            _, user = words[-1].split('-', 1)
            hid = subprocess.check_output(['id', user], text=1).strip()
            users[user] = hid
    # Next go through the usernames and spawn a background process that runs
    # 'docker id jupyter-user id user'. This means that on a busy server we
    # might start 50-100 'docker exec' processes.
    # We then compare the output of 'id user' on the host vs in docker.
    kick = []
    # docker_id is a helper defined elsewhere in this file; presumably it
    # runs 'id user' inside the user's container -- confirm.
    with ProcessPoolExecutor() as exe:
        jobs = [(user, exe.submit(docker_id, user)) for user in users]
        for user, job in jobs:
            try:
                dock = job.result()
                host = users[user]
                if dock != host:
                    kick.append(('yesno "%s\'s user credentials have changed, '
                                 'shut down their server? " && '
                                 'docker rm -f jupyter-%s || :' % (user, user)))
            except Exception as e:
                # Best-effort: report and keep checking the remaining users.
                print(user, "failed with", e)
    return kick
def sync_new_groups(yaml):
    """Yield the commands that create groups listed in the YAML but missing
    on the host."""
    existing = set(list_project_groups())
    for group in set(yaml['groups']) - existing:
        yield from add_group(group)
def sync_new_users(yaml):
    """Yield adduser commands for YAML logins that do not exist on the host."""
    existing = list_users()
    for user in yaml['users']:
        login = user['login']
        if login not in existing:
            yield 'adduser --disabled-password %s --shell /usr/sbin/nologin --gecos ""' % login
def sync_user_groups(yaml):
    """Yield adduser commands that add each user to the groups it is
    missing according to the YAML."""
    for user in yaml['users']:
        login = user['login']
        wanted = set(user.get('groups', []))
        current = set(list_user_groups(login))
        for group in wanted - current:
            yield 'adduser %s %s' % (login, group)
def sync_homedirs(yaml):
    """Yield commands that (re)create the 'common' symlink and the per-group
    symlinks under ~/project for every normal user in the YAML."""
    for u in yaml['users']:
        login = u['login']
        if not is_normal_user(login):
            continue
        home = pwd.getpwnam(login).pw_dir
        pcom = join(PROJECT, 'common')
        # readlink is a helper defined elsewhere; presumably it returns the
        # target of home/common (or a non-matching value when absent) -- confirm.
        if readlink(home, 'common') != pcom:
            yield 'ln -sf %s %s' % (pcom, home)
        project = join(home, 'project')
        if not exists(project):
            yield 'mkdir -pm 0755 %s' % project
        for g in list_user_groups(login):
            p = join(PROJECT, g)
            if readlink(project, g) != p:
                yield 'ln -sf %s %s' % (p, project)
# RUN
def run(*gens):
    """Execute (or, in DRY_RUN mode, print) the shell commands yielded
    by each generator in *gens*.

    Each generator's commands are wrapped in a small bash prelude that
    defines error/abort/yesno helpers, then run as one script via
    ``bash -c``.  A non-zero exit code aborts the whole program.
    Prints "Everything is synced." when no commands were produced.
    """
    flags = "set -eu"
    if VERBOSE:
        flags += "x"  # -x: echo each command as it runs
    init = ["#!/bin/bash", flags,
            'error () { echo "$@" > /dev/stderr; }',
            'abort () { echo "$@" > /dev/stderr; exit 1; }',
            'yesno () { read -p "$* [yn]: " a; [[ $a == y ]]; }']
    if DRY_RUN:
        cmds = [cmd for g in gens for cmd in g]
        if not cmds:
            return print("Everything is synced.")
        return print('\n'.join(cmds))
    ncs = 0
    for g in gens:
        lines = list(g)
        if not lines:
            # Fix: the original spawned a bash process even when the
            # generator produced no commands (running only the prelude).
            continue
        ncs += len(lines)
        prog = '\n'.join(init + lines)
        r = subprocess.run(['/bin/bash', '-c', prog])
        if r.returncode != 0:
            print("abort.")
            sys.exit(1)
    if ncs == 0:
        print("Everything is synced.")
# CLI
@click.group()
def cli():
    # Root click command group; subcommands attach via @cli.command().
    # Deliberately no docstring: one would become the CLI's --help text.
    pass
@cli.command()
@click.option('--force', '-f', is_flag=True, help="force restart")
def restart_hub(force):
    """Restart the hub.
    All users are managed on the host system. The jupyter hub then keeps a copy
    of the user database. This means that if the user database is updated,
    then the hub needs to be restarted for those changes to take effect.
    Running this command will restart the hub, but only if the hub doesn't have
    the latest user database.
    \b
    $ jusers restart-hub
    Everything is synced.
    \b
    $ jusers restart-hub --force
    Restarting hub ...
    """
    # maybe_restart_hub yields the restart commands (nothing when the hub
    # already has the latest user DB); run() executes or prints them.
    run(maybe_restart_hub(force))
@cli.command()
@click.option('--may-the-force-be-with-me', is_flag=True, help="Execute commands.")
def sync(may_the_force_be_with_me):
    """Read jusers.yml and output commands needed to sync it.
    In the simplest case, everything is already up-to-date:
    \b
    $ jusers sync
    Everything is synced.
    Let's say we add a new user to jusers.yml:
    \b
    - login: beeblebrox
      groups:
        - cake
        - sausage
    Re-running the sync command will now output the commands needed to move the
    host closer to what is described by users.yml:
    \b
    $ jusers sync
    useradd --create-home --user-group beeblebrox
    usermod -G cake -a beeblebrox
    usermod -G sausage -a beeblebrox
    These commands needs to be run manually - either directly or by piping them
    into bash - until sync reports that 'Everything is synced.'
    The sync command also has the capability to run these commands on its own,
    in which case it only needs to be run once. This is accomplished by passing
    the correct option to sync.
    \b
    $ jusers sync --may-the-force-be-with-me
    + useradd --create-home --user-group beeblebrox
    + usermod -G cake -a beeblebrox
    + usermod -G sausage -a beeblebrox
    + ln -sf /project/common /home/beeblebrox
    + mkdir -pm 0755 /home/beeblebrox/project
    + ln -sf /project/cake /home/beeblebrox/project
    + ln -sf /project/sausage /home/beeblebrox/project
    + cd /docker/jupyter
    + docker-compose restart hub
    Restarting hub ...
    """
    # Fix: the help text contained corrupted spans ("or | by piping",
    # "--may-the-force-be- | with-me"); restored the intended wording.
    global DRY_RUN, VERBOSE
    if not may_the_force_be_with_me:
        DRY_RUN = True   # only print what would be done
    else:
        VERBOSE = True   # bash -x tracing while executing
    yaml = YAML(typ='safe').load(open(USERSDB, 'rb'))
    run(sync_new_groups(yaml),
        sync_new_users(yaml),
        sync_user_groups(yaml),
        sync_homedirs(yaml),
        maybe_restart_hub(),
        maybe_kick_all_users())
@cli.command()
@click.argument('users', nargs=-1, required=False)
def set_passwords(users):
"""Manage user passwords.
When a user is created, its password is unset. To be able to login, their
password needs to be set.
Running this command without an argument will list all users without
passwords. The command can then be rerun with a list of users whose
password is to be reset. The passwords are reset to randomly generated
passwords.
If any passwords were reset, the jupyter hub will be restarted in ord |
vfp2py import vfpfunc
from vfp2py.vfpfunc import DB, Array, C, F, M, S, lparameters, parameters, vfpclass
@lparameters()
def MAIN():
    # Entry point emitted by the vfp2py translator; the original VFP
    # main program had no top-level statements, so this is a no-op.
    pass
@lparameters()
def select_tests():
    """Check DB.select_function() for the standard VFP SELECT() argument
    forms (default, 0, 1, 2 and an alias name)."""
    default_arg = 0 if vfpfunc.set('compatible') == 'OFF' else None
    assert DB.select_function(default_arg) == 1
    for arg, expected in ((0, 1), (1, 32767), (2, 0), ('test', 0)):
        assert DB.select_function(arg) == expected
@lparameters()
def chr_tests():
    """The NUL character's code is 0 (CHR/ASC round trip)."""
    first_char = '\x00'[0]
    assert ord(first_char) == 0
@lparameters()
def set_tests():
    """SET('COMPATIBLE') returns the primary value; with a second
    argument it returns the secondary setting."""
    primary = vfpfunc.set('compatible')
    assert primary == 'OFF'
    secondary = vfpfunc.set('compatible', 1)
    assert secondary == 'PROMPT'
@lparameters()
def used_tests():
    """USED() reports False for a table that has not been opened."""
    in_use = DB.used('test')
    assert in_use == False
@lparameters()
def date_tests():
    """Exercise date helpers (DOW, DTOS, DTOC, GOMONTH, VARTYPE) against
    a fixed date: 2017-06-30, a Friday."""
    M.add_local('somedate')
    S.somedate = dt.date(2017, 6, 30)
    assert S.somedate == dt.date(2017, 6, 30)
    # dow_fix maps Python's Monday=0 weekday onto VFP day-of-week numbers.
    assert vfpfunc.dow_fix(S.somedate.weekday()) == 6
    assert S.somedate.strftime('%A') == 'Friday'
    assert S.somedate.month == 6
    assert S.somedate.strftime('%B') == 'June'
    assert S.somedate.strftime('%d %B %Y') == '30 June 2017'
    assert vfpfunc.dtos(S.somedate) == '20170630'
    assert vfpfunc.dtoc(S.somedate) == '06/30/2017'
    # TIME() returns HH:MM:SS (8 chars); with hundredths it is 11 chars.
    assert len(dt.datetime.now().time().strftime('%H:%M:%S')) == 8
    assert len(dt.datetime.now().time().strftime('%H:%M:%S.%f')[:11]) == 11
    assert dt.datetime.combine(S.somedate, dt.datetime.min.time()) == dt.datetime(2017, 6, 30, 0)
    # GOMONTH clamps to the shorter month's last day (Jun 30 -> Feb 28).
    assert vfpfunc.gomonth(S.somedate, -4) == dt.date(2017, 2, 28)
    # VARTYPE: 'D' for a date, 'T' for a datetime.
    assert vfpfunc.vartype(S.somedate) == 'D'
    assert vfpfunc.vartype(dt.datetime.combine(S.somedate, dt.datetime.min.time())) == 'T'
@lparameters()
def math_tests():
    """Exercise numeric functions: rounding, trig, RAND, MOD, VAL,
    VARTYPE and boolean operator precedence."""
    M.add_local('num_value')
    S.num_value = math.pi
    assert round(math.pi, 2) == 3.14
    assert abs(math.tan(math.radians(45)) - 1) < 0.001
    assert abs(math.sin(math.radians(90)) - 1) < 0.001
    assert abs(math.cos(math.radians(90)) - 0) < 0.001
    assert abs(math.cos(math.radians(45)) - math.sqrt(2) / 2) < 0.001
    # NOTE(review): random.random() is called twice, so two different
    # values are range-checked; presumably fine for a RAND() translation,
    # but confirm the intent was not to check a single value.
    assert 0 < random.random() and random.random() < 1
    assert (5 % 2) == 1
    M.add_local('stringval')
    S.stringval = '1e5'
    # VAL() of scientific notation.
    assert float(S.stringval) == 100000
    assert vfpfunc.vartype(S.num_value) == 'N'
    # Precedence: AND binds tighter than OR.
    assert not ((True or True) and False)
    assert True or False and True
@lparameters()
def string_tests():
    """Exercise string helpers: word splitting, GETWORDNUM, LIKE,
    first-character class tests, ISBLANK, trimming, STREXTRACT, AT,
    TEXT blocks and ATLINE/RATLINE."""
    S.cstring = 'AAA aaa, BBB bbb, CCC ccc.'
    assert vfpfunc.vartype(S.cstring) == 'C'
    assert len([w for w in S.cstring.split() if w]) == 6
    assert len([w for w in S.cstring.split(',') if w]) == 3
    assert len([w for w in S.cstring.split('.') if w]) == 1
    assert vfpfunc.getwordnum(S.cstring, 2) == 'aaa,'
    assert vfpfunc.getwordnum(S.cstring, 2, ',') == ' BBB bbb'
    assert vfpfunc.getwordnum(S.cstring, 2, '.') == ''
    # LIKE() is case sensitive.
    assert vfpfunc.like('Ab*t.???', 'About.txt')
    assert not vfpfunc.like('Ab*t.???', 'about.txt')
    # ISALPHA/ISLOWER/ISDIGIT/ISUPPER look at the first character only;
    # the empty string is false for all of them.
    assert not ''[:1].isalpha()
    assert 'a123'[:1].isalpha()
    assert not '1abc'[:1].isalpha()
    assert not ''[:1].islower()
    assert 'test'[:1].islower()
    assert not 'Test'[:1].islower()
    assert not ''[:1].isdigit()
    assert '1abc'[:1].isdigit()
    assert not 'a123'[:1].isdigit()
    assert not ''[:1].isupper()
    assert 'Test'[:1].isupper()
    assert not 'test'[:1].isupper()
    assert vfpfunc.isblank('')
    assert not vfpfunc.isblank('test')
    assert vfpfunc.isblank(None)
    S.cstring = ' AAA '
    assert S.cstring.strip() == 'AAA'
    assert S.cstring.rstrip() == ' AAA'
    assert S.cstring.lstrip() == 'AAA '
    # NOTE(review): this compares rstrip() with itself and is always
    # true; possibly a translation artifact — confirm against the VFP
    # original before changing.
    assert S.cstring.rstrip() == S.cstring.rstrip()
    assert vfpfunc.strextract('This {{is}} a {{template}}', '{{', '}}') == 'is'
    assert vfpfunc.strextract('This {{is}} a {{template}}', '{{', '}}', 2) == 'template'
    assert vfpfunc.strextract('This {{is}} a {{template}}', '{{is}}') == ' a {{template}}'
    # Fix: restored corrupted call name ('strex | tract').
    assert vfpfunc.strextract('This {{is}} a {{template}}', '{{IS}}', '', 1, 1) == ' a {{template}}'
    assert '123AAbbB'.lower().find('aab'.lower()) + 1 == 4
    S.cstring = vfpfunc.text([' 123AAbbbB',
                              ' TESTTEST',
                              ' TEXTLINES',
                              ' '], show=False)
    assert S.cstring == '123AAbbbBTESTTESTTEXTLINES'
    # Fix: restored corrupted literal ('123AAbb | bB...'); the expected
    # value two lines above confirms the 'bbb' spelling.
    S.cstring = '123AAbbbB\r\nTESTTEST\r\nTEXTLINES'
    assert vfpfunc.atline('T', S.cstring) == 2
    assert vfpfunc.ratline('T', S.cstring) == 3
@lparameters()
def path_tests():
    """Exercise HOME()/LOCFILE(): a file created in the start directory
    is still locatable after chdir'ing out of it.  Removes the file."""
    assert vfpfunc.home() == os.getcwd()
    S.handle = open('test_lib_file', 'w')
    S.handle.close()
    assert not vfpfunc.isblank(vfpfunc.locfile('test_lib_file'))
    os.chdir('..')  # deliberately leave the home directory
    assert vfpfunc.home() != os.getcwd()
    assert not vfpfunc.isblank(vfpfunc.locfile('test_lib_file'))
    # Clean up the temp file created above.
    os.remove(os.path.join(vfpfunc.home(), 'test_lib_file'))
@lparameters()
def misc_tests():
    """VERSION() checks for this shim implementation."""
    base_version = vfpfunc.version()
    assert base_version == 'Not FoxPro 9'
    assert vfpfunc.version(4) == base_version
    assert vfpfunc.version(5) == 900
@lparameters('seed')
def _add_db_record():
    """Insert one fake row into the 'report' table.

    Faker is seeded with the 'seed' parameter so repeated runs produce
    deterministic rows.
    """
    M.add_local('fake', 'fake_name', 'fake_st', 'fake_quantity', 'fake_received')
    S.fake = faker.Faker()
    S.fake.seed(S.seed)
    S.fake_name = S.fake.name()
    S.fake_st = S.fake.state_abbr()
    S.fake_quantity = S.fake.random_int(0, 100)
    S.fake_received = S.fake.boolean()
    # Column order: name, st, quantity, received.
    DB.insert('report', (S.fake_name, S.fake_st, S.fake_quantity, S.fake_received))
@lparameters('sqlconn', 'seed')
def _sqlexec_add_record():
    """Insert one fake row into REPORT via SQLEXEC; returns its result.

    Seeded like _add_db_record for deterministic data.
    """
    M.add_local('fake', 'fake_name', 'fake_st', 'fake_quantity', 'fake_received')
    S.fake = faker.Faker()
    S.fake.seed(S.seed)
    S.fake_name = S.fake.name()
    S.fake_st = S.fake.state_abbr()
    S.fake_quantity = S.fake.random_int(0, 100)
    S.fake_received = S.fake.boolean()
    # NOTE(review): SQL built by string concatenation — acceptable for
    # locally generated fake data, unsafe for untrusted input.
    S.sqlcmd = "insert into REPORT values ('" + S.fake_name + "','" + S.fake_st + "'," + vfpfunc.str(S.fake_quantity).strip() + ',' + vfpfunc.str(int(S.fake_received)).strip() + ')'
    print(S.sqlcmd)
    return vfpfunc.sqlexec(S.sqlconn, S.sqlcmd)
@lparameters()
def database_tests():
# FIX ME: SET SAFETY OFF
# FIX ME: SET ASSERTS ON
try:
DB.create_table('report', 'name c(50); st c(2); quantity n(5, 0); received l', 'free')
assert os.path.isfile('report.dbf')
assert DB.used('report')
try:
DB.use('report', 0, 'shared')
assert False
except Exception as err:
S.oerr = vfpfunc.Exception.from_pyexception(err)
print(S.oerr.message)
assert S.oerr.message == 'File is in use.'
_add_db_record(0)
_add_db_record(1)
_add_db_record(2)
_add_db_record(3)
assert DB.cpdbf() == 0
assert DB.fcount() == 4
DB.alter_table('report', 'add', 'age n(3, 0)')
assert DB.fcount() == 5
assert DB.field(2) == 'st'
assert not DB.found()
DB.goto(None, 0)
M.add_local('loopcount')
S.loopcount = 0
for _ in DB.scanner(scope=('rest',)):
assert len(S.name.strip()) > 0
S.loopcount += 1
assert S.loopcount == 4
DB.goto(None, 3)
S.loopcount = 0
for _ in DB.scanner(scope=('all',), condition=lambda: S.st.strip() == 'ID'):
assert len(S.name.strip()) > 0
S.loopcount += 1
assert S.loopcount == 2
S.loopcount = 0
for _ in DB.scanner(scope=('rest',), condition=lambda: S.st.strip() == 'ID'):
assert len(S.name.strip()) > 0
S.loopcount += 1
assert S.loopcount == 0
DB.goto(None, 0)
S.loopcount = 0
for _ in DB.scanner(scope=('rest',), condition=lambda: S.st.strip() == 'ID'):
assert len(S.name.strip()) > 0
S.loopcount += 1
assert S.loopcount == 2
del M.loopcount
assert S.name.strip() == 'Norma Fisher', S.name.strip() + ' should be Norma Fisher'
assert DB.recno() == 1
S.report_record = vfpfunc.scatter(totype='name')
assert S.report_record.name.strip() == 'Norma Fisher', S.report_record.name.strip() + ' should be Norma Fisher'
DB.goto(None, -1)
assert S.name.strip() == 'Joshua Wood', S.name.strip() + ' should be Josh |
from collections import OrderedDict
from PyQt4 import QtGui
from PyQt4.QtCore import Qt
from Orange.data import Table
from Orange.classification.svm import SVMLearner, NuSVMLearner
from Orange.widgets import settings, gui
from Orange.widgets.utils.owlearnerwidget import OWBaseLearner
class OWBaseSVM(OWBaseLearner):
    """Shared GUI base for the SVM widgets: kernel-selection box with
    the g/c/d parameter spins plus the numerical-tolerance box."""
    #: Kernel types
    Linear, Poly, RBF, Sigmoid = 0, 1, 2, 3
    #: Selected kernel type
    kernel_type = settings.Setting(RBF)
    #: kernel degree
    degree = settings.Setting(3)
    #: gamma
    gamma = settings.Setting(1.0)
    #: coef0 (additive constant)
    coef0 = settings.Setting(0.0)
    #: numerical tolerance
    tol = settings.Setting(0.001)

    kernels = (("Linear", "x⋅y"),
               ("Polynomial", "(g x⋅y + c)<sup>d</sup>"),
               ("RBF", "exp(-g|x-y|²)"),
               ("Sigmoid", "tanh(g x⋅y + c)"))

    def _add_kernel_box(self):
        """Build the 'Kernel' box: radio buttons and the g/c/d spins."""
        # Initialize with the widest label to measure max width
        self.kernel_eq = self.kernels[-1][1]
        self.kernel_box = box = gui.hBox(self.controlArea, "Kernel")
        buttonbox = gui.radioButtonsInBox(
            box, self, "kernel_type", btnLabels=[k[0] for k in self.kernels],
            callback=self._on_kernel_changed, addSpace=20)
        buttonbox.layout().setSpacing(10)
        gui.rubber(buttonbox)
        parambox = gui.vBox(box)
        gui.label(parambox, self, "Kernel: %(kernel_eq)s")
        common = dict(orientation=Qt.Horizontal, callback=self.settings_changed,
                      alignment=Qt.AlignRight, controlWidth=80)
        spbox = gui.hBox(parambox)
        gui.rubber(spbox)
        inbox = gui.vBox(spbox)
        gamma = gui.doubleSpin(
            inbox, self, "gamma", 0.0, 10.0, 0.01, label=" g: ", **common)
        coef0 = gui.doubleSpin(
            inbox, self, "coef0", 0.0, 10.0, 0.01, label=" c: ", **common)
        degree = gui.doubleSpin(
            inbox, self, "degree", 0.0, 10.0, 0.5, label=" d: ", **common)
        self._kernel_params = [gamma, coef0, degree]
        gui.rubber(parambox)
        # This is the maximal height (all double spins are visible)
        # and the maximal width (the label is initialized to the widest one)
        box.layout().activate()
        box.setFixedHeight(box.sizeHint().height())
        box.setMinimumWidth(box.sizeHint().width())

    def _add_optimization_box(self):
        """Build the 'Optimization Parameters' box (numerical tolerance)."""
        self.optimization_box = gui.vBox(
            self.controlArea, "Optimization Parameters")
        gui.doubleSpin(
            self.optimization_box, self, "tol", 1e-6, 1.0, 1e-5,
            label="Numerical tolerance:",
            decimals=6, alignment=Qt.AlignRight, controlWidth=100,
            callback=self.settings_changed
        )

    def add_main_layout(self):
        self._add_type_box()
        self._add_kernel_box()
        self._add_optimization_box()

    def _on_kernel_changed(self):
        """Show only the parameter spins the chosen kernel uses and
        refresh the displayed kernel equation."""
        # Which of (g, c, d) each kernel type uses.
        enabled = [[False, False, False],  # linear
                   [True, True, True],     # poly
                   [True, False, False],   # rbf
                   [True, True, False]]    # sigmoid
        self.kernel_eq = self.kernels[self.kernel_type][1]
        mask = enabled[self.kernel_type]
        # Fix: loop variable renamed (was 'enabled', shadowing the table).
        for spin, shown in zip(self._kernel_params, mask):
            [spin.box.hide, spin.box.show][shown]()
        self.settings_changed()

    def _report_kernel_parameters(self, items):
        """Add a human-readable kernel description to the report items."""
        if self.kernel_type == 0:
            items["Kernel"] = "Linear"
        elif self.kernel_type == 1:
            items["Kernel"] = \
                "Polynomial, ({g:.4} x⋅y + {c:.4})<sup>{d}</sup>".format(
                    g=self.gamma, c=self.coef0, d=self.degree)
        elif self.kernel_type == 2:
            items["Kernel"] = "RBF, exp(-{:.4}|x-y|²)".format(self.gamma)
        else:
            items["Kernel"] = "Sigmoid, tanh({g:.4} x⋅y + {c:.4})".format(
                g=self.gamma, c=self.coef0)

    def update_model(self):
        """After refitting, also send the support vectors downstream."""
        super().update_model()
        sv = None
        if self.valid_data:
            sv = self.data[self.model.skl_model.support_]
        self.send("Support vectors", sv)
class OWSVMClassification(OWBaseSVM):
    """Orange widget wrapping scikit-learn's C-SVM / ν-SVM classifiers."""
    name = "SVM"
    description = "Support Vector Machines map inputs to higher-dimensional " \
                  "feature spaces that best separate different classes. "
    icon = "icons/SVM.svg"
    priority = 50

    LEARNER = SVMLearner

    outputs = [("Support vectors", Table)]

    # 0: c_svc, 1: nu_svc
    svmtype = settings.Setting(0)
    C = settings.Setting(1.0)
    nu = settings.Setting(0.5)
    # Fix: a trailing comma made this setting a 1-tuple instead of a bool.
    shrinking = settings.Setting(True)
    # NOTE(review): this setting is never read — create_learner passes
    # probability=True unconditionally; confirm before wiring it up.
    probability = settings.Setting(False)
    max_iter = settings.Setting(100)
    limit_iter = settings.Setting(True)

    def _add_type_box(self):
        """Build the 'SVM Type' box: C-SVM with cost C, ν-SVM with ν."""
        form = QtGui.QGridLayout()
        self.type_box = box = gui.radioButtonsInBox(
            self.controlArea, self, "svmtype", [], box="SVM Type",
            orientation=form, callback=self.settings_changed)
        form.addWidget(gui.appendRadioButton(box, "C-SVM", addToLayout=False),
                       0, 0, Qt.AlignLeft)
        form.addWidget(QtGui.QLabel("Cost (C):"),
                       0, 1, Qt.AlignRight)
        form.addWidget(gui.doubleSpin(box, self, "C", 1e-3, 1000.0, 0.1,
                                      decimals=3, alignment=Qt.AlignRight,
                                      controlWidth=80, addToLayout=False,
                                      callback=self.settings_changed),
                       0, 2)
        form.addWidget(gui.appendRadioButton(box, "ν-SVM", addToLayout=False),
                       1, 0, Qt.AlignLeft)
        form.addWidget(QtGui.QLabel("Complexity (ν):"),
                       1, 1, Qt.AlignRight)
        form.addWidget(gui.doubleSpin(box, self, "nu", 0.05, 1.0, 0.05,
                                      decimals=2, alignment=Qt.AlignRight,
                                      controlWidth=80, addToLayout=False,
                                      callback=self.settings_changed),
                       1, 2)

    def _add_optimization_box(self):
        """Add the iteration-limit spin below the base tolerance spin."""
        super()._add_optimization_box()
        gui.spin(self.optimization_box, self, "max_iter", 50, 1e6, 50,
                 label="Iteration limit:", checked="limit_iter",
                 alignment=Qt.AlignRight, controlWidth=100,
                 callback=self.settings_changed)

    def create_learner(self):
        """Return an SVMLearner or NuSVMLearner built from the GUI state."""
        kernel = ["linear", "poly", "rbf", "sigmoid"][self.kernel_type]
        common_args = dict(
            kernel=kernel,
            degree=self.degree,
            gamma=self.gamma,
            coef0=self.coef0,
            tol=self.tol,
            max_iter=self.max_iter if self.limit_iter else -1,
            probability=True,
            preprocessors=self.preprocessors
        )
        if self.svmtype == 0:
            return SVMLearner(C=self.C, **common_args)
        else:
            return NuSVMLearner(nu=self.nu, **common_args)

    def get_learner_parameters(self):
        """Return an ordered mapping of settings for the report."""
        items = OrderedDict()
        if self.svmtype == 0:
            items["SVM type"] = "C-SVM, C={}".format(self.C)
        else:
            items["SVM type"] = "ν-SVM, ν={}".format(self.nu)
        self._report_kernel_parameters(items)
        items["Numerical tolerance"] = "{:.6}".format(self.tol)
        # Fix: typo in the user-visible report label ("limt").
        items["Iteration limit"] = self.max_iter if self.limit_iter else "unlimited"
        return items
if __name__ == "__main__":
app = QtGui.QApplication([])
w = OWSVMClassification()
w.set_data(Table("iris")[:50])
w.show()
app.exec_()
|
"""
homeassistant.components.demo
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Sets up a demo environment that mimics interaction with devices.
"""
import time
import homeassistant.core as ha
import homeassistant.bootstrap as bootstrap
import homeassistant.loader as loader
from homeassistant.const import (
CONF_PLATFORM, ATTR_ENTITY_PICTURE, ATTR_ENTITY_ID, ATTR_FRIENDLY_NAME)
DOMAIN = "demo"
DEPENDENCIES = ['introduction', 'conversation']
COMPONENTS_WITH_DEMO_PLATFORM = [
'switch', 'light', 'sensor', 'thermostat', 'media_player', 'notify']
def setup(hass, config):
    """ Setup a demo environment. """
    group = loader.get_component('group')
    configurator = loader.get_component('configurator')

    config.setdefault(ha.DOMAIN, {})
    config.setdefault(DOMAIN, {})

    if config[DOMAIN].get('hide_demo_state') != 1:
        hass.states.set('a.Demo_Mode', 'Enabled')

    # Setup sun: give the demo a fixed location if none is configured.
    if not hass.config.latitude:
        hass.config.latitude = 32.87336
    if not hass.config.longitude:
        hass.config.longitude = 117.22743
    bootstrap.setup_component(hass, 'sun')

    # Setup demo platforms
    for component in COMPONENTS_WITH_DEMO_PLATFORM:
        bootstrap.setup_component(
            hass, component, {component: {CONF_PLATFORM: 'demo'}})

    # Setup room groups (entity ids sorted so indices are deterministic).
    lights = sorted(hass.states.entity_ids('light'))
    switches = sorted(hass.states.entity_ids('switch'))
    media_players = sorted(hass.states.entity_ids('media_player'))
    group.setup_group(hass, 'living room', [lights[2], lights[1], switches[0],
                                            media_players[1]])
    group.setup_group(hass, 'bedroom', [lights[0], switches[1],
                                        media_players[0]])

    # Setup IP Camera
    bootstrap.setup_component(
        hass, 'camera',
        {'camera': {
            'platform': 'generic',
            'name': 'IP Camera',
            'still_image_url': 'http://home-assistant.io/demo/webcam.jpg',
        }})

    # Setup scripts: toggle the first light off/on/off with delays.
    bootstrap.setup_component(
        hass, 'script',
        {'script': {
            'demo': {
                'alias': 'Toggle {}'.format(lights[0].split('.')[1]),
                'sequence': [{
                    'execute_service': 'light.turn_off',
                    'service_data': {ATTR_ENTITY_ID: lights[0]}
                }, {
                    'delay': {'seconds': 5}
                }, {
                    'execute_service': 'light.turn_on',
                    'service_data': {ATTR_ENTITY_ID: lights[0]}
                }, {
                    'delay': {'seconds': 5}
                }, {
                    'execute_service': 'light.turn_off',
                    'service_data': {ATTR_ENTITY_ID: lights[0]}
                }]
            }}})

    # Setup scenes
    bootstrap.setup_component(
        hass, 'scene',
        {'scene': [
            {'name': 'Romantic lights',
             'entities': {
                 lights[0]: True,
                 lights[1]: {'state': 'on', 'xy_color': [0.33, 0.66],
                             'brightness': 200},
             }},
            {'name': 'Switch on and off',
             'entities': {
                 switches[0]: True,
                 switches[1]: False,
             }},
        ]})

    # Setup fake device tracker
    # Fix: restored corrupted URL literal ('htt | p://...').
    hass.states.set("device_tracker.paulus", "home",
                    {ATTR_ENTITY_PICTURE:
                     "http://graph.facebook.com/297400035/picture",
                     ATTR_FRIENDLY_NAME: 'Paulus'})
    hass.states.set("device_tracker.anne_therese", "not_home",
                    {ATTR_FRIENDLY_NAME: 'Anne Therese',
                     'latitude': hass.config.latitude + 0.002,
                     'longitude': hass.config.longitude + 0.002})
    # Fix: removed stray '|' corruption before the attributes dict.
    hass.states.set("group.all_devices", "home",
                    {
                        "auto": True,
                        ATTR_ENTITY_ID: [
                            "device_tracker.paulus",
                            "device_tracker.anne_therese"
                        ]
                    })

    # Setup configurator
    configurator_ids = []

    def hue_configuration_callback(data):
        """ Fake callback, mark config as done. """
        time.sleep(2)

        # First time it is called, pretend it failed.
        if len(configurator_ids) == 1:
            configurator.notify_errors(
                configurator_ids[0],
                "Failed to register, please try again.")
            configurator_ids.append(0)
        else:
            configurator.request_done(configurator_ids[0])

    request_id = configurator.request_config(
        hass, "Philips Hue", hue_configuration_callback,
        description=("Press the button on the bridge to register Philips Hue "
                     "with Home Assistant."),
        description_image="/static/images/config_philips_hue.jpg",
        submit_caption="I have pressed the button"
    )
    configurator_ids.append(request_id)

    return True
|
terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from mezzanine.utils.tests import TestCase
from organization.job.models import JobOffer, Candidacy, JobResponse
# from organization.job.admin import *
from django.core.files.uploadedfile import SimpleUploadedFile
from django.core import urlresolvers
from django.contrib.auth import get_user_model as User
class URLTests(TestCase):
    """Smoke tests for the public job-offer and candidacy URLs."""

    def setUp(self):
        super(URLTests, self).setUp()
        self.job_offer = JobOffer.objects.create(
            title="django dev",
            email="testing@email.fr",
            type="internship",
            content="python"
        )
        self.candidacy = Candidacy.objects.create(
            title="research",
            text_button_external="more"
        )

    def test_job_offer_detail_url(self):
        # Detail page is addressed by slug and renders the offer content.
        response = self.client.get('/job-offer/' + self.job_offer.slug + "/")
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, "python")
        self.assertTemplateUsed(response, "job/job_offer_detail.html")

    def test_basic_job_offer_url(self):
        response = self.client.get('/job-offer/')
        # Fix: restored corrupted token ('r | esponse').
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, "django-dev")
        self.assertTemplateUsed(response, "job/job_offer_list.html")

    def test_basic_candidacies_url(self):
        response = self.client.get('/candidacies/')
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, "research")
        # Fix: restored corrupted template name ('j | ob/...').
        self.assertTemplateUsed(response, "job/candidacy_list.html")

    def test_candidacies_autocomplete(self):
        response = self.client.get('/candidacy-autocomplete/')
        self.assertEqual(response.status_code, 200)
class JobOfferTests(TestCase):
    # CRUD and admin-permission tests for the JobOffer model.

    def setUp(self):
        super(JobOfferTests, self).setUp()
        app = "organization_job"
        model = "joboffer"
        # Admin "add" page URL for JobOffer.
        self.url = urlresolvers.reverse("admin:%s_%s_add" % (app, model))
        # The same in-memory file is reused for both CV and cover letter.
        self.file = SimpleUploadedFile('letter.txt'.encode(), 'content'.encode())
        self.job_offer = JobOffer.objects.create(
            email="test@test.fr",
            type="internship"
        )
        self.job_response = JobResponse.objects.create(
            first_name="jean",
            last_name="dupont",
            email="jean@dupont.fr",
            message="I want this job",
            curriculum_vitae=self.file,
            cover_letter=self.file,
            job_offer=self.job_offer
        )

    def test_job_offer_display_for_everyone(self):
        # Public detail page must be visible anonymously, as plain user
        # 'user' and as superuser 'test' (fixtures from mezzanine TestCase).
        self.client.logout()
        response = self.client.get(self.job_offer.get_absolute_url())
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, "job/job_offer_detail.html")
        self.client.login(username='user', password='test')
        response = self.client.get(self.job_offer.get_absolute_url())
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, "job/job_offer_detail.html")
        self.client.login(username='test', password='test')
        response = self.client.get(self.job_offer.get_absolute_url())
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, "job/job_offer_detail.html")

    def test_job_offer_admin(self):
        # Anonymous and non-staff users get redirected (302) away from
        # the admin add page; the superuser gets it (200).
        self.client.logout()
        response = self.client.get(self.url)
        self.assertEqual(response.status_code, 302)
        self.client.login(username='user', password='test')
        response = self.client.get(self.url)
        self.assertEqual(response.status_code, 302)
        self.client.login(username='test', password='test')
        response = self.client.get(self.url)
        self.assertEqual(response.status_code, 200)

    def test_job_offer_admin_creation(self):
        # Posting the admin add form creates exactly one new offer.
        self.client.login(username='test', password='test')
        nmb = JobOffer.objects.count()
        response = self.client.post(
            self.url,
            {
                "title": 'title',
                "status": 2,
                "email": 'email@email.fr',
                "type": 'internship',
                # Inline formset management data for job responses.
                'job_response-INITIAL_FORMS': '0',
                'job_response-TOTAL_FORMS': '1'
            }
        )
        self.assertEqual(response.status_code, 302)
        self.assertEqual(nmb+1, JobOffer.objects.count())

    def test_job_offer_admin_edition(self):
        # Inline "editable" controls only render for the superuser.
        self.client.logout()
        response = self.client.get(self.job_offer.get_absolute_url())
        self.assertNotContains(response, "editable")
        self.client.login(username='user', password='test')
        response = self.client.get(self.job_offer.get_absolute_url())
        self.assertNotContains(response, "editable")
        self.client.login(username='test', password='test')
        response = self.client.get(self.job_offer.get_absolute_url())
        self.assertContains(response, "editable")

    def test_job_offer_creation(self):
        self.assertTrue(isinstance(self.job_offer, JobOffer))
        self.assertEqual(self.job_offer.email, "test@test.fr")
        self.assertEqual(self.job_offer.type, "internship")

    def test_job_offer_retrieval(self):
        self.assertTrue(self.job_offer in JobOffer.objects.all())
        self.assertTrue(self.job_offer in JobOffer.objects.filter(email="test@test.fr"))
        self.assertTrue(self.job_offer in JobOffer.objects.filter(type="internship"))

    def test_job_offer_update(self):
        # In-memory change is only visible in the DB after save().
        self.job_offer.email = "test@django.fr"
        self.assertEqual(1, JobOffer.objects.filter(email="test@test.fr").count())
        self.assertEqual(0, JobOffer.objects.filter(email="test@django.fr").count())
        self.job_offer.save()
        self.assertEqual(0, JobOffer.objects.filter(email="test@test.fr").count())
        self.assertEqual(1, JobOffer.objects.filter(email="test@django.fr").count())
class JobResponseTests(TestCase):
    def setUp(self):
        # Create one offer plus one response, and resolve the admin
        # change-page URL for the offer.
        super(JobResponseTests, self).setUp()
        app = "organization_job"
        model = "joboffer"
        self.user = User().objects.create_user(username="user", password='test')
        # The same uploaded file is reused for CV and cover letter.
        self.file = SimpleUploadedFile('letter.txt'.encode(), 'content'.encode())
        self.job_offer = JobOffer.objects.create(
            email="test@test.fr",
            type="internship"
        )
        self.job_response = JobResponse.objects.create(
            first_name="jean",
            last_name="dupont",
            email="jean@dupont.fr",
            message="I want this job",
            curriculum_vitae=self.file,
            cover_letter=self.file,
            job_offer=self.job_offer
        )
        self.url = urlresolvers.reverse(
            "admin:%s_%s_change" % (app, model),
            args=(self.job_offer.id,)
        )
def test_job_response_fk_deletion(self):
self.job_offer.delete()
self.assertTrue(
self.job_response in JobResponse.objects.filter(
job_offer__isnull=True
)
)
def test_job_response_not_display_for_everyone(self):
self.client.logout()
response = self.client.get(self.url)
self.assertEqual(response.status_code, 302)
self.client.login(username='user', password='test')
response = self.client.get(self.url)
self.assertEqual(response.status_code, 302)
self.client.login(username='test', password='test')
response = self.client.get(self.url)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "admin/change_form.html")
self.assertContains(response, "jean@dupont.fr")
def test_job_response_creation |
# Copyright 2022 The Magenta Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Constants for music processing in Magenta."""
# Meter-related constants.
DEFAULT_QUARTERS_PER_MINUTE = 120.0
DEFAULT_STEPS_PER_BAR = 16 # 4/4 music sampled at 4 steps per quarter note.
DEFAULT_STEPS_PER_QUARTER = 4
# Default absolute quantization.
DEFAULT_STEPS_PER_SECOND = 100
# Standard pulses per quarter.
# https://en.wikipedia.org/wiki/Pulses_per_quarter_note
STANDARD_PPQ = 220
# Special melody events.
NUM_SPECIAL_MELODY_EVENTS = | 2
MELODY_NOTE_OFF = -1
MELODY_NO_EVENT = -2
# Other melody-related constants.
MIN_MELODY_EVENT = -2
MAX_MELODY_EVENT = 127
MIN_MIDI_PITCH = 0 # Inclusive.
MAX_MIDI_PITCH = 127 # Inclusive.
NUM_MIDI_PITCHES = MAX_MIDI_PITCH - MIN_MIDI_PITCH + 1
NOTES_PER_OCTAVE = 12
# Velocity-related constants.
MIN_MIDI_VELOCITY = 1 # Inclusive.
MAX_MIDI_VELOCITY = 127 # Inclusive.
# Program-related consta | nts.
MIN_MIDI_PROGRAM = 0
MAX_MIDI_PROGRAM = 127
# MIDI programs that typically sound unpitched.
UNPITCHED_PROGRAMS = (
list(range(96, 104)) + list(range(112, 120)) + list(range(120, 128)))
# Chord symbol for "no chord".
NO_CHORD = 'N.C.'
# The indices of the pitch classes in a major scale.
MAJOR_SCALE = [0, 2, 4, 5, 7, 9, 11]
# NOTE_KEYS[note] = The major keys that note belongs to.
# ex. NOTE_KEYS[0] lists all the major keys that contain the note C,
# which are:
# [0, 1, 3, 5, 7, 8, 10]
# [C, C#, D#, F, G, G#, A#]
#
# 0 = C
# 1 = C#
# 2 = D
# 3 = D#
# 4 = E
# 5 = F
# 6 = F#
# 7 = G
# 8 = G#
# 9 = A
# 10 = A#
# 11 = B
#
# NOTE_KEYS can be generated using the code below, but is explicitly declared
# for readability:
# NOTE_KEYS = [[j for j in range(12) if (i - j) % 12 in MAJOR_SCALE]
# for i in range(12)]
NOTE_KEYS = [
[0, 1, 3, 5, 7, 8, 10],
[1, 2, 4, 6, 8, 9, 11],
[0, 2, 3, 5, 7, 9, 10],
[1, 3, 4, 6, 8, 10, 11],
[0, 2, 4, 5, 7, 9, 11],
[0, 1, 3, 5, 6, 8, 10],
[1, 2, 4, 6, 7, 9, 11],
[0, 2, 3, 5, 7, 8, 10],
[1, 3, 4, 6, 8, 9, 11],
[0, 2, 4, 5, 7, 9, 10],
[1, 3, 5, 6, 8, 10, 11],
[0, 2, 4, 6, 7, 9, 11]
]
|
# -*- coding: utf-8 -*-
# author: bambooom
'''
My Diary Web App - CLI for client
'''
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
import requests
from bs4 import BeautifulSoup
import re
# Usage text shown at startup and on the h/help/? commands.
HELP = '''
Input h/help/? for help.
Input q/quit to quit the process.
Input s/sync to sync the diary log.
Input lt/ListTags to list all tags.
Input st:TAG to set or delete tags
Input FLUSH to clear all diary entries.
'''
# Base URL of the diary web app.
url = "http://bambooomdiary.sinaapp.com/"
def get_log_all():
    """Fetch the diary page and return the text of every <pre> entry,
    one entry per line."""
    page = requests.get(url)
    soup = BeautifulSoup(page.text, "html.parser")
    entries = [pre.get_text() for pre in soup.find_all('pre')]
    return ''.join(entry + '\n' for entry in entries)
def get_log_bytag(tags):
    # Print every diary entry (time + text) whose tag equals *tags*.
    response = requests.get(url)
    soup = BeautifulSoup(response.text,"html.parser")
    # Parallel lists: times, tags and bodies appear in the same document
    # order, so entries are matched up by index.
    ti=list(soup.find_all('i', class_='etime'))
    ta=list(soup.find_all('i', class_='tags'))
    di=list(soup.find_all('pre',class_='diary'))
    for i in range(len(list(ti))):
        if ta[i].get_text() == 'TAG:'+tags:
            print "%s %s" %(ti[i].get_text(),di[i].get_text())
def get_tags():
    # Print the distinct tag labels found on the diary page.
    response = requests.get(url)
    soup = BeautifulSoup(response.text, "html.parser")
    temp =[]
    for i in soup.find_all('i', class_='tags'):
        temp.append(i.get_text())
    # Deduplicate; set iteration order is arbitrary, so the printed
    # order is not stable between runs.
    tag_set = list(set(temp))
    for i in tag_set:
        print i
def delete_log():
res = raw_ | input('ARE YOU SURE?(y/n)>')
if res.lower() == 'y':
response = requests.delete(url)
print "All clear!Restart a new diary!"
else:
print "Well, keep going on!"
def write_log(message, tags):
    """POST a new diary entry with its tags to the server."""
    payload = {'newdiary': message, 'tags': tags}
    requests.post(url, data=payload)
def client():
print HELP
tags=''
while True:
print 'TAG:'+tags
message = raw_input('Input>')
if m | essage in ['h','help','?']:
print HELP
elif message in ['s','sync']:
get_log_bytag(tags)
elif message in ['q','quit']:
print 'Bye~'
break
elif message in ['lt','ListTags']:
get_tags()
elif message.startswith('st:'):
tags = message[3:]
elif message == 'FLUSH':
delete_log()
else:
write_log(message,tags)
if __name__ == '__main__':
    # Run the interactive CLI when executed as a script.
    client()
# -*- coding: utf8 -*-
import base64
import hashlib
import io
import nose
import requests
import aliyunauth.utils
import aliyunauth.consts
def test_cal_b64md5():
    """cal_b64md5 handles None, bytes/str input, and file-like objects
    larger than one MD5 chunk."""
    s_data = b"foo"
    l_data = b"bar" * aliyunauth.consts.MD5_CHUNK_SIZE
    # normal data, None
    nose.tools.eq_(aliyunauth.utils.cal_b64md5(None), None)

    def b64md5(data):
        # Reference value: base64 of the raw MD5 digest.
        # Fix: restored corrupted token ('.di | gest()').
        return base64.b64encode(hashlib.md5(data).digest()).decode("utf8")
    # normal data, small size, bytes
    nose.tools.eq_(aliyunauth.utils.cal_b64md5(s_data), b64md5(s_data))
    # normal data, small size, str
    nose.tools.eq_(
        aliyunauth.utils.cal_b64md5(s_data.decode("utf8")), b64md5(s_data)
    )
    # io-like, big size, bytes
    nose.tools.eq_(
        aliyunauth.utils.cal_b64md5(io.BytesIO(l_data)), b64md5(l_data)
    )
    # io-like, big size, str
    # Fix: restored corrupted token ('io.Stri | ngIO').
    nose.tools.eq_(
        aliyunauth.utils.cal_b64md5(io.StringIO(l_data.decode("utf8"))),
        b64md5(l_data)
    )
def test_to_bytes():
    """to_bytes accepts text and bytes and honours the encoding arg."""
    from_text = aliyunauth.utils.to_bytes(u"foo")
    nose.tools.ok_(isinstance(from_text, requests.compat.bytes))
    from_bytes = aliyunauth.utils.to_bytes(b"foo")
    nose.tools.ok_(isinstance(from_bytes, requests.compat.bytes))
    # gb2312 encodes this character as exactly two bytes.
    nose.tools.eq_(aliyunauth.utils.to_bytes(u"福", "gb2312"), b'\xb8\xa3')
def test_to_str():
    """to_str accepts text and bytes and honours the encoding arg."""
    nose.tools.ok_(
        isinstance(aliyunauth.utils.to_str(u"bar"), requests.compat.str),
        "unicode to str failed"
    )
    nose.tools.ok_(
        isinstance(aliyunauth.utils.to_str(b"bar"), requests.compat.str),
        "bytes to str failed"
    )
    # gb2312 bytes decode back to the expected character.
    nose.tools.eq_(aliyunauth.utils.to_str(b"\xb0\xf4", "gb2312"), u"棒")
def test_percent_quote():
    """percent_quote escapes reserved characters but keeps ~ literal."""
    raw = u"福棒 &?/*~=+foo\""
    quoted = "%E7%A6%8F%E6%A3%92%20%26%3F%2F%2A~%3D%2Bfoo%22"
    nose.tools.eq_(aliyunauth.utils.percent_quote(raw), quoted)
def test_percent_encode():
    """percent_encode drops None-valued params; flag reorders keys."""
    # A None value causes the pair to be omitted entirely.
    nose.tools.eq_(
        aliyunauth.utils.percent_encode([("福 棒", "foo+bar"), ("none", None)]),
        "%E7%A6%8F%20%E6%A3%92=foo%2Bbar"
    )
    # With the second argument True the output key order changes
    # (presumably sorted -- 'bar' before 'foo' here).
    nose.tools.eq_(
        aliyunauth.utils.percent_encode([("foo", "福"), ("bar", "棒")], True),
        "bar=%E6%A3%92&foo=%E7%A6%8F"
    )
|
.rel),
{
'data': [
{ 'type': tgt.collection, 'id': '12'},
{ 'type': tgt.collection, 'id': '13'}
]
},
headers={'Content-Type': 'application/vnd.api+json'},
)
# Make sure they are there.
rel_ids = {
rel_item['id'] for rel_item in
self.test_app().get(
'/{}/10/relationships/{}'.format(src.collection, src.rel)
).json['data']
}
self.assertEqual(rel_ids, {'12', '13'})
# Make sure adding relitem:12 again doesn't result in two relitem:12s
self.test_app().post_json(
'/{}/10/relationships/{}'.format(src.collection, src.rel),
{
'data': [
{ 'type': tgt.collection, 'id': '12'},
]
},
headers={'Content-Type': 'application/vnd.api+json'},
)
rel_ids = [
rel_item['id'] for rel_item in
self.test_app().get(
'/{}/10/relationships/{}'.format(src.collection, src.rel)
).json['data']
]
self.assertEqual(sorted(rel_ids), ['12', '13'])
# Make sure adding relitem:11 adds to the list, rather than replacing
# it.
self.test_app().post_json(
'/{}/10/relationships/{}'.format(src.collection, src.rel),
{
'data': [
{ 'type': tgt.collection, 'id': '11'},
]
},
headers={'Content-Type': 'application/vnd.api+json'},
)
rel_ids = [
rel_item['id'] for rel_item in
self.test_app().get(
'/{}/10/relationships/{}'.format(src.collection, src.rel)
).json['data']
]
self.assertEqual(sorted(rel_ids), ['11', '12', '13'])
    @parameterized.expand(rel_infos, doc_func=rels_doc_func)
    def test_rels_post_item_with_related(self, src, tgt, comment):
        '''Should add a new item with linkage to related resources.
        If a relationship is provided in the relationships member of the
        resource object, its value MUST be a relationship object with a data
        member. The value of this key represents the linkage the new resource is
        to have.
        '''
        # Add a new item related to relitem:12 and possibly relitem:13
        reldata = {'type': tgt.collection, 'id': '12'}
        if tgt.many:
            # to-many relationships take an array of resource identifiers
            reldata = [ reldata, {'type': tgt.collection, 'id': '13'} ]
        item_id = self.test_app().post_json(
            '/{}'.format(src.collection),
            {
                'data': {
                    'type': src.collection,
                    'relationships': {
                        src.rel: {
                            'data': reldata
                        }
                    }
                }
            },
            headers={'Content-Type': 'application/vnd.api+json'}
        ).json['data']['id']
        # GET it back and check that relationship linkage is correct.
        item = self.test_app().get(
            '/{}/{}'.format(src.collection, item_id)
        ).json['data']
        if tgt.many:
            specified_related_ids = {'12', '13'}
            found_related_ids = {
                thing['id'] for thing in item['relationships'][src.rel]['data']
            }
            self.assertEqual(specified_related_ids, found_related_ids)
        else:
            self.assertEqual(item['relationships'][src.rel]['data']['id'], '12')
        # Now attempt to add another item with malformed requests.
        # Each POST below asserts the expected HTTP error status.
        incorrect_type_data = { 'type': 'frogs', 'id': '12' }
        no_id_data = { 'type': tgt.collection, 'id_typo': '12'}
        # No data element in rel.
        self.test_app().post_json(
            '/{}'.format(src.collection),
            {
                'data': {
                    'type': src.collection,
                    'relationships': {
                        src.rel: {
                            'meta': 'should fail'
                        }
                    }
                }
            },
            headers={'Content-Type': 'application/vnd.api+json'},
            status=400
        )
        if tgt.many:
            incorrect_type_data = [ incorrect_type_data ]
            no_id_data = [ no_id_data ]
            # Not an array.
            self.test_app().post_json(
                '/{}'.format(src.collection),
                {
                    'data': {
                        'type': src.collection,
                        'relationships': {
                            src.rel: {
                                'data': { 'type': tgt.collection, 'id': '12'}
                            }
                        }
                    }
                },
                headers={'Content-Type': 'application/vnd.api+json'},
                status=400
            )
        else:
            # Data is an array of identifiers when it should be just one.
            self.test_app().post_json(
                '/{}'.format(src.collection),
                {
                    'data': {
                        'type': src.collection,
                        'relationships': {
                            src.rel: {
                                'data': [
                                    { 'type': tgt.collection, 'id': '12'}
                                ]
                            }
                        }
                    }
                },
                headers={'Content-Type': 'application/vnd.api+json'},
                status=400
            )
        # Data malformed (not a resource identifier or array of them).
        self.test_app().post_json(
            '/{}'.format(src.collection),
            {
                'data': {
                    'type': src.collection,
                    'relationships': {
                        src.rel: {
                            'data': 'splat'
                        }
                    }
                }
            },
            headers={'Content-Type': 'application/vnd.api+json'},
            status=400
        )
        # Item with incorrect type.
        # A type that doesn't match the relationship's target is a
        # conflict (409), not a plain validation error.
        self.test_app().post_json(
            '/{}'.format(src.collection),
            {
                'data': {
                    'type': src.collection,
                    'relationships': {
                        src.rel: {
                            'data': incorrect_type_data
                        }
                    }
                }
            },
            headers={'Content-Type': 'application/vnd.api+json'},
            status=409
        )
        # Item with no id.
        self.test_app().post_json(
            '/{}'.format(src.collection),
            {
                'data': {
                    'type': src.collection,
                    'relationships': {
                        src.rel: {
                            'data': no_id_data
                        }
                    }
                }
            },
            headers={'Content-Type': 'application/vnd.api+json'},
            status=400
        )
def test_rels_post_relationships_nonexistent_relationship(self):
'''Should return 404 error (relationship not found).
'''
# Try to add people/1 to no_such_relationship.
self.test_app().post_json(
'/articles_by_assoc/2/relationships/no_such_relationship',
{
'data': [
{ 'type': 'people', 'id': '1'}
]
},
headers={'Content-Type': 'application/vnd.api+json'},
status=404
)
@parameterized.expand(rel_infos, doc_func=rels_doc_func)
def test_rels_post_relationships_nonexistent_item(self, src, tgt, comment):
'''Should return HTTPFailedDependency (424).
'''
# Try to add tgt/99999 (doesn't exist) to src.rel
reldata = { 'type': tgt.collection, 'id': '99999'}
status = 403
if tgt.many:
reldata = [ reldata ]
|
model_data import FieldDataCache, set_score
from courseware.tests.helpers import (
LoginEnrollmentTestCase,
get_request_for_user
)
from lms.djangoapps.course_blocks.api import get_course_blocks
from student.tests.factories import UserFactory
from student.models import CourseEnrollment
from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory
from xmodule.modulestore.tests.django_utils import SharedModuleStoreTestCase
from .. import course_grades
from ..course_grades import summary as grades_summary
from ..module_grades import get_module_score
from ..new.course_grade import CourseGrade, CourseGradeFactory
from ..new.subsection_gra | de import SubsectionGradeFactory
def _grade_with_err | ors(student, course):
"""This fake grade method will throw exceptions for student3 and
student4, but allow any other students to go through normal grading.
It's meant to simulate when something goes really wrong while trying to
grade a particular student, so we can test that we won't kill the entire
course grading run.
"""
if student.username in ['student3', 'student4']:
raise Exception("I don't like {}".format(student.username))
return grades_summary(student, course)
@attr(shard=1)
class TestGradeIteration(SharedModuleStoreTestCase):
    """
    Test iteration through student gradesets.
    """
    # Identifiers used by setUpClass's CourseFactory call.
    COURSE_NUM = "1000"
    COURSE_NAME = "grading_test_course"
    @classmethod
    def setUpClass(cls):
        super(TestGradeIteration, cls).setUpClass()
        cls.course = CourseFactory.create(
            display_name=cls.COURSE_NAME,
            number=cls.COURSE_NUM
        )
    def setUp(self):
        """
        Create a course and a handful of users to assign grades
        """
        super(TestGradeIteration, self).setUp()
        # Usernames matter: _grade_with_errors fails student3/student4.
        self.students = [
            UserFactory.create(username='student1'),
            UserFactory.create(username='student2'),
            UserFactory.create(username='student3'),
            UserFactory.create(username='student4'),
            UserFactory.create(username='student5'),
        ]
    def test_empty_student_list(self):
        """If we don't pass in any students, it should return a zero-length
        iterator, but it shouldn't error."""
        gradeset_results = list(course_grades.iterate_grades_for(self.course.id, []))
        self.assertEqual(gradeset_results, [])
    def test_nonexistent_course(self):
        """If the course we want to get grades for does not exist, a `Http404`
        should be raised. This is a horrible crossing of abstraction boundaries
        and should be fixed, but for now we're just testing the behavior. :-("""
        with self.assertRaises(Http404):
            # iterate_grades_for is lazy; .next() forces evaluation.
            gradeset_results = course_grades.iterate_grades_for(SlashSeparatedCourseKey("I", "dont", "exist"), [])
            gradeset_results.next()
    def test_all_empty_grades(self):
        """No students have grade entries"""
        all_gradesets, all_errors = self._gradesets_and_errors_for(self.course.id, self.students)
        self.assertEqual(len(all_errors), 0)
        for gradeset in all_gradesets.values():
            self.assertIsNone(gradeset['grade'])
            self.assertEqual(gradeset['percent'], 0.0)
    @patch('lms.djangoapps.grades.course_grades.summary', _grade_with_errors)
    def test_grading_exception(self):
        """Test that we correctly capture exception messages that bubble up from
        grading. Note that we only see errors at this level if the grading
        process for this student fails entirely due to an unexpected event --
        having errors in the problem sets will not trigger this.
        We patch the grade() method with our own, which will generate the errors
        for student3 and student4.
        """
        all_gradesets, all_errors = self._gradesets_and_errors_for(self.course.id, self.students)
        student1, student2, student3, student4, student5 = self.students
        self.assertEqual(
            all_errors,
            {
                student3: "I don't like student3",
                student4: "I don't like student4"
            }
        )
        # But we should still have five gradesets
        self.assertEqual(len(all_gradesets), 5)
        # Even though two will simply be empty
        self.assertFalse(all_gradesets[student3])
        self.assertFalse(all_gradesets[student4])
        # The rest will have grade information in them
        self.assertTrue(all_gradesets[student1])
        self.assertTrue(all_gradesets[student2])
        self.assertTrue(all_gradesets[student5])
    ################################# Helpers #################################
    def _gradesets_and_errors_for(self, course_id, students):
        """Simple helper method to iterate through student grades and give us
        two dictionaries -- one that has all students and their respective
        gradesets, and one that has only students that could not be graded and
        their respective error messages."""
        students_to_gradesets = {}
        students_to_errors = {}
        for student, gradeset, err_msg in course_grades.iterate_grades_for(course_id, students):
            students_to_gradesets[student] = gradeset
            if err_msg:
                students_to_errors[student] = err_msg
        return students_to_gradesets, students_to_errors
class TestProgressSummary(TestCase):
"""
Test the method that calculates the score for a given block based on the
cumulative scores of its children. This test class uses a hard-coded block
hierarchy with scores as follows:
a
+--------+--------+
b c
+--------------+-----------+ |
d e f g
+-----+ +-----+-----+ | |
h i j k l m n
(2/5) (3/5) (0/1) - (1/3) - (3/10)
"""
# Tell Django to clean out all databases, not just default
multi_db = True
def setUp(self):
super(TestProgressSummary, self).setUp()
self.course_key = CourseLocator(
org='some_org',
course='some_course',
run='some_run'
)
self.loc_a = self.create_location('chapter', 'a')
self.loc_b = self.create_location('section', 'b')
self.loc_c = self.create_location('section', 'c')
self.loc_d = self.create_location('vertical', 'd')
self.loc_e = self.create_location('vertical', 'e')
self.loc_f = self.create_location('vertical', 'f')
self.loc_g = self.create_location('vertical', 'g')
self.loc_h = self.create_location('problem', 'h')
self.loc_i = self.create_location('problem', 'i')
self.loc_j = self.create_location('problem', 'j')
self.loc_k = self.create_location('html', 'k')
self.loc_l = self.create_location('problem', 'l')
self.loc_m = self.create_location('html', 'm')
self.loc_n = self.create_location('problem', 'n')
weighted_scores = {
self.loc_h: self.create_score(2, 5),
self.loc_i: self.create_score(3, 5),
self.loc_j: self.create_score(0, 1),
self.loc_l: self.create_score(1, 3),
self.loc_n: self.create_score(3, 10),
}
locations_to_scored_children = {
self.loc_a: [self.loc_h, self.loc_i, self.loc_j, self.loc_l, self.loc_n],
self.loc_b: [self.loc_h, self.loc_i, self.loc_j, self.loc_l],
self.loc_c: [self.loc_n],
self.loc_d: [self.loc_h, self.loc_i],
self.loc_e: [self.loc_j, self.loc_l],
self.loc_f: [],
self.loc_g: [self.loc_n],
self.loc_k: [],
self.loc_m: [],
}
course_structure = MagicMock()
course_structure.get_children = lambda location: locations_to_scored_children[location]
self.course_grade = CourseGrade(student=None, course=None, course_structure |
Z'] ) ),
Element( u'NM109', Properties(desc=u'Identification Code', req_sit=u'S', data_type=(u'AN',u'2',u'80'), position=9,
codes=[] ) ),
Element( u'NM110', Properties(desc=u'Entity Relationship Code', req_sit=u'N', data_type=(u'ID',u'2',u'2'), position=10,
codes=[] ) ),
Element( u'NM111', Properties(desc=u'Entity Identifier Code', req_sit=u'N', data_type=(u'ID',u'2',u'3'), position=11,
codes=[] ) ),
),
)
# X12 277 (Unsolicited) map: Loop 2220E "Service Line Information".
# Segment/composite/element tree of SVC, STC, REF and DTP entries.
# NOTE(review): appears machine-generated from an X12N implementation
# guide -- prefer regenerating over hand-editing these values.
parsed_277U_2220E = Loop( u'2220E', Properties(looptype='',repeat=u'>1',pos=u'180',req_sit=u'S',desc=u'Service Line Information'),
    Segment( u'SVC', Properties(syntax='',req_sit=u'R',repeat=u'1',pos=u'180',desc=u'Service Line Information'),
        Composite( u'C003', Properties(req_sit=u'R',refdes='',seq=u'01',desc=u'Composite Medical Procedure Identifier'),
            Element( u'SVC01-01', Properties(desc=u'Product/Service ID Qualifier', req_sit=u'R', data_type=(u'ID',u'2',u'2'), position=0,
                codes=[u'AD', u'CI', u'HC', u'ID', u'IV', u'N1', u'N2', u'N3', u'N4', u'ND', u'NH', u'NU', u'RB'] ) ),
            Element( u'SVC01-02', Properties(desc=u'Product/Service ID', req_sit=u'R', data_type=(u'AN',u'1',u'48'), position=1,
                codes=[] ) ),
            Element( u'SVC01-03', Properties(desc=u'Procedure Modifier', req_sit=u'S', data_type=(u'AN',u'2',u'2'), position=2,
                codes=[] ) ),
            Element( u'SVC01-04', Properties(desc=u'Procedure Modifier', req_sit=u'S', data_type=(u'AN',u'2',u'2'), position=3,
                codes=[] ) ),
            Element( u'SVC01-05', Properties(desc=u'Procedure Modifier', req_sit=u'S', data_type=(u'AN',u'2',u'2'), position=4,
                codes=[] ) ),
            Element( u'SVC01-06', Properties(desc=u'Procedure Modifier', req_sit=u'S', data_type=(u'AN',u'2',u'2'), position=5,
                codes=[] ) ),
            Element( u'SVC01-07', Properties(desc=u'Description', req_sit=u'N', data_type=(u'AN',u'1',u'80'), position=6,
                codes=[] ) ),
        ),
        Element( u'SVC02', Properties(desc=u'Monetary Amount', req_sit=u'R', data_type=(u'R',u'1',u'18'), position=2,
            codes=[] ) ),
        Element( u'SVC03', Properties(desc=u'Monetary Amount', req_sit=u'R', data_type=(u'R',u'1',u'18'), position=3,
            codes=[] ) ),
        Element( u'SVC04', Properties(desc=u'Product/Service ID', req_sit=u'S', data_type=(u'AN',u'1',u'48'), position=4,
            codes=[] ) ),
        Element( u'SVC05', Properties(desc=u'Quantity', req_sit=u'N', data_type=(u'R',u'1',u'15'), position=5,
            codes=[] ) ),
        Composite( u'C003', Properties(req_sit=u'N',refdes='',seq=u'06',desc=u'Composite Medical Procedure Identifier'),
        ),
        Element( u'SVC07', Properties(desc=u'Quantity', req_sit=u'S', data_type=(u'R',u'1',u'15'), position=7,
            codes=[] ) ),
    ),
    Segment( u'STC', Properties(syntax='',req_sit=u'S',repeat=u'1',pos=u'190',desc=u'Service Line Status Information'),
        Composite( u'C043', Properties(req_sit=u'R',refdes='',seq=u'01',desc=u'Health Care Claim Status'),
            Element( u'STC01-01', Properties(desc=u'Industry Code', req_sit=u'R', data_type=(u'AN',u'1',u'30'), position=0,
                codes=[] ) ),
            Element( u'STC01-02', Properties(desc=u'Industry Code', req_sit=u'R', data_type=(u'AN',u'1',u'30'), position=1,
                codes=[] ) ),
            Element( u'STC01-03', Properties(desc=u'Entity Identifier Code', req_sit=u'S', data_type=(u'ID',u'2',u'3'), position=2,
                codes=[] ) ),
        ),
        Element( u'STC02', Properties(desc=u'Date', req_sit=u'R', data_type=(u'DT',u'8',u'8'), position=2,
            codes=[] ) ),
        Element( u'STC03', Properties(desc=u'Action Code', req_sit=u'N', data_type=(u'ID',u'1',u'2'), position=3,
            codes=[] ) ),
        Element( u'STC04', Properties(desc=u'Monetary Amount', req_sit=u'S', data_type=(u'R',u'1',u'18'), position=4,
            codes=[] ) ),
        Element( u'STC05', Properties(desc=u'Monetary Amount', req_sit=u'N', data_type=(u'R',u'1',u'18'), position=5,
            codes=[] ) ),
        Element( u'STC06', Properties(desc=u'Date', req_sit=u'N', data_type=(u'DT',u'8',u'8'), position=6,
            codes=[] ) ),
        Element( u'STC07', Properties(desc=u'Payment Method Code', req_sit=u'N', data_type=(u'ID',u'3',u'3'), position=7,
            codes=[] ) ),
        Element( u'STC08', Properties(desc=u'Date', req_sit=u'N', data_type=(u'DT',u'8',u'8'), position=8,
            codes=[] ) ),
        Element( u'STC09', Properties(desc=u'Check Number', req_sit=u'N', data_type=(u'AN',u'1',u'16'), position=9,
            codes=[] ) ),
        Composite( u'C043', Properties(req_sit=u'S',refdes='',seq=u'10',desc=u'Health Care Claim Status'),
            Element( u'STC10-01', Properties(desc=u'Industry Code', req_sit=u'R', data_type=(u'AN',u'1',u'30'), position=0,
                codes=[] ) ),
            Element( u'STC10-02', Properties(desc=u'Industry Code', req_sit=u'R', data_type=(u'AN',u'1',u'30'), position=1,
                codes=[] ) ),
            Element( u'STC10-03', Properties(desc=u'Entity Identifier Code', req_sit=u'S', data_type=(u'ID',u'2',u'3'), position=2,
                codes=[] ) ),
        ),
        Composite( u'C043', Properties(req_sit=u'S',refdes='',seq=u'11',desc=u'Health Care Claim Status'),
            Element( u'STC11-01', Properties(desc=u'Industry Code', req_sit=u'R', data_type=(u'AN',u'1',u'30'), position=0,
                codes=[] ) ),
            Element( u'STC11-02', Properties(desc=u'Industry Code', req_sit=u'R', data_type=(u'AN',u'1',u'30'), position=1,
                codes=[] ) ),
            Element( u'STC11-03', Properties(desc=u'Entity Identifier Code', req_sit=u'S', data_type=(u'ID',u'2',u'3'), position=2,
                codes=[] ) ),
        ),
        Element( u'STC12', Properties(desc=u'Free-form Message Text', req_sit=u'N', data_type=(u'AN',u'1',u'264'), position=12,
            codes=[] ) ),
    ),
    Segment( u'REF', Properties(syntax='',req_sit=u'S',repeat=u'1',pos=u'200',desc=u'Service Line Item Identification'),
        Element( u'REF01', Properties(desc=u'Reference Identification Qualifier', req_sit=u'R', data_type=(u'ID',u'2',u'3'), position=1,
            codes=[u'FJ'] ) ),
        Element( u'REF02', Properties(desc=u'Reference Identification', req_sit=u'R', data_type=(u'AN',u'1',u'50'), position=2,
            codes=[] ) ),
        Element( u'REF03', Properties(desc=u'Description', req_sit=u'N', data_type=(u'AN',u'1',u'80'), position=3,
            codes=[] ) ),
        Composite( u'C040', Properties(req_sit=u'N',refdes='',seq=u'04',desc=u'Reference Identifier'),
        ),
    ),
    Segment( u'DTP', Properties(syntax='',req_sit=u'S',repeat=u'1',pos=u'210',desc=u'Service Line Date'),
        Element( u'DTP01', Properties(desc=u'Date/Time Qualifier', req_sit=u'R', data_type=(u'ID',u'3',u'3'), position=1,
            codes=[u'472'] ) ),
        Element( u'DTP02', Properties(desc=u'Date Time Period Format Qualifier', req_sit=u'R', data_type=(u'ID',u'2',u'3'), position=2,
            codes=[u'RD8'] ) ),
        Element( u'DTP03', Properties(desc=u'Date Time Period', req_sit=u'R', data_type=(u'AN',u'1',u'35'), position=3,
            codes=[] ) ),
    ),
)
parsed_277U_2200E = Loop( u'2200E', Properties(looptype='',repeat=u'>1',pos=u'090',req_ | sit=u'R',desc=u'Claim Submitter Trace Number'),
Segment( u'TRN', Properties(syntax='',req_sit=u'R',repeat=u'1',pos=u'090',desc=u'Claim Submitter Trace Number'),
Element( u'TRN01', Properties(desc=u'Trace Type Code', req_sit=u'R', data_type=(u'ID',u'1',u'2'), position=1,
codes=[u'2'] ) ),
Element( u'TRN02', Properties(desc=u'Reference Identification', req_sit=u'R', data_type=(u'AN',u'1',u'50'), position=2,
codes=[] ) ),
Element( u'TRN03', Properties(desc=u'Originating Company Identifier', req_sit=u'N', data_type=(u'AN',u'10',u'10'), position=3,
codes=[] ) ),
Element( u'TRN04', Properties(desc=u'Reference Identification', req_sit=u'N', data_type=(u'AN',u'1',u'50'), position=4,
codes=[] ) ),
),
Segment( u'STC', Properties(syntax='',req_sit=u'R',repeat=u'1',pos=u'100',desc=u'Claim Level Status Information'),
Composite( u'C043', Properties(req_sit=u'R',refdes='',seq=u'01',desc=u'Health Care Claim Status'),
Element( u'STC01-01', Properties(desc=u'Industry Code', req_sit=u'R', data_type=(u'AN',u'1',u'30'), position=0,
codes=[] ) ),
Element( u'STC01-02', Properties(desc=u'Industry Code', req_sit=u'R', data_type=(u'AN',u'1',u'30'), position=1,
codes=[] ) ),
Element( u'STC01-03', Properties(desc=u'Entity Identifier Code', req_sit=u'S', data_type=(u'ID',u'2',u'3'), position=2,
codes=[] ) ),
),
Element( u'STC02', Properties(desc=u'Date', req_sit=u'R', data_type=(u'DT',u'8',u'8'), position=2,
|
May be empty if the user retracted their vote.
"""
__slots__ = ('option_ids', 'user', 'poll_id', '_id_attrs')
def __init__(self, poll_id: str, user: User, option_ids: List[int], **_kwargs: Any):
self.poll_id = poll_id
self.user = user
self.option_ids = option_ids
self._id_attrs = (self.poll_id, self.user, tuple(self.option_ids))
@classmethod
def de_json(cls, data: Optional[JSONDict], bot: 'Bot') -> Optional['PollAnswer']:
"""See :meth:`telegram.TelegramObject.de_json`."""
data = cls._parse_data(data)
if not data:
return None
data['user'] = User.de_json(data.get('user'), bot)
return cls(**data)
class Poll(TelegramObject):
"""
This object contains information about a poll.
Objects of this class are comparable in terms of equality. Two objects of this class are
considered equal, if their :attr:`id` is equal.
Attributes:
id (:obj:`str`): Unique poll identifier.
question (:obj:`str`): Poll question, 1-300 characters.
options (List[:class:`PollOption`]): List of poll options.
total_voter_count (:obj:`int`): Total number of users that voted in the poll.
is_closed (:obj:`bool`): :obj:`True`, if the poll is closed.
is_anonymous (:obj:`bool`): :obj:`True`, if the poll is anonymous.
type (:obj:`str`): Poll type, currently can be :attr:`REGULAR` or :attr:`QUIZ`.
allows_multiple_answers (:obj:`bool`): :obj:`True`, if the poll allows multiple answers.
correct_option_id (:obj:`int`): Optional. Identifier of the correct answer option.
explanation (:obj:`str`): Optional. Text that is shown when a user chooses an incorrect
answer or taps on the lamp icon in a quiz-style poll.
explanation_entities (List[:class:`telegram.MessageEntity`]): Optional. Special entities
like usernames, URLs, bot commands, etc. that appear in the :attr:`explanation`.
open_period (:obj:`int`): Optional. Amount of time in seconds the poll will be active
after creation.
close_date (:obj:`datetime.datetime`): Optional. Point in time when the poll will be
automatically closed.
Args:
id (:obj:`str`): Unique poll identifier.
question (:obj:`str`): Poll question, 1-300 characters.
options (List[:class:`PollOption`]): List of poll options.
is_closed (:obj:`bool`): :obj:`True`, if the poll is closed.
is_anonymous (:obj:`bool`): :obj:`True`, if the poll is anonymous.
type (:obj:`str`): Poll type, currently can be :attr:`REGULAR` or :attr:`QUIZ`.
allows_multiple_answers (:obj:`bool`): :obj:`True`, if the poll allows multiple answers.
correct_option_id (:obj:`int`, optional): 0-based identifier of the correct answer option.
Available only for polls in the quiz mode, which are closed, or was sent (not
forwarded) by the bot or to the private chat with the bot.
explanation (:obj:`str`, optional): Text that is shown when a user chooses an incorrect
answer or taps on the lamp icon in a quiz-style poll, 0-200 characters.
explanation_entities (List[:class:`telegram.MessageEntity`], optional): Special entities
like usernames, URLs, bot commands, etc. that appear in the :attr:`explanation`.
open_period (:obj:`int`, optional): Amount of time in seconds the poll will be active
after creation.
close_date (:obj:`datetime.datetime`, optional): Point in time (Unix timestamp) when the
poll will be automatically closed. Converted to :obj:`datetime.datetime`.
"""
__slots__ = (
'total_voter_count',
'allows_multiple_answers',
'open_period',
'options',
'type',
'explanation_entities',
'is_anonymous',
'close_date',
'is_closed',
'id',
'explanation',
'question',
'correct_option_id',
'_id_attrs',
)
def __init__(
self,
id: str, # pylint: disable=W0622
question: str,
options: List[PollOption],
total_voter_count: int,
is_closed: bool,
is_anonymous: bool,
type: str, # pylint: disable=W0622
allows_multiple_answers: bool,
correct_option_id: int = None,
explanation: str = None,
explanation_entities: List[MessageEntity] = None,
open_period: int = None,
close_date: datetime.datetime = None,
**_kwargs: Any,
):
self.id = id # pylint: disable=C0103
self.question = question
self.options = options
self.total_voter_count = total_voter_count
self.is_closed = is_closed
self.is_anonymous = is_anonymous
self.type = type
self.allows_multiple_answers = allows_multiple_answers
self.correct_option_id = correct_option_id
self.explanation = explanation
self.explanation_entities = explanation_entities
self.open_period = open_period
self.close_date = close_date
self._id_attrs = (self.id,)
@classmethod
def de_json(cls, data: Optional[JSONDict], bot: 'Bot') -> Optional['Poll']:
"""See :meth:`telegram.TelegramObject.de_json`."""
data = cls._parse_data(data)
if not data:
return None
data['options'] = [PollOption.de_json(option, bot) for option in data['options']]
data['explanation_entities'] = MessageEntity.de_list(data.get('explanation_entities'), bot)
data['close_date'] = from_timestamp(data.get('close_date'))
return cls(**data)
def to_dict(self) -> JSONDict:
"""See :meth:`telegram.TelegramObject.to_dict`."""
data = super().to_dict()
data['options'] = [x.to_dict() for x in self.options]
if self.explanation_entities:
data['explanation_entities'] = [e.to_dict() for e in self.explanation_entities]
data['close_date'] = to_timestamp(data.get('close_date'))
return data
def parse_explanation_entity(self, entity: MessageEntity) -> str:
"""Returns the text from a given :class:`telegram.MessageEntity`.
Note:
This method is present because Telegram calculates the offset and length in
UTF-16 codepoint pairs, which some versions of Python don't handl | e automatically.
(That is, you can't just slice ``Message.text`` with the offset and length.)
Args:
entity (:class:`telegram.MessageEntity`): The entity to extract the text from. It must
be an entity that belongs to this message.
Re | turns:
:obj:`str`: The text of the given entity.
Raises:
RuntimeError: If the poll has no explanation.
"""
if not self.explanation:
raise RuntimeError("This Poll has no 'explanation'.")
# Is it a narrow build, if so we don't need to convert
if sys.maxunicode == 0xFFFF:
return self.explanation[entity.offset : entity.offset + entity.length]
entity_text = self.explanation.encode('utf-16-le')
entity_text = entity_text[entity.offset * 2 : (entity.offset + entity.length) * 2]
return entity_text.decode('utf-16-le')
def parse_explanation_entities(self, types: List[str] = None) -> Dict[MessageEntity, str]:
"""
Returns a :obj:`dict` that maps :class:`telegram.MessageEntity` to :obj:`str`.
It contains entities from this polls explanation filtered by their ``type`` attribute as
the key, and the text that each entity belongs to as the value of the :obj:`dict`.
Note:
This method should always be used instead of the :attr:`explanation_entities`
attribute, since it calculates the correct substring from the message text based on
UTF-16 codepoints. See :attr:`parse_explanation_entity` for more info.
Args:
types (List[:obj:`str`], optional): List of ``MessageEntity`` types as strings. If the
|
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: Gazetteer.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from . import DataStructures_pb2 as DataStructures__pb2
try:
FeatureTypes__pb2 = DataStructures__pb2.FeatureTypes__pb2
except AttributeError:
FeatureTypes__pb2 = DataStructures__pb2.FeatureTypes_pb2
from .DataStructures_pb2 import *
DESCRIPTOR = _descriptor.Fil | eDescriptor(
name='Gazetteer.proto',
package='CoreML.Specification.CoreMLModels',
syntax='proto3',
serialized_pb=_b('\n\x0fGazetteer.proto\x12!CoreML.Specification.CoreMLModel | s\x1a\x14\x44\x61taStructures.proto\"\x9c\x01\n\tGazetteer\x12\x10\n\x08revision\x18\x01 \x01(\r\x12\x10\n\x08language\x18\n \x01(\t\x12\x1a\n\x12modelParameterData\x18\x64 \x01(\x0c\x12@\n\x11stringClassLabels\x18\xc8\x01 \x01(\x0b\x32\".CoreML.Specification.StringVectorH\x00\x42\r\n\x0b\x43lassLabelsB\x02H\x03P\x00\x62\x06proto3')
,
dependencies=[DataStructures__pb2.DESCRIPTOR,],
public_dependencies=[DataStructures__pb2.DESCRIPTOR,])
_GAZETTEER = _descriptor.Descriptor(
name='Gazetteer',
full_name='CoreML.Specification.CoreMLModels.Gazetteer',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='revision', full_name='CoreML.Specification.CoreMLModels.Gazetteer.revision', index=0,
number=1, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='language', full_name='CoreML.Specification.CoreMLModels.Gazetteer.language', index=1,
number=10, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='modelParameterData', full_name='CoreML.Specification.CoreMLModels.Gazetteer.modelParameterData', index=2,
number=100, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='stringClassLabels', full_name='CoreML.Specification.CoreMLModels.Gazetteer.stringClassLabels', index=3,
number=200, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='ClassLabels', full_name='CoreML.Specification.CoreMLModels.Gazetteer.ClassLabels',
index=0, containing_type=None, fields=[]),
],
serialized_start=77,
serialized_end=233,
)
_GAZETTEER.fields_by_name['stringClassLabels'].message_type = DataStructures__pb2._STRINGVECTOR
_GAZETTEER.oneofs_by_name['ClassLabels'].fields.append(
_GAZETTEER.fields_by_name['stringClassLabels'])
_GAZETTEER.fields_by_name['stringClassLabels'].containing_oneof = _GAZETTEER.oneofs_by_name['ClassLabels']
DESCRIPTOR.message_types_by_name['Gazetteer'] = _GAZETTEER
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
Gazetteer = _reflection.GeneratedProtocolMessageType('Gazetteer', (_message.Message,), dict(
DESCRIPTOR = _GAZETTEER,
__module__ = 'Gazetteer_pb2'
# @@protoc_insertion_point(class_scope:CoreML.Specification.CoreMLModels.Gazetteer)
))
_sym_db.RegisterMessage(Gazetteer)
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('H\003'))
# @@protoc_insertion_point(module_scope)
|
# -*- coding: utf-8 -*-
"""Setup the SkyLines application"""
from faker import Faker
from skylines.model import User
def test_admin():
 """Build and return the example admin user fixture."""
 user = User()
 user.first_name = u'Example'
 user.last_name = u'Manager'
 user.email_address = u'manager@somedomain.com'
 user.password = user.original_password = u'managepass'
 user.admin = True
 return user
def test_user():
 """Build and return the example non-admin user fixture (tracking enabled)."""
 user = User()
 user.first_name = u'Example'
 user.last_name = u'User'
 user.email_address = u'example@test.de'
 user.password = user.original_password = u'test'
 user.tracking_key = 123456
 user.tracking_delay = 2
 return user
def test_users(n=50):
 """Build *n* user fixtures populated with reproducible fake data.

 n -- number of users to generate (default 50)
 """
 fake = Faker(locale='de_DE')
 # Fixed seed so repeated runs produce identical fixtures.
 fake.seed(42)
 users = []
 # range() instead of Py2-only xrange(): identical behavior here and
 # keeps the fixture importable under Python 3.
 for i in range(n):
  u = User()
  u.first_name = fake.first_name()
  u.last_name = fake.last_name()
  u.email_address = fake.email()
  u.password = u.original_password = fake.password()
  u.tracking_key = fake.random_number(digits=6)
  users.append(u)
 return users
|
mport *
# NAME IDEA -> pooling/random/sparse/distributed hebbian/horde/crowd/fragment/sample memory
# FEATURES:
# + boost -- neurons with empty mem slots learn faster
# + noise --
# + dropout -- temporal disabling of neurons
# + decay -- remove from mem
# + negatives -- learning to avoid detecting some patterns
# + fatigue -- winner has lower score for some time
# ~ sklearn -- compatible api
# - prune -- if input < mem shrink mem ? (problem with m > input len)
# - weights -- sample weights for imbalanced classes
# - popularity -- most popular neuron is cloned / killed
# NEXT VERSION:
# - attention
# - https://towardsdatascience.com/the-fall-of-rnn-lstm-2d1594c74ce0
# - https://towardsdatascience.com/memory-attention-sequences-37456d271992
# - https://medium.com/breathe-publication/neural-networks-building-blocks-a5c47bcd7c8d
# - https://distill.pub/2016/augmented-rnns/
# - http://akosiorek.github.io/ml/2017/10/14/visual-attention.html
# + IDEA:
# append activated neurons indexes to queue available as input
# queue ages at constant rate and drops oldest values
# - IDEA:
# each neuron has small memory of activation prior to winning
# this memory is compared to ctx and intersection added to score
# winner updated this memory
#   OPTION: several memories with different time frames
# NEXT VERSION:
# - layers -- rsm stacking
# NEXT VERSIONS:
# - numpy -- faster version
# - cython -- faster version
# - gpu -- faster version
# - distributed
class rsm:
 def __init__(self,n,m,c=0,**kw):
  """Random Sample Memory

  n -- number of neurons
  m -- max connections per neuron (memory)
  c -- length of the temporal context queue (0 disables context)

  Recognized keyword options (all stored in self.cfg; extra keys from
  **kw are kept as well via cfg.update):
  k        -- number of winning neurons per step
  method   -- score aggregation method used by score_one
  cutoff   -- threshold used by transform
  decay    -- probability of dropping a non-reinforced memory item
  dropout  -- fraction of neurons temporarily disabled during scoring
  fatigue  -- recovery time during which a recent winner scores lower
  boost    -- boost neurons with free memory slots
  noise    -- add small random noise to scores
  sequence -- enable sequence (context) learning
  """
  self.mem = {j:set() for j in range(n)}
  self.win = {j:0 for j in range(n)}
  self.tow = {j:-42000 for j in range(n)} # time of win
  self.t = 0
  self.ctx = deque(maxlen=c) # context queue
  # cfg
  cfg = {}
  cfg['n'] = n
  cfg['m'] = m
  cfg['c'] = c
  cfg['k'] = kw.get('k',1)
  cfg['method'] = kw.get('method',1)
  cfg['cutoff'] = kw.get('cutoff',0.5)
  cfg['decay'] = kw.get('decay',0.0)
  cfg['dropout'] = kw.get('dropout',0.0)
  cfg['fatigue'] = kw.get('fatigue',0)
  cfg['boost'] = kw.get('boost',True)
  cfg['noise'] = kw.get('noise',True)
  cfg['sequence'] = kw.get('sequence',False)
  cfg.update(kw)
  self.cfg = cfg
# ---[ core ]---------------------------------------------------------------
 def new_ctx(self):
  """Start a new sequence: clear the temporal context queue."""
  self.ctx.clear()
# TODO -- input length vs mem length
# TODO -- args from cfg
 def scores(self, input, raw=False, boost=False, noise=False, fatigue=0, dropout=0.0, **ignore): # -> dict[i] -> scores
  """Score every neuron against *input*; returns dict neuron -> score.

  input -- sparse binary features
  raw -- disable all postprocessing
  boost -- improve scores based on number of unconnected synapses (TODO)
  noise -- randomize scores to prevent snowballing
  fatigue -- recovery period; recent winners score proportionally lower
  dropout -- temporal disabling of neurons
  """
  mem = self.mem
  tow = self.tow
  N = self.cfg['n']
  M = self.cfg['m']
  t = self.t
  scores = {}
  # base score: overlap between the input and each neuron's memory
  for j in mem:
   scores[j] = len(set(input) & mem[j])
  if raw:
   return scores
  if noise:
   # noise < 1 so it only breaks ties, never outvotes a real overlap
   for j in mem:
    scores[j] += 0.9*random()
  if boost:
   # neurons with free memory slots get a head start so they fill up
   for j in mem:
    scores[j] += 1+2*(M-len(mem[j])) if len(mem[j])<M else 0
  # TODO boost also based on low win ratio / low tow
  if fatigue:
   # scale score by time since last win, capped at the fatigue window
   for j in mem:
    dt = 1.0*min(fatigue,t - tow[j])
    factor = dt / fatigue
    scores[j] *= factor
  if dropout:
   k = int(round(float(dropout)*N))
   # NOTE(review): combinations(N,k) comes from the truncated star import;
   # itertools.combinations would require an iterable, so presumably this
   # is a helper that picks k of N neuron indexes -- confirm its semantics.
   for j in combinations(N,k):
    scores[j] = -1
  return scores
def learn(self, input, negative=False, **ignore):
for i in range(0,len(input),10):
self.learn_(set(input[i:i+10]),negative=negative)
 def learn_(self, input, negative=False, **ignore):
  """Single learning step on one chunk of sparse binary features.

  input -- sparse binary features (a set)
  negative -- if True, winners unlearn the input instead of storing it
  k (from cfg) -- number of winning neurons
  """
  mem = self.mem
  win = self.win
  tow = self.tow
  ctx = self.ctx
  t = self.t
  cfg = self.cfg
  M = self.cfg['m']
  N = self.cfg['n']
  k = self.cfg['k']
  decay = self.cfg['decay']
  sequence = self.cfg['sequence']
  # union of everything any neuron already stores
  known_inputs = set()
  for j in mem:
   known_inputs.update(mem[j])
  # context
  input = input | set(ctx)
  # scoring; top() is a helper from the (truncated) star import
  scores = self.scores(input, **cfg)
  winners = top(k,scores)
  for j in winners:
   # negative learning
   if negative:
    mem[j].difference_update(input)
    continue
   # positive learning: winners adopt features no neuron knows yet,
   # up to their remaining memory capacity (pick() samples them)
   unknown_inputs = input - known_inputs
   mem[j].update(pick(unknown_inputs, M-len(mem[j])))
   known_inputs.update(mem[j])
   # handle decay
   if decay:
    decay_candidates = mem[j] - input
    if decay_candidates:
     for d in decay_candidates:
      if random() < decay:
       mem[j].remove(d)
   # handle popularity
   win[j] += 1
   # handle fatigue
   tow[j] = t
  # handle context: age existing entries by -N, then push the new
  # winners encoded as negative ids so they can't collide with inputs
  if sequence:
   for i in range(len(ctx)):
    ctx[i] -= N
   for j in winners:
    ctx.append(-j-1)
  self.t += 1
# ---[ auxiliary ]----------------------------------------------------------
def fit(self, X, Y):
cfg = self.cfg
for x,y in zip(X,Y):
negative = not y
self.learn(x,negative=negative,**cfg)
def fit2(self, X1, X0):
cfg = self.cfg
# TODO - unbalanced
for x1,x0 in zip(X1,X0):
self.learn(x1,negative=False,**cfg)
self.learn(x0,negative=True,**cfg)
def transform(self, X):
cutoff = self.cfg['cutoff']
out = []
for s in self.score_many(X):
y = 1 if s>=cutoff else 0
out += [y]
return out
def fit_transform(self, X, Y):
self.fit(X,Y)
return self.transform(X)
def score(self, X, Y, kind='acc'):
c = self.confusion(X,Y)
p = float(c['p'])
n = float(c['n'])
tp = float(c['tp'])
tn = float(c['tn'])
fp = float(c['fp'])
fn = float(c['fn'])
try:
if kind=='acc':
return (tp + tn) / (p + n)
elif kind=='f1':
return (2*tp) / (2*tp + fp + fn)
elif kind=='prec':
return tp / (tp + fp)
elif kind=='sens':
return tp / (tp + fn)
elif kind=='spec':
return tn / (tn + fp)
except ZeroDivisionError:
return float('nan')
def confusion(self, X, Y):
PY = self.transform(X)
p = 0
n = 0
tp = 0
tn = 0
fp = 0
fn = 0
for y,py in zip(Y,PY):
if y: p+=1
else: n+=1
if y:
if py: tp+=1
else: fn+=1
else:
if py: fp+=1
else: tn+=1
return dict(p=p,n=n,tp=tp,tn=tn,fp=fp,fn=fn)
def score_many(self, X):
out = []
for x in X:
s = self.score_one(x)
out += [s]
return out
# TODO
def calibrate(self, X, Y, kind='f1'):
for i in range(1,20):
c = 0.05*i
self.set_params(cutoff=c)
s = self.score(X,Y,kind)
print'{} {:.3} -> {:.3}'.format(kind,c,s)
def score_one(self, input):
"aggregate scores to scalar"
k = self.cfg['k']
method = self.cfg['method']
scores = self.scores(input)
M = self.cfg['m']
if method==0:
return top(k, scores, values=True)
elif method==1:
score = 1.0*sum(top(k, scores, values=True))/(k*(M+1))
return score
elif method==2:
score = 1.0*sum(top(k, scores, values=True))/(k*M)
return min(1.0,score)
if method==3:
score = 1.0*min(top(k, scores, values=True))/(M+1)
return score
elif method==4:
score = 1.0*min(top(k, scores, values=True))/M
return min(1.0,score)
if method==5:
score = 1.0*max(top(k, scores, values=True))/(M+1)
return score
elif method==6:
score = 1.0*max(top(k, scores, values=True))/M
return min(1.0,score)
def stats(self,prefix=''):
N = self.cfg['n']
M = self.cfg['m']
mem_v = self.mem.values()
out = {}
# mem
out['mem_empty'] = sum([1.0 if len(x)==0 else 0.0 for x in mem_v])/N
out['mem_not_empty'] = sum([1.0 if len(x)>0 else 0.0 for x in mem_v])/N
out['mem_full'] = sum([1.0 if len(x)==M else 0.0 for x in mem_v])/N
out['mem_avg'] = sum([1.0*len(x) for x in mem_v])/(N*M)
# win
win = list(sorted(self.win.values()))
out['win_min'] = win[0]
out['win_max'] = win[-1]
gini = 0
for a in win:
for b in win:
gini += abs(a-b)
gini = float(gini)/(2.0*len(win)*sum(win))
out['win_gini'] = round(gini,3)
# ctx
out['ctx_mem_sum'] = sum([1 if x<0 else 0 for m in mem_v for x in m])
out['ctx_mem_cnt'] = sum([max([1 if x<0 else 0 for x in m]) for m in mem_v if m])
out['ctx_mem_max'] = max([sum([1 if x<0 else 0 for x in m]) for m i |
# This file contains the arcpy functions that export rasters and shape files
# These were removed from the production script because they are not used.
# I'm saving them here just in case..
# The function array2raster uses arcpy to output a raster from the VIIRS array.
# This function DOES NOT handle the pixel size properly. The output is NOT
# properly aligned in space. These rasters are for testing only.
def array2raster(array, lat, lon, OutRaster):
 """Write *array* as a GeoTIFF under BaseDir/tiffs via arcpy.

 array -- VIIRS data array; lat/lon -- matching coordinate arrays.
 OutRaster -- output name without extension; '.tif' is appended.
 WARNING (from file header): pixel size is hard-coded to 1, so the output
 is NOT properly georeferenced -- testing use only.
 """
 # flip both axes so the array's origin matches the lower-left corner
 array = np.fliplr(np.flipud(array))
 lat = np.fliplr(np.flipud(lat))
 lon = np.fliplr(np.flipud(lon))
 OutRaster = OutRaster + ".tif"
 # overwrite any previous output of the same name
 if os.path.exists(os.path.join(BaseDir, "tiffs", OutRaster)):
  os.remove(os.path.join(BaseDir, "tiffs",OutRaster))
 cellSize = 1
 # lower-left corner coordinates taken from the (flipped) coordinate grids
 LLlat = float(lat[lat.shape[0]-1, 0])
 LLlon = float(lon[lon.shape[0]-1, 0])
 print "LLlat:", LLlat
 print "LLlon:", LLlon
 tempRaster = arcpy.NumPyArrayToRaster(array, arcpy.Point(LLlon, LLlat),cellSize, cellSize)
 tempRaster.save(os.path.join(BaseDir, "tiffs",OutRaster))
 # release references to the large arrays before returning
 del tempRaster
 array = None
 lat = None
 lon = None
 del array
 del lat
 del lon
# Output to shapefile
def out_to_shapefile(list, fileName, date):
 """Write a list of (lat, lon) pairs to a WGS-84 point shapefile.

 list -- iterable of (lat, lon) coordinate pairs
         (NOTE(review): parameter shadows the builtin 'list')
 fileName -- shapefile name without the '.shp' extension
 date -- value stored in each row's Date attribute
 """
 shp_file = fileName +'.shp'
 # Check for pre-existing shape, delete if necessary.
 if os.path.exists(os.path.join(BaseDir, shp_file)):
  arcpy.Delete_management(os.path.join(BaseDir, shp_file))
 # Set up parameters and delete create shapefile.
 geometry_type = "POINT"
 spatial = """GEOGCS["GCS_WGS_1984",DATUM["D_WGS_1984",SPHEROID["WGS_1984",6378137,298.257223563]],PRIMEM["Greenwich",0],UNIT["Degree",0.017453292519943295]]"""
 arcpy.CreateFeatureclass_management(BaseDir, shp_file, geometry_type, "", "Disabled", "Disabled", spatial)
 # Add attributes
 shp_file = os.path.join(BaseDir, shp_file)
 arcpy.AddField_management(shp_file, "Lat", "FLOAT")
 arcpy.AddField_management(shp_file, "Lon", "FLOAT")
 arcpy.AddField_management(shp_file, "Date", "DATE")
 # Set up cursor and loop through list adding rows.
 cursor = arcpy.da.InsertCursor(shp_file, ["Lat", "Lon", "Date", "SHAPE@XY"])
 for coord in list:
  # SHAPE@XY expects (x, y) == (lon, lat), hence the swapped order
  row = [coord[0], coord[1], date, (coord[1], coord[0])]
  cursor.insertRow(row)
 del cursor
# Output rasters from arrays
# The following should be uncommented if rasters are needed for testing.
##array2raster(M07ReflArray, LatArray, LonArray, "M07Refl")
##array2raster(M08ReflArray, LatArray, LonArray, "M08Refl")
##array2raster(M10ReflArray, LatArray, LonArray, "M10Refl")
##array2raster(M11ReflArray, LatArray, LonArray, "M11Refl")
##array2raster(AfArray, LatArray, LonArray, "ActiveFire")
# Output shapefile
# NOTE(review): this is scrap code saved from a production script (see file
# header); ShapeOut/ShapePath are read here but only assigned below, and the
# globals (PostBin, DBuser, pwd, DBname, IniFile, ini) come from elsewhere.
if ShapeOut == "y":
 # Export fire_events joined with their collections via pgsql2shp.
 print "Exporting to point shapefile:"
 if not os.path.exists(ShapePath):
  os.makedirs(ShapePath)
 shp = ShapePath + '/' + 'fire_collection_point_' + datetime.datetime.now().strftime('%Y%m%d_%H%M%S')
 Pgsql2shpExe = os.path.join(PostBin, "pgsql2shp")
 query = '\"SELECT a.*, b.fid as col_id, b.active FROM fire_events a, fire_collections b WHERE a.collection_id = b.fid;\"'
 command = '\"{0}\" -f {1} -h localhost -u {2} -P {3} {4} {5}'.format(Pgsql2shpExe, shp, DBuser, pwd, DBname, query).replace('\\', '/')
 print command
 subprocess.call(command, shell = True)
 # Archive the ini file alongside the export, timestamped.
 shutil.copy2(IniFile, os.path.join(ShapePath, os.path.basename(IniFile + '_'+ datetime.datetime.now().strftime('%Y%m%d_%H%M%S'))))
 # Export dissolved 375 m buffers around events, grouped per collection.
 print "Exporting to polygon shapefile:"
 if not os.path.exists(ShapePath):
  os.makedirs(ShapePath)
 shp = ShapePath + '/' + 'fire_collection_poly_' + datetime.datetime.now().strftime('%Y%m%d_%H%M%S')
 Pgsql2shpExe = os.path.join(PostBin, "pgsql2shp")
 query = '\"SELECT ST_Multi(ST_Union(ST_Expand(geom, 375))) as geom, collection_id FROM fire_events GROUP BY collection_id;\"'
 command = '\"{0}\" -f {1} -h localhost -u {2} -P {3} {4} {5}'.format(Pgsql2shpExe, shp, DBuser, pwd, DBname, query).replace('\\', '/')
 print command
 subprocess.call(command, shell = True)
 shutil.copy2(IniFile, os.path.join(ShapePath, os.path.basename(IniFile + '_'+ datetime.datetime.now().strftime('%Y%m%d_%H%M%S'))))
ShapeOut = ini.get("OutputFlags", "ShapeFile").lower()
ShapePath = ini.get("OutputFlags", "OutShapeDir").lower()
# lines from ini file
#ShapeFile = n ; Flag to output to shapefile using pgsql2shp
#
#OutShapeDir = c:\fiddle\VIIRS_Data\ShapeOut ; Shapefile output directory
|
from unittest.mock import Mock
import sqlalchemy as sa
from sqlalchemy import ForeignKey
from sqlalchemy import Integer
from sqlalchemy import String
from sqlalchemy import testing
from sqlalchemy.orm import query
from sqlalchemy.orm import relationship
from sqlalchemy.orm import scoped_session
from sqlalchemy.orm import Session
from sqlalchemy.orm import sessionmaker
from sqlalchemy.testing import assert_raises_message
from sqlalchemy.testing import assert_warns_message
from sqlalchemy.testing import eq_
from sqlalchemy.testing import fixtures
from sqlalchemy.testing import is_
from sqlalchemy.testing import mock
from sqlalchemy.testing.schema import Column
from sqlalchemy.testing.schema import Table
class ScopedSessionTest(fixtures.MappedTest):
 """Tests for scoped_session: query properties, configuration errors,
 scope functions, method proxying, and custom Session subclasses."""
 @classmethod
 def define_tables(cls, metadata):
  # Parent/child pair: table2.someid references table1.id.
  Table(
   "table1",
   metadata,
   Column(
    "id", Integer, primary_key=True, test_needs_autoincrement=True
   ),
   Column("data", String(30)),
  )
  Table(
   "table2",
   metadata,
   Column(
    "id", Integer, primary_key=True, test_needs_autoincrement=True
   ),
   Column("someid", None, ForeignKey("table1.id")),
  )
 def test_basic(self):
  # Round-trip via the scoped registry and the class-bound query
  # properties, including a custom query_cls.
  table2, table1 = self.tables.table2, self.tables.table1
  Session = scoped_session(sa.orm.sessionmaker(testing.db))
  class CustomQuery(query.Query):
   pass
  class SomeObject(fixtures.ComparableEntity):
   query = Session.query_property()
  class SomeOtherObject(fixtures.ComparableEntity):
   query = Session.query_property()
   custom_query = Session.query_property(query_cls=CustomQuery)
  self.mapper_registry.map_imperatively(
   SomeObject,
   table1,
   properties={"options": relationship(SomeOtherObject)},
  )
  self.mapper_registry.map_imperatively(SomeOtherObject, table2)
  s = SomeObject(id=1, data="hello")
  sso = SomeOtherObject()
  s.options.append(sso)
  Session.add(s)
  Session.commit()
  Session.refresh(sso)
  Session.remove()
  # Same result whether queried through the registry or the properties.
  eq_(
   SomeObject(
    id=1, data="hello", options=[SomeOtherObject(someid=1)]
   ),
   Session.query(SomeObject).one(),
  )
  eq_(
   SomeObject(
    id=1, data="hello", options=[SomeOtherObject(someid=1)]
   ),
   SomeObject.query.one(),
  )
  eq_(
   SomeOtherObject(someid=1),
   SomeOtherObject.query.filter(
    SomeOtherObject.someid == sso.someid
   ).one(),
  )
  assert isinstance(SomeOtherObject.query, query.Query)
  assert not isinstance(SomeOtherObject.query, CustomQuery)
  assert isinstance(SomeOtherObject.custom_query, query.Query)
 def test_config_errors(self):
  # Re-configuring while a session is already present must raise/warn.
  Session = scoped_session(sa.orm.sessionmaker())
  s = Session() # noqa
  assert_raises_message(
   sa.exc.InvalidRequestError,
   "Scoped session is already present",
   Session,
   bind=testing.db,
  )
  assert_warns_message(
   sa.exc.SAWarning,
   "At least one scoped session is already present. ",
   Session.configure,
   bind=testing.db,
  )
 def test_call_with_kwargs(self):
  # Kwargs are accepted only when no session exists for the scope.
  mock_scope_func = Mock()
  SessionMaker = sa.orm.sessionmaker()
  Session = scoped_session(sa.orm.sessionmaker(), mock_scope_func)
  s0 = SessionMaker()
  assert s0.autoflush == True
  mock_scope_func.return_value = 0
  s1 = Session()
  assert s1.autoflush == True
  assert_raises_message(
   sa.exc.InvalidRequestError,
   "Scoped session is already present",
   Session,
   autoflush=False,
  )
  # New scope id -> new session, so kwargs apply again.
  mock_scope_func.return_value = 1
  s2 = Session(autoflush=False)
  assert s2.autoflush == False
 def test_methods_etc(self):
  # Proxying: registry-level calls forward to the underlying session.
  mock_session = Mock()
  mock_session.bind = "the bind"
  sess = scoped_session(lambda: mock_session)
  sess.add("add")
  sess.delete("delete")
  sess.get("Cls", 5)
  eq_(sess.bind, "the bind")
  eq_(
   mock_session.mock_calls,
   [
    mock.call.add("add", _warn=True),
    mock.call.delete("delete"),
    mock.call.get(
     "Cls",
     5,
     options=None,
     populate_existing=False,
     with_for_update=None,
     identity_token=None,
     execution_options=None,
    ),
   ],
  )
  with mock.patch(
   "sqlalchemy.orm.session.object_session"
  ) as mock_object_session:
   sess.object_session("foo")
  eq_(mock_object_session.mock_calls, [mock.call("foo")])
 @testing.combinations(
  "style1",
  "style2",
  "style3",
  "style4",
 )
 def test_get_bind_custom_session_subclass(self, style):
  """test #6285"""
  # Each style is a different historically-valid get_bind override
  # signature; all must keep working through scoped_session.
  class MySession(Session):
   if style == "style1":
    def get_bind(self, mapper=None, **kwargs):
     return super().get_bind(mapper=mapper, **kwargs)
   elif style == "style2":
    # this was the workaround for #6285, ensure it continues
    # working as well
    def get_bind(self, mapper=None, *args, **kwargs):
     return super().get_bind(mapper, *args, **kwargs)
   elif style == "style3":
    # py2k style
    def get_bind(self, mapper=None, *args, **kwargs):
     return super(MySession, self).get_bind(
      mapper, *args, **kwargs
     )
   elif style == "style4":
    # py2k style
    def get_bind(self, mapper=None, **kwargs):
     return super(MySession, self).get_bind(
      mapper=mapper, **kwargs
     )
  s1 = MySession(testing.db)
  is_(s1.get_bind(), testing.db)
  ss = scoped_session(sessionmaker(testing.db, class_=MySession))
  is_(ss.get_bind(), testing.db)
 def test_attributes(self):
  # Every public Session attribute should be proxied by the registry,
  # except the known exclusions in ignore_list.
  expected = [
   name
   for cls in Session.mro()
   for name in vars(cls)
   if not name.startswith("_")
  ]
  ignore_list = {
   "connection_callable",
   "transaction",
   "in_transaction",
   "in_nested_transaction",
   "get_transaction",
   "get_nested_transaction",
   "prepare",
   "invalidate",
   "bind_mapper",
   "bind_table",
   "enable_relationship_loading",
   "dispatch",
  }
  SM = scoped_session(sa.orm.sessionmaker(testing.db))
  missing = [
   name
   for name in expected
   if not hasattr(SM, name) and name not in ignore_list
  ]
  eq_(missing, [])
|
# helper functions
import numpy as np
import matplotlib.pyplot as plt
import os
import pandas as pd
from diffpy.Structure import loadStructure
from pyobjcryst.crystal import CreateCrystalFromCIF
from diffpy.srreal.bondcalculator import BondCalculator
from diffpy.srreal.pdfcalculator import PDFCalculator
from diffpy.srreal.pdfcalculator import DebyePDFCalculator
#from pdf_lib.glbl import glbl
from glbl import glbl
def read_full_gr(f_name, rmin=glbl.r_min, rmax = glbl.r_max, skip_num=glbl.skip_row):
 '''Read a .gr file generated by PDFgui and cut it to [rmin, rmax).

 f_name -- path to the .gr data file
 rmin, rmax -- cut boundaries; must appear EXACTLY in the r grid,
               since list.index() does an exact match
 skip_num -- number of header rows to skip
 Returns a 2-row array [r, G(r)] over the cut range.
 '''
 read_in = np.loadtxt(str(f_name), skiprows = skip_num)
 raw_data = np.transpose(read_in)
 # Bug fix: the original referenced undefined names raw_in/raw_list,
 # which raised NameError on every call.
 raw_data_list = raw_data.tolist()
 upper_ind = raw_data_list[0].index(rmax)
 lower_ind = raw_data_list[0].index(rmin)
 cut_data_list = np.asarray([raw_data[0][lower_ind:upper_ind],
                             raw_data[1][lower_ind:upper_ind]])
 return cut_data_list
def read_gr(f_name):
 '''Load a two-row .gr dataset from the database and plot it.'''
 data = np.loadtxt(str(f_name))
 plt.plot(data[0], data[1])
 gr_name, _tail = os.path.splitext(f_name)
 plt.title(gr_name)
 plt.xlabel('r, A')
 plt.ylabel('G(r), A^-2')
def simple_pdf_cal(input_f, Debye = True, DW_factor = glbl.DW_factor, qmin = glbl.q_min, qmax = glbl.q_max, rmax = glbl.r_max):
 '''Calculate a theoretical PDF from a .cif/.xyz structure file.

 input_f -- structure file name
 Debye -- use DebyePDFCalculator when True, otherwise PDFCalculator
 DW_factor -- Debye-Waller factor accounting for thermal motion
              (default 1 means zero temperature)
 Returns (r, g) arrays.
 '''
 structure = loadStructure(input_f)
 structure.Bisoequiv = DW_factor
 # Both calculators are built; only the selected one is evaluated.
 pdf_calc = PDFCalculator(qmax = qmax, rmax = rmax)
 debye_calc = DebyePDFCalculator(qmax = qmax, rmax = rmax)
 chosen = debye_calc if Debye else pdf_calc
 (r, g) = chosen(structure, qmin = qmin)
 return (r, g)
def dbc_iter(struc_f, iter_range):
 '''Overlay Debye PDFs of *struc_f* for each delta2 value in iter_range.'''
 import numpy as np
 import matplotlib.pyplot as plt
 from diffpy.srreal.pdfcalculator import DebyePDFCalculator
 from diffpy.Structure import loadStructure
 calc = DebyePDFCalculator()
 calc.setStructure(loadStructure(struc_f))
 calc.qmax = 20
 calc.qmin = 0.5
 calc.rmax = 20
 for delta2_value in iter_range:
  r, g = calc(delta2 = delta2_value)
  plt.plot(r, g)
def iter_bond(x_min, x_max, step=5):
 '''Plot CsCl PDFs for *step* cubic lattice constants in [x_min, x_max].'''
 import numpy as np
 import matplotlib.pyplot as plt
 from diffpy.Structure import Structure, Atom, Lattice
 from diffpy.srreal.pdfcalculator import DebyePDFCalculator
 calc = DebyePDFCalculator()
 calc.qmax = 20
 calc.qmin = 0.5
 calc.rmax = 20
 lattice_constants = np.linspace(x_min, x_max, step)
 n_rows = len(lattice_constants)
 cesium = Atom('Cs', [0, 0, 0])
 chlorine = Atom('Cl', [0.5, 0.5, 0.5])
 plt.figure()
 for row, a in enumerate(lattice_constants):
  # cubic CsCl cell with lattice constant a
  cell = Structure(atoms=[cesium, chlorine],
                   lattice=Lattice(a, a, a, 90, 90, 90))
  calc.setStructure(cell)
  r, g = calc()
  print(a)
  plt.subplot(n_rows, 1, row+1)
  plt.plot(r, g)
  plt.title('bond length = %s' % str(a))
def single_plot(x,y):
 '''Plot y against x on the current axes.'''
 import matplotlib.pyplot as plt
 #plt.figure()
 plt.plot(x,y)
 # NOTE(review): pyplot.hold was deprecated in Matplotlib 2.0 and removed
 # in 3.0; this call raises AttributeError on modern Matplotlib -- confirm
 # the pinned version before relying on this function.
 plt.hold(False)
def multi_plot(data_sets):
 '''Plot every (x, y) dataset in *data_sets* via single_plot.

 data_sets -- a tuple of 2-row datasets, or a single dataset
 '''
 if not isinstance(data_sets, tuple):
  # Bug fix: "(data_sets)" is NOT a tuple -- a trailing comma is required
  # to wrap a single dataset.
  working_data = (data_sets,)
 else:
  working_data = data_sets
 # Bug fix: iterate the normalized collection; the original looped over
 # data_sets, so a single non-tuple dataset was iterated element-wise.
 for data in working_data:
  x_read = data[0]
  y_read = data[1]
  single_plot(x_read, y_read)
|
f tax.sequence != tax_templ.sequence:
notes += _("The sequence field is different.\n")
modified = True
if tax.amount != tax_templ.amount:
notes += _("The amount field is different.\n")
modified = True
if tax.type != tax_templ.type:
notes += _("The type field is different.\n")
modified = True
if tax.applicable_type != tax_templ.applicable_type:
notes += _("The applicable type field is different.\n")
modified = True
if tax.domain != tax_templ.domain:
notes += _("The domain field is different.\n")
modified = True
if tax.child_depend != tax_templ.child_depend:
notes += _("The child depend field is different.\n")
modified = True
if tax.python_compute != tax_templ.python_compute:
notes += _("The python compute field is different.\n")
modified = True
# if tax.tax_group != tax_templ.tax_group:
# notes += _("The tax group field is different.\n")
# modified = True
if tax.base_sign != tax_templ.base_sign:
notes += _("The base sign field is different.\n")
modified = True
if tax.tax_sign != tax_templ.tax_sign:
notes += _("The tax sign field is different.\n")
modified = True
if tax.include_base_amount != tax_templ.include_base_amount:
notes += _("The include base amount fiel | d is different.\n")
modified | = True
if tax.type_tax_use != tax_templ.type_tax_use:
notes += _("The type tax use field is different.\n")
modified = True
# TODO: We could check other tax fields for changes...
if modified:
# Tax code to update.
updated_taxes += 1
wiz_taxes_obj.create(cr, uid, {
'tax_id': tax_templ.id,
'update_chart_wizard_id': wizard.id,
'type': 'updated',
'update_tax_id': tax_id,
'notes': notes,
}, context)
for delay_vals_wiz in delay_wiz_tax:
wiz_taxes_obj.create(cr, uid, delay_vals_wiz, context)
return {'new': new_taxes, 'updated': updated_taxes, 'mapping': tax_templ_mapping}
 def _find_accounts(self, cr, uid, wizard, context=None):
  """
  Search for, and load, account templates to create/update.

  For each template of the wizard's chart: if no matching account exists
  it is queued as 'new'; if one exists and wizard.update_account is set,
  changed fields are noted and it is queued as 'updated'.
  Returns {'new': count, 'updated': count, 'mapping': template->account}.
  """
  if not wizard.chart_template_id:
   return {}
  new_accounts = 0
  updated_accounts = 0
  acc_templ_mapping = {}
  acc_obj = self.pool['account.account']
  acc_templ_obj = self.pool['account.account.template']
  wiz_accounts = self.pool['wizard.update.charts.accounts.account']
  # Remove previous accounts
  wiz_accounts_ids = wiz_accounts.search(cr, uid, [], context=context)
  wiz_accounts.unlink(cr, uid, wiz_accounts_ids, context=context)
  # Search for new / updated accounts
  root_account_id = wizard.chart_template_id.account_root_id.id
  acc_templ_criteria = [('chart_template_id',
                         '=',
                         wizard.chart_template_id.id)]
  if root_account_id:
   # Also include templates under the root account that carry no chart.
   acc_templ_criteria = (
    ['|'] + acc_templ_criteria +
    ['&', ('parent_id', 'child_of', [root_account_id]),
     ('chart_template_id', '=', False)]
   )
  acc_ids = acc_templ_obj.search(cr, uid, acc_templ_criteria,
                                 context=context)
  acc_ids.sort()
  for acc_templ in acc_templ_obj.browse(cr, uid, acc_ids,
                                        context=context):
   # Ensure the account template is on the map (search for the mapped
   # account id).
   account_id = self._map_account_template(cr, uid, wizard,
                                           acc_templ_mapping,
                                           acc_templ,
                                           context=context)
   if not account_id:
    new_accounts += 1
    wiz_accounts.create(cr, uid, {
     'account_id': acc_templ.id,
     'update_chart_wizard_id': wizard.id,
     'type': 'new',
     'notes': _('Code not found.'),
    }, context)
   elif wizard.update_account:
    # Check the account for changes.
    modified = False
    notes = ""
    account = acc_obj.browse(cr, uid, account_id, context=context)
    if account.name != acc_templ.name and account.name != wizard.company_id.name:
     notes += _("The name is different.\n")
     modified = True
    if account.type != acc_templ.type:
     notes += _("The type is different.\n")
     modified = True
    if account.user_type != acc_templ.user_type:
     notes += _("The user type is different.\n")
     modified = True
    if account.reconcile != acc_templ.reconcile:
     notes += _("The reconcile is different.\n")
     modified = True
    # TODO: We could check other account fields for changes...
    if modified:
     # Account to update.
     updated_accounts += 1
     wiz_accounts.create(cr, uid, {
      'account_id': acc_templ.id,
      'update_chart_wizard_id': wizard.id,
      'type': 'updated',
      'update_account_id': account_id,
      'notes': notes,
     }, context)
  return {'new': new_accounts,
          'updated': updated_accounts,
          'mapping': acc_templ_mapping}
def _find_fiscal_positions(self, cr, uid, wizard, chart_template_ids,
context=None):
"""
Search for, and load, fiscal position templates to create/update.
@param chart_template_ids: IDs of the chart templates to look on,
calculated once in the calling method.
"""
new_fps = 0
updated_fps = 0
fp_templ_mapping = {}
tax_templ_mapping = {}
acc_templ_mapping = {}
fp_templ_obj = self.pool['account.fiscal.position.template']
fp_obj = self.pool['account.fiscal.position']
wiz_fp = self.pool['wizard.update.charts.accounts.fiscal.position']
# Remove previous fiscal positions
wiz_fp.unlink(cr, uid, wiz_fp.search(cr, uid, []))
# Search for new / updated fiscal positions
fp_template_ids = fp_templ_obj.search(cr, uid,
[('chart_template_id',
'in',
chart_template_ids)],
context=context)
for fp_templ in fp_templ_obj.browse(cr, uid, fp_template_ids,
context=context):
# Ensure the fiscal position template is on the map (search for the
# mapped fiscal position id).
fp_id = self._map_fp_template(cr, uid, wizard, fp_templ_mapping,
fp_templ, context=context)
if not fp_id:
# New fiscal position template.
new_fps += 1
wiz_fp.create(cr, uid, {
'fiscal_position_id': fp_templ.id,
'update_chart_wizard_id': wi |
fr | om os import path
from .t | askqueue import TaskQueueClient
__all__ = ['TaskQueueClient']
with open(path.join(path.dirname(__file__), 'version.txt')) as fp:
__version__ = fp.read().strip()
|
alg = item.alg
popupmenu = QMenu()
executeAction = QAction(self.tr('Execute'), self.algorithmTree)
executeAction.triggered.connect(self.executeAlgorithm)
popupmenu.addAction(executeAction)
if alg.canRunInBatchMode and not alg.allowOnlyOpenedLayers:
executeBatchAction = QAction(
self.tr('Execute as batch process'),
self.algorithmTree)
executeBatchAction.triggered.connect(
self.executeAlgorithmAsBatchProcess)
popupmenu.addAction(executeBatchAction)
popupmenu.addSeparator()
editRenderingStylesAction = QAction(
self.tr('Edit rendering styles for outputs'),
self.algorithmTree)
editRenderingStylesAction.triggered.connect(
self.editRenderingStyles)
popupmenu.addAction(editRenderingStylesAction)
actions = Processing.contextMenuActions
if len(actions) > 0:
popupmenu.addSeparator()
for action in actions:
action.setData(alg, self)
if action.isEnabled():
contextMenuAction = QAction(action.name,
self.algorithmTree)
contextMenuAction.triggered.connect(action.execute)
popupmenu.addAction(contextMenuAction)
popupmenu.exec_(self.algorithmTree.mapToGlobal(point))
def editRenderingStyles(self):
item = self.algorithmTree.currentItem()
if isinstance(item, TreeAlgorithmItem):
alg = Processing.getAlgorithm(item.alg.commandLineName())
dlg = EditRenderingStylesDialog(alg)
dlg.exec_()
def executeAlgorithmAsBatchProcess(self):
item = self.algorithmTree.currentItem()
if isinstance(item, TreeAlgorithmItem):
alg = Processing.getAlgorithm(item.alg.commandLineName())
dlg = BatchProcessingDialog(alg)
dlg.exec_()
 def executeAlgorithm(self):
  """Run the tree selection: show an algorithm's parameters dialog and
  execute it, or trigger the selected action item."""
  item = self.algorithmTree.currentItem()
  if isinstance(item, TreeAlgorithmItem):
   alg = Processing.getAlgorithm(item.alg.commandLineName())
   # Abort early if the algorithm reports missing dependencies.
   message = alg.checkBeforeOpeningParametersDialog()
   if message:
    dlg = MissingDependencyDialog(message)
    dlg.exec_()
    return
   # Work on a copy so the registered algorithm stays pristine.
   alg = alg.getCopy()
   dlg = alg.getCustomParametersDialog()
   if not dlg:
    dlg = ParametersDialog(alg)
   # Remember the current map tool; the dialog may change it.
   canvas = iface.mapCanvas()
   prevMapTool = canvas.mapTool()
   dlg.show()
   dlg.exec_()
   # Restore the map tool if the dialog swapped it out.
   if canvas.mapTool() != prevMapTool:
    try:
     canvas.mapTool().reset()
    except:
     pass
    canvas.setMapTool(prevMapTool)
   if dlg.executed:
    showRecent = ProcessingConfig.getSetting(
     ProcessingConfig.SHOW_RECENT_ALGORITHMS)
    if showRecent:
     self.addRecentAlgorithms(True)
  if isinstance(item, TreeActionItem):
   action = item.action
   action.setData(self)
   action.execute()
def fillTree(self):
settings = QSettings()
useCategories = settings.value(self.USE_CATEGORIES, type=bool)
if useCategories:
self.fillTreeUsingCategories()
else:
self.fillTreeUsingProviders()
self.algorithmTree.sortItems(0, Qt.AscendingOrder)
self.addRecentAlgorithms(False)
 def addRecentAlgorithms(self, updating):
  """Insert (or refresh, when *updating* is True) the 'Recently used
  algorithms' group at the top of the toolbox tree."""
  showRecent = ProcessingConfig.getSetting(
   ProcessingConfig.SHOW_RECENT_ALGORITHMS)
  if showRecent:
   recent = ProcessingLog.getRecentAlgorithms()
   if len(recent) != 0:
    found = False
    if updating:
     # Drop the existing 'recent' group before rebuilding it.
     recentItem = self.algorithmTree.topLevelItem(0)
     treeWidget = recentItem.treeWidget()
     treeWidget.takeTopLevelItem(
      treeWidget.indexOfTopLevelItem(recentItem))
    recentItem = QTreeWidgetItem()
    recentItem.setText(0, self.tr('Recently used algorithms'))
    for algname in recent:
     alg = Processing.getAlgorithm(algname)
     if alg is not None:
      algItem = TreeAlgorithmItem(alg)
      recentItem.addChild(algItem)
      found = True
    # Only show the group if at least one algorithm still resolves.
    if found:
     self.algorithmTree.insertTopLevelItem(0, recentItem)
     recentItem.setExpanded(True)
  self.algorithmTree.setWordWrap(True)
 def fillTreeUsingCategories(self):
  """Rebuild the tree grouped by category/subcategory (via
  AlgorithmDecorator), honoring the search box filter; model and
  script providers are listed separately as provider items."""
  providersToExclude = ['model', 'script']
  self.algorithmTree.clear()
  text = unicode(self.searchBox.text())
  groups = {}
  for providerName in Processing.algs.keys():
   provider = Processing.algs[providerName]
   # Skip providers deactivated in settings.
   name = 'ACTIVATE_' + providerName.upper().replace(' ', '_')
   if not ProcessingConfig.getSetting(name):
    continue
   # Providers with their own actions are handled in the second pass.
   if providerName in providersToExclude \
     or len(ModelerUtils.providers[providerName].actions) != 0:
    continue
   algs = provider.values()
   # add algorithms
   for alg in algs:
    if not alg.showInToolbox:
     continue
    (altgroup, altsubgroup, altname) = \
     AlgorithmDecorator.getGroupsAndName(alg)
    if altgroup is None:
     continue
    # Apply the search filter against the decorated name.
    if text == '' or text.lower() in altname.lower():
     if altgroup not in groups:
      groups[altgroup] = {}
     group = groups[altgroup]
     if altsubgroup not in group:
      groups[altgroup][altsubgroup] = []
     subgroup = groups[altgroup][altsubgroup]
     subgroup.append(alg)
  # Build the Geoalgorithms > group > subgroup > algorithm hierarchy.
  if len(groups) > 0:
   mainItem = QTreeWidgetItem()
   mainItem.setText(0, 'Geoalgorithms')
   mainItem.setIcon(0, GeoAlgorithm.getDefaultIcon())
   mainItem.setToolTip(0, mainItem.text(0))
   for (groupname, group) in groups.items():
    groupItem = QTreeWidgetItem()
    groupItem.setText(0, groupname)
    groupItem.setIcon(0, GeoAlgorithm.getDefaultIcon())
    groupItem.setToolTip(0, groupItem.text(0))
    mainItem.addChild(groupItem)
    for (subgroupname, subgroup) in group.items():
     subgroupItem = QTreeWidgetItem()
     subgroupItem.setText(0, subgroupname)
     subgroupItem.setIcon(0, GeoAlgorithm.getDefaultIcon())
     subgroupItem.setToolTip(0, subgroupItem.text(0))
     groupItem.addChild(subgroupItem)
     for alg in subgroup:
      algItem = TreeAlgorithmItem(alg)
      subgroupItem.addChild(algItem)
   self.algorithmTree.addTopLevelItem(mainItem)
  # Second pass: the excluded providers get their own top-level items.
  for providerName in Processing.algs.keys():
   if providerName not in providersToExclude:
    continue
   name = 'ACTIVATE_' + providerName.upper().replace(' ', '_')
   if not ProcessingConfig.getSetting(name):
    continue
   providerItem = TreeProviderItem(providerName)
   self.algorithmTree.addTopLevelItem(providerItem)
def fillTreeUsingProviders(self):
    """Rebuild the toolbox tree with one top-level item per active provider."""
    self.algorithmTree.clear()
    for provider_name in Processing.algs.keys():
        # Providers can be switched off in the Processing settings.
        setting_name = 'ACTIVATE_' + provider_name.upper().replace(' ', '_')
        if ProcessingConfig.getSetting(setting_name):
            self.algorithmTree.addTopLevelItem(TreeProviderItem(provider_name))
|
""" Execute this file to launch Rekoder. Refer to README.md for usage. """
# Rekoder modules
from core.app import App
if __name__ == '__main__':
    # Load configuration file and start application.
    # NOTE: 'config.json' is resolved relative to the current working
    # directory, so launch from the project root.
    app = App()
    app.load_json_config('config.json')
    app.start()
|
import pytest
import unittest
from fs_radar.path_filter import makePathFilter, makeDirFilter
class MakePathFilterTest(unittest.TestCase):
    """Tests for makePathFilter: gitignore-like rules matched against paths.

    Conventions exercised below: a bare name matches at any depth, a
    leading './' anchors at the relative root, a leading '/' anchors at
    the filesystem root, a trailing '/' matches a directory and anything
    underneath it, '*' matches within one path segment, '**' spans
    segments, and rules prefixed with '!' exclude while '+' re-includes.
    """
    def test_empty_rules(self):
        # With no rules, nothing matches.
        f = makePathFilter([])
        assert f('') is False
        assert f('foo.txt') is False
    def test_file_at_any_depth(self):
        f = makePathFilter([
            'foo.txt'
        ])
        assert f('foo.txt')
        assert f('./foo.txt')
        assert f('a/b/foo.txt')
    def test_asterisk_for_file_at_any_depth(self):
        f = makePathFilter([
            '*.txt'
        ])
        assert f('foo.txt')
        assert f('./foo.txt')
        assert f('a/b/foo.txt')
    def test_file_pattern_must_not_used_as_prefix(self):
        # '.doc' must not match '.docx' — patterns are whole-name matches.
        f = makePathFilter([
            '.doc'
        ])
        assert f('foo.docx') is False
    def test_match_only_at_relative_root(self):
        f = makePathFilter([
            './foo.txt'
        ])
        assert f('./foo.txt')
        assert f('foo.txt')
        assert f('/foo.txt') is False
        assert f('a/b/foo.txt') is False
    def test_match_only_absolute_path(self):
        f = makePathFilter([
            '/a/b/foo.txt'
        ])
        assert f('/a/b/foo.txt')
        assert f('a/b/foo.txt') is False
    def test_match_directory_and_any_file_underneath(self):
        f = makePathFilter([
            'a/b/'
        ])
        assert f('a/b/')
        assert f('a/b')
        assert f('a/b/foo.txt')
        assert f('a/b/c/')
        assert f('a/b/c/bar')
    def test_do_not_use_directory_as_prefix(self):
        # 'a/b/' must not match the sibling directory 'a/bo'.
        f = makePathFilter([
            'a/b/'
        ])
        assert f('a/bo') is False
    def test_just_asterisk(self):
        f = makePathFilter([
            '*'
        ])
        assert f('') is False
        assert f('foo.txt')
        assert f('a/b/')
    def test_start_with_asterisk(self):
        f = makePathFilter([
            '*a',
            '*b/foo'
        ])
        assert f('a')
        assert f('xyza')
        assert f('b') is False
        assert f('b/foo')
        assert f('xb/foo')
    def test_single_asterisk(self):
        # '*' may appear at either end or both ends of a segment.
        f = makePathFilter([
            'a/*foo/a',
            'b/bar*/b',
            'c/*baz*/c',
        ])
        assert f('a/foo/a')
        assert f('a/xfoo/a')
        assert f('b/bar/b')
        assert f('b/barx/b')
        assert f('c/baz/c')
        assert f('c/xbaz/c')
        assert f('c/bazx/c')
        assert f('c/xbazx/c')
        assert f('abcdfoo/a') is False
    def test_just_multi_asterisks(self):
        f = makePathFilter([
            '**'
        ])
        assert f('') is False
        assert f('foo.txt')
        assert f('a/b/')
    def test_file_start_multi_asterisks(self):
        f = makePathFilter([
            '**a'
        ])
        assert f('foo.txt') is False
        assert f('ba')
        assert f('bar') is False
        assert f('ba/example/') is False
        assert f('x/y/a')
    def test_dir_start_multi_asterisks(self):
        f = makePathFilter([
            '**a/'
        ])
        assert f('ba')
        assert f('bar') is False
        assert f('ba/example/')
        assert f('x/y/a/')
    def test_multi_asterisks(self):
        # '**' between separators requires at least one intermediate segment.
        f = makePathFilter([
            'a/**/x'
        ])
        assert f('a/x') is False
        assert f('a/one-level/x')
        assert f('a/multi/level/deep/x')
        assert f('a/b/c') is False
    def test_exclusion(self):
        # '!' excludes previously included paths; '+' re-includes them.
        f = makePathFilter([
            "app/cache/*",
            "!app/cache/*.txt",
            "+app/cache/do-not-exclude-me.txt"
        ])
        assert f('app/cache/include-me')
        assert f('app/cache/exclude-me.txt') is False
        assert f('app/cache/do-not-exclude-me.txt')
    def test_working_directory_just_dot(self):
        # '.' means the whole working directory.
        f = makePathFilter([
            '.'
        ])
        assert f('.')
        assert f('./')
        assert f('foo.txt')
        assert f('./foo.txt')
        assert f('a/b/')
    def test_working_directory_dot_slash(self):
        # Redundant './' components are normalized away.
        f = makePathFilter([
            '././'
        ])
        assert f('.')
        assert f('./')
        assert f('foo.txt')
        assert f('./foo.txt')
        assert f('a/b/')
class MakeDirFilterTest(unittest.TestCase):
    """Tests for makeDirFilter: decides whether a directory can contain
    matching files (so a rule for a file keeps its ancestor directories)."""
    def test_empty_rules(self):
        f = makeDirFilter([])
        assert f('') is False
        assert f('a/') is False
    def test_dir_at_any_depth(self):
        f = makeDirFilter([
            'a/'
        ])
        assert f('a/')
        assert f('./a')
        assert f('b/a/')
    def test_ignore_file_keep_dir(self):
        # A file rule implies interest in the directory that contains it.
        f = makeDirFilter([
            'a/foo.txt'
        ])
        assert f('a/')
        assert f('./a')
        assert f('b/a/')
    def test_excluded_dir(self):
        # Excluding a file ('!a/c/foo.txt') must not exclude its directory;
        # re-inclusion ('+a/d/baz.txt') keeps the excluded dir reachable.
        f = makeDirFilter([
            'a/',
            '!a/b/',
            '!a/c/foo.txt',
            '!a/d/',
            '+a/d/baz.txt'
        ])
        assert f('a/')
        assert f('a/b/') is False
        assert f('a/c/')
        assert f('a/d/')
    def test_working_directory_just_dot(self):
        f = makeDirFilter([
            '.'
        ])
        assert f('.')
        assert f('./')
        assert f('foo.txt')
        assert f('./foo.txt')
        assert f('a/b/')
    def test_working_directory_dot_slash(self):
        f = makeDirFilter([
            '././'
        ])
        assert f('.')
        assert f('./')
        assert f('foo.txt')
        assert f('./foo.txt')
        assert f('a/b/')
    def test_single_asterisk_in_a_path_starting_with_dot(self):
        f = makeDirFilter([
            './a/*/c/'
        ])
        assert f('./a/b/c')
        assert f('./a/b/d') is False
        assert f('a/b/c')
        assert f('a/b/d') is False
    def test_double_asterisk_in_a_path_starting_with_dot(self):
        f = makeDirFilter([
            './a/**/c/'
        ])
        assert f('./a/b/c')
        assert f('./a/b1/b2/c')
        assert f('./a/b/d') is False
        assert f('a/b/c')
|
# -*- coding: utf-8
from __future__ import unicode_literals, absolute_import
from django.conf.urls import url, include
from unach_ | photo_server.urls import urlpatterns as unach_photo_server_urls
# Mount every route of the unach_photo_server app at the site root, under
# its own URL namespace so reverse() lookups stay unambiguous.
urlpatterns = [
    url(r'^', include(unach_photo_server_urls, namespace='unach_photo_server')),
]
|
# -*- coding: utf-8 -*-
# Copyright (C) 2014-2016 Andrey Antukh <niwi@niwi.nz>
# Copyright (C) 2014-2016 Jesús Espino <jespinog@gmail.com>
# Copyright (C) 2014-2016 David Barragán <bameda@dbarragan.com>
# Copyright (C) 2014-2016 Alejandro Alonso <alejandro.alonso@kaleidos.net>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that | it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Publi | c License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# Point Django at this app's AppConfig (pre-Django-3.2 convention).
default_app_config = "taiga.projects.attachments.apps.AttachmentsAppConfig"
|
# Install.py -- File system installation commands
# Copyright (C) 2007-2013 CEA
#
# This file is part of shine
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
from __future__ import print_function
import sys
from Shine.Configuration.Globals import Globals
from Shine.FSUtils import create_lustrefs
from Shine.Lustre.FileSystem import FSRemoteError
from Shine.Commands.Base.Command import Command, CommandHelpException
from Shine.Commands.Base.CommandRCDefs import RC_OK, RC_FAILURE
# Lustre events
from Shine.Commands.Base.FSEventHandler import FSGlobalEventHandler
class Install(Command):
    """
    shine install -m /path/to/model.lmf

    Install a new Lustre file system described by a model (LMF) file:
    copy the generated configuration to the involved nodes and register
    the file system in the backend.
    """
    NAME = "install"
    DESCRIPTION = "Install a new file system."

    def execute(self):
        """Run the install command.

        Returns RC_OK on success, or RC_FAILURE when the node list is
        invalid or a remote installation error occurs.
        Raises CommandHelpException when -m is missing or the model file
        cannot be found.
        """
        # Option sanity check: these selectors are meaningless for install.
        self.forbidden(self.options.fsnames, "-f, see -m")
        self.forbidden(self.options.labels, "-l")
        self.forbidden(self.options.indexes, "-i")
        self.forbidden(self.options.failover, "-F")

        rc = RC_OK

        if not self.options.model:
            # BUGFIX: a separating space was missing between the two
            # adjacent string literals, which rendered as
            # "Lustre model file path(-m <model_file>) argument required."
            raise CommandHelpException("Lustre model file path "
                    "(-m <model_file>) argument required.", self)

        eh = FSGlobalEventHandler(self)

        # Use this Shine.FSUtils convenience function.
        lmf = self.get_lmf_path()
        if lmf:
            print("Using Lustre model file %s" % lmf)
        else:
            raise CommandHelpException("Lustre model file for ``%s'' not found:"
                    " please use filename or full LMF path.\n"
                    "Your default model files directory (lmf_dir) is: %s" %
                    (self.options.model, Globals().get_lmf_dir()), self)

        install_nodes = self.options.nodes
        excluded_nodes = self.options.excludes

        fs_conf, fs = create_lustrefs(self.get_lmf_path(),
                                      event_handler=eh, nodes=install_nodes,
                                      excluded=excluded_nodes)

        # Register the filesystem in backend
        print("Registering FS %s to backend..." % fs.fs_name)
        if self.options.dryrun:
            rc = 0
        else:
            rc = self.register_fs(fs_conf)
        if rc:
            msg = "Error: failed to register FS to backend (rc=%d)" % rc
            print(msg, file=sys.stderr)
        else:
            print("Filesystem %s registered." % fs.fs_name)

        # Helper message.
        # If user specified nodes which were not used, warn him about it.
        actual_nodes = fs.components.managed().servers()
        if not self.check_valid_list(fs_conf.get_fs_name(), \
                actual_nodes, "install"):
            return RC_FAILURE

        # Install file system configuration files; normally, this should
        # not be done by the Shine.Lustre.FileSystem object itself, but as
        # all proxy methods are currently handled by it, it is more
        # convenient this way...
        try:
            fs.install(fs_conf.get_cfg_filename(),
                       dryrun=self.options.dryrun)
            # An optional tuning file is installed alongside the config.
            tuning_conf = Globals().get_tuning_file()
            if tuning_conf:
                fs.install(tuning_conf, dryrun=self.options.dryrun)
        except FSRemoteError as error:
            print("WARNING: Due to error, installation skipped on %s"
                  % error.nodes)
            rc = RC_FAILURE

        if not install_nodes and not excluded_nodes:
            # Give pointer to next user step.
            print("Use `shine format -f %s' to initialize the file system." %
                  fs_conf.get_fs_name())

        return rc

    def register_fs(self, fs_conf):
        """Register the file system and its targets in the backend.

        NOTE(review): this method returns None (falsy), so the
        "failed to register" branch in execute() can never trigger;
        backend errors presumably surface as exceptions instead —
        TODO confirm intended behavior before changing it.
        """
        fs_conf.register_fs()
        fs_conf.register_targets()
|
# Copyright (C) 2001-2006 Python Software Foundation
# Author: Barry Warsaw
# Contact: email-sig@python.org
"""Various types of useful iterators and generators."""
__all__ = [
'body_line_iterator',
'typed_subpart_iterator',
'walk',
# Do not include _structure() since it's part of the debugging API.
]
import sys
from io import StringIO
# This function will become a method of the Message class
def walk(self):
    """Walk over the message tree, yielding each subpart.

    Depth-first, pre-order: the message itself is yielded first, then each
    subpart of a multipart message is walked recursively.  This function is
    attached to the Message class as a method, hence the `self` parameter.
    """
    yield self
    if not self.is_multipart():
        return
    for part in self.get_payload():
        for descendant in part.walk():
            yield descendant
# These two functions are imported into the Iterators.py interface module.
def body_line_iterator(msg, decode=False):
    """Iterate over the parts, returning string payloads line-by-line.

    Optional decode (default False) is passed through to .get_payload().
    Non-string payloads (bytes, sub-message lists) are skipped.
    """
    for part in msg.walk():
        body = part.get_payload(decode=decode)
        if not isinstance(body, str):
            continue
        for line in StringIO(body):
            yield line
def typed_subpart_iterator(msg, maintype='text', subtype=None):
    """Iterate over the subparts with a given MIME type.

    Use `maintype' as the main MIME type to match against; this defaults to
    "text".  Optional `subtype' is the MIME subtype to match against; if
    omitted, only the main type is matched.
    """
    for part in msg.walk():
        if part.get_content_maintype() != maintype:
            continue
        if subtype is None or part.get_content_subtype() == subtype:
            yield part
def _structure(msg, fp=None, level=0, include_default=False):
    """A handy debugging aid: print the content-type tree of *msg*.

    Each part is printed on its own line, indented 4 spaces per nesting
    level; with include_default=True the default type is shown in brackets.
    """
    out = fp if fp is not None else sys.stdout
    indent = ' ' * (level * 4)
    print(indent + msg.get_content_type(), end='', file=out)
    if include_default:
        print(' [%s]' % msg.get_default_type(), file=out)
    else:
        print(file=out)
    if msg.is_multipart():
        for part in msg.get_payload():
            _structure(part, out, level + 1, include_default)
|
ce(levelSize)
self.levelRect = self.level.get_rect()
self.levelUnlocked = False
self.levelCleared = False
self.spike = createImage("%sspike.png"%(spritePath))
self.spikes = []
self.walls = []
self.blocks = []
self.platforms = []
self.pillars = []
self.dynamicObjects = {
"key": {
"sprite": createImage("%skey.png"%(spritePath)),
"rect": None # Don't need this, keeping it for format
}
}
# Position helper function
def checkEdgePos(self, pos, sprite):
    """Resolve a placement position.

    A coordinate of -1 is a sentinel meaning "snap the sprite flush with
    the far (right/bottom) edge of the level".  Returns the position as a
    mutable list.
    """
    resolved = list(pos)
    if resolved[0] == -1:
        resolved[0] = self.levelRect.width - sprite.get_width()
    if resolved[1] == -1:
        resolved[1] = self.levelRect.height - sprite.get_height()
    return resolved
# Level Decor ---
def setBackdrop(self):
    """Tile the backdrop image across the entire level surface."""
    tile_w = self.tile.get_width()
    tile_h = self.tile.get_height()
    for col in range(0, self.levelRect.width, tile_w):
        for row in range(0, self.levelRect.height, tile_h):
            self.level.blit(self.tile, (col, row))
def setStartDoor(self):
    """Draw the entry door behind the hero's starting position.

    The +20/+46 offsets centre the door on the hero sprite (hardcoded
    hero dimensions, as the original author noted).
    """
    rect = self.startDoor.get_rect()
    rect.centerx = self.initHeroPos[0] + 20
    rect.bottom = self.initHeroPos[1] + 46
    self.level.blit(self.startDoor, rect)
def addWallTop(self, pos, length):
    # Register a collision rect for a horizontal top wall of `length` blocks
    # and blit the matching border tiles onto the border surface.
    wWidth, wHeight = self.wallBlocks["top"].get_size()
    # NOTE(review): the rect is shifted by one block (-wWidth/-wHeight) and
    # the blit loop uses `wWidth * length` as the range *stop* rather than
    # `pos[0] + wWidth * length` — only equivalent when pos[0] is 0;
    # TODO confirm against actual level layouts.
    self.walls.append(Rect(
        pos[0] - wWidth,
        pos[1] - wHeight,
        wWidth * length,
        wHeight
    ))
    for top in range(pos[0], wWidth * length, wWidth):
        self.levelBorder.blit(self.wallBlocks["top"], (top, pos[1]))
def addWallRight(self, pos, length):
    # Register a collision rect for a vertical right-hand wall of `length`
    # blocks and blit the matching border tiles.
    wWidth, wHeight = self.wallBlocks["right"].get_size()
    # NOTE(review): range stop is `wHeight * length`, not
    # `pos[1] + wHeight * length` — see addWallTop; TODO confirm.
    self.walls.append(Rect(
        pos[0] - wWidth,
        pos[1] - wHeight,
        wWidth,
        wHeight * length
    ))
    for right in range(pos[1], wHeight * length, wHeight):
        self.levelBorder.blit(self.wallBlocks["right"], (pos[0], right))
def addWallBottom(self, pos, length):
    # Register a collision rect for a horizontal bottom wall of `length`
    # blocks and blit the matching border tiles.
    wWidth, wHeight = self.wallBlocks["bottom"].get_size()
    # NOTE(review): range stop is `wWidth * length`, not
    # `pos[0] + wWidth * length` — see addWallTop; TODO confirm.
    self.walls.append(Rect(
        pos[0] - wWidth,
        pos[1] - wHeight,
        wWidth * length,
        wHeight
    ))
    for bottom in range(pos[0], wWidth * length, wWidth):
        self.levelBorder.blit(self.wallBlocks["bottom"], (bottom, pos[1]))
def addWallLeft(self, pos, length):
    # Register a collision rect for a vertical left-hand wall of `length`
    # blocks and blit the matching border tiles.
    wWidth, wHeight = self.wallBlocks["left"].get_size()
    # NOTE(review): range stop is `wHeight * length`, not
    # `pos[1] + wHeight * length` — see addWallTop; TODO confirm.
    self.walls.append(Rect(
        pos[0] - wWidth,
        pos[1] - wHeight,
        wWidth,
        wHeight * length
    ))
    for left in range(pos[1], wHeight * length, wHeight):
        self.levelBorder.blit(self.wallBlocks["left"], (pos[0], left))
def addWallBoxTop(self, pos, size):
    # Draw a boxed wall section hanging from the top edge: a clipped
    # background fill, bottom tiles along its lower edge and side tiles on
    # both verticals.  `size` is given in border-block units.
    bBlockW, bBlockH = self.wallBlocks["right"].get_width(), self.wallBlocks["bottom"].get_height()
    # Convert block units to pixels.
    size = (size[0] * bBlockW, size[1] * bBlockH)
    pos = list(pos)
    # pos[0] == -1 is a sentinel: snap the box to the right edge.
    if pos[0] == -1: pos[0] = self.levelBorder.get_rect().right - size[0]
    self.walls.append(Rect((pos[0] - bBlockW, pos[1] - bBlockH), size))
    wallBoxRect = Rect(pos, size)
    # Clip all drawing to the box so tile runs cannot overflow it.
    self.levelBorder.set_clip(wallBoxRect)
    bg = Surface(size)
    bg.fill(self.bgBColor)
    self.levelBorder.blit(bg, wallBoxRect)
    for bottom in range(pos[0], pos[0] + size[0], self.wallBlocks["bottom"].get_width()):
        self.levelBorder.blit(self.wallBlocks["bottom"], (bottom, wallBoxRect.bottom - bBlockH))
    # NOTE(review): "right" tiles are drawn at wallBoxRect.left and "left"
    # tiles at wallBoxRect.right — presumably the art faces inward;
    # TODO confirm against the sprite assets.
    for right in range(pos[1], pos[1] + size[1], self.wallBlocks["right"].get_height()):
        self.levelBorder.blit(self.wallBlocks["right"], (wallBoxRect.left, right))
    for left in range(pos[1], pos[1] + size[1], self.wallBlocks["left"].get_height()):
        self.levelBorder.blit(self.wallBlocks["left"], (wallBoxRect.right - bBlockW, left))
    self.levelBorder.set_clip(None)
def addWallBoxBottom(self, pos, size):
    # Draw a boxed wall section rising from the bottom edge: a clipped
    # background fill, top tiles along its upper edge and side tiles on
    # both verticals.  `size` is given in border-block units.
    bBlockW, bBlockH = self.wallBlocks["left"].get_width(), self.wallBlocks["top"].get_height()
    # Convert block units to pixels.
    size = (size[0] * bBlockW, size[1] * bBlockH)
    pos = list(pos)
    # -1 sentinels: snap to the right and/or bottom edge respectively.
    if pos[0] == -1: pos[0] = self.levelBorder.get_rect().right - size[0]
    if pos[1] == -1: pos[1] = self.levelBorder.get_rect().bottom - size[1]
    self.walls.append(Rect((pos[0] - bBlockW, pos[1] - bBlockH), size))
    wallBoxRect = Rect(pos, size)
    # Clip all drawing to the box so tile runs cannot overflow it.
    self.levelBorder.set_clip(wallBoxRect)
    bg = Surface(size)
    bg.fill(self.bgBColor)
    self.levelBorder.blit(bg, wallBoxRect)
    for top in range(pos[0], pos[0] + size[0], self.wallBlocks["top"].get_width()):
        self.levelBorder.blit(self.wallBlocks["top"], (top, wallBoxRect.top))
    # NOTE(review): "left" tiles at wallBoxRect.right and "right" tiles at
    # wallBoxRect.left — same inward-facing convention as addWallBoxTop;
    # TODO confirm.
    for left in range(pos[1], pos[1] + size[1], self.wallBlocks["left"].get_height()):
        self.levelBorder.blit(self.wallBlocks["left"], (wallBoxRect.right - bBlockW, left))
    for right in range(pos[1], pos[1] + size[1], self.wallBlocks["right"].get_height()):
        self.levelBorder.blit(self.wallBlocks["right"], (wallBoxRect.left, right))
    self.levelBorder.set_clip(None)
# Level objects
def setKey(self, pos):
    """Place the key pickup: attach a collision rect at `pos` to its entry."""
    key_entry = self.dynamicObjects["key"]
    key_entry["rect"] = Rect(pos, key_entry["sprite"].get_size())
def unlockLevel(self):
    # Mark the level unlocked, play the pickup jingle, drop the key from
    # the dynamic objects so it is no longer drawn, and open the exit door.
    self.levelUnlocked = True
    playSound("./assets/sounds/key_sound.ogg")
    del self.dynamicObjects["key"]
    self.door.openDoor()
    self.door.updateDoor(self)
def addBlock(self, typeOf, pos):
    """Place a solid block of the requested type and draw it on the level."""
    image = self.blockTypes[typeOf]
    where = self.checkEdgePos(pos, image)
    self.blocks.append(Rect(where, image.get_size()))
    self.level.blit(image, where)
def addPlatform(self, typeOf, pos):
    """Place a platform of the requested type and draw it on the level."""
    image = self.platformTypes[typeOf]
    where = self.checkEdgePos(pos, image)
    self.platforms.append(Rect(where, image.get_size()))
    self.level.blit(image, where)
def addPillar(self, typeOf, pos):
    """Draw a pillar; only non-background pillars get a collision rect."""
    image = self.pillarTypes[typeOf]
    where = self.checkEdgePos(pos, image)
    if typeOf != "background":
        self.pillars.append(Rect(where, image.get_size()))
    self.level.blit(image, where)
def addSpike(self, pos):
    """Place a spike hazard and draw it on the level surface."""
    image = self.spike
    where = self.checkEdgePos(pos, image)
    self.spikes.append(Rect(where, image.get_size()))
    self.level.blit(image, where)
class RoomRegular(Room):
def __init__(self, spritePath, levelSize):
super().__init__(levelSize, spritePath)
self.bgFColor = 0x22373A
self.bgBColor = 0x1B2821
self.tile = createImage("%sRegular/backdrop.png"%(spritePath))
self.startDoor = createImage("%sRegular/door_start.png"%(spritePath))
self.wallBlocks = {
"top": createImage("%sRegular/border_top.png"%(spritePath)),
"right": createImage("%sRegular/border_right.png"%(spritePath)),
"bottom": createImage("%sRegular/border_bottom.png"%(spritePath)),
"left": createImage("%sRegular/border_left.png"%(spritePath))
}
self.blockTypes = {
"long": createImage("%sRegular/long_block.png"%(spritePath)),
"short": createImage("%sRegular/short_block.png"%(spritePath)),
"mini": createImage("%sRegular/mini_block.png"%(spritePath))
}
self.platformTypes = {
"long": createImage("%sRegular/long_platform.png"%(spritePath)),
"short": createImage("%sRegular/short_platform.png"%(spritePath)),
"mini": createImage("%sRegular/mini_platform.png"%(spritePath))
}
self.pillarTypes = {
"normal": createImage("%sRegular/normal_pillar.png"%(spritePath)),
"background": createImage("%sRegular/background_pill |
"""
Util classes
------------
Classes which repre | sent data types useful for the package pySpatialTools.
"""
## Spatial elements collectors
from spatialelement | s import SpatialElementsCollection, Locations
## Membership relations
from Membership import Membership
|
#
# ti-sac.py is a Titanium plug-in for Sublime Text 3
#
# developed by Steve Rogers, SpiralArm Consulting Ltd (www.spiralarm.uk)
# @sarmcon
#
#
import os
import subprocess

import sublime
import sublime_plugin

import Titanium.lib.tiutils as Ti
# This will create a new Alloy Widget
class sacAlloyWidgetCommand(sublime_plugin.WindowCommand):
    """Sidebar command: prompt for a name and create a new Alloy widget."""
    def run(self,paths=[]):
        # `paths` is supplied by the sidebar context menu; the first entry
        # is the project directory.  The mutable [] default is shared across
        # calls but never mutated here.
        if len(paths) > 0 and self.window.active_view():
            self.projectDir = paths[0]
            self.window.active_view().window().show_input_panel("Widget name:", '',self.createWidget, None, None)
    def createWidget(self, name):
        # Input-panel callback: create the widget under <project>/app.
        Ti.createAlloyWidget(self.projectDir + "/app", name)
# This will create a new Alloy Controller
class sacAlloyControllerCommand(sublime_plugin.WindowCommand):
    """Sidebar command: prompt for a name and create a new Alloy controller."""
    def run(self,paths=[]):
        # `paths` is supplied by the sidebar context menu; the first entry
        # is the project directory.
        if len(paths) > 0 and self.window.active_view():
            self.projectDir = paths[0]
            self.window.active_view().window().show_input_panel("Controller name:", '',self.createController, None, None)
    def createController(self, name):
        # Input-panel callback: create the controller under <project>/app.
        Ti.createAlloyController(self.projectDir + "/app", name)
# This will clean the current project
class sacCleanProjectCommand(sublime_plugin.WindowCommand):
    """Sidebar command: clean the build artefacts of the selected project."""
    def run(self,paths=[]):
        # First sidebar selection is the project directory to clean.
        if len(paths) > 0:
            Ti.cleanProject(paths[0])
# This will create a Titanium Alloy Project
class sacCreateAlloyCommand(sublime_plugin.TextCommand):
    """Create a new Titanium Alloy project and open it in a new Sublime instance."""
    def run(self, edit):
        # Ask for the App name; creation continues in the callback.
        self.view.window().show_input_panel("Project name:", 'test',self.createProjectFiles, None, None)
        #self.createProjectFiles("alloytest") #- test version that just creates the specified project
    def createProjectFiles(self, projectName):
        """Create the classic project, alloy-ify it, then open it in Sublime."""
        # Turn the console on so build output is visible.
        sublime.active_window().run_command("show_panel", {"panel": "console", "toggle": True})
        # Define the project meta data and the proposed directory
        self.projectDir = Ti.getProjectDirectory(projectName)
        Ti.consolePrint("","\n--------------------------------------------------------------------------------------------------")
        Ti.consolePrint("info", "Creating Project: %s in %s................." % (projectName,self.projectDir))
        # If dir exists then DONT create project
        if os.path.exists(self.projectDir):
            sublime.error_message("Unable to create Titanium project, the directory already exists: %s " % self.projectDir)
        else:
            # Step 1 - First Step Create Titanium skeleton project
            Ti.createClassicProject(projectName)
            # Step 2 - Now Generate the Alloy Bit
            Ti.generateAlloyProject(self.projectDir)
            # Step 3 - Now Create the sublime Project files
            Ti.consolePrint('info', "Generating Sublime Project....")
            Ti.createSublimeProject(self.projectDir)
            # Step 4 Finally open the project (opens in a new sublime instance).
            # Fixed: use subprocess with an argument list instead of
            # os.system("open %s" % ...) so project paths containing spaces
            # or shell metacharacters work and cannot be shell-injected.
            subprocess.call(["open", self.projectDir + ".sublime-project"])
            # Step 4a - possible add the project to the recent project list so it can be opened with Open Recent or Quick Project Switch
            #TODO::
# This will create a Titanium Classic Project
class sacCreateCommand(sublime_plugin.TextCommand):
    """Create a new Titanium Classic project and open it in a new Sublime instance."""
    def run(self, edit):
        # Ask for the App name; creation continues in the callback.
        self.view.window().show_input_panel("Project name:", 'test',self.createProjectFiles, None, None)
        #self.createProjectFiles("test") #- test version that just creates the specified project
    def createProjectFiles(self, projectName):
        """Create the classic project and open it in Sublime."""
        # Turn the console on so build output is visible.
        sublime.active_window().run_command("show_panel", {"panel": "console", "toggle": True})
        # Define the project meta data and the proposed directory
        self.projectDir = Ti.getProjectDirectory(projectName)
        Ti.consolePrint("","\n--------------------------------------------------------------------------------------------------")
        Ti.consolePrint("info", "Creating Project: %s in %s................." % (projectName,self.projectDir))
        # If dir exists then DONT create project
        if os.path.exists(self.projectDir):
            sublime.error_message("Unable to create Titanium project, the directory already exists: %s " % self.projectDir)
        else:
            # Step 1 - Create Titanium skeleton project
            Ti.createClassicProject(projectName)
            # Step 2 - Now Create the sublime Project files
            Ti.consolePrint('info', "Generating Sublime Project....")
            Ti.createSublimeProject(self.projectDir)
            # Step 3 Finally open the project (opens in a new sublime instance).
            # Fixed: use subprocess with an argument list instead of
            # os.system("open %s" % ...) so project paths containing spaces
            # or shell metacharacters work and cannot be shell-injected.
            subprocess.call(["open", self.projectDir + ".sublime-project"])
            # Step 3a - possible add the project to the recent project list so it can be opened with Open Recent or Quick Project Switch
            #TODO::
# This will open the Plugin - config file
class sacEditConfigCommand(sublime_plugin.TextCommand):
    """Open the plug-in's settings file for editing."""
    def run(self,edit):
        # NOTE(review): `open_file_settings` is not imported in the visible
        # part of this file — presumably defined elsewhere in the plugin;
        # verify before relying on this command.
        open_file_settings('titanium-sac.sublime-settings')
|
r, print_fail
@admin.command()
@click.option('-n', '--name', type=str, default=None,
              help='Name of the resource policy.')
def resource_policy(name):
    """
    Show details about a keypair resource policy. When `name` option is omitted, the
    resource policy for the current access_key will be returned.
    """
    # (display label, API field) pairs, in display order.
    fields = [
        ('Name', 'name'),
        ('Created At', 'created_at'),
        ('Default for Unspecified', 'default_for_unspecified'),
        ('Total Resource Slot', 'total_resource_slots'),
        ('Max Concurrent Sessions', 'max_concurrent_sessions'),
        ('Max Containers per Session', 'max_containers_per_session'),
        ('Max vFolder Count', 'max_vfolder_count'),
        ('Max vFolder Size', 'max_vfolder_size'),
        ('Idle Timeout', 'idle_timeout'),  # typo fix: was 'Idle Timeeout'
        ('Allowed vFolder Hosts', 'allowed_vfolder_hosts'),
    ]
    with Session() as session:
        try:
            rp = session.ResourcePolicy(session.config.access_key)
            info = rp.info(name, fields=(item[1] for item in fields))
        except Exception as e:
            print_error(e)
            sys.exit(1)
        rows = []
        if info is None:
            print('No such resource policy.')
            sys.exit(1)
        # Use a distinct loop variable so the `name` CLI option is not
        # shadowed (the original reused `name` here).
        for label, key in fields:
            rows.append((label, info[key]))
        print(tabulate(rows, headers=('Field', 'Value')))
@admin.group(invoke_without_command=True)
@click.pass_context
def resource_policies(ctx):
    '''
    List and manage resource policies.
    (admin privilege required)
    '''
    # When invoked with a subcommand (add/update/delete), act only as a
    # command group and let the subcommand run.
    if ctx.invoked_subcommand is not None:
        return
    # (display label, API field) pairs, in display order.
    fields = [
        ('Name', 'name'),
        ('Created At', 'created_at'),
        ('Default for Unspecified', 'default_for_unspecified'),
        ('Total Resource Slot', 'total_resource_slots'),
        ('Max Concurrent Sessions', 'max_concurrent_sessions'),
        ('Max Containers per Session', 'max_containers_per_session'),
        ('Max vFolder Count', 'max_vfolder_count'),
        ('Max vFolder Size', 'max_vfolder_size'),
        ('Idle Timeout', 'idle_timeout'),  # typo fix: was 'Idle Timeeout'
        ('Allowed vFolder Hosts', 'allowed_vfolder_hosts'),
    ]
    with Session() as session:
        try:
            items = session.ResourcePolicy.list(fields=(item[1] for item in fields))
        except Exception as e:
            print_error(e)
            sys.exit(1)
        if len(items) == 0:
            print('There are no keypair resource policies.')
            return
        print(tabulate((item.values() for item in items),
                       headers=(item[0] for item in fields)))
@resource_policies.command()
@click.argument('name', type=str, default=None, metavar='NAME')
@click.option('--default-for-unspecified', type=str, default='UNLIMITED',
              help='Default behavior for unspecified resources: '
              'LIMITED, UNLIMITED')
@click.option('--total-resource-slots', type=str, default='{}',
              help='Set total resource slots.')
@click.option('--max-concurrent-sessions', type=int, default=30,
              help='Number of maximum concurrent sessions.')
@click.option('--max-containers-per-session', type=int, default=1,
              help='Number of maximum containers per session.')
@click.option('--max-vfolder-count', type=int, default=10,
              help='Number of maximum virtual folders allowed.')
@click.option('--max-vfolder-size', type=int, default=0,
              help='Maximum virtual folder size (future plan).')
@click.option('--idle-timeout', type=int, default=1800,
              help='The maximum period of time allowed for kernels to wait '
              'further requests.')
# Kept for reference: click.Tuple cannot express a variadic host list here.
# @click.option('--allowed-vfolder-hosts', type=click.Tuple(str), default=['local'],
#               help='Locations to create virtual folders.')
@click.option('--allowed-vfolder-hosts', default=['local'],
              help='Locations to create virtual folders.')
def add(name, default_for_unspecified, total_resource_slots, max_concurrent_sessions,
        max_containers_per_session, max_vfolder_count, max_vfolder_size,
        idle_timeout, allowed_vfolder_hosts):
    '''
    Add a new keypair resource policy.

    NAME: NAME of a new keypair resource policy.
    '''
    with Session() as session:
        try:
            # Forward all options verbatim to the manager API.
            data = session.ResourcePolicy.create(
                name,
                default_for_unspecified=default_for_unspecified,
                total_resource_slots=total_resource_slots,
                max_concurrent_sessions=max_concurrent_sessions,
                max_containers_per_session=max_containers_per_session,
                max_vfolder_count=max_vfolder_count,
                max_vfolder_size=max_vfolder_size,
                idle_timeout=idle_timeout,
                allowed_vfolder_hosts=allowed_vfolder_hosts,
            )
        except Exception as e:
            print_error(e)
            sys.exit(1)
        # The API reports soft failures via data['ok'] rather than raising.
        if not data['ok']:
            print_fail('KeyPair Resource Policy creation has failed: {0}'
                       .format(data['msg']))
            sys.exit(1)
        item = data['resource_policy']
        print('Keypair resource policy ' + item['name'] + ' is created.')
@resource_policies.command()
@click.argument('name', type=str, default=None, metavar='NAME')
@click.option('--default-for-unspecified', type=str,
              help='Default behavior for unspecified resources: '
              'LIMITED, UNLIMITED')
@click.option('--total-resource-slots', type=str,
              help='Set total resource slots.')
@click.option('--max-concurrent-sessions', type=int,
              help='Number of maximum concurrent sessions.')
@click.option('--max-containers-per-session', type=int,
              help='Number of maximum containers per session.')
@click.option('--max-vfolder-count', type=int,
              help='Number of maximum virtual folders allowed.')
@click.option('--max-vfolder-size', type=int,
              help='Maximum virtual folder size (future plan).')
@click.option('--idle-timeout', type=int,
              help='The maximum period of time allowed for kernels to wait '
              'further requests.')
@click.option('--allowed-vfolder-hosts', help='Locations to create virtual folders.')
def update(name, default_for_unspecified, total_resource_slots,
           max_concurrent_sessions, max_containers_per_session, max_vfolder_count,
           max_vfolder_size, idle_timeout, allowed_vfolder_hosts):
    """
    Update an existing keypair resource policy.

    NAME: NAME of a keypair resource policy to update.
    """
    with Session() as session:
        try:
            # Options left unset stay None and are forwarded as-is.
            data = session.ResourcePolicy.update(
                name,
                default_for_unspecified=default_for_unspecified,
                total_resource_slots=total_resource_slots,
                max_concurrent_sessions=max_concurrent_sessions,
                max_containers_per_session=max_containers_per_session,
                max_vfolder_count=max_vfolder_count,
                max_vfolder_size=max_vfolder_size,
                idle_timeout=idle_timeout,
                allowed_vfolder_hosts=allowed_vfolder_hosts,
            )
        except Exception as e:
            print_error(e)
            sys.exit(1)
        if not data['ok']:
            # Fixed copy-paste error: this is the *update* command, but the
            # failure message previously said "creation has failed".
            print_fail('KeyPair Resource Policy update has failed: {0}'
                       .format(data['msg']))
            sys.exit(1)
        print('Update succeeded.')
@resource_policies.command()
@click.argument('name', type=str, default=None, metavar='NAME')
def delete(name):
    """
    Delete a keypair resource policy.

    NAME: NAME of a keypair resource policy to delete.
    """
    with Session() as session:
        # Interactive confirmation; any answer not starting with 'y' aborts.
        if input('Are you sure? (y/n): ').lower().strip()[:1] != 'y':
            print('Canceled.')
            sys.exit(1)
        try:
            data = session.ResourcePolicy.delete(name)
        except Exception as e:
            print_error(e)
            sys.exit(1)
        # The API reports soft failures via data['ok'] rather than raising.
        if not data['ok']:
            print_fail('KeyPair Resource Policy deletion has failed: {0}'
                       .format(data['msg']))
            sys.exit(1)
esult['Count']
la.min = result['Min']
la.max = result['Max']
la.average = result['Average']
la.median = result['Median']
la.stddev = result['StandardDeviation']
la.sum = result['Sum']
la.unique_values = result['unique_values']
la.last_stats_updated = datetime.datetime.now()
la.attribute_label = field.title()
la.visible = ftype.find("gml:") != 0
la.display_order = iter
la.save()
iter += 1
logger.debug(
"Created [%s] attribute for [%s]",
field,
layer.name.encode('utf-8'))
else:
logger.debug("No attributes found")
def set_styles(layer, gs_catalog):
    """Mirror a GeoServer layer's default and alternate styles onto `layer`.

    The default style is saved first, then each alternate style; the full
    list is assigned to layer.styles and the layer is returned.
    """
    gs_layer = gs_catalog.get_layer(layer.name)
    layer.default_style = save_style(gs_layer.default_style)
    collected = [layer.default_style]
    for extra_style in gs_layer.styles:
        collected.append(save_style(extra_style))
    layer.styles = collected
    return layer
def save_style(gs_style):
    """Get or create the Django Style row for a GeoServer style and sync
    its SLD title, body and URL from the catalog record."""
    style, created = Style.objects.get_or_create(name=gs_style.sld_name)
    style.sld_title = gs_style.sld_title
    style.sld_body = gs_style.sld_body
    style.sld_url = gs_style.body_href()
    style.save()
    return style
def is_layer_attribute_aggregable(store_type, field_name, field_type):
    """
    Decipher whether layer attribute is suitable for statistical derivation
    """
    # Statistics only make sense for numeric, non-identifier fields of
    # vector ('dataStore') layers.  Checks are ordered as in the original:
    # store type first, then data type, then identifier-name screening.
    return (store_type == 'dataStore'
            and field_type in LAYER_ATTRIBUTE_NUMERIC_DATA_TYPES
            and field_name.lower() not in ('id', 'identifier'))
def get_attribute_statistics(layer_name, field):
    """
    Generate statistics (range, mean, median, standard deviation, unique values)
    for layer attribute

    Returns the WPS statistics result, or None when WPS is disabled or the
    request fails (the failure is logged, not raised).
    """
    logger.debug('Deriving aggregate statistics for attribute %s', field)
    if not ogc_server_settings.WPS_ENABLED:
        return None
    try:
        return wps_execute_layer_attribute_statistics(layer_name, field)
    except Exception:
        # Deliberate best-effort: statistics are optional metadata, so any
        # WPS error is logged and None is returned implicitly.
        logger.exception('Error generating layer aggregate statistics')
def get_wcs_record(instance, retry=True):
    """Fetch the WCS coverage record for `instance` from the OGC server.

    On a miss, waits 2 seconds and retries once (retry=False on the
    recursive call); raises GeoNodeException when still not found.
    """
    wcs = WebCoverageService(ogc_server_settings.public_url + 'wcs', '1.0.0')
    # Coverages are keyed as "<workspace>:<name>" in the WCS contents.
    key = instance.workspace + ':' + instance.name
    if key in wcs.contents:
        return wcs.contents[key]
    else:
        msg = ("Layer '%s' was not found in WCS service at %s." %
               (key, ogc_server_settings.public_url)
               )
        if retry:
            logger.debug(
                msg +
                ' Waiting a couple of seconds before trying again.')
            time.sleep(2)
            return get_wcs_record(instance, retry=False)
        else:
            raise GeoNodeException(msg)
def get_coverage_grid_extent(instance):
    """Return the coverage extent size in pixels as a list of ints.

    Each entry is ``high - low + 1`` for the matching pair of WCS grid
    limits of ``instance``.
    """
    grid = get_wcs_record(instance).grid
    sizes = []
    for high, low in zip(grid.highlimits, grid.lowlimits):
        sizes.append(int(high) - int(low) + 1)
    return sizes
# Map a generic layer kind to the gsconfig resource type GeoServer expects.
GEOSERVER_LAYER_TYPES = {
    'vector': FeatureType.resource_type,
    'raster': Coverage.resource_type,
}
def geoserver_layer_type(filename):
    """Return the GeoServer resource type string for ``filename``.

    Raises KeyError when ``layer_type`` yields a kind other than
    'vector' or 'raster'.
    """
    the_type = layer_type(filename)
    return GEOSERVER_LAYER_TYPES[the_type]
def cleanup(name, uuid):
    """Delete GeoServer and Catalogue records for a given ``name``.

    Useful to clean the mess when something goes terribly wrong.
    If a Django ``Layer`` row with this name exists, no cleanup is done
    and ``GeoNodeException`` is raised, so live data is never touched.
    """
    try:
        Layer.objects.get(name=name)
    except Layer.DoesNotExist:
        pass
    else:
        msg = ('Not doing any cleanup because the layer %s exists in the '
               'Django db.' % name)
        raise GeoNodeException(msg)
    cat = gs_catalog
    gs_store = None
    gs_layer = None
    gs_resource = None
    # FIXME: Could this lead to someone deleting for example a postgis db
    # with the same name of the uploaded file?.
    try:
        gs_store = cat.get_store(name)
        if gs_store is not None:
            gs_layer = cat.get_layer(name)
            if gs_layer is not None:
                gs_resource = gs_layer.resource
        else:
            gs_layer = None
            gs_resource = None
    except FailedRequestError as e:
        msg = ('Couldn\'t connect to GeoServer while cleaning up layer '
               '[%s] !!', str(e))
        logger.warning(msg)
    # Best-effort deletion: a failure on one object must not stop cleanup
    # of the remaining ones, so errors are logged and swallowed — but only
    # Exception, never the bare except that also caught SystemExit.
    if gs_layer is not None:
        try:
            cat.delete(gs_layer)
        except Exception:
            logger.warning("Couldn't delete GeoServer layer during cleanup()")
    if gs_resource is not None:
        try:
            cat.delete(gs_resource)
        except Exception:
            msg = 'Couldn\'t delete GeoServer resource during cleanup()'
            logger.warning(msg)
    if gs_store is not None:
        try:
            cat.delete(gs_store)
        except Exception:
            logger.warning("Couldn't delete GeoServer store during cleanup()")
    logger.warning('Deleting dangling Catalogue record for [%s] '
                   '(no Django record to match)', name)
    if 'geonode.catalogue' in settings.INSTALLED_APPS:
        from geonode.catalogue import get_catalogue
        catalogue = get_catalogue()
        catalogue.remove_record(uuid)
    logger.warning('Finished cleanup after failed Catalogue/Django '
                   'import for layer: %s', name)
def _create_featurestore(name, data, overwrite=False, charset="UTF-8"):
    """Upload ``data`` into a GeoServer feature store called ``name``.

    Returns the (store, resource) pair for the created layer.
    """
    catalog = gs_catalog
    catalog.create_featurestore(name, data, overwrite=overwrite,
                                charset=charset)
    return catalog.get_store(name), catalog.get_resource(name)
def _create_coveragestore(name, data, overwrite=False, charset="UTF-8"):
    """Upload ``data`` into a GeoServer coverage (raster) store.

    ``charset`` is accepted for signature parity with the feature-store
    helper but is not forwarded to GeoServer.
    Returns the (store, resource) pair for the created layer.
    """
    catalog = gs_catalog
    catalog.create_coveragestore(name, data, overwrite=overwrite)
    return catalog.get_store(name), catalog.get_resource(name)
def _create_db_featurestore(name, data, overwrite=False, charset="UTF-8"):
    """Create a database store then use it to import a shapefile.
    If the import into the database fails then delete the store
    (and delete the PostGIS table for it).
    """
    cat = gs_catalog
    dsname = ogc_server_settings.DATASTORE
    # Reuse the shared datastore when it already exists; a FailedRequestError
    # from get_store means it does not yet exist and must be created.
    try:
        ds = cat.get_store(dsname)
    except FailedRequestError:
        ds = cat.create_datastore(dsname)
        db = ogc_server_settings.datastore_db
        # Normalize any Django-style 'postgis'-flavoured ENGINE string to the
        # plain 'postgis' dbtype GeoServer expects.
        db_engine = 'postgis' if \
            'postgis' in db['ENGINE'] else db['ENGINE']
        ds.connection_parameters.update(
            host=db['HOST'],
            port=db['PORT'],
            database=db['NAME'],
            user=db['USER'],
            passwd=db['PASSWORD'],
            dbtype=db_engine
        )
        cat.save(ds)
        # Re-fetch so we work with the server-side representation of the
        # freshly saved store.
        ds = cat.get_store(dsname)
    # Import the uploaded data into the (possibly new) database store and
    # return the resulting (store, resource) pair.
    try:
        cat.add_data_to_store(ds, name, data,
                              overwrite=overwrite,
                              charset=charset)
        return ds, cat.get_resource(name, store=ds)
    except Exception:
        # FIXME(Ariel): This is not a good idea, today there was a problem
        # accessing postgis that caused add_data_to_store to fail,
        # for the same reasons the call to delete_from_postgis below failed too
        # I am commenting it out and filing it as issue #1058
        # delete_from_postgis(name)
        raise
def geoserver_upload(
layer,
base_file,
user,
name,
overwrite=True,
title=None,
abstract=None,
permissions=None,
keywords=(),
charset='UTF-8'):
# Step 2. Check that it is uploading to the same resource type as
# the existing resource
logger.info('>>> Step 2. Make sure we are |
###############################################################################
#
# Tests for XlsxWriter.
#
# SPDX-License-Identifier: BSD-2-Clause
# Copyright (c), 2013-2022, John McNamara, jmcnamara@cpan.org
#
from ..excel_comparison_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
    """
    Test file created by XlsxWriter against a file created by Excel.
    """

    def setUp(self):
        self.set_filename('chart_format23.xlsx')

    def test_create_file(self):
        """Test the creation of an XlsxWriter file with chart formatting."""
        workbook = Workbook(self.got_filename)
        worksheet = workbook.add_worksheet()
        chart = workbook.add_chart({'type': 'column'})
        # Pin the axis ids so the XML matches the Excel reference file.
        chart.axis_ids = [108321024, 108328448]
        data = [
            [1, 2, 3, 4, 5],
            [2, 4, 6, 8, 10],
            [3, 6, 9, 12, 15],
        ]
        worksheet.write_column('A1', data[0])
        worksheet.write_column('B1', data[1])
        worksheet.write_column('C1', data[2])
        # First series carries the formatting under test: a yellow border
        # and a fully transparent red fill.
        chart.add_series({
            'categories': '=Sheet1!$A$1:$A$5',
            'values': '=Sheet1!$B$1:$B$5',
            'border': {'color': 'yellow'},
            'fill': {'color': 'red', 'transparency': 100},
        })
        chart.add_series({
            'categories': '=Sheet1!$A$1:$A$5',
            'values': '=Sheet1!$C$1:$C$5',
        })
        worksheet.insert_chart('E9', chart)
        workbook.close()
        self.assertExcelEqual()
|
# simple binary tree
# in this implementation, a node is inserted between an existing node and the root
class BinaryTree():
    """Simple binary tree node.

    In this implementation a new node is spliced in *between* this node
    and its existing child (see insertLeft/insertRight), rather than
    appended at a leaf.
    """

    def __init__(self, rootid):
        self.left = None       # left child (BinaryTree or None)
        self.right = None      # right child (BinaryTree or None)
        self.rootid = rootid   # payload value stored at this node

    def getLeftChild(self):
        return self.left

    def getRightChild(self):
        return self.right

    def setNodeValue(self, value):
        self.rootid = value

    def getNodeValue(self):
        return self.rootid

    def insertRight(self, newNode):
        """Insert newNode as the right child, pushing any existing right
        subtree one level down."""
        if self.right is None:
            self.right = BinaryTree(newNode)
        else:
            tree = BinaryTree(newNode)
            tree.right = self.right
            self.right = tree

    def insertLeft(self, newNode):
        """Insert newNode as the left child, pushing any existing left
        subtree one level down (mirror of insertRight)."""
        if self.left is None:
            self.left = BinaryTree(newNode)
        else:
            tree = BinaryTree(newNode)
            # Bug fix: link the old subtree BEFORE rebinding self.left.
            # The original assigned self.left = tree first, so tree.left
            # ended up pointing at tree itself and the old subtree was lost.
            tree.left = self.left
            self.left = tree

    def insert(self, newNode, max_depth, current_depth=0):
        """Walk down the left spine until max_depth, inserting into the
        first empty left slot; at the depth limit fall back to the right
        slot. If both slots are taken the node is silently dropped
        (original behaviour; a BST-style insert sketch was left here as
        dead code)."""
        if self.left is None:
            self.left = BinaryTree(newNode)
        else:
            if current_depth < max_depth:
                current_depth += 1
                self.left.insert(newNode, max_depth, current_depth)
            else:
                if self.right is None:
                    self.right = BinaryTree(newNode)
                else:
                    pass
def printTree(tree):
    """Print the tree's node values via an in-order traversal:
    left subtree first, then this node, then the right subtree."""
    if tree is None:
        return
    printTree(tree.getLeftChild())
    print(tree.getNodeValue())
    printTree(tree.getRightChild())
# test tree
def testTree():
    """Build a tiny tree by hand and dump it in-order."""
    root = BinaryTree("Maud")
    root.insertLeft("Bob")
    root.insertRight("Tony")
    root.insertRight("Steven")
    printTree(root)
testTree() |
# wget http://stuff.mit.edu/afs/sipb/contrib/pi/pi-billion.txt
# THEANO_FLAGS=mode=FAST_RUN,device=gpu,floatX=float32 python example009.py
from __future__ import division
import numpy as np
import theano
import theano.tensor as T
import lasagne as L
import argparse
import time
from six.moves import cPickle
# Pretty-printing for large numpy arrays (Python 2 era script).
np.set_printoptions(threshold='nan')
np.set_printoptions(linewidth=200)
np.set_printoptions(formatter={'float': '{:12.8f}'.format, 'int': '{:4d}'.format})
# NOTE(review): threshold='nan' relies on old numpy accepting a string;
# recent numpy raises on it — confirm the pinned numpy version.
print 'numpy ' + np.__version__
print 'theano ' + theano.__version__
print 'lasagne ' + L.__version__
# parse command line arguments
parser = argparse.ArgumentParser()
parser.add_argument('--ndigits', help='number of digits, default 1000000', default=1000000, type=int)
parser.add_argument('--window', help='window size, default=100', default=100, type=int)
parser.add_argument('--lr', help='learning rate, default 0.001', default=0.001, type=float)
parser.add_argument('--nepoch', help='number of epochs, default=100', default=100, type=int)
parser.add_argument('--nbatch', help='number of batches per eopch, default=100', default=100, type=int)
parser.add_argument('--batchsize', help='batch size, default 1000', default=1000, type=int)
parser.add_argument('--test', help='test fraction, default 0.2', default=0.2, type=float)
parser.add_argument('--model', help='output model filename')
args = parser.parse_args()
print args
# load data: read the first ndigits decimal digits of pi into a float32
# vector, skipping non-digit characters (decimal point, newlines).
with open('pi-billion.txt') as f:
    s = f.read()
f.close()
# NOTE(review): f.close() after the with-block is redundant — the context
# manager has already closed the file.
pi = np.empty([args.ndigits],dtype='float32')
i=0
for c in s:
    if c.isdigit():
        pi[i] = float(c)
        i+=1
        if i==args.ndigits:
            break
print 'pi.shape',pi.shape
# Model: digit windows -> stack of 1D convolutions -> global average
# pooling -> 2-way softmax (classify genuine vs shuffled windows).
input_var = T.matrix(dtype=theano.config.floatX)
target_var = T.vector(dtype='int32')
network = L.layers.InputLayer((None, args.window), input_var)
print 'input', L.layers.get_output_shape(network)
# Add a channel axis for the 1D convolutions: (batch, 1, window).
network = L.layers.ReshapeLayer(network, ((-1, 1, args.window)))
print 'reshape', L.layers.get_output_shape(network)
network = L.layers.Conv1DLayer(network,num_filters=256,filter_size=11,stride=2)
print 'conv', L.layers.get_output_shape(network)
network = L.layers.Conv1DLayer(network,num_filters=256,filter_size=11,stride=2)
print 'conv', L.layers.get_output_shape(network)
network = L.layers.Conv1DLayer(network,num_filters=256,filter_size=11,stride=2)
print 'conv', L.layers.get_output_shape(network)
network = L.layers.Conv1DLayer(network,num_filters=256,filter_size=11,stride=2)
print 'conv', L.layers.get_output_shape(network)
conv = L.layers.Conv1DLayer(network,num_filters=256,filter_size=11,stride=2)
print 'conv', L.layers.get_output_shape(conv)
# Global average pooling over the whole remaining temporal axis.
gap = L.layers.Pool1DLayer(conv, pool_size=L.layers.get_output_shape(conv)[2], stride=None, pad=0, mode='average_inc_pad')
print 'gap', L.layers.get_output_shape(gap)
network = L.layers.DenseLayer(gap, 2, nonlinearity=L.nonlinearities.softmax)
print 'output', L.layers.get_output_shape(network)
# Earlier fully-connected architecture kept for reference:
#input_var = T.matrix(dtype=theano.config.floatX)
#target_var = T.vector(dtype='int32')
#network = L.layers.InputLayer((None, args.window), input_var)
#network = L.layers.DenseLayer(network, 10000)
#network = L.layers.DenseLayer(network, 1000)
#network = L.layers.DenseLayer(network, 1000)
#network = L.layers.DenseLayer(network, 1000)
#network = L.layers.DenseLayer(network, 1000)
#network = L.layers.DenseLayer(network, 1000)
#network = L.layers.DenseLayer(network, 100)
#network = L.layers.DenseLayer(network, 2, nonlinearity=L.nonlinearities.softmax)
# Training objective: mean categorical cross-entropy of the softmax output
# against the genuine/shuffled labels, optimised with Adam.
prediction = L.layers.get_output(network)
loss = L.objectives.aggregate(L.objectives.categorical_crossentropy(prediction, target_var), mode='mean')
params = L.layers.get_all_params(network, trainable=True)
updates = L.updates.adam(loss, params, learning_rate=args.lr)
# max_norm=np.inf means no clipping; the norm is returned for monitoring only.
scaled_grads,norm = L.updates.total_norm_constraint(T.grad(loss,params), np.inf, return_norm=True)
train_fn = theano.function([input_var, target_var], [loss,norm], updates=updates)
# Deterministic forward pass (dropout/noise layers disabled) for evaluation.
test_fn = theano.function([input_var], L.layers.get_output(network, deterministic=True))
# Reusable minibatch buffers: d holds digit windows, l the labels
# (0 = genuine pi window, 1 = shuffled window).
d = np.empty([args.batchsize,args.window],dtype='float32')
l = np.empty([args.batchsize],dtype='int32')
t0 = time.time()
t = time.time()
for i in range(args.nepoch):
    tloss=0
    tnorm=0
    # train: windows drawn from the first (1 - test) fraction of pi.
    for j in range(args.nbatch):
        for k in range(args.batchsize):
            #w = np.random.randint(int(pi.shape[0]*args.test),pi.shape[0]-args.window)
            w = np.random.randint(0,int(pi.shape[0]*(1-args.test))-args.window)
            d[k] = pi[w:w+args.window]
            if np.random.randint(0,2)==0:
                l[k]=0
            else:
                # Negative example: same digits with their order destroyed.
                np.random.shuffle(d[k])
                l[k]=1
        bloss,bnorm = train_fn(d,l)
        tloss += bloss
        tnorm += bnorm
    # test: windows drawn from the held-out tail of pi.
    for k in range(args.batchsize):
        #w = np.random.randint(0,int(pi.shape[0]*args.test-args.window))
        w = np.random.randint(int(pi.shape[0]*(1-args.test)),pi.shape[0]-args.window)
        d[k] = pi[w:w+args.window]
        if np.random.randint(0,2)==0:
            l[k]=0
        else:
            np.random.shuffle(d[k])
            l[k]=1
    val_output = test_fn(d)
    val_predictions = np.argmax(val_output, axis=1)
    tacc = np.mean(val_predictions == l)
    print 'epoch {:8d} loss {:12.8f} grad {:12.8f} accuracy {:12.8f} n_zero {:6d} n_one {:6d} t_epoch {:4d} t_total {:8d}'.format(i, tloss/args.nbatch, tnorm/args.nbatch, tacc, np.sum(val_predictions==0), np.sum(val_predictions==1), int(time.time()-t), int(time.time()-t0))
    t = time.time()
# Persist the trained weights.
# NOTE(review): raises if --model was not supplied (args.model is None) —
# confirm the flag is mandatory in practice.
f = open(args.model, 'wb')
cPickle.dump(L.layers.get_all_param_values(network), f, protocol=cPickle.HIGHEST_PROTOCOL)
f.close()
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# pycasso documentation build configuration file, created by
# sphinx-quickstart on Sun Sep 24 01:54:19 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
import shutil
sys.path.insert(0, os.path.abspath('../../'))
# copy README into the doc tree so it can be included by the toctree.
shutil.copy('../../README.rst', './README.rst')
# NOTE(review): imported for its side effect of making the theme package
# available; html_theme below references it by name — confirm.
import sphinx_rtd_theme
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.autodoc',
              'sphinx.ext.doctest',
              'sphinx.ext.mathjax',
              'sphinx.ext.githubpages']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['APItemplates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'pycasso'
copyright = '2017, Haoming Jiang, Jason Ge'
author = 'Haoming Jiang, Jason Ge'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
VERSION_PATH = os.path.join(os.path.dirname(__file__), '../../pycasso/VERSION')
# The full version, including alpha/beta/rc tags.
release = open(VERSION_PATH).read().strip()
# The short X.Y version.
version = release
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'default'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = []
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'pycassodoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #
    # 'papersize': 'letterpaper',
    # The font size ('10pt', '11pt' or '12pt').
    #
    # 'pointsize': '10pt',
    # Additional stuff for the LaTeX preamble.
    #
    # 'preamble': '',
    # Latex figure (float) alignment
    #
    # 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    (master_doc, 'pycasso.tex', 'pycasso Documentation',
     'Haoming Jiang, Jian Ge', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    (master_doc, 'pycasso', 'pycasso Documentation',
     [author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    (master_doc, 'pycasso', 'pycasso Documentation',
     author, 'pycasso', 'One line description of project.',
     'Miscellaneous'),
]
|
self.networks+1):
# Core
cr_next.send([i, np(layer=CORE, pod=NULL, left_right=LEFT, number=1)])
cr_next.send([i, np(layer=CORE, pod=NULL, left_right=LEFT, number=2)])
cr_next.send([i, np(layer=CORE, pod=NULL, left_right=RIGHT, number=1)])
cr_next.send([i, np(layer=CORE, pod=NULL, left_right=RIGHT, number=2)])
# Aggregation
for pod in range(1,41):
cr_next.send([i, np(layer=AGGR, pod=pod, left_right=LEFT, number=1)])
cr_next.send([i, np(layer=AGGR, pod=pod, left_right=RIGHT, number=1)])
# ToR
for tor in range(1,21):
cr_next.send([i, np(layer=TOR, pod=pod, left_right=LEFT, number=tor)])
cr_next.send([i, np(layer=TOR, pod=pod, left_right=RIGHT, number=tor)])
else:
for i in range(1, self.networks+1):
for j in range(1, self.nodes+1):
cr_next.send([i, formatstr.format(j)])
def _generate_port(self, cr_next):
networks = self.networks
nodes = self.nodes
formatstr = self.formatstr
topo = self.topo_type
if topo == 'ring':
"""
...[node]--adj_left- | -[node]--adj_right--[node]...
"""
for i in ran | ge(1, networks + 1):
for j in range(1, nodes+1):
node = formatstr.format(j)
if j == 1:
adj_left = formatstr.format(nodes)
adj_right = formatstr.format(2)
elif j == nodes:
adj_left = formatstr.format(nodes - 1)
adj_right = formatstr.format(1)
else:
adj_left = formatstr.format(j-1)
adj_right = formatstr.format(j+1)
cr_next.send([i, node, adj_left])
cr_next.send([i, node, adj_right])
elif topo == 'mesh':
"""
| |
...[node]----[node]----[node]...
1 : range(1,1), range(2,1001)
2 : range(1,2), range(3,1001)
3 : range(1,3), range(4,1001)
:
1000: range(1,1000), range(1001,1001)
"""
for i in range(1, networks+1):
for j in range(1, nodes+1):
node = formatstr.format(j)
for port in range(1,j):
cr_next.send([i, node, formatstr.format(port)])
for port in range(j+1,nodes+1):
cr_next.send([i, node, formatstr.format(port)])
elif topo == 'linear':
"""
[node]---[node]...[node]---[node]
"""
for i in range(1, networks+1):
for j in range(1, nodes+1):
node = formatstr.format(j)
if j == 1:
adj_right = formatstr.format(2)
cr_next.send([i, node, adj_right])
elif j == nodes:
adj_left = formatstr.format(nodes - 1)
cr_next.send([i, node, adj_left])
else:
adj_left = formatstr.format(j-1)
adj_right = formatstr.format(j+1)
cr_next.send([i, node, adj_left])
cr_next.send([i, node, adj_right])
elif topo == 'fat_tree':
CORE = Topology.CORE
AGGR = Topology.AGGREGATION
TOR = Topology.TOR
LEFT = Topology.LEFT
RIGHT = Topology.RIGHT
np = formatstr.format # node & port
NULL = Topology.NULL
for i in range(1, self.networks+1):
for pod in range(1,41):
# Core => Aggregation
cr_next.send([i, np(layer=CORE, pod=NULL, left_right=LEFT, number=1),
np(layer=AGGR, pod=pod, left_right=LEFT, number=1)])
cr_next.send([i, np(layer=CORE, pod=NULL, left_right=LEFT, number=2),
np(layer=AGGR, pod=pod, left_right=LEFT, number=1)])
cr_next.send([i, np(layer=CORE, pod=NULL, left_right=RIGHT, number=1),
np(layer=AGGR, pod=pod, left_right=RIGHT, number=1)])
cr_next.send([i, np(layer=CORE, pod=NULL, left_right=RIGHT, number=2),
np(layer=AGGR, pod=pod, left_right=RIGHT, number=1)])
# Aggregation => Core
cr_next.send([i, np(layer=AGGR, pod=pod, left_right=LEFT, number=1),
np(layer=CORE, pod=NULL, left_right=LEFT, number=1)])
cr_next.send([i, np(layer=AGGR, pod=pod, left_right=LEFT, number=1),
np(layer=CORE, pod=NULL, left_right=LEFT, number=2)])
cr_next.send([i, np(layer=AGGR, pod=pod, left_right=RIGHT, number=1),
np(layer=CORE, pod=NULL, left_right=RIGHT, number=1)])
cr_next.send([i, np(layer=AGGR, pod=pod, left_right=RIGHT, number=1),
np(layer=CORE, pod=NULL, left_right=RIGHT, number=2)])
# Aggregation
for pod in range(1,41):
# ToR
for tor in range(1,21):
# Aggregation => ToR
cr_next.send([i, np(layer=AGGR, pod=pod, left_right=LEFT, number=1),
np(layer=TOR, pod=pod, left_right=LEFT, number=tor)])
cr_next.send([i, np(layer=AGGR, pod=pod, left_right=RIGHT, number=1),
np(layer=TOR, pod=pod, left_right=LEFT, number=tor)])
cr_next.send([i, np(layer=AGGR, pod=pod, left_right=LEFT, number=1),
np(layer=TOR, pod=pod, left_right=RIGHT, number=tor)])
cr_next.send([i, np(layer=AGGR, pod=pod, left_right=RIGHT, number=1),
np(layer=TOR, pod=pod, left_right=RIGHT, number=tor)])
# ToR => Aggregation
cr_next.send([i, np(layer=TOR, pod=pod, left_right=LEFT, number=tor),
np(layer=AGGR, pod=pod, left_right=LEFT, number=1)])
cr_next.send([i, np(layer=TOR, pod=pod, left_right=LEFT, number=tor),
np(layer=AGGR, pod=pod, left_right=RIGHT, number=1)])
cr_next.send([i, np(layer=TOR, pod=pod, left_right=RIGHT, number=tor),
np(layer=AGGR, pod=pod, left_right=LEFT, number=1)])
cr_next.send([i, np(layer=TOR, pod=pod, left_right=RIGHT, number=tor),
np(layer=AGGR, pod=pod, left_right=RIGHT, number=1)])
else:
pass
def _generate_eport(self, cr_next): # External ports
networks = self.networks
nodes = self.nodes
eports = self.eports
formatstr = self.formatstr
topo = self.topo_type
if topo in ['ring', 'mesh']:
for i in range(1, networks+1):
for j in range(1, nodes+1):
node = formatstr.format(j)
for k in range(nodes+1, nodes+eports+1):
eport = formatstr.format(k)
cr_next.send([i, node, eport])
elif topo == 'linear':
for i in range(1, networks+1):
for j in [1, nodes]:
node = formatstr.format(j)
for k in range(nodes+1, nodes+eports+1):
eport = formatstr.format(k)
cr_next.send([i, node, eport])
elif topo == 'fat_tree':
CORE = Topology.CORE
TOR = Topology.TOR
LEFT = Topology.L |
import endpoints

# Project API handlers; each is an endpoints service class.
from api_user import UserAPI
from api_posts import PostsAPI
from api_comments import ReactionAPI
from api_image import ImageAPI

# Single server aggregating every API service for deployment.
APPLICATION = endpoints.api_server([PostsAPI, ReactionAPI, UserAPI, ImageAPI])
import json
import os
from urlparse import urlparse
from django.conf import settings
from django.core.files.storage import default_storage as storage
from django.core.urlresolvers import resolve
from django.http import Http404
from django.shortcuts import render
from django.utils import translation
from django.views.decorators.cache import cache_control
from django.views.decorators.gzip import gzip_page
import newrelic.agent
import waffle
from mkt.commonplace.models import DeployBuildId
from mkt.regions.middleware import RegionMiddleware
from mkt.account.helpers import fxa_auth_info
from mkt.webapps.models import Webapp
@gzip_page
@cache_control(max_age=settings.CACHE_MIDDLEWARE_SECONDS)
def commonplace(request, repo, **kwargs):
    """Serves the frontend single-page apps.

    ``repo`` selects which frontend is rendered; unknown repos 404.
    Returns the React or classic commonplace index template with the
    build/site context. NOTE(review): if ``repo`` is in FRONTEND_REPOS
    but neither REACT_REPOS nor COMMONPLACE_REPOS, this falls through
    and returns None — confirm settings always keep those in sync.
    """
    if repo not in settings.FRONTEND_REPOS:
        raise Http404
    BUILD_ID = get_build_id(repo)
    ua = request.META.get('HTTP_USER_AGENT', '').lower()
    include_splash = False
    detect_region_with_geoip = False
    if repo == 'fireplace':
        include_splash = True
        has_sim_info_in_query = (
            'mccs' in request.GET or
            ('mcc' in request.GET and 'mnc' in request.GET))
        if not has_sim_info_in_query:
            # If we didn't receive mcc/mnc, then use geoip to detect region,
            # enabling fireplace to avoid the consumer_info API call that it
            # does normally to fetch the region.
            detect_region_with_geoip = True
    fxa_auth_state, fxa_auth_url = fxa_auth_info()
    site_settings = {
        'dev_pay_providers': settings.DEV_PAY_PROVIDERS,
        'fxa_auth_state': fxa_auth_state,
        'fxa_auth_url': fxa_auth_url,
    }
    # Template context shared by both index templates.
    ctx = {
        'BUILD_ID': BUILD_ID,
        'LANG': request.LANG,
        'DIR': lang_dir(request.LANG),
        'include_splash': include_splash,
        'repo': repo,
        'robots': 'googlebot' in ua,
        'site_settings': site_settings,
        'newrelic_header': newrelic.agent.get_browser_timing_header,
        'newrelic_footer': newrelic.agent.get_browser_timing_footer,
    }
    if repo == 'fireplace':
        # For OpenGraph stuff.
        resolved_url = resolve(request.path)
        if resolved_url.url_name == 'detail':
            ctx = add_app_ctx(ctx, resolved_url.kwargs['app_slug'])
    # Expose the active waffle switches so the frontend can feature-gate.
    ctx['waffle_switches'] = list(
        waffle.models.Switch.objects.filter(active=True)
        .values_list('name', flat=True))
    media_url = urlparse(settings.MEDIA_URL)
    if media_url.netloc:
        ctx['media_origin'] = media_url.scheme + '://' + media_url.netloc
    if detect_region_with_geoip:
        region_middleware = RegionMiddleware()
        ctx['geoip_region'] = region_middleware.region_from_request(request)
    if repo in settings.REACT_REPOS:
        return render(request, 'commonplace/index_react.html', ctx)
    elif repo in settings.COMMONPLACE_REPOS:
        return render(request, 'commonplace/index.html', ctx)
def get_allowed_origins(request, include_loop=True):
    """Return a JSON-encoded list of origins allowed to message this page.

    Always contains the two app:// variants of the current domain, the
    current http(s) origin and the Tarako app origin. Development
    servers additionally allow localhost/mp.dev, and unless
    ``include_loop`` is False the Firefox Hello (Loop) origins are
    appended as well.
    """
    current_domain = settings.DOMAIN
    current_origin = '%s://%s' % ('https' if request.is_secure() else 'http',
                                  current_domain)
    development_server = (settings.DEBUG or
                          current_domain == 'marketplace-dev.allizom.org')
    allowed = [
        # Start by allowing the 2 app:// variants for the current domain,
        # and then add the current http or https origin.
        'app://packaged.%s' % current_domain,
        'app://%s' % current_domain,
        current_origin,
        # Also include Tarako
        'app://tarako.%s' % current_domain,
    ]
    # On dev, also allow localhost/mp.dev.
    if development_server:
        allowed.extend([
            'http://localhost:8675',
            'https://localhost:8675',
            'http://localhost',
            'https://localhost',
            'http://mp.dev',
            'https://mp.dev',
        ])
    if include_loop:
        # Include loop origins if necessary.
        allowed.extend([
            'https://hello.firefox.com',
            'https://call.firefox.com',
        ])
        # On dev, include loop dev & stage origin as well.
        if development_server:
            allowed.extend([
                'https://loop-webapp-dev.stage.mozaws.net',
                'https://call.stage.mozaws.net',
            ])
    return json.dumps(allowed)
def get_build_id(repo):
    """Return the frontend build ID for ``repo``.

    Prefers the database value (bug 1083185), falls back to the
    build_id.txt written by the frontend build, and finally to 'dev'.
    """
    try:
        # Get the build ID from the database (bug 1083185).
        return DeployBuildId.objects.get(repo=repo).build_id
    except DeployBuildId.DoesNotExist:
        # If we haven't initialized a build ID yet, read it directly from the
        # build_id.txt by our frontend builds.
        try:
            build_id_path = os.path.join(settings.MEDIA_ROOT, repo,
                                         'build_id.txt')
            with storage.open(build_id_path) as f:
                return f.read()
        except Exception:
            # Best effort: a missing/unreadable file means 'dev'. Narrowed
            # from a bare except so SystemExit/KeyboardInterrupt propagate.
            return 'dev'
def fxa_authorize(request):
    """Render the page mimicking commonplace's fxa-authorize page,
    used to complete Firefox Accounts login."""
    return render(request, 'commonplace/fxa_authorize.html')
def add_app_ctx(ctx, app_slug):
    """Attach the Webapp matching ``app_slug`` to ``ctx`` (key 'app')
    for Open Graph tags on the Fireplace detail page.

    Unknown slugs leave ``ctx`` untouched; the context is returned
    either way.
    """
    try:
        ctx['app'] = Webapp.objects.get(app_slug=app_slug)
    except Webapp.DoesNotExist:
        pass
    return ctx
@gzip_page
def iframe_install(request):
    """Serve the iframe-install shim together with the origins allowed
    to postMessage it."""
    context = {'allowed_origins': get_allowed_origins(request)}
    return render(request, 'commonplace/iframe-install.html', context)
@gzip_page
def potatolytics(request):
    """Serve the analytics shim; Loop origins are deliberately excluded
    from the allowed postMessage origins here."""
    context = {
        'allowed_origins': get_allowed_origins(request, include_loop=False),
    }
    return render(request, 'commonplace/potatolytics.html', context)
def lang_dir(lang):
    """Return the text direction ('rtl' or 'ltr') for ``lang``,
    deferring to the active Django translation's bidi flag."""
    is_rtl = lang == 'rtl' or translation.get_language_bidi()
    return 'rtl' if is_rtl else 'ltr'
|
""" Copyright (C) 2015 Sebastian Otalora
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
"""
import flickrapi
import json
import matplotlib.pyplot as plt
import Image
import urllib, cStringIO
import numpy as np
api_key = u'3724c5fcd02af2a224329c086d64a68c' #TODO: Put this into a file of .gitignore
api_secret = u'6e80f744db6eb5c3'
# Module-wide client; parsed-json makes API calls return plain dicts/lists.
flickr = flickrapi.FlickrAPI(api_key, api_secret,format='parsed-json')
#Obtain medium sized (maxdim = 500px) images URLS hosted in flickr for the provided label
#https://www.flickr.com/services/api/misc.urls.html
def get_catimgs_urls(label='chair', show_samples=0):
    """Return the medium-size (url_m, max dim 500px) image URLs that a
    flickr relevance search finds for ``label``.

    When ``show_samples`` is truthy, the first hit is downloaded and
    displayed with matplotlib as a visual sanity check.
    """
    imgs_urls = []
    # The search result is a parsed JSON object (see FlickrAPI format above).
    photos = flickr.photos_search(tags=label,sort='relevance', geo_context=1, extras='url_m')
    #photos['photos']['photo'] is a list with all the results imgs as dicts with ids, captions, urls and so forth
    for img_id in range(len(photos['photos']['photo'])):
        cur_img_id = photos['photos']['photo'][img_id]
        imgs_urls.append(cur_img_id['url_m']) #medium size
        #photoSizes = flickr.photos_getSizes(photo_id=cur_img_id['id'])
        #cur_img_id['sizes']['size'][0] contains the diferent sources of the img in diferent sizes:
        #{u'height': 75,
        # u'label': u'Square',
        # u'media': u'photo',
        # u'source': u'https://farm6.staticflickr.com/5836/22322409944_1498c04fb6_s.jpg',
        # u'url': u'https://www.flickr.com/photos/g20_turkey/22322409944/sizes/sq/',
        # u'width': 75}
    #photos = flickr.photos_search(tags='chair', lat='42.355056', lon='-71.065503', radius='5')
    #sets = flickr.photosets.getList(user_id='73509078@N00')
    # Optionally fetch and display the first result.
    if show_samples:
        img_idt = 0
        URL = imgs_urls[img_idt]
        img_file = cStringIO.StringIO(urllib.urlopen(URL).read())
        img = Image.open(img_file)
        #lets look at the image with plt
        plt.imshow(img)
        plt.show()
    return imgs_urls
#lets save all the image urls to process with the opencv gui
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# Odoo addon manifest for the Budgets Management module.
{
    'name': 'Budgets Management',
    'version': '1.0',
    'category': 'Accounting & Finance',
    'description': """
This module allows accountants to manage analytic and crossovered budgets.
==========================================================================
Once the Budgets are defined (in Invoicing/Budgets/Budgets), the Project Managers
can set the planned amount on each Analytic Account.
The accountant has the possibility to see the total of amount planned for each
Budget in order to ensure the total planned is not greater/lower than what he
planned for this Budget. Each list of record can also be switched to a graphical
view of it.
Three reports are available:
----------------------------
1. The first is available from a list of Budgets. It gives the spreading, for
these Budgets, of the Analytic Accounts.
2. The second is a summary of the previous one, it only gives the spreading,
for the selected Budgets, of the Analytic Accounts.
3. The last one is available from the Analytic Chart of Accounts. It gives
the spreading, for the selected Analytic Accounts of Budgets.
""",
    'author': 'OpenERP SA',
    'website': 'https://www.odoo.com/page/accounting',
    'depends': ['account'],
    'data': [
        'security/ir.model.access.csv',
        'security/account_budget_security.xml',
        'account_budget_view.xml',
        'account_budget_report.xml',
        'account_budget_workflow.xml',
        'wizard/account_budget_analytic_view.xml',
        'wizard/account_budget_report_view.xml',
        'wizard/account_budget_crossovered_summary_report_view.xml',
        'wizard/account_budget_crossovered_report_view.xml',
        'views/report_analyticaccountbudget.xml',
        'views/report_budget.xml',
        'views/report_crossoveredbudget.xml',
    ],
    'demo': ['account_budget_demo.xml'],
    'test': [
        'test/account_budget.yml',
        'test/account_budget_report.yml',
    ],
    'installable': True,
    'auto_install': False,
}
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-01-07 08:13
from __future__ import unicode_literals
import django.core.validators
from django.db import migrations
import djstripe.fields
class Migration(migrations.Migration):
    """Alter subscription.application_fee_percent validators/help text."""

    dependencies = [
        ('djstripe', '0020_auto_20161229_0041'),
    ]

    operations = [
        migrations.AlterField(
            model_name='subscription',
            name='application_fee_percent',
            # max_digits=5/decimal_places=2 allows values up to 999.99;
            # validators clamp the accepted range to [1.0, 100.0].
            field=djstripe.fields.StripePercentField(decimal_places=2, help_text="A positive decimal that represents the fee percentage of the subscription invoice amount that will be transferred to the application owner's Stripe account each billing period.", max_digits=5, null=True, validators=[django.core.validators.MinValueValidator(1.0), django.core.validators.MaxValueValidator(100.0)]),
        ),
    ]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import unittest
import sys
import os
import hnbot
class HnbotMessage(unittest.TestCase):
    """Tests for hnbot message size constraints."""

    def testTooLarge(self):
        """test should fail if message size is bigger than 140 characters"""
        # NOTE(review): placeholder assertion -- does not exercise hnbot yet.
        self.assertEqual(1, 1)
if __name__ == "__main__":
    # Run the test suite when this file is executed directly.
    unittest.main()
|
'''
Biggest Sum from Top left to Bottom Right
'''
def find_sum(m):
    """Return the biggest path sum from the top-left to the bottom-right
    of matrix ``m``, moving only right or down.

    Uses a rolling row ``p`` where ``p[i]`` holds the best sum of any
    monotone path ending at the current row, column ``i-1``. Returns 0
    for an empty matrix. Works for rectangular matrices (the buffer is
    sized by the column count, not the row count).
    """
    cols = len(m[0]) if m else 0
    # p[0] is a zero sentinel so p[i-1] is always valid in column 1.
    p = [0] * (cols + 1)
    for row in m:
        for i, v in enumerate(row, 1):
            # Best predecessor: left neighbour (p[i-1], already updated
            # for this row) or the cell above (old p[i]).
            p[i] = v + max(p[i - 1], p[i])
    return p[-1]
# Sanity check: the all-20 path through this matrix sums to 140.
matrix = [[20, 20, 10, 10],
          [10, 20, 10, 10],
          [10, 20, 20, 20],
          [10, 10, 10, 20]]
print(find_sum(matrix) == 140)
|
# -*- coding: utf-8 -*-
import base64
import time
import simplejson as json
from eventlet.green import urllib2
import urllib
from kral import config
def stream(queries, queue, kral_start_time):
    """Open a Twitter streaming-API filter for ``queries`` and push each
    matching status onto ``queue`` as a normalized 'post' dict.

    NOTE(review): ``kral_start_time`` is accepted but never used here --
    confirm whether callers rely on it.

    :param queries: iterable of search terms (lower-cased before use)
    :param queue: queue-like object with a put() method
    """
    url = 'https://stream.twitter.com/1/statuses/filter.json'
    queries = [q.lower() for q in queries]
    quoted_queries = [urllib.quote(q) for q in queries]
    query_post = 'track=' + ",".join(quoted_queries)
    request = urllib2.Request(url, query_post)
    # HTTP basic auth built from kral's configured Twitter credentials.
    auth = base64.b64encode('%s:%s' % (config.TWITTER['user'], config.TWITTER['password']))
    request.add_header('Authorization', "basic %s" % auth)
    request.add_header('User-agent', config.USER_AGENT)
    for item in urllib2.urlopen(request):
        try:
            item = json.loads(item)
        except json.JSONDecodeError: #for whatever reason json reading twitters json sometimes raises this
            continue
        if 'text' in item and 'user' in item:
            #determine what query we're on if it exists in the text
            text = item['text'].lower()
            query = ''
            for q in queries:
                q_uni = unicode(q, 'utf-8')
                if q_uni in text:
                    query = q_uni
            # Language filter: only applied when config.LANG is set.
            lang = False
            if config.LANG:
                if item['user']['lang'] == config.LANG:
                    lang = True
            else:
                lang = True
            if query and lang:
                post = {
                    'service' : 'twitter',
                    'user' : {
                        'id' : item['user']['id_str'],
                        'utc' : item['user']['utc_offset'],
                        'name' : item['user']['screen_name'],
                        'description' : item['user']['description'],
                        'location' : item['user']['location'],
                        'avatar' : item['user']['profile_image_url'],
                        'subscribers': item['user']['followers_count'],
                        'subscriptions': item['user']['friends_count'],
                        'website': item['user']['url'],
                        'language' : item['user']['lang'],
                    },
                    'links' : [],
                    'id' : item['id'],
                    'application': item['source'],
                    'date' : int(time.mktime(time.strptime(item['created_at'],'%a %b %d %H:%M:%S +0000 %Y'))),
                    'text' : text,
                    'query' : query,
                    'geo' : item['coordinates'],
                }
                for url in item['entities']['urls']:
                    post['links'].append({ 'href' : url.get('url') })
                queue.put(post)
|
# coding: utf-8
# Bootstrap the project environment before importing any Django models.
import configureEnvironnement
configureEnvironnement.setup()

import django
django.setup()

from datetime import datetime, date, timedelta
from chambres.models import Reservation, Client
from chambres.views import OneDayStats

# Report inconsistent reservations: arrival strictly after departure.
rs = Reservation.objects.all()
for r in rs:
    if r.dateArrivee > r.dateDepart:
        print(r, r.id, r.client.id)
|
import logging
from collections import Counter
from udata.commands import cli, header, success
log = logging.getLogger(__name__)
@cli.group('images')
def grp():
    '''Images related operations'''
    # Command group only; subcommands attach themselves via @grp.command().
    pass
def render_or_skip(obj, attr):
    """Re-render the image stored on ``obj.<attr>`` and persist ``obj``.

    Returns 1 on success so callers can sum results into a counter;
    returns 0 (after logging a warning) when anything goes wrong.
    """
    try:
        image_field = getattr(obj, attr)
        image_field.rerender()
        obj.save()
    except Exception as e:
        log.warning('Skipped "%s": %s(%s)', obj, e.__class__.__name__, e)
        return 0
    return 1
@grp.command()
def render():
    '''Force (re)rendering stored images'''
    from udata.core.organization.models import Organization
    from udata.core.post.models import Post
    from udata.core.reuse.models import Reuse
    from udata.core.user.models import User

    header('Rendering images')
    count = Counter()
    total = Counter()

    # (counter key, queryset of objects with the image, attribute, label)
    targets = [
        ('orgs', Organization.objects(logo__exists=True), 'logo',
         'organizations logos'),
        ('users', User.objects(avatar__exists=True), 'avatar',
         'user avatars'),
        ('posts', Post.objects(image__exists=True), 'image',
         'post images'),
        ('reuses', Reuse.objects(image__exists=True), 'image',
         'reuse images'),
    ]
    for key, queryset, attr, label in targets:
        total[key] = queryset.count()
        log.info('Processing {0} {1}'.format(total[key], label))
        for obj in queryset:
            count[key] += render_or_skip(obj, attr)

    log.info('''Summary:
    Organization logos: {count[orgs]}/{total[orgs]}
    User avatars: {count[users]}/{total[users]}
    Post images: {count[posts]}/{total[posts]}
    Reuse images: {count[reuses]}/{total[reuses]}
    '''.format(count=count, total=total))
    success('Images rendered')
|
# Generated by Django 2.0.4 on 2019-07-16 21:18
from django.db import migrations
class Migration(migrations.Migration):
    """Rename smpost.post_instagram to post_newsletter."""

    dependencies = [
        ('scheduler', '0009_auto_20190607_1518'),
    ]

    operations = [
        migrations.RenameField(
            model_name='smpost',
            old_name='post_instagram',
            new_name='post_newsletter',
        ),
    ]
|
#!/usr/bin/env python3
# Copyright lowRISC contributors.
# Licensed under the Apache License, Version 2.0, see LICENSE for details.
# SPDX-License-Identifier: Apache-2.0
r"""Calculate Round Constant
"""
import argparse
import bitarray as ba
import logging as log
def main():
    """Generate Keccak round constants for up to args.r rounds."""
    parser = argparse.ArgumentParser(
        prog="keccak round constant generator",
        description=
        '''This tool generates the round constants based on the given max round number'''
    )
    parser.add_argument(
        '-r',
        type=int,
        default=24,
        # BUG FIX: argparse interpolates '%(default)s', not '%(default)'.
        help='''Max Round value. Default is SHA3 Keccak round %(default)s''')
    parser.add_argument('--verbose', '-v', action='store_true', help='Verbose')
    args = parser.parse_args()

    if (args.verbose):
        log.basicConfig(format="%(levelname)s: %(message)s", level=log.DEBUG)
    else:
        log.basicConfig(format="%(levelname)s: %(message)s")

    if args.r < 1:
        log.error("Max Round value should be greater than 0")

    # rc[t] is the LFSR output bit for t in 0..255 (period 255).
    rc = ba.bitarray(256)
    rc.setall(0)
    r = ba.bitarray('10000000')
    rc[0] = True  # t%255 == 0 -> 1
    for i in range(1, 256):
        # Update from t=1 to t=255
        r_d = ba.bitarray('0') + r
        if r_d[8]:
            # Flip bits 0,4,5,6 (LFSR feedback polynomial).
            r = r_d[0:8] ^ ba.bitarray('10001110')
        else:
            r = r_d[0:8]
        rc[i] = r[0]

    ## Print rc
    print(rc)

    ## Round constants: RC[2**j - 1] = rc(j + 7*rnd) for j in 0..6.
    rcs = []  # Each entry represent the round
    for rnd in range(0, args.r):
        # Let RC=0
        rndconst = ba.bitarray(64)
        rndconst.setall(0)
        for j in range(0, 7):  # 0 to 6
            rndconst[2**j - 1] = rc[(j + 7 * rnd) % 255]
        print("64'h{}, // Round {}".format(rndhex(rndconst), rnd))
def rndhex(bit) -> str:
    """Hex string of a round constant, reversed into little-endian order."""
    reversed_bits = bit[::-1]
    return reversed_bits.tobytes().hex()
if __name__ == "__main__":
    # Script entry point.
    main()
|
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
    """Build the Tangible template for the Mandalorian enhanced heavy
    composite ship armor component."""
    result = Tangible()

    result.template = "object/tangible/ship/components/armor/shared_arm_mandal_enhanced_heavy_composite.iff"
    result.attribute_template_id = 8
    result.stfName("space/space_item", "arm_mandal_enhanced_heavy_composite_n")

    #### BEGIN MODIFICATIONS ####
    #### END MODIFICATIONS ####

    return result
of the header and key is its corresponding
cache key (obtained by a call to L{_create_cache_key()}).
The entry is removed from the index table, any referenced file
name is released and the cache file is deleted.
If key references a non-existing entry, the method returns
immediately.
@param source_file: Header file name
@type source_file: str
@param key: Key value for the specified header file
@type key: hashable object
"""
entry = self.__index.get(key)
if entry==None:
return
# Release the referenced files...
for id_, sig in entry.filesigs:
self.__filename_rep.release_filename(id_)
# Remove the cache entry...
del self.__index[key]
self.__modified_flag = True
# Delete the corresponding cache file...
cachefilena | me = self._create_cache_filename(source_file)
try:
os.remove(cachefilename)
except OSError, e: |
print "Could not remove cache file (%s)"%e
def _create_cache_key(self, source_file):
"""Return the cache key for a header file.
@param source_file: Header file name
@type source_file: str
@returns: Key for the given header file
@rtype: str
"""
path, name = os.path.split(source_file)
return name+str(hash(path))
def _create_cache_filename(self, source_file):
    """Return the cache file name for a header file.

    @param source_file: Header file name
    @type source_file: str
    @returns: Cache file name (*.cache) inside the cache directory
    @rtype: str
    """
    cache_name = "%s.cache" % self._create_cache_key(source_file)
    return os.path.join(self.__dir, cache_name)
def _create_config_signature(self, config):
    """Return the signature for a config object.

    The signature is computed as md5 digest of the contents of
    working_directory, include_paths, define_symbols, undefine_symbols
    and cflags.

    @param config: Configuration object
    @type config: L{config_t}
    @returns: Signature
    @rtype: str
    """
    m = md5.new()
    m.update(config.working_directory)
    # Feed each list explicitly. The original used map() purely for its
    # side effects, which silently does nothing under Python 3's lazy map.
    for value in config.include_paths:
        m.update(value)
    for value in config.define_symbols:
        m.update(value)
    for value in config.undefine_symbols:
        m.update(value)
    for value in config.cflags:
        m.update(value)
    return m.digest()
class filename_entry_t:
    """This is a record stored in the filename_repository_t class.

    It couples a file name with its reference count plus a cached file
    signature. Only the name and the count survive pickling; the cached
    signature is recomputed on demand after unpickling.
    """

    def __init__( self, filename ):
        """Create an entry with a reference count of 0."""
        self.filename = filename
        self.refcount = 0
        # Cached signature for the file. While sig_valid is False the
        # signature still has to be computed; afterwards the cached value
        # may be used. Neither attribute is ever pickled.
        self.sig_valid = False
        self.signature = None

    def __getstate__(self):
        # Persist only the file name and the reference count.
        return (self.filename, self.refcount)

    def __setstate__(self, state):
        self.filename, self.refcount = state
        # Force recomputation of the signature after unpickling.
        self.sig_valid = False
        self.signature = None

    def inc_ref_count(self):
        """Increase the reference count by 1."""
        self.refcount += 1

    def dec_ref_count(self):
        """Decrease the reference count by 1 and return the new count."""
        self.refcount -= 1
        return self.refcount
class filename_repository_t:
"""File name repository.
This class stores file names and can check whether a file has been
modified or not since a previous call.
A file name is stored by calling acquire_filename() which returns
an ID and a signature of the file. The signature can later be used
to check if the file was modified by calling is_file_modified().
If the file name is no longer required release_filename() should be
called so that the entry can be removed from the repository.
"""
def __init__( self, md5_sigs ):
    """Constructor.

    @param md5_sigs: True -> signatures are md5 digests of file
        contents; False -> signatures are modification times.
    @type md5_sigs: bool
    """
    # Flag that determines whether the signature is a md5 digest or
    # the modification time
    # (this flag is passed to the filename_repository_t class)
    self._md5_sigs = md5_sigs
    # ID lookup table (key: filename / value: id_)
    self.__id_lut = {}
    # Entry dictionary (key: id_ / value: filename_entry_t)
    # This dictionary contains the actual data.
    # It must always hold that each entry in __entries has a corresponding
    # entry in __id_lut (i.e. the keys in __id_lut must be the names
    # stored in __entries)
    self.__entries = {}
    # A counter for new ids
    self.__next_id = 1
def acquire_filename(self, name):
    """Acquire a file name and return its id and its signature.

    A new id is allocated for unknown names; known names get their
    reference count bumped instead.

    @param name: File name to register
    @type name: str
    @returns: Tuple (id, signature)
    """
    id_ = self.__id_lut.get(name)
    # Is this a new entry?
    if id_==None:
        # then create one...
        id_ = self.__next_id
        self.__next_id += 1
        self.__id_lut[name] = id_
        entry = filename_entry_t(name)
        self.__entries[id_] = entry
    else:
        # otherwise obtain the entry...
        entry = self.__entries[id_]
    entry.inc_ref_count()
    return id_, self._get_signature(entry)
def release_filename(self, id_):
    """Release a file name.

    Decrements the reference count; once it reaches zero the entry is
    removed from both internal tables.

    @param id_: Id as returned by acquire_filename()
    @raises ValueError: If id_ references no known entry
    """
    entry = self.__entries.get(id_)
    if entry==None:
        raise ValueError, "Invalid filename id (%d)"%id_
    # Decrease reference count and check if the entry has to be removed...
    if entry.dec_ref_count()==0:
        del self.__entries[id_]
        del self.__id_lut[entry.filename]
def is_file_modified(self, id_, signature):
    """Check if the file referred to by id_ has been modified.

    @param id_: Id as returned by acquire_filename()
    @param signature: Signature recorded when the file was acquired
    @returns: True when the current signature differs from the given one
    @raises ValueError: If id_ references no known entry
    """
    entry = self.__entries.get(id_)
    if entry==None:
        raise ValueError, "Invalid filename id_ (%d)"%id_
    # Is the signature already known?
    if entry.sig_valid:
        # use the cached signature
        filesig = entry.signature
    else:
        # compute the signature and store it
        filesig = self._get_signature(entry)
        entry.signature = filesig
        entry.sig_valid = True
    return filesig!=signature
def update_id_counter(self):
    """Update the id_ counter so that it doesn't grow forever.

    Resets to 1 when the repository is empty, otherwise continues just
    past the highest id currently in use.
    """
    if len(self.__entries)==0:
        self.__next_id = 1
    else:
        self.__next_id = max(self.__entries.keys())+1
def _get_signature(self, entry):
    """Return the signature of the file stored in entry.

    Depending on the md5_sigs flag this is either an md5 digest of the
    file contents or the file's modification time. Returns None when
    the file is missing or unreadable.
    """
    if self._md5_sigs:
        # return md5 digest of the file content...
        if not os.path.exists(entry.filename):
            return None
        try:
            f = file(entry.filename)
        except IOError, e:
            print "Cannot determine md5 digest:",e
            return None
        data = f.read()
        f.close()
        return md5.new(data).digest()
    else:
        # return file modification date...
        try:
            return os.path.getmtime(entry.filename)
        except OSError, e:
            return None
def _dump(self):
"""Dump contents for debugging/testing.
"""
print 70*"-"
print "ID lookup table:"
for name in self.__id_lut:
|
'dnf': DnfBackend,
'zypper': ZypperBackend}
if backend_type not in backend_mapping.keys():
raise NotImplementedError('Unimplemented package management '
'system: %s.' % backend_type)
backend = backend_mapping[backend_type]
self.backend = backend()
self.initialized = True
def __getattr__(self, name):
    # Lazily initialize the concrete backend, then delegate every
    # unknown attribute lookup to it.
    self._init_on_demand()
    return self.backend.__getattribute__(name)
class BaseBackend(object):
    """
    This class implements all common methods among backends.
    """

    def install_what_provides(self, path):
        """
        Installs package that provides [path].

        :param path: Path to file.
        :return: install() result on success; False when no package is
                 known to provide the path.
        """
        provider = self.provides(path)
        if provider is None:
            log.warning('No package seems to provide %s', path)
            return False
        return self.install(provider)
class RpmBackend(BaseBackend):
    """
    This class implements operations executed with the rpm package manager.

    rpm is a lower level package manager, used by higher level managers such
    as yum and zypper.
    """

    PACKAGE_TYPE = 'rpm'
    # Query format used when listing packages as SoftwareComponent records.
    SOFTWARE_COMPONENT_QRY = (
        PACKAGE_TYPE + ' ' +
        '%{NAME} %{VERSION} %{RELEASE} %{SIGMD5} %{ARCH}')

    def __init__(self):
        # Resolve the rpm binary once; raises if it is not on PATH.
        self.lowlevel_base_cmd = utils_path.find_command('rpm')

    def _check_installed_version(self, name, version):
        """
        Helper for the check_installed public method.

        :param name: Package name.
        :param version: Package version.
        """
        cmd = (self.lowlevel_base_cmd + ' -q --qf %{VERSION} ' + name)
        inst_version = process.system_output(cmd, ignore_status=True)
        if 'not installed' in inst_version:
            return False
        # NOTE(review): this is a plain lexical string comparison, not a
        # version compare -- e.g. '9.1' >= '10.0' is True lexically.
        # Confirm whether callers rely on this before changing it.
        return bool(inst_version >= version)

    def check_installed(self, name, version=None, arch=None):
        """
        Check if package [name] is installed.

        :param name: Package name.
        :param version: Package version.
        :param arch: Package architecture.
        """
        if arch:
            # Check every installed arch of the package; only matching
            # arches are version-checked.
            cmd = (self.lowlevel_base_cmd + ' -q --qf %{ARCH} ' + name)
            inst_archs = process.system_output(cmd, ignore_status=True)
            inst_archs = inst_archs.split('\n')
            for inst_arch in inst_archs:
                if inst_arch == arch:
                    return self._check_installed_version(name, version)
            return False
        elif version:
            return self._check_installed_version(name, version)
        else:
            # Presence check only: rpm -q exits non-zero when missing.
            cmd = 'rpm -q ' + name
            try:
                process.system(cmd)
                return True
            except process.CmdError:
                return False

    def list_all(self, software_components=True):
        """
        List all installed packages.

        :param software_components: log in a format suitable for the
                                    SoftwareComponent schema
        """
        log.debug("Listing all system packages (may take a while)")
        if software_components:
            cmd_format = "rpm -qa --qf '%s' | sort"
            query_format = "%s\n" % self.SOFTWARE_COMPONENT_QRY
            cmd_format %= query_format
            cmd_result = process.run(cmd_format, verbose=False, shell=True)
        else:
            cmd_result = process.run('rpm -qa | sort', verbose=False,
                                     shell=True)
        out = cmd_result.stdout.strip()
        installed_packages = out.splitlines()
        return installed_packages

    def list_files(self, name):
        """
        List files installed on the system by package [name].

        :param name: Package name or path to a local .rpm file.
        """
        path = os.path.abspath(name)
        if os.path.isfile(path):
            # Argument is an rpm file on disk, not an installed package.
            option = '-qlp'
            name = path
        else:
            option = '-ql'
        l_cmd = 'rpm' + ' ' + option + ' ' + name
        try:
            result = process.system_output(l_cmd)
            list_files = result.split('\n')
            return list_files
        except process.CmdError:
            return []
class DpkgBackend(BaseBackend):
    """
    This class implements operations executed with the dpkg package manager.

    dpkg is a lower level package manager, used by higher level managers such
    as apt and aptitude.
    """

    PACKAGE_TYPE = 'deb'
    # Exact status string 'dpkg -s' reports for an installed package.
    INSTALLED_OUTPUT = 'install ok installed'

    def __init__(self):
        # Resolve the dpkg binary once; raises if it is not on PATH.
        self.lowlevel_base_cmd = utils_path.find_command('dpkg')

    def check_installed(self, name):
        # Accepts either a package name or a path to a .deb file; for a
        # file, the package name is read from its control metadata first.
        if os.path.isfile(name):
            n_cmd = self.lowlevel_base_cmd + ' -f ' + name + ' Package'
            name = process.system_output(n_cmd)
        i_cmd = self.lowlevel_base_cmd + " -s " + name
        # Checking if package is installed
        package_status = process.system_output(i_cmd, ignore_status=True)
        dpkg_installed = (self.INSTALLED_OUTPUT in package_status)
        if dpkg_installed:
            return True
        return False

    def list_all(self):
        """
        List all packages available in the system.
        """
        log.debug("Listing all system packages (may take a while)")
        installed_packages = []
        cmd_result = process.run('dpkg -l', verbose=False)
        out = cmd_result.stdout.strip()
        # Skip the 5 header lines that 'dpkg -l' prints.
        raw_list = out.splitlines()[5:]
        for line in raw_list:
            parts = line.split()
            if parts[0] == "ii":  # only grab "installed" packages
                installed_packages.append("%s-%s" % (parts[1], parts[2]))
        return installed_packages

    def list_files(self, package):
        """
        List files installed by package [package].

        :param package: Package name or path to a .deb archive.
        :return: List of paths installed by package.
        """
        if os.path.isfile(package):
            # .deb archive on disk: list its contents.
            l_cmd = self.lowlevel_base_cmd + ' -c ' + package
        else:
            # NOTE(review): 'dpkg -l' lists package summaries; files of an
            # installed package are listed by 'dpkg -L'. Confirm intent.
            l_cmd = self.lowlevel_base_cmd + ' -l ' + package
        return process.system_output(l_cmd).split('\n')
class YumBackend(RpmBackend):
"""
Implements the yum backend for software manager.
Set of operations for the yum package manager, commonly found on Yellow Dog
Linux and Red Hat based distributions, such as Fedora and Red Hat
Enterprise Linux.
"""
def __init__(self, cmd='yum'):
    """
    Initializes the base command and the yum package repository.

    :param cmd: Base package-manager command (defaults to 'yum').
    """
    super(YumBackend, self).__init__()
    executable = utils_path.find_command(cmd)
    base_arguments = '-y'
    self.base_command = executable + ' ' + base_arguments
    self.repo_file_path = '/etc/yum.repos.d/avocado-managed.repo'
    self.cfgparser = ConfigParser.ConfigParser()
    self.cfgparser.read(self.repo_file_path)
    # Probe the package-manager version from the first line of
    # '<cmd> --version'; fall back to the raw output when no x.y.z
    # pattern is found.
    y_cmd = executable + ' --version | head -1'
    cmd_result = process.run(y_cmd, ignore_status=True,
                             verbose=False, shell=True)
    out = cmd_result.stdout.strip()
    try:
        ver = re.findall(r'\d*.\d*.\d*', out)[0]
    except IndexError:
        ver = out
    self.pm_version = ver
    log.debug('%s version: %s', cmd, self.pm_version)
    if HAS_YUM_MODULE:
        self.yum_base = yum.YumBase()
    else:
        self.yum_base = None
        log.error("%s module for Python is required. "
                  "Using the basic support from rpm and %s commands", cmd,
                  cmd)
def _cleanup(self):
    """
    Clean up the yum cache so new package information can be downloaded.
    """
    # Requires root; 'sudo=True' escalates via the process module.
    process.system("yum clean all", sudo=True)
def install(self, name):
    """
    Installs package [name]. Handles local installs.

    :param name: Package name or path to a local package file.
    :return: True when the install command succeeded, False otherwise.
    """
    install_cmd = "%s install %s" % (self.base_command, name)
    try:
        process.system(install_cmd, sudo=True)
    except process.CmdError:
        return False
    return True
def remove(self, name):
"""
Removes package [ |
#!/usr/bin/env python
import sys
from pfsense_api import PfSenseAPI
from datetime import datetime
from pfsense_cmdline import PfSenseOptionParser
from ConfigParser import ConfigParser
from pfsense_logger import PfSenseLogger as logging
import os.path
# Command-line definition ---------------------------------------------------
parser = PfSenseOptionParser()
parser.add_option("--id", dest="crl_id", help="ID of the CRL to update")
parser.add_option("--name", dest="name", help="Descriptive name of the CRL", default="Imported CRL")
parser.add_option("--crl", dest="crl", help="File containing CRL in PEM format", metavar="CRL_FILE")
parser.add_option("--ssl_verification", dest="ssl_verification", help="Whether SSL should be verified or not, valid values are yes/no, true/false, 1/0", default=True, metavar="yes/no")
parser.add_option("--overwrite", dest="overwrite", default=False, help="Command line options will overwrite same settings in config file", action="store_true")
(options, args) = parser.parse_args()

logger = logging.setupLogger(options.logging)
parser.check_cmd_options( options )

required_items = ['crl_id', 'crl', 'host', 'username', 'password']

# Command-line values may override per-section config-file settings.
options_cmdline = vars(options).copy()
del options_cmdline['config']
del options_cmdline['overwrite']

configFile = ConfigParser()
configFile.read(options.config)

api = PfSenseAPI()
for section in configFile.sections():
    logger.info("Working on %s" % section)
    parsed_options = parser.parse_individual_options(configFile.items(section), options_cmdline, overwrite = options.overwrite, bool_keys = ['ssl_verification'])

    # Skip sections that lack any required setting.
    required_items_missed = False
    missed_items = parser.check_required_options(parsed_options, required_items)
    for item in missed_items:
        logger.error('%s is required for entry %s' % ( item, section))
        required_items_missed = True
    if required_items_missed:
        continue

    if not os.path.isfile(parsed_options['crl']):
        logger.error('CRL file %s does not exist?' % parsed_options['crl'])
        continue
    try:
        crlFile = open(parsed_options['crl'], 'r')
        crlData = crlFile.read()
        crlFile.close()
    except:
        logger.error("Error while read CRL data from file %s" % parsed_options['crl'])
        continue

    api['options'] = parsed_options
    api.login()
    # NOTE(review): 'descr' uses the global options.name rather than the
    # per-section parsed options -- confirm this is intended.
    (rc, data, contentType) = api.call( '/system_crlmanager.php', 'POST',
        apiData = {
            'method': 'existing',
            'descr': '%s (last refresh: %s)' % (options.name, datetime.now().isoformat()),
            'crltext': crlData,
            'submit': 'Save'
        },
        itemData = {
            'id': parsed_options['crl_id'],
            'act': 'editimported'
        })
    api.logout()
    # pfSense answers the save with an HTTP 302 redirect on success.
    if rc == 302:
        logger.info('CRL Update successful for %s' % (section))
    else:
        logger.info('CRL Update failed for %s' % ( section))
|
import multiprocessing
from setuptools import setup, find_packages
from ber_kit import __version__
setup(name='ber-kit',
      version=__version__,
      description='Toolkit to manage rolling upgrades on a Marathon cluster',
      classifiers=[
          'Development Status :: 3 - Alpha',
          'Intended Audience :: Developers',
          'License :: OSI Approved :: MIT License',
          'Natural Language :: English',
          'Topic :: System :: Systems Administration',
      ],
      keywords='marathon',
      url='http://bitbucket.org/connectedsolutions/ber-kit',
      author='Charles Rice, Cake Solutions',
      author_email='devops@cakesolutions.net',
      # NOTE(review): the trove classifier above says MIT while this field
      # says GPLv3 -- the project should settle on one license.
      license='GNU GPLv3',
      packages=find_packages(),
      include_package_data=True,
      install_requires=[
          'marathon>=0.8.6',
      ],
      entry_points={
          'console_scripts': [
              'ber-kit=ber_kit.main:main',
          ],
      },
      test_suite='nose.collector',
      tests_require=[
          'nose',
          'mock',
          'coverage',
      ],
      zip_safe=False)
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import os
import sys, traceback
import threading
import time
import simplejson as json
import urllib2
from PyQt4 import QtGui,QtCore
from boardlet import Boardlet
from modellet import Modellet
class MoneroTicker(Boardlet):
    """Boardlet widget showing the Bittrex XMR bid/ask converted to USD."""

    def __init__(self, parent, btcusd):
        # btcusd: a BTC/USD ticker boardlet used for BTC -> USD conversion.
        super(MoneroTicker, self).__init__(parent)
        self.p_model = Monero( btcusd )
        self.initUI()

    def initUI(self):
        super(MoneroTicker, self).initUI()
        # XMR logo in the boardlet's icon slot.
        self.p_icon = QtGui.QLabel(self)
        self.p_icon.setGeometry( self.b_imgx(), self.b_imgy(),
            self.b_iconwidth(),self.b_iconheight() )
        self.p_icon.setPixmap( QtGui.QPixmap(os.getcwd() + "/img/xmr.png" ) )
        # Background refresh thread; daemonized so it dies with the app.
        t = threading.Thread(target=self.periodicUpdate)
        t.setDaemon(True)
        t.start()

    def paintEvent(self, e):
        # Draw the pair label, bid/ask prices and the refresh timestamp
        # using the pens/fonts provided by the Boardlet base class.
        super(MoneroTicker, self).paintEvent(e)
        qp = QtGui.QPainter()
        qp.begin(self)
        qp.setPen( self.p_grayPen )
        qp.setFont( self.p_pairFont )
        qp.drawText( self.b_col1x(), self.b_row1y(), 'Bittrex XMRUSD' )
        qp.setPen( self.p_whitePen )
        qp.setFont( self.p_normFont )
        qp.drawText( self.b_col1x(), self.b_row2y() - 5,
            'bid: ' + "{:06.2f}".format(self.p_model.getBestBid()) )
        qp.drawText( self.b_col1x(), self.b_row3y() - 5,
            'ask: ' + "{:06.2f}".format(self.p_model.getBestAsk()) )
        qp.setFont( self.p_timeFont )
        qp.setPen( self.p_grayPen )
        qp.drawText( self.b_imgx(), self.b_row4y(),
            'Refreshed: ' + self.p_model.getLastUpdated() )
        qp.end()

    def periodicUpdate(self):
        # Runs forever on the daemon thread, refreshing the model on the
        # boardlet's own schedule.
        while(True):
            st = self.getNextWaitTimeSeconds()
            time.sleep( st )
            self.p_model.doRefresh()
class Monero(Modellet):
def __init__(self, btcusd):
self.p_btcusd = btcusd
self.p_refreshTime = None
self.p_bestBid = '000.00'
self.p_bestAsk | = '000.00'
def getBestBid(self):
return float(self.p_bestBid) * float(self.p_btcusd.p_model.getBid())
def getBestAsk(self):
return float(self.p_bestAsk) * float(self.p_btcusd.p_model.getAsk())
def doRefresh(self):
headers = {'User-agent' : 'Mozilla/5.0'}
req = urllib2.Request( 'https://bittrex.com/api/v1.1/public/getticker?market=BTC-XMR', None, headers )
try:
resp = urllib2.urlopen(req).read()
self.p_bestBid = str( json.l | oads(resp)['result']['Bid'] )
self.p_bestAsk = str( json.loads(resp)['result']['Ask'] )
super(Monero, self).setFaultFlag(False)
super(Monero, self).setLastUpdatedNow()
except Exception:
exc_type, exc_value, exc_traceback = sys.exc_info()
lines = traceback.format_exception(exc_type, exc_value, exc_traceback)
print ''.join('!! ' + line for line in lines)
super(Monero, self).setFaultFlag(True)
|
"""Benchmark Walk algorithm"""
import numpy as np
import bench
import obsoper.walk
class BenchmarkWalk(bench.Suite):
    """Benchmark Walk.detect() on a small 3x3 lon/lat grid."""

    def setUp(self):
        longitudes, latitudes = np.meshgrid([1, 2, 3],
                                            [1, 2, 3],
                                            indexing="ij")
        self.fixture = obsoper.walk.Walk.from_lonlats(longitudes,
                                                      latitudes)

    def bench_detect(self):
        # Repeat to give the harness a measurable amount of work.
        for _ in range(10):
            self.fixture.detect((2.9, 2.9), i=0, j=0)
|
from boxbranding import getBoxType, getMachineProcModel, getMachineBuild
from os import path
from enigma import eDVBResourceManager, Misc_Options
from Tools.Directories import fileExists, fileCheck
from Tools.HardwareInfo import HardwareInfo
SystemInfo = { }
#FIXME...
def getNumVideoDecoders():
    """Count the /dev/dvb/adapter0/video<N> device nodes present."""
    count = 0
    while fileExists("/dev/dvb/adapter0/video%d" % count, 'f'):
        count += 1
    return count
SystemInfo["NumVideoDecoders"] = getNumVideoDecoders()
# Picture-in-picture needs at least two hardware video decoders.
SystemInfo["PIPAvailable"] = SystemInfo["NumVideoDecoders"] > 1
SystemInfo["CanMeasureFrontendInputPower"] = eDVBResourceManager.getInstance().canMeasureFrontendInputPower()
def countFrontpanelLEDs():
    """Return the number of front-panel LED pattern nodes under /proc."""
    if fileExists("/proc/stb/fp/led_set_pattern"):
        total = 1
    else:
        total = 0
    while fileExists("/proc/stb/fp/led%d_pattern" % total):
        total += 1
    return total
# Capability table: each key is probed once at import time from device
# nodes, /proc entries, installed plugins and the box model.
SystemInfo["12V_Output"] = Misc_Options.getInstance().detected_12V_output()
SystemInfo["ZapMode"] = fileCheck("/proc/stb/video/zapmode") or fileCheck("/proc/stb/video/zapping_mode")
SystemInfo["NumFrontpanelLEDs"] = countFrontpanelLEDs()
SystemInfo["FrontpanelDisplay"] = fileExists("/dev/dbox/oled0") or fileExists("/dev/dbox/lcd0")
SystemInfo["OledDisplay"] = fileExists("/dev/dbox/oled0") or getBoxType() in ('osminiplus')
SystemInfo["LcdDisplay"] = fileExists("/dev/dbox/lcd0")
SystemInfo["FBLCDDisplay"] = fileCheck("/proc/stb/fb/sd_detach")
SystemInfo["VfdDisplay"] = getBoxType() not in ('vuultimo', 'xpeedlx3', 'et10000', 'mutant2400', 'quadbox2400', 'atemionemesis') and fileExists("/dev/dbox/oled0")
SystemInfo["DeepstandbySupport"] = HardwareInfo().has_deepstandby()
SystemInfo["Fan"] = fileCheck("/proc/stb/fp/fan")
SystemInfo["FanPWM"] = SystemInfo["Fan"] and fileCheck("/proc/stb/fp/fan_pwm")
SystemInfo["StandbyPowerLed"] = fileExists("/proc/stb/power/standbyled")
# These Gigablue models cannot wake on LAN.
if getBoxType() in ('gbquad', 'gbquadplus','gb800ueplus', 'gb800seplus', 'gbipbox'):
    SystemInfo["WOL"] = False
else:
    SystemInfo["WOL"] = fileCheck("/proc/stb/power/wol") or fileCheck("/proc/stb/fp/wol")
SystemInfo["HDMICEC"] = (fileExists("/dev/hdmi_cec") or fileExists("/dev/misc/hdmi_cec0")) and fileExists("/usr/lib/enigma2/python/Plugins/SystemPlugins/HdmiCEC/plugin.pyo")
SystemInfo["SABSetup"] = fileExists("/usr/lib/enigma2/python/Plugins/SystemPlugins/SABnzbd/plugin.pyo")
SystemInfo["SeekStatePlay"] = False
SystemInfo["GraphicLCD"] = getBoxType() in ('vuultimo', 'xpeedlx3', 'et10000', 'mutant2400', 'quadbox2400', 'atemionemesis')
SystemInfo["Blindscan"] = fileExists("/usr/lib/enigma2/python/Plugins/SystemPlugins/Blindscan/plugin.pyo")
SystemInfo["Satfinder"] = fileExists("/usr/lib/enigma2/python/Plugins/SystemPlugins/Satfinder/plugin.pyo")
SystemInfo["HasExternalPIP"] = getMachineBuild() not in ('et9x00', 'et6x00', 'et5x00') and fileCheck("/proc/stb/vmpeg/1/external")
SystemInfo["hasPIPVisibleProc"] = fileCheck("/proc/stb/vmpeg/1/visible")
SystemInfo["VideoDestinationConfigurable"] = fileExists("/proc/stb/vmpeg/0/dst_left")
SystemInfo["GBWOL"] = fileExists("/usr/bin/gigablue_wol")
SystemInfo["LCDSKINSetup"] = path.exists("/usr/share/enigma2/display")
SystemInfo["CIHelper"] = fileExists("/usr/bin/cihelper")
SystemInfo["isGBIPBOX"] = fileExists("/usr/lib/enigma2/python/gbipbox.so")
SystemInfo["HaveMultiBoot"] = fileCheck("/boot/STARTUP") or fileCheck("/boot/STARTUP_1")
SystemInfo["HaveCISSL"] = fileCheck("/etc/ssl/certs/customer.pem") and fileCheck("/etc/ssl/certs/device.pem")
SystemInfo["LCDMiniTV"] = fileExists("/proc/stb/lcd/mode")
SystemInfo["LCDMiniTV4k"] = fileExists("/proc/stb/lcd/live_enable")
SystemInfo["LCDMiniTVPiP"] = SystemInfo["LCDMiniTV"] and getBoxType() != 'gb800ueplus'
SystemInfo["LcdLiveTV"] = fileCheck("/proc/stb/fb/sd_detach")
SystemInfo["HaveTouchSensor"] = getBoxType() in ('dm520', 'dm525', 'dm900')
SystemInfo["DefaultDisplayBrightness"] = getBoxType() == 'dm900' and 8 or 5
SystemInfo["RecoveryMode"] = fileCheck("/proc/stb/fp/boot_mode")
|
from pale.fields.string import StringField
class TimestampField(StringField):
    """A field for timestamp strings."""
    value_type = 'timestamp'
    # TODO - timestamp field rendering
|
e "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Ref: https://github.com/swagger-api/swagger-codegen
"""
from pprint import pformat
from six import iteritems
class Thermal100Temperature(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self):
    """
    Thermal100Temperature - a model defined in Swagger

    :param dict swaggerTypes: The key is attribute name
        and the value is attribute type.
    :param dict attributeMap: The key is attribute name
        and the value is json key in definition.
    """
    # Attribute name -> swagger type, used by the (de)serializer.
    self.swagger_types = {
        'member_id': 'str',
        'oem': 'ResourceOem',
        'physical_context': 'PhysicalContext100PhysicalContext',
        'related_item': 'list[Odata400IdRef]',
        'related_itemodata_count': 'Odata400Count',
        'related_itemodata_navigation_link': 'Odata400IdRef',
        'status': 'ResourceStatus'
    }
    # Attribute name -> JSON key in the Redfish payload.
    self.attribute_map = {
        'member_id': 'MemberId',
        'oem': 'Oem',
        'physical_context': 'PhysicalContext',
        'related_item': 'RelatedItem',
        'related_itemodata_count': 'RelatedItem@odata.count',
        'related_itemodata_navigation_link': 'RelatedItem@odata.navigationLink',
        'status': 'Status'
    }
    # All attributes start unset; properties below expose them.
    self._member_id = None
    self._oem = None
    self._physical_context = None
    self._related_item = None
    self._related_itemodata_count = None
    self._related_itemodata_navigation_link = None
    self._status = None
@property
def member_id(self):
"""
Gets the member_id of this Thermal100Temperature.
This is the identifier for the member within the collection.
:return: The member_id of this Thermal100Temperature.
:rtype: str
"""
return self._member_id
@member_id.setter
def member_id(self, member_id):
"""
Sets the member_id of this Thermal100Temperature.
This is the identifier for the member within the collection.
:param member_id: The member_id of this Thermal100Temperature.
:type: str
"""
self._member_id = member_id
@property
def oem(self):
"""
Gets the oem of this Thermal100Temperature.
This is the manufacturer/provider specific extension moniker used to divide the Oem object into sections.
:return: The oem of this Thermal100Temperature.
:rtype: ResourceOem
"""
return self._oem
@oem.setter
def oem(self, oem):
"""
Sets the oem of this Thermal100Temperature.
This is the manufacturer/provider specific extension moniker used to divide the Oem object into sections.
:param oem: The oem of this Thermal100Temperature.
:type: ResourceOem
"""
self._oem = oem
@property
def physical_context(self):
"""
Gets the physical_context of this Thermal100Temperature.
Describes the area or device to which this temperature measurement applies.
:return: The physical_context of this Thermal100Temperature.
:rtype: PhysicalContext100PhysicalContext
"""
return self._physical_context
@physical_context.setter
def physical_context(self, physical_context):
"""
Sets the physical_context of this Thermal100Temperature.
Describes the area or device to which this temperature measurement applies.
:param physical_context: The physical_context of this Thermal100Temperature.
:type: PhysicalContext100PhysicalContext
"""
self._physical_context = physical_context
@property
def related_item(self):
"""
Gets the related_item of this Thermal100Temperature.
Describes the areas or devices to which this temperature measurement applies.
:return: The related_item of this Thermal100Temperature.
:rtype: list[Odata400IdRef]
"""
return self._related_item
@related_item.setter
def related_item(self, related_item):
"""
Sets the related_item of this Thermal100Temperature.
Describes the areas or devices to which this temperature measurement applies.
:param related_item: The related_item of this Thermal100Temperature.
:type: list[Odata400IdRef]
"""
self._related_item = related_item
@property
def related_itemodata_count(self):
"""
Gets the related_itemodata_count of this Thermal100Temperature.
:return: The related_itemodata_count of this Thermal100Temperature.
:rtype: Odata400Count
"""
return self._related_itemodata_count
@related_itemodata_count.setter
def related_itemodata_count(self, related_itemodata_count):
"""
Sets the related_itemodata_count of this Thermal100Temperature.
:param related_itemodata_count: The related_itemodata_count of this Thermal100Temperature.
:type: Odata400Count
"""
self._related_itemodata_count = related_itemodata_count
@property
def related_itemodata_navigation_link(self):
"""
Gets the related_itemodata_navigation_link of this Thermal100Temperature.
:return: The related_itemodata_navigation_link of this Thermal100Temperature.
:rtype: Odata400IdRef
"""
return self._related_itemodata_navigation_link
@related_itemodata_navigation_link.setter
def related_itemodata_navigation_link(self, related_itemodata_navigation_link):
"""
Sets the related_itemodata_navigation_link of this Thermal100Temperature.
:param related_itemodata_navigation_link: The related_itemodata_navigation_link of this Thermal100Temperature.
:type: Odata400IdRef
"""
self._related_itemodata_navigation_link = related_itemodata_navigation_link
@property
def status(self):
"""
Gets the status of this Thermal100Temperature.
:return: The status of this Thermal100Temperature.
:rtype: ResourceStatus
"""
return self._status
@status.setter
def status(self, status):
"""
Sets the status of this Thermal100Temperature.
:param status: The status of this Thermal100Temperature.
:type: ResourceStatus
"""
self._status = status
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == ot |
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language gover | nin | g permissions and limitations
# under the License.
from keystoneclient import exceptions
from heat.common import exception
from heat.common import heat_keystoneclient as hkc
from heat.engine.clients import client_plugin
from heat.engine import constraints
class KeystoneClientPlugin(client_plugin.ClientPlugin):
    """Heat client plugin for the Keystone identity service.

    Resolves names or IDs of keystone entities (roles, projects, domains,
    groups, services and users) to their IDs, first via a direct GET and
    then by a list-by-name search.
    """

    exceptions_module = exceptions

    service_types = [IDENTITY] = ['identity']

    def _create(self):
        # Heat wraps keystoneclient in its own helper class.
        return hkc.KeystoneClient(self.context)

    def is_not_found(self, ex):
        return isinstance(ex, exceptions.NotFound)

    def is_over_limit(self, ex):
        return isinstance(ex, exceptions.RequestEntityTooLarge)

    def is_conflict(self, ex):
        return isinstance(ex, exceptions.Conflict)

    def _entity_id(self, manager, entity, name_or_id):
        """Resolve *name_or_id* through *manager*: GET, then list-by-name.

        Raises EntityNotFound (tagged with *entity*) when nothing matches.
        """
        try:
            return manager.get(name_or_id).id
        except exceptions.NotFound:
            for obj in manager.list(name=name_or_id):
                if obj.name == name_or_id:
                    return obj.id
            raise exception.EntityNotFound(entity=entity, name=name_or_id)

    def get_role_id(self, role):
        return self._entity_id(self.client().client.roles,
                               'KeystoneRole', role)

    def get_project_id(self, project):
        return self._entity_id(self.client().client.projects,
                               'KeystoneProject', project)

    def get_domain_id(self, domain):
        return self._entity_id(self.client().client.domains,
                               'KeystoneDomain', domain)

    def get_group_id(self, group):
        return self._entity_id(self.client().client.groups,
                               'KeystoneGroup', group)

    def get_service_id(self, service):
        # Services are matched by the service manager itself, and a name may
        # legitimately map to several services, so this cannot share
        # _entity_id: a multi-match is a conflict, not a miss.
        try:
            return self.client().client.services.get(service).id
        except exceptions.NotFound:
            matches = self.client().client.services.list(name=service)
            if len(matches) == 1:
                return matches[0].id
            elif len(matches) > 1:
                raise exception.KeystoneServiceNameConflict(service=service)
            else:
                raise exception.EntityNotFound(entity='KeystoneService',
                                               name=service)

    def get_user_id(self, user):
        return self._entity_id(self.client().client.users,
                               'KeystoneUser', user)
class KeystoneRoleConstraint(constraints.BaseCustomConstraint):
    """Validates that a property value names an existing keystone role."""

    expected_exceptions = (exception.EntityNotFound,)

    def validate_with_client(self, client, role):
        plugin = client.client_plugin('keystone')
        plugin.get_role_id(role)
class KeystoneDomainConstraint(constraints.BaseCustomConstraint):
    """Validates that a property value names an existing keystone domain."""

    expected_exceptions = (exception.EntityNotFound,)

    def validate_with_client(self, client, domain):
        plugin = client.client_plugin('keystone')
        plugin.get_domain_id(domain)
class KeystoneProjectConstraint(constraints.BaseCustomConstraint):
    """Validates that a property value names an existing keystone project."""

    expected_exceptions = (exception.EntityNotFound,)

    def validate_with_client(self, client, project):
        plugin = client.client_plugin('keystone')
        plugin.get_project_id(project)
class KeystoneGroupConstraint(constraints.BaseCustomConstraint):
    """Validates that a property value names an existing keystone group."""

    expected_exceptions = (exception.EntityNotFound,)

    def validate_with_client(self, client, group):
        plugin = client.client_plugin('keystone')
        plugin.get_group_id(group)
class KeystoneServiceConstraint(constraints.BaseCustomConstraint):
    """Validates that a property value names exactly one keystone service."""

    # A name matching several services raises KeystoneServiceNameConflict.
    expected_exceptions = (exception.EntityNotFound,
                           exception.KeystoneServiceNameConflict,)

    def validate_with_client(self, client, service):
        plugin = client.client_plugin('keystone')
        plugin.get_service_id(service)
class KeystoneUserConstraint(constraints.BaseCustomConstraint):
    """Validates that a property value names an existing keystone user."""

    expected_exceptions = (exception.EntityNotFound,)

    def validate_with_client(self, client, user):
        plugin = client.client_plugin('keystone')
        plugin.get_user_id(user)
|
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
de | f create(kernel):
result = Static()
result.tem | plate = "object/static/naboo/shared_waterfall_naboo_falls_01.iff"
result.attribute_template_id = -1
result.stfName("obj_n","unknown_object")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result |
pper for runtime errors raised by AutoGraph generated code."""
  def __init__(self, op_name, op_message, custom_traceback):
    # op_name/op_message identify the failing TF op; custom_traceback is the
    # already-rewritten list of frame tuples used by __str__.
    self.op_name = op_name
    self.op_message = op_message
    self.custom_traceback = custom_traceback
    super(TfRuntimeError, self).__init__()
  def __str__(self):
    # Render the op's error message followed by the (rewritten) definition
    # traceback, mirroring TensorFlow's own runtime-error formatting.
    message = '%s\n\nCaused by op %r, defined at:\n' % (self.op_message,
                                                        self.op_name)
    return message + ''.join(traceback.format_list(self.custom_traceback))
def _rewrite_frame(source_map, cleaned_traceback, stack_frame_indices):
  """Rewrites the stack frames at the given indices, in place.

  Args:
    source_map: Dict[CodeLocation, OriginInfo], a mapping between the user and
      AG generated code.
    cleaned_traceback: List[Tuple[text, text, text, text]], the current
      traceback; entries are replaced in place.
    stack_frame_indices: Iterable[Int], frame indices to possibly rewrite if
      there are matching source mapping keys.

  Returns:
    None
  """
  for idx in stack_frame_indices:
    # Frame layout: (file_path, line_number, function_name, code).
    path, lineno = cleaned_traceback[idx][:2]
    origin = source_map.get(CodeLocation(file_path=path, line_number=lineno))
    if origin is not None:
      cleaned_traceback[idx] = origin.as_frame()
# TODO(znado): Make more robust to name changes in the rewriting logic.
def _remove_rewrite_frames(tb):
"""Remove stack frames containing the error rewriting logic."""
cleaned_tb = []
for f in tb:
if 'ag__.rewrite_graph_construction_error' not in f[3]:
cleaned_tb.append(f)
return cleaned_tb
def rewrite_graph_construction_error(source_map):
  """Rewrites errors raised by non-AG APIs inside AG generated code.

  Meant to be called from the try/except block inside each AutoGraph generated
  function. Only rewrites the traceback frames corresponding to the function
  that this is called from. When we raise a GraphConstructionError at the end
  it is then caught by calling functions, where they can be responsible for
  rewriting their own frames.

  Args:
    source_map: Dict[CodeLocation, OriginInfo], a mapping between the user and
        AG generated code.

  Raises:
    GraphConstructionError: The rewritten underlying error.
    Exception: The underlying error, if it could not be rewritten.
  """
  error_info = sys.exc_info()
  _, original_error, e_traceback = error_info
  assert original_error is not None
  try:
    # Stack index 1 is the caller: the AutoGraph-generated function whose
    # frames this invocation is allowed to rewrite.
    _, _, _, func_name, _, _ = tf_inspect.stack()[1]
    # The latest function call is added to the beginning of a traceback, but
    # when rewriting the traceback of multiple function calls the previous
    # functions' except blocks may have already rewritten their own frames so
    # we want to copy over all of the previous frames. We may have rewritten
    # previous frames only if the error is a GraphConstructionError.
    if isinstance(original_error, GraphConstructionError):
      cleaned_traceback = traceback.extract_tb(e_traceback)
      previous_traceback = original_error.custom_traceback
      cleaned_traceback = [cleaned_traceback[0]] + previous_traceback
    else:
      cleaned_traceback = traceback.extract_tb(e_traceback)
    cleaned_traceback = _remove_rewrite_frames(cleaned_traceback)
    current_frame_indices = []
    # This code is meant to be called from the try/except block that wraps a
    # function body. Here we look for all frames that came from the function
    # that this wraps, look for any matching line numbers in the source
    # mapping, and then rewrite them if matches are found.
    for fi, frame in enumerate(cleaned_traceback):
      _, _, frame_func_name, _ = frame
      if frame_func_name == func_name:
        current_frame_indices.append(fi)
        break
    if current_frame_indices:
      _rewrite_frame(source_map, cleaned_traceback, current_frame_indices)
    if isinstance(original_error, GraphConstructionError):
      original_error.custom_traceback = cleaned_traceback
      new_error = original_error
    else:
      new_error = GraphConstructionError(original_error, cleaned_traceback)
  except Exception:
    # If anything goes wrong during rewriting, fall back to the untouched
    # original error rather than masking it.
    logging.exception('Error while rewriting AutoGraph error:')
    raise original_error
  else:
    raise new_error
  finally:
    # Addresses warning https://docs.python.org/2/library/sys.html#sys.exc_info.
    del e_traceback
def rewrite_tf_runtime_error(error, source_map):
  """Rewrites TensorFlow runtime errors raised by ops created in AG code.

  Args:
    error: error_impl.OpError, an TensorFlow error that will have its traceback
        rewritten.
    source_map: Dict[CodeLocation, OriginInfo], a mapping between the user and
        AG generated code.

  Returns:
    A TfRuntimeError with a traceback rewritten according to the given
    source mapping.
  """
  # Check for cases where we leave a user method and re-enter it in the
  # traceback. This is done by looking at the function names when the
  # filenames are from any files the user code is in. If we find a case where
  # we return to a user method after leaving it then we cut out the frames in
  # between because we assume this means these in between frames are from
  # internal AutoGraph code that shouldn't be included.
  #
  # An example of this is:
  #
  #  File "file1.py", line 57, in my_func
  #    ...
  #  File "control_flow_ops.py", line 231, in cond
  #    ...
  #  File "control_flow_ops.py", line 1039, in inner_cond
  #    ...
  #  File "file1.py", line 68, in my_func
  #    ...
  #
  # Where we would remove the control_flow_ops.py frames because we re-enter
  # my_func in file1.py.
  #
  # The source map keys are (file_path, line_number) so get the set of all user
  # file_paths.
  try:
    all_user_files = set(k.file_path for k in source_map)
    cleaned_traceback = []
    # Trailing state about the most recent user-code frame seen, used to
    # detect re-entry into the same user function.
    last_user_frame_index = None
    last_user_user_file_path = None
    last_user_user_fn_name = None
    for fi, frame in enumerate(error.op.traceback):
      frame_file_path, frame_line_number, _, _ = frame
      src_map_key = CodeLocation(
          file_path=frame_file_path, line_number=frame_line_number)
      if frame_file_path in all_user_files:
        if src_map_key in source_map:
          original_fn_name = source_map[src_map_key].function_name
          if (last_user_frame_index is not None and
              last_user_user_file_path == frame_file_path):
            # Re-entered the same user file: drop the intervening internal
            # frames (and the duplicate entry frame when it is the same fn).
            if last_user_user_fn_name == original_fn_name:
              cleaned_traceback = cleaned_traceback[:last_user_frame_index]
            else:
              cleaned_traceback = cleaned_traceback[:last_user_frame_index + 1]
          last_user_user_fn_name = original_fn_name
        else:
          last_user_user_fn_name = None
        last_user_frame_index = fi
        last_user_user_file_path = frame_file_path
      cleaned_traceback.append(frame)
    for fi in range(len(cleaned_traceback)):
      _rewrite_frame(source_map, cleaned_traceback, [fi])
    op_name = error.op.name
    op_message = error.message
    rewritten_error = TfRuntimeError(op_name, op_message, cleaned_traceback)
    return rewritten_error
  except Exception:  # pylint: disable=broad-except
    # Rewriting is best-effort: on any failure return the original error.
    logging.exception('Error while rewriting AutoGraph error:')
    return error
# TODO(znado): Add arg to enable different levels of error rewriting.
@contextlib.contextmanager
def improved_errors(converted_function):
"""Context manager that rewrites runtime errors.
This context manager will rewrite runtime errors so that their traceback
is relative to the original code before conversion.
Use with the output of to_graph, and wrap the execution of respective ops.
Example:
converted_my_func = ag.to_graph(my_func)
ops = converted_my_func(...)
with ag.improved_errors(converted_my_func):
sess.run(ops)
Args:
converted_function: Callable[..., Any], the output of a to_graph call
Yields:
None
Raises:
TfRuntimeError: if any OpError originates in the converted code, it will
be wrapped into a TfRuntimeError
ValueError: If converted_func |
# Copyright (c) 2011-2013, ImageCat Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your op | tion) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. | See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""
XML helper functions
"""
def get_node_attrib(node, attrib_name):
    """Return the value of attribute *attrib_name* on XML *node*.

    Returns '' (rather than raising) when the attribute is missing, when
    *node* has no ``attrib`` mapping, or when *node* is not an element.
    """
    try:
        return node.attrib[attrib_name]
    except (AttributeError, KeyError, TypeError):
        # Narrowed from a bare `except:` which also swallowed things like
        # KeyboardInterrupt; these three cover the realistic failure modes.
        return ''
#!/usr/bin/env python
#
# This file is part of pyasn1-modules software.
#
# Copyright (c) 2005-2019, Ilya Etingof <etingof@gmail.com>
# License: http://snmplabs.com/pyasn1/license.html
#
# Read X.509 CRL on stdin, print them pretty and encode back into
# original wire format.
# CRL can be generated with "openssl ca -gencrl ..." command.
#
import sys
from pyasn1.codec.der import decoder
from pyasn1.codec.der import encoder
from pyasn1_modules import pem
from pyasn1_modules import rfc2459
if | len(sys.argv) != 1:
print("""Usage:
$ cat crl.pem | %s""" % sys.argv[0])
sys.exit(-1)
asn1Spec = rfc2459.CertificateList()
cnt = 0
while True:
idx, substrate = pem.readPemBlocksFromFile(sys.stdin, ('-----BEGIN X509 CRL-----', '-----END X509 CRL-----'))
if not substrate:
break
key, | rest = decoder.decode(substrate, asn1Spec=asn1Spec)
if rest:
substrate = substrate[:-len(rest)]
print(key.prettyPrint())
assert encoder.encode(key) == substrate, 'pkcs8 recode fails'
cnt += 1
print('*** %s CRL(s) re/serialized' % cnt)
|
# Copyright (C) 2008-2014 Bastian Klein | eidam
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, o | r
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
HTML utils
"""
|
s np
from monty.json import jsaniti | ze
from pymatgen.phonon.bandstructure import PhononBandStructureSymmLine
from pymatgen.util.plotting import pretty_plot
from pymatgen.electronic_structure.plotter import plot_brillouin_zone
"""
This module implements plotter for DOS and band structure.
"""
logger = logging.getLogger(__name__)
class PhononDosPlotter(object):
    """
    Class for plotting phonon DOSs. Note that the interface is extremely flexible
    given that there are many different ways in which people want to view
    DOS. The typical usage is::
        # Initializes plotter with some optional args. Defaults are usually
        # fine,
        plotter = PhononDosPlotter()
        # Adds a DOS with a label.
        plotter.add_dos("Total DOS", dos)
        # Alternatively, you can add a dict of DOSs. This is the typical
        # form returned by CompletePhononDos.get_element_dos().
    Args:
        stack: Whether to plot the DOS as a stacked area graph
        key_sort_func: function used to sort the dos_dict keys.
        sigma: A float specifying a standard deviation for Gaussian smearing
            the DOS for nicer looking plots. Defaults to None for no
            smearing.
    """
    def __init__(self, stack=False, sigma=None):
        self.stack = stack
        self.sigma = sigma
        # OrderedDict so DOSs are plotted in the order they were added.
        self._doses = OrderedDict()
    def add_dos(self, label, dos):
        """
        Adds a dos for plotting.
        Args:
            label:
                label for the DOS. Must be unique.
            dos:
                PhononDos object
        """
        # Apply Gaussian smearing only if a sigma was configured.
        densities = dos.get_smeared_densities(self.sigma) if self.sigma \
            else dos.densities
        self._doses[label] = {'frequencies': dos.frequencies, 'densities': densities}
    def add_dos_dict(self, dos_dict, key_sort_func=None):
        """
        Add a dictionary of doses, with an optional sorting function for the
        keys.
        Args:
            dos_dict: dict of {label: Dos}
            key_sort_func: function used to sort the dos_dict keys.
        """
        if key_sort_func:
            keys = sorted(dos_dict.keys(), key=key_sort_func)
        else:
            keys = dos_dict.keys()
        for label in keys:
            self.add_dos(label, dos_dict[label])
    def get_dos_dict(self):
        """
        Returns the added doses as a json-serializable dict. Note that if you
        have specified smearing for the DOS plot, the densities returned will
        be the smeared densities, not the original densities.
        Returns:
            Dict of dos data. Generally of the form, {label: {'frequencies':..,
            'densities': ...}}
        """
        return jsanitize(self._doses)
    def get_plot(self, xlim=None, ylim=None):
        """
        Get a matplotlib plot showing the DOS.
        Args:
            xlim: Specifies the x-axis limits. Set to None for automatic
                determination.
            ylim: Specifies the y-axis limits.
        """
        import prettyplotlib as ppl
        from prettyplotlib import brewer2mpl
        # Brewer 'Set1' palette supports between 3 and 9 colors.
        ncolors = max(3, len(self._doses))
        ncolors = min(9, ncolors)
        colors = brewer2mpl.get_map('Set1', 'qualitative', ncolors).mpl_colors
        y = None
        alldensities = []
        allfrequencies = []
        plt = pretty_plot(12, 8)
        # Note that this complicated processing of frequencies is to allow for
        # stacked plots in matplotlib.
        for key, dos in self._doses.items():
            frequencies = dos['frequencies']
            densities = dos['densities']
            if y is None:
                y = np.zeros(frequencies.shape)
            if self.stack:
                # Running sum so each stacked curve sits on top of the last.
                y += densities
                newdens = y.copy()
            else:
                newdens = densities
            allfrequencies.append(frequencies)
            alldensities.append(newdens)
        # Reverse so the first-added DOS is drawn last (on top).
        keys = list(self._doses.keys())
        keys.reverse()
        alldensities.reverse()
        allfrequencies.reverse()
        allpts = []
        for i, (key, frequencies, densities) in enumerate(zip(keys, allfrequencies, alldensities)):
            allpts.extend(list(zip(frequencies, densities)))
            if self.stack:
                plt.fill(frequencies, densities, color=colors[i % ncolors],
                         label=str(key))
            else:
                ppl.plot(frequencies, densities, color=colors[i % ncolors],
                         label=str(key), linewidth=3)
        if xlim:
            plt.xlim(xlim)
        if ylim:
            plt.ylim(ylim)
        else:
            # Auto y-range from the points that fall inside the x-range.
            xlim = plt.xlim()
            relevanty = [p[1] for p in allpts
                         if xlim[0] < p[0] < xlim[1]]
            plt.ylim((min(relevanty), max(relevanty)))
        ylim = plt.ylim()
        # Vertical guide line at zero frequency.
        plt.plot([0, 0], ylim, 'k--', linewidth=2)
        plt.xlabel('Frequencies (THz)')
        plt.ylabel('Density of states')
        plt.legend()
        leg = plt.gca().get_legend()
        ltext = leg.get_texts()  # all the text.Text instance in the legend
        plt.setp(ltext, fontsize=30)
        plt.tight_layout()
        return plt
    def save_plot(self, filename, img_format="eps", xlim=None, ylim=None):
        """
        Save matplotlib plot to a file.
        Args:
            filename: Filename to write to.
            img_format: Image format to use. Defaults to EPS.
            xlim: Specifies the x-axis limits. Set to None for automatic
                determination.
            ylim: Specifies the y-axis limits.
        """
        plt = self.get_plot(xlim, ylim)
        plt.savefig(filename, format=img_format)
    def show(self, xlim=None, ylim=None):
        """
        Show the plot using matplotlib.
        Args:
            xlim: Specifies the x-axis limits. Set to None for automatic
                determination.
            ylim: Specifies the y-axis limits.
        """
        plt = self.get_plot(xlim, ylim)
        plt.show()
class PhononBSPlotter(object):
"""
Class to plot or get data to facilitate the plot of band structure objects.
Args:
bs: A BandStructureSymmLine object.
"""
    def __init__(self, bs):
        # Reject band structures not sampled along symmetry lines: the
        # plotting routines assume a line-mode path through the zone.
        if not isinstance(bs, PhononBandStructureSymmLine):
            raise ValueError(
                "PhononBSPlotter only works with PhononBandStructureSymmLine objects. "
                "A PhononBandStructure object (on a uniform grid for instance and "
                "not along symmetry lines won't work)")
        self._bs = bs
        # Cached band count used by the plotting methods.
        self._nb_bands = self._bs.nb_bands
def _maketicks(self, plt):
"""
utility private method to add ticks to a band structure
"""
ticks = self.get_ticks()
# Sanitize only plot the uniq values
uniq_d = []
uniq_l = []
temp_ticks = list(zip(ticks['distance'], ticks['label']))
for i in range(len(temp_ticks)):
if i == 0:
uniq_d.append(temp_ticks[i][0])
uniq_l.append(temp_ticks[i][1])
logger.debug("Adding label {l} at {d}".format(
l=temp_ticks[i][0], d=temp_ticks[i][1]))
else:
if temp_ticks[i][1] == temp_ticks[i - 1][1]:
logger.debug("Skipping label {i}".format(
i=temp_ticks[i][1]))
else:
logger.debug("Adding label {l} at {d}".format(
l=temp_ticks[i][0], d=temp_ticks[i][1]))
uniq_d.append(temp_ticks[i][0])
uniq_l.append(temp_ticks[i][1])
logger.debug("Unique labels are %s" % list(zip(uniq_d, uniq_l)))
plt.gca().set_xticks(uniq_d)
plt.gca().set_xticklabels(uniq_l)
for i in range(len(ticks['label'])):
if ticks['label'][i] is not None:
# don't print the same label twice
if i != 0:
if ticks['label'][i] == ticks['label'][i - 1]:
logger.debug("already print label... "
"skipping label {i}".format(
|
import click
import inflection
import os
def default(txt):
    """Render *txt* in the style used for default values (bold white)."""
    styled = click.style(txt, bold=True, fg="white")
    return styled
def prompt(txt):
    """Render *txt* in the style used for prompt labels (green)."""
    styled = click.style(txt, fg="green")
    return styled
@click.command()
@click.argument("name")
@click.option("--description", prompt=prompt("Description"), default=default("N/A"))
@click.option(
    "--author",
    prompt=prompt("Author name"),
    default=lambda: default(os.environ.get("USER", "")),
)
@click.option(
    "--email",
    prompt=prompt("Author email"),
    default=lambda: default(os.environ.get("USER", "") + "@me.com"),
)
@click.option("--version", prompt=prompt("Version"), default=default("0.0.1"))
@click.option(
    "--django-version", prompt=prompt("Django version"), default=default("1.10")
)
def get_context(name, description, author, email, version, django_version):
    """Collect scaffold context values, stripping click styling from inputs."""
    # Prompt defaults are ANSI-styled for display; unstyle everything
    # before it is used as template context.
    cleaned = {
        "name": click.unstyle(name),
        "description": click.unstyle(description),
        "author": click.unstyle(author),
        "email": click.unstyle(email),
        "version": click.unstyle(version),
        "django_version": click.unstyle(django_version),
    }
    return {
        "app": inflection.underscore(cleaned["name"]),
        "description": cleaned["description"],
        "author": cleaned["author"],
        "email": cleaned["email"],
        "version": cleaned["version"],
        "django_version": cleaned["django_version"],
    }
|
"""Tests for the nut integration."""
import json
from unittest.mock import MagicMock, patch
from homeassistant.components.nut.const import DOMAIN
from homeassistant.const import CONF_HOST, CONF_PORT, CONF_RESOURCES
from homeassistant.core import HomeAssistant
from tests.common import MockConfigEntry, load_fix | ture
def _get_mock_pynutclient(list_vars=None, list_ups=None):
pynutclient = MagicMock()
type(pynutclient).list_ups = MagicMock(return_value=list_ups)
type(pynutcl | ient).list_vars = MagicMock(return_value=list_vars)
return pynutclient
async def async_init_integration(
    hass: HomeAssistant, ups_fixture: str, resources: list, add_options: bool = False
) -> MockConfigEntry:
    """Set up the nut integration in Home Assistant with a mocked client."""
    ups_fixture = f"nut/{ups_fixture}.json"
    # The fixture file holds the UPS variable dump that the mocked NUT
    # client will report.
    list_vars = json.loads(load_fixture(ups_fixture))
    mock_pynut = _get_mock_pynutclient(list_ups={"ups1": "UPS 1"}, list_vars=list_vars)
    with patch(
        "homeassistant.components.nut.PyNUTClient",
        return_value=mock_pynut,
    ):
        entry = MockConfigEntry(
            domain=DOMAIN,
            data={CONF_HOST: "mock", CONF_PORT: "mock", CONF_RESOURCES: resources},
            # Optionally duplicate resources into entry options to exercise
            # the options-migration path.
            options={CONF_RESOURCES: resources} if add_options else {},
        )
        entry.add_to_hass(hass)
        await hass.config_entries.async_setup(entry.entry_id)
        await hass.async_block_till_done()
    return entry
|
* fudgeFactor['nni']
self.proposals.append(p)
#object.__setattr__(self.tuningsUsage, 'local', p)
if self.prob.polytomy:
p = STProposal(self)
p.name = 'polytomy'
p.weight = self.prob.polytomy
self.proposals.append(p)
if not self.proposals:
gm.append("No proposals?")
raise Glitch, gm
self.propWeights = []
for p in self.proposals:
self.propWeights.append(p.weight)
self.cumPropWeights = [self.propWeights[0]]
for i in range(len(self.propWeights))[1:]:
self.cumPropWeights.append(self.cumPropWeights[i - 1] + self.propWeights[i])
self.totalPropWeights = sum(self.propWeights)
if self.totalPropWeights < 1e-9:
gm.append("No proposal weights?")
raise Glitch, gm
for p in self.proposals:
self.proposalsHash[p.name] = p
    def _refreshProposalProbsAndTunings(self):
        """Adjust proposals after a restart."""
        gm = ['STMcmc._refreshProposalProbsAndTunings()']
        for p in self.proposals:
            # nni
            if p.name == 'nni':
                #p.weight = self.prob.local * (len(self.tree.nodes) - 1) * fudgeFactor['local']
                p.weight = self.prob.nni
        # Rebuild the flat and cumulative weight lists used for proposal
        # selection by weighted sampling.
        self.propWeights = []
        for p in self.proposals:
            self.propWeights.append(p.weight)
        self.cumPropWeights = [self.propWeights[0]]
        for i in range(len(self.propWeights))[1:]:
            self.cumPropWeights.append(self.cumPropWeights[i - 1] + self.propWeights[i])
        self.totalPropWeights = sum(self.propWeights)
        if self.totalPropWeights < 1e-9:
            gm.append("No proposal weights?")
            raise Glitch, gm
    def writeProposalAcceptances(self):
        """Pretty-print the proposal acceptances."""
        # Nothing to report if no generations have run since the last
        # checkpoint emptied the counters.
        if (self.gen - self.startMinusOne) <= 0:
            print "\nSTMcmc.writeProposalAcceptances() There is no info in memory. "
            print " Maybe it was just emptied after writing to a checkpoint? "
            print "If so, read the checkPoint and get the proposalAcceptances from there."
        else:
            spacer = ' ' * 8
            print "\nProposal acceptances, run %i, for %i gens, from gens %i to %i, inclusive." % (
                self.runNum, (self.gen - self.startMinusOne), self.startMinusOne + 1, self.gen)
            print "%s %20s %10s %13s%8s" % (spacer, 'proposal', 'nProposals', 'acceptance(%)', 'tuning')
            for p in self.proposals:
                print "%s" % spacer,
                print "%20s" % p.name,
                print "%10i" % p.nProposals[0],
                if p.nProposals[0]: # Don't divide by zero
                    print " %5.1f " % (100.0 * float(p.nAcceptances[0]) / float(p.nProposals[0])),
                else:
                    print " - ",
                # Tuning column: '-' when untuned, narrower format for
                # small tuning values.
                if p.tuning == None:
                    print " -",
                elif p.tuning < 2.0:
                    print " %5.3f" % p.tuning,
                else:
                    print "%7.1f" % p.tuning,
                print
        # # Tabulate topology changes, if any were attempted.
        # doTopol = 0
        # p = None
        # try:
        # p = self.proposalsHash['local']
        # except KeyError:
        # pass
        # if p:
        # for tNum in range(self.nChains):
        # if p.nTopologyChangeAttempts[tNum]:
        # doTopol = 1
        # break
        # if doTopol:
        # p = self.proposalsHash['local']
        # print "'Local' proposal-- attempted topology changes"
        # print "%s tempNum nProps nAccepts percent nTopolChangeAttempts nTopolChanges percent" % spacer
        # for tNum in range(self.nChains):
        # print "%s" % spacer,
        # print "%4i " % tNum,
        # print "%9i" % p.nProposals[tNum],
        # print "%8i" % p.nAcceptances[tNum],
        # print " %5.1f" % (100.0 * float(p.nAcceptances[tNum]) / float(p.nProposals[tNum])),
        # print "%20i" % p.nTopologyChangeAttempts[tNum],
        # print "%13i" % p.nTopologyChanges[tNum],
        # print " %5.1f" % (100.0 * float(p.nTopologyChanges[tNum])/float(p.nTopologyChangeAttempts[tNum]))
        # else:
        # print "%sFor the 'local' proposals, there were no attempted" % spacer
        # print "%stopology changes in any of the chains." % spacer
        # Check for aborts.
        # p = None
        # try:
        # p = self.proposalsHash['local']
        # except KeyError:
        # pass
        # if p:
        # if hasattr(p, 'nAborts'):
        # if p.nAborts[0]:
        # print "The 'local' proposal had %i aborts." % p.nAborts[0]
        # print "(Aborts might be due to brLen proposals too big or too small)"
        # if self.constraints:
        # print "(Or, more likely, due to violated constraints.)"
        # else:
        # print "The 'local' proposal had no aborts (either due to brLen proposals"
        # print "too big or too small, or due to violated constraints)."
        # for pN in ['polytomy', 'compLocation', 'rMatrixLocation', 'gdasrvLocation']:
        # p = None
        # try:
        # p = self.proposalsHash[pN]
        # except KeyError:
        # pass
        # if p:
        # if hasattr(p, 'nAborts'):
        # print "The %15s proposal had %5i aborts." % (p.name, p.nAborts[0])
    def writeSwapMatrix(self):
        """Print the chain-swap summary as an nChains x nChains matrix.

        Upper triangle holds the number of swaps proposed between each
        pair of chains; the lower triangle holds the percent of those
        proposals that were accepted.  (Python 2 print statements; the
        trailing commas suppress newlines so each row prints across.)
        """
        print "\nChain swapping, for %i gens, from gens %i to %i, inclusive." % (
            (self.gen - self.startMinusOne), self.startMinusOne + 1, self.gen)
        print " Swaps are presented as a square matrix, nChains * nChains."
        print " Upper triangle is the number of swaps proposed between two chains."
        print " Lower triangle is the percent swaps accepted."
        print " The current tunings.chainTemp is %5.3f\n" % self.tunings.chainTemp
        # Column-index header row.
        print " " * 10,
        for i in range(self.nChains):
            print "%7i" % i,
        print
        # Separator row.
        print " " * 10,
        for i in range(self.nChains):
            print " ----",
        print
        # One matrix row per chain.
        for i in range(self.nChains):
            print " " * 7, "%2i" % i,
            for j in range(self.nChains):
                if i < j: # upper triangle
                    print "%7i" % self.swapMatrix[i][j],
                elif i == j:
                    print " -",
                else:
                    # Accepted/proposed percent; the proposal count for the
                    # (j, i) pair lives in the upper triangle.
                    if self.swapMatrix[j][i] == 0: # no proposals
                        print " -",
                    else:
                        print " %5.1f" % (100.0 * float(self.swapMatrix[i][j]) / float(self.swapMatrix[j][i])),
            print
def _makeChainsAndProposals(self):
"""Make chains and proposals."""
gm = ['STMcmc._makeChainsAndProposals()']
#random.seed(0)
# Make chains, if needed
if not self.chains:
self.chains = []
for chNum in range(self.nChains):
aChain = STChain(self)
aChain.tempNum = chNum # Temperature. Set this way to start, but it changes.
self.chains.append(aChain)
if not self.proposals:
self._makeProposals()
# If we are going to be doing the resolution class prior
# in the polytomy move, we want to pre-compute the logs of
# T_{n,m}. Its a vector with indices (ie m) from zero to
# nTax-2 inclusive.
# if self.proposalsHash.has_ke |
from pyotp.otp import OTP
from pyotp import utils
class HOTP(OTP):
    """Counter-based (HMAC) one-time passwords."""

    def at(self, count):
        """Return the OTP value for the given counter.

        @param [Integer] count counter
        @returns [Integer] OTP
        """
        return self.generate_otp(count)

    def verify(self, otp, counter):
        """Check *otp* against the value expected at *counter*.

        @param [String/Integer] otp the OTP to check against
        @param [Integer] counter the counter of the OTP
        """
        expected = self.at(counter)
        return unicode(otp) == unicode(expected)

    def provisioning_uri(self, name, initial_count=0, issuer_name=None):
        """Build the provisioning URI for this OTP (QR-encodable for the
        Google Authenticator app).

        @param [String] name of the account
        @param [Integer] initial_count starting counter value, defaults to 0
        @param [String] the name of the OTP issuer; this will be the
            organization title of the OTP entry in Authenticator
        @return [String] provisioning uri
        """
        uri_kwargs = dict(initial_count=initial_count,
                          issuer_name=issuer_name)
        return utils.build_uri(self.secret, name, **uri_kwargs)
|
e, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA |
""" drizzled.py: code to allow a serverManager
to provision and start up a drizzled server object
for test execution
"""
# imports
import os
from lib.server_mgmt.server import Server
class dri | zzleServer(Server):
""" represents a drizzle server, its possessions
(datadir, ports, etc), and methods for controlling
and querying it
TODO: create a base server class that contains
standard methods from which we can inherit
Currently there are definitely methods / attr
which are general
"""
    def __init__( self, name, server_manager, code_tree, default_storage_engine
                , server_options, requester, test_executor, workdir_root):
        """Provision this drizzled server: client-binary paths, a block of
        6 ports, working directories, config files, and the initial
        databases + snapshot.  Note that real work (dir creation, database
        initialization) happens here at construction time.
        """
        super(drizzleServer, self).__init__( name
                                           , server_manager
                                           , code_tree
                                           , default_storage_engine
                                           , server_options
                                           , requester
                                           , test_executor
                                           , workdir_root)
        self.preferred_base_port = 9306
        # client files
        self.drizzledump = self.code_tree.drizzledump
        self.drizzle_client = self.code_tree.drizzle_client
        self.drizzleimport = self.code_tree.drizzleimport
        self.drizzleslap = self.code_tree.drizzleslap
        self.server_path = self.code_tree.drizzle_server
        self.drizzle_client_path = self.code_tree.drizzle_client
        self.schemawriter = self.code_tree.schemawriter
        self.trx_reader = self.code_tree.trx_reader
        # Get our ports (a block of 6 consecutive ports starting near
        # preferred_base_port, reserved under this server's name)
        self.port_block = self.system_manager.port_manager.get_port_block( self.name
                                                                         , self.preferred_base_port
                                                                         , 6 )
        self.master_port = self.port_block[0]
        self.drizzle_tcp_port = self.port_block[1]
        self.mc_port = self.port_block[2]
        self.pbms_port = self.port_block[3]
        self.rabbitmq_node_port = self.port_block[4]
        self.json_server_port = self.port_block[5]
        # Generate our working directories
        self.dirset = {'var_%s' %(self.name): {'std_data_ln':( os.path.join(self.code_tree.testdir,'std_data'))
                                               ,'log':None
                                               ,'run':None
                                               ,'tmp':None
                                               ,'master-data': {'local': { 'test':None
                                                                         , 'mysql':None
                                                                         }
                                                               }
                                               }
                      }
        self.workdir = self.system_manager.create_dirset( workdir_root
                                                        , self.dirset)
        self.vardir = self.workdir
        self.tmpdir = os.path.join(self.vardir,'tmp')
        self.rundir = os.path.join(self.vardir,'run')
        self.logdir = os.path.join(self.vardir,'log')
        self.datadir = os.path.join(self.vardir,'master-data')
        self.error_log = os.path.join(self.logdir,'error.log')
        self.pid_file = os.path.join(self.rundir,('%s.pid' %(self.name)))
        self.socket_file = os.path.join(self.vardir, ('%s.sock' %(self.name)))
        if len(self.socket_file) > 107:
            # MySQL has a limitation of 107 characters for socket file path
            # we copy the mtr workaround of creating one in /tmp
            self.logging.verbose("Default socket file path: %s" %(self.socket_file))
            self.socket_file = "/tmp/%s_%s.%s.sock" %(self.system_manager.uuid
                                                     ,self.owner
                                                     ,self.name)
            self.logging.verbose("Changing to alternate: %s" %(self.socket_file))
        self.timer_file = os.path.join(self.logdir,('timer'))
        # Do magic to create a config file for use with the slave
        # plugin
        self.slave_config_file = os.path.join(self.logdir,'slave.cnf')
        self.create_slave_config_file()
        self.snapshot_path = os.path.join(self.tmpdir,('snapshot_%s' %(self.master_port)))
        # We want to use --secure-file-priv = $vardir by default
        # but there are times / tools when we need to shut this off
        if self.no_secure_file_priv:
            self.secure_file_string = ''
        else:
            self.secure_file_string = "--secure-file-priv='%s'" %(self.vardir)
        self.user_string = '--user=root'
        # Side effects: creates schemas and takes the pristine snapshot used
        # to restore state between tests.
        self.initialize_databases()
        self.take_db_snapshot()
        self.logging.debug_class(self)
def report(self):
""" We print out some general useful info """
report_values = [ 'name'
, 'master_port'
, 'drizzle_tcp_port'
, 'mc_port'
, 'pbms_port'
, 'rabbitmq_node_port'
, 'vardir'
, 'status'
]
self.logging.info("%s server:" %(self.owner))
for key in report_values:
value = vars(self)[key]
self.logging.info("%s: %s" %(key.upper(), value))
def get_start_cmd(self):
""" Return the command string that will start up the server
as desired / intended
"""
server_args = [ self.process_server_options()
, "--mysql-protocol.port=%d" %(self.master_port)
, "--mysql-protocol.connect-timeout=60"
, "--innodb.data-file-path=ibdata1:20M:autoextend"
, "--sort-buffer-size=256K"
, "--max-heap-table-size=1M"
, "--mysql-unix-socket-protocol.path=%s" %(self.socket_file)
, "--pid-file=%s" %(self.pid_file)
, "--drizzle-protocol.port=%d" %(self.drizzle_tcp_port)
, "--default-storage-engine=%s" %(self.default_storage_engine)
, "--datadir=%s" %(self.datadir)
, "--tmpdir=%s" %(self.tmpdir)
, self.secure_file_string
, self.user_string
]
if self.gdb:
server_args.append('--gdb')
return self.system_manager.handle_gdb_reqs(self, server_args)
else:
return "%s %s %s & " % ( self.cmd_prefix
, self.server_path
, " ".join(server_args)
)
def get_stop_cmd(self):
""" Return the command that will shut us down """
return "%s --user=root --port=%d --connect-timeout=5 --silent --password= --shutdown " %(self.drizzle_client_path, self.master_port)
def get_ping_cmd(self):
"""Return the command string that will
ping / check if the server is alive
"""
return "%s --ping --port=%d --user=root" % (self.drizzle_client_path, self.master_port)
def is_started(self):
""" Determine if the server is up and running -
this may vary from server type to server type
"""
# We experiment with waiting for a pid file |
e Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Provides a layer of abstraction for the issue tracker API."""
import json
import logging
from apiclient import discovery
from apiclient import errors
_DISCOVERY_URI = ('https://monorail-prod.appspot.com'
'/_ah/api/discovery/v1/apis/{api}/{apiVersion}/rest')
STATUS_DUPLICATE = 'Duplicate'
class IssueTrackerService(object):
"""Class for updating bug issues."""
def __init__(self, http):
"""Initializes an object for adding and updating bugs on the issue tracker.
This object can be re-used to make multiple requests without calling
apliclient.discovery.build multiple times.
This class makes requests to the Monorail API.
API explorer: https://goo.gl/xWd0dX
Args:
http: A Http object that requests will be made through; this should be an
Http object that's already authenticated via OAuth2.
"""
self._service = discovery.build(
'monorail', 'v1', discoveryServiceUrl=_DISCOVERY_URI, http=http)
def AddBugComment(self, bug_id, comment, status=None, cc_list=None,
merge_issue=None, labels=None, owner=None, send_email=True):
"""Adds a comment with the bisect results to the given bug.
Args:
bug_id: Bug ID of the issue to update.
comment: Bisect results information.
status: A string status for bug, e.g. Assigned, Duplicate, WontFix, etc.
cc_list: List of email addresses of users to add to the CC list.
merge_issue: ID of the issue to be merged into; specifying this option
implies that the status should be "Duplicate".
labels: List of labels for bug.
owner: Owner of the bug.
send_email: True to send email to bug cc list, False otherwise.
Returns:
True if successful, False otherwise.
"""
if not bug_id or bug_id < 0:
return False
body = {'content': comment}
updates = {}
# Mark issue as duplicate when relevant bug ID is found in the datastore.
# Avoid marking an issue as duplicate of itself.
if merge_issue and int(merge_issue) != bug_id:
status = STATUS_DUPLICATE
updates['mergedInto'] = merge_issue
logging.info('Bug %s marked as duplicate of %s', bug_id, merge_issue)
if status:
updates['status'] = status
if cc_list:
updates['cc'] = cc_list
if labels:
updates['labels'] = labels
if owner:
updates['owner'] = owner
body['updates'] = updates
return self._MakeCommentRequest(bug_id, body, send_email=send_email)
def List(self, **kwargs):
"""Makes a request to the issue tracker to list bugs."""
request = self._service.issues().list(projectId='chromium', **kwargs)
return self._ExecuteRequest(request)
def GetIssue(self, issue_id):
"""Makes a request to the issue tracker to get an issue."""
request = self._service.issues().get(projectId='chromium', issueId=issue_id)
return self._ExecuteRequest(request)
def _Mak | eCommentRequest(self, bug_id, body, retry=True, send_email=False):
"""Makes a request to the issue tracker to update a bug.
Args:
bug_id: Bug ID of the issue.
body: Dict of comment parameters.
retry: True to retry on failure, False otherwise.
send_email: True to send email to bug cc list, False otherwise.
Returns:
True if successful poste | d a comment or issue was deleted. False if
making a comment failed unexpectedly.
"""
request = self._service.issues().comments().insert(
projectId='chromium',
issueId=bug_id,
sendEmail=send_email,
body=body)
try:
if self._ExecuteRequest(request, ignore_error=False):
return True
except errors.HttpError as e:
reason = _GetErrorReason(e)
# Retry without owner if we cannot set owner to this issue.
if retry and 'The user does not exist' in reason:
_RemoveOwnerAndCC(body)
return self._MakeCommentRequest(bug_id, body, retry=False)
# This error reason is received when issue is deleted.
elif 'User is not allowed to view this issue' in reason:
logging.warning('Unable to update bug %s with body %s', bug_id, body)
return True
logging.error('Error updating bug %s with body %s', bug_id, body)
return False
def NewBug(self, title, description, labels=None, components=None,
owner=None, cc=None):
"""Creates a new bug.
Args:
title: The short title text of the bug.
description: The body text for the bug.
labels: Starting labels for the bug.
components: Starting components for the bug.
owner: Starting owner account name.
cc: CSV of email addresses to CC on the bug.
Returns:
The new bug ID if successfully created, or None.
"""
body = {
'title': title,
'summary': title,
'description': description,
'labels': labels or [],
'components': components or [],
'status': 'Assigned' if owner else 'Untriaged',
}
if owner:
body['owner'] = {'name': owner}
if cc:
body['cc'] = [{'name': account.strip()}
for account in cc.split(',') if account.strip()]
return self._MakeCreateRequest(body)
def _MakeCreateRequest(self, body):
"""Makes a request to create a new bug.
Args:
body: The request body parameter dictionary.
Returns:
A bug ID if successful, or None otherwise.
"""
request = self._service.issues().insert(
projectId='chromium',
sendEmail=True,
body=body)
logging.info('Making create issue request with body %s', body)
response = self._ExecuteRequest(request)
if response and 'id' in response:
return response['id']
logging.error('Failed to create new bug; response %s', response)
return None
def GetIssueComments(self, bug_id):
"""Gets all the comments for the given bug.
Args:
bug_id: Bug ID of the issue to update.
Returns:
A list of comments
"""
if not bug_id or bug_id < 0:
return None
response = self._MakeGetCommentsRequest(bug_id)
if not response:
return None
return [{
'author': r['author'].get('name'),
'content': r['content'],
'published': r['published']
} for r in response.get('items')]
def GetLastBugCommentsAndTimestamp(self, bug_id):
"""Gets last updated comments and timestamp in the given bug.
Args:
bug_id: Bug ID of the issue to update.
Returns:
A dictionary with last comment and timestamp, or None on failure.
"""
if not bug_id or bug_id < 0:
return None
response = self._MakeGetCommentsRequest(bug_id)
if response and all(v in response.keys()
for v in ['totalResults', 'items']):
bug_comments = response.get('items')[response.get('totalResults') - 1]
if bug_comments.get('content') and bug_comments.get('published'):
return {
'comment': bug_comments.get('content'),
'timestamp': bug_comments.get('published')
}
return None
def _MakeGetCommentsRequest(self, bug_id):
"""Makes a request to the issue tracker to get comments in the bug."""
# TODO (prasadv): By default the max number of comments retrieved in
# one request is 100. Since bisect-fyi jobs may have more then 100
# comments for now we set this maxResults count as 10000.
# Remove this max count once we find a way to clear old comments
# on FYI issues.
request = self._service.issues().comments().list(
projectId='chromium',
issueId=bug_id,
maxResults=10000)
return self._ExecuteRequest(request)
def _ExecuteRequest(self, request, ignore_error=True):
"""Makes a request to the issue tracker.
Args:
request: The request object, which has a execute method.
Returns:
The response if there was one, or else None.
"""
try:
response = request.execute()
return response
except errors.HttpError as |
item_id = 498 | 6168
user_id = 20000
item_category = 9656
time = | 31
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2016-12-29 19:01
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    """Adds name-based default ordering and two uniqueness constraints."""

    dependencies = [
        ('books', '0003_initial_subjects_languages_creatortypes'),
    ]

    # Every one of these models receives the identical {'ordering': ['name']}
    # option, so the AlterModelOptions operations are generated in a
    # comprehension; the unique_together constraints follow.
    operations = [
        migrations.AlterModelOptions(
            name=model_name,
            options={'ordering': ['name']},
        )
        for model_name in ('creatortype', 'language', 'owninginstitution',
                           'personbookrelationshiptype', 'publisher',
                           'subject')
    ] + [
        migrations.AlterUniqueTogether(
            name='booklanguage',
            unique_together=set([('book', 'language')]),
        ),
        migrations.AlterUniqueTogether(
            name='booksubject',
            unique_together=set([('subject', 'book')]),
        ),
    ]
|
"""
SH shell
"""
import os
import os.path
import pipes
import subprocess
from rez.config import config
from rez.utils.platform_ import platform_
from rez.shells import Shell, UnixShell
from rez.rex import EscapedString
class SH(UnixShell):
    """Bourne shell ('sh') implementation of the rez UnixShell interface."""
    norc_arg = '--noprofile'
    histfile = "~/.bash_history"
    histvar = "HISTFILE"
    _executable = None
    @property
    def executable(cls):
        # NOTE(review): despite the 'cls' parameter name this is an ordinary
        # instance property, so the found path is cached on the receiving
        # object via the assignment below — confirm whether class-level
        # caching was intended.
        if cls._executable is None:
            cls._executable = Shell.find_executable('sh')
        return cls._executable
    @classmethod
    def name(cls):
        # Registry/plugin name of this shell.
        return 'sh'
    @classmethod
    def file_extension(cls):
        return 'sh'
    @classmethod
    def get_syspaths(cls):
        # Determine the system PATH by spawning a bare (PATH-less, rc-less)
        # sh that echoes $PATH, then append any os.defpath entries the probe
        # missed.  The result is cached on the class in cls.syspaths.
        if not cls.syspaths:
            cmd = "cmd=`which %s`; unset PATH; $cmd %s %s 'echo __PATHS_ $PATH'" \
                % (cls.name(), cls.norc_arg, cls.command_arg)
            p = subprocess.Popen(cmd, stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE, shell=True)
            out_, err_ = p.communicate()
            if p.returncode:
                paths = []
            else:
                # The __PATHS_ marker line carries the probed $PATH value.
                lines = out_.split('\n')
                line = [x for x in lines if "__PATHS_" in x.split()][0]
                paths = line.strip().split()[-1].split(os.pathsep)
            for path in os.defpath.split(os.path.pathsep):
                if path not in paths:
                    paths.append(path)
            cls.syspaths = [x for x in paths if x]
        return cls.syspaths
    @classmethod
    def startup_capabilities(cls, rcfile=False, norc=False, stdin=False,
                             command=False):
        # sh has no rcfile concept, and an explicit command overrules stdin.
        cls._unsupported_option('rcfile', rcfile)
        rcfile = False
        if command is not None:
            cls._overruled_option('stdin', 'command', stdin)
            stdin = False
        return (rcfile, norc, stdin, command)
    @classmethod
    def get_startup_sequence(cls, rcfile, norc, stdin, command):
        _, norc, stdin, command = \
            cls.startup_capabilities(rcfile, norc, stdin, command)
        envvar = None
        files = []
        # Startup files only apply to interactive sessions (no command and
        # no stdin-driven run).
        if not ((command is not None) or stdin):
            if not norc:
                for file in ("~/.profile",):
                    if os.path.exists(os.path.expanduser(file)):
                        files.append(file)
            # sh also sources the file named by $ENV, when it exists.
            envvar = 'ENV'
            path = os.getenv(envvar)
            if path and os.path.isfile(os.path.expanduser(path)):
                files.append(path)
        return dict(
            stdin=stdin,
            command=command,
            do_rcfile=False,
            envvar=envvar,
            files=files,
            bind_files=[],
            source_bind_files=False)
    def _bind_interactive_rez(self):
        if config.set_prompt and self.settings.prompt:
            # Preserve the user's original prompt once, then prefix or
            # suffix the rez prompt onto it.
            self._addline('if [ -z "$REZ_STORED_PROMPT" ]; then export REZ_STORED_PROMPT=$PS1; fi')
            if config.prefix_prompt:
                cmd = 'export PS1="%s $REZ_STORED_PROMPT"'
            else:
                cmd = 'export PS1="$REZ_STORED_PROMPT" %s'
            # NOTE(review): '\[' and '\e' are shell prompt escapes; Python
            # leaves these unknown backslash escapes in the literal (newer
            # Pythons warn about them) — confirm before reformatting.
            self._addline(cmd % "\[\e[1m\]$REZ_ENV_PROMPT\[\e[0m\]")
    def setenv(self, key, value):
        # Emits: export KEY=<escaped value>
        value = self.escape_string(value)
        self._addline('export %s=%s' % (key, value))
    def unsetenv(self, key):
        self._addline("unset %s" % key)
    def alias(self, key, value):
        # Implemented as an exported shell function so arguments ("$@")
        # pass through to the aliased command.
        value = EscapedString.disallow(value)
        cmd = 'function {key}() {{ {value} "$@"; }};export -f {key};'
        self._addline(cmd.format(key=key, value=value))
    def source(self, value):
        value = self.escape_string(value)
        self._addline('. %s' % value)
    def escape_string(self, value):
        # Literal segments are single-quoted (via pipes.quote); segments
        # that must keep variable expansion are double-quoted with
        # backslash and double-quote escaping.
        value = EscapedString.promote(value)
        value = value.expanduser()
        result = ''
        for is_literal, txt in value.strings:
            if is_literal:
                txt = pipes.quote(txt)
                if not txt.startswith("'"):
                    txt = "'%s'" % txt
            else:
                txt = txt.replace('\\', '\\\\')
                txt = txt.replace('"', '\\"')
                txt = '"%s"' % txt
            result += txt
        return result
    def _saferefenv(self, key):
        # No-op: plain sh needs no special handling to reference a possibly
        # unset variable.
        pass
def register_plugin():
    """Expose the SH shell plugin on every platform except Windows."""
    if platform_.name == "windows":
        return None
    return SH
|
"""
Solution to simple TensorFlow exercises
For the problems
"""
import tensorflow as tf
###############################################################################
# 1a: Create two random 0-d tensors x and y of any distribution.
# Create a TensorFlow object that returns x + y if x > y, and x - y otherwise.
# Hint: look up tf.cond()
# I do the first problem for you
###############################################################################
x = tf.random_uniform([]) # Empty array as shape creates a scalar.
y = tf.random_uniform([])
out = tf.cond(tf.greater(x, y), lambda: tf.add(x, y), lambda: tf.subtract(x, y))
###############################################################################
# 1b: Create two 0-d tensors x and y randomly selected from the range [-1, 1).
# Return x + y if x < y, x - y if x > y, 0 otherwise.
# Hint: Look up tf.case().
###############################################################################
x = tf.random_uniform([], -1, 1, dtype=tf.float32)
y = tf.random_uniform([], -1, 1, dtype=tf.float32)
out = tf.case({tf.less(x, y): lambda: tf.add(x, y),
tf.greater(x, y): lambda: tf.subtract(x, y)},
default=lambda: tf.constant(0.0), exclusive=True)
print(x)
sess = tf.InteractiveSession()
print(sess.run(x))
###############################################################################
# 1c: Create the tensor x of the value [[0, -2, -1], [0, 1, 2]]
# and y as a tensor of zeros with the same shape as x.
# Return a boolean tensor that yields Trues if x equals y element-wise.
# Hint: Look up tf.equal().
###############################################################################
x = tf.constant([[0, -2, -1], [0, 1, 2]])
y = tf.zeros_like(x)
out = tf.equal(x, y)
###############################################################################
# 1d: Create the tensor x of value
# [29.05088806, 27.61298943, 31.19073486, 29.35532951,
# 30.97266006, 26.67541885, 38.08450317, 20.74983215,
# 34.94445419, 34.45999146, 29.06485367, 36.01657104,
# 27.88236427, 20.56035233, 30.20379066, 29.51215172,
# 33.71149445, 28.59134293, 36.05556488, 28.66994858].
# Get the indices of elements in x whose values are greater than 30.
# Hint: Use tf.where().
# Then extract elements whose values are greater than 30.
# Hint: Use tf.gather().
###############################################################################
x = tf.constant([29.05088806, 27.61298943, 31.19073486, 29.35532951,
30.97266006, 26.67541885, 38.08450317, 20.74983215,
34.94445419, 34.45999146, 29.06485367, 36.01657104,
27.88236427, 20.56035233, 30.20379066, 29.51215172,
33.71149445, 28.59134293, 36.05556488, 28.66994858])
indices = tf.where(x > 30)
out = tf.gather(x, indices)
###############################################################################
# 1e: Create a diagnoal 2-d tensor of size 6 x 6 with the diagonal values of 1,
# 2, ..., 6
# Hint: Use tf.range() and tf.diag().
###############################################################################
values = tf.range(1, 7)
out = tf.diag(values)
########## | #####################################################################
# 1f: Create a random 2-d tensor of s | ize 10 x 10 from any distribution.
# Calculate its determinant.
# Hint: Look at tf.matrix_determinant().
###############################################################################
m = tf.random_normal([10, 10], mean=10, stddev=1)
out = tf.matrix_determinant(m)
###############################################################################
# 1g: Create tensor x with value [5, 2, 3, 5, 10, 6, 2, 3, 4, 2, 1, 1, 0, 9].
# Return the unique elements in x
# Hint: use tf.unique(). Keep in mind that tf.unique() returns a tuple.
###############################################################################
x = tf.constant([5, 2, 3, 5, 10, 6, 2, 3, 4, 2, 1, 1, 0, 9])
unique_values, indices = tf.unique(x)
###############################################################################
# 1h: Create two tensors x and y of shape 300 from any normal distribution,
# as long as they are from the same distribution.
# Use tf.cond() to return:
# - The mean squared error of (x - y) if the average of all elements in (x - y)
# is negative, or
# - The sum of absolute value of all elements in the tensor (x - y) otherwise.
# Hint: see the Huber loss function in the lecture slides 3.
###############################################################################
x = tf.random_normal([300], mean=5, stddev=1)
y = tf.random_normal([300], mean=5, stddev=1)
average = tf.reduce_mean(x - y)
def f1(): return tf.reduce_mean(tf.square(x - y))
def f2(): return tf.reduce_sum(tf.abs(x - y))
out = tf.cond(average < 0, f1, f2) |
e(self):
a = np.array([[8.0, 2.0, 2.0], [1.0, 0.5, 0.25]])
res = np.divide.reduce(a, axis=0)
assert_equal(res, [8.0, 4.0, 8.0])
res = np.divide.reduce(a, axis=1)
assert_equal(res, [2.0, 8.0])
res = np.divide.reduce(a, axis=())
assert_equal(res, a)
assert_raises(ValueError, np.divide.reduce, a, axis=(0, 1))
def test_reduce_zero_axis(self):
# If we have a n x m array and do a reduction with axis=1, then we are
# doing n reductions, and each reduction takes an m-element array. For
# a reduction operation without an identity, then:
# n > 0, m > 0: fine
# n = 0, m > 0: fine, doing 0 reductions of m-element arrays
# n > 0, m = 0: can't reduce a 0-element array, ValueError
# n = 0, m = 0: can't reduce a 0-element array, ValueError (for
# consistency with the above case)
# This test doesn't actually look at return values, it just checks to
# make sure that error we get an error in exactly those cases where we
# expect one, and assumes the calculations themselves are done
# correctly.
def ok(f, *args, **kwargs):
f(*args, **kwargs)
def err(f, *args, **kwargs):
assert_raises(ValueError, f, *args, **kwargs)
def t(expect, func, n, m):
expect(func, np.zeros((n, m)), axis=1)
expect(func, np.zeros((m, n)), axis=0)
expect(func, np.zeros((n // 2, n // 2, m)), axis=2)
expect(func, np.zeros((n // 2, m, n // 2)), axis=1)
expect(func, np.zeros((n, m // 2, m // 2)), axis=(1, 2))
expect(func, np.zeros((m // 2, n, m // 2)), axis=(0, 2))
expect(func, np.zeros((m // 3, m // 3, m // 3,
n // 2, n // 2)),
axis=(0, 1, 2))
# Check what happens if the inner (resp. outer) dimensions are a
# mix of zero and non-zero:
expect(func, np.zeros((10, m, n)), axis=(0, 1))
expect(func, np.zeros((10, n, m)), axis=(0, 2))
expect(func, np.zeros((m, 10, n)), axis=0)
expect(func, np.zeros((10, m, n)), axis=1)
expect(func, np.zeros((10, n, m)), axis=2)
# np.maximum is just an arbitrary ufunc with no reduction identity
assert_equal(np.maximum.identity, None)
t(ok, np.maximum.reduce, 30, 30)
t(ok, np.maximum.reduce, 0, 30)
t(err, np.maximum.reduce, 30, 0)
t(err, np.maximum.reduce, 0, 0)
err(np.maximum.reduce, [])
np.maximum.reduce(np.zeros((0, 0)), axis=())
# all of the combinations are fine for a reduction that has an
# identity
t(ok, np.add.reduce, 30, 30)
t(ok, np.add.reduce, 0, 30)
t(ok, np.add.reduce, 30, 0)
t(ok, np.add.reduce, 0, 0)
np.add.reduce([])
np.add.reduce(np.zeros((0, 0)), axis=())
# OTOH, accumulate always makes sense for any combination of n and m,
# because it maps an m-element array to an m-element array. These
# tests are simpler because accumulate doesn't accept multiple axes.
for uf in (np.maximum, np.add):
uf.accumulate(np.zeros((30, 0)), axis=0)
uf.accumulate(np.zeros((0, 30)), axis=0)
uf.accumulate(np.zeros((30, 30)), axis=0)
uf.accumulate(np.zeros((0, 0)), axis=0)
def test_safe_casting(self):
# In old versions of numpy, in-place operations used the 'unsafe'
# casting rules. In versions >= 1.10, 'same_kind' is the
# default and an exception is raised instead of a warning.
# when 'same_kind' is not satisfied.
a = np.array([1, 2, 3], dtype=int)
# Non-in-place addition is fine
assert_array_equal(assert_no_warnings(np.add, a, 1.1),
[2.1, 3.1, 4.1])
assert_raises(TypeError, np.add, a, 1.1, out=a)
def add_inplace(a, b):
a += b
assert_raises(TypeError, add_inplace, a, 1.1)
# Make sure that explicitly overriding the exception is allowed:
assert_no_warnings(np.add, a, 1.1, out=a, casting="unsafe")
assert_array_equal(a, [2, 3, 4])
def test_ufunc_custom_out(self):
# Test ufunc with built in input types and custom output type
a = np.array([0, 1, 2], dtype='i8')
b = np.array([0, 1, 2], dtype='i8')
c = np.empty(3, dtype=_rational_tests.rational)
# Output must be specified so numpy knows what
# ufunc signature to look for
result = _rational_tests.test_add(a, b, c)
target = np.array([0, 2, 4], dtype=_rational_tests.rational)
assert_equal(result, target)
# The new resolution means that we can (usually) find custom loops
# as long as they match exactly:
result = _rational_tests.test_add(a, b)
assert_equal(result, target)
# This works even more generally, so long the default common-dtype
# promoter works out:
result = _rational_tests.test_add(a, b.astype(np.uint16), out=c)
assert_equal(result, target)
# But, it can be fooled, e.g. (use scalars, which forces legacy
# type resolution to kick in, which then fails):
with assert_raises(TypeError):
_rational_tests.test_add(a, np.uint16(2))
def test_operand_flags(self):
a = np.arange(16, dtype='l').reshape(4, 4)
b = np.arange(9, dtype='l').reshape(3, 3)
opflag_tests.inplace_add(a[:-1, :-1], b)
assert_equal(a, np.array([[0, 2, 4, 3], [7, 9, 11, 7],
[14, 16, 18, 11], [12, 13, 14, 15]], dtype='l'))
a = np.array(0)
opflag_tests.inplace_add(a, 3)
assert_equal(a, 3)
opflag_tests.inplace_add(a, [3, 4])
assert_equal(a, 10)
def test_struct_ufunc(self):
import numpy.core._struct_ufunc_tests as struct_ufunc
a = np.array([(1, 2, 3)], dtype='u8,u8,u8')
b = np.array([(1, 2, 3)], dtype='u8,u8,u8')
result = struct_ufunc.add_triplet(a, b)
assert_equal(result, np.array([(2, 4, 6)], dtype='u8,u8,u8'))
assert_raises(RuntimeError, struct_ufunc.register_fail)
def test_custom_ufunc(self):
a = np.array(
[_rational_tests.rational(1, 2),
_rational_tests.rational(1, 3),
_rational_tests.rational(1, 4)],
dtype=_rational_tests.rational)
b = np.array(
[_rational_tests.rational(1, 2),
_rational_tests.rational(1, 3),
_rational_tests.rational(1, 4)],
dtype=_rational_tes | ts.rational)
result = _rational_tests.test_add_rationals(a, b)
expected = np.array(
[_rational_tests.rational(1),
_rational_tests.rational(2, 3),
_rational_tests.rational(1, 2)],
dtype=_rational_tests.rational)
assert_equal(result, expected)
def test_custom_ufunc_forced_sig(self):
# gh-9351 - looking for a non-first userloop would previously hang
| with assert_raises(TypeError):
np.multiply(_rational_tests.rational(1), 1,
signature=(_rational_tests.rational, int, None))
def test_custom_array_like(self):
class MyThing:
__array_priority__ = 1000
rmul_count = 0
getitem_count = 0
def __init__(self, shape):
self.shape = shape
def __len__(self):
return self.shape[0]
def __getitem__(self, i):
MyThing.getitem_count += 1
if not isinstance(i, tuple):
i = (i,)
if len(i) > self.ndim:
raise IndexError("boo")
return MyThing(self.shape[len(i):])
def __rmul__(self, other):
MyThing.rmul_count += 1
return self
np.float64(5)*MyThing((3, 3))
assert_(MyThing.rmul_count == 1, MyThing.rmul_count)
assert_(MyThing.getitem_count <= 2, MyThing.getitem |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.