prefix stringlengths 0 918k | middle stringlengths 0 812k | suffix stringlengths 0 962k |
|---|---|---|
{'old_scope_2/var3': var3._get_variable_list()})
```
Args:
ckpt_dir_or_file: Directory with checkpoints file or path to checkpoint.
assignment_map: Dict, where keys are names of the variables in the
checkpoint and values are current variables or names of current variables
(in default graph).
Raises:
tf.errors.OpError: If missing checkpoints or tensors in checkpoints.
ValueError: If missing variables in current graph.
"""
if distribution_strategy_context.get_cross_tower_context():
_init_from_checkpoint(None, ckpt_dir_or_file, assignment_map)
else:
distribution_strategy_context.get_tower_context().merge_call(
_init_from_checkpoint, ckpt_dir_or_file, assignment_map)
def _init_from_checkpoint(_, ckpt_dir_or_file, assignment_map):
  """See `init_from_checkpoint` for documentation."""
  ckpt_file = _get_checkpoint_filename(ckpt_dir_or_file)
  reader = load_checkpoint(ckpt_dir_or_file)
  # Maps checkpoint tensor names -> shapes; used both for existence checks
  # and shape-compatibility validation below.
  variable_map = reader.get_variable_to_shape_map()
  # sorted() makes the initialization order deterministic across runs.
  for tensor_name_in_ckpt, current_var_or_name in sorted(
      six.iteritems(assignment_map)):
    var = None
    # Check if this is Variable object or list of Variable objects (in case of
    # partitioned variables).
    if _is_variable(current_var_or_name) or (
        isinstance(current_var_or_name, list)
        and all(_is_variable(v) for v in current_var_or_name)):
      var = current_var_or_name
    else:
      store_vars = vs._get_default_variable_store()._vars  # pylint:disable=protected-access
      # Check if this variable is in var_store.
      var = store_vars.get(current_var_or_name, None)
      # Also check if variable is partitioned as list.
      if var is None:
        var = _collect_partitioned_variable(current_var_or_name, store_vars)
    if var is not None:
      # If 1 to 1 mapping was provided, find variable in the checkpoint.
      if tensor_name_in_ckpt not in variable_map:
        raise ValueError("Tensor %s is not found in %s checkpoint %s" % (
            tensor_name_in_ckpt, ckpt_dir_or_file, variable_map
        ))
      if _is_variable(var):
        # Additional at-call-time checks.
        if not var.get_shape().is_compatible_with(
            variable_map[tensor_name_in_ckpt]):
          raise ValueError(
              "Shape of variable %s (%s) doesn't match with shape of "
              "tensor %s (%s) from checkpoint reader." % (
                  var.name, str(var.get_shape()),
                  tensor_name_in_ckpt, str(variable_map[tensor_name_in_ckpt])
              ))
        var_name = var.name
      else:
        # Partitioned variable: log all partition names joined by comma.
        var_name = ",".join([v.name for v in var])
      _set_variable_or_list_initializer(var, ckpt_file, tensor_name_in_ckpt)
      logging.debug("Initialize variable %s from checkpoint %s with %s",
                    var_name, ckpt_dir_or_file, tensor_name_in_ckpt)
    else:
      # `current_var_or_name` is a name (string) that was not found as a
      # single variable; treat it as a scope -> scope mapping.
      scopes = ""
      # TODO(vihanjain): Support list of 'current_var_or_name' here.
      if "/" in current_var_or_name:
        scopes = current_var_or_name[:current_var_or_name.rindex("/")]
      if not tensor_name_in_ckpt.endswith("/"):
        raise ValueError(
            "Assignment map with scope only name {} should map to scope only "
            "{}. Should be 'scope/': 'other_scope/'.".format(
                scopes, tensor_name_in_ckpt))
      # If scope to scope mapping was provided, find all variables in the scope
      # and create variable to variable mapping.
      scope_variables = set()
      for var_name in store_vars:
        if not scopes or var_name.startswith(scopes + "/"):
          # Consume /part_ if partitioned variable.
          if "/part_" in var_name:
            var_name = var_name[:var_name.index("/part_")]
          scope_variables.add(var_name)
      for var_name in sorted(scope_variables):
        # Lookup name with specified prefix and suffix from current variable.
        # If tensor_name given is '/' (root), don't use it for full name.
        full_tensor_name = var_name[len(scopes):]
        if current_var_or_name != "/":
          full_tensor_name = full_tensor_name[1:]
        if tensor_name_in_ckpt != "/":
          full_tensor_name = tensor_name_in_ckpt + full_tensor_name
        # Remove trailing '/', if any, in the full_tensor_name
        if full_tensor_name.endswith("/"):
          full_tensor_name = full_tensor_name[:-1]
        if full_tensor_name not in variable_map:
          raise ValueError(
              "Tensor %s (%s in %s) is not found in %s checkpoint" % (
                  full_tensor_name, var_name[len(scopes) + 1:],
                  tensor_name_in_ckpt, ckpt_dir_or_file
              ))
        var = store_vars.get(var_name, None)
        if var is None:
          var = _collect_partitioned_variable(var_name, store_vars)
        _set_variable_or_list_initializer(var, ckpt_file, full_tensor_name)
        logging.debug("Initialize variable %s from checkpoint %s with %s",
                      var_name, ckpt_dir_or_file, full_tensor_name)
def _get_checkpoint_filename(ckpt_dir_or_file):
  """Returns checkpoint filename given directory or specific checkpoint file."""
  # A specific file path is returned unchanged; a directory means "use the
  # most recent checkpoint inside it".
  if not gfile.IsDirectory(ckpt_dir_or_file):
    return ckpt_dir_or_file
  return checkpoint_management.latest_checkpoint(ckpt_dir_or_file)
def _set_checkpoint_initializer(variable,
                                ckpt_file,
                                tensor_name,
                                slice_spec,
                                name="checkpoint_initializer"):
  """Overrides given variable's initialization op.

  Sets variable initializer to assign op that initializes variable from tensor's
  value in the checkpoint.

  Args:
    variable: `tf.Variable` object.
    ckpt_file: string, full path of the checkpoint.
    tensor_name: Name of the tensor to load from the checkpoint.
    slice_spec: Slice specification for loading partitioned tensors.
    name: Name of the operation.
  """
  base_type = variable.dtype.base_dtype
  # Do not colocate with variable since RestoreV2 op only runs on CPU and
  # colocation will force variable (and other ops that colocate with variable)
  # to be on CPU as well. It is okay to place the variable's initializer op on
  # CPU since it will only be run once at the start.
  with ops.device(variable.device), ops.device("/cpu:0"):
    restore_op = io_ops.restore_v2(
        ckpt_file, [tensor_name], [slice_spec], [base_type], name=name)[0]

    # Route the restored tensor through the variable's SaveableObject so any
    # custom restore logic (e.g. for resource variables) is applied.
    names_to_saveables = saver.BaseSaverBuilder.OpListToDict([variable])
    saveable_objects = []
    for name, op in names_to_saveables.items():
      for s in saver.BaseSaverBuilder.SaveableObjectsForOp(op, name):
        saveable_objects.append(s)

    assert len(saveable_objects) == 1  # Should be only one variable.
    init_op = saveable_objects[0].restore([restore_op], restored_shapes=None)

    # Replace the variable's initializer with the restore op so that
    # `variable.initializer` now loads from the checkpoint.
    # pylint:disable=protected-access
    variable._initializer_op = init_op
    restore_op.set_shape(variable.shape)
    variable._initial_value = restore_op
    # pylint:enable=protected-access
def _set_variable_or_list_initializer(variable_or_list, ckpt_file,
tensor_name):
"""Overrides initialization op of given variable or list of variables.
Calls `_set_checkpoint_initializer` for each variable in the given list of
variables.
Args:
variable_or_list: `tf.Variable` object or a list of `tf.Variable` objects.
ckpt_file: string, full path of the checkpoint.
tensor_name: Name of the tensor to load from the checkpoint.
Raises:
ValueError: if all objects in `variable_or_list` are not partitions of the
same large variable.
"""
if isinstance(variable_or_list, (list, tuple) | ):
# A set of slices.
slice_name = None
for v in variable_or_list:
slice_info = v._save_slice_info # pylint:disable=protected-access
if slice_name is None:
slice_name = slice_info.full_name
elif slice_name != slice_info.full_name:
raise ValueError("Slices must all be from the same tensor: %s != %s" %
| (slice_name, slice_info.full_name))
_set_checkpoint_initializer(v, ckpt_file, tensor_nam |
_by_index(node, "ports_out", port_index)
def get_node_port_in_name_by_index(node, port_index):
return _get_node_port_name_by_index(node, "ports_in", port_index)
class FlowTypePrinter(object):
    "Print a 'struct sol_flow_node_type'"
    # NOTE(review): the source of this block contained extraction artifacts
    # (stray ' | ' separators) inside to_string()'s format string; the string
    # has been reconstructed to match its argument list — confirm against the
    # upstream soletta gdb script.

    def __init__(self, val):
        self.val = val
        # Cached 'const struct sol_flow_port_type_in *' gdb type, used to tell
        # input ports (which have a process() callback) from output ports.
        self.port_in_type = gdb.lookup_type("struct sol_flow_port_type_in").const().pointer()

    def display_hint(self):
        return 'sol_flow_node_type'

    def _port_description_to_string(self, index, port, port_type):
        # One paragraph per port: index, name, data type, description, and
        # whichever callbacks are non-NULL.
        s = ("\n %d %s (%s)\n"
             " description: %s\n") % (
            index,
            port["name"].string(),
            port["data_type"].string(),
            port["description"].string())
        if port_type["connect"]:
            s += " connect(): %s\n" % (port_type["connect"],)
        if port_type["disconnect"]:
            s += " disconnect(): %s\n" % (port_type["disconnect"],)
        # process() only exists on input port types.
        if port_type.type == self.port_in_type and port_type["process"]:
            s += " process(): %s\n" % (port_type["process"],)
        return s

    def _option_description_to_string(self, option):
        data_type = option["data_type"].string()
        defvalue_member = defvalue_member_map.get(data_type)
        if not defvalue_member:
            defvalue = ""
        else:
            defvalue = option["defvalue"][defvalue_member]
            if data_type == "string":
                if defvalue:
                    defvalue = defvalue.string()
                else:
                    defvalue = "NULL"
            defvalue = " (default=%s)" % (defvalue,)
        return "\n %s(%s) \"%s\"%s," % (
            option["name"].string(),
            data_type,
            option["description"].string(),
            defvalue)

    def _ports_description_to_string(self, array, get_port_type):
        """Join descriptions of every port in the NULL-terminated array."""
        if not array:
            return ""
        i = 0
        r = []
        while array[i]:
            port_type = get_port_type(i)
            r.append(self._port_description_to_string(i, array[i], port_type))
            i += 1
        if i > 0:
            r.append("\n ")
        return "".join(r)

    def _options_description_to_string(self, opts):
        """Join descriptions of every option member (name == NULL terminates)."""
        if not opts:
            return ""
        opts = opts.dereference()
        array = opts["members"]
        if not array:
            return ""
        i = 0
        r = []
        while array[i]["name"]:
            r.append(self._option_description_to_string(array[i]))
            i += 1
        if i > 0:
            r.append("\n ")
        return "".join(r)

    def to_string(self):
        type = self.val
        tdesc = get_type_description(type)
        if tdesc:
            # Resolve the accessor functions in the inferior so each port's
            # concrete port-type struct can be fetched by index.
            get_port_in = gdb.parse_and_eval("sol_flow_node_type_get_port_in")
            get_port_out = gdb.parse_and_eval("sol_flow_node_type_get_port_out")
            p_type = type.address
            ports_in = self._ports_description_to_string(tdesc["ports_in"], lambda idx: get_port_in(p_type, idx))
            ports_out = self._ports_description_to_string(tdesc["ports_out"], lambda idx: get_port_out(p_type, idx))
            options = self._options_description_to_string(tdesc["options"])
            return "%s=%s" \
                "\n name=\"%s\"," \
                "\n category=\"%s\"," \
                "\n description=\"%s\"," \
                "\n ports_in={%s}," \
                "\n ports_out={%s}," \
                "\n options={%s})" % (
                    tdesc["symbol"].string(),
                    type.address,
                    tdesc["name"].string(),
                    tdesc["category"].string(),
                    tdesc["description"].string(),
                    ports_in,
                    ports_out,
                    options)
        return "(struct sol_flow_node_type)%s (no node type description)" % (type.address,)
class FlowPrinter(object):
    "Print a 'struct sol_flow_node'"

    def __init__(self, val):
        self.val = val

    def display_hint(self):
        return 'sol_flow_node'

    def to_string(self):
        node_id = self.val["id"]
        node_type = self.val["type"]
        address = self.val.address
        # A NULL type pointer means the node is still being built.
        if not node_type:
            return "sol_flow_node(%s) is under construction." % (address,)
        # Fall back to the raw address when no description is available.
        tname = "%#x (no node type description)" % (node_type.address,)
        tdesc = get_type_description(node_type)
        if tdesc:
            tname = "%s(%s=%s)" % (
                tdesc["name"].string(),
                tdesc["symbol"].string(),
                node_type.address)
        return "sol_flow_node(%s, id=\"%s\", type=%s)" % (
            address, node_id.string(), tname)
def sol_flow_pretty_printers(val):
    """Return the matching sol_flow pretty printer for `val`, or None."""
    dispatch = {
        "sol_flow_node": FlowPrinter,
        "sol_flow_node_type": FlowTypePrinter,
    }
    factory = dispatch.get(val.type.tag)
    if factory is None:
        return None
    return factory(val)
def register_pretty_printers(objfile):
    # Registered on the global list rather than per-objfile; `objfile` is
    # accepted to match gdb's auto-load hook signature but is unused here.
    gdb.pretty_printers.append(sol_flow_pretty_printers)
def get_type_options_string(type, options):
    """Render a human-readable dump of `options` for the given node type.

    Uses the node type's description (option names, data types, defaults) to
    label each member of the raw options struct.
    """
    if not options:
        return ""
    tdesc = get_type_description(type)
    # Without a description we cannot know the struct layout — print raw.
    if not tdesc or not tdesc["options"] or not tdesc["options"]["members"]:
        return "OPTIONS: %s (no node type description)\n" % (options,)
    string = ""
    opts_desc = tdesc["options"]
    array = opts_desc["members"]
    i = 0
    string += "OPTIONS: (struct %s*)%s\n" % (tdesc["options_symbol"].string(), options)
    # Cast the opaque pointer to the concrete per-type options struct so
    # members can be read by name.
    opt_type = gdb.lookup_type("struct %s" % (tdesc["options_symbol"].string(),))
    options = options.cast(opt_type.pointer())
    # Member array is terminated by an entry whose name is NULL.
    while array[i]["name"]:
        m = array[i]
        name = m["name"].string()
        data_type = m["data_type"].string()
        description = m["description"].string()
        value = options[name]
        if data_type == "string":
            if value:
                value = value.string()
            else:
                value = "NULL"
        defvalue_member = defvalue_member_map.get(data_type)
        if not defvalue_member:
            defvalue = ""
        else:
            defvalue = m["defvalue"][defvalue_member]
            if data_type == "string":
                if defvalue:
                    defvalue = defvalue.string()
                else:
                    defvalue = "NULL"
            defvalue = " (default=%s)" % (defvalue,)
        string += " %s (%s) = %s // %s%s\n" % (name, data_type, value, description, defvalue)
        i += 1
    string += "\n"
    return string
class InspectAndBreakIfMatches(gdb.Breakpoint):
class InternalBreak(gdb.Breakpoint):
def __init__(self, method, banner=None, matches=None, values=None):
addr = "*%s" % (method.cast(gdb.lookup_type("long")),)
self.method = method
self.banner = banner
self.matches = matches or {}
self.values = values or {}
gdb.Breakpoint.__init__(self, addr, gdb.BP_BREAKPOINT, internal=True, temporary=True)
def stop(self):
if self.banner:
if callable(self.banner):
self.banner(self.matches, self.values)
else:
gdb.write(self.banner)
return True
def __init__(self, spec, matches):
gdb.Breakpoint.__init__(self, spec, gdb.BP_BREAKPOINT, internal=False)
self.matches = {}
for k, v in matches.items():
self.matches[k] = get_str_or_regexp_match(v)
def print_matches(self, values=None):
gdb.write("%s matches:\n" % (self.__class__.__name__,), gdb.STDERR)
if not values:
values = {}
for k, func in self.matches.items():
v = values.get(k)
if v is None:
gdb.write(" %s = %s (no value provided)\n" % (k, func.__doc__), gdb.STDERR)
else:
try:
res = func(v)
except Exception as e:
res = "Exception executing match: %s" % (e,)
gdb.write(" %s = %s (value: '%s', match: %s)\n" %
(k, func.__doc__, v, res), gdb.STDERR)
gdb.write("\n", gdb. |
def m | ain():
retu | rn
|
"""
This module provides miscellaneous utilities.
.. moduleauthor:: Fabian Hirschmann <fabian@hirschmann.email>
.. moduleauthor:: Michael Markert <markert.michael@googlemail.com>
:copyright: PenchY Developers 2011-2012, see AUTHORS
:license: MIT License, see LICENSE
"""
from | __future__ import print_function
import hashlib
import imp
import logging
import os
import shutil
import sys
import tempfile
import inspect
from contextlib import contextmanager
from functools import wraps
from xml.etree.ElementTree import SubElement
from tempfile import NamedTemporaryFile
|
from penchy.compat import write
from penchy import bootstrap
log = logging.getLogger(__name__)
def memoized(f):
    """
    Decorator that provides memoization, i.e. a cache that saves the result of
    a function call and returns them if called with the same arguments.

    The function will not be evaluated if the arguments are present in the
    cache. Keyword arguments are sorted by name, so ``f(a=1, b=2)`` and
    ``f(b=2, a=1)`` share a single cache entry. Calls with unhashable
    arguments are evaluated directly and never cached.
    """
    cache = {}

    @wraps(f)
    def _memoized(*args, **kwargs):
        # Sort kwargs so the key does not depend on keyword order, and keep
        # positional/keyword parts as separate tuple slots to avoid
        # collisions between them.
        key = (args, tuple(sorted(kwargs.items())))
        try:
            if key in cache:
                return cache[key]
        except TypeError:  # if passed an unhashable type evaluate directly
            return f(*args, **kwargs)
        ret = f(*args, **kwargs)
        cache[key] = ret
        return ret
    return _memoized
# Copyright (c) 1995-2010 by Frederik Lundh
# <http://effbot.org/zone/element-lib.htm#prettyprint>
# Licensed under the terms of the Historical Permission Notice
# and Disclaimer, see <http://effbot.org/zone/copyright.htm>.
def tree_pp(elem, level=0):
    """
    Pretty-prints an ElementTree.

    :param elem: root node
    :type elem: :class:`~xml.etree.ElementTree.Element`
    :param level: current level in tree
    :type level: int
    """
    # Indentation string placed before children at this nesting level.
    i = '\n' + level * ' '
    if len(elem):
        # Element has children: indent its text (before first child) and
        # tail (after its closing tag), then recurse into each child.
        if not elem.text or not elem.text.strip():
            elem.text = i + ' '
        if not elem.tail or not elem.tail.strip():
            elem.tail = i
        # NOTE: the loop deliberately rebinds `elem` to each child; after the
        # loop `elem` is the *last* child, whose tail is dedented below so the
        # parent's closing tag lines up with its opening tag.
        for elem in elem:
            tree_pp(elem, level + 1)
        if not elem.tail or not elem.tail.strip():
            elem.tail = i
    else:
        # Leaf node: only adjust the tail (never at the root, level 0).
        if level and (not elem.tail or not elem.tail.strip()):
            elem.tail = i
def dict2tree(elem, dict_):
    """
    Transform the given dictionary to a ElementTree and
    add it to the given element.

    :param elem: parent element
    :type elem: :class:`xml.etree.ElementTree.Element`
    :param dict_: dict to add to ``elem``
    :type dict_: dict
    """
    for key, value in dict_.items():
        # NOTE(review): falsy values ('', None, {}, 0) are skipped entirely —
        # preserved from the original; confirm callers rely on it.
        if not value:
            continue
        child = SubElement(elem, key)
        if isinstance(value, dict):
            # Was `type(...) == dict`, which silently failed for dict
            # subclasses (e.g. OrderedDict); isinstance handles them too.
            dict2tree(child, value)
        else:
            child.text = value
def sha1sum(filename, blocksize=65536):
    """
    Returns the sha1 hexdigest of a file.
    """
    digest = hashlib.sha1()
    with open(filename, 'rb') as source:
        # Read fixed-size chunks until the sentinel (empty bytes) at EOF.
        for chunk in iter(lambda: source.read(blocksize), b''):
            digest.update(chunk)
    return digest.hexdigest()
@contextmanager
def tempdir(prefix='penchy-invocation', delete=False):
    """
    Contextmanager to execute in new created temporary directory.

    The previous working directory is restored (and the temporary directory
    deleted, if requested) even when the body raises an exception.

    :param prefix: prefix of the temporary directory
    :type prefix: str
    :param delete: delete the temporary directory afterwards
    :type delete: bool
    """
    fwd = os.getcwd()
    cwd = tempfile.mkdtemp(prefix=prefix)
    os.chdir(cwd)
    try:
        yield
    finally:
        # Without this finally the original left the process stranded in the
        # (possibly deleted) temp dir whenever the body raised.
        os.chdir(fwd)
        if delete:
            shutil.rmtree(cwd)
def make_bootstrap_client():
    """
    Returns the temporary filename of a file containing
    the bootstrap client.
    """
    client_file = NamedTemporaryFile()
    # Dump the bootstrap module's source verbatim into the temp file.
    write(client_file, inspect.getsource(bootstrap))
    client_file.flush()
    return client_file
def load_job(filename):
    """
    Loads a job.

    :param filename: filename of the job
    :type filename: str
    """
    assert 'config' in sys.modules, 'You have to load the penchyrc before a job'
    # Import the file as module 'job' without leaving .pyc files behind.
    with disable_write_bytecode():
        module = imp.load_source('job', filename)
    log.info('Loaded job from %s' % filename)
    return module
def load_config(filename):
    """
    Loads the config module from filename.

    :param filename: filename of the config file
    :type filename: str
    """
    # Import the file as module 'config' without leaving .pyc files behind.
    with disable_write_bytecode():
        module = imp.load_source('config', filename)
    log.info('Loaded configuration from %s' % filename)
    return module
def get_config_attribute(config, name, default_value):
    """
    Returns an attribute of a config module or the
    default value.

    :param config: config module to use
    :param name: attribute name
    :type name: str
    :param default_value: default value
    """
    # getattr's three-argument form expresses lookup-with-fallback directly.
    return getattr(config, name, default_value)
@contextmanager
def disable_write_bytecode():
    """
    Contextmanager to temporarily disable writing bytecode while executing.

    The previous value of ``sys.dont_write_bytecode`` is restored even when
    the body raises an exception.
    """
    old_state = sys.dont_write_bytecode
    sys.dont_write_bytecode = True
    try:
        yield
    finally:
        # Without the finally an exception in the body left bytecode writing
        # disabled for the rest of the process.
        sys.dont_write_bytecode = old_state
def default(value, replacement):
    """
    Check if ``value`` is ``None`` and then return ``replacement`` or else
    ``value``.

    :param value: value to check
    :param replacement: default replacement for value
    :returns: return the value or replacement if value is None
    """
    if value is None:
        return replacement
    return value
def die(msg):
    """
    Print msg to stderr and exit with exit code 1.

    :param msg: msg to print
    :type msg: str
    """
    print(msg, file=sys.stderr)
    # sys.exit(1) is equivalent to raising SystemExit(1); raise it directly.
    raise SystemExit(1)
def depth(l):
    """
    Computes the depth of a nested balanced list.

    Raises ``ValueError`` if the lists are not
    balanced (or if a nested list is empty).

    :param l: the nested list
    :rtype: int
    :raises: ValueError
    """
    if not isinstance(l, list):
        return 0
    # Materialize the child depths: the original used a bare `map`, whose
    # iterator was exhausted by min() before max() ran under Python 3,
    # making depth() crash on any non-empty list.
    depths = [depth(child) for child in l]
    if min(depths) != max(depths):
        raise ValueError("Lists are not balanced.")
    return depths[0] + 1
def unify(xs):
    """
    Removes duplicates from xs while preserving the order.

    :param xs: the list
    :type xs: list object
    """
    seen = set()
    result = []
    for item in xs:
        # First occurrence wins; later duplicates are dropped.
        if item not in seen:
            seen.add(item)
            result.append(item)
    return result
|
#!/usr/bin/env python
# vim: set encoding=utf-8 tabstop=4 softtabstop=4 shiftwidth=4 expandtab
#########################################################################
# Copyright 2011-2013 Marcus Popp marcus@popp.mx
#########################################################################
# This file is part of SmartHome.py. http://smarthome.sourceforge.net/
#
# SmartHome.py is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SmartHome.py is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SmartHome.py. If not, see <http://www.gnu.org/licenses/>.
##########################################################################
import logging
import os
import configobj
logger = logging.getLogger('')
class Logics():
def __init__(self, smarthome, configfile):
logger.info('Start Logics')
self._sh = smarthome
self._workers = []
self._logics = {}
self._bytecode = {}
self.alive = True
logger.debug("reading logics from %s" % configfile)
try:
self._config = configobj.ConfigObj(configfile, file_error=True)
| except Exception, e:
logger.critical(e)
return
for name in self._config:
logger.debug("Logic: %s" % name)
logic = Logic(self._sh, name, self._config[name])
if hasattr(logic, 'bytecode'):
self._logics[name] = logic
self._sh.scheduler.add(name, logic, logic.prio, logic.crontab, logic.cycle)
else:
continue
# plugin hook
for plugi | n in self._sh._plugins:
if hasattr(plugin, 'parse_logic'):
plugin.parse_logic(logic)
# item hook
if hasattr(logic, 'watch_item'):
if isinstance(logic.watch_item, str):
logic.watch_item = [logic.watch_item]
items = []
for entry in logic.watch_item:
items += self._sh.match_items(entry)
for item in items:
item.add_logic_trigger(logic)
def __iter__(self):
for logic in self._logics:
yield logic
def __getitem__(self, name):
if name in self._logics:
return self._logics[name]
class Logic():
    """A single logic: scheduling attributes plus compiled script bytecode."""

    def __init__(self, smarthome, name, attributes):
        self._sh = smarthome
        self.name = name
        self.crontab = None
        self.cycle = None
        self.prio = 3
        self.last = None
        self.conf = attributes
        # Every config attribute becomes an instance attribute and may
        # override the defaults above (e.g. 'filename', 'crontab', 'prio').
        for attribute in attributes:
            vars(self)[attribute] = attributes[attribute]
        self.generate_bytecode()
        self.prio = int(self.prio)
        if self.crontab is not None:
            if isinstance(self.crontab, list):
                self.crontab = ','.join(self.crontab)  # rejoin crontab entry to a string

    def id(self):
        return self.name

    def __call__(self, caller='Logic', source=None, value=None, dest=None, dt=None):
        # Calling the logic object schedules it for execution.
        self._sh.scheduler.trigger(self.name, self, prio=self.prio, by=caller, source=source, dest=dest, value=value, dt=dt)

    def trigger(self, by='Logic', source=None, value=None, dest=None, dt=None):
        self._sh.scheduler.trigger(self.name, self, prio=self.prio, by=by, source=source, dest=dest, value=value, dt=dt)

    def generate_bytecode(self):
        # Compiles the script file into self.bytecode; on any failure the
        # attribute stays unset, which Logics.__init__ uses to skip the logic.
        if hasattr(self, 'filename'):
            filename = self._sh.base_dir + '/logics/' + self.filename
            if not os.access(filename, os.R_OK):
                logger.warning("%s: Could not access logic file (%s) => ignoring." % (self.name, self.filename))
                return
            try:
                self.bytecode = compile(open(filename).read(), self.filename, 'exec')
            except Exception, e:
                logger.warning("Exception: %s" % e)
        else:
            logger.warning("%s: No filename specified => ignoring." % self.name)
|
ain.c", self.line, num_expected_locations=1, loc_exact=True)
# Breakpoint 4 - set at the same location as breakpoint 1 to test
# setting breakpoint commands on two breakpoints at a time
lldbutil.run_break_set_by_file_and_line(
self, None, self.line, num_expected_locations=1, loc_exact=True)
# Make sure relative path source breakpoints work as expected. We test
# with partial paths with and without "./" prefixes.
lldbutil.run_break_set_by_file_and_line(
self, "./main.c", self.line,
num_expected_locations=1, loc_exact=True)
lldbutil.run_break_set_by_file_and_line(
self | , "basic/main.c", self.line,
num_expected_locations=1, loc_exact=True)
lldbutil.run_break_set_by_file_and_line(
self, "./basic/main.c", self.line,
num_expected_locations= | 1, loc_exact=True)
lldbutil.run_break_set_by_file_and_line(
self, "breakpoint/basic/main.c", self.line,
num_expected_locations=1, loc_exact=True)
lldbutil.run_break_set_by_file_and_line(
self, "./breakpoint/basic/main.c", self.line,
num_expected_locations=1, loc_exact=True)
# Test relative breakpoints with incorrect paths and make sure we get
# no breakpoint locations
lldbutil.run_break_set_by_file_and_line(
self, "invalid/main.c", self.line,
num_expected_locations=0, loc_exact=True)
lldbutil.run_break_set_by_file_and_line(
self, "./invalid/main.c", self.line,
num_expected_locations=0, loc_exact=True)
# Now add callbacks for the breakpoints just created.
self.runCmd(
"breakpoint command add -s command -o 'frame variable --show-types --scope' 1 4")
self.runCmd(
"breakpoint command add -s python -o 'import side_effect; side_effect.one_liner = \"one liner was here\"' 2")
self.runCmd(
"breakpoint command add --python-function bktptcmd.function 3")
# Check that the breakpoint commands are correctly set.
# The breakpoint list now only contains breakpoint 1.
self.expect(
"breakpoint list", "Breakpoints 1 & 2 created", substrs=[
"2: file = 'main.c', line = %d, exact_match = 0, locations = 1" %
self.line], patterns=[
"1: file = '.*main.c', line = %d, exact_match = 0, locations = 1" %
self.line])
self.expect(
"breakpoint list -f",
"Breakpoints 1 & 2 created",
substrs=[
"2: file = 'main.c', line = %d, exact_match = 0, locations = 1" %
self.line],
patterns=[
"1: file = '.*main.c', line = %d, exact_match = 0, locations = 1" %
self.line,
"1.1: .+at main.c:%d:?[0-9]*, .+unresolved, hit count = 0" %
self.line,
"2.1: .+at main.c:%d:?[0-9]*, .+unresolved, hit count = 0" %
self.line])
self.expect("breakpoint command list 1", "Breakpoint 1 command ok",
substrs=["Breakpoint commands:",
"frame variable --show-types --scope"])
self.expect("breakpoint command list 2", "Breakpoint 2 command ok",
substrs=["Breakpoint commands (Python):",
"import side_effect",
"side_effect.one_liner"])
self.expect("breakpoint command list 3", "Breakpoint 3 command ok",
substrs=["Breakpoint commands (Python):",
"bktptcmd.function(frame, bp_loc, internal_dict)"])
self.expect("breakpoint command list 4", "Breakpoint 4 command ok",
substrs=["Breakpoint commands:",
"frame variable --show-types --scope"])
self.runCmd("breakpoint delete 4")
self.runCmd("command script import --allow-reload ./bktptcmd.py")
# Next lets try some other breakpoint kinds. First break with a regular expression
# and then specify only one file. The first time we should get two locations,
# the second time only one:
lldbutil.run_break_set_by_regexp(
self, r"._MyFunction", num_expected_locations=2)
lldbutil.run_break_set_by_regexp(
self,
r"._MyFunction",
extra_options="-f a.c",
num_expected_locations=1)
lldbutil.run_break_set_by_regexp(
self,
r"._MyFunction",
extra_options="-f a.c -f b.c",
num_expected_locations=2)
# Now try a source regex breakpoint:
lldbutil.run_break_set_by_source_regexp(
self,
r"is about to return [12]0",
extra_options="-f a.c -f b.c",
num_expected_locations=2)
lldbutil.run_break_set_by_source_regexp(
self,
r"is about to return [12]0",
extra_options="-f a.c",
num_expected_locations=1)
# Reset our canary variables and run the program.
side_effect.one_liner = None
side_effect.bktptcmd = None
self.runCmd("run", RUN_SUCCEEDED)
# Check the value of canary variables.
self.assertEquals("one liner was here", side_effect.one_liner)
self.assertEquals("function was here", side_effect.bktptcmd)
# Finish the program.
self.runCmd("process continue")
# Remove the breakpoint command associated with breakpoint 1.
self.runCmd("breakpoint command delete 1")
# Remove breakpoint 2.
self.runCmd("breakpoint delete 2")
self.expect(
"breakpoint command list 1",
startstr="Breakpoint 1 does not have an associated command.")
self.expect(
"breakpoint command list 2",
error=True,
startstr="error: '2' is not a currently valid breakpoint ID.")
# The breakpoint list now only contains breakpoint 1.
self.expect(
"breakpoint list -f",
"Breakpoint 1 exists",
patterns=[
"1: file = '.*main.c', line = %d, exact_match = 0, locations = 1, resolved = 1" %
self.line,
"hit count = 1"])
# Not breakpoint 2.
self.expect(
"breakpoint list -f",
"No more breakpoint 2",
matching=False,
substrs=[
"2: file = 'main.c', line = %d, exact_match = 0, locations = 1, resolved = 1" %
self.line])
# Run the program again, with breakpoint 1 remaining.
self.runCmd("run", RUN_SUCCEEDED)
# We should be stopped again due to breakpoint 1.
# The stop reason of the thread should be breakpoint.
self.expect("thread list", STOPPED_DUE_TO_BREAKPOINT,
substrs=['stopped',
'stop reason = breakpoint'])
# The breakpoint should have a hit count of 2.
self.expect("breakpoint list -f", BREAKPOINT_HIT_TWICE,
substrs=['resolved, hit count = 2'])
def breakpoint_command_script_parameters(self):
"""Test that the frame and breakpoint location are being properly passed to the script breakpoint command function."""
exe = self.getBuildArtifact("a.out")
self.runCmd("file " + exe, CURRENT_EXECUTABLE_SET)
# Add a breakpoint.
lldbutil.run_break_set_by_file_and_line(
self, "main.c", self.line, num_expected_locations=1, loc_exact=True)
# Now add callbacks for the breakpoints just created.
self.runCmd("breakpoint command add -s python -o 'import side_effect; side_effect.frame = str(frame); side_effect.bp_loc = str(bp_loc)' 1")
# Reset canary variables and run.
side_effect.frame = None
side_effect.bp_loc = None
self.runCmd("run", RUN_SUCCEEDED)
self.expect(side_effect.frame, exe=False, startstr="frame #0:")
self.expect(side_effect.bp_loc, ex |
m datetime import date
import click
import yaml
from indico.util.console import cformat
# Dictionary listing the files for which to change the header.
# The key is the extension of the file (without the dot) and the value is another
# dictionary containing two keys:
# - 'regex' : A regular expression matching comments in the given file type
# - 'format': A dictionary with the comment characters to add to the header.
# There must be a `comment_start` inserted before the header,
# `comment_middle` inserted at the beginning of each line except the
# first and last one, and `comment_end` inserted at the end of the
# header. (See the `HEADER` above)
# NOTE(review): this constant block contained extraction artifacts (stray
# ' | ' separators inside the 'scss' key and the USAGE text); reconstructed.
SUPPORTED_FILES = {
    'py': {
        'regex': re.compile(r'((^#|[\r\n]#).*)*'),
        'format': {'comment_start': '#', 'comment_middle': '#', 'comment_end': ''}},
    'wsgi': {
        'regex': re.compile(r'((^#|[\r\n]#).*)*'),
        'format': {'comment_start': '#', 'comment_middle': '#', 'comment_end': ''}},
    'js': {
        'regex': re.compile(r'/\*(.|[\r\n])*?\*/|((^//|[\r\n]//).*)*'),
        'format': {'comment_start': '//', 'comment_middle': '//', 'comment_end': ''}},
    'jsx': {
        'regex': re.compile(r'/\*(.|[\r\n])*?\*/|((^//|[\r\n]//).*)*'),
        'format': {'comment_start': '//', 'comment_middle': '//', 'comment_end': ''}},
    'css': {
        'regex': re.compile(r'/\*(.|[\r\n])*?\*/'),
        'format': {'comment_start': '/*', 'comment_middle': ' *', 'comment_end': ' */'}},
    'scss': {
        'regex': re.compile(r'/\*(.|[\r\n])*?\*/|((^//|[\r\n]//).*)*'),
        'format': {'comment_start': '//', 'comment_middle': '//', 'comment_end': ''}},
}

# The substring which must be part of a comment block in order for the comment to be updated by the header.
SUBSTRING = 'This file is part of'

USAGE = '''
Updates all the headers in the supported files ({supported_files}).
By default, all the files tracked by git in the current repository are updated
to the current year.
You can specify a year to update to as well as a file or directory.
This will update all the supported files in the scope including those not tracked
by git. If the directory does not contain any supported files (or if the file
specified is not supported) nothing will be updated.
'''.format(supported_files=', '.join(SUPPORTED_FILES)).strip()
def _walk_to_root(path):
"""Yield directories starting from the given directory up to the root."""
# Based on code from python-dotenv (BSD-licensed):
# https://github.com/theskumar/python-dotenv/blob/e13d957b/src/dotenv/main.py#L245
if os.path.isfile(path):
path = os.path.dirname(path)
last_dir = None
current_dir = os.path.abspath(path)
while last_dir != current_dir:
yield current_dir
parent_dir = os.path.abspath(os.path.join(current_dir, os.path.pardir))
last_dir, current_dir = current_dir, parent_dir
def _get_config(path, end_year):
    """Collect the header config for *path* by merging `headers.yml` files.

    Walks from *path* up to the filesystem root; keys from files closer to
    *path* take precedence (existing keys are never overwritten). A file
    containing `root: true` stops the walk. Exits the process with code 1
    when a mandatory key is missing from the merged result.
    """
    config = {}
    for dirname in _walk_to_root(path):
        check_path = os.path.join(dirname, 'headers.yml')
        if os.path.isfile(check_path):
            with open(check_path) as f:
                # Skip keys already set: nearer config wins.
                config.update((k, v) for k, v in yaml.safe_load(f.read()).items() if k not in config)
            if config.pop('root', False):
                break
    if 'start_year' not in config:
        click.echo('no valid headers.yml files found: start_year missing')
        sys.exit(1)
    if 'name' not in config:
        click.echo('no valid headers.yml files found: name missing')
        sys.exit(1)
    if 'header' not in config:
        click.echo('no valid headers.yml files found: header missing')
        sys.exit(1)
    config['end_year'] = end_year
    return config
def gen_header(data):
    """Render the license header template contained in ``data['header']``.

    ``dates`` is derived from ``start_year``/``end_year`` (a single year when
    they match, a "start - end" range otherwise) and substituted into the
    template along with every other key. The caller's mapping is left
    untouched (the original mutated it by writing ``data['dates']``).
    """
    data = dict(data)  # work on a copy to avoid mutating the caller's dict
    if data['start_year'] == data['end_year']:
        data['dates'] = data['start_year']
    else:
        data['dates'] = '{} - {}'.format(data['start_year'], data['end_year'])
    return '\n'.join(line.rstrip() for line in data['header'].format(**data).strip().splitlines())
def _update_header(file_path, config, substring, regex, data, ci):
    """Rewrite (or just report, in CI mode) the license header of one file.

    Returns True when the file needed a change (wrong or missing header);
    falls through (None) when the header was already up to date.
    """
    with open(file_path) as fp:
        orig_content = fp.read()
    if not orig_content.strip():
        return False
    content = orig_content
    # a shebang must stay the very first line; headers go after it
    shebang_line = None
    if content.startswith('#!/'):
        shebang_line, content = content.split('\n', 1)
    found = False
    for match in regex.finditer(content):
        if substring not in match.group():
            continue
        found = True
        content = content[:match.start()] + gen_header(data | config) + content[match.end():]
    if shebang_line:
        content = shebang_line + '\n' + content
    if content != orig_content:
        msg = 'Incorrect header in {}' if ci else cformat('%{green!}Updating header of %{blue!}{}')
        print(msg.format(os.path.relpath(file_path)))
        if not ci:
            with open(file_path, 'w') as fp:
                fp.write(content)
        return True
    elif not found:
        msg = 'Missing header in {}' if ci else cformat('%{red!}Missing header%{reset} in %{blue!}{}')
        print(msg.format(os.path.relpath(file_path)))
        return True
def update_header(file_path, year, ci):
    """Update the header of `file_path` when it is a supported, non-hidden file.

    Returns True if the file's header had to be changed (or was missing).
    """
    config = _get_config(file_path, year)
    ext = file_path.rsplit('.', 1)[-1]
    if ext not in SUPPORTED_FILES or not os.path.isfile(file_path):
        return False
    # skip hidden files (name starting with a dot)
    if os.path.basename(file_path).startswith('.'):
        return False
    spec = SUPPORTED_FILES[ext]
    return _update_header(file_path, config, SUBSTRING, spec['regex'], spec['format'], ci)
def blacklisted(root, path, _cache={}):
    """Return whether `path` (inside `root`) is covered by a `.no-headers` marker.

    Walks from `path` upwards (staying inside `root`) looking for a
    `.no-headers` file.  Results are memoized in `_cache` — the mutable
    default is intentional, as the same directories recur during a tree walk.
    """
    if path in _cache:
        return _cache[path]
    result = False
    current = path
    while (current + os.path.sep).startswith(root):
        if os.path.exists(os.path.join(current, '.no-headers')):
            result = True
            break
        current = os.path.normpath(os.path.join(current, '..'))
    _cache[path] = result
    return _cache[path]
@click.command(help=USAGE)
@click.option('--ci', is_flag=True, help='Indicate that the script is running during CI and should use a non-zero '
'exit code unless all headers were already up to date. This also prevents '
'files from actually being updated.')
@click.option('--year', '-y', type=click.IntRange(min=1000), default=date.today().year, metavar='YEAR',
help='Indicate the target year')
@click.option('--path', '-p', type=click.Path(exists=True), help='Restrict updates to a specific file or directory')
@click.pass_context
def main(ctx, ci, year, path):
error = False
if path and os.path.isdir(path):
if not ci:
print(cformat('Updating headers to the year %{yellow!}{year}%{reset} for all the files in '
'%{yellow!}{path}%{reset}...').format(year=year, path=path))
for root, _, filenames in os.walk(path):
for filename in filenames:
if not blacklisted(path, root):
if update_header(os.path.join(root, filename), year, ci):
error = True
elif path and os.path.isfile(path):
if not ci:
print(cformat('Updating headers to the year %{yellow!}{year}%{reset} for the file '
'%{yellow!}{file}%{reset}...').format(year=year, file=path))
if update_header(path, year, ci):
error = True
else:
if not ci:
print(cformat('Updating headers to the year %{yellow!}{year}%{reset} for all '
'git-tracked files...').format(year=year))
try:
for filepath in subprocess.check_output(['git', 'ls-files'], text=True).splitlines():
filepath = os.path.abspath(filepath)
if not blacklisted(os.getcwd(), os.path.dirname(filepath)):
if update_header(filepath, year, ci):
error = True
except subprocess.CalledProcessError:
raise click.UsageError(cformat('%{red!}You mu |
# -*- test-case-name: txdav.carddav.datastore.test -*-
##
# Copyright (c) 2010-2014 Apple Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
"""
AddressBook store tests.
"""
|
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
class PlaybookState:

    '''
    A helper class, which keeps track of the task iteration
    state for a given playbook. This is used in the PlaybookIterator
    class on a per-host basis.
    '''

    def __init__(self, parent_iterator):
        self._parent_iterator = parent_iterator
        self._cur_play = 0
        self._task_list = None
        self._cur_task_pos = 0
        self._done = False

    def next(self, peek=False):
        '''
        Determines and returns the next available task from the playbook,
        advancing through the list of plays as it goes.
        '''
        # Work on local copies so that a peek does not disturb the saved state.
        play_idx = self._cur_play
        tasks = self._task_list
        task_idx = self._cur_task_pos
        task = None
        while True:
            # FIXME: accessing the entries and parent iterator playbook members
            #        should be done through accessor functions
            entries = self._parent_iterator._playbook._entries
            # Once past the last play (or already finished) flag completion
            # and signal the end of iteration with None.
            if self._done or play_idx >= len(entries):
                self._done = True
                return None
            # Lazily compile the current play into its task list; compile()
            # recurses into all child objects of the play.
            if tasks is None:
                tasks = entries[play_idx].compile()
            if task_idx >= len(tasks):
                # Exhausted this play's tasks: advance to the next play and
                # reset the per-play counters.
                play_idx += 1
                tasks = None
                task_idx = 0
                continue
            # FIXME: do tag/conditional evaluation here and advance
            #        the task position if it should be skipped without
            #        returning a task
            task = tasks[task_idx]
            task_idx += 1
            # Skip tasks belonging to a role that has already run, unless the
            # role's metadata allows duplicate executions.
            # FIXME: this should all be done via member functions instead of
            #        direct access to internal variables
            if task._role and task._role.has_run() and not task._role._metadata._allow_duplicates:
                continue
            break
        # Persist the advanced position only for a real (non-peek) fetch.
        if not peek:
            self._cur_play = play_idx
            self._task_list = tasks
            self._cur_task_pos = task_idx
        return task
class PlaybookIterator:

    '''
    The main iterator class, which keeps the state of the playbook
    on a per-host basis using the above PlaybookState class.
    '''

    def __init__(self, inventory, log_manager, playbook):
        self._playbook = playbook
        self._log_manager = log_manager
        self._host_entries = dict()
        self._first_host = None
        # Build one PlaybookState per host in the inventory; remember the
        # first host so get_next_task() has a default to work with.
        for host in inventory.get_hosts():
            if self._first_host is None:
                self._first_host = host
            self._host_entries[host.get_name()] = PlaybookState(parent_iterator=self)

    def get_next_task(self, peek=False):
        ''' returns the next task for host[0] '''
        return self._host_entries[self._first_host.get_name()].next(peek=peek)

    def get_next_task_for_host(self, host, peek=False):
        ''' fetch the next task for the given host '''
        name = host.get_name()
        if name not in self._host_entries:
            raise AnsibleError("invalid host specified for playbook iteration")
        return self._host_entries[name].next(peek=peek)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
#
#
# Copyright (c) 2008-2011 University of Dundee.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Author: Aleksandra Tarkowska <A(dot)Tarkowska(at)dundee(dot)ac(dot)uk>, 2008.
#
# Version: 1.0
#
from webclient.controller import BaseController
class BaseImpexp(BaseController):
    """Webclient controller for the import/export section.

    Adds no behaviour of its own beyond BaseController initialisation.
    """
    def __init__(self, conn, **kw):
        # conn is forwarded to BaseController; extra keyword arguments are
        # accepted for interface compatibility but ignored here.
        BaseController.__init__(self, conn)
|
# Generated by Django 3.2.6 on 2021-10-19 09:14
fro | m django.db import migrations
from ureport.sql import InstallSQL
class Migration(migrations.Migration):
    """Installs the raw SQL bundle `polls_0070` (via ureport.sql.InstallSQL)."""
    dependencies = [
        ("polls", "0069_pollquestion_color_choice"),
    ]
    operations = [InstallSQL("polls_0070")]
| |
#!/usr/bin/env python3
"""
librepo - download a package
"""
import os
import sys
import shutil
from pprint import pprint
import librepo
# Destination directory for downloaded metadata (fix: the string literal was
# garbled as "downloaded | _metadata").
DESTDIR = "downloaded_metadata"
# Width of the textual progress bar, in characters.
PROGRESSBAR_LEN = 40
# Module-level flag used by callback() so the final newline is printed once.
finished = False
def callback(data, total_to_download, downloaded):
    """Progress callback: draws a simple textual progress bar for a download."""
    global finished
    # A new (incomplete) transfer resets the completion flag.
    if total_to_download != downloaded:
        finished = False
    # Nothing sensible to draw without a total; also skip once complete.
    if total_to_download <= 0 or finished:
        return
    done_chars = int(downloaded / (total_to_download / PROGRESSBAR_LEN))
    print("%30s: [%s%s] %8s/%8s\r" % (data, '#' * done_chars, '-' * (PROGRESSBAR_LEN - done_chars), int(downloaded), int(total_to_download)), )
    sys.stdout.flush()
    # Print the final newline exactly once when the download completes.
    if total_to_download == downloaded and not finished:
        print()
        finished = True
    return
if __name__ == "__main__":
    # (package name, repository-relative URL) pairs to download
    pkgs = [
        ("ImageMagick-djvu", "Packages/i/ImageMagick-djvu-6.7.5.6-3.fc17.i686.rpm"),
        ("i2c-tools-eepromer", "Packages/i/i2c-tools-eepromer-3.1.0-1.fc17.i686.rpm")
    ]
    h = librepo.Handle()
    # Fedora 17 "Everything" repository with yum/rpm-md metadata layout
    h.setopt(librepo.LRO_URLS, ["http://ftp.linux.ncsu.edu/pub/fedora/linux/releases/17/Everything/i386/os/"])
    h.setopt(librepo.LRO_REPOTYPE, librepo.LR_YUMREPO)
    h.setopt(librepo.LRO_PROGRESSCB, callback)
    h.setopt(librepo.LRO_PROGRESSDATA, "")
    for pkg_name, pkg_url in pkgs:
        # the progress callback receives the package name via `progressdata`
        h.progressdata = pkg_name
        h.download(pkg_url)
|
from corehq.apps.users.models import CommCareUser
from corehq.apps.hqcase.management.commands.ptop_f | ast_reindexer import PtopReindexer
from corehq.pillows.user import UserPillow
# Batch size used when iterating over view results.
CHUNK_SIZE = 500
# Worker pool size for the reindexing run.
POOL_SIZE = 15


class Command(PtopReindexer):
    """Management command: fast reindex of the user elastic index.

    Fix: `doc_class` was garbled ("Comm | CareUser"); it must reference the
    CommCareUser model imported at the top of the file.
    """
    help = "Fast reindex of user elastic index by using the domain view and reindexing users"

    doc_class = CommCareUser
    view_name = 'users/by_username'
    pillow_class = UserPillow
|
# coding=utf-8
"""
InaSAFE Disaster risk assessment tool by AusAid -**InaSAFE Wizard**
This module provides: Keyword Wizard Step: Classification Selector
Contact : ole.moller.nielsen@gmail.com
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'qgis@borysjurgiel.pl'
__revision__ = '$Format:%H$'
__date__ = '16/03/2016'
__copyright__ = ('Copyright 2012, Australia Indonesia Facility for '
'Disaster Reduction')
# noinspection PyPackageRequirements
from PyQt4 import QtCore
# noinspection PyPackageRequirements
from PyQt4.QtGui import QListWidgetItem
from safe.definitions import layer_purpose_hazard
from safe.utilities.gis import is_raster_layer
from safe.utilities.keyword_io import definition
from safe.gui.tools.wizard.wizard_strings import classification_question
from safe.gui.tools.wizard.wizard_step import get_wizard_step_ui_class
from safe.gui.tools.wizard.wizard_step import WizardStep
FORM_CLASS = get_wizard_step_ui_class(__file__)
class StepKwClassification(WizardStep, FORM_CLASS):
    """Keyword Wizard Step: Classification Selector"""

    def is_ready_to_next_step(self):
        """Check if the step is complete. If so, there is
        no reason to block the Next button.

        :returns: True if new step may be enabled.
        :rtype: bool
        """
        # Fix: this line was garbled in the source; a classification must be
        # selected before the Next button is enabled.
        return bool(self.selected_classification())

    def get_previous_step(self):
        """Find the proper step when user clicks the Previous button.

        :returns: The step to be switched to
        :rtype: WizardStep instance or None
        """
        new_step = self.parent.step_kw_layermode
        return new_step

    def get_next_step(self):
        """Find the proper step when user clicks the Next button.

        :returns: The step to be switched to
        :rtype: WizardStep instance or None
        """
        # Raster layers go straight to classify; vector layers need a field.
        if is_raster_layer(self.parent.layer):
            new_step = self.parent.step_kw_classify
        else:
            new_step = self.parent.step_kw_field
        return new_step

    def classifications_for_layer(self):
        """Return a list of valid classifications for a layer.

        :returns: A list where each value represents a valid classification.
        :rtype: list
        """
        layer_geometry_id = self.parent.get_layer_geometry_id()
        layer_mode_id = self.parent.step_kw_layermode.\
            selected_layermode()['key']
        subcategory_id = self.parent.step_kw_subcategory.\
            selected_subcategory()['key']
        if self.parent.step_kw_purpose.\
                selected_purpose() == layer_purpose_hazard:
            hazard_category_id = self.parent.step_kw_hazard_category.\
                selected_hazard_category()['key']
            if is_raster_layer(self.parent.layer):
                return self.impact_function_manager.\
                    raster_hazards_classifications_for_layer(
                        subcategory_id,
                        layer_geometry_id,
                        layer_mode_id,
                        hazard_category_id)
            else:
                return self.impact_function_manager\
                    .vector_hazards_classifications_for_layer(
                        subcategory_id,
                        layer_geometry_id,
                        layer_mode_id,
                        hazard_category_id)
        else:
            # There are no classifications for exposures defined yet, apart
            # from postprocessor_classification, processed paralelly
            return []

    def on_lstClassifications_itemSelectionChanged(self):
        """Update classification description label and unlock the Next button.

        .. note:: This is an automatic Qt slot
           executed when the field selection changes.
        """
        self.clear_further_steps()
        classification = self.selected_classification()
        # Exit if no selection
        if not classification:
            return
        # Set description label
        self.lblDescribeClassification.setText(classification["description"])
        # Enable the next button
        self.parent.pbnNext.setEnabled(True)

    def selected_classification(self):
        """Obtain the classification selected by user.

        :returns: Metadata of the selected classification.
        :rtype: dict, None
        """
        item = self.lstClassifications.currentItem()
        try:
            return definition(item.data(QtCore.Qt.UserRole))
        except (AttributeError, NameError):
            # no current item, or its key is not a known definition
            return None

    def clear_further_steps(self):
        """Clear all further steps in order to properly calculate
        the previous step."""
        self.parent.step_kw_field.lstFields.clear()
        self.parent.step_kw_classify.treeClasses.clear()

    def set_widgets(self):
        """Set widgets on the Classification tab."""
        self.clear_further_steps()
        purpose = self.parent.step_kw_purpose.selected_purpose()['name']
        subcategory = self.parent.step_kw_subcategory.\
            selected_subcategory()['name']
        self.lstClassifications.clear()
        self.lblDescribeClassification.setText('')
        self.lblSelectClassification.setText(
            classification_question % (subcategory, purpose))
        classifications = self.classifications_for_layer()
        for classification in classifications:
            if not isinstance(classification, dict):
                classification = definition(classification)
            item = QListWidgetItem(
                classification['name'],
                self.lstClassifications)
            item.setData(QtCore.Qt.UserRole, classification['key'])
            self.lstClassifications.addItem(item)
        # Set values based on existing keywords (if already assigned)
        geom = 'raster' if is_raster_layer(self.parent.layer) else 'vector'
        key = '%s_%s_classification' % (
            geom, self.parent.step_kw_purpose.selected_purpose()['key'])
        classification_keyword = self.parent.get_existing_keyword(key)
        if classification_keyword:
            classifications = []
            for index in xrange(self.lstClassifications.count()):
                item = self.lstClassifications.item(index)
                classifications.append(item.data(QtCore.Qt.UserRole))
            if classification_keyword in classifications:
                self.lstClassifications.setCurrentRow(
                    classifications.index(classification_keyword))
        self.auto_select_one_item(self.lstClassifications)
|
_min', 'lum_max'],
3: ['w_mean', 'w_std'],
4: ['dm_igm_slope', 'dm_host']}
self.norm_surv = NORM_SURV
self.so = SimulationOverview()
self.tns = self.get_tns()
    def get_tns(self):
        """Return the TNS catalogue dataframe, restricted to one-off FRBs."""
        # Only get one-offs
        return TNS(repeaters=False, mute=True, update=False).df
    def dm(self, pop, survey_name):
        """KS-test p-value comparing simulated and observed DM distributions.

        Observed DMs come from self.tns, limited to the given survey and to
        DM <= 950. Returns np.nan when ks_2samp rejects the samples
        (e.g. an empty sample raises ValueError).
        """
        mask = ((self.tns.survey == survey_name) & (self.tns.dm <= 950))
        try:
            ks_dm = ks_2samp(pop.frbs.dm, self.tns[mask].dm)[1]
        except ValueError:
            ks_dm = np.nan
        return ks_dm
def snr(self, pop, survey_name):
mask = (( | self.tns.survey == survey_name) & (self.tns.dm <= 950))
try:
ks_snr = ks_2samp(pop.frbs.snr, self.tns[mask].snr)[1]
except ValueError:
ks_snr = np.nan
return ks_snr
    def rate(self, pop, survey_name, norm_uuid, run, errs=False):
        """Weight a simulation by how well its detection-rate ratio matches reality.

        The simulated rate for `survey_name` is compared as a ratio against the
        normalisation survey (self.norm_surv, loaded from `norm_uuid`); the
        returned weight is the inverse absolute difference between the simulated
        and real ratios.

        Returns a (weight, number of detected sources) tuple.
        """
        # Add rate details
        sr = pop.source_rate
        surv_sim_rate = sr.det / sr.days
        # Perhaps use at some stage
        if errs:
            p_int = poisson_interval(sr.det, sigma=1)
            surv_sim_rate_errs = [p/sr.days for p in p_int]
        # Determine ratio of detection rates
        if survey_name in EXPECTED:
            n_frbs, n_days = EXPECTED[survey_name]
        else:
            # unknown survey: the real rate (and hence the ratio) becomes nan
            n_frbs, n_days = [np.nan, np.nan]
        surv_real_rate = n_frbs/n_days
        # Get normalisation properties
        norm_real_n_frbs, norm_real_n_days = EXPECTED[self.norm_surv]
        norm_pop = unpickle(f'mc/run_{run}/{norm_uuid}')
        norm_sim_n_frbs = norm_pop.source_rate.det
        norm_sim_n_days = norm_pop.source_rate.days
        norm_sim_rate = norm_sim_n_frbs / norm_sim_n_days
        norm_real_rate = norm_real_n_frbs / norm_real_n_days
        # avoid dividing by a zero simulated normalisation rate below
        if norm_sim_rate == 0:
            norm_sim_rate = POP_SIZE / norm_sim_n_days
        sim_ratio = surv_sim_rate / norm_sim_rate
        real_ratio = surv_real_rate / norm_real_rate
        diff = np.abs(sim_ratio - real_ratio)
        if diff == 0:
            # NOTE(review): a perfect match gets weight 1e-3 (the *smallest*
            # weight), while calc_gofs rescales weight == 1e3 — confirm which
            # sentinel value is intended.
            rate_diff = 1e-3
        else:
            rate_diff = 1 / diff
        return rate_diff, pop.n_sources()
def calc_gofs(self, run):
# For each requested run
self.so = SimulationOverview()
par_set = self.so.df[self.so.df.run == run].par_set.iloc[0]
pprint(f'Calculating goodness of fit for run {run}, par set {par_set}')
pars = self.run_pars[par_set]
values = []
# Loop through all combination of parameters
for values, group in self.so.df[self.so.df.run == run].groupby(pars):
pprint(f' - {list(zip(pars, values))}')
# Calculate goodness of fit values for each simulation
for row_ix, row in group.iterrows():
survey_name = row.survey
uuid = row.uuid
pop = unpickle(f'mc/run_{run}/{uuid}')
# Apply a DM cutoff
mask = (pop.frbs.dm <= 950)
pop.frbs.apply(mask)
pop.source_rate.det = pop.n_sources() * pop.source_rate.f_area
dm_gof = self.dm(pop, survey_name)
snr_gof = self.snr(pop, survey_name)
self.so.df.at[row_ix, 'dm_gof'] = dm_gof
self.so.df.at[row_ix, 'snr_gof'] = snr_gof
if pop.n_sources() == 0:
self.so.df.at[row_ix, 'weight'] = 0
self.so.df.at[row_ix, 'n_det'] = pop.n_sources()
pprint(f' - No sources in {survey_name}')
continue
# Find corresponding rate normalisation population uuid
norm_mask = dict(zip(pars, values))
norm_mask['survey'] = self.norm_surv
norm_mask['run'] = run
k = norm_mask.keys()
v = norm_mask.values()
norm_uuid = group.loc[group[k].isin(v).all(axis=1), :].uuid
norm_uuid = norm_uuid.values[0]
rate_diff, n_det = self.rate(pop, survey_name, norm_uuid, run)
# Get rate weighting
self.so.df.at[row_ix, 'weight'] = rate_diff
self.so.df.at[row_ix, 'n_det'] = n_det
pprint(f'Saving the results for run {run}')
# Best matching in terms of rates
max_w = np.nanmax(self.so.df.weight)
self.so.df.loc[self.so.df.weight == 1e3]['weight'] = max_w
self.so.save()
def plot(self, run):
# Get data
# For each requested run
df = self.so.df
par_set = df[df.run == run].par_set.iloc[0]
# For each parameter
for main_par in self.run_pars[par_set]:
pprint(f'Plotting {main_par}')
other_pars = [e for e in self.run_pars[par_set] if e != main_par]
for compare_par in ['dm', 'snr']:
compare_col = f'{compare_par}_gof'
pprint(f' - {compare_col}')
for survey, group_surv in df[df.run == run].groupby('survey'):
pprint(f' - {survey}')
# Set up plot
plot_aa_style()
plt.rcParams["figure.figsize"] = (5.75373*3, 5.75373*3)
plt.rcParams['figure.max_open_warning'] = 125
n_x = group_surv[other_pars[0]].nunique()
if len(other_pars) > 1:
n_y = group_surv[other_pars[1]].nunique()
else:
n_y = 1
fig, ax = plt.subplots(n_x, n_y,
sharex='col', sharey='row')
groups = group_surv.groupby(other_pars)
x = -1
for i, (other_pars_vals, group) in enumerate(groups):
bins = group[main_par].values
values = group[compare_col].values
bins, values = self.add_edges_to_hist(bins, values)
if n_y > 1:
y = i % n_y
if y == 0:
x += 1
a = ax[y, x]
else:
y = i
a = ax[y]
a.step(bins, values, where='mid')
a.set_title = str(other_pars_vals)
diff = np.diff(bins)
if diff[1] != diff[0]:
a.set_xscale('log')
# Set axis label
if y == n_y - 1:
p = other_pars[0]
if isinstance(other_pars_vals, float):
val = other_pars_vals
else:
val = other_pars_vals[0]
p = p.replace('_', ' ')
a.set_xlabel(f'{p} = {val:.2}')
if x == 0:
p = other_pars[1]
val = other_pars_vals[1]
p = p.replace('_', ' ')
a.set_ylabel(f'{p} = {val:.2}')
# Set axis limits
subset = df[df.run == run][main_par]
y_subset = group_surv[compare_col].copy()
try:
low = np.nanmin(y_subset)
high = np.nanmax(y_subset)
except ValueError:
low = 0.0001
high = 1
log = False
if low > 0 and high > 0:
log = True
for a in ax.flatten():
a.set_xlim(subset.min(), subset.max())
if log:
a.set_yscale('log', nonposy='clip')
a.set_ylim(low, high)
p = main_par.replace('_', ' ' |
# This file is part of Invenio.
# Copyright (C) 2004, 2005, 2006, 2007, 2008, 2010, 2011, 2017 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
__revision__ = "$Id$"
from invenio.config import CFG_SITE_URL, CFG_SITE_RECORD
from invenio.websubmit_functions.Shared_Functions import ParamFromFile
## Description: function Print_Success_SRV
## This function displays a message telling the user the
## revised files have been correctly received
## Author: T.Baron
## PARAMETERS: -
def Print_Success_SRV(parameters, curdir, form, user_info=None):
    """
    This function simply displays a text on the screen, telling the
    user the revision went fine. To be used in the Submit New File
    (SRV) action.

    @param parameters: unused (kept for the websubmit function interface)
    @param curdir: current submission directory; the 'SN' file is read from it
    @param form: unused (kept for the websubmit function interface)
    @param user_info: unused (kept for the websubmit function interface)
    @return: HTML snippet confirming the successful revision
    """
    # `rn` (the report number) is set as a global by the websubmit engine
    # before this function is called.
    global rn
    sysno = ParamFromFile("%s/%s" % (curdir, 'SN')).strip()
    # NOTE: the original assigned a "Modification completed!" message to `t`
    # here, but it was unconditionally overwritten below (dead code, removed).
    if sysno:
        # If we know the URL of the document, we display it for user's convenience (RQF0800417)
        url = '%s/%s/%s' % (CFG_SITE_URL, CFG_SITE_RECORD, sysno)
        t = "<br /><br /><b>Document %s (<b><a href='%s'>%s</a></b>) was successfully revised.</b>" % (rn, url, url)
    else:
        t = "<br /><br /><b>Document %s was successfully revised.</b>" % rn
    return t
|
from sympy import I, Integer
from sympy.physics.quantum.innerproduct import InnerProduct
from sympy.physics.quantum.dagger import Dagger
from sympy.physics.quantum.state import B | ra, Ket, StateBase
def test_innerproduct():
    """Basic InnerProduct construction and Bra*Ket multiplication.

    Fix: `ip.bra` was garbled ("i | p.bra") in the source.
    """
    k = Ket('k')
    b = Bra('b')
    ip = InnerProduct(b, k)
    assert isinstance(ip, InnerProduct)
    assert ip.bra == b
    assert ip.ket == k
    # multiplying a Bra by a Ket auto-creates an InnerProduct
    assert b*k == InnerProduct(b, k)
    assert k*(b*k)*b == k*InnerProduct(b, k)*b
    assert InnerProduct(b, k).subs(b, Dagger(k)) == Dagger(k)*k
def test_innerproduct_dagger():
    """Dagger of <b|k> reverses and conjugates the factors."""
    ket = Ket('k')
    bra = Bra('b')
    inner = bra*ket
    assert Dagger(inner) == Dagger(ket)*Dagger(bra)
class FooState(StateBase):
    """Dummy state hierarchy used by test_doit below."""
    pass


class FooKet(Ket, FooState):
    @classmethod
    def dual_class(cls):
        return FooBra

    def _eval_innerproduct_FooBra(self, bra):
        # <foo|foo> evaluates to 1
        return Integer(1)

    def _eval_innerproduct_BarBra(self, bra):
        # <bar|foo> evaluates to I
        return I


class FooBra(Bra, FooState):
    @classmethod
    def dual_class(cls):
        return FooKet


class BarState(StateBase):
    """Second dummy state hierarchy used by test_doit below."""
    pass


class BarKet(Ket, BarState):
    @classmethod
    def dual_class(cls):
        return BarBra


class BarBra(Bra, BarState):
    @classmethod
    def dual_class(cls):
        return BarKet
def test_doit():
    """InnerProduct.doit() dispatches to the _eval_innerproduct_* hooks."""
    foo = FooKet('foo')
    bar = BarBra('bar')
    assert InnerProduct(bar, foo).doit() == I
    # the dagger'd pair evaluates to the complex conjugate
    assert InnerProduct(Dagger(foo), Dagger(bar)).doit() == -I
    assert InnerProduct(Dagger(foo), foo).doit() == Integer(1)
|
# Legacy (pre-Django 3.2) pointer to the app's AppConfig subclass.
# Fix: the assignment was garbled by stray " | " separators in the source.
default_app_config = 'apps.datasetmanager.apps.datasetmanagerConfig'
|
"""
Allows custom jinja filters.
"""
### Built-in filters ###
def select(value, key):
    """
    Select a key from a dictionary.

    If ``value`` is not a dictionary or ``key`` does not exist in it,
    the ``value`` is returned as is.

    Fix: the isinstance() call was garbled ("isinstance( | value, d | ict)").
    """
    return value.get(key, value) if isinstance(value, dict) else value
def rotate(list_, pivot):
    """
    Rotate a list around a pivot.

    If the pivot does not occur in the list, the list is returned unchanged.
    """
    if pivot not in list_:
        return list_
    pos = list_.index(pivot)
    return list_[pos:] + list_[:pos]
def map_format(sequence, format):
    """
    Apply format string on elements in sequence.

    :param format: format string. can use one positional format argument,
        i.e. '{}' or '{0}', which will map to elements in the sequence.
    """
    formatted = []
    for element in sequence:
        formatted.append(format.format(element))
    return formatted
def built_in_filters():
    """
    Confab built-in Jinja filters.
    """
    # the function names double as the Jinja filter names
    return [select, rotate, map_format]
class JinjaFiltersRegistry(object):
    """
    Registry of custom Jinja filters that are applied on Jinja environments
    when Confab generates templates.
    """
    def __init__(self):
        # start with the built-in filters; a set prevents duplicates
        self._filters = set(built_in_filters())

    def add_filter(self, filter):
        """Register a filter function (its __name__ becomes the Jinja name)."""
        self._filters.add(filter)

    def remove_filter(self, filter):
        """Unregister a filter; returns False if it was not registered."""
        try:
            self._filters.remove(filter)
        except KeyError:
            return False
        return True

    @property
    def filters(self):
        """Mapping of filter name -> filter function."""
        return {filter.__name__: filter for filter in self._filters}

    def register(self, environment):
        """
        Register filters on a Jinja environment object.
        """
        # FIX: use items() instead of the Python-2-only iteritems() so the
        # registry also works under Python 3 (items() is valid on both).
        for name, filter in self.filters.items():
            environment.filters[name] = filter
class JinjaFilters(object):
    """
    Context manager for Jinja filters.

    Adds the given filters on entry and removes them again on exit.
    """
    def __init__(self, *filters):
        self._filters = filters

    def __enter__(self):
        for custom_filter in self._filters:
            add_jinja_filter(custom_filter)

    def __exit__(self, type, value, traceback):
        for custom_filter in self._filters:
            remove_jinja_filter(custom_filter)
def add_jinja_filter(filter):
    """
    Add a custom jinja filter.
    """
    jinja_filters.add_filter(filter)
def remove_jinja_filter(filter):
    """
    Remove a custom jinja filter.

    Returns False if the filter was not registered.
    """
    return jinja_filters.remove_filter(filter)
# Module-level singleton registry used by the helper functions above.
jinja_filters = JinjaFiltersRegistry()
|
ead_out_line_len = 0.0
'''
Right here, I need to know the Holding Tags group from the tree that refers to this profile operation and build up the tags for PathKurve Utils.
I need to access the location vector, length, angle in radians and height.
'''
PathKurveUtils.clear_tags()
for i in range(len(obj.locs)):
tag = obj.locs[i]
h = obj.heights[i]
l = obj | .lengths[i]
a = math.radians(obj.angles[i])
PathKurveUtils.add_tag(area.Point(tag.x, tag.y), l, a, h)
depthparams = depth_params(
obj.ClearanceHeight.Value,
obj.SafeHeight.Value, obj.StartDepth.Value, obj.StepDown, 0.0,
obj.FinalDepth.Value, None)
PathKurveUtils.profile2(
curve, obj.Side, self.radius, self.vertFeed, self.horizFeed,
| self.vertRapid, self.horizRapid, obj.OffsetExtra.Value, roll_radius,
None, None, depthparams, extend_at_start, extend_at_end,
lead_in_line_len, lead_out_line_len)
output += PathKurveUtils.retrieve_gcode()
return output
    def execute(self, obj):
        """Recompute the Path of `obj`: resolve tool/feed data, collect the
        selected base edges into wires, and generate g-code via
        _buildPathLibarea."""
        import Part  # math #DraftGeomUtils
        output = ""
        toolLoad = PathUtils.getLastToolLoad(obj)
        # obj.ToolController = PathUtils.getToolControllers(obj)
        # toolLoad = PathUtils.getToolLoad(obj, obj.ToolController)
        if toolLoad is None or toolLoad.ToolNumber == 0:
            # No usable tool controller: fall back to default feeds/radius.
            self.vertFeed = 100
            self.horizFeed = 100
            self.vertRapid = 100
            self.horizRapid = 100
            self.radius = 0.25
            obj.ToolNumber = 0
            obj.ToolDescription = "UNDEFINED"
        else:
            self.vertFeed = toolLoad.VertFeed.Value
            self.horizFeed = toolLoad.HorizFeed.Value
            self.vertRapid = toolLoad.VertRapid.Value
            self.horizRapid = toolLoad.HorizRapid.Value
            tool = PathUtils.getTool(obj, toolLoad.ToolNumber)
            if tool.Diameter == 0:
                # zero diameter in the tool table: assume a small default
                self.radius = 0.25
            else:
                self.radius = tool.Diameter/2
            obj.ToolNumber = toolLoad.ToolNumber
            obj.ToolDescription = toolLoad.Name
        if obj.UserLabel == "":
            obj.Label = obj.Name + " :" + obj.ToolDescription
        else:
            obj.Label = obj.UserLabel + " :" + obj.ToolDescription
        output += "(" + obj.Label + ")"
        if obj.Side != "On":
            output += "(Compensated Tool Path. Diameter: " + str(self.radius * 2) + ")"
        else:
            output += "(Uncompensated Tool Path)"
        if obj.Base:
            # hfaces = []
            # vfaces = []
            wires = []
            # Collect the selected sub-edges of every base object into wires.
            for b in obj.Base:
                edgelist = []
                for sub in b[1]:
                    edgelist.append(getattr(b[0].Shape, sub))
                wires.extend(findWires(edgelist))
            for wire in wires:
                edgelist = wire.Edges
                edgelist = Part.__sortEdges__(edgelist)
                output += self._buildPathLibarea(obj, edgelist)
        if obj.Active:
            path = Path.Path(output)
            obj.Path = path
            obj.ViewObject.Visibility = True
        else:
            # Keep a placeholder path so downstream code still sees a Path.
            path = Path.Path("(inactive operation)")
            obj.Path = path
            obj.ViewObject.Visibility = False
class _ViewProviderProfile:
    """View provider for profile objects: supplies the icon and opens the
    task panel when the object is edited."""
    def __init__(self, vobj):
        vobj.Proxy = self
    def attach(self, vobj):
        self.Object = vobj.Object
        return
    def setEdit(self, vobj, mode=0):
        # Replace any currently open dialog with the profile task panel.
        FreeCADGui.Control.closeDialog()
        taskd = TaskPanel()
        taskd.obj = vobj.Object
        FreeCADGui.Control.showDialog(taskd)
        taskd.setupUi()
        return True
    def getIcon(self):
        return ":/icons/Path-Profile.svg"
    def __getstate__(self):
        # Nothing to serialize when the document is saved.
        return None
    def __setstate__(self, state):
        return None
class _CommandAddTag:
    """GUI command that appends a holding tag at a picked point."""
    def GetResources(self):
        return {'Pixmap': 'Path-Holding',
                'MenuText': QtCore.QT_TRANSLATE_NOOP("Path_Profile", "Add Holding Tag"),
                'ToolTip': QtCore.QT_TRANSLATE_NOOP("Path_Profile", "Add Holding Tag")}
    def IsActive(self):
        return FreeCAD.ActiveDocument is not None
    def setpoint(self, point, o):
        # Callback invoked by Snapper with the picked point.
        obj = FreeCADGui.Selection.getSelection()[0]
        # NOTE(review): updating StartPoint here looks copied from
        # _CommandSetStartPoint — confirm it is intended for Add Tag too.
        obj.StartPoint.x = point.x
        obj.StartPoint.y = point.y
        loc = obj.locs
        h = obj.heights
        l = obj.lengths
        a = obj.angles
        x = point.x
        y = point.y
        z = float(0.0)
        # Append a tag with default height/length/angle at the picked XY.
        loc.append(Vector(x, y, z))
        h.append(4.0)
        l.append(5.0)
        a.append(45.0)
        obj.locs = loc
        obj.heights = h
        obj.lengths = l
        obj.angles = a
    def Activated(self):
        FreeCADGui.Snapper.getPoint(callback=self.setpoint)
class _CommandSetStartPoint:
    """GUI command that lets the user pick the profile start point."""

    def GetResources(self):
        return {'Pixmap': 'Path-Holding',
                'MenuText': QtCore.QT_TRANSLATE_NOOP("Path_Profile", "Pick Start Point"),
                'ToolTip': QtCore.QT_TRANSLATE_NOOP("Path_Profile", "Pick Start Point")}

    def IsActive(self):
        return FreeCAD.ActiveDocument is not None

    def setpoint(self, point, o):
        # Store the picked XY on the currently selected object.
        selected = FreeCADGui.Selection.getSelection()[0]
        selected.StartPoint.x = point.x
        selected.StartPoint.y = point.y

    def Activated(self):
        FreeCADGui.Snapper.getPoint(callback=self.setpoint)
class _CommandSetEndPoint:
    """GUI command that lets the user pick the profile end point."""

    def GetResources(self):
        return {'Pixmap': 'Path-Holding',
                'MenuText': QtCore.QT_TRANSLATE_NOOP("Path_Profile", "Pick End Point"),
                'ToolTip': QtCore.QT_TRANSLATE_NOOP("Path_Profile", "Pick End Point")}

    def IsActive(self):
        return FreeCAD.ActiveDocument is not None

    def setpoint(self, point, o):
        # Store the picked XY on the currently selected object.
        selected = FreeCADGui.Selection.getSelection()[0]
        selected.EndPoint.x = point.x
        selected.EndPoint.y = point.y

    def Activated(self):
        FreeCADGui.Snapper.getPoint(callback=self.setpoint)
class CommandPathProfileEdges:
    """GUI command that creates a new edge-based profile operation."""

    def GetResources(self):
        return {'Pixmap': 'Path-Profile-Edges',
                'MenuText': QtCore.QT_TRANSLATE_NOOP("PathProfile", "Edge Profile"),
                'Accel': "P, F",
                'ToolTip': QtCore.QT_TRANSLATE_NOOP("PathProfile", "Profile based on Edges")}

    def IsActive(self):
        # Only active when the document contains a Job object.
        if FreeCAD.ActiveDocument is not None:
            for o in FreeCAD.ActiveDocument.Objects:
                if o.Name[:3] == "Job":
                    return True
        return False

    def Activated(self):
        ztop = 10.0
        zbottom = 0.0
        FreeCAD.ActiveDocument.openTransaction(translate("Path", "Create a Profile based on edge selection"))
        # FIX: the doCommand lines below reference PathScripts.PathProfileEdges,
        # so that module (not PathScripts.PathProfile) must be imported here.
        FreeCADGui.addModule("PathScripts.PathProfileEdges")
        FreeCADGui.doCommand('obj = FreeCAD.ActiveDocument.addObject("Path::FeaturePython", "Edge Profile")')
        FreeCADGui.doCommand('PathScripts.PathProfileEdges.ObjectProfile(obj)')
        FreeCADGui.doCommand('PathScripts.PathProfileEdges._ViewProviderProfile(obj.ViewObject)')
        FreeCADGui.doCommand('obj.Active = True')
        FreeCADGui.doCommand('obj.ClearanceHeight = ' + str(ztop + 10.0))
        FreeCADGui.doCommand('obj.StepDown = 1.0')
        FreeCADGui.doCommand('obj.StartDepth= ' + str(ztop))
        FreeCADGui.doCommand('obj.FinalDepth=' + str(zbottom))
        FreeCADGui.doCommand('obj.SafeHeight = ' + str(ztop + 2.0))
        FreeCADGui.doCommand('obj.Side = "On"')
        FreeCADGui.doCommand('obj.OffsetExtra = 0.0')
        FreeCADGui.doCommand('obj.Direction = "CW"')
        FreeCADGui.doCommand('obj.UseComp = False')
        FreeCADGui.doCommand('obj.PlungeAngle = 90.0')
        #FreeCADGui.doCommand('obj.ActiveTC = None')
        FreeCADGui.doCommand('PathScripts.PathUtils.addToJob(obj)')
        FreeCAD.ActiveDocument.commitTransaction()
        FreeCAD.ActiveDocument.recompute()
        FreeCADGui.doCommand('obj.ViewObject.startEditing()')
class TaskPanel:
def __init__(self):
self.form = FreeCADGui.PySideUic.loadUi(":/panel |
import sys
sys.path.insert(0, "../utils/")
import utils
def unjudged(qrelsFile, resultsFile):
    """Return the mean fraction of unjudged documents in each top-10 result list.

    Args:
        qrelsFile: path to a qrels file, parsed by utils.readQrels.
        resultsFile: path to a run/results file, parsed by utils.readResults.

    Returns:
        float: average over all queries of (unjudged docs in top 10) / 10;
        0.0 when the results file contains no queries.
    """
    qrels = utils.readQrels(qrelsFile)
    results = utils.readResults(resultsFile)
    if not results:
        # Avoid ZeroDivisionError on an empty run file.
        return 0.0
    # Renamed from `unjudged` so the local dict no longer shadows the function.
    unjudgedCounts = {}
    for qid in results.keys():
        qrelIds = set([did for (did, _) in qrels[qid]])
        resIds = set([did for (did, _, _) in results[qid][:10]])
        unjudgedCounts[qid] = len(resIds - qrelIds)
        # Parenthesized print keeps the output identical on Python 2 and 3.
        print("%d -> %d" % (qid, unjudgedCounts[qid]))
    print("--------------------")
    totalUnjudged = sum(unjudgedCounts.values())
    return float(totalUnjudged) / (len(results.keys()) * 10)
print "unjudged=%.2f" % unjudged(sys.argv[1], sys.argv[2])
|
from hazelcast.serialization.bits import *
from hazelcast.protocol.builtin import FixSizedTypesCodec
from hazelcast.protocol.client_message import OutboundMessage, REQUEST_HEADER_SIZE, create_initial_buffer
from hazelcast.protocol.builtin import StringCodec
from hazelcast.protocol.builtin import DataCodec
# hex: 0x021000
_REQUEST_MESSAGE_TYPE = 135168
# hex: 0x021001
_RESPONSE_MESSAGE_TYPE = 135169
# The three fixed-size long fields are packed back-to-back right after the
# request header; each offset is the previous offset plus one long.
_REQUEST_THREAD_ID_OFFSET = REQUEST_HEADER_SIZE
_REQUEST_TTL_OFFSET = _REQUEST_THREAD_ID_OFFSET + LONG_SIZE_IN_BYTES
_REQUEST_REFERENCE_ID_OFFSET = _REQUEST_TTL_OFFSET + LONG_SIZE_IN_BYTES
_REQUEST_INITIAL_FRAME_SIZE = _REQUEST_REFERENCE_ID_OFFSET + LONG_SIZE_IN_BYTES
def encode_request(name, key, thread_id, ttl, reference_id):
    """Build the outbound client-protocol message for this request.

    The three longs are written into the initial frame at their precomputed
    offsets; `name` and `key` follow as variable-size frames, in this order.
    NOTE(review): message type 0x021000 — confirm which map operation this
    generated codec corresponds to against the protocol definition.
    """
    buf = create_initial_buffer(_REQUEST_INITIAL_FRAME_SIZE, _REQUEST_MESSAGE_TYPE)
    FixSizedTypesCodec.encode_long(buf, _REQUEST_THREAD_ID_OFFSET, thread_id)
    FixSizedTypesCodec.encode_long(buf, _REQUEST_TTL_OFFSET, ttl)
    FixSizedTypesCodec.encode_long(buf, _REQUEST_REFERENCE_ID_OFFSET, reference_id)
    StringCodec.encode(buf, name)
    DataCodec.encode(buf, key, True)
    return OutboundMessage(buf, True)
|
#
# You should have received a copy of the GNU Lesser General Public License #
# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
# #
################################################################################
import github.GithubObject
import github.ProjectColumn
from . import Consts
class Project(github.GithubObject.CompletableGithubObject):
"""
This class represents Projects. The reference can be found here http://developer.github.com/v3/projects
"""
def __repr__(self):
return self.get__repr__({"name": self._name.value})
    @property
    def body(self):
        """
        The project's description text.

        :type: string
        """
        self._completeIfNotSet(self._body)
        return self._body.value
    @property
    def columns_url(self):
        """
        API URL listing this project's columns.

        :type: string
        """
        self._completeIfNotSet(self._columns_url)
        return self._columns_url.value
    @property
    def created_at(self):
        """
        Timestamp of project creation.

        :type: datetime.datetime
        """
        self._completeIfNotSet(self._created_at)
        return self._created_at.value
    @property
    def creator(self):
        """
        The user who created the project.

        :type: :class:`github.NamedUser.NamedUser`
        """
        self._completeIfNotSet(self._creator)
        return self._creator.value
    @property
    def html_url(self):
        """
        Web (browser-facing) URL of the project.

        :type: string
        """
        self._completeIfNotSet(self._html_url)
        return self._html_url.value
    @property
    def id(self):
        """
        Numeric identifier of the project.

        :type: integer
        """
        self._completeIfNotSet(self._id)
        return self._id.value
    @property
    def name(self):
        """
        The project's display name.

        :type: string
        """
        self._completeIfNotSet(self._name)
        return self._name.value
    @property
    def node_id(self):
        """
        Opaque node identifier (used by GitHub's GraphQL API).

        :type: string
        """
        self._completeIfNotSet(self._node_id)
        return self._node_id.value
    @property
    def number(self):
        """
        The project number.

        :type: integer
        """
        self._completeIfNotSet(self._number)
        return self._number.value
    @property
    def owner_url(self):
        """
        API URL of the project's owner.

        :type: string
        """
        self._completeIfNotSet(self._owner_url)
        return self._owner_url.value
    @property
    def state(self):
        """
        Project state as reported by the API (e.g. open/closed — confirm values).

        :type: string
        """
        self._completeIfNotSet(self._state)
        return self._state.value
    @property
    def updated_at(self):
        """
        Timestamp of the last project update.

        :type: datetime.datetime
        """
        self._completeIfNotSet(self._updated_at)
        return self._updated_at.value
    @property
    def url(self):
        """
        API URL of the project itself.

        :type: string
        """
        self._completeIfNotSet(self._url)
        return self._url.value
    def delete(self):
        """
        :calls: `DELETE /projects/:project_id <https://developer.github.com/v3/projects/#delete-a-project>`_
        :rtype: None
        """
        # Response payload is ignored; requestJsonAndCheck raises on HTTP errors.
        headers, data = self._requester.requestJsonAndCheck(
            "DELETE", self.url, headers={"Accept": Consts.mediaTypeProjectsPreview}
        )
def edit(
self,
name=github.GithubObject.NotSet,
body=github.GithubObject.NotSet,
state=github.GithubObject.NotSet,
organization_permission=github.GithubObject.NotSet,
private=github.GithubObject.NotSet,
):
"""
:calls: `PATCH /projects/:project_id <https://developer.github.com/v3/projects/#update-a-project>`_
:param name: string
:param body: string
:param state: string
:param organization_permission: string
:param private: bool
:rtype: None
"""
assert name is github.GithubObject.NotSet or isinstance(name, str), name
assert body is github.GithubObject.NotSet or isinstance(name, str), body
assert state is github.GithubObject.NotSet or isinstance(name, str), state
assert organization_permission is github.GithubObject.NotSet or isinstance(
organization_permission, str
), organization_permission
assert private is github.GithubObject.NotSet or isinstance(
private, bool
), private
patch_parameters = dict()
if name is not github.GithubObject.NotSet:
patch_parameters["name"] = name
if body is not github.GithubObject.NotSet:
patch_parameters["body"] = body
if state is not github.GithubObject.NotSet:
patch_parameters["state"] = state
if organization_permission is not github.GithubObject.NotSet:
patch_parameters["organization_permission"] = organization_permission
if private is not github.GithubObject.NotSet:
patch_parameters["private"] = private
headers, data = self._requester.requestJsonAndCheck(
"PATCH",
self.url,
input=patch_parameters,
headers={"Accept": Consts.mediaTypeProjectsPreview},
)
self._useAttributes(data)
    def get_columns(self):
        """
        :calls: `GET /projects/:project_id/columns <https://developer.github.com/v3/projects/columns/#list-project-columns>`_
        :rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.ProjectColumn.ProjectColumn`
        """
        # Lazy paginated list; the projects preview media type is required.
        return github.PaginatedList.PaginatedList(
            github.ProjectColumn.ProjectColumn,
            self._requester,
            self.columns_url,
            None,
            {"Accept": Consts.mediaTypeProjectsPreview},
        )
    def create_column(self, name):
        """
        :calls: `POST /projects/:project_id/columns <https://developer.github.com/v3/projects/columns/#create-a-project-column>`_
        :param name: string
        :rtype: :class:`github.ProjectColumn.ProjectColumn`
        """
        assert isinstance(name, str), name
        post_parameters = {"name": name}
        import_header = {"Accept": Consts.mediaTypeProjectsPreview}
        headers, data = self._requester.requestJsonAndCheck(
            "POST", self.url + "/columns", headers=import_header, input=post_parameters
        )
        return github.ProjectColumn.ProjectColumn(
            self._requester, headers, data, completed=True
        )
def _initAttributes(self):
self._body = github.GithubObject.NotSet
self._columns_url = github.GithubObject.NotSet
self._created_at = github.GithubObject.NotSet
self._creator = github.GithubObject.NotSet
self._html_url = github.GithubObject.NotSet
self._id = github.GithubObject.NotSet
self._name = github.GithubObject.NotSet
self._node_id = github.GithubObject.NotSet
self._number = github.GithubObject.NotSet
self._owner_url = github.GithubObject.NotSet
self._state = github.GithubObject.NotSet
self._updated_at = github.GithubObject.NotSet
self._url = github.GithubObject.NotSet
def _useAttributes(self, attributes):
if "body" in attributes: # pragma no branch
self._body = self._makeStringAttribute(attributes["body"])
if "columns_url" in attributes: # pragma no branch
self._columns_url = self._makeStringAttribute(attributes["columns_url"])
if "created_at" in attributes: # pragma no branch
self._created_at = self._makeDatetimeAttribute(attributes["created_at"])
if "creator" in attributes: # pragma no branch
self._creator = self._makeClassAttribute(
github.NamedUser.NamedUser, attributes["creator"]
)
if "html_url" in at | tributes: # pragma no branch
self._html_url = self._makeStringAttribute(attributes["html_url"])
if "id" in attributes: # pragma no branch
self._id = self._makeIntAttribute(attributes["id"])
if " | name" in attributes: # pragma no branch
self._name = self._makeStringAttribute(attributes["name"])
if "node_id" in attributes: # pragma no branch
self._node_id = self._makeStringAtt |
def print_15perc_total_tax(bill):
    '''Return a message stating 15% of *bill* — the total tax due, in US dollars.

    Accepts anything float() can convert (int, float, numeric string).
    '''
    bill = float(bill)
    # The previous str() wrapper was redundant: format() stringifies the number
    # identically.
    total_tax = bill * .15
    return "please graciously pay at least the amount of {} in total tax ".format(total_tax)
print print_15perc_total_tax(79)
|
#!/usr/bin/env python
import fileinput
class NotValidIP(Exception):
    """Raised when an octet falls outside the valid 0-255 range."""
class NotValidIPLength(Exception):
    """Raised when the input does not have 3 or 4 dot-separated parts."""
# Interactive loop: keep prompting until a valid dotted-quad address is
# entered; the host octet is normalized to 0 to form the network address.
while True:
    try:
        ip_addr = input("Enter a network IP address: ")
        octets = ip_addr.split('.')
        len1 = len(octets)
        # Keep the network part (first three octets) and zero the host part.
        # int() raises ValueError for non-numeric octets, caught below.
        ip_addr_split = [int(octet) for octet in octets[:3]] + [0]
        for element in ip_addr_split:
            if element > 255 or element < 0:
                raise NotValidIP
        # The original input must have had exactly 3 or 4 parts.
        if len1 != 3 and len1 != 4:
            raise NotValidIPLength
        print("The network IP address now is: %s" % ip_addr_split)
        break
    except ValueError:
        print('Not a good value')
    except NotValidIP:
        print('this is not a valid IP address')
    except NotValidIPLength:
        print('this is not an IP address size')
# Summary table: dotted network number plus binary/hex of the first octet.
print('%20s %20s %20s' % ('NETWORK_NUMBER', 'FIRST_OCTET_BINARY', 'FIRST_OCTET_HEX'))
a = '.'.join(str(q) for q in ip_addr_split)
b = bin(ip_addr_split[0])
c = hex(ip_addr_split[0])
print('%20s %20s %20s' % (a, b, c))
# | ##############################################################################
#
# OSIS stands for Open S | tudent Information System. It's an application
# designed to manage the core business of higher education institutions,
# such as universities, faculties, institutes and professional schools.
# The core business involves the administration of students, teachers,
# courses, programs and so on.
#
# Copyright (C) 2015-2021 Université catholique de Louvain (http://www.uclouvain.be)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of this license - GNU General Public License - is available
# at the root of the source code of this program. If not,
# see http://www.gnu.org/licenses/.
#
# ##############################################################################
import attr
from osis_common.ddd import interface
@attr.s(frozen=True, slots=True)
class EntiteUclDTO(interface.DTO):
    """Immutable DTO describing a UCLouvain entity (acronym and title)."""
    sigle = attr.ib(type=str)  # entity acronym ("sigle")
    intitule = attr.ib(type=str)  # full entity title ("intitulé")
|
import html
from urllib.parse import urljoin
from urllib.parse import urlparse
import wiki
from django.urls import resolve
from django.urls.exceptions import Resolver404
from markdown.extensions import Extension
from markdown.postprocessors import AndSubstitutePostprocessor
from markdown.treeprocessors import Treeprocessor
from wiki.decorators import which_article
from wiki.models import Article
from wiki.models import URLPath
class LinkTreeprocessor(Treeprocessor):
    """Markdown tree processor that tags <a> elements with a CSS class
    describing their target: internal wiki page, broken wiki link, or
    external URL."""
    def __init__(self, md, config):
        super().__init__(md)
        self.md = md
        # CSS class names taken from the extension's config.
        self.broken_class = config["broken"]
        self.internal_class = config["internal"]
        self.external_class = config["external"]
    @property
    def my_urlpath(self):
        # Lazily compute and cache the current article's absolute URL.
        try:
            return self._my_urlpath
        except AttributeError:
            self._my_urlpath = self.md.article.get_absolute_url()
        return self._my_urlpath
    def get_class(self, el): # noqa: max-complexity 11
        """Return the CSS class for anchor `el`, or None to leave it untagged."""
        href = el.get("href")
        if not href:
            return
        # The autolinker turns email links into links with many HTML entities.
        # These entities are further escaped using markdown-specific codes.
        # First unescape the markdown-specific, then use html.unescape.
        href = AndSubstitutePostprocessor().run(href)
        href = html.unescape(href)
        try:
            url = urlparse(href)
        except ValueError:
            # Unparseable href: leave the element untouched.
            return
        if url.scheme == "mailto":
            return
        if url.scheme or url.netloc:
            # Contains a hostname or url schema ⇒ External link
            return self.external_class
        # Ensure that path ends with a slash
        relpath = url.path.rstrip("/") + "/"
        try:
            target = resolve(urljoin(self.my_urlpath, relpath))
        except Resolver404:
            # Broken absolute link
            return self.external_class
        if target.app_names != ["wiki"]:
            # Links outside wiki
            return self.external_class
        try:
            # NOTE(review): relies on `wiki.core.exceptions` being importable
            # via the bare `import wiki` at module top — confirm.
            article, urlpath = which_article(**target.kwargs)
        except (
            wiki.core.exceptions.NoRootURL,
            URLPath.DoesNotExist,
            Article.DoesNotExist,
        ):
            return self.broken_class
        return self.internal_class
    def run(self, doc):
        """Walk the element tree and append the computed class to each anchor."""
        for el in doc.iter():
            if el.tag != "a":
                continue
            class_ = self.get_class(el)
            if class_:
                # Append class
                classes = (el.get("class", "") + " " + class_).strip()
                el.set("class", classes)
class LinkExtension(Extension):
    """Markdown extension that registers LinkTreeprocessor as "redlinks"."""
    TreeProcessorClass = LinkTreeprocessor
    def __init__(self, *args, **kwargs):
        # Each config entry is a [default-value, help-text] pair, per the
        # markdown Extension API.
        self.config = {
            "broken": ["wiki-broken", "CSS class to use for broken internal links"],
            "internal": ["wiki-internal", "CSS class to use for internal links"],
            "external": ["wiki-external", "CSS class to use for external links"],
        }
        super().__init__(*args, **kwargs)
    def extendMarkdown(self, md):
        md.registerExtension(self)
        self.md = md
        ext = self.TreeProcessorClass(md, self.getConfigs())
        # ">inline" places this processor after the inline processor.
        # NOTE(review): treeprocessors.add() is the legacy (Markdown<3) API;
        # confirm the installed markdown version still supports it.
        md.treeprocessors.add("redlinks", ext, ">inline")
def makeExtension(*args, **kwargs):
    """Return an instance of the extension."""
    extension = LinkExtension(*args, **kwargs)
    return extension
|
true"),
("Access-Control-Allow-Methods", "POST, GET, OPTIONS"),
("Access-Control-Max-Age", 3600),
]
self.handler = handler
self.config = config
    def write(self, data=""):
        """Write `data` to the client, ensuring a Content-Length header exists.

        Supports two gevent handler generations: 0.13 exposes
        response_headers_list; 1.0bX tracks provided_content_length.
        """
        # Gevent v 0.13
        if hasattr(self.handler, 'response_headers_list'):
            if 'Content-Length' not in self.handler.response_headers_list:
                self.handler.response_headers.append(('Content-Length', len(data)))
                self.handler.response_headers_list.append('Content-Length')
        elif not hasattr(self.handler, 'provided_content_length'):
            # Gevent 1.0bX
            l = len(data)
            self.handler.provided_content_length = l
            self.handler.response_headers.append(('Content-Length', l))
        self.handler.write(data)
def start_response(self, status, headers, **kwargs):
if "Content-Type" not in [x[0] for x in headers]:
headers.append(self.content_type)
headers.extend(self.headers)
self.handler.start_response(status, headers, **kwargs)
class XHRPollingTransport(BaseTransport):
    """Long-polling transport: each GET returns one payload (or a heartbeat
    NOOP), each POST delivers client messages to the server."""
    def __init__(self, *args, **kwargs):
        super(XHRPollingTransport, self).__init__(*args, **kwargs)
    def options(self):
        # CORS pre-flight: headers only, empty body.
        self.start_response("200 OK", ())
        self.write()
        return []
    def get(self, socket):
        """Block up to heartbeat_interval for outgoing messages, then respond."""
        socket.heartbeat()
        heartbeat_interval = self.config['heartbeat_interval']
        payload = self.get_messages_payload(socket, timeout=heartbeat_interval)
        if not payload:
            payload = "8::" # NOOP
        self.start_response("200 OK", [])
        self.write(payload)
    def _request_body(self):
        # Overridden by JSONPolling to unwrap the JSONP envelope.
        return self.handler.wsgi_input.readline()
    def post(self, socket):
        """Decode the request body and queue each message for the server."""
        for message in self.decode_payload(self._request_body()):
            socket.put_server_msg(message)
        self.start_response("200 OK", [
            ("Connection", "close"),
            ("Content-Type", "text/plain")
        ])
        self.write("1")
    def get_messages_payload(self, socket, timeout=None):
        """This will fetch the messages from the Socket's queue, and if
        there are many messages, pack multiple messages in one payload and return
        """
        try:
            msgs = socket.get_multiple_client_msgs(timeout=timeout)
            data = self.encode_payload(msgs)
        except Empty:
            # Queue timed out with nothing to send.
            data = ""
        return data
    def encode_payload(self, messages):
        """Encode list of messages. Expects messages to be unicode.
        ``messages`` - List of raw messages to encode, if necessary
        """
        if not messages or messages[0] is None:
            return ''
        if len(messages) == 1:
            # Single message: no framing needed.
            return messages[0].encode('utf-8')
        payload = u''.join([(u'\ufffd%d\ufffd%s' % (len(p), p))
                            for p in messages if p is not None])
        # FIXME: why is it so that we must filter None from here ? How
        # is it even possible that a None gets in there ?
        return payload.encode('utf-8')
    def decode_payload(self, payload):
        """This function can extract multiple messages from one HTTP payload.
        Some times, the XHR/JSONP/.. transports can pack more than one message
        on a single packet. They are encoding following the WebSocket
        semantics, which need to be reproduced here to unwrap the messages.
        The semantics are:
        \ufffd + [length as a string] + \ufffd + [payload as a unicode string]
        This function returns a list of messages, even though there is only
        one.
        Inspired by socket.io/lib/transports/http.js
        """
        payload = payload.decode('utf-8')
        if payload[0] == u"\ufffd":
            # Framed form: peel off <len><msg> pairs until exhausted.
            ret = []
            while len(payload) != 0:
                len_end = payload.find(u"\ufffd", 1)
                length = int(payload[1:len_end])
                msg_start = len_end + 1
                msg_end = length + msg_start
                message = payload[msg_start:msg_end]
                ret.append(message)
                payload = payload[msg_end:]
            return ret
        # Unframed payload is a single message.
        return [payload]
    def do_exchange(self, socket, request_method):
        """Handle one HTTP exchange for this socket."""
        if not socket.connection_established:
            # Runs only the first time we get a Socket opening
            self.start_response("200 OK", [
                ("Connection", "close"),
            ])
            self.write("1::")  # 'connect' packet
            return
        elif request_method in ("GET", "POST", "OPTIONS"):
            # Dispatch to the matching handler method.
            return getattr(self, request_method.lower())(socket)
        else:
            raise Exception("No support for the method: " + request_method)
class JSONPolling(XHRPollingTransport):
    """XHR polling variant that wraps payloads in a JSONP callback."""
    def __init__(self, handler, config):
        super(JSONPolling, self).__init__(handler, config)
        self.content_type = ("Content-Type", "text/javascript; charset=UTF-8")
    def _request_body(self):
        """Unwrap the JSONP envelope (d="...") around the posted data."""
        data = super(JSONPolling, self)._request_body()
        # resolve %20%3F's, take out wrapping d="...", etc..
        data = urllib.unquote_plus(data)[3:-1] \
            .replace(r'\"', '"') \
            .replace(r"\\", "\\")
        # For some reason, in case of multiple messages passed in one
        # query, IE7 sends it escaped, not utf-8 encoded. This dirty
        # hack handled it
        if data[0] == "\\":
            data = data.decode("unicode_escape").encode("utf-8")
        return data
    def write(self, data):
        """Just quote out stuff before sending it out"""
        # "i" is the client-supplied callback index in the query string.
        args = urlparse.parse_qs(self.handler.environ.get("QUERY_STRING"))
        if "i" in args:
            i = args["i"]
        else:
            i = "0"
        # TODO: don't we need to quote this data in here ?
        super(JSONPolling, self).write("io.j[%s]('%s');" % (i, data))
class XHRMultipartTransport(XHRPollingTransport):
    """Streaming transport that pushes payloads as multipart/x-mixed-replace
    chunks over a kept-alive GET response."""
    def __init__(self, handler):
        # Bug fix: the super() call previously named JSONPolling (copy-paste),
        # initializing the wrong class in the MRO.
        # NOTE(review): the parent chain appears to expect (handler, config);
        # only handler is passed here — confirm this transport's construction
        # path before relying on it.
        super(XHRMultipartTransport, self).__init__(handler)
        self.content_type = (
            "Content-Type",
            "multipart/x-mixed-replace;boundary=\"socketio\""
        )
    def do_exchange(self, socket, request_method):
        """Dispatch GET (server->client stream) and POST (client messages)."""
        if request_method == "GET":
            return self.get(socket)
        elif request_method == "POST":
            return self.post(socket)
        else:
            raise Exception("No support for such method: " + request_method)
    def get(self, socket):
        """Open the multipart stream and spawn a greenlet feeding chunks."""
        header = "Content-Type: text/plain; charset=UTF-8\r\n\r\n"
        self.start_response("200 OK", [("Connection", "keep-alive")])
        self.write_multipart("--socketio\r\n")
        self.write_multipart(header)
        self.write_multipart(str(socket.sessid) + "\r\n")
        self.write_multipart("--socketio\r\n")
        def chunk():
            while True:
                payload = self.get_messages_payload(socket)
                if not payload:
                    # That would mean the call to Queue.get() returned Empty,
                    # so it was in fact killed, since we pass no timeout=..
                    return
                else:
                    try:
                        self.write_multipart(header)
                        self.write_multipart(payload)
                        self.write_multipart("--socketio\r\n")
                    except socket.error:
                        # The client might try to reconnect, even with a socket
                        # error, so let's just let it go, and not kill the
                        # socket completely. Other processes will ensure
                        # we kill everything if the user expires the timeouts.
                        #
                        # WARN: this means that this payload is LOST, unless we
                        # decide to re-inject it into the queue.
                        return
        socket.spawn(chunk)
class WebsocketTransport(BaseTransport):
def do_exchange(self, socket, request_method):
websocket = self.handler.environ['wsgi.websocket']
websocket.send("1::") # 'connect' packe |
"""
Django settings for djangoecommerce project.
Generated by 'django-admin startproject' using Django 1.9.8.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
import dj_database_url
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
PROJECT_ROOT = os.path.dirname(os.path.abspath(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): the key is hard-coded; move it to an environment variable
# before deploying.
SECRET_KEY = 'qg(sw)grt&2v+++odrz%zac+h*2f@gyd*szcov1u2x$=ul%svz'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# NOTE(review): this value is overridden by ALLOWED_HOSTS = ['*'] further down.
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    # libs
    'widget_tweaks',
    # apps
    'core',
    'accounts',
    'catalog',
]
# NOTE(review): MIDDLEWARE_CLASSES is the pre-Django-1.10 setting name;
# fine for the 1.9 project this file was generated for.
MIDDLEWARE_CLASSES = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
    'core.middleware.LogMiddleware',
]
ROOT_URLCONF = 'djangoecommerce.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
                # apps
                'catalog.context_processors.categories',
            ],
        },
    },
]
WSGI_APPLICATION = 'djangoecommerce.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
# SQLite default; overridden below by $DATABASE_URL when present.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'pt-br'
TIME_ZONE = 'America/Recife'
USE_I18N = True
USE_L10N = True
USE_TZ = True
STATIC_URL = '/static/'
# Update database configuration with $DATABASE_URL.
db_from_env = dj_database_url.config(conn_max_age=500)
DATABASES['default'].update(db_from_env)
# Honor the 'X-Forwarded-Proto' header for request.is_secure()
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
# Allow all host headers
# NOTE(review): overrides the earlier ALLOWED_HOSTS = []; '*' is unsafe
# outside development/Heroku-style deployments.
ALLOWED_HOSTS = ['*']
STATIC_ROOT = os.path.join(PROJECT_ROOT, 'staticfiles')
# auth
LOGIN_REDIRECT_URL = 'accounts:index'
LOGIN_URL = 'login'
AUTH_USER_MODEL = 'accounts.User'
AUTHENTICATION_BACKENDS = [
    'django.contrib.auth.backends.ModelBackend',
    'accounts.backends.ModelBackend',
]
# Messages
# Map Django message levels onto Bootstrap alert class names.
from django.contrib.messages import constants as message_constants
MESSAGE_TAGS = {
    message_constants.DEBUG: 'debug',
    message_constants.INFO: 'info',
    message_constants.SUCCESS: 'success',
    message_constants.WARNING: 'warning',
    message_constants.ERROR: 'danger',
}
# Optional machine-local overrides; silently skipped when absent.
try:
    from .local_settings import *
except ImportError:
    pass
|
from django.contrib.auth import get_user_model
from django.db import transaction
from django.db.models.signals import pre_delete
from django.dispatch import Signal, receiver
from misago.categories.models import Category
from misago.categories.signals import delete_category_content, move_category_content
from misago.core.pgutils import batch_delete, batch_update
from misago.users.signals import delete_user_content, username_changed
from .models import Attachment, Post, PostEdit, PostLike, Thread, Poll, PollVote
# Signals emitted by the threads app; receivers below (and in other apps)
# react to thread/post lifecycle events.
delete_post = Signal()
delete_thread = Signal()
merge_post = Signal(providing_args=["other_post"])
merge_thread = Signal(providing_args=["other_thread"])
move_post = Signal()
move_thread = Signal()
remove_thread_participant = Signal(providing_args=["user"])
"""
Signal handlers
"""
@receiver(merge_thread)
def merge_threads_posts(sender, **kwargs):
    """Move every post of the merged thread into the receiving thread."""
    source_thread = kwargs['other_thread']
    source_thread.post_set.update(category=sender.category, thread=sender)
@receiver(merge_post)
def merge_posts(sender, **kwargs):
    """Copy mention relations from the merged post onto the surviving post."""
    surviving_post = kwargs['other_post']
    for mentioned_user in sender.mentions.iterator():
        surviving_post.mentions.add(mentioned_user)
@receiver(move_thread)
def move_thread_content(sender, **kwargs):
    """Keep all rows related to the moved thread in the thread's category."""
    for related_model in (Post, PostEdit, PostLike, Poll, PollVote):
        related_model.objects.filter(thread=sender).update(category=sender.category)
@receiver(delete_category_content)
def delete_category_threads(sender, **kwargs):
    """Delete all threads and posts belonging to the category (`sender`)."""
    sender.thread_set.all().delete()
    sender.post_set.all().delete()
@receiver(move_category_content)
def move_category_threads(sender, **kwargs):
    """Reassign all of the category's content rows to the new category."""
    destination = kwargs['new_category']
    for content_model in (Thread, Post, PostEdit, PostLike, Poll, PollVote):
        content_model.objects.filter(category=sender).update(category=destination)
@receiver(delete_user_content)
def delete_user_threads(sender, **kwargs):
    """Delete the user's threads and posts, then resynchronize the affected
    threads and categories' counters."""
    recount_categories = set()
    recount_threads = set()
    # Delete in small batches, one transaction per object, to avoid
    # long-running transactions on large accounts.
    for thread in batch_delete(sender.thread_set.all(), 50):
        recount_categories.add(thread.category_id)
        with transaction.atomic():
            thread.delete()
    for post in batch_delete(sender.post_set.all(), 50):
        recount_categories.add(post.category_id)
        recount_threads.add(post.thread_id)
        with transaction.atomic():
            post.delete()
    # Recount threads first: category synchronization reads thread state.
    if recount_threads:
        changed_threads_qs = Thread.objects.filter(id__in=recount_threads)
        for thread in batch_update(changed_threads_qs, 50):
            thread.synchronize()
            thread.save()
    if recount_categories:
        for category in Category.objects.filter(id__in=recount_categories):
            category.synchronize()
            category.save()
@receiver(username_changed)
def update_usernames(sender, **kwargs):
    """Propagate a username/slug change into every denormalized copy of the
    name stored on thread, post, attachment and poll rows."""
    Thread.objects.filter(starter=sender).update(
        starter_name=sender.username,
        starter_slug=sender.slug
    )
    Thread.objects.filter(last_poster=sender).update(
        last_poster_name=sender.username,
        last_poster_slug=sender.slug
    )
    # Posts denormalize only the poster's name (no slug column here).
    Post.objects.filter(poster=sender).update(poster_name=sender.username)
    Post.objects.filter(last_editor=sender).update(
        last_editor_name=sender.username,
        last_editor_slug=sender.slug
    )
    PostEdit.objects.filter(editor=sender).update(
        editor_name=sender.username,
        editor_slug=sender.slug
    )
    PostLike.objects.filter(user=sender).update(
        user_name=sender.username,
        user_slug=sender.slug
    )
    Attachment.objects.filter(uploader=sender).update(
        uploader_name=sender.username,
        uploader_slug=sender.slug
    )
    Poll.objects.filter(poster=sender).update(
        poster_name=sender.username,
        poster_slug=sender.slug
    )
    PollVote.objects.filter(voter=sender).update(
        voter_name=sender.username,
        voter_slug=sender.slug
    )
@receiver(pre_delete, sender=get_user_model())
def remove_unparticipated_private_threads(sender, **kwargs):
    """Before a user is deleted, remove private threads in which they were
    the only participant (the thread would otherwise become orphaned)."""
    threads_qs = kwargs['instance'].private_thread_set.all()
    for thread in batch_update(threads_qs, 50):
        if thread.participants.count() == 1:
            with transaction.atomic():
                thread.delete()
|
from django.core import exceptions
from devserver.logger import GenericLogger
MODULES = []
def load_modules():
    """(Re)build the global MODULES list from settings.DEVSERVER_MODULES.

    Each entry is a dotted "package.module.ClassName" path; every class is
    instantiated with a GenericLogger and appended to MODULES.

    Raises:
        exceptions.ImproperlyConfigured: for a malformed path, a failed
            import, or a module missing the named class.
    """
    global MODULES
    MODULES = []
    # Imported lazily so Django settings are configured before access.
    from devserver import settings
    for path in settings.DEVSERVER_MODULES:
        try:
            name, class_name = path.rsplit('.', 1)
        except ValueError:
            # Bug-risk fix: the original used Python-2-only `raise E, msg`
            # syntax throughout; the call form below is valid on 2 and 3.
            raise exceptions.ImproperlyConfigured('%s isn\'t a devserver module' % path)
        try:
            module = __import__(name, {}, {}, [''])
        except ImportError as e:
            raise exceptions.ImproperlyConfigured('Error importing devserver module %s: "%s"' % (name, e))
        try:
            cls = getattr(module, class_name)
        except AttributeError:
            raise exceptions.ImproperlyConfigured('Error importing devserver module "%s" does not define a "%s" class' % (name, class_name))
        # Any instantiation failure bubbles up unchanged (no handling needed,
        # so the previous try/except-raise wrapper was removed as dead code).
        instance = cls(GenericLogger(cls))
        MODULES.append(instance)
# Eagerly populate MODULES the first time this module is imported.
if not MODULES:
    load_modules()
|
#########################################################################
#
# Copyright (C) 2012 OpenPlans
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program | . If not, see <http://www.gnu.org/licenses/>.
#
#########################################################################
from geonode.catalogue.backends.generic import CatalogueBackend \
as GenericCatalogueBackend
class CatalogueBackend(GenericCatalogueBackend):
    """GeoNetwork CSW Backend"""
    def __init__(self, *args, **kwargs):
        # Extend the generic CSW backend and advertise the metadata formats
        # this GeoNetwork catalogue serves.
        super(CatalogueBackend, self).__init__(*args, **kwargs)
        self.catalogue.formats = ['Dublin Core', 'ISO']
|
less.lib.config.get()
# Limits on how much error context is captured per report.
MAX_VARIABLE_REPR = 250
MAX_LOCALS = 100
NUM_FRAMES_TO_SAVE = 20
# Backend servers ("host:port"); populated by set_hostports().
HOSTPORT_INFO = list()
# Compiled regex of variable names to scrub; set by install_scrubbers().
SCRUBBED_VARIABLES_REGEX = None
# Once an error occurs this often it is cached and reported less frequently.
CACHE_ERRORS_AFTER_N_OCCURRENCES = 10
REPORT_AFTER_N_MILLIS = 10 * 60 * 1000  # 10 minutes
LRU_CACHE_SIZE = 200
ERROR_CACHE = ExpiringLRUCache(size=LRU_CACHE_SIZE)
class HostportInfo(object):
    """Connection state and exponential backoff tracking for one backend host."""
    def __init__(self, hostport):
        """Parse a "host:port" string; raises ValueError on a bad port."""
        host, port = hostport.split(":")
        self.host = host
        self.port = int(port)
        self.backoff_ms = 0  # epoch-ms until which the host should be avoided
        self.consecutive_connection_errors = 0
    def increment_backoff(self):
        """Record one more connection error and extend the backoff window.

        Bug fix: the exponent was previously floored with max(12, n + 1),
        which jumped straight to 2**12 on the very first error; min() caps
        the exponent at 12 as intended.
        """
        self.consecutive_connection_errors = min(12, self.consecutive_connection_errors + 1)
        backoff = 1000 * random.randint(1, 2 ** self.consecutive_connection_errors)
        self.backoff_ms = _get_epoch_ms() + backoff
    def decrement_backoff(self):
        """Halve the error count after a success; keep a shrinking backoff
        window while any errors remain on record."""
        self.consecutive_connection_errors = int(self.consecutive_connection_errors / 2)
        if self.consecutive_connection_errors > 0:
            backoff = 1000 * random.randint(1, 2 ** self.consecutive_connection_errors)
            self.backoff_ms = _get_epoch_ms() + backoff
class CachedErrorInfo(object):
    """Bookkeeping for one distinct error signature in the LRU cache.

    Tracks how often the error occurred and when it was last reported so
    the client can throttle duplicate reports of the same traceback.
    """

    def __init__(self):
        now = _get_epoch_ms()
        self.last_report_ts = now
        self.last_occur_ts = now
        self.curr_count = 0
        self.last_report_count = 0

    @classmethod
    def get_hash_key(cls, stack_lines):
        """Return an md5 digest identifying a traceback by its (file, line) pairs."""
        digest = hashlib.md5()
        for frame in stack_lines:
            digest.update(frame.filename.encode('utf8'))
            digest.update(str(frame.line_number).encode('utf8'))
        return digest.digest()

    def increment(self):
        """Note one more occurrence of this error."""
        self.last_occur_ts = _get_epoch_ms()
        self.curr_count += 1

    def mark_reported(self):
        """Record that a report was just sent; return occurrences since last report."""
        self.last_report_ts = _get_epoch_ms()
        newly_seen = self.curr_count - self.last_report_count
        self.last_report_count = self.curr_count
        return newly_seen

    def should_report(self):
        """Decide whether this occurrence warrants a report to the backend.

        True when any of the following holds:
          * the error is still below the caching threshold,
          * the last report is older than REPORT_AFTER_N_MILLIS, or
          * the occurrence count is an exact power of two.
        """
        below_cache_threshold = self.curr_count <= CACHE_ERRORS_AFTER_N_OCCURRENCES
        report_overdue = self.last_report_ts < (_get_epoch_ms() - REPORT_AFTER_N_MILLIS)
        log_count = math.log(self.curr_count, 2)
        count_is_power_of_two = int(log_count) == log_count
        return below_cache_threshold or report_overdue or count_is_power_of_two
def _get_epoch_ms():
return int(time.time() * 1000)
def set_hostports(hostports):
    """Replace the global list of flawless backend hosts.

    Args:
        hostports: list or tuple of "host:port" strings.

    Raises:
        ValueError: if `hostports` is not a list or tuple.
    """
    # isinstance is the idiomatic check and also accepts list/tuple subclasses
    # (the old `type(x) not in [tuple, list]` rejected them).
    if not isinstance(hostports, (tuple, list)):
        raise ValueError("hostports must be a list or tuple")
    global HOSTPORT_INFO
    HOSTPORT_INFO = [HostportInfo(hp) for hp in hostports]
def install_scrubbers(variables_regex):
    """Compile and install the regex used to scrub sensitive variable names.

    Variable names matching this pattern are replaced by '**scrubbed**'
    in error reports (see _myrepr).
    """
    global SCRUBBED_VARIABLES_REGEX
    compiled = re.compile(variables_regex)
    SCRUBBED_VARIABLES_REGEX = compiled
def _get_backend_host():
    """Pick a random backend host, lazily seeding the list from config.

    Returns:
        A HostportInfo chosen at random, or None when no backend hostports
        are configured.
    """
    if config.flawless_hostports and not HOSTPORT_INFO:
        set_hostports(config.flawless_hostports)
    if HOSTPORT_INFO:
        return random.choice(HOSTPORT_INFO)
    return None
def _get_service():
    """Build a thrift client for a randomly selected flawless backend.

    Returns:
        (client, transport, hostport_info) tuple, or (None, None, None) when
        no backend hostport is configured (a RuntimeWarning is emitted).
        The transport is NOT opened here; callers must open/close it.
    """
    hostport_info = _get_backend_host()
    if not hostport_info:
        warnings.warn("Unable to record error: flawless server hostport not set", RuntimeWarning)
        return None, None, None
    tsocket = TSocket.TSocket(hostport_info.host, hostport_info.port)
    tsocket.setTimeout(2000)  # 2 second timeout
    # Framed transport + binary protocol must match the server's expectations.
    transport = TTransport.TFramedTransport(tsocket)
    protocol = TBinaryProtocol.TBinaryProtocol(transport)
    client = Flawless.Client(protocol)
    return client, transport, hostport_info
def _send_request(req):
    """Best-effort delivery of an error report to a flawless backend.

    Skips sending while the chosen host is still inside its backoff window.
    Thrift-level failures (TException) extend the host's backoff and are
    logged instead of raised; a successful send shrinks the backoff.
    """
    # Try to send the request. If there are too many connection errors, then backoff
    client, transport, hostport_info = _get_service()
    try:
        # Only send when we actually got a client AND the host's backoff
        # deadline has passed.
        if all([client, transport, hostport_info]) and _get_epoch_ms() >= hostport_info.backoff_ms:
            transport.open()
            client.record_error(req)
            hostport_info.decrement_backoff()
    except TException as e:
        # Error reporting must not break the host application: back off
        # this host and log, but do not propagate thrift failures.
        hostport_info.increment_backoff()
        log.exception(e)
    finally:
        # transport may be None when no backend is configured.
        if transport and transport.isOpen():
            transport.close()
def _myrepr(var_name, value):
try:
if SCRUBBED_VARIABLES_REGEX and SCRUBBED_VARIABLES_REGEX.match(var_name):
return '**scrubbed**'
repr_str = repr(value)
return repr_str[:MAX_VARIABLE_REPR] + "..." * int(len(repr_str) > MAX_VARIABLE_REPR)
except:
return "Exception executing repr for this field"
def record_error(hostname, exc_info, preceding_stack=None, error_threshold=None, additional_info=None):
    ''' Helper function to record errors to the flawless backend

    Args:
        hostname: name of the reporting host, forwarded verbatim.
        exc_info: (type, value, traceback) tuple as returned by sys.exc_info().
        preceding_stack: optional pre-exception stack (as produced by
            traceback.extract_stack()) prepended to the reported traceback.
        error_threshold: optional threshold forwarded to the backend.
        additional_info: optional free-form context forwarded to the backend.
    '''
    # Flatten the traceback's linked list into a list of frames.
    stack = []
    exc_type, exc_value, sys_traceback = exc_info
    while sys_traceback is not None:
        stack.append(sys_traceback)
        sys_traceback = sys_traceback.tb_next
    stack_lines = []
    for row in preceding_stack or []:
        stack_lines.append(
            api_ttypes.StackLine(filename=os.path.abspath(row[0]), line_number=row[1],
                                 function_name=row[2], text=row[3])
        )
    # Convert each traceback frame; locals are captured only for the deepest
    # NUM_FRAMES_TO_SAVE frames (those closest to the raise site).
    for index, tb in enumerate(stack):
        filename = tb.tb_frame.f_code.co_filename
        func_name = tb.tb_frame.f_code.co_name
        lineno = tb.tb_lineno
        line = linecache.getline(filename, lineno, tb.tb_frame.f_globals)
        frame_locals = None
        if index >= (len(stack) - NUM_FRAMES_TO_SAVE):
            # Include some limits on max string length & number of variables to keep things from getting
            # out of hand
            frame_locals = dict((k, _myrepr(k, v)) for k, v in
                                list(tb.tb_frame.f_locals.items())[:MAX_LOCALS] if k != "self")
            # Also capture attributes of `self` as pseudo-locals "self.<attr>".
            if "self" in tb.tb_frame.f_locals and hasattr(tb.tb_frame.f_locals["self"], "__dict__"):
                frame_locals.update(dict(("self." + k, _myrepr(k, v)) for k, v in
                                    list(tb.tb_frame.f_locals["self"].__dict__.items())[:MAX_LOCALS]
                                    if k != "self"))
        stack_lines.append(
            api_ttypes.StackLine(filename=os.path.abspath(filename), line_number=lineno,
                                 function_name=func_name, text=line, frame_locals=frame_locals)
        )
    # Check LRU cache & potentially do not send error report if this client has already reported this error
    # several times.
    key = CachedErrorInfo.get_hash_key(stack_lines)
    info = ERROR_CACHE.get(key) or CachedErrorInfo()
    info.increment()
    ERROR_CACHE[key] = info
    if info.should_report():
        error_count = info.mark_reported()
        _send_request(
            api_ttypes.RecordErrorRequest(
                traceback=stack_lines,
                exception_message=repr(exc_value),
                exception_type=exc_type.__module__ + "." + exc_type.__name__,
                hostname=hostname,
                error_threshold=error_threshold,
                additional_info=additional_info,
                error_count=error_count,
            )
        )
def _safe_wrap(func):
safe_attrs = [attr for attr in functools.WRAPPER_ASSIGNMENTS if hasattr(func, attr)]
return functools.wraps(func, safe_attrs)
def _wrap_function_with_error_decorator(func,
save_current_stack_trace=True,
reraise_exception=True,
error_threshold=None):
preceding_stack = []
if save_current_stack_trace:
preceding_stack = traceback.extract_stack()
@_safe_wrap(func)
def wrapped_func_with_error_reporting(*args, **kwargs):
if not _get_backend_host():
warnings.warn("flawless server hostport not set", RuntimeWarning, stacklevel=2)
try:
return func(*args, **kwargs)
except:
type, value, sys_traceback = sys.exc_info()
# Check to try and prevent multiple reports of the same exception
if hasattr(value, "_flawless_already_caught"):
if reraise_exc | eption:
raise_(value, None, sys_traceback)
else:
return
# Get trackback & report it
hostname = socket.gethost | name()
record_ |
# -*- coding: utf-8 -*-
#
# PressMess processors.py.
# This file contains template processor functions.
# Copyright (C) 2013 Michał Kwiatkowski <michaelflowersky at gmail dot com>
# This file is released under the BSD license, see the COPYING file
from django.conf import settings
def disqus(request):
    """
    Hands over DISQUS shortname to template context.
    """
    context = {}
    context['disqus_shortname'] = settings.DISQUS_SHORTNAME
    return context
def pressmess(request):
    """
    Hands over PRESSMESS_TITLE, PRESSMESS_URL
    and PRESSMESS_HEADER to template context.
    """
    return dict(
        pressmess_title=settings.PRESSMESS_TITLE,
        pressmess_header=settings.PRESSMESS_HEADER,
        pressmess_url=settings.PRESSMESS_URL,
    )
d | ef meta(request):
"""
Hands over:
META_AUTHOR, META_DESCRIPTION, META_LANGUAGE and META_KEYWORDS
to template context.
"""
return {
'meta_author': settings.META_AUTHOR,
'meta_description_index': settings.META_DESCRIPTION_INDEX,
'meta_description_tags': settings.META_DESCRIPTION_TAGS,
'meta_language': settings.META_LANGUAGE,
'meta_keywords': settings.META_KEYWORDS,
} |
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
import sys
from .mbcharsetprober import MultiByteCharSetProber
from .codingstatemachine import CodingStateMachine
from .chardistribution import SJISDistributionAnalysis
from .jpcntx import SJISContextAnalysis
from .mbcssm import SJISSMModel
from . import constants
class SJISProber(MultiByteCharSetProber):
    """Shift-JIS charset prober combining a coding state machine, a character
    distribution analyser and a Japanese context analyser."""
    def __init__(self):
        MultiByteCharSetProber.__init__(self)
        self._mCodingSM = CodingStateMachine(SJISSMModel)
        self._mDistributionAnalyzer = SJISDistributionAnalysis()
        self._mContextAnalyzer = SJISContextAnalysis()
        self.reset()
    def reset(self):
        """Reset the base prober state and the context analyser."""
        MultiByteCharSetProber.reset(self)
        self._mContextAnalyzer.reset()
    def get_charset_name(self):
        # The context analyser decides the exact Shift-JIS variant name.
        return self._mContextAnalyzer.get_charset_name()
    def feed(self, aBuf):
        """Run a byte buffer through the state machine, forwarding completed
        characters to the context and distribution analysers.

        Returns the prober state after consuming the buffer.
        """
        aLen = len(aBuf)
        for i in range(0, aLen):
            codingState = self._mCodingSM.next_state(aBuf[i])
            if codingState == constants.eError:
                # Illegal byte sequence for Shift-JIS: rule this charset out.
                if constants._debug:
                    sys.stderr.write(self.get_charset_name()
                                     + ' prober hit error at byte ' + str(i)
                                     + '\n')
                self._mState = constants.eNotMe
                break
            elif codingState == constants.eItsMe:
                self._mState = constants.eFoundIt
                break
            elif codingState == constants.eStart:
                # A complete character was recognised; hand it to the analysers.
                charLen = self._mCodingSM.get_current_charlen()
                if i == 0:
                    # First byte of this buffer completes a character begun in
                    # the previous buffer (stashed in self._mLastChar).
                    self._mLastChar[1] = aBuf[0]
                    self._mContextAnalyzer.feed(self._mLastChar[2 - charLen:],
                                                charLen)
                    self._mDistributionAnalyzer.feed(self._mLastChar, charLen)
                else:
                    self._mContextAnalyzer.feed(aBuf[i + 1 - charLen:i + 3
                                                - charLen], charLen)
                    self._mDistributionAnalyzer.feed(aBuf[i - 1:i + 1],
                                                     charLen)
        # Remember the last byte for cross-buffer character assembly.
        self._mLastChar[0] = aBuf[aLen - 1]
        if self.get_state() == constants.eDetecting:
            # Early exit once we have enough evidence and high confidence.
            if (self._mContextAnalyzer.got_enough_data() and
                (self.get_confidence() > constants.SHORTCUT_THRESHOLD)):
                self._mState = constants.eFoundIt
        return self.get_state()
    def get_confidence(self):
        """Overall confidence is the max of context and distribution scores."""
        contxtCf = self._mContextAnalyzer.get_confidence()
        distribCf = self._mDistributionAnalyzer.get_confidence()
        return max(contxtCf, distribCf)
|
from . | import choose_deli | very_package
|
#!/usr/bin/env python
import os
import setuptools
def read_requires(bas | e):
path = os.path.join('tools', base)
requires = []
if not os.path.isfile(path):
return requires
with open(path, 'rb') as h:
for line in h.read.splitlines():
line = line.strip()
if len(line) == 0 or line.startswith("#"):
continue
requires.append(line)
return requires
# Distribution metadata for taskflow. The requirement lists are read at
# packaging time from files in the tools/ directory (see read_requires above).
setuptools.setup(
    name='taskflow',
    version='0.0.1',
    author='OpenStack',
    license='Apache Software License',
    description='Taskflow structured state management library.',
    long_description='The taskflow library provides core functionality that '
                     'can be used to build [resumable, reliable, '
                     'easily understandable, ...] highly available '
                     'systems which process workflows in a structured manner.',
    author_email='openstack-dev@lists.openstack.org',
    url='http://www.openstack.org/',
    tests_require=read_requires('test-requires'),
    install_requires=read_requires('pip-requires'),
    classifiers=[
        'Development Status :: 4 - Beta',
        'License :: OSI Approved :: Apache Software License',
        'Operating System :: POSIX :: Linux',
        'Programming Language :: Python :: 2.6', ],
)
|
# This file is distributed under the same license as the Dja | ngo package.
#
# The *_FORMAT strings use the Django date format syntax,
# see https://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'd F Y'
TIME_FORMAT = 'g.i.A'
# DATETIME_FORMAT =
YEAR_MONTH_FORMAT = 'F Y'
MONTH_DAY_FORMAT = 'j F'
SHORT_DATE_FORMAT = 'Y-m-d'
# SHORT_DATETIME_FORMAT =
# FIRST_DAY_OF_WEEK =
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see https://docs.python.org/library/datetime.html#strftime-strptime-behavior
# DATE_INPUT_FORMATS =
# TIME_INPUT_FORMATS =
# DATETIME_INPUT_FORMATS =
# NOTE(review): ',' as decimal and '.' as thousand separator -- confirm these
# match the locale this formats module belongs to (directory name not visible
# here).
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '.'
# NUMBER_GROUPING =
|
# -*- coding: utf-8 -
#
# This file is part of gunicorn released under the MIT license.
# See the NOTICE for more information.
import logging
import os
import pkg_resources
import sys
import ConfigParser
from paste.deploy import loadapp, loadwsgi
SERVER = loadwsgi.SERVER
from gunicorn.app.base import Application
from gunicorn.config import Config
class PasterBaseApplication(Application):
    """Shared behaviour for gunicorn applications driven by a Paste config."""
    def app_config(self):
        """Translate the paste server section into a gunicorn config dict.

        Loads the server context from self.cfgurl/self.relpath. Local
        'host'/'port' entries become gunicorn's 'bind'; remaining global and
        local keys that name a known gunicorn setting are copied through
        (local conf wins for overlapping keys since it is applied last).
        """
        cx = loadwsgi.loadcontext(SERVER, self.cfgurl, relative_to=self.relpath)
        gc, lc = cx.global_conf.copy(), cx.local_conf.copy()
        cfg = {}
        host, port = lc.pop('host', ''), lc.pop('port', '')
        if host and port:
            cfg['bind'] = '%s:%s' % (host, port)
        elif host:
            cfg['bind'] = host
        cfg['workers'] = int(lc.get('workers', 1))
        cfg['umask'] = int(lc.get('umask', 0))
        cfg['default_proc_name'] = gc.get('__file__')
        # Copy through only keys gunicorn actually knows about.
        for k, v in gc.items():
            if k not in self.cfg.settings:
                continue
            cfg[k] = v
        for k, v in lc.items():
            if k not in self.cfg.settings:
                continue
            cfg[k] = v
        return cfg
    def configure_logging(self):
        """Configure logging from the paste ini's [loggers] section when one
        exists; otherwise fall back to gunicorn's default logging setup."""
        if hasattr(self, "cfgfname"):
            self.logger = logging.getLogger('gunicorn')
            # from paste.script.command
            parser = ConfigParser.ConfigParser()
            parser.read([self.cfgfname])
            if parser.has_section('loggers'):
                if sys.version_info >= (2, 6):
                    from logging.config import fileConfig
                else:
                    # Use our custom fileConfig -- 2.5.1's with a custom Formatter class
                    # and less strict whitespace (which were incorporated into 2.6's)
                    from gunicorn.logging_config import fileConfig
                config_file = os.path.abspath(self.cfgfname)
                fileConfig(config_file, dict(__file__=config_file,
                    here=os.path.dirname(config_file)))
                return
        super(PasterBaseApplication, self).configure_logging()
class PasterApplication(PasterBaseApplication):
    """Gunicorn application that serves a WSGI app from a Paste config file."""

    def init(self, parser, opts, args):
        """Validate CLI arguments, resolve the config path and return the
        gunicorn configuration derived from it."""
        if len(args) != 1:
            parser.error("No application name specified.")
        cfg_path = os.path.abspath(os.path.normpath(
            os.path.join(os.getcwd(), args[0])))
        if not os.path.exists(cfg_path):
            parser.error("Config file not found: %s" % cfg_path)
        self.cfgurl = 'config:%s' % cfg_path
        self.relpath = os.path.dirname(cfg_path)
        self.cfgfname = cfg_path
        # Make modules next to the config importable, for both plain
        # imports and pkg_resources-based entry points.
        sys.path.insert(0, self.relpath)
        pkg_resources.working_set.add_entry(self.relpath)
        return self.app_config()

    def load(self):
        """Load the WSGI application described by the paste config."""
        return loadapp(self.cfgurl, relative_to=self.relpath)
class PasterServerApplication(PasterBaseApplication):
    """Gunicorn application created from a paster server entry point."""

    def __init__(self, app, gcfg=None, host="127.0.0.1", port=None, *args, **kwargs):
        """Build the gunicorn config from paster global conf and kwargs.

        Args:
            app: the WSGI application to serve.
            gcfg: paster global config dict; may contain '__file__'.
            host: bind host, or a "unix:" socket address.
            port: bind port; ignored for unix sockets.

        Exits the process with an error message when a config value is
        rejected by gunicorn.
        """
        self.cfg = Config()
        self.app = app
        self.callable = None
        gcfg = gcfg or {}
        cfgfname = gcfg.get("__file__")
        if cfgfname is not None:
            self.cfgurl = 'config:%s' % cfgfname
            self.relpath = os.path.dirname(cfgfname)
            self.cfgfname = cfgfname
        cfg = kwargs.copy()
        if port and not host.startswith("unix:"):
            bind = "%s:%s" % (host, port)
        else:
            bind = host
        cfg["bind"] = bind
        if gcfg:
            for k, v in gcfg.items():
                cfg[k] = v
            # BUG FIX: this assignment was previously dedented outside the
            # `if gcfg:` block, raising KeyError('__file__') whenever no
            # paster global config was supplied -- '__file__' only comes
            # from gcfg.
            cfg["default_proc_name"] = cfg['__file__']
        try:
            for k, v in cfg.items():
                if k.lower() in self.cfg.settings and v is not None:
                    self.cfg.set(k.lower(), v)
        except Exception as e:  # `except Exception, e` -> works on py2.6+ and py3
            sys.stderr.write("\nConfig error: %s\n" % str(e))
            sys.stderr.flush()
            sys.exit(1)
        self.configure_logging()

    def load_config(self):
        """Merge in settings parsed from the paste config file, if any."""
        if not hasattr(self, "cfgfname"):
            return
        cfg = self.app_config()
        for k, v in cfg.items():
            try:
                self.cfg.set(k.lower(), v)
            except Exception:
                # Surface which key was bad, then propagate.
                sys.stderr.write("Invalid value for %s: %s\n\n" % (k, v))
                raise

    def load(self):
        """Return the app from the paste config when present, else the app
        given at construction time."""
        if hasattr(self, "cfgfname"):
            return loadapp(self.cfgurl, relative_to=self.relpath)
        return self.app
def run():
    """\
    The ``gunicorn_paster`` command for launching Paster compatible
    applications like Pylons or Turbogears2
    """
    from gunicorn.app.pasterapp import PasterApplication
    application = PasterApplication("%prog [OPTIONS] pasteconfig.ini")
    application.run()
def paste_server(app, gcfg=None, host="127.0.0.1", port=None, *args, **kwargs):
    """\
    A paster server.

    The entry point in your paster ini file should look like this:

        [server:main]
        use = egg:gunicorn#main
        host = 127.0.0.1
        port = 5000
    """
    from gunicorn.app.pasterapp import PasterServerApplication
    server = PasterServerApplication(app, gcfg=gcfg, host=host, port=port, *args, **kwargs)
    server.run()
|
""" Tests for library reindex command """
import ddt
from django.core.management import call_command, CommandError
import mock
from xmodule.modulestore import ModuleStoreEnum
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from common.test.utils import nostderr
from xmodule.modulestore.tests.factories import CourseFactory, LibraryFactory
from opaque_keys import InvalidKeyError
from contentstore.management.commands.reindex_library import Command as ReindexCommand
from contentstore.courseware_index import SearchIndexingError
@ddt.ddt
class TestReindexLibrary(ModuleStoreTestCase):
    """ Tests for library reindex command """
    def setUp(self):
        """ Setup method - create libraries and courses """
        super(TestReindexLibrary, self).setUp()
        self.store = modulestore()
        self.first_lib = LibraryFactory.create(
            org="test", library="lib1", display_name="run1", default_store=ModuleStoreEnum.Type.split
        )
        self.second_lib = LibraryFactory.create(
            org="test", library="lib2", display_name="run2", default_store=ModuleStoreEnum.Type.split
        )
        # Courses are created only to verify that course keys are rejected.
        self.first_course = CourseFactory.create(
            org="test", course="course1", display_name="run1", default_store=ModuleStoreEnum.Type.split
        )
        self.second_course = CourseFactory.create(
            org="test", course="course2", display_name="run1", default_store=ModuleStoreEnum.Type.split
        )
    # Dotted paths patched by the tests below so no real indexing happens.
    REINDEX_PATH_LOCATION = 'contentstore.management.commands.reindex_library.LibrarySearchIndexer.do_library_reindex'
    MODULESTORE_PATCH_LOCATION = 'contentstore.management.commands.reindex_library.modulestore'
    YESNO_PATCH_LOCATION = 'contentstore.management.commands.reindex_library.query_yes_no'
    def _get_lib_key(self, library):
        """ Gets library key as it is passed to indexer """
        return library.location.library_key
    def _build_calls(self, *libraries):
        """ Builds a list of mock.call instances representing calls to reindexing method """
        return [mock.call(self.store, self._get_lib_key(lib)) for lib in libraries]
    def test_given_no_arguments_raises_command_error(self):
        """ Test that raises CommandError for incorrect arguments """
        with self.assertRaises(SystemExit), nostderr():
            with self.assertRaisesRegexp(CommandError, ".* requires one or more arguments .*"):
                call_command('reindex_library')
    @ddt.data('qwerty', 'invalid_key', 'xblock-v1:qwe+rty')
    def test_given_invalid_lib_key_raises_not_found(self, invalid_key):
        """ Test that raises InvalidKeyError for invalid keys """
        with self.assertRaises(InvalidKeyError):
            call_command('reindex_library', invalid_key)
    def test_given_course_key_raises_command_error(self):
        """ Test that raises CommandError if course key is passed """
        with self.assertRaises(SystemExit), nostderr():
            with self.assertRaisesRegexp(CommandError, ".* is not a library key"):
                call_command('reindex_library', unicode(self.first_course.id))
        with self.assertRaises(SystemExit), nostderr():
            with self.assertRaisesRegexp(CommandError, ".* is not a library key"):
                call_command('reindex_library', unicode(self.second_course.id))
        # A single course key poisons the whole invocation even when a valid
        # library key is also supplied.
        with self.assertRaises(SystemExit), nostderr():
            with self.assertRaisesRegexp(CommandError, ".* is not a library key"):
                call_command(
                    'reindex_library',
                    unicode(self.second_course.id),
                    unicode(self._get_lib_key(self.first_lib))
                )
    def test_given_id_list_indexes_libraries(self):
        """ Test that reindexes libraries when given single library key or a list of library keys """
        with mock.patch(self.REINDEX_PATH_LOCATION) as patched_index, \
            mock.patch(self.MODULESTORE_PATCH_LOCATION, mock.Mock(return_value=self.store)):
            call_command('reindex_library', unicode(self._get_lib_key(self.first_lib)))
            self.assertEqual(patched_index.mock_calls, self._build_calls(self.first_lib))
            patched_index.reset_mock()
            call_command('reindex_library', unicode(self._get_lib_key(self.second_lib)))
            self.assertEqual(patched_index.mock_calls, self._build_calls(self.second_lib))
            patched_index.reset_mock()
            call_command(
                'reindex_library',
                unicode(self._get_lib_key(self.first_lib)),
                unicode(self._get_lib_key(self.second_lib))
            )
            expected_calls = self._build_calls(self.first_lib, self.second_lib)
            self.assertEqual(patched_index.mock_calls, expected_calls)
    def test_given_all_key_prompts_and_reindexes_all_libraries(self):
        """ Test that reindexes all libraries when --all key is given and confirmed """
        with mock.patch(self.YESNO_PATCH_LOCATION) as patched_yes_no:
            patched_yes_no.return_value = True
            with mock.patch(self.REINDEX_PATH_LOCATION) as patched_index, \
                mock.patch(self.MODULESTORE_PATCH_LOCATION, mock.Mock(return_value=self.store)):
                call_command('reindex_library', all=True)
            patched_yes_no.assert_called_once_with(ReindexCommand.CONFIRMATION_PROMPT, default='no')
            # Order is not guaranteed, hence assertItemsEqual rather than assertEqual.
            expected_calls = self._build_calls(self.first_lib, self.second_lib)
            self.assertItemsEqual(patched_index.mock_calls, expected_calls)
    def test_given_all_key_prompts_and_reindexes_all_libraries_cancelled(self):
        """ Test that does not reindex anything when --all key is given and cancelled """
        with mock.patch(self.YESNO_PATCH_LOCATION) as patched_yes_no:
            patched_yes_no.return_value = False
            with mock.patch(self.REINDEX_PATH_LOCATION) as patched_index, \
                mock.patch(self.MODULESTORE_PATCH_LOCATION, mock.Mock(return_value=self.store)):
                call_command('reindex_library', all=True)
            patched_yes_no.assert_called_once_with(ReindexCommand.CONFIRMATION_PROMPT, default='no')
            patched_index.assert_not_called()
    def test_fail_fast_if_reindex_fails(self):
        """ Test that fails on first reindexing exception """
        with mock.patch(self.REINDEX_PATH_LOCATION) as patched_index:
            patched_index.side_effect = SearchIndexingError("message", [])
            with self.assertRaises(SearchIndexingError):
                call_command('reindex_library', unicode(self._get_lib_key(self.second_lib)))
|
# -*- coding: utf-8 -*-
#
# two_neurons.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the ho | pe that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU | General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
"""
This is a simple example of two hh_psc_alpha_gap neurons connected
by a gap-junction. Please note that gap junctions are two-way connections:
In order to create an accurate gap-junction connection between two
neurons i and j two connections are required.
"""
import nest
import pylab
import numpy
nest.ResetKernel()
nest.SetKernelStatus({'resolution': 0.05})
# Waveform-relaxation ("preliminary iteration") settings used for the
# iterative solution of gap-junction coupled neurons.
nest.SetKernelStatus({'max_num_prelim_iterations': 15, 'prelim_interpolation_order': 3, 'prelim_tol': 0.0001})
neuron = nest.Create('hh_psc_alpha_gap',2)
vm = nest.Create('voltmeter', params={ "to_file": False, 'withgid': True, 'withtime': True, 'interval': 0.1})
# Drive both neurons with a constant current; depolarise neuron 1 so the
# pair starts out asymmetric.
nest.SetStatus(neuron, {'I_e': 100.})
nest.SetStatus([neuron[0]], {'V_m': -10.})
nest.Connect(vm, neuron, 'all_to_all')
"""
Use 'all_to_all' to connect neurons.
This is equivalent to:
nest.Connect([neuron[0]],[neuron[1]], 'one_to_one', syn_spec={'model': 'gap_junction', 'weight': 0.5})
nest.Connect([neuron[1]],[neuron[0]], 'one_to_one', syn_spec={'model': 'gap_junction', 'weight': 0.5})
"""
nest.Connect(neuron,neuron, 'all_to_all', syn_spec={'model': 'gap_junction', 'weight': 0.5})
nest.Simulate(351.)
# Split the voltmeter's flat event arrays into one trace per neuron
# (sender GIDs are 1-based).
senders_vm = nest.GetStatus(vm, 'events')[0]['senders']
times_vm = nest.GetStatus(vm, 'events')[0]['times']
V_vm = nest.GetStatus(vm, 'events')[0]['V_m']
V = [[] for i in range(2)]
times = [[] for i in range(2)]
for i in range(len(senders_vm)):
    V[senders_vm[i]-1].append(V_vm[i])
    times[senders_vm[i]-1].append(times_vm[i])
V = numpy.array(V)
times = numpy.array(times)
pylab.figure(1)
pylab.plot(times[0,:],V[0,:],'r-')
# NOTE(review): the second trace is plotted against neuron 1's time axis;
# with the shared 0.1 ms recording interval both time axes should be
# identical, but confirm before relying on it.
pylab.plot(times[0,:],V[1,:],'g-')
pylab.xlabel('time (ms)')
pylab.ylabel('membrane potential (mV)')
pylab.show()
from tkinter import *
from PIL import Image, ImageTk
from mandelbrot import *
from julia_set import *
class App(object):
    """Tk front-end for the fractal explorer: shows the rendered image and
    supports rectangle-drag zooming plus an iteration-count slider."""
    def __init__(self, master):
        # CANVAS
        # Complex-plane viewport corners (upper-left x/y, down-right x/y) and
        # output width come from the fractal module's default settings.
        self.ulx, self.uly, self.drx, self.dry, self.def_width = default_settings()[
            :5]
        self.image = ImageTk.PhotoImage(make_fractal(*default_settings()))
        self.canvas = Canvas(master, width=self.image.width(),
                             height=self.image.height())
        self.canvas.grid(column=2, row=1)
        self.canvas.create_image(0, 0, image=self.image, anchor=NW)
        # Mouse bindings implement the drag-to-zoom interaction.
        self.canvas.bind('<ButtonPress-1>', self.press)
        self.canvas.bind('<ButtonRelease-1>', self.release)
        self.canvas.bind('<B1-Motion>', self.motion)
        # ITERATIONS
        self.iterval = IntVar(value=50)
        self.iterslider = Scale(master, from_=0, to=2000, variable=self.iterval,
                                orient=HORIZONTAL, length=250)
        self.iterslider.grid(row=1, column=1)
        self.iterslider.bind('<ButtonRelease-1>', self.update_image)
    def press(self, event):
        # Remember where the zoom rectangle started (pixel coordinates).
        self.sx, self.sy = event.x, event.y
    def release(self, event):
        """Finish a zoom drag: map the selected pixel rectangle back into
        complex-plane coordinates and re-render."""
        self.ex, self.ey = event.x, event.y
        if self.ex == self.sx or self.ey == self.sy:
            # Zero-area selection -- ignore it.
            return
        self.sx, self.ex = sorted([self.ex, self.sx])
        self.sy, self.ey = sorted([self.ey, self.sy])
        sysw = self.drx - self.ulx
        sysh = self.uly - self.dry
        imw, imh = self.image.width(), self.image.height()
        oldx, oldy = self.ulx, self.dry
        # NOTE(review): pixel y grows downward while the imaginary axis grows
        # upward, hence sy/ey appear swapped when computing uly/dry --
        # confirm against make_fractal's coordinate convention.
        self.ulx = oldx + self.sx/imw*sysw
        self.uly = oldy + self.ey/imh*sysh
        self.drx = oldx + self.ex/imw*sysw
        self.dry = oldy + self.sy/imh*sysh
        self.update_image()
    def motion(self, event):
        # NOTE(review): self.sx only exists after the first press event; a
        # motion event before any press raises AttributeError, and the -1
        # sentinel checked here is never assigned anywhere -- confirm intent.
        if self.sx == -1:
            return
        ex, ey = event.x, event.y
        try:
            self.canvas.delete(self.rect)
        except:
            pass
        finally:
            # Redraw the rubber-band selection rectangle at the new corner.
            self.rect = self.canvas.create_rectangle((self.sx, self.sy, ex, ey), fill='',
                                                     outline='white')
    def update_image(self, *args):
        """Re-render the fractal for the current viewport and iteration count.
        Accepts and ignores extra args so it can double as a Tk callback."""
        img = make_fractal(self.ulx, self.uly, self.drx, self.dry, self.def_width,
                           self.iterval.get())
        self.image = ImageTk.PhotoImage(img)
        self.canvas.config(width=self.image.width(),
                           height=self.image.height())
        self.canvas.create_image(0, 0, image=self.image, anchor=NW)
# Build the main window and start the Tk event loop.
root = Tk()
root.wm_title("Fractal Explorer")
app = App(root)
root.mainloop()
|
from jabbapylib.distance.dist import lev_dist, ham_dist, similarity
def test_lev_ | dist():
assert lev_dist('ag-tcc', 'cgctca') == 3
assert lev_dist('GUMBO', 'GAMBOL') == 2
assert lev_dist('Google', 'Yahoo!') == 6
def test_ham_dist():
    """Hamming distance between equal-length strings."""
    assert ham_dist('toned', 'roses') == 3
def test_similarity():
    """Similarity counts matching positions (complement of Hamming distance)."""
    assert similarity('toned', 'roses') == 2
|
rick.org... (opción 1)')
else:
print('Recuerda que si tienes conocimientos de "SQLite" puedes abrir dicho archivo para "jugar" con tus datos xDDD')
print('Por ejemplo, con la aplicacion gratuita "DB Browser", la puedes encontrar aqui: http://sqlitebrowser.org/ \n')
print('Por otro lado, solo comentar que '+Fore.RED+Back.WHITE+Style.BRIGHT+'NO'+Style.RESET_ALL+' se envian tu claves personales CHPP.')
print('Estas claves se encuentran a salvo en otro archivo (auth.sqlite) y no se enviaran\n')
print('Enviamos pues '+Fore.GREEN+'"bigdata.sqlite"'+Style.RESET_ALL+' al servidor (s/n)?')
seguir = input('(por defecto n) >> ')
if seguir == 's' or seguir == 'S':
send.enviar_datos(basedatos, user)
print(Back.GREEN + Fore.BLACK + 'Envio completado con éxito!!' + Style.RESET_ALL)
print(Fore.GREEN+'Muchas Gracias por participar!')
else:
print('\nOk, pues mejor en otro momento..')
elif opcion == '3':
(mode, ino, dev, nlink, uid, gid, size, atime, mtime, ctime) = os.stat('bigdata.sqlite')
size = size/1024
conn = sqlite3.connect(basedatos)
cur = conn.cursor()
cur.execute('SELECT count(MatchID) FROM partidos')
numpartidos = cur.fetchone()[0]
cur.execute('SELECT count(SubPorteria) FROM eventos')
numeventos = cur.fetchone()[0]
cur.execute('SELECT count(PlayerID) FROM jugadores')
numjugadores = cur.fetchone()[0]
cur.execute('SELECT count(MatchID) FROM lesiones')
numlesiones = cur.fetchone()[0]
cur.execute('SELECT count(MatchID) FROM sustituciones')
numsus = cur.fetchone()[0]
cur.execute('SELECT count(MatchID) FROM tarjetas')
numtarjetas = cur.fetchone()[0]
cur.execute( 'SELECT max(MatchDate) FROM partidos')
fechamax = cur.fetchone()[0]
cur.close()
if numpartidos == 0:
fechamax = 'Ningun partido en la base'
print('\nLa base de partidos tiene un tamaño de',Back.BLACK+Fore.GREEN+str(size),'KB y contiene:')
print(Back.BLACK+Fore.GREEN+str(numpartidos),' partidos, el mas reciente de: '+Back.BLACK+Fore.GREEN+fechamax)
print(Back.BLACK+Fore.GREEN+str(numeventos),' eventos especiales (con las habilidades de tus jugadores implicados, no las rivales)')
print(Back.BLACK+Fore.GREEN+str(numjugadores),' jugadores (su especialidad y caracter)')
print(Back.BLACK+Fore.GREEN+str(numlesiones),' lesiones')
print(Back.BLACK+Fore.GREEN+str(numsus),' sustituciones')
print(Back.BLACK+Fore.GREEN+str(numtarjetas),' tarjetas\n')
print(Style.BRIGHT+Fore.GREEN+'IMPORTANTE, recuerda que:')
print('Las estadisticas mostradas a continuación son meramente orientativas.')
print('En esta primera versión del programa, no se pondera por el numero de slots ocupados!!')
print('Es decir, el % de aparicion mostrado no es fiable para ser extrapolado a nivel global.')
print('Además, se necesita una base de partidos más grande para llegar a buenas conclusiones.')
print('Pasa por la ',Style.BRIGHT+Fore.GREEN + 'federación "BigData"',' para ver/comentar/participar en los result | ados del estudio en detalle\n')
# Menu del estudio
while True:
if numpartidos == 0:
print(Fore.RED + Style.BRIGHT + 'Pero antes de nada, deberiamos recuperar algun partido de www. | hattrick.org... (opción 1)')
break
print('Que tipo de especialistas quieres ver?')
print(' 1.- Imprevisibles')
print(' 2.- Rápidos')
print(' 3.- Técnicos')
print(' 4.- Potentes')
print(' 5.- Extremos')
print(' 6.- Eventos de Equipo')
print(' 7.- Salir')
selecion = input('(por defecto 7) >> ')
if selecion == '1':
conn = sqlite3.connect(basedatos)
cur = conn.cursor()
cur.execute('SELECT count(MatchID) as Partidos_e05 from (select distinct MatchID from alineacion_all where Specialty = 4 and Pos < 106 and Pos > 99)')
Partidos_e05 = cur.fetchone()[0]
cur.execute('SELECT sum(maxMin) from (select MatchID, max(Minutos) as maxMin from (select * from alineacion_all where Specialty = 4 and Pos < 106 and Pos > 99) group by MatchID)')
Minutos_e05 = cur.fetchone()[0]
Partidos05_PondMin = Minutos_e05 / 90
cur.execute('SELECT count(EventTypeID) from eventos where EventTypeID = 105')
Gols05 = cur.fetchone()[0]
cur.execute('SELECT count(EventTypeID) from eventos where EventTypeID = 205')
Fallos05 = cur.fetchone()[0]
if Partidos05_PondMin == 0:
App = 0.0
else:
App = ((Gols05+Fallos05) / Partidos05_PondMin) * 100
if Gols05+Fallos05 == 0:
Con = 0.0
else:
Con = ((Gols05) / (Gols05+Fallos05)) * 100
print(Fore.YELLOW + Style.BRIGHT + '\nEv. Individual ID=05: Imprevisible Pase Largo - Porteros y defensas')
print(Minutos_e05, 'minutos en',Partidos_e05, 'partidos, es decir, en', Fore.GREEN + str("%.2f" % Partidos05_PondMin), 'partidos reales:')
print('Un total de',Fore.GREEN + str(Gols05+Fallos05),'eventos. Con', Fore.GREEN + str(Gols05),'goles.')
print('Es decir un',Fore.GREEN + str("%.2f" % App),'% de aparicion y un',Fore.GREEN + str("%.2f" % Con),'% de conversion global.\n')
cur.execute('SELECT count(MatchID) as Partidos_e06 from (select distinct MatchID from alineacion_all where Specialty = 4 and Pos > 105)')
Partidos_e06 = cur.fetchone()[0]
cur.execute('SELECT sum(maxMin) from (select MatchID, max(Minutos) as maxMin from (select * from alineacion_all where Specialty = 4 and Pos > 105) group by MatchID)')
Minutos_e06 = cur.fetchone()[0]
Partidos06_PondMin = Minutos_e06 / 90
cur.execute('SELECT count(EventTypeID) from eventos where EventTypeID = 106')
Gols06 = cur.fetchone()[0]
cur.execute('SELECT count(EventTypeID) from eventos where EventTypeID = 206')
Fallos06 = cur.fetchone()[0]
if Partidos06_PondMin == 0:
App = 0.0
else:
App = ((Gols06+Fallos06) / Partidos06_PondMin) * 100
if Gols06+Fallos06 == 0:
Con = 0.0
else:
Con = ((Gols06) / (Gols06+Fallos06)) * 100
print(Fore.YELLOW + Style.BRIGHT + '\nEv. Individual ID=06: Imprevisible Anotación - Extremos, Inners y Delanteros')
print(Minutos_e06, 'minutos en',Partidos_e06, 'partidos, es decir, en', Fore.GREEN + str("%.2f" % Partidos06_PondMin), 'partidos reales:')
print('Un total de',Fore.GREEN + str(Gols06+Fallos06),'eventos. Con', Fore.GREEN + str(Gols06),'goles.')
print('Es decir un',Fore.GREEN + str("%.2f" % App),'% de aparicion y un',Fore.GREEN + str("%.2f" % Con),'% de conversion global.\n')
cur.execute('SELECT count(MatchID) as Partidos_e08 from (select distinct MatchID from alineacion_all where Specialty = 4 and Pos > 100)')
Partidos_e08 = cur.fetchone()[0]
cur.execute('SELECT sum(maxMin) from (select MatchID, max(Minutos) as maxMin from (select * from alineacion_all where Specialty = 4 and Pos > 100) group by MatchID)')
Minutos_e08 = cur.fetchone()[0]
Partidos08_PondMin = Minutos_e08 / 90
cur.execute('SELECT count(EventTypeID) from eventos where EventTypeID = 108')
Gols08 = cur.fetchone()[0]
cur.execute('SELECT count(EventTypeID) from eventos where EventTypeID = 208')
Fallos08 = cur.f |
object.
Returns None if the tag is not set. Warning: EC2 tags are case-sensitive.
"""
tags = get_tags( ec, obj.id )
found = 0
for t in tags:
if t.name == tag:
found = 1
break
if found:
return t
else:
return None
def update_tag( obj, tag, val ):
"""
Given an EC2 resource object, a tag and a value, updates the given tag
to val.
"""
for x in range(0, 5):
error = False
try:
obj.add_tag( tag, val )
except:
error = True
e = sys.exc_info()[0]
print "Huh, trying again ({})".format(e)
time.sleep(5)
if not error:
print "Object {} successfully tagged.".format(obj)
break
return None
def init_region( r ):
    """
    Takes a region string. Connects to that region. Returns EC2Connection
    and VPCConnection objects in a tuple (VPCConnection first).
    """
    # Open both connection flavors against the requested region.
    vpc_conn = vpc.connect_to_region( r )
    ec2_conn = ec2.connect_to_region( r )
    return ( vpc_conn, ec2_conn )
def init_vpc( c, cidr ):
    """
    Takes VPCConnection object (which is actually a connection to a
    particular region) and a CIDR block string. Looks for our VPC in that
    region and returns the matching boto.vpc.vpc.VPC object; raises
    SpinupError when no VPC with that CIDR block exists.
    See:
    http://boto.readthedocs.org/en/latest/ref/vpc.html#boto.vpc.vpc.VPC
    """
    # Scan every VPC in the region for a matching CIDR block.
    our_vpc = next(
        ( v for v in c.get_all_vpcs() if v.cidr_block == cidr ), None )
    if our_vpc is None:
        raise SpinupError( "VPC {} not found".format(cidr) )
    return our_vpc
def init_subnet( c, vpc_id, cidr ):
    """
    Takes VPCConnection object, which is actually a connection to a
    region, and a CIDR block string. Looks for our subnet in that region.
    If subnet does not exist, creates it. Returns the subnet resource
    object on success, raises exception on failure.
    """
    # Reuse an existing subnet when one already covers this CIDR block.
    existing = next(
        ( s for s in c.get_all_subnets() if s.cidr_block == cidr ), None )
    if existing is not None:
        return existing
    # Not found: create the subnet inside the given VPC.
    return c.create_subnet( vpc_id, cidr )
def set_subnet_map_public_ip( ec, subnet_id ):
    """
    Takes ECConnection object and SubnetId string. Attempts to set the
    MapPublicIpOnLaunch attribute to True.
    FIXME: give credit to source
    """
    # ModifySubnetAttribute needs a newer API version than boto's default,
    # so switch versions around the call and restore afterwards.
    saved_version = ec.APIVersion
    ec.APIVersion = '2014-06-15'
    params = { 'SubnetId': subnet_id, 'MapPublicIpOnLaunch.Value': 'true' }
    ec.get_status( 'ModifySubnetAttribute', params, verb='POST' )
    ec.APIVersion = saved_version
    return None
def derive_ip_address( cidr_block, delegate, final8 ):
    """
    Given a CIDR block string, a delegate number, and an integer
    representing the final 8 bits of the IP address, construct and return
    the IP address derived from these values. For example, if cidr_block is
    10.0.0.0/16, the delegate number is 10, and the final8 is 8, the
    derived IP address will be 10.0.10.8.
    """
    # The first two octets of the CIDR block become the first two octets
    # of the derived address.
    match = re.match( r'\d+\.\d+', cidr_block )
    if not match:
        raise SpinupError( "{} passed to derive_ip_address() is not a CIDR block!".format(cidr_block) )
    return '{}.{}.{}'.format( match.group(0), delegate, final8 )
def get_master_instance( ec2_conn, subnet_id ):
    """
    Given EC2Connection object and Master Subnet id, check that there is
    just one instance running in that subnet - this is the Master. Raise
    exception if the number of instances is != 1.
    Return the Master instance object.
    """
    # (Docstring previously said "!= 0"; the code raises for both zero
    # and more-than-one instances.)
    instances = ec2_conn.get_only_instances( filters={ "subnet-id": subnet_id } )
    if 1 > len(instances):
        raise SpinupError( "There are no instances in the master subnet" )
    if 1 < len(instances):
        raise SpinupError( "There are too many instances in the master subnet" )
    return instances[0]
def template_token_subst( buf, key, val ):
    """
    Given a string (buf), a key (e.g. '@@MASTER_IP@@') and val, replace all
    occurrences of key in buf with val. Return the new string.
    """
    # Escape the key so any regex metacharacters in it match literally.
    return re.sub( re.escape( key ), str(val), buf )
def process_user_data( fn, vars = () ):
    """
    Given filename of user-data file and a sequence of environment
    variable names, replaces @@NAME@@ tokens with the values of the
    environment variables. Returns the user-data string on success,
    raises SpinupError when a named variable is missing from the
    environment.
    """
    # Default changed from a mutable [] to an immutable tuple; the
    # argument is only iterated, so callers see no difference.
    buf = read_user_data( fn )
    for e in vars:
        if e not in environ:
            raise SpinupError( "Missing environment variable {}!".format( e ) )
        buf = template_token_subst( buf, '@@'+e+'@@', environ[e] )
    return buf
def count_instances_in_subnet( ec, subnet_id ):
    """
    Given EC2Connection object and subnet ID, count number of instances
    in that subnet and return it.
    """
    matching = ec.get_only_instances( filters={ "subnet-id": subnet_id } )
    return len( matching )
def make_reservation( ec, ami_id, **kwargs ):
    """
    Given EC2Connection object, delegate number, AMI ID, as well as
    all the kwargs referred to below, make a reservation for an instance
    and return the registration object.
    """
    # Arguments forwarded verbatim to ec.run_instances().
    run_args = {
        key: kwargs[key]
        for key in ( 'key_name', 'subnet_id', 'instance_type',
                     'private_ip_address' )
    }
    # Master or minion?
    if kwargs['master']:
        # The master's user-data is used as-is.
        run_args['user_data'] = kwargs['user_data']
    else:
        # Minions get their user-data tokens substituted first.
        user_data = kwargs['user_data']
        for token, value in (
                ( '@@MASTER_IP@@', kwargs['master_ip'] ),
                ( '@@DELEGATE@@', kwargs['delegate_no'] ),
                ( '@@ROLE@@', kwargs['role'] ),
                ( '@@NODE_NO@@', kwargs['node_no'] ) ):
            user_data = template_token_subst( user_data, token, value )
        run_args['user_data'] = user_data
    # Make the reservation and return the reservation object.
    return ec.run_instances( ami_id, **run_args )
def wait_for_running( ec2_conn, instance_id ):
    """
    Given an instance id, wait for its state to change to "running".

    Polls every 5 seconds; once "running" is observed, sleeps one extra
    5-second interval before returning.
    """
    print "Waiting for {} running state".format( instance_id )
    while True:
        instances = ec2_conn.get_only_instances( instance_ids=[ instance_id ] )
        print "Current state is {}".format( instances[0].state )
        if instances[0].state != 'running':
            print "Sleeping for 5 seconds"
            time.sleep(5)
        else:
            # Give AWS a little extra settle time after "running" appears.
            print "Waiting another 5 seconds for good measure"
            time.sleep(5)
            break
def wait_for_available( ec2_conn, volume_id ):
    """
    Given a volume id, wait for its state to change to "available".

    Polls every 5 seconds until the volume status reads "available".
    """
    print "Waiting for {} available state".format( volume_id )
    while True:
        volumes = ec2_conn.get_all_volumes( volume_ids=[ volume_id ] )
        print "Current status is {}".format( volumes[0].status )
        if volumes[0].status != 'available':
            print "Sleeping for 5 seconds"
            time.sleep(5)
        else:
            break
def wait_for_detachment( ec2_conn, v_id, i_id ):
"""
Given a volume ID and an instance ID, wait for volume to
become detached.
"""
print "Waiting for volume {} to be detached from instnace {}".format(v_id, i_id)
while True:
attached_vol = ec2_conn.get_all_volumes(
filters={
"volume-id": v_id,
" |
"""
This is the config file for the Migration
There are 3 things to configure.
- the old Database to migrate from
- the new Database to save the migration
- FTP connection to save the files
"""
# Old Database.
# This is where the Data is taken from
dbOld = {
'host': "", # host ip
'port': 0, # port
'user': "", # username
'password': "", # password
'database': "" # name of the database
}
# New Database.
# This is w | here the Data will be stored
dbNew = {
'host': "", # host ip
'p | ort': 0, # port
'user': "", # username
'password': "", # password
'database': "" # name of the database
}
# FTP connection to save the files
ftpConnection = {
'host': "", # host ip
'user': "", # username
'password': "", # password
'directory': "" # directory where to save the files to
}
# Every post with these tags will not be migrated.
# e.g. ['TFAktuell', 'AMS']
remove_tags = []
|
import logging
import sys
import telnetlib
import traceback
import urllib

import cjson
from django import forms
from django.contrib.auth import authenticate, login
from django.http import HttpResponse
from django.shortcuts import render_to_response

from models import BinaryResource
from sana.mrs.models import Notification
from sana.mrs.openmrs import sendToOpenMRS
from sana.mrs.util import enable_logging
from sana.mrs.util import enable_logging
def chunk( seq, size, pad=None ):
    """Slice a list into consecutive disjoint 'chunks' of
    length equal to size. The last chunk is padded if necessary.

    Example: ::

        >>> list(chunk(range(1,10),3))
        [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
        >>> list(chunk(range(1,9),3))
        [[1, 2, 3], [4, 5, 6], [7, 8, None]]
        >>> list(chunk(range(1,8),3))
        [[1, 2, 3], [4, 5, 6], [7, None, None]]
        >>> list(chunk(range(1,10),1))
        [[1], [2], [3], [4], [5], [6], [7], [8], [9]]
        >>> list(chunk(range(1,10),9))
        [[1, 2, 3, 4, 5, 6, 7, 8, 9]]
        >>> for X in chunk([],3): print X
        >>>

    Parameters:
        seq
            The sequence to slice
        size
            The size of each chunk
        pad
            The fill *value* used to pad the final chunk up to size
            (not a length, as the old text suggested)
    """
    n = len(seq)
    mod = n % size      # length of the final, possibly short, chunk
    for i in xrange(0, n-mod, size):
        yield seq[i:i+size]
    if mod:
        # Top the short final chunk up to the full size with pad values.
        padding = [pad] * (size-mod)
        yield seq[-mod:] + padding
class FakeProcedureSubmitForm(forms.Form):
    """Encounter form for testing."""
    # Flat comma-separated question/answer pairs, decoded by chunk() in
    # procedure_submit.
    responses = forms.CharField(required=True,
                                help_text='question,answer,question,answer,..')
    procedure_id = forms.IntegerField(required=True, help_text="integers only")
    phone_id = forms.CharField(max_length=255)
    patient_id = forms.CharField(max_length=255)
    #data = forms.FileField(required=True)
def procedure_submit(request):
    """For testing encounter submission.

    Accepts a POSTed FakeProcedureSubmitForm plus an uploaded 'data'
    file, stores the file as a BinaryResource and forwards the
    question/answer pairs to OpenMRS, then re-renders the form page.
    """
    upload = request.FILES.get('data', None)
    print upload
    if request.method == 'POST' and upload is not None:
        # NOTE(review): the form is bound to request.POST only; the file
        # itself comes from request.FILES above, not via a form field.
        form = FakeProcedureSubmitForm(request.POST)
    else:
        form = FakeProcedureSubmitForm()
    if form.is_valid():
        print "valid"
        print form.cleaned_data
        phoneId = form.cleaned_data['phone_id']
        patientId = form.cleaned_data['patient_id']
        procedureId = form.cleaned_data['procedure_id']
        responses = form.cleaned_data['responses']
        # Persist the uploaded file as a BinaryResource.
        binary = BinaryResource(element_id='test',
                                content_type='',
                                procedure=procedureId)
        binary.data.save(upload.name, upload)
        binary.save()
        # responses is "q,a,q,a,..."; pair them up, padding a missing
        # trailing answer with the empty string.
        qas = {}
        for q,a in chunk(responses.split(','),2, pad=''):
            qas[q] = a
        # Map the test procedure id to its human-readable name.
        if procedureId == 1:
            procedureId = "Diagnose Cervical Cancer"
        sendToOpenMRS(patientId, phoneId, procedureId, str(binary.data.path), qas)
    return render_to_response("procedure_submit.html",
                              {'form': form})
def notification_submit(request):
    """Render the (static) notification submission test page."""
    return render_to_response("notification_submit.html")
@enable_logging
def list_notifications(request):
    """For synching notifications with mobile clients.

    Request Params
        username
            A valid username.
        password
            A valid password.

    Parameters:
        request
            A client request for patient list

    Returns a JSON HttpResponse with 'status' (SUCCESS/FAILURE) and
    'data' (the decoded notification list, or an error message).
    """
    logging.info("entering notification list proc")
    username = request.REQUEST.get('username', None)
    password = request.REQUEST.get('password', None)
    user = authenticate(username=username, password=password)
    if user is not None:
        try:
            data = Notification.objects.all()
            logging.info("we finished getting the notification list")
            response = {'status': 'SUCCESS',
                        'data': [cjson.decode(d.to_json()) for d in data],
                        }
        except Exception as e:
            # "except Exception, e" was Python-2-only syntax; "as e" works
            # on Python 2.6+ and 3.  sys/traceback are now imported at
            # module level -- previously they were referenced here without
            # any import, so this error path raised NameError.
            et, val, tb = sys.exc_info()
            trace = traceback.format_tb(tb)
            for tbm in trace:
                logging.error(tbm)
            logging.error("Got exception while fetching notification list: %s" % e)
            response = {
                'status': 'FAILURE',
                'data': "Problem while getting notification list: %s" % e,
            }
    else:
        logging.error('User not authenticated')
        response = {
            'status': 'FAILURE',
            'data': 'User not authenticated',
        }
    return HttpResponse(cjson.encode(response), content_type=("application/json; charset=utf-8"))
def home(request):
    """Top level url

    Displays ::
        Sanamobile MDS : Online
    """
    # Simple liveness-check endpoint.
    return HttpResponse('Sanamobile MDS : Online')
|
import tensorflow as tf
class GRU2DCell(tf.contrib.rnn.RNNCell):
    """Gated Recurrent Unit cell (cf. http://arxiv.org/abs/1406.1078).

    2D-convolutional variant: inputs and state are spatial feature maps
    and the update gate is computed with a 1x1 convolution.
    """

    def __init__(self, num_units, channels):
        # num_units: feature depth of the state; channels: depth of inputs.
        self._num_units = num_units
        self._channels = channels

    @property
    def state_size(self):
        return self._num_units

    @property
    def output_size(self):
        return self._num_units

    def make_var(self, name, shape, initializer=None, trainable=True):
        # Thin wrapper so gate variables live in the current variable scope.
        return tf.get_variable(name, shape, initializer=initializer, trainable=trainable)

    # inputs: [batch_size, height, width, channels]
    # state: [batch_size, height, width, num_units]
    def __call__(self, inputs, state, weights, scope=None):
        """One recurrence step; returns (output, new_state, new_weights).

        NOTE(review): unlike a standard RNNCell, this also threads a
        running `weights` tensor through the call and blends state with
        inputs as a gate-weighted running average -- confirm the intended
        semantics with callers.
        """
        with tf.variable_scope(scope or type(self).__name__):  # "GRUCell"
            inputs_shape = tf.shape(inputs)
            inputs = tf.reshape(inputs, [inputs_shape[0], inputs_shape[1], inputs_shape[2], self._channels])
            with tf.variable_scope("Gates"):  # Reset gate and update gate.
                # concat inputs and state along the channel axis
                inputs_state = tf.concat(axis=3, values=[inputs, state])
                # define the variables (zero-initialised kernel and biases)
                init_kernel = tf.constant_initializer(0.0)
                init_biases = tf.constant_initializer(0.0)
                kernel = self.make_var('weights', [1, 1, self._num_units + self._channels, self._num_units], init_kernel)
                biases = self.make_var('biases', [self._num_units], init_biases)
                # 1x1 2D convolution producing the update gate u in (0, 1)
                conv = tf.nn.conv2d(inputs_state, kernel, [1, 1, 1, 1], padding='SAME')
                u = tf.nn.sigmoid(tf.nn.bias_add(conv, biases))
                # ru = tf.nn.sigmoid(ru)
                # r, u = tf.split(3, 2, ru)
            '''
            with tf.variable_scope("Candidate"):
                inputs_rstate = tf.concat(3, [inputs, tf.mul(r, state)])
                # define the variables
                init_biases_1 = tf.constant_initializer(0.0)
                kernel_1 = self.make_var('weights', [3, 3, self._num_units + self._channels, self._num_units])
                biases_1 = self.make_var('biases', [self._num_units], init_biases_1)
                # 2D convolution
                conv_1 = tf.nn.conv2d(inputs_rstate, kernel_1, [1, 1, 1, 1], padding='SAME')
                c = tf.nn.tanh(tf.nn.bias_add(conv_1, biases_1))
            '''
            # Accumulate gate mass and blend previous state with inputs
            # (gate-weighted running average, clipped by ReLU).
            new_w = weights + u
            new_h = tf.nn.relu(tf.div(weights * state + u * inputs, new_w))
        return new_h, new_h, new_w
|
#!/usr/bin/env python
"""
main.py -- Udacity conference server-side Python App Engine
HTTP controller handlers for memcache & task queue access
$Id$
created by wesc on 2014 may 24
"""
__author__ = 'wesc+api@google.com (Wesley Chun)'
import webapp2
from google.appengine.api import app_identity
from google.appengine.api import mail
from conference import ConferenceApi
class SetAnnouncementHandler(webapp2.RequestHandler):
    def get(self):
        """Set Announcement in Memcache."""
        # Delegate to the API's cache refresher; 204 = success, no body.
        ConferenceApi._cacheAnnouncement()
        self.response.set_status(204)
class SendConfirmationEmailHandler(webapp2.RequestHandler):
    def post(self):
        """Send email confirming Conference creation."""
        # BUG FIX: app_identity has no "sget_application_id" -- the
        # correct API is get_application_id(), used here to build the
        # app's default appspotmail.com sender address.
        mail.send_mail(
            'noreply@%s.appspotmail.com' % (
                app_identity.get_application_id()),     # from
            self.request.get('email'),                  # to
            'You created a new Conference!',            # subj
            'Hi, you have created a following '         # body
            'conference:\r\n\r\n%s' % self.request.get(
                'conferenceInfo')
        )
class SetFeaturedSpeakerHandler(webapp2.RequestHandler):
    def post(self):
        """Refresh the featured-speaker cache for one conference."""
        wsck = self.request.get('websafeConferenceKey')
        ConferenceApi._cacheFeaturedSpeaker(wsck)
        self.response.set_status(204)
# Route cron and task-queue endpoints to their handlers.
app = webapp2.WSGIApplication([
    ('/crons/set_announcement', SetAnnouncementHandler),
    ('/tasks/send_confirmation_email', SendConfirmationEmailHandler),
    ('/tasks/set_featured_speaker', SetFeaturedSpeakerHandler),
], debug=True)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import logging
import simplejson as json
import tweepy
import bitly
import urllib2
import sqlite3
from local_settings import TwitterKey, BitlyKey
logging.basicConfig(filename='log.txt', level=logging.DEBUG, format='%(asctime)s - %(levelname)s - %(message)s')
def run | ():
conn = sqlite3.connect('tweets.db')
# if table not exists, create table
cur = conn.cursor()
query = cur.execute("SELECT count(*) FROM sqlite_master WHERE type='table' AND name='tweet_table'")
if query.fetchone()[0] <= 0:
cur.execute( | "CREATE TABLE tweet_table(Id INTEGER PRIMARY KEY, reddit_id TEXT, created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP)")
consumer_key = TwitterKey['consumer_key']
consumer_secret = TwitterKey['consumer_secret']
access_token = TwitterKey['access_token']
access_token_secret = TwitterKey['access_token_secret']
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
bot = tweepy.API(auth)
shortapi = bitly.Api(login=BitlyKey['login'], apikey=BitlyKey['apikey'])
url = 'http://www.reddit.com/r/programming/.json'
jsondata = json.loads(urllib2.urlopen(url).read())
if 'data' in jsondata and 'children' in jsondata['data']:
posts = jsondata['data']['children']
posts.reverse()
for ind, post in enumerate(posts):
entry = post['data']
# logging.debug(entry['permalink'] + ' ' +entry['url'])
postid = entry['id']
num_comments = entry['num_comments']
query = cur.execute("SELECT * FROM tweet_table WHERE reddit_id = '%s'" % postid)
if len(query.fetchall()) == 0 and num_comments > 5:
title = entry['title']
score = entry['score']
downs = entry['downs']
ups = entry['ups']
permalink = shortapi.shorten('http://www.reddit.com' + entry['permalink'])
url = shortapi.shorten(entry['url'])
author = entry['author']
status = ' %s [%s by:%s comments:%d score:%d]' % (url, permalink, author, num_comments, score)
status = title[:(135 - len(status))] + status
status = status.encode('utf-8')
logging.debug(status)
bot.update_status(status)
cur.execute("INSERT INTO tweet_table VALUES (?, ?, ?)", [None, postid, None])
conn.commit()
conn.close()
# Run the bot once when executed as a script.
if __name__ == '__main__':
    run()
from django.template import Library, Node, TemplateSyntaxError
from stats.views import get_next_rank_title_and_exp_points
register = Library()
class SetVariable(Node):
    """Template node that stores its rendered contents in a context var."""

    def __init__(self, varname, nodelist):
        self.varname = varname
        self.nodelist = nodelist

    def render(self, context):
        # Render the enclosed block, stash it under varname, emit nothing.
        rendered = self.nodelist.render(context)
        context[self.varname] = rendered
        return ''
@register.tag(name = 'setvar')
def setvar(parser, token):
    """
    Set value to content of a rendered block.
    {% setvar var_name %}
    ....
    {% endsetvar %}
    """
    try:
        # split_contents() knows not to split quoted strings.
        tag_name, varname = token.split_contents()
    except ValueError:
        # Python-3-compatible raise syntax (was the py2-only
        # `raise TemplateSyntaxError, "msg"` form).
        raise TemplateSyntaxError(
            "%r tag requires a single argument for variable name"
            % token.contents.split()[0])
    nodelist = parser.parse(('endsetvar',))
    parser.delete_first_token()
    return SetVariable(varname, nodelist)
@register.simple_tag
def active(request, pattern):
    """Return 'active' when the request path begins with *pattern*."""
    return 'active' if request.path.startswith(pattern) else ''
@register.simple_tag
def next_rank(category, exp_current):
    """Render an HTML snippet describing the next rank for *category*."""
    info = get_next_rank_title_and_exp_points(category, exp_current)
    if not info:
        return 'Next rank is unknown'
    (title, exp_needed, exp_total) = info
    return '<strong>%d EXP</strong> needed to reach the rank <strong>%s</strong> (<strong>%d EXP</strong>)' % (exp_needed, title, exp_total)
# -*- coding: utf-8 -*-
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
#
from django.conf.urls import url
from django.contrib.auth.decorators import login_required, permission_required
from django.views.generic import TemplateView, ListView, CreateView, UpdateView, DeleteView
from cambridge.views import *
from cambridge.forms import *
from cambridge.models import *
# URL routes for the cambridge app: registrations, schools, venues,
# linguaskill, prep centers and exam administration.
urlpatterns = [
    url(r'^list/$',login_required(RegistrationListView.as_view()), name="cambridge_list"),
    url(r'^list/all$',login_required(RegistrationListViewAll.as_view()), name="cambridge_list_all"),
    url(r'^list/exam/(?P<exam_id>\d+)/$',login_required(RegistrationListViewExam.as_view()), name="cambridge_list_exam"),
    url(r'^excel/$',login_required(RegistrationExcelView.as_view()), name="cambridge_excel"),
    url(r'^excel/exam/(?P<exam_id>\d+)/$',login_required(RegistrationExcelView.as_view()), name="cambridge_excel_exam"),
    url(r'^pay/(?P<pk>\d+)/$',RegistrationPayment,name="cambridge_pay"),
    url(r'^edit/(?P<pk>\d+)/$',
        login_required(UpdateView.as_view(
            model=Registration,
            success_url = '/cambridge/list',
            form_class = RegistrationEditForm,
            template_name='cambridge/registration_edit.html')), name="cambridge_edit"),
    url(r'^delete/(?P<pk>\d+)/$',
        login_required(DeleteView.as_view(
            model=Registration,
            success_url="/cambridge/list/")), name="cambridge_delete"),
    url(r'^view/(?P<pk>\d+)/$', ver, name="cambridge_view"),
    url(r'^print/(?P<pk>\d+)/$', imprimir_cambridge, name="cambridge_imprimir"),
    url(r'^new/(?P<exam_id>\d+)/$',RegistrationExamCreateView.as_view(), name="cambridge_nueva_examen"),
    url(r'^new/$',RegistrationCreateView.as_view(), name="cambridge_nueva"),
    # Colegios (schools)
    url(r'schools/exam/list/$', login_required(SchoolExamList.as_view()),name="cambridge_schools_exam_list"),
    url(r'schools/exam/(?P<school_name>\w+)/new/$', login_required(SchoolExamCreate.as_view()),name="cambridge_schools_exam_new"),
    url(r'schools/list/$', login_required(SchoolListView.as_view()),name="cambridge_schools_list"),
    url(r'schools/registrations/list/$', login_required(SchoolRegistrationListView.as_view()),name="cambridge_schools_registration_list"),
    url(r'schools/new/(?P<school_name>\w+)/(?P<school_password>\w+)/$', SchoolRegistrationCreateView.as_view(),name="cambridge_schools_new_registration"),
    url(r'schools/new/$', SchoolCreateView.as_view(),name="cambridge_schools_new"),
    url(r'berriotxoa/$', TemplateView.as_view( template_name = 'cambridge/berriotxoa.html' ),name="cambridge_berriotxoa"),
    url(r'schools/fuentefresnedo/$', TemplateView.as_view( template_name = 'cambridge/fuentefresnedo.html' ),name="cambridge_fuentefresnedo"),
    # Venues
    url(r'venue/exam/list/$', login_required(VenueExamList.as_view()),name="cambridge_venues_exam_list"),
    url(r'venue/exam/new/$', login_required(VenueExamCreate.as_view()),name="cambridge_venues_exam_new"),
    url(r'venue/list/$', login_required(VenueListView.as_view()),name="cambridge_venues_list"),
    url(r'venue/registrations/list/$', login_required(VenueRegistrationListView.as_view()),name="cambridge_venues_registration_list"),
    url(r'venue/new/(?P<venue_name>\w+)/$', VenueRegistrationCreateView.as_view(),name="cambridge_venues_new_registration"),
    # Linguaskill
    url(r'linguaskill/new/$', LinguaskillRegistrationCreateView.as_view(),name="cambridge_linguaskill_new_registration"),
    url(r'linguaskill/list/$', LinguaskillRegistrationListView.as_view(),name="cambridge_linguaskill_registration_list"),
    ## Prep Center
    url(r'prepcenter/$', PrepCenterHomeView.as_view(),name="cambridge_prepcenter_home"),
    url(r'prepcenter/pay/registrations/$', PrepCenterRegistrationsPayView.as_view(),name="cambridge_prepcenters_registrations_pays"),
    url(r'prepcenter/new/center/$', PrepCenterCreateView.as_view(),name="cambridge_prepcenters_new"),
    url(r'prepcenter/update/center/(?P<pk>\d+)/$', PrepCenterUpdateView.as_view(),name="cambridge_prepcenters_update"),
    url(r'prepcenter/list/$', login_required(PrepCenterListView.as_view()),name="cambridge_prepcenters_list"),
    #url(r'prepcenter/exam/list/$', login_required(PrepCenterExamList.as_view()),name="cambridge_prepcenters_exam_list"),
    # url(r'prepcenter/exam/new/$', PrepCenterExamCreate.as_view(),name="cambridge_prepcenters_exam_new"),
    url(r'prepcenter/registrations/new/$', PrepCenterRegistrationCreateView.as_view(),name="cambridge_prepcenters_registration_new"),
    url(r'prepcenter/registrations/new/exam/(?P<exam_id>\d+)/(?P<form_num>\d+)/$',PrepCenterRegistrationExamCreateView.as_view(),name="cambridge_prepcenters_registration_exam_new"),
    url(r'prepcenter/registrations/new/exam/(?P<exam_id>\d+)/$',PrepCenterRegistrationExamCreateView.as_view(),name="cambridge_prepcenters_registration_exam_new"),
    url(r'prepcenter/registrations/delete/(?P<pk>\d+)/$',PrepCenterRegistrationDeleteView.as_view(), name="prepcenter_registration_delete"),
    #url(r'prepcenter/registrations/list/$', login_required(PrepCenterRegistrationListView.as_view()),name="cambridge_prepcenters_registration_list"),
    url(r'prepcenter/passwordreset/(?P<pk>\d+)/$',PrepCenterPasswordResetView.as_view(), name="prepcenter_passwordreset"),
    url(r'prepcenter/createuser/(?P<pk>\d+)/$',PrepCenterCreateUserView.as_view(), name="prepcenter_createuser"),
    url(r'prepcenter/disableuser/(?P<pk>\d+)/$',PrepCenterDisableUserView.as_view(), name="prepcenter_disableuser"),
    url(r'prepcenter/enableuser/(?P<pk>\d+)/$',PrepCenterEnableUserView.as_view(), name="prepcenter_enableuser"),
    url(r'prepcenter/detalle/(?P<pk>\d+)/$',PrepCenterDetailView.as_view(), name="prepcenter_detalle"),
    url(r'prepcenter/delete/(?P<pk>\d+)/$',PrepCenterDeleteView.as_view(), name="prepcenter_delete"),
    url(r'prepcenter/registrations/pay/(?P<pk>\d+)/$',PrepCenterPayRegistrations.as_view(), name="prepcenter_registrations_admin_pay"),
    ## Genericas (static thanks/error pages)
    url(r'thanks/$', TemplateView.as_view( template_name = 'cambridge/gracias.html' ),name="cambridge_gracias"),
    url(r'error/$', TemplateView.as_view( template_name = 'cambridge/error.html' ),name="cambridge_error"),
    ## For the exams
    url(r'^exam/list/$',login_required(
        ListView.as_view(model=Exam,template_name='cambridge/exam_list.html')
        ), name="cambridge_exam_list"),
    url(r'^exam/delete/(?P<pk>\d+)/$',
        login_required(DeleteView.as_view(
            model=Exam,
            success_url="/cambridge/exam/list/")), name="cambridge_exam_delete"),
    url(r'^exam/new/$', login_required(
        CreateView.as_view(
            model=Exam,
            form_class = ExamForm,
            success_url = '/cambridge/exam/list',
            template_name='cambridge/exam_form.html')), name="cambridge_exam_new"),
    url(r'^exam/edit/(?P<pk>\d+)/$',
        login_required(UpdateView.as_view(
            model=Exam,
            fields = '__all__',
            success_url = '/cambridge/exam/list',
            template_name='cambridge/exam_edit.html')), name="cambridge_exam_edit"),
    url(r'^$', IndexExamList.as_view(),name="cambridge"),
]
|
# perf script event handlers, generated by perf script -g python
# Licensed under the terms of the GNU GPL License version 2
# The common_* event handler fields are the most useful fields common to
# all events. They don't necessarily correspond to the 'common_*' fields
# in the format files. Those fields not available as handler params can
# be retrieved using Python functions of the form common_*(context).
# See the perf-trace-python Documentation for the list of available functions.
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
| '/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
syscalls = autodict()
def trace_begin():
    """Called once by perf before any events are processed; nothing to set up."""
    pass
def trace_end():
    """Called once by perf after all events are processed; nothing to report."""
    pass
def raw_syscalls__sys_exit(event_name, context, cpu,
        s, ns, pid, comm, callchain, syscall_id, args):
    """Print syscall latency when the matching sys_enter was recorded.

    NOTE(review): the final parameter is named 'args' here and 'ret' in
    sys_enter below -- the names look swapped relative to the usual
    generated handlers, but both are unused so behavior is unaffected.
    """
    # Ignore exits whose entry we never saw.
    if pid not in syscalls or syscall_id not in syscalls[pid]:
        return
    latency = nsecs(s, ns) - syscalls[pid][syscall_id]
    print "[%04d] %04d => %9uns" % (pid, syscall_id, latency)
def raw_syscalls__sys_enter(event_name, context, cpu,
        s, ns, pid, comm, callchain, syscall_id, ret):
    """Record the entry timestamp (ns) for this pid/syscall pair."""
    syscalls[pid][syscall_id] = nsecs(s, ns)
def trace_unhandled(event_name, context, event_fields_dict):
    """Called for events with no specific handler; intentionally ignored."""
    pass
|
from django import forms
from django.forms import ModelForm
from django.db import models
from app.models import NFL_Division
from app.models import NFL_Conference_Choices
class NFL_DivisionForm_Create(ModelForm):
    """Form for creating an NFL division (name plus parent conference)."""
    name = forms.CharField(max_length = 100,
                           widget = forms.TextInput({
                               'class':'form-control',
                               'placeholder': 'Enter Division Name'}))
    # Choices are supplied by a callable so they are rebuilt per render.
    conference_id = forms.ChoiceField(choices = NFL_Conference_Choices.make_conference_choices,
                                      widget = forms.Select({'class':'form-control'}))
    # NOTE(review): 'filter' is a hidden field not listed in Meta.fields --
    # presumably consumed by the view rather than the model; confirm.
    filter = forms.IntegerField(widget = forms.HiddenInput())

    class Meta:
        model = NFL_Division
        fields = ['name', 'conference_id']
class NFL_DivisionForm_Edit(ModelForm):
    """Form for editing an existing NFL division (carries its id)."""
    id = forms.IntegerField(widget = forms.HiddenInput())
    name = forms.CharField(max_length = 100,
                           widget = forms.TextInput({
                               'class':'form-control',
                               'placeholder': 'Enter Division Name'}))
    # Choices are supplied by a callable so they are rebuilt per render.
    conference_id = forms.ChoiceField(choices = NFL_Conference_Choices.make_conference_choices,
                                      widget = forms.Select({'class':'form-control'}))
    # NOTE(review): 'filter' is a hidden field not listed in Meta.fields --
    # presumably consumed by the view rather than the model; confirm.
    filter = forms.IntegerField(widget = forms.HiddenInput())

    class Meta:
        model = NFL_Division
        fields = ['id', 'name', 'conference_id']
|
from pyh import *
# Demo data: one row per person -> [id, name, age].
# (Renamed from "list", which shadowed the builtin.)
people = [[1, 'Lucy', 25], [2, 'Tom', 30], [3, 'Lily', 20]]

page = PyH('Test')
page << div(style="text-align:center") << h4('Test table')
mytab = page << table(border="1", cellpadding="3", cellspacing="0", style="margin:auto")
header_row = mytab << tr(bgcolor="lightgrey")
header_row << th('id') + th('name') + th('age')

# One table row per person; iterate rows and cells directly instead of
# indexing with range(len(...)).
for person in people:
    tr2 = mytab << tr()
    for value in person:
        tr2 << td(value)
        # Highlight Tom's whole row; color the name cell of Lily's row.
        if value == 'Tom':
            tr2.attributes['bgcolor'] = 'yellow'
        if value == 'Lily':
            tr2[1].attributes['style'] = 'color:red'

page.printOut('/Users/miraclewong/github/PythonBasic/PyH/demo.html')
# Copyright (c) 2013 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from heatclient import client as heat_client
from oslo_config import cfg
from sahara import context
from sahara import exceptions as ex
from sahara.i18n import _
from sahara.utils.openstack import base
opts = [
cfg.BoolOpt('api_insecure',
default=False,
help='Allow to perform insecure SSL requests to heat.'),
cfg.StrOpt('ca_file',
help='Location of ca certificates file to use for heat '
'clie | nt requests.')
]
heat_group = cfg.OptGroup(name='heat',
title='Heat client options')
CONF = cfg.CONF
CONF.register_group(heat_group)
CONF.register_opts(opts, group=heat_group)
def client():
    """Build a Heat v1 client for the current context's orchestration endpoint."""
    ctx = context.current()
    heat_url = base.url_for(ctx.service_catalog, 'orchestration')
    # SSL behavior comes from the [heat] config group.
    client_kwargs = {
        'token': ctx.auth_token,
        'cert_file': CONF.heat.ca_file,
        'insecure': CONF.heat.api_insecure,
    }
    return heat_client.Client('1', heat_url, **client_kwargs)
def get_stack(stack_name):
    """Return the Heat stack named *stack_name*.

    Raises NotFoundException when no such stack exists.
    """
    heat = client()
    match = next((s for s in heat.stacks.list()
                  if s.stack_name == stack_name), None)
    if match is None:
        raise ex.NotFoundException(_('Failed to find stack %(stack)s')
                                   % {'stack': stack_name})
    return match
def wait_stack_completion(stack):
    """Poll *stack* until it leaves IN_PROGRESS, then require COMPLETE.

    NOTE: an empty status is also treated as in-progress because the
    status may not yet be set in the heat database.
    """
    while stack.status in ('IN_PROGRESS', ''):
        context.sleep(1)
        stack.get()
    if stack.status != 'COMPLETE':
        raise ex.HeatStackException(stack.stack_status)
|
# encoding: utf-8
# module pango
# from /usr/lib/python2.7/dist-packages/gtk-2.0/pango.so
# by generator 1.135
# no doc
# imports
import gobject as __gobject
import gobject._gobject as __gobject__gobject
class WrapMode(__gobject.GEnum):
    """Auto-generated introspection stub for pango's WrapMode GEnum.

    Only integer enum values 0, 1 and 2 were recoverable by the
    generator; the symbolic names live in the C library, not here.
    """
    # no doc
    def __init__(self, *args, **kwargs): # real signature unknown
        pass

    __weakref__ = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    """list of weak references to the object (if defined)"""

    __dict__ = None # (!) real value is ''
    __enum_values__ = {
        0: 0,
        1: 1,
        2: 2,
    }
    __gtype__ = None # (!) real value is ''
| |
# -*- coding: utf-8 -*-
# Copyright(C) 2015 Bezleputh
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
from weboob.tools.backend import Module
from weboob.capabilities.housing import CapHousing, Housing, HousingPhoto
from .browser import EntreparticuliersBrowser
__all__ = ['EntreparticuliersModule']
class EntreparticuliersModule(Module, CapHousing):
    """Weboob backend for the entreparticuliers.com housing website."""

    NAME = 'entreparticuliers'
    DESCRIPTION = u'entreparticuliers.com website'
    MAINTAINER = u'Bezleputh'
    EMAIL = 'carton_ben@yahoo.fr'
    LICENSE = 'AGPLv3+'
    VERSION = '1.1'
    BROWSER = EntreparticuliersBrowser

    def search_city(self, pattern):
        """Look up cities whose name matches *pattern*."""
        return self.browser.search_city(pattern)

    def search_housings(self, query):
        """Run a housing search restricted to this backend's cities."""
        city_ids = [city.id for city in query.cities if city.backend == self.name]
        if not city_ids:
            return []
        return self.browser.search_housings(query.type, city_ids, query.nb_rooms,
                                            query.area_min, query.area_max,
                                            query.cost_min, query.cost_max,
                                            query.house_types)

    def get_housing(self, _id):
        """Fetch one housing advert by its identifier."""
        return self.browser.get_housing(_id)

    def fill_housing(self, housing, fields):
        """Complete a partially-filled Housing object from its detail page."""
        return self.browser.get_housing(housing.id, housing)

    def fill_photo(self, photo, fields):
        """Download photo bytes lazily, only when the 'data' field is wanted."""
        if 'data' in fields and photo.url and not photo.data:
            photo.data = self.browser.open(photo.url).content
        return photo

    OBJECTS = {Housing: fill_housing, HousingPhoto: fill_photo}
|
#!/usr/bin/python |
import magic
import sys
# Print the MIME type for every file named on the command line.
m = magic.open(magic.MIME_TYPE)
m.load()
for f in sys.argv[1:]:
    try:
        print(f, m.file(f))
    except Exception:
        # Bug fix: the original bare `except:` also swallowed SystemExit and
        # KeyboardInterrupt; catch only real errors and keep going.
        print("Except with %s" % f)
|
n
)
from weblate.trans.machine.weblatetm import (
WeblateSimilarTranslation, WeblateTranslation
)
GLOSBE_JSON = u'''
{
"result":"ok",
"authors":{
"1":{"U":"http://en.wiktionary.org","id":1,"N":"en.wiktionary.org"}
},
"dest":"ces",
"phrase":"world",
"tuc":[
{
"authors":[1],
"meaningId":-311020347498476098,
"meanings":[
{
"text":"geographic terms (above country level)",
"language":"eng"
}
],
"phrase":{"text":"svět","language":"ces"}}],
"from":"eng"
}
'''.encode('utf-8')
MYMEMORY_JSON = u'''
{"responseData":{"translatedText":"svět"},"responseDetails":"",
"responseStatus":200,
"matches":[
{"id":"428492143","segment":"world","translation":"svět","quality":"",
"reference":"http://aims.fao.org/standards/agrovoc",
"usage-count":15,"subject":"Agriculture_and_Farming",
"created-by":"MyMemoryLoader",
"last-updated-by":"MyMemoryLoader","create-date":"2013-06-12 17:02:07",
"last-update-date":"2013-06-12 17:02:07","match":1},
{"id":"424273685","segment":"World view","translation":"Světový názor",
"quality":"80",
"reference":"//cs.wikipedia.org/wiki/Sv%C4%9Btov%C3%BD_n%C3%A1zor",
"usage-count":1,"subject":"All","created-by":"","last-updated-by":"Wikipedia",
"create-date":"2012-02-22 13:23:31","last-update-date":"2012-02-22 13:23:31",
"match":0.85},
{"id":"428493395","segment":"World Bank","translation":"IBRD","quality":"",
"reference":"http://aims.fao.org/standards/agrovoc",
"usage-count":1,"subject":"Agriculture_and_Farming",
"created-by":"MyMemoryLoader","last-updated-by":"MyMemoryLoader",
"create-date":"2013-06-12 17:02:07",
"last-update-date":"2013-06-12 17:02:07","match":0.84}
]}
'''.encode('utf-8')
AMAGAMA_JSON = u'''
[{"source": "World", "quality": 80.0, "target": "Svět", "rank": 100.0}]
'''.encode('utf-8')
GOOGLE_JSON = u'''
[
[["svět","world","",""]],
[[
"noun",["svět","země","společnost","lidstvo"],
[
["svět",["world","earth"],null,0.465043187],
["země",["country","land","ground","nation","soil","world"]
,null,0.000656803953],
["lidstvo",["humanity","mankind","humankind","people","world"]
,null,0.000148860636]
],
"world",1
]],
"en",null,
[["svět",[4],1,0,1000,0,1,0]],
[[
"world",4,[["svět",1000,1,0],
["World",0,1,0],
["Světová",0,1,0],
["světě",0,1,0],
["světa",0,1,0]],
[[0,5]],"world"]],
null,null,[],2
]
'''.encode('utf-8')
OPENTRAN_JSON = u'''
[{
"count":4,
"projects":[{
"count":4,"flags":0,"name":"KDE","orig_phrase":" World",
"path":"K/step_qt"
}],
"text":"Svět","value":1
}]
'''.encode('utf-8')
class MachineTranslationTest(TestCase):
'''
Testing of machine translation core.
'''
def test_support(self):
machine_translation = DummyTranslation()
self.assertTrue(machine_translation.is_supported('cs'))
self.assertFalse(machine_translation.is_supported('de'))
def test_translate(self):
machine_translation = DummyTranslation()
self.assertEqual(
machine_translation.translate('cs', 'Hello', None, None),
[]
)
self.assertEqual(
len(
machine_trans | lation.translate(
'cs', 'Hello, world!', None, None
)
),
2
)
def assertTranslate(self, machine, lang='cs', word='world', empty=False):
translation = machine.translate(lang, word, None | , None)
self.assertIsInstance(translation, list)
if not empty:
self.assertTrue(len(translation) > 0)
    @httpretty.activate
    def test_glosbe(self):
        # Serve the canned Glosbe reply and expect at least one suggestion.
        httpretty.register_uri(
            httpretty.GET,
            'http://glosbe.com/gapi/translate',
            body=GLOSBE_JSON
        )
        machine = GlosbeTranslation()
        self.assertTranslate(machine)
    @httpretty.activate
    def test_mymemory(self):
        # Serve the canned MyMemory reply and expect suggestions.
        httpretty.register_uri(
            httpretty.GET,
            'http://mymemory.translated.net/api/get',
            body=MYMEMORY_JSON
        )
        machine = MyMemoryTranslation()
        self.assertTranslate(machine)
    @httpretty.activate
    def test_opentran(self):
        # Both the language list and the suggestion endpoint are mocked.
        httpretty.register_uri(
            httpretty.GET,
            'http://open-tran.eu/json/supported',
            body='["en","cs"]'
        )
        httpretty.register_uri(
            httpretty.GET,
            'http://en.cs.open-tran.eu/json/suggest/world',
            body=OPENTRAN_JSON
        )
        machine = OpenTranTranslation()
        self.assertTranslate(machine)
    @httpretty.activate
    def test_opentran_wrong_lang(self):
        # The language list is deliberately truncated (invalid JSON), so the
        # language is treated as unsupported and no suggestions are returned.
        httpretty.register_uri(
            httpretty.GET,
            'http://open-tran.eu/json/supported',
            body='["en","cs"'
        )
        machine = OpenTranTranslation()
        # Prevent cache issues
        machine.mtid += 'wrong_lang'
        self.assertTranslate(machine, empty=True)
    @httpretty.activate
    def test_opentran_wrong(self):
        # Valid language list, but a broken suggestion payload must surface
        # as a MachineTranslationError rather than being silently ignored.
        httpretty.register_uri(
            httpretty.GET,
            'http://open-tran.eu/json/supported',
            body='["en","cs"]'
        )
        httpretty.register_uri(
            httpretty.GET,
            'http://en.cs.open-tran.eu/json/suggest/world',
            body='['
        )
        machine = OpenTranTranslation()
        # Prevent cache issues
        machine.mtid += 'wrong'
        self.assertRaises(
            MachineTranslationError,
            self.assertTranslate,
            machine
        )
    @httpretty.activate
    def test_apertium(self):
        # Apertium only advertises the en->es pair here, so translate to 'es'.
        httpretty.register_uri(
            httpretty.GET,
            'http://api.apertium.org/json/listPairs',
            body='{"responseStatus": 200, "responseData":'
            '[{"sourceLanguage": "en","targetLanguage": "es"}]}'
        )
        httpretty.register_uri(
            httpretty.GET,
            'http://api.apertium.org/json/translate',
            body='{"responseData":{"translatedText":"Mundial"},'
            '"responseDetails":null,"responseStatus":200}'
        )
        machine = ApertiumTranslation()
        self.assertTranslate(machine, 'es')
    @httpretty.activate
    def test_microsoft(self):
        # Three endpoints are mocked: OAuth token, language list, translate.
        httpretty.register_uri(
            httpretty.POST,
            'https://datamarket.accesscontrol.windows.net/v2/OAuth2-13',
            body='{"access_token":"TOKEN"}'
        )
        httpretty.register_uri(
            httpretty.GET,
            'http://api.microsofttranslator.com/V2/Ajax.svc/'
            'GetLanguagesForTranslate',
            body='["en","cs"]'
        )
        httpretty.register_uri(
            httpretty.GET,
            'http://api.microsofttranslator.com/V2/Ajax.svc/Translate',
            body=u'"svět"'.encode('utf-8')
        )
        machine = MicrosoftTranslation()
        self.assertTranslate(machine)
    @httpretty.activate
    def test_googleweb(self):
        # The unofficial web endpoint returns Google's nested-array format.
        httpretty.register_uri(
            httpretty.GET,
            'http://translate.google.com/translate_a/t',
            body=GOOGLE_JSON
        )
        machine = GoogleWebTranslation()
        self.assertTranslate(machine)
    @httpretty.activate
    def test_google(self):
        # Official API: language list plus a single translation result.
        httpretty.register_uri(
            httpretty.GET,
            'https://www.googleapis.com/language/translate/v2/languages',
            body='{"data": {"languages": [ { "language": "cs" }]}}'
        )
        httpretty.register_uri(
            httpretty.GET,
            'https://www.googleapis.com/language/translate/v2/',
            body='{"data":{"translations":[{"translatedText":"svet"}]}}'
        )
        machine = GoogleTranslation()
        self.assertTranslate(machine)
@httpretty.activate
def test_amagama(self):
httpretty.register_uri(
httpretty.GET,
'http://amagama.locamotion.org/tmserver/en/cs/unit/world',
body=AMAGAMA_JSON
)
machine = AmagamaTran |
# coding: utf-8
from __future__ import division
class CharityItem(object):
    """A charity fundraising project and its progress toward a money goal."""

    def __init__(self, name, short_desc, long_desc, image_name, detail_image_name, rating, major, minor, objective_money, actual_money):
        self.name = name
        self.short_desc = short_desc
        self.long_desc = long_desc
        self.image_name = image_name
        self.detail_image_name = detail_image_name
        self.rating = rating
        self.minor = minor
        self.major = major
        self.objective_money = objective_money
        self.actual_money = actual_money

    def to_dict(self):
        """Serialise the item to a plain dict (inverse of from_dict)."""
        return {
            "name": self.name,
            "short_desc": self.short_desc,
            "long_desc": self.long_desc,
            "image_name": self.image_name,
            "detail_image_name": self.detail_image_name,
            "minor": self.minor,
            "major": self.major,
            "rating": self.rating,
            "objective_money": self.objective_money,
            "actual_money": self.actual_money,
        }

    @property
    def accomplishment_rate(self):
        """Fraction of the money objective collected so far (true division)."""
        return self.actual_money / self.objective_money

    @classmethod
    def from_dict(cls, json_data):
        """Build a CharityItem from a decoded JSON mapping.

        Numeric fields are coerced with int(); descriptive text fields are
        taken as-is.
        """
        name = json_data["name"]
        # Bug fix: short_desc is descriptive text like the other *_desc
        # fields; the original int() coercion raised ValueError on real data.
        short_desc = json_data["short_desc"]
        long_desc = json_data["long_desc"]
        image_name = json_data["image_name"]
        detail_image_name = json_data["detail_image_name"]
        minor = int(json_data["minor"])
        major = int(json_data["major"])
        rating = int(json_data["rating"])
        objective_money = int(json_data["objective_money"])
        actual_money = int(json_data["actual_money"])
        return cls(name, short_desc, long_desc, image_name, detail_image_name, rating, major, minor, objective_money, actual_money)
|
from zerver.lib.test_classes import WebhookTestCase
class OpsGenieHookTests(WebhookTestCase):
    """Webhook tests for the OpsGenie integration.

    Each test replays a stored fixture named after the OpsGenie action and
    checks the rendered topic and message.
    """

    STREAM_NAME = "opsgenie"
    URL_TEMPLATE = "/api/v1/external/opsgenie?&api_key={api_key}&stream={stream}"
    WEBHOOK_DIR_NAME = "opsgenie"

    def _check_alert(self, fixture_name: str, expected_topic: str, expected_message: str) -> None:
        # Every OpsGenie fixture is posted form-encoded; factoring the call
        # out removes the content_type duplication from each test.
        self.check_webhook(
            fixture_name,
            expected_topic,
            expected_message,
            content_type="application/x-www-form-urlencoded",
        )

    def test_acknowledge_alert(self) -> None:
        expected_message = """
[OpsGenie alert for Integration1](https://app.opsgenie.com/alert/V2#/show/052652ac-5d1c-464a-812a-7dd18bbfba8c):
* **Type**: Acknowledge
* **Message**: test alert
* **Tags**: `tag1`, `tag2`
""".strip()
        self._check_alert("acknowledge", "Integration1", expected_message)

    def test_addnote_alert(self) -> None:
        expected_message = """
[OpsGenie alert for Integration1](https://app.opsgenie.com/alert/V2#/show/052652ac-5d1c-464a-812a-7dd18bbfba8c):
* **Type**: AddNote
* **Note**: note to test alert
* **Message**: test alert
* **Tags**: `tag1`, `tag2`
""".strip()
        self._check_alert("addnote", "Integration1", expected_message)

    def test_addrecipient_alert(self) -> None:
        expected_message = """
[OpsGenie alert for Integration1](https://app.opsgenie.com/alert/V2#/show/052652ac-5d1c-464a-812a-7dd18bbfba8c):
* **Type**: AddRecipient
* **Recipient**: team2_escalation
* **Message**: test alert
* **Tags**: `tag1`, `tag2`
""".strip()
        self._check_alert("addrecipient", "Integration1", expected_message)

    def test_addtags_alert(self) -> None:
        expected_message = """
[OpsGenie alert for Integration1](https://app.opsgenie.com/alert/V2#/show/052652ac-5d1c-464a-812a-7dd18bbfba8c):
* **Type**: AddTags
* **Tags added**: tag1,tag2,tag3
* **Message**: test alert
* **Tags**: `tag1`, `tag2`, `tag3`
""".strip()
        self._check_alert("addtags", "Integration1", expected_message)

    def test_addteam_alert(self) -> None:
        expected_message = """
[OpsGenie alert for Integration1](https://app.opsgenie.com/alert/V2#/show/052652ac-5d1c-464a-812a-7dd18bbfba8c):
* **Type**: AddTeam
* **Team added**: team2
* **Message**: test alert
* **Tags**: `tag1`, `tag2`
""".strip()
        self._check_alert("addteam", "Integration1", expected_message)

    def test_assignownership_alert(self) -> None:
        expected_message = """
[OpsGenie alert for Integration1](https://app.opsgenie.com/alert/V2#/show/052652ac-5d1c-464a-812a-7dd18bbfba8c):
* **Type**: AssignOwnership
* **Assigned owner**: user2@ifountain.com
* **Message**: test alert
* **Tags**: `tag1`, `tag2`
""".strip()
        self._check_alert("assignownership", "Integration1", expected_message)

    def test_close_alert(self) -> None:
        expected_message = """
[OpsGenie alert for Integration1](https://app.opsgenie.com/alert/V2#/show/052652ac-5d1c-464a-812a-7dd18bbfba8c):
* **Type**: Close
* **Message**: test alert
""".strip()
        self._check_alert("close", "Integration1", expected_message)

    def test_create_alert(self) -> None:
        expected_message = """
[OpsGenie alert for Webhook](https://app.opsgenie.com/alert/V2#/show/ec03dad6-62c8-4c94-b38b-d88f398e900f):
* **Type**: Create
* **Message**: another alert
* **Tags**: `vip`
""".strip()
        self._check_alert("create", "Webhook", expected_message)

    def test_customaction_alert(self) -> None:
        expected_message = """
[OpsGenie alert for Integration1](https://app.opsgenie.com/alert/V2#/show/052652ac-5d1c-464a-812a-7dd18bbfba8c):
* **Type**: TestAction
* **Message**: test alert
* **Tags**: `tag1`, `tag2`
""".strip()
        self._check_alert("customaction", "Integration1", expected_message)

    def test_delete_alert(self) -> None:
        expected_message = """
[OpsGenie alert for Integration1](https://app.opsgenie.com/alert/V2#/show/052652ac-5d1c-464a-812a-7dd18bbfba8c):
* **Type**: Delete
* **Message**: test alert
""".strip()
        self._check_alert("delete", "Integration1", expected_message)

    def test_escalate_alert(self) -> None:
        expected_message = """
[OpsGenie alert for Webhook_Test](https://app.opsgenie.com/alert/V2#/show/7ba97e3a-d328-4b5e-8f9a-39e945a3869a):
* **Type**: Escalate
* **Escalation**: test_esc
""".strip()
        self._check_alert("escalate", "Webhook_Test", expected_message)

    def test_removetags_alert(self) -> None:
        expected_message = """
[OpsGenie alert for Integration1](https://app.opsgenie.com/alert/V2#/show/052652ac-5d1c-464a-812a-7dd18bbfba8c):
* **Type**: RemoveTags
* **Tags removed**: tag3
* **Message**: test alert
* **Tags**: `tag1`, `tag2`
""".strip()
        self._check_alert("removetags", "Integration1", expected_message)

    def test_takeownership_alert(self) -> None:
        expected_message = """
[OpsGenie alert for Webhook](https://app.opsgenie.com/alert/V2#/show/8a745a79-3ed3-4044-8427-98e067c0623c):
* **Type**: TakeOwnership
* **Message**: message test
* **Tags**: `tag1`, `tag2`
""".strip()
        self._check_alert("takeownership", "Webhook", expected_message)

    def test_unacknowledge_alert(self) -> None:
        expected_message = """
[OpsGenie alert for Integration1](https://app.opsgenie.com/alert/V2#/show/052652ac-5d1c-464a-812a-7dd18bbfba8c):
* **Type**: UnAcknowledge
* **Message**: test alert
* **Tags**: `tag1`, `tag2`
""".strip()
        self._check_alert("unacknowledge", "Integration1", expected_message)
|
# TODO: separate this module into different, focused packages
|
# Natural Language Toolkit: Clusterers
#
# Copyright (C) 2001-2010 NLTK Project
# Author: Trevor Cohn <tacohn@cs.mu.oz.au>
# URL: <http://www.nltk.org/>
# For license information, see LICENSE.TXT
"""
This module contains a number of basic clustering algorithms. Clustering
describes the task of discovering groups of similar items within a large
collection. It is also described as unsupervised machine learning, as the data
from which it learns is unannotated with class information, unlike the case of
supervised learning. Annotated data is difficult and expensive to obtain in
the quantities required for the majority of supervised learning algorithms.
This problem, the knowledge acquisition bottleneck, is common to most natural
language processing tasks, thus fueling the need for quality unsupervised
approaches.
This module contains a k-means clusterer, E-M clusterer and a group average
agglomerative clusterer (GAAC). All these clusterers involve finding good
cluster groupings for a set of vectors in multi-dimensional space.
The K-means clusterer starts with k arbitrary chosen means then allocates each
vector to the cluster with the closest mean. It then recalculates the means of
each cluster as the centroid of the vectors in the cluster. This process
repeats until the cluster memberships stabilise. This is a hill-climbing
algorithm which may converge to a local maximum. Hence the clustering is
often repeated with random initial means and the most commonly occurring
output means are chosen.
The GAAC clusterer starts with each of the M{N} vectors as singleton clusters.
It then iteratively merges pairs of clusters which have the closest centroids.
This continues until there is only one cluster. The order of merges gives rise
to a dendrogram - a tree with the earlier merges lower than later merges. The
membership of a given number of clusters M{c}, M{1 <= c <= N}, can be found by
cutting the dendrogram at depth M{c}.
The Gaussian EM clusterer models the vectors as being produced by a mixture
of k Gaussian sources. The parameters of these sources (prior probability,
mean and covariance matrix) are then found to maximise the likelihood of the
given data. This is done with the expectation maximisation algorithm. It
starts with k arbitrarily chosen means, priors and covariance matrices. It
then calculates the membership probabilities for each vector in each of the
clusters - this is the 'E' step. The cluster parameters are then updated in
the 'M' step using the maximum likelihood estimate from the cluster membership
probabilities. This process continues until the likelihood of the data does
not significantly increase.
They all extend the ClusterI interface which defines common operations
available with each clusterer. These operations include.
- cluster: clusters a sequence of vectors
- classify: assign a vector to a cluster
- classification_probdist: give the probability distribution over cluster memberships
The current existing classifiers also extend cluster.VectorSpace, an
abstract class which allows for singular value decomposition (SVD) and vector
normalisation. SVD is used to reduce the dimensionality of the vector space in
such a manner as to preserve as much of the variation as possible, by
reparameterising the axes in order of variability and discarding all bar the
first d dimensions. Normalisation ensures that vectors fall in the unit
hypersphere.
Usage example (see also demo())::
from nltk import cluster
from nltk.cluster import euclidean_distance
from numpy import array
vectors = [array(f) for f in [[3, 3], [1, 2], [4, 2], [4, 0]]]
# initialise the clusterer (will also assign the vectors to clusters)
clusterer = cluster.KMeansClusterer(2, euclidean_distance)
clusterer.cluster(vectors, True)
# classify a new vector
print clusterer.classify(array([3, 3]))
Note that the vectors must use numpy array-like
objects. nltk_contrib.unimelb.tacohn.SparseArrays may be used for
efficiency when required.
"""
from util import *
from kmeans import *
from gaac import *
from em import *
__all__ = ['KMeansClusterer', 'GAAClusterer', 'EMClusterer',
'VectorSpaceClusterer', 'Dendrogram']
|
use this script to backplot nc files to *.scr file for autocad,bricscad,
draftsight,progecad,ares commander, etc....
usage: python cad_iso_read.py temp.nc temp.scr
"""
import cad_nc_read as nc
import re
import sys
################################################################################
class Parser(nc.Parser):
    def __init__(self, writer):
        """Create an ISO G-code parser that emits output through *writer*."""
        nc.Parser.__init__(self, writer)
        # Tokeniser for one NC line: comments starting with (, ! or ; (to end
        # of line), runs of whitespace, a letter/digit/colon followed by an
        # optional signed decimal number (e.g. X1.5, G01), words like w#3,
        # parenthesised comments, and #<n>=<value> variable assignments.
        self.pattern_main = re.compile('([(!;].*|\s+|[a-zA-Z0-9_:](?:[+-])?\d*(?:\.\d*)?|\w\#\d+|\(.*?\)|\#\d+\=(?:[+-])?\d*(?:\.\d*)?)')
        # To support another comment character, add it to the [(!;] class
        # above, then add a matching elif in ParseWord near the comment cases.
def ParseWord(self, word):
if (word[0] == 'A' or word[0] == 'a'):
self.col = "axis"
self.a = eval(word[1:])
self.move = True
elif (word[0] == 'B' or word[0] == 'b'):
self.col = "axis"
self.b = eval(word[1:])
self.move = True
elif (word[0] == 'C' or word[0] == 'c'):
self.col = "axis"
self.c = eval(word[1:])
self.move = True
elif (word[0] == 'F' or word[0] == 'f'):
self.col = "axis"
self.f = eval(word[1:])
self.move = True
elif (word == 'G0' or word == 'G00' or word == 'g0' or word == 'g00'):
self.path_col = "rapid"
self.col = "rapid"
self.arc = 0
elif (word == 'G1' or word == 'G01' or word == 'g1' or word == 'g01'):
self.path_col = "feed"
self.col = "feed"
self.arc = 0
elif (word == 'G2' or word == 'G02' or word == 'g2' or word == 'g02' or word == 'G12' or word == 'g12'):
self.path_col = "feed"
self.col = "feed"
self.arc = -1
elif (word == 'G3' or word == 'G03' or word == 'g3' or word == 'g03' or word == 'G13' or word == 'g13'):
self.path_col = "feed"
self.col = "feed"
self.arc = +1
elif (word == 'G10' or word == 'g10'):
self.no_move = True
elif (word == 'L1' or word == 'l1'):
self.no_move = True
elif (word == 'G61.1' or word == 'g61.1' or word == 'G61' or word == 'g61' or word == 'G64' or word == 'g64'):
self.no_move = True
elif (word == 'G20' or word == 'G70'):
self.col = "prep"
self.set_mode(units=25.4)
elif (word == 'G21' or word == 'G71'):
self.col = "prep"
self.set_mode(units=1.0)
elif (word == 'G81' or word == 'g81'):
self.drill = True
self.no_move = True
self.path_col = "feed"
self.col = "feed"
elif (word == 'G82' or word == 'g82'):
self.drill = True;
self.no_move = True
self.path_col = "feed"
self.col = "feed"
elif (word == 'G83' or word == 'g83'):
self.drill = True
self.no_move = True
self.path_col = "feed"
self.col = "feed"
elif (word == 'G90' or word == 'g90'):
self.ab | solute()
elif (word == 'G91' or word == 'g91'):
| self.incremental()
elif (word[0] == 'G') : col = "prep"
elif (word[0] == 'I' or word[0] == 'i'):
self.col = "axis"
self.i = eval(word[1:])
self.move = True
elif (word[0] == 'J' or word[0] == 'j'):
self.col = "axis"
self.j = eval(word[1:])
self.move = True
elif (word[0] == 'K' or word[0] == 'k'):
self.col = "axis"
self.k = eval(word[1:])
self.move = True
elif (word[0] == 'M') : self.col = "misc"
elif (word[0] == 'N') : self.col = "blocknum"
elif (word[0] == 'O') : self.col = "program"
elif (word[0] == 'P' or word[0] == 'p'):
if (self.no_move != True):
self.col = "axis"
self.p = eval(word[1:])
self.move = True
elif (word[0] == 'Q' or word[0] == 'q'):
if (self.no_move != True):
self.col = "axis"
self.q = eval(word[1:])
self.move = True
elif (word[0] == 'R' or word[0] == 'r'):
self.col = "axis"
self.r = eval(word[1:])
self.move = True
elif (word[0] == 'S' or word[0] == 's'):
self.col = "axis"
self.s = eval(word[1:])
self.move = True
elif (word[0] == 'T') :
self.col = "tool"
self.set_tool( eval(word[1:]) )
elif (word[0] == 'X' or word[0] == 'x'):
self.col = "axis"
self.x = eval(word[1:])
self.move = True
elif (word[0] == 'Y' or word[0] == 'y'):
self.col = "axis"
self.y = eval(word[1:])
self.move = True
elif (word[0] == 'Z' or word[0] == 'z'):
self.col = "axis"
self.z = eval(word[1:])
self.move = True
elif (word[0] == '(') : (self.col, self.cdata) = ("comment", True)
elif (word[0] == '!') : (self.col, self.cdata) = ("comment", True)
elif (word[0] == ';') : (self.col, self.cdata) = ("comment", True)
elif (word[0] == '#') : self.col = "variable"
elif (word[0] == ':') : self.col = "blocknum"
elif (ord(word[0]) <= 32) : self.cdata = True
    def Parse(self, name, oname=None):
        """Read NC file *name* line by line and emit backplot geometry.

        Per-line state (axis words) is reset for every line; modal state
        (path colour, feed, arc direction) persists across lines.
        """
        self.files_open(name,oname)
        #self.begin_ncblock()
        #self.begin_path(None)
        #self.add_line(z=500)
        #self.end_path()
        #self.end_ncblock()
        # Modal state: survives from one line to the next.
        self.path_col = None
        self.f = None
        self.arc = 0
        while (self.readline()):
            # Per-line axis/word values: cleared before tokenising each line.
            self.a = None
            self.b = None
            self.c = None
            self.i = None
            self.j = None
            self.k = None
            self.p = None
            self.q = None
            self.r = None
            self.s = None
            self.x = None
            self.y = None
            self.z = None
            #self.begin_ncblock()
            self.move = False
            self.drill = False
            self.no_move = False
            words = self.pattern_main.findall(self.line)
            for word in words:
                self.col = None
                self.cdata = False
                self.ParseWord(word)
                self.add_text(word, self.col, self.cdata)
            if (self.drill):
                # Canned drill cycle: rapid to R plane, feed down to Z,
                # then feed back up to the R plane.
                self.begin_path("rapid")
                self.add_line(self.x, self.y, self.r)
                self.end_path()
                self.begin_path("feed")
                self.add_line(self.x, self.y, self.z)
                self.end_path()
                self.begin_path("feed")
                self.add_line(self.x, self.y, self.r)
                self.end_path()
            else:
                if (self.move and not self.no_move):
                    self.begin_path(self.path_col)
                    if (self.arc==-1):
                        self.add_arc(self.x, self.y, self.z, self.i, self.j, self.k, self.r, self.arc)
                    elif (self.arc==1):
                        #self.add_arc(x, y, z, i, j, k, -r, arc) #if you want to use arcs with R values uncomment the first part of this line and comment the next one
                        self.add_arc(self.x, self.y, self.z, self.i, self.j, self.k, self.r, self.arc)
                    else : self.add_line(self.x, self.y, self.z, self.a, self.b, self.c)
                    self.end_path()
            self.end_ncblock()
        self.files_close()
################################################################################
if __name__ == '__main__':
    # NOTE(review): `ParserIso` is not defined in this module (the class above
    # is `Parser`, whose constructor also requires a writer), so running this
    # file directly raises NameError — confirm the intended entry point.
    parser = ParserIso()
|
'''
Create | d on 2014-1-21
@author: Administrator
'''
#import class/method
from athelets import get_data_filelist, get_data_in_file
# Load one athlete's data file and print the parsed name, then the summary
# for the same file list.  (Fix: dropped the un-Pythonic trailing semicolons.)
james = get_data_in_file('james2.txt')
print(james.name)
print(get_data_filelist(['james2.txt']))
|
"""Findall regex operations in python.
findall(strin | g[, pos[, endpos]])
Returns a list:
not like search and match which returns objects
Otherwise, it returns an empty list.
"""
import re
# look for every word in a string
pattern = re.compile(r"\w+")
result = pattern.findall("hey bro")
print result
# a*b matches zero or more 'a' followed by one 'b'
patt = re.compile(r"a*b")
# returns ['ab', 'ab', 'ab', 'b']
res = patt.findall("abababb")
print res
# with multiple groups, findall returns a list of tuples (one per match)
p = re.compile(r"(\w+) (\w+)")
rv = p.findall("Hello world, i lived")
print rv
# Using unicode characters (Python 2 ur'' literal + re.UNICODE flag)
print re.findall(ur"\w+", u"这是一个例子", re.UNICODE)
# using named groups inside pattern itself ((?P=word) back-references the group)
patt = re.compile(r"(?P<word>\w+) (?P=word)")
|
from django.shortcuts import redirect, get_object_or_404, render
from django.views.generic import TemplateView, View
from django.http import HttpResponse
from django.views.decorators.clickjacking import xframe_options_exempt
import json
from .models import Page, CurrentGame, VisiblePage
from .game import Manager
class ClosedView(TemplateView):
    """Shown while no game is running; redirects home once one starts."""
    template_name = 'intranet/closed.html'

    def dispatch(self, *args, **kwargs):
        if not Manager().is_started():
            return super(ClosedView, self).dispatch(*args, **kwargs)
        return redirect('home')
class IntranetBaseView(View):
    """View accessible only while a game is running; otherwise -> 'closed'."""

    def dispatch(self, *args, **kwargs):
        if Manager().is_started():
            return super(IntranetBaseView, self).dispatch(*args, **kwargs)
        return redirect('closed')
class HomeView(IntranetBaseView, TemplateView):
    # Intranet landing page; requires a running game via IntranetBaseView.
    template_name = 'intranet/home.html'
class DeniedView(IntranetBaseView, TemplateView):
    # Access-denied page shown for pages that are not currently visible.
    template_name = 'intranet/denied.html'
class PageView(IntranetBaseView):
    """ Base view for intranet page (those used in iframe) """
    def fetch_url_name(self, **kwargs):
        # Remember which page was requested (taken from the URL kwargs).
        self.url_name = kwargs['url_name']
        return self.url_name
    def fetch_page(self):
        # 404 when url_name does not correspond to a known Page.
        self.page = get_object_or_404(Page, url_name=self.url_name)
        return self.page
    def page_is_visible(self):
        # A page is visible iff a VisiblePage row references it.
        try:
            VisiblePage.objects.get(page=self.page)
            return True
        except VisiblePage.DoesNotExist:
            return False
    @xframe_options_exempt
    def dispatch(self, request, *args, **kwargs):
        # Exempt from X-Frame-Options so the page can be embedded in an iframe.
        return super(PageView, self).dispatch(request,*args,**kwargs)
    def get(self, request, *args, **kwargs):
        self.fetch_url_name(**kwargs)
        self.fetch_page()
        if self.page_is_visible():
            return render(request, self.page.template_file)
        else:
            return redirect('denied')
|
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import os
import errno
from typing import List
from typ | ing import Optional
from typing import Text
def relative_normpath(f, path):
    # type: (Optional[Text], Text) -> Optional[Text]
    """Return the normalised path of *f* relative to *path* (None passes through)."""
    if f is None:
        return None
    return os.path.normpath(os.path.relpath(f, path))
def create_dir(dir_path):
    # type: (Text) -> None
    """Create *dir_path* and any missing parents; an existing path is fine."""
    try:
        os.makedirs(dir_path)
    except OSError as err:
        # Only "already exists" is tolerated; everything else is a real error.
        if err.errno != errno.EEXIST:
            raise
def create_dir_for_file(file_path):
    # type: (Text) -> None
    """Ensure the parent directory of *file_path* exists."""
    parent = os.path.dirname(file_path)
    try:
        os.makedirs(parent)
    except OSError as err:
        # An already-existing parent is not an error.
        if err.errno != errno.EEXIST:
            raise
def recursively_find_files(resource_name):
    # type: (Optional[Text]) -> List[Text]
    """Traverse directory hierarchy to find files.

    `resource_name` can be a folder or a file; in both cases a list of file
    paths is returned.  Hidden entries (basename starting with '.') are
    skipped, including whole hidden subtrees.

    Raises ValueError when `resource_name` is falsy or does not exist.
    """
    if not resource_name:
        raise ValueError("Resource name '{}' must be an existing directory or file.".format(resource_name))
    elif os.path.isfile(resource_name):
        return [resource_name]
    elif os.path.isdir(resource_name):
        resources = []  # type: List[Text]
        # Breadth-first walk over the tree.  Fixes from the original:
        # os.path.basename instead of splitting on "/" (portable to Windows
        # separators), and an index cursor instead of rebuilding/slicing the
        # frontier list every iteration (which was O(n^2)).
        pending = [resource_name]
        index = 0
        while index < len(pending):
            current_node = pending[index]
            index += 1
            # Skip hidden files and directories entirely.
            if os.path.basename(current_node).startswith('.'):
                continue
            if os.path.isdir(current_node):
                pending.extend(os.path.join(current_node, f) for f in os.listdir(current_node))
            else:
                resources.append(current_node)
        return resources
    else:
        raise ValueError("Could not locate the resource '{}'.".format(os.path.abspath(resource_name)))
def lazyproperty(fn):
    """Memoising property decorator: compute once, then reuse.

    The computed value is stored on the instance under a private attribute,
    so the wrapped function runs only on the first access.
    """
    cache_attr = '_lazy_' + fn.__name__

    @property
    def _lazyprop(self):
        if not hasattr(self, cache_attr):
            setattr(self, cache_attr, fn(self))
        return getattr(self, cache_attr)

    return _lazyprop
def list_to_str(l, delim=", ", quote="'"):
    """Join *l* into one string, wrapping every element in *quote*."""
    return delim.join(quote + element + quote for element in l)
def ordered(obj):
    """Recursively sort *obj* so equal nested structures compare equal.

    Dicts become sorted lists of (key, ordered(value)) pairs; lists are
    sorted element-wise; anything else is returned unchanged.
    """
    if isinstance(obj, dict):
        return sorted((key, ordered(value)) for key, value in obj.items())
    if isinstance(obj, list):
        return sorted(ordered(element) for element in obj)
    return obj
|
import re
import string
import sys
# NOTE(review): hard-coded, machine-specific path for the API-key module.
sys.path.append('/Users/exu/PlayGround/readinglists/')
from key.keys import *
from amazon.api import AmazonAPI
from html2text import html2text
# Matches Amazon product URLs and captures the numeric ASIN after /gp/product/.
pattern = re.compile("https?://.*amazon.com/gp/product/([0-9]+)/.*")
# MaxQPS < 1 keeps requests under Amazon's API rate limit.
amazon = AmazonAPI(AMAZON_ACCESS_KEY_ID, AMAZON_SECRET_ACCESS_KEY, AMAZON_ASSOC_TAG, MaxQPS=0.9)
def uprint(s):
    """Print a unicode string as UTF-8 (Python 2 stdout is byte-oriented)."""
    print s.encode('utf-8')
def get_asin(url):
    """Extract the ASIN from an Amazon product URL; None when it doesn't match."""
    # `global` is only needed for rebinding a module name, not for reading it,
    # so the original `global pattern` statement was dropped.
    m = pattern.match(url)
    if m and len(m.groups()) > 0:
        return m.groups()[0]
def read_file():
if (len(sys.argv) < 1):
print "Please provide a file that includes a list of Amazon links."
sys.exit(-1)
fname = sys.argv[1]
f = open(fname, 'r')
products = []
for l in f.readlines():
product = amazon.lookup(ItemId=get_asin(l))
products.append([product.title, product.editorial_review, product.large_image_url, product.offer_url])
print "Got product", product.title
return products
# Greedily splits a title into (everything before, trailing "(...)" part).
rtitle = re.compile('(.*)(\(.*\))')


def normalize_title(title):
    """Crop the trailing parenthesised part from a long book title."""
    matches = rtitle.findall(title)
    return matches[0][0] if matches else title
def sanitize_text(t):
    """Convert HTML to plain text, then swap straight apostrophes for curly
    ones and double emphasis markers for single ones."""
    text = html2text(t)
    text = text.replace("'", "’")
    text = text.replace("**", "*")
    return text
if __name__ == '__main__':
    import os.path
    import cPickle
    # Cache the scraped product data so repeated runs skip the Amazon API.
    pickle_file = 'products.pickle'
    products = None
    if os.path.isfile(pickle_file):
        # NOTE(review): opened in text mode 'r'; a binary pickle would need 'rb'.
        products = cPickle.load(open(pickle_file, 'r'))
    else:
        products = read_file()
        # NOTE(review): f is never closed/flushed explicitly; the dump relies
        # on interpreter exit to flush the file.
        f = open(pickle_file, "wb")
        cPickle.dump(products, f)
    # Emit each product as a reST-style section: title, '=' underline, review.
    for product in products:
        title = normalize_title(product[0])
        uprint(title)
        print '=' * len(title)
        review = sanitize_text(product[1])
        uprint(review)
        print
|
##############################################################################
#
# OSIS stands for Open Student Information System. It's an application
# designed to manage the core business of higher education institutions,
# such as universities, faculties, institutes and professional schools.
# The core business involves the administration of students, teachers,
# courses, programs and so on.
#
# Copyright (C) 2015-2018 Université catholique de Louvain (http://www.uclouvain.be)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of this license - GNU General Public License - is available
# at the root of the source code of this program. If not,
# see http://www.gnu.org/licenses/.
#
##############################################################################
from ckeditor.fields import RichTextFormField
from django import forms
from base.models.admission_condition import CONDITION_ADMISSION_ACCESSES
class UpdateLineForm(forms.Form):
    """Edit one admission-condition line; hidden fields identify the record."""
    # Hidden bookkeeping fields: primary key, section name and language code.
    admission_condition_line = forms.IntegerField(widget=forms.HiddenInput())
    section = forms.CharField(widget=forms.HiddenInput())
    language = forms.CharField(widget=forms.HiddenInput())
    # Free-text columns of the line; all optional.
    diploma = forms.CharField(widget=forms.Textarea, required=False)
    conditions = forms.CharField(widget=forms.Textarea, required=False)
    # One of the CONDITION_ADMISSION_ACCESSES choices.
    access = forms.ChoiceField(choices=CONDITION_ADMISSION_ACCESSES, required=False)
    remarks = forms.CharField(widget=forms.Textarea, required=False)
class UpdateTextForm(forms.Form):
    """Edit an admission-condition text section in French and English."""
    # Shared kwargs for both rich-text fields (CKEditor 'minimal' toolbar).
    PARAMETERS_FOR_RICH_TEXT = dict(required=False, config_name='minimal')
    text_fr = RichTextFormField(**PARAMETERS_FOR_RICH_TEXT)
    text_en = RichTextFormField(**PARAMETERS_FOR_RICH_TEXT)
    # Hidden field identifying the section being edited.
    section = forms.CharField(widget=forms.HiddenInput())
|
#!/usr/bin/python
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: cloudwatchlogs_log_group_info
short_description: Get information about log_group in CloudWatchLogs
description:
- Lists the specified log groups. You can list all your log groups or filter the results by prefix.
- This module was called C(cloudwatchlogs_log_group_facts) before Ansible 2.9. The usage did not change.
version_added: "2.5"
author:
- Willian Ricardo (@willricardo) <willricardo@gmail.com>
requirements: [ botocore, boto3 ]
options:
log_group_name:
description:
- The name or prefix of the log group to filter by.
type: str
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# Note: These examples do not set authentication details, see the AWS Guide for details.
- cloudwatchlogs_log_group_info:
log_group_name: test-log-group
'''
RETURN = '''
log_groups:
description: Return the list of complex objects representing log groups
returned: success
type: complex
contains:
log_group_name:
description: The name of the log group.
returned: always
type: str
creation_time:
description: The creation time of the log group.
returned: always
type: int
retention_in_days:
description: The number of days to retain the log events in the specified log group.
returned: always
type: int
metric_filter_count:
description: The number of metric filters.
returned: always
type: int
arn:
description: The Amazon Resource Name (ARN) of the log group.
returned: always
type: str
stored_bytes:
description: The number of bytes stored.
returned: always
type: str
kms_key_id:
description: The Amazon Resource Name (ARN) of the CMK to use when encrypting log data.
returned: always
type: str
'''
import traceback
from ansible.module_utils._text import to_native
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import HAS_BOTO3, camel_dict_to_snake_dict, boto3_conn, ec2_argument_spec, get_aws_connection_info
try:
import botocore
except ImportError:
pass # will be detected by imported HAS_BOTO3
def describe_log_group(client, log_group_name, module):
    """Return the paginated describe_log_groups result, optionally filtered
    by a name prefix; fails the Ansible module on any botocore error."""
    params = {'logGroupNamePrefix': log_group_name} if log_group_name else {}
    try:
        paginator = client.get_paginator('describe_log_groups')
        return paginator.paginate(**params).build_full_result()
    except botocore.exceptions.ClientError as e:
        # AWS-side rejection: include the snake_cased error response.
        module.fail_json(msg="Unable to describe log group {0}: {1}".format(log_group_name, to_native(e)),
                         exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
    except botocore.exceptions.BotoCoreError as e:
        # Client-side botocore failure (no response payload available).
        module.fail_json(msg="Unable to describe log group {0}: {1}".format(log_group_name, to_native(e)),
                         exception=traceback.format_exc())
def main():
    """Ansible module entry point: list CloudWatch log groups and exit."""
    argument_spec = ec2_argument_spec()
    argument_spec.update(dict(
        log_group_name=dict(),
    ))
    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
    # Support the pre-2.9 module name with a deprecation warning.
    if module._name == 'cloudwatchlogs_log_group_facts':
        module.deprecate("The 'cloudwatchlogs_log_group_facts' module has been renamed to 'cloudwatchlogs_log_group_info'", version='2.13')

    if not HAS_BOTO3:
        module.fail_json(msg='boto3 is required.')

    region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
    logs = boto3_conn(module, conn_type='client', resource='logs', region=region, endpoint=ec2_url, **aws_connect_kwargs)

    desc_log_group = describe_log_group(client=logs,
                                        log_group_name=module.params['log_group_name'],
                                        module=module)
    # Convert each camelCase log-group dict to snake_case for Ansible output.
    final_log_group_snake = []
    for log_group in desc_log_group['logGroups']:
        final_log_group_snake.append(camel_dict_to_snake_dict(log_group))
    desc_log_group_result = dict(changed=False, log_groups=final_log_group_snake)
    module.exit_json(**desc_log_group_result)


if __name__ == '__main__':
    main()
|
# -*- coding: utf-8 -*-
#
# || ____ _ __
# +------+ / __ )(_) /_______________ _____ ___
# | 0xBC | / __ / / __/ ___/ ___/ __ `/_ / / _ \
# +------+ / /_/ / / /_/ /__/ / / /_/ / / /_/ __/
# || || /_____/_/\__/\___/_/ \__,_/ /___/\___/
#
# Copyright (C) 2011-2013 Bitcraze AB
#
# Crazyflie Nano Quadcopter Client
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distribut | ed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundatio | n, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
"""
Module to create topo and qinit data files for this example.
"""
from clawpack.geoclaw import topotools
from pylab import *
def maketopo_hilo():
    """Regrid the Hilo x/y/z text data onto a truly uniform grid and write it
    as hilo_flattened.tt2 (topo_type 2)."""
    x = loadtxt('x.txt')
    y = loadtxt('y.txt')
    z = loadtxt('z.txt')
    # Force exactly uniform cell spacing; the raw coordinates drift slightly.
    cellsize = 1. / (3.*3600.)  # 1/3" — nominal spacing, kept for reference (unused)
    uniform_x = linspace(x[0], x[-1], len(x))
    uniform_y = linspace(y[-1], y[0], len(y))  # y arrives descending; reverse it
    flipped_z = flipud(z)                      # flip rows to match reversed y
    topo = topotools.Topography()
    topo.x = uniform_x
    topo.y = uniform_y
    topo.Z = flipped_z
    topo.write('hilo_flattened.tt2', topo_type=2)
def maketopo_flat():
    """
    Output topography file for the entire domain
    """
    outfile = "flat.tt2"
    nxpoints, nypoints = 201, 301
    xlower, xupper = 204.812, 205.012
    ylower, yupper = 19.7, 20.0
    topotools.topo2writer(outfile, topo_flat, xlower, xupper, ylower, yupper, nxpoints, nypoints)
def topo_flat(x, y):
    """Flat step bathymetry: +30 m where x < 204.91213, -30 m elsewhere
    (y is accepted for the topo2writer interface but unused)."""
    return where(x < 204.91213, 30., -30.)
def plot_topo_big():
    """Plot both topo files over the full domain with the high-resolution
    region outlined in white."""
    figure(figsize=(8,12))
    topo1 = topotools.Topography()
    topo1.read('flat.tt2',2)
    contourf(topo1.x,topo1.y,topo1.Z,linspace(-30,20,51), extend='both')
    topo2 = topotools.Topography()
    topo2.read('hilo_flattened.tt2',2)
    contourf(topo2.x,topo2.y,topo2.Z,linspace(-30,20,51), extend='both')
    # Corners of the nested Hilo grid, drawn as a white rectangle.
    x1 = 204.90028
    x2 = 204.96509
    y1 = 19.71
    y2 = 19.95
    plot([x1,x2,x2,x1,x1],[y1,y1,y2,y2,y1],'w')
    axis('scaled')
    colorbar()
def plot_topo():
    """Zoomed plot of Hilo harbor: topo contours, the shoreline (Z=0), and
    reference gauge/benchmark locations."""
    figure(figsize=(12,8))
    topo1 = topotools.Topography()
    topo1.read('flat.tt2',2)
    contourf(topo1.x,topo1.y,topo1.Z,linspace(-30,20,51), extend='both')
    topo2 = topotools.Topography()
    topo2.read('hilo_flattened.tt2',2)
    contourf(topo2.x,topo2.y,topo2.Z,linspace(-30,20,51), extend='both')
    colorbar()
    # Zoom window around the harbor.
    x1 = 204.9
    x2 = 204.955
    y1 = 19.715
    y2 = 19.755
    axis([x1,x2,y1,y2])
    # Aspect ratio correction for longitude at this latitude.
    gca().set_aspect(1./cos(y1*pi/180.))
    ticklabel_format(format='plain',useOffset=False)
    # Shoreline contour.
    contour(topo2.x,topo2.y,topo2.Z,[0.],colors='k')
    plot([204.9447],[19.7308], 'ko') # from BM description
    plot([204.9437],[19.7307], 'ro') # closer to pier
    # from <http://tidesandcurrents.noaa.gov/stationhome.html?id=1617760>
    # location is listed as: 19 degrees 43.8' N, 155 degrees, 3.3' W
    xg = 360 - (155 + 3.3/60.)
    yg = 19 + 43.8/60.
    plot([xg],[yg], 'bo')
    #gauges.append([1125, 204.91802, 19.74517, 0., 1.e9]) #Hilo
    #gauges.append([1126, 204.93003, 19.74167, 0., 1.e9]) #Hilo
    #gauges.append([3333, 204.93, 19.7576, 0., 1.e9])
if __name__=='__main__':
    # Build both topo files when run as a script.
    maketopo_hilo()
    maketopo_flat()
|
elif isinstance(value, set):
if not value <= superset[key]:
return False
else:
if not value == superset[key]:
return False
return True
def update_qs(params):
    ''' Build a URL query string from the non-None entries of params. '''
    filtered = {key: value for key, value in params.items() if value is not None}
    return '?' + urlencode(filtered)
def msc_argument_spec():
    """Common connection/auth argument spec shared by all MSC modules."""
    return {
        'host': dict(type='str', required=True, aliases=['hostname']),
        'port': dict(type='int', required=False),
        'username': dict(type='str', default='admin'),
        'password': dict(type='str', required=True, no_log=True),
        'output_level': dict(type='str', default='normal', choices=['normal', 'info', 'debug']),
        'timeout': dict(type='int', default=30),
        'use_proxy': dict(type='bool', default=True),
        'use_ssl': dict(type='bool', default=True),
        'validate_certs': dict(type='bool', default=True),
    }
class MSCModule(object):
    def __init__(self, module):
        """Wrap an AnsibleModule with MSC connection state and authenticate."""
        self.module = module
        self.params = module.params
        self.result = dict(changed=False)
        # NOTE(review): 'text/json' is an unusual content type; servers
        # normally expect 'application/json' — confirm the MSC accepts it.
        self.headers = {'Content-Type': 'text/json'}
        # normal output
        self.existing = dict()

        # info output
        self.previous = dict()
        self.proposed = dict()
        self.sent = dict()

        # debug output
        self.filter_string = ''
        self.method = None
        self.path = None
        self.response = None
        self.status = None
        self.url = None

        # Ensure protocol is set
        self.params['protocol'] = 'https' if self.params.get('use_ssl', True) else 'http'

        # Set base_uri
        if 'port' in self.params and self.params['port'] is not None:
            self.baseuri = '{protocol}://{host}:{port}/api/v1/'.format(**self.params)
        else:
            self.baseuri = '{protocol}://{host}/api/v1/'.format(**self.params)

        # ANSIBLE_DEBUG forces the most verbose output level.
        if self.module._debug:
            self.module.warn('Enable debug output because ANSIBLE_DEBUG was set.')
            self.params['output_level'] = 'debug'

        if self.params['password']:
            # Perform password-based authentication, log on using password
            self.login()
        else:
            self.module.fail_json(msg="Parameter 'password' is required for authentication")
    def login(self):
        ''' Log in to MSC and store the bearer token for later requests. '''
        # Perform login request
        self.url = urljoin(self.baseuri, 'auth/login')
        payload = {'username': self.params['username'], 'password': self.params['password']}
        resp, auth = fetch_url(self.module,
                               self.url,
                               data=json.dumps(payload),
                               method='POST',
                               headers=self.headers,
                               timeout=self.params['timeout'],
                               use_proxy=self.params['use_proxy'])

        # Handle MSC response: a successful login returns 201 Created with a
        # token in the JSON body.
        if auth['status'] != 201:
            self.response = auth['msg']
            self.status = auth['status']
            self.fail_json(msg='Authentication failed: {msg}'.format(**auth))

        payload = json.loads(resp.read())
        # All subsequent requests authenticate with this bearer token.
        self.headers['Authorization'] = 'Bearer {token}'.format(**payload)
def request(self, path, method=None, data=None, qs=None):
''' Generic HTTP method for MSC requests. '''
self.path = path
if method is not None:
self.method = method
self.url = urljoin(self.baseuri, path)
if qs is not None:
self.url = self.url + update_qs(qs)
resp, info = fetch_url(self.module,
self.url,
headers=self.headers,
data=json.dumps(data),
method=self.method,
timeout=self.params['timeout'],
use_proxy=self.params['use_proxy'],
)
self.response = info['msg']
self.status = info['status']
# 200: OK, 201: Created, 202: Accepted, 204: No Content
if self.status in (200, 201, 202, 204):
output = resp.read()
# if self.method in ('DELETE', 'PATCH', 'POST', 'PUT') and self.status in (200, 201, 204):
# self.result['changed'] = True
if output:
return json.loads(output)
# 404: Not Found
elif self.method == 'DELETE' and self.status == 404:
return {}
# 400: Bad Request, 401: Unauthorized, 403: Forbidden,
# 405: Method Not Allowed, 406: Not Acceptable
# 500: Internal Server Error, 501: Not Implemented
elif self.status >= 400:
try:
payload = json.loads(resp.read())
except Exception:
payload = json.loads(info['body'])
if 'code' in payload:
self.fail_json(msg='MSC Error {code}: {message}'.format(**payload), data=data, info=info, payload=payload)
else:
| self.fail_json(msg='MSC Error:'.format(**payload), data=data, info=info, payload=payload)
return {}
def query_objs(self, path, key=None, **kwargs):
''' Query the MSC REST API for objects in a path '''
found = []
objs = self.request(path | , method='GET')
if key is None:
key = path
for obj in objs[key]:
for kw_key, kw_value in kwargs.items():
if kw_value is None:
continue
if obj[kw_key] != kw_value:
break
else:
found.append(obj)
return found
def get_obj(self, path, **kwargs):
''' Get a specific object from a set of MSC REST objects '''
objs = self.query_objs(path, **kwargs)
if len(objs) == 0:
return {}
if len(objs) > 1:
self.fail_json(msg='More than one object matches unique filter: {0}'.format(kwargs))
return objs[0]
def lookup_domain(self, domain):
''' Look up a domain and return its id '''
if domain is None:
return domain
d = self.get_obj('auth/domains', key='domains', name=domain)
if not d:
self.module.fail_json(msg="Domain '%s' is not valid." % domain)
if 'id' not in d:
self.module.fail_json(msg="Domain lookup failed for '%s': %s" % (domain, d))
return d['id']
def lookup_roles(self, roles):
''' Look up roles and return their ids '''
if roles is None:
return roles
ids = []
for role in roles:
r = self.get_obj('roles', name=role)
if not r:
self.module.fail_json(msg="Role '%s' is not valid." % role)
if 'id' not in r:
self.module.fail_json(msg="Role lookup failed for '%s': %s" % (role, r))
ids.append(dict(roleId=r['id']))
return ids
def create_label(self, label, label_type):
''' Create a new label '''
return self.request('labels', method='POST', data=dict(displayName=label, type=label_type))
def lookup_labels(self, labels, label_type):
''' Look up labels and return their ids (create if necessary) '''
if labels is None:
return None
ids = []
for label in labels:
l = self.get_obj('labels', displayName=label)
if not l:
l = self.create_label(label, label_type)
if 'id' not in l:
self.module.fail_json(msg="Label lookup failed for '%s': %s" % (label, l))
ids.append(l['id'])
return ids
def sanitize(self, updates, collate=False, required_keys=None):
''' Clean up unset keys from a request payload '''
if required_keys is None:
required_keys = []
self.proposed = deepcopy(self.existing)
self.sent = deepcopy(self.existing)
# Clean up self.sent
for key in updates:
# Always retain 'id'
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Each pattern captures (crop-directory, 9-digit id + suffix, suffix, extension)
# from a makerDrop path; the *10 multiplier just inflates the fixture list
# (presumably for timing/demo purposes — the entries repeat).
patterns = [r'^.*?/bc_jpg_makerDrop/(crop_fullsize_pad_center)/?.*?/(\d{9}(.*?))\.(.*?)$',
            r'^.*?/bc_jpg_makerDrop/(crop_fullsize_pad_anchor)/?.*?/(\d{9}(.*?))\.(.*?)$',
            r'^.*?/bfly_jpg_makerDrop/(crop_fullsize_center)/?.*?/(\d{9}(.*?))\.(.*?)$',
            r'^.*?/bfly_jpg_makerDrop/(crop_fullsize_anchor)/?.*?/(\d{9}(.*?))\.(.*?)$']*10
# Sample file paths to match against (again inflated 10x).
strings = ["/mnt/Post_Complete/Complete_to_Load/nature_center/bc_jpg_makerDrop/crop_fullsize_pad_anchor/346470409.png",
           "/mnt/Post_Complete/Complete_to_Load/nature_center/bc_jpg_makerDrop/crop_fullsize_pad_center/346470408_1.jpg",
           "/mnt/Post_Complete/Complete_to_Load/nature_center/bc_jpg_makerDrop/crop_fullsize_pad_anchor/346470407_alt01.png",
           "/mnt/Post_Complete/Complete_to_Load/nature_center/bc_jpg_makerDrop/crop_fullsize_pad_center/346470406_1.png",
           "/mnt/Post_Complete/Complete_to_Load/nature_center/bfly_jpg_makerDrop/crop_fullsize_anchor/346880405.png",
           "/mnt/Post_Complete/Complete_to_Load/nature_center/bfly_jpg_makerDrop/crop_fullsize_center/346470404_1.jpg",
           "/mnt/Post_Complete/Complete_to_Load/nature_center/bfly_jpg_makerDrop/crop_fullsize_center/346470403.png",
           "/mnt/Post_Complete/Complete_to_Load/nature_center/bfly_jpg_makerDrop/crop_fullsize_anchor/336470402.jpg"]*10
def matches_pattern(s, patterns):
    """Return (match_object, pattern) for the first compiled pattern that
    matches *s*, or False when none match.

    Fixes: the parameter previously shadowed the builtin ``str``, and each
    pattern was matched twice (once for the test, once for the return).
    """
    for pattern in patterns:
        match = pattern.match(s)  # match once instead of twice
        if match:
            return match, pattern
    return False
def regex_matcherator(strings,patterns):
    """For every string matching one of the patterns, print the pattern, the
    last two path components, and the captured groups (Python 2)."""
    import re
    compiled_patterns = list(map(re.compile, patterns))
    for s in strings:
        # NOTE(review): matches_pattern is evaluated three times per hit;
        # binding the result to a variable once would cut the regex work.
        if matches_pattern(s, compiled_patterns):
            print matches_pattern(s, compiled_patterns)[1].pattern
            print '--'.join(s.split('/')[-2:])
            print matches_pattern(s, compiled_patterns)[0].groups()
            # print '\n' emits two newlines (the string plus print's own).
            print '\n'
# NOTE(review): regex_matcherator prints as it runs and returns None (it is
# not a generator), so r is always None and the .next() call below would fail.
r = regex_matcherator(strings,patterns)
#print r.next()
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Doit task definitions."""
# Global doit configuration: run flake8 + docs by default, keep going after
# failures, and execute up to two tasks in parallel using threads.
DOIT_CONFIG = {
    'default_tasks': [
        'flake8',
        'docs',
    ],
    'continue': True,
    'verbosity': 1,
    'num_process': 2,
    'par_type': 'thread',
}
def task_flake8():
    """doit task: lint the m2r module and its tests with flake8."""
    return {'actions': ['flake8 m2r tests']}
def task_docs():
    """doit task: build the HTML docs strictly (warnings are errors)."""
    return {'actions': ['sphinx-build -q -W -E -n -b html docs docs/_build/html']}
|
"""Conveni | ent imports"""
from pswingw2.client import send_simple_message # noqa
from pswingw2.client import send # noqa
from pswingw2.client import send_single # noqa
from pswingw2.client import send_batch # noqa
from pswingw2.client import Client # noqa
from pswingw2.config_defaults import get_simple_confi | g as config # noqa
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# scrapenhl2 documentation build configuration file, created by
# sphinx-quickstart on Sun Oct 1 17:47:07 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to | make it absolute, like shown here.
#
import sys
# Make the project packages importable by autodoc.
# NOTE(review): these are CWD-relative paths, so the build only works when
# sphinx is invoked from the docs directory; os.path.abspath would be safer.
sys.path.insert(0, '../../')
sys.path.insert(0, '../')
sys.path.insert(0, './')
sys.path.insert(0, '../../scrapenhl2/')
sys.path.insert(0, '../../scrapenhl2/scrape/')
sys.path.insert(0, '../../scrapenhl2/manipulate/')
sys.path.insert(0, '../../scrapenhl2/plot/')
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.autodoc',
'sphinx.ext.todo']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'scrapenhl2'
copyright = '2017, Muneeb Alam'
author = 'Muneeb Alam'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.4.1'
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'nature'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'scrapenhl2doc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'scrapenhl2.tex', 'scrapenhl2 Documentation',
'Muneeb Alam', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'scrapenhl2', 'scrapenhl2 Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'scrapenhl2', 'scrapenhl2 Documentation',
author, 'scrapenhl2', 'One line description of project.',
'Miscellaneous'),
]
|
h.SetProjection( pj )
#out_fh.GetRasterBand(1).SetRasterColorTable(flinfos[0].ct)
nodata = None
iband = 1
for fi in flinfos:
fi.copy_into( out_fh, 1, iband, nodata )
iband=iband+1
iband = 0
def names_to_fileinfos( self, name ):
file_infos = []
fi = file_info()
if fi.init_from_name( name ) == 1:
file_infos.append( fi )
return file_infos
def OnFileInError(self):
dlg = wx.MessageDialog(self,
'Minimum files to add:\n\n Input files => NDVI and Modis Band7\n One Output file',
'Error',wx.OK | wx.ICON_INFORMATION)
dlg.ShowModal()
dlg.Destroy()
def OnQCInError(self):
dlg = wx.MessageDialog(self,
'QC type error\n\n Please check your input',
'Error',wx.OK | wx.ICON_INFORMATION)
dlg.ShowModal()
dlg.Destroy()
# Path+filename seek and set
    def make_fb(self):
        """Create the QC-input and output-file browse widgets."""
        # get current working directory
        self.dirnm = os.getcwd()
        # QC input file chooser (open mode).
        self.cc2 = filebrowse.FileBrowseButton(
            self, -1, size=(50, -1), labelText='QC File:',
            startDirectory = self.dirnm,
            fileMode=wx.OPEN,
            changeCallback = self.fbbCallback2,
            )
        # Output file chooser (save mode, *.tif mask).
        self.cc6 = filebrowse.FileBrowseButton(
            self, -1, size=(50, -1), labelText='OUT File: ',
            startDirectory = self.dirnm,
            fileMask='*.tif',
            fileMode=wx.SAVE,
            changeCallback = self.fbbCallback6
            )
# Collect path+filenames
    def fbbCallback2(self, evt):
        # Remember the chosen QC input file path.
        self.qc = str(evt.GetString())
    def fbbCallback6(self, evt):
        # Remember the chosen output file path.
        self.output = str(evt.GetString())
# Front text
    def make_text(self):
        # Static description shown at the top of the dialog.
        self.text = wx.StaticText(self, -1, "This is processing MODIS Quality Assessment Bits through the use of gdal and numeric.")
# QC type radio buttons
    def make_radiobuttons1(self):
        """Radio box for choosing the MODIS product type."""
        self.rbox1 = wx.BoxSizer(wx.HORIZONTAL)
        # Two columns, options taken from self.NameMOD.
        self.rb1 = wx.RadioBox(self, -1, "Select MODIS Type",
                        wx.DefaultPosition, wx.DefaultSize,
                        self.NameMOD, 2, wx.RA_SPECIFY_COLS)
        self.rb1.SetToolTip(wx.ToolTip("Select MODIS type"))
        self.rb1.SetLabel("MODIS Type")
        self.rbox1.Add(self.rb1,1,wx.ALL,10)
    def EvtRadioBox1(self, evt):
        # Index of the selected MODIS type.
        self.nb = evt.GetInt()
        # NOTE(review): NameMOD is referenced as a bare global here, while the
        # radio box was built from self.NameMOD — confirm a module-level
        # NameMOD exists, otherwise this raises NameError.
        self.pixelres = NameMOD[self.nb]
        #print self.pixelres
    def make_radiobuttons2(self):
        """Radio box for choosing the band number (data-quality QC only)."""
        self.rbox2 = wx.BoxSizer(wx.HORIZONTAL)
        # Seven columns, options taken from self.bandno.
        self.rb2 = wx.RadioBox(self, -1, "Select Band number (data quality only)",
                        wx.DefaultPosition, wx.DefaultSize,
                        self.bandno, 7, wx.RA_SPECIFY_COLS)
        self.rb2.SetToolTip(wx.ToolTip("Select Band number (for data_quality)"))
        self.rb2.SetLabel("Band Number (for \"data quality\" only)")
        self.rbox2.Add(self.rb2,1,wx.ALL,10)
def EvtRadioBox2(self, evt):
self.nb = evt.GetInt()
self.band_no = self.bandno[self.nb]
#print self.band_no
    def make_radiobuttons3(self):
        """Radio box for choosing the QC type."""
        self.rbox3 = wx.BoxSizer(wx.HORIZONTAL)
        # Two columns, options taken from self.NameQC.
        self.rb3 = wx.RadioBox(self, -1, "Select QC Type",
                        wx.DefaultPosition, wx.DefaultSize,
                        self.NameQC, 2, wx.RA_SPECIFY_COLS)
        self.rb3.SetToolTip(wx.ToolTip("Select QC type"))
        self.rb3.SetLabel("QC Type")
        self.rbox3.Add(self.rb3,1,wx.ALL,10)
    def EvtRadioBox3(self, evt):
        # Index of the selected QC type.
        self.nb = evt.GetInt()
        # NOTE(review): bare NameQC here vs self.NameQC when the box was built
        # — confirm a module-level NameQC exists or this raises NameError.
        self.qc_type = NameQC[self.nb]
        #print self.qc_type
# Bottom buttons
    def make_buttons(self):
        """Create the OK / Cancel / Help bitmap buttons in a horizontal sizer."""
        self.bbox = wx.BoxSizer(wx.HORIZONTAL)
        # OnOK
        bmp0 = images.getPngDialogOKBitmap()
        self.b0 = wx.BitmapButton(self, 20, bmp0, (20, 20),
                       (bmp0.GetWidth()+50, bmp0.GetHeight()+10), style=wx.NO_BORDER)
        self.b0.SetToolTipString("Process")
        self.bbox.Add(self.b0,1,wx.CENTER,10)
        # OnCancel
        bmp1 = images.getPngDialogCancelBitmap()
        self.b1 = wx.BitmapButton(self, 30, bmp1, (20, 20),
                       (bmp1.GetWidth()+50, bmp1.GetHeight()+10), style=wx.NO_BORDER)
        self.b1.SetToolTipString("Abort")
        self.bbox.Add(self.b1,1,wx.CENTER,10)
        # OnInfo
        bmp2 = images.getPngHelpAboutBitmap()
        self.b2 = wx.BitmapButton(self, 40, bmp2, (20, 20),
                       (bmp2.GetWidth()+50, bmp2.GetHeight()+10), style=wx.NO_BORDER)
        self.b2.SetToolTipString("Help/Info.")
        self.bbox.Add(self.b2,1,wx.CENTER,10)
def bindEvents(self):
self.Bind(wx.EVT_CLOSE, self.OnCloseWindow)
self.Bind(wx.EVT_BUTTON, self.OnOK, self.b0)
self.Bind(wx.EVT_BUTTON, self.OnCancel, self.b1)
self.Bind(wx.EVT_BUTTON, self.OnInfo, self.b2)
self.Bind(wx.EVT_RADIOBOX, self.EvtRadioBox1, self.rb1)
self.Bind(wx.EVT_RADIOBOX, self.EvtRadioBox2, self.rb2)
self.Bind(wx.EVT_RADIOBOX, self.EvtRadioBox3, self.rb3)
    def OnCloseWindow(self, event):
        # Destroy the dialog when the window close event fires.
        self.Destroy()
    def OnCancel(self, event):
        # Abort: simply destroy the dialog without processing.
        self.Destroy()
def OnInfo(self,event):
dlg = wx.MessageDialog(self, overview,
'Help', wx.OK | wx.ICON_INFORMATION
)
dlg.ShowModal()
dlg.Destroy()
class file_info:
"""A class holding information about a GDAL file."""
def init_from_name(self, filename):
"""
Initialize file_info from filename
filename -- Name of file to read.
Returns 1 on success or 0 if the file can't be opened.
"""
fh = gdal.Open( str(filename) )
if fh is None:
return 0
self.filename = filename
self.bands = fh.RasterCount
self.xsize = fh.RasterXSize
self.ysize = fh.RasterYSize
self.band_type = fh.GetRasterBand(1).DataType
self.projection = fh.GetProjection()
self.geotransform = fh.GetGeoTransform()
self.ulx = self.geotransform[0]
self.uly = self.geotransform[3]
self.lrx = self.ulx + self.geotransform[1] * self.xsize
self.lry = self.uly + self.geotransform[5] * self.ysize
ct = fh.GetRasterBand(1).GetRasterColorTable()
if ct is not None:
self.ct = ct.Clone()
else:
self.ct = None
return 1
    def copy_into( self, t_fh, s_band = 1, t_band = 1, nodata_arg=None ):
        """
        Copy this files image into target file.

        Computes the georeferenced intersection of this file with the target
        dataset, converts it to pixel windows on both sides, and copies the
        overlapping region.  Returns 1 (treated as a no-op) when the files do
        not overlap or a window degenerates; otherwise returns raster_copy's
        result.
        """
        t_geotransform = t_fh.GetGeoTransform()
        t_ulx = t_geotransform[0]
        t_uly = t_geotransform[3]
        t_lrx = t_geotransform[0] + t_fh.RasterXSize * t_geotransform[1]
        t_lry = t_geotransform[3] + t_fh.RasterYSize * t_geotransform[5]

        # figure out intersection region
        tgw_ulx = max(t_ulx,self.ulx)
        tgw_lrx = min(t_lrx,self.lrx)
        # A negative NS pixel size (the common case) means y decreases downward,
        # so the min/max roles of the y bounds swap.
        if t_geotransform[5] < 0:
            tgw_uly = min(t_uly,self.uly)
            tgw_lry = max(t_lry,self.lry)
        else:
            tgw_uly = max(t_uly,self.uly)
            tgw_lry = min(t_lry,self.lry)

        # do they even intersect?
        if tgw_ulx >= tgw_lrx:
            return 1
        if t_geotransform[5] < 0 and tgw_uly <= tgw_lry:
            return 1
        if t_geotransform[5] > 0 and tgw_uly >= tgw_lry:
            return 1

        # compute target window in pixel coordinates.
        # (the +0.1 / +0.5 nudges guard against float rounding when snapping
        # georeferenced bounds to integer pixel indices)
        tw_xoff = int((tgw_ulx - t_geotransform[0]) / t_geotransform[1] + 0.1)
        tw_yoff = int((tgw_uly - t_geotransform[3]) / t_geotransform[5] + 0.1)
        tw_xsize = int((tgw_lrx-t_geotransform[0])/t_geotransform[1] + 0.5) - tw_xoff
        tw_ysize = int((tgw_lry-t_geotransform[3])/t_geotransform[5] + 0.5) - tw_yoff
        if tw_xsize < 1 or tw_ysize < 1:
            return 1

        # Compute source window in pixel coordinates.
        sw_xoff = int((tgw_ulx - self.geotransform[0]) / self.geotransform[1])
        sw_yoff = int((tgw_uly - self.geotransform[3]) / self.geotransform[5])
        sw_xsize = int((tgw_lrx - self.geotransform[0]) / self.geotransform[1] + 0.5) - sw_xoff
        sw_ysize = int((tgw_lry - self.geotransform[3]) / self.geotransform[5] + 0.5) - sw_yoff
        if sw_xsize < 1 or sw_ysize < 1:
            return 1

        # Open the source file, and copy the selected region.
        s_fh = gdal.Open( str(self.filename) )
        return self.raster_copy( s_fh, sw_xoff, sw_yoff, sw_xsize, sw_ysize, s_band, t_fh, tw_xoff, tw_yoff, tw_xsize, tw_ysize, t_band, nodata_arg )
def raster_copy( self, s_fh, s_xoff, s_yoff, s_xsize, s_ysize, s_band_n, t_fh, t_xoff, t_yoff, t_xsize, t_ysize, t_band_n, nodata=None ):
if nodata is not None:
return self | .raster_copy_with_nodata(
s_fh, s_xoff, s_yoff, s_xsize, s_ysize, s_band_n,
t_fh, t_xoff, t_yoff, t_xsize, t_ysize, t_band_n,
nodata )
s_band = s_fh.GetRasterBand( s_band_n )
t_band = t_fh.GetRasterBand( t_band_n )
data = s_band.ReadRaster( s_xoff, s_yoff, s_xsize, s_ysize, t_xsize, t_ysize, t_band.DataType )
t_band.WriteRaster( t_xoff, t_yoff, t_xsize, t_ysize, data, t_xsize, t_ysize, t_band.DataType )
retur | n 0
def raster_copy_with_nodata( self, s_fh, s_xoff, s_yoff, s_xsize, s_ysize, s_band_n,t_fh, t_xoff, t_yoff, t_xsize, t_ysize, t_band_n, nodata ):
import Numeric as |
##############################################################################
#
# OSIS stands for Open Student Information System. It's an application
# designed to manage the core business of higher education institutions,
# such as universities, faculties, institutes and professional schools.
# The core business involves the administration of students, teachers,
# courses, programs and so on.
#
# Copyright (C) 2015-2021 Université catholique de Louvain (http://www.uclouvain.be)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERC | HANTABILITY or FITN | ESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of this license - GNU General Public License - is available
# at the root of the source code of this program. If not,
# see http://www.gnu.org/licenses/.
#
##############################################################################
from django import forms
from django.urls import reverse
from django.utils.translation import gettext_lazy as _
class CustomXlsForm(forms.Form):
    """Checkbox form used to pick the optional columns of a custom XLS export."""
    required_entity = forms.BooleanField(required=False, label=_('Requirement entity'))
    allocation_entity = forms.BooleanField(required=False, label=_('Attribution entity'))
    credits = forms.BooleanField(required=False, label=_('Credits'))
    periodicity = forms.BooleanField(required=False, label=_('Periodicity'))
    active = forms.BooleanField(required=False, label=_('Active'))
    quadrimester = forms.BooleanField(required=False, label=_('Quadrimester'))
    session_derogation = forms.BooleanField(required=False, label=_('Session derogation'))
    volume = forms.BooleanField(required=False, label=_('Volume'))
    teacher_list = forms.BooleanField(required=False, label=_('Tutors (scores responsibles included)'))
    proposition = forms.BooleanField(required=False, label=_('Proposals'))
    english_title = forms.BooleanField(required=False, label=_('Title in English'))
    language = forms.BooleanField(required=False, label=_('Language'))
    specifications = forms.BooleanField(required=False, label=_('Specifications'))
    description_fiche = forms.BooleanField(required=False, label=_('Description fiche'))
    force_majeure = forms.BooleanField(required=False, label=_('Description fiche (including force majeure)'))

    def __init__(self, *args, year: int = None, code: str = None, **kwargs):
        super().__init__(*args, **kwargs)
        # endpoint the front-end queries for the learning units contained
        # in the education group identified by (year, code)
        self.url_action = reverse('education_group_learning_units_contains',
                                  kwargs={'year': year, 'code': code})

    def get_optional_data(self):
        """Return the names of all checked fields; [] when the form is invalid."""
        if not self.is_valid():
            return []
        return [name for name in self.fields if self.cleaned_data[name]]
|
from django.conf import settings
from django.db import models
from django.db.models import Q
from django.core.exceptions import ValidationError
from ordered_model.models import OrderedModel
from django.contrib.auth.models import User, Group
from django.conf import settings
# Useful for attempting full-text search on fields
class SearchManager(models.Manager):
    """Manager adding a naive full-text ``search`` over a set of fields."""

    def __init__(self, search_fields):
        super().__init__()
        self.search_fields = search_fields

    def search(self, terms):
        """Return objects matching *terms* in any of ``search_fields``."""
        # an empty / whitespace-only query matches everything
        if not terms.strip():
            return self.all()
        # real full-text search (field__search) only exists on postgres;
        # emulate it with icontains on other backends
        lookup = '__search' if settings.HAS_FULL_TEXT_SEARCH else '__icontains'
        combined = None
        for field in self.search_fields:
            clause = Q(**{field + lookup: terms})
            combined = clause if combined is None else combined | clause
        return self.filter(combined)
class NavbarEntry(OrderedModel):
    """One entry of the site navigation bar.

    Targets either a CMS ``Page`` or one of the special views in
    URL_CHOICES; exactly one of the two must be set (enforced by clean()).
    """
    # named views that can be linked directly instead of a Page
    URL_CHOICES = (('ghu_main:toolkits', 'Toolkits listing'),
                   ('ghu_main:organizations', 'Organizations'))
    label = models.CharField(max_length=256)
    page = models.ForeignKey('Page', on_delete=models.CASCADE, null=True,
                             blank=True)
    url = models.CharField(max_length=256, verbose_name='Special page',
                           choices=URL_CHOICES, blank=True)

    class Meta(OrderedModel.Meta):
        verbose_name = 'Navigation bar entry'
        verbose_name_plural = 'Navigation bar entries'

    def __str__(self):
        return '{}, {}, {}'.format(self.label, self.order, self.page)

    def clean(self):
        # a navbar entry must point at exactly one destination
        if (not self.page and not self.url) or (self.page and self.url):
            raise ValidationError('Must specify either a Page or Special '
                                  'page, but not both')
class Page(models.Model):
    """A CMS page addressed by slug, optionally rendered by a PageTemplate."""
    slug = models.SlugField(blank=True, unique=True)
    title = models.CharField(max_length=256)
    contents = models.TextField()
    # on_delete added for Django >= 2.0 compatibility (it is required there);
    # CASCADE was the implicit default on older versions, so behaviour is
    # unchanged -- and it matches NavbarEntry.page above.
    template = models.ForeignKey('PageTemplate', null=True, blank=True,
                                 on_delete=models.CASCADE)

    def __str__(self):
        return 'Page "{}": /{}/'.format(self.title, self.slug)
class PageTemplate(models.Model):
    """Named mapping from a user-friendly title to a template to execute."""
    name = models.CharField(max_length=256, verbose_name='User-friendly title')
    template = models.CharField(max_length=256, verbose_name='Template to execute')

    def __str__(self):
        return '%s (%s)' % (self.name, self.template)
class Toolkit(models.Model):
    """A titled collection of ToolkitPage entries, addressed by slug."""
    slug = models.SlugField(unique=True)
    title = models.CharField(max_length=256)
    summary = models.TextField()

    def __str__(self):
        return 'Toolkit: %s' % self.title
class ToolkitPage(OrderedModel):
    """A page within a Toolkit, ordered per-toolkit and unique by slug."""
    # on_delete added for Django >= 2.0 compatibility; CASCADE matches the
    # old implicit default, so existing behaviour is unchanged.
    toolkit = models.ForeignKey(Toolkit, related_name='pages',
                                on_delete=models.CASCADE)
    slug = models.SlugField()
    title = models.CharField(max_length=256)
    contents = models.TextField()

    # ordering restarts within each toolkit
    order_with_respect_to = 'toolkit'

    class Meta(OrderedModel.Meta):
        unique_together = (('toolkit', 'slug'),)

    def __str__(self):
        return '{}. Order: {}'.format(self.toolkit, self.order)
class OrgProfile(models.Model):
    """Public profile of an organization, searchable over its text fields."""
    slug = models.SlugField(blank=True, unique=True)
    name = models.CharField(max_length=256)
    email = models.EmailField(max_length=254)
    location = models.CharField(max_length=256, null=True)
    phone = models.CharField(max_length=256)
    summary = models.CharField(max_length=256, null=True)
    description = models.TextField()

    # full-text-ish search across the descriptive fields
    objects = SearchManager(('name', 'summary', 'description'))

    def __str__(self):
        return 'OrgProfile: %s, slug: %s' % (self.name, self.slug)
|
#!/usr/bin/env python
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
"""
The script can be used to setup a virtual environment for running Firefox UI Tests.
It will automatically install the firefox ui test package, all its dependencies,
and optional packages if specified.
"""
import argparse
import os
import shutil
import subprocess
import sys
import urllib2
import zipfile
# Link to the folder, which contains the zip archives of virtualenv
VIRTUALENV_URL = 'https://github.com/pypa/virtualenv/archive/%(VERSION)s.zip'
VIRTUALENV_VERSION = '12.1.1'

# directory containing this script; venv layout differs per platform
here = os.path.dirname(os.path.abspath(__file__))
venv_script_path = 'Scripts' if sys.platform == 'win32' else 'bin'
venv_activate = os.path.join(venv_script_path, 'activate')
venv_activate_this = os.path.join(venv_script_path, 'activate_this.py')
venv_python_bin = os.path.join(venv_script_path, 'python')

# printed after a successful setup; format args are ('source ' prefix, path)
usage_message = """
***********************************************************************
To run the Firefox UI Tests, activate the virtual environment:
{}{}
See firefox-ui-tests --help for all options
***********************************************************************
"""
def download(url, target):
    """Download *url* to the local path *target* and return *target*."""
    payload = urllib2.urlopen(url).read()
    with open(target, 'wb') as fh:
        fh.write(payload)
    return target
def create_virtualenv(target, python_bin=None):
    """Download a pinned virtualenv release and create *target* with it.

    python_bin -- optional interpreter passed to virtualenv via -p.
    The downloaded zip and the extracted sources are removed afterwards.
    (Python 2 script: uses print statements.)
    """
    script_path = os.path.join(here, 'virtualenv-%s' % VIRTUALENV_VERSION,
                               'virtualenv.py')
    print 'Downloading virtualenv %s' % VIRTUALENV_VERSION
    zip_path = download(VIRTUALENV_URL % {'VERSION': VIRTUALENV_VERSION},
                        os.path.join(here, 'virtualenv.zip'))
    try:
        with zipfile.ZipFile(zip_path, 'r') as f:
            f.extractall(here)
        print 'Creating new virtual environment'
        cmd_args = [sys.executable, script_path, target]
        if python_bin:
            cmd_args.extend(['-p', python_bin])
        subprocess.check_call(cmd_args)
    finally:
        # best-effort cleanup of both the archive and the extracted tree
        try:
            os.remove(zip_path)
        except OSError:
            pass
        shutil.rmtree(os.path.dirname(script_path), ignore_errors=True)
def main():
    """Create a fresh virtualenv, install the test packages, print usage."""
    parser = argparse.ArgumentParser()
    parser.add_argument('-p', '--python',
                        dest='python',
                        metavar='BINARY',
                        help='The Python interpreter to use.')
    parser.add_argument('venv',
                        metavar='PATH',
                        help='Path to the environment to be created.')
    args = parser.parse_args()

    # Remove an already existent virtual environment
    if os.path.exists(args.venv):
        print 'Removing already existent virtual environment at: %s' % args.venv
        shutil.rmtree(args.venv, True)

    create_virtualenv(args.venv, python_bin=args.python)

    # Activate the environment
    # (execfile of activate_this.py mutates sys.path in-process; Python 2 only)
    venv = os.path.join(args.venv, venv_activate_this)
    execfile(venv, dict(__file__=venv))

    # Install Firefox UI tests, dependencies and optional packages
    # (pip resolves inside the venv activated above; cwd is the repo root)
    command = ['pip', 'install',
               '-r', 'requirements.txt',
               '-r', 'requirements_optional.txt',
               ]
    print 'Installing Firefox UI Tests and dependencies...'
    print 'Command: %s' % command
    subprocess.check_call(command, cwd=os.path.dirname(here))

    # Print the user instructions
    print usage_message.format('' if sys.platform == 'win32' else 'source ',
                               os.path.join(args.venv, venv_activate))
|
# -*- coding:utf-8 -*-
from socket import socket
import threading
import json
# id : [user name]
# action : [create | join | send_msg | broadcast | out]
# action_value : [payload interpreted according to the action]
class Server:
    """Tiny JSON chat server.

    Wire protocol (one JSON object per message):
        id           -- user name
        action       -- create | join | send_msg | broadcast | exit | out
        action_value -- payload interpreted according to ``action``
    """

    def __init__(self):
        self.server_sock = socket()
        self.clients = []   # every connected client socket
        self.rooms = {}     # {room name: [client sockets]}

    def __client_th__(self, client_sock):
        """Worker thread: read requests from one client and dispatch them."""
        while True:
            # BUG FIX: socket.recv() requires a buffer-size argument.
            data = client_sock.recv(4096)
            if not data:
                break  # peer closed the connection
            # TODO: validate the JSON payload before trusting its keys
            protocol = json.loads(data)
            id = protocol['id']
            action = protocol['action']
            value = protocol['action_value']
            response = {'id': id,
                        'action': '',
                        'action_value': ''}
            if action == 'create':
                response['action'] = 'resp'
                if value not in self.rooms:
                    self.rooms[value] = [client_sock]
                    client_sock.room = value
                    response['action_value'] = 'OK'
                else:
                    response['action_value'] = 'ERR'
                # .encode(): sockets transport bytes, json.dumps returns text
                client_sock.send(json.dumps(response).encode())
            elif action == 'join':
                response['action'] = 'resp'
                if value in self.rooms:
                    self.rooms[value].append(client_sock)
                    client_sock.room = value
                    response['action_value'] = 'OK'
                else:
                    response['action_value'] = 'ERR'
                client_sock.send(json.dumps(response).encode())
            elif action == 'send_msg':
                response['action'] = action
                response['action_value'] = value
                msg = json.dumps(response).encode()
                if hasattr(client_sock, 'room'):
                    # BUG FIX: iterate the members of the sender's room;
                    # the old code iterated the dict of room *names*.
                    for member in self.rooms[client_sock.room]:
                        if member != client_sock:
                            member.send(msg)
                else:
                    # message sent before create/join -- silently ignored
                    pass
            elif action == 'broadcast':
                response['action'] = action
                response['action_value'] = value
                msg = json.dumps(response).encode()
                for client in self.clients:
                    if client != client_sock:
                        client.send(msg)
            elif action == 'exit':
                if hasattr(client_sock, 'room'):
                    self.rooms[client_sock.room].remove(client_sock)
                if client_sock in self.clients:
                    self.clients.remove(client_sock)
                client_sock.close()
                break  # socket is closed; stop this worker
            elif action == 'out':
                # TODO: room-ownership hand-over when the room owner leaves
                pass
            else:
                pass  # unknown protocol action -- ignored

    def run(self, ip, port, backlog=10):
        """Bind, listen, and spawn a worker thread per accepted client."""
        self.server_sock.bind((ip, port))
        self.server_sock.listen(backlog)
        while True:
            conn, addr = self.server_sock.accept()
            # BUG FIX: used to append the (sock, addr) tuple to an
            # undefined global ``clients``.
            self.clients.append(conn)
            # BUG FIX: Thread args must be an argument tuple.
            threading.Thread(target=self.__client_th__, args=(conn,)).start()
HOST = ''
PORT = 8000

if __name__ == '__main__':
    # BUG FIX: the previous accept loop referenced an undefined ``client_th``
    # (NameError on the first connection) and duplicated Server.run();
    # delegate to the Server class instead.
    Server().run(HOST, PORT)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2008 Jan lehnardt <jan@apache.org>
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution.
"""Simple functional test for the replication notifi | cation trigger"""
import time
from couchdb import client
def set_up_database(server, database):
    """Return a freshly created `database` on `server`, dropping any old copy."""
    already_there = database in server
    if already_there:
        del server[database]
    return server.create(database)
def run_tests():
"""Inserts a doc into database a, waits and tries to read it back from
database b
"""
# set things up
database = 'replication_notification_test'
server_a = client.Server('http://localhost | :5984')
server_b = client.Server('http://localhost:5985')
# server_c = client.Server('http://localhost:5986')
db_a = set_up_database(server_a, database)
db_b = set_up_database(server_b, database)
# db_c = set_up_database(server_c, database)
doc = {'jan':'cool'}
docId = 'testdoc'
# add doc to node a
print 'Inserting document in to database "a"'
db_a[docId] = doc
# wait a bit. Adjust depending on your --wait-threshold setting
time.sleep(5)
# read doc from node b and compare to a
try:
db_b[docId] == db_a[docId] # == db_c[docId]
print 'SUCCESS at reading it back from database "b"'
except client.ResourceNotFound:
print 'FAILURE at reading it back from database "b"'
def main():
    """Entry point: run the functional replication test (Python 2)."""
    print 'Running functional replication test...'
    run_tests()
    print 'Done.'

if __name__ == '__main__':
    main()
|
#
# lastilePro.py
#
# (c) 2013, martin isenburg - http://rapidlasso.com
# rapidlasso GmbH - fast tools to catch reality
#
# uses lastile.exe to compute a tiling for a folder
# worth of LiDAR files with a user-specified tile
# size (and an optional buffer)
#
# LiDAR input: LAS/LAZ/BIN/TXT/SHP/BIL/ASC/DTM
# LiDAR output: LAS/LAZ/BIN/TXT
#
|
# for licensing see http://lastools.org/LICENSE.txt
#
import sys, os, arcgisscripting, subprocess
def check_output(command, console):
    """Run *command* and return (returncode, output).

    When *console* is true the child inherits stdout/stderr and *output*
    is None; otherwise stdout+stderr are captured as text and returned.
    """
    if console:  # idiom fix: was ``console == True``
        process = subprocess.Popen(command)
    else:
        # NOTE(review): shell=True with a list command is unusual but kept
        # as-is -- on Windows (the only platform for this ArcGIS script)
        # the list is converted to a command line either way.
        process = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE,
                                   stderr=subprocess.STDOUT,
                                   universal_newlines=True)
    output, error = process.communicate()
    # communicate() waits for the child, so returncode is already set
    return process.returncode, output
### create the geoprocessor object
gp = arcgisscripting.create(9.3)

### report that something is happening
gp.AddMessage("Starting lastile production ...")

### get number of arguments
argc = len(sys.argv)

### report arguments (for debug)
#gp.AddMessage("Arguments:")
#for i in range(0, argc):
#    gp.AddMessage("[" + str(i) + "]" + sys.argv[i])

### get the path to LAStools (three levels above this script)
lastools_path = os.path.dirname(os.path.dirname(os.path.dirname(sys.argv[0])))

### make sure the path does not contain spaces
if lastools_path.count(" ") > 0:
    gp.AddMessage("Error. Path to .\\lastools installation contains spaces.")
    gp.AddMessage("This does not work: " + lastools_path)
    gp.AddMessage("This would work: C:\\software\\lastools")
    sys.exit(1)

### complete the path to where the LAStools executables are
lastools_path = lastools_path + "\\bin"

### check if path exists
if os.path.exists(lastools_path) == False:
    gp.AddMessage("Cannot find .\\lastools\\bin at " + lastools_path)
    sys.exit(1)
else:
    gp.AddMessage("Found " + lastools_path + " ...")

### create the full path to the lastile executable
lastile_path = lastools_path+"\\lastile.exe"

### check if executable exists
### BUG FIX: this used to re-test lastools_path (the directory, already
### known to exist), so a missing lastile.exe was never detected.
if os.path.exists(lastile_path) == False:
    gp.AddMessage("Cannot find lastile.exe at " + lastile_path)
    sys.exit(1)
else:
    gp.AddMessage("Found " + lastile_path + " ...")
### create the command string for lastile.exe
command = ['"'+lastile_path+'"']

### maybe use '-verbose' option
### (the verbose flag is the *last* toolbox argument)
if sys.argv[argc-1] == "true":
    command.append("-v")

### counting up the arguments
### (argv layout must match the ArcGIS toolbox dialog order exactly)
c = 1

### add input LiDAR
### argv[c] is the input folder, argv[c+1] a space-separated wildcard list
wildcards = sys.argv[c+1].split()
for wildcard in wildcards:
    command.append("-i")
    command.append('"' + sys.argv[c] + "\\" + wildcard + '"')
c = c + 2

### maybe the input files are flightlines
if sys.argv[c] == "true":
    command.append("-files_are_flightlines")
c = c + 1

### maybe use a user-defined tile size
### (decimal comma replaced by point for comma-locales)
if sys.argv[c] != "1000":
    command.append("-tile_size")
    command.append(sys.argv[c].replace(",","."))
c = c + 1

### maybe create a buffer around the tiles
if sys.argv[c] != "0":
    command.append("-buffer")
    command.append(sys.argv[c].replace(",","."))
c = c + 1

### maybe the output will be over 2000 tiles
if sys.argv[c] == "true":
    command.append("-extra_pass")
c = c + 1

### maybe an output format was selected
if sys.argv[c] != "#":
    if sys.argv[c] == "las":
        command.append("-olas")
    elif sys.argv[c] == "laz":
        command.append("-olaz")
    elif sys.argv[c] == "bin":
        command.append("-obin")
    elif sys.argv[c] == "txt":
        command.append("-otxt")
    elif sys.argv[c] == "xyzi":
        command.append("-otxt")
        command.append("-oparse")
        command.append("xyzi")
    elif sys.argv[c] == "txyzi":
        command.append("-otxt")
        command.append("-oparse")
        command.append("txyzi")
c = c + 1

### maybe an output file name was selected
if sys.argv[c] != "#":
    command.append("-o")
    command.append('"'+sys.argv[c]+'"')
c = c + 1

### maybe an output directory was selected
if sys.argv[c] != "#":
    command.append("-odir")
    command.append('"'+sys.argv[c]+'"')
c = c + 1

### maybe there are additional input options
if sys.argv[c] != "#":
    additional_options = sys.argv[c].split()
    for option in additional_options:
        command.append(option)

### report command string
### (the quoted copy is only for display; the list items are unquoted
### again because subprocess handles argument quoting itself)
gp.AddMessage("LAStools command line:")
command_length = len(command)
command_string = str(command[0])
command[0] = command[0].strip('"')
for i in range(1, command_length):
    command_string = command_string + " " + str(command[i])
    command[i] = command[i].strip('"')
gp.AddMessage(command_string)

### run command
returncode,output = check_output(command, False)

### report output of lastile
gp.AddMessage(str(output))

### check return code
if returncode != 0:
    gp.AddMessage("Error. lastile failed.")
    sys.exit(1)

### report happy end
gp.AddMessage("Success. lastile done.")
|
import csv
def get_name_score(name):
    """Alphabetical value of *name*: a/A=1 ... z/Z=26, summed over letters."""
    offset = ord('a') - 1
    return sum(ord(letter.lower()) - offset for letter in name)
def find_total_name_scores_from_file(filename):
    """Sum position * alphabetical score over the sorted names in *filename*.

    The file holds a single CSV row of quoted, comma-separated names.
    """
    # read/parse from file
    names = []
    with open(filename, 'r') as csvfile:
        for row in csv.reader(csvfile, delimiter=',', quotechar='"'):
            names = row  # just one row in this file

    # rank alphabetically, then weight each score by its 1-based position
    names.sort()
    return sum(position * get_name_score(name)
               for position, name in enumerate(names, start=1))
###################################################
# script entry point (Python 2): Project Euler 22 name-scores total
if __name__ == '__main__':
    filename = "p022_names.txt"
    print find_total_name_scores_from_file(filename)
|
from ROOT import TH1I, gROOT, kRed, kBlue
import unittest
import tempfile
import shutil
import os
from varial.extensions.cmsrun import Sample
from varial.wrappers import HistoWrapper
from varial.history import History
from varial import analysis
from varial import settings
from varial import diskio
class TestHistoToolsBase(unittest.TestCase):
    """Common fixture for histo-tool tests.

    Registers three fake samples (tt as pseudo-data, ttgamma, zjets),
    builds a small test histogram with a fabricated processing history,
    and points the analysis working dir at a fresh temp directory.
    """

    def setUp(self):
        super(TestHistoToolsBase, self).setUp()
        # fileservice test data lives either next to the tests or under
        # varial/test/, depending on where the suite is launched from
        test_fs = "fileservice/"
        if not os.path.exists(test_fs):
            test_fs = "varial/test/" + test_fs
        settings.DIR_FILESERVICE = test_fs
        if (not os.path.exists(test_fs + "tt.root")) \
                or (not os.path.exists(test_fs + "ttgamma.root")) \
                or (not os.path.exists(test_fs + "zjets.root")):
            self.fail("Fileservice testfiles not present!")

        # create samples
        analysis.all_samples["tt"] = Sample(
            name = "tt",
            is_data = True,
            lumi = 3.,
            legend = "pseudo data",
            input_files = ["none"],
        )
        analysis.all_samples["ttgamma"] = Sample(
            name = "ttgamma",
            lumi = 4.,
            legend = "tt gamma",
            input_files = ["none"],
        )
        analysis.all_samples["zjets"] = Sample(
            name = "zjets",
            lumi = 0.1,
            legend = "z jets",
            input_files = ["none"],
        )
        analysis.colors = {
            "tt gamma": kRed,
            "z jets": kBlue
        }
        settings.stacking_order = [
            "tt gamma",
            "z jets"
        ]
        analysis.active_samples = analysis.all_samples.keys()

        # create a test wrapper: 2-bin histogram with entries at x=1 and
        # x=3 (the latter with weight 2)
        h1 = TH1I("h1", "H1", 2, .5, 4.5)
        h1.Fill(1)
        h1.Fill(3,2)
        hist = History("test_op")  # create some fake history
        hist.add_args([History("fake_input_A"), History("fake_input_B")])
        hist.add_kws({"john": "cleese"})
        self.test_wrp = HistoWrapper(
            h1,
            name="Nam3",
            title="T1tl3",
            history=hist
        )
        self.test_dir = tempfile.mkdtemp()
        analysis.cwd = self.test_dir

    def tearDown(self):
        """Drop the wrapper, close ROOT files and remove the temp dir."""
        super(TestHistoToolsBase, self).tearDown()
        del self.test_wrp
        diskio.close_open_root_files()
        gROOT.Reset()  # clear ROOT global state between tests
        if os.path.exists(self.test_dir):
            os.system('rm -r %s' % self.test_dir)
|
import collections
import collections.abc

from .exceptions import CachedValueNotFound, DoesNotExist
from .settings import preferences_settings
# collections.abc.Mapping: the bare ``collections.Mapping`` alias was
# deprecated in Python 3.3 and removed in Python 3.10.
class PreferencesManager(collections.abc.Mapping):
    """Handle retrieving / caching of preferences.

    Behaves as a mapping keyed by "section<separator>name" lookups: reads
    go through the cache, writes go straight to the database.
    """

    def __init__(self, model, registry, **kwargs):
        self.model = model
        self.registry = registry
        self.queryset = self.model.objects.all()
        self.instance = kwargs.get('instance')
        if self.instance:
            self.queryset = self.queryset.filter(instance=self.instance)

    @property
    def cache(self):
        from django.core.cache import caches
        return caches['default']

    def __getitem__(self, key):
        return self.get(key)

    def __setitem__(self, key, value):
        section, name = self.parse_lookup(key)
        self.update_db_pref(section=section, name=name, value=value)

    def __repr__(self):
        return repr(self.all())

    def __iter__(self):
        return self.all().__iter__()

    def __len__(self):
        return len(self.all())

    def get_cache_key(self, section, name):
        """Return the cache key corresponding to a given preference"""
        if not self.instance:
            return 'dynamic_preferences_{0}_{1}_{2}'.format(self.model.__name__, section, name)
        return 'dynamic_preferences_{0}_{1}_{2}_{3}'.format(self.model.__name__, section, name, self.instance.pk)

    def from_cache(self, section, name):
        """Return a preference value deserialized from the cached raw_value;
        raises CachedValueNotFound on a cold cache."""
        cached_value = self.cache.get(
            self.get_cache_key(section, name), CachedValueNotFound)
        if cached_value is CachedValueNotFound:
            raise CachedValueNotFound
        return self.registry.get(section=section, name=name).serializer.deserialize(cached_value)

    def to_cache(self, pref):
        """Update/create the cache value for the given preference model instance"""
        # timeout=None caches forever
        self.cache.set(
            self.get_cache_key(pref.section, pref.name), pref.raw_value, None)

    def pref_obj(self, section, name):
        """Return the registered preference object (holds default/serializer)."""
        return self.registry.get(section=section, name=name)

    def parse_lookup(self, lookup):
        """Split "section<sep>name" into (section, name); section may be None."""
        try:
            section, name = lookup.split(
                preferences_settings.SECTION_KEY_SEPARATOR)
        except ValueError:
            name = lookup
            section = None
        return section, name

    def get(self, key, model=False):
        """Return the value of a single preference using a dotted path key.

        With model=True, return the DB model instance instead of the value.
        """
        section, name = self.parse_lookup(key)
        if model:
            # BUG FIX: the keyword was misspelled ``setion``, which raised
            # TypeError whenever model=True was used.
            return self.get_db_pref(section=section, name=name)
        try:
            return self.from_cache(section, name)
        except CachedValueNotFound:
            pass
        db_pref = self.get_db_pref(section=section, name=name)
        self.to_cache(db_pref)
        return db_pref.value

    def get_db_pref(self, section, name):
        """Fetch the DB row, creating it with the registered default if absent."""
        try:
            pref = self.queryset.get(section=section, name=name)
        except self.model.DoesNotExist:
            pref_obj = self.pref_obj(section=section, name=name)
            pref = self.create_db_pref(
                section=section, name=name, value=pref_obj.default)
        return pref

    def update_db_pref(self, section, name, value):
        """Persist *value* for the preference, creating the row if needed."""
        try:
            db_pref = self.queryset.get(section=section, name=name)
            db_pref.value = value
            db_pref.save()
        except self.model.DoesNotExist:
            return self.create_db_pref(section, name, value)
        return db_pref

    def create_db_pref(self, section, name, value):
        """Create and save a new preference row (scoped to the instance if any)."""
        if self.instance:
            db_pref = self.model(
                section=section, name=name, instance=self.instance)
        else:
            db_pref = self.model(section=section, name=name)
        db_pref.value = value
        db_pref.save()
        return db_pref

    def all(self):
        """Return a dictionnary containing all preferences by section
        Loaded from cache or from db in case of cold cache
        """
        a = {}
        try:
            for preference in self.registry.preferences():
                a[preference.identifier()] = self.from_cache(
                    preference.section, preference.name)
        except CachedValueNotFound:
            # at least one value was cold: fall back to a full DB load,
            # which also re-primes the cache
            return self.load_from_db()
        return a

    def load_from_db(self):
        """Return a dictionnary of preferences by section directly from DB"""
        a = {}
        db_prefs = {p.preference.identifier(): p for p in self.queryset}
        for preference in self.registry.preferences():
            try:
                db_pref = db_prefs[preference.identifier()]
            except KeyError:
                # row missing: create it from the registered default
                db_pref = self.create_db_pref(
                    section=preference.section, name=preference.name, value=preference.default)
            self.to_cache(db_pref)
            a[preference.identifier()] = self.from_cache(
                preference.section, preference.name)
        return a
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-03-16 15:30
from __future__ import unicode_literals
f | rom django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: update the help_text of Book.isbn.

    NOTE(review): generated by Django 1.10.6 -- avoid hand-editing.
    """

    dependencies = [
        ('catalog', '0006_auto_20170316_1444'),
    ]

    operations = [
        migrations.AlterField(
            model_name='book',
            name='isbn',
            field=models.CharField(help_text='''13 Character\n
<a href="https://www.isbn-international.org/content/what-isbn">ISBN number</a>''',
                max_length=13, verbose_name='ISBN'),
        ),
    ]
|
(self, user, channel, command=None):
"""Return a list of chair commands that we currently understand.
If a specific command is given, print its docstring."""
return self._help(user, channel, 'chair', command=command)
    def private_help(self, user, command=None):
        """Return a list of private message commands that we currently understand.
        If a specific command is specified, print its docstring."""
        # private help is replied directly to the user, hence channel == user
        return self._help(user, user, 'private', command=command)
def _help(self, user, channel, command_type, command=None):
# if an argument is given, print help about that specific command
if command:
command = command.replace(',', '')
method = getattr(self, '%s_%s' % (command_type, command), None)
# sanity check: does this method actually exist?
if not method:
help_command = 'help'
if command_type == 'chair':
help_command = ',%s' % help_command
self.msg(channel, 'This command does not exist. Issue `%s` by itself for a command list.' % help_command)
return
# okay, now take the docstring and present it as help; however
# we need to reformat my docstrings to be more IRC friendly -- specifically:
# - change single `\n` to just spaces
# - change double `\n` to single `\n`
help_text = method.__doc__
help_text = re.sub(r'\\n[ ]+\\n', '|---|', help_text)
help_text = re.sub(r'\s+', ' ', help_text)
help_text = help_text.replace('|---|', '\n')
self.msg(channel, help_text)
return
# okay, give a list of the commands available
commands = []
for attr in dir(self):
if callable(getattr(self, attr)) and attr.startswith('%s_' % command_type):
if command_type == 'chair':
command_name = ',%s' % attr[len(command_type) + 1:]
else:
command_name = attr[len(command_type) + 1:]
commands.append(command_name)
commands.sort()
# now print out the list of commands to the channel
self.msg(channel, 'I recognize the following %s commands:' % command_type)
msg_queue = ' '
for i in range(0, len(commands)):
command = commands[i]
msg_queue += command
if i % 3 != 2 and i != len(commands) - 1:
msg_queue += (' ' * (20 - (len(command) * 2)))
else:
self.msg(channel, msg_queue)
msg_queue = ' '
class BaseMode(SkeletonMode):
"""Base class for all modes, handling all the base commands."""
    def __init__(self, bot):
        super(BaseMode, self).__init__(bot)
        self.reported_in = set()  # users who wrote their name this meeting
        self.nonvoters = set()    # users excluded from vote pestering
@property
def nonvoter_list(self):
return ', '.join(self.nonvoters) if self.nonvoters else 'none'
    def names(self, channel):
        """Prompt everyone in the channel to write their names.
        Note who has done so in order to easily compile a non-voter list."""
        self.msg(channel, 'Please write your full name in the channel, for the meeting records.')
        # route all subsequent channel messages to handler_user_names
        # (set *after* the prompt so the prompt itself is not captured)
        self.bot.state_handler = self.handler_user_names
def chair_nonvoter(self, user, channel, *users):
"""Set the given user to a non-voter. If no user is specified,
| then print the list of all non-voters.
Exception: If we're just starting the meeting, then set anyone
who has not reported in to be a non-voter."""
# this is a special command if we're in the "reporting in" phase; |
# set as a non-voter everyone who hasn't reported in yet
# note: also adds as a non-voter the person who ran the command
if self.bot.state_handler == self.handler_user_names and not users:
def _(names):
laggards = set(names) - self.reported_in - self.nonvoters
laggards.remove(self.bot.nickname)
laggards.add(user)
if laggards:
self.nonvoters.update(laggards)
self.msg(channel, 'Will no longer pester %s.' % ', '.join(laggards))
self.bot.names(channel).addCallback(_)
return
# run normally
users = set(users)
users.discard(self.bot.nickname)
if not users:
self.msg(channel, "Nonvoters: %s.", self.nonvoter_list)
return
self.nonvoters.update(users)
self.msg(channel, "Will no longer pester %s.", ', '.join(users))
def chair_voter(self, user, channel, *users):
"""Set a given user to be a voter. If no user is specified,
print the list of all voters."""
users = set(users)
users.discard(self.bot.nickname)
if not users:
self.msg(channel, "Nonvoters: %s.", self.nonvoter_list)
return
if '*' in users:
self.nonvoters.clear()
self.msg(channel, "Will now pester everyone.")
else:
self.nonvoters.difference_update(users)
self.msg(channel, "Will now pester %s.", ', '.join(users))
def chair_pester(self, user, channel):
"""Pester the laggards."""
# special case: if we're in the "reporting in" phase, then check for that
# instead of checking for votes like we'd normally do
if self.bot.state_handler == self.handler_user_names:
def _(names):
laggards = set(names) - self.reported_in - self.nonvoters
laggards.remove(self.bot.nickname)
if laggards:
self.msg(channel, '%s: ping' % ', '.join(laggards))
else:
self.msg(channel, 'Everyone is accounted for!')
self.bot.names(channel).addCallback(_)
return
else:
# okay, this is the normal situation case
def _(names):
laggards = (set(names) - set(self.current_votes.keys()) - self.nonvoters)
laggards.remove(self.bot.nickname)
if laggards:
self.msg(channel, "Didn't vote: %s.", ", ".join(laggards))
else:
self.msg(channel, "Everyone voted.")
# actually do the pestering
self.bot.names(channel).addCallback(_)
def handler_user_names(self, user, channel, message):
"""As users write their names, note that they've reported in,
so we can see who isn't here and set them as non-voters."""
# this user has now reported in
self.reported_in.add(user)
# if this user is in the non-voter list, fix that
if user in self.nonvoters and user not in self.bot.superusers:
self.chair_voter(user, channel, user)
def _seconds_to_text(self, seconds):
"""Convert a number of seconds, specified as an int or string,
to a pretty string."""
# let's get started
seconds = int(seconds)
time_text = ''
# sanity check: 0 seconds is a corner case; just return it back statically
if seconds == 0:
return '0 seconds'
# deal with the minutes portion
if seconds // 60 > 0:
time_text += '%d minute' % (seconds // 60)
if seconds // 60 != 1:
time_text += 's'
if seconds % 60:
time_text += ', '
# deal with the seconds portion
if seconds % 60:
time_text += '%d second' % (seconds % 60)
if seconds % 60 != 1:
time_text += 's'
return time_text
def _minutes_to_text(self, minutes):
"""Convert a number of minutes, specified as a float, int, or string,
to a pretty string."""
|
from __future__ import division
from libtbx.path import walk_source_tree
from libtbx.str_utils import show_string
from libtbx.utils import Sorry
from libtbx.option_parser import option_parser
from fnmatch import fnmatch
import re
import sys, os
def read_lines_if_possible(file_path):
    """Return the lines of *file_path*, or [] if it cannot be opened.

    BUG FIX: the file handle used to be leaked (never closed); it is now
    closed deterministically.
    """
    try:
        f = open(file_path, "r")
    except IOError:
        return []
    try:
        return f.read().splitlines()
    finally:
        f.close()
def run(args, command_name="libtbx.find_files"):
    """Recursively find files matching patterns, optionally grepping each.

    (Python 2 module: uses print statements.)
    """
    if (len(args) == 0): args = ["--help"]
    command_line = (option_parser(
        usage="%s [options] pattern ..." % command_name,
        description="Recursively finds all files matching patterns,\n"
            "excluding CVS and .svn directories and .pyc files.")
        .option("-t", "--top",
            action="append",
            type="string",
            metavar="PATH",
            help="top-level directory where search starts"
                " (default is current working directory)")
        .option("-g", "--grep",
            action="append",
            type="string",
            metavar="PATTERN",
            help="find regular expression pattern in each file (multiple"
                " -g/--grep options can be given)")
        .option("-i", "--ignore_case",
            action="store_true",
            default=False,
            help="with -g/--grep: case-insensitive match")
        .option("-f", "--file_names_only",
            action="store_true",
            default=False,
            help="with -g/--grep: show file names only, not the matching lines")
        .option("-q", "--quote",
            action="store_true",
            default=False,
            help="quote file names")
    ).process(args=args)
    fn_patterns = command_line.args
    co = command_line.options
    grep_flags = 0
    if (co.ignore_case):
        grep_flags |= re.IGNORECASE
    # no filename pattern given: match every file
    if (len(fn_patterns) == 0):
        fn_patterns = ["*"]
    tops = co.top
    if (tops is None):
        tops = ["."]
    for top in tops:
        if (not os.path.isdir(top)):
            raise Sorry("Not a directory: %s" % show_string(top))
        for file_path in walk_source_tree(top=top):
            file_name = os.path.basename(file_path)
            for fn_pattern in fn_patterns:
                if (fnmatch(file_name, fn_pattern)):
                    if (co.quote): fp = show_string(file_path)
                    else: fp = file_path
                    if (co.grep is None):
                        print fp
                    else:
                        # with -f we never print matching lines, so binary
                        # detection is moot and starts out True
                        is_binary_file = co.file_names_only
                        for line in read_lines_if_possible(file_path=file_path):
                            if (not is_binary_file):
                                # a NUL byte marks the file as binary
                                is_binary_file = "\0" in line
                            def line_matches_all_grep_patterns():
                                # every -g pattern must match (logical AND)
                                for grep_pattern in co.grep:
                                    if (re.search(
                                            pattern=grep_pattern,
                                            string=line,
                                            flags=grep_flags) is None):
                                        return False
                                return True
                            if (line_matches_all_grep_patterns()):
                                if (co.file_names_only):
                                    print fp
                                    break
                                elif (is_binary_file):
                                    print "%s: match in binary file" % fp
                                    break
                                else:
                                    print "%s: %s" % (fp, line)
# Command-line entry point.
if (__name__ == "__main__"):
  run(sys.argv[1:])
|
# Copyright 2015 Cisco Systems, Inc.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import struct
from yabgp.message.attribute import Attribute
from yabgp.message.attribute import AttributeID
from yabgp.message.attribute import AttributeFlag
from yabgp.common import constants as bgp_cons
from yabgp.common import exception as excep
class AtomicAggregate(Attribute):
    """
    ATOMIC_AGGREGATE is a well-known discretionary attribute of length 0.
    """
    ID = AttributeID.ATOMIC_AGGREGATE
    FLAG = AttributeFlag.TRANSITIVE

    @classmethod
    def parse(cls, value):
        """Parse a BGP ATOMIC_AGGREGATE attribute.

        :param value: raw attribute payload; must be empty (length 0).
        :return: the decoded (empty) string.
        :raises UpdateMessageError: if the payload is non-empty, which
            violates the zero-length requirement of this attribute.
        """
        if not value:
            return bytes.decode(value)
        raise excep.UpdateMessageError(
            sub_error=bgp_cons.ERR_MSG_UPDATE_OPTIONAL_ATTR,
            data=value)

    @classmethod
    def construct(cls, value):
        """Construct an ATOMIC_AGGREGATE path attribute.

        :param value: must be falsy; this attribute carries no payload.
        :return: packed flag byte, type-code byte and length byte (0).
        :raises UpdateMessageError: if a non-empty value is supplied.
        """
        if value:
            raise excep.UpdateMessageError(
                sub_error=bgp_cons.ERR_MSG_UPDATE_OPTIONAL_ATTR,
                data='')
        # The attribute body is empty, so only the zero length octet follows.
        return struct.pack('!B', cls.FLAG) + struct.pack('!B', cls.ID) \
            + struct.pack('!B', 0)
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import os, sys
import tempfile
from winsys._compat import unittest
import uuid
import win32file
from winsys.tests.test_fs import utils
from winsys import fs
class TestFS (unittest.TestCase):
  """Check winsys.fs module-level helpers against their stdlib analogues."""

  # Five numbered empty files are created in the test root before each test.
  filenames = ["%d" % i for i in range (5)]

  def setUp (self):
    utils.mktemp ()
    for filename in self.filenames:
      # Touch an empty file so listings have known contents.
      with open (os.path.join (utils.TEST_ROOT, filename), "w"):
        pass

  def tearDown (self):
    utils.rmtemp ()

  def test_glob (self):
    import glob
    pattern = os.path.join (utils.TEST_ROOT, "*")
    # fs.glob must agree exactly with the stdlib glob module.
    self.assertEqual (list (fs.glob (pattern)), glob.glob (pattern))

  def test_listdir (self):
    # fs.listdir must agree exactly with os.listdir.
    fs_version = list (fs.listdir (utils.TEST_ROOT))
    os_version = os.listdir (utils.TEST_ROOT)
    self.assertEqual (fs_version, os_version, "%s differs from %s" % (fs_version, os_version))
#
# All the other module-level functions are hand-offs
# to the corresponding Entry methods.
#
# Run the suite; when attached to a console, pause so the window stays open.
# NOTE(review): raw_input is Python 2 only — under Python 3 this would need
# input(); confirm the supported runtime for this test module.
if __name__ == "__main__":
  unittest.main ()
  if sys.stdout.isatty (): raw_input ("Press enter...")
|
rial_score = funct(trial_position)
if self.better_than(trial_score, self.best_score):
self.best_score = trial_score
self.position = trial_position
current = funct(self.position)
if self.display_iteration:
print("Iteration #" + str(iteration_number) + ", Score: " + str(self.best_score))
iteration_number += 1
if self.display_final:
print("Finished after " + str(iteration_number) + " iterations, final score is " + str(self.best_score))
return self.position
def perform_randomization(self, vec):
for i in xrange(0, len(vec)):
vec[i] = np.random.uniform(self.low, self.high)
class TrainHillClimb(Train):
    """
    Train using hill climbing. Hill climbing can be used to optimize the long term memory of a Machine Learning
    Algorithm. This is done by moving the current long term memory values to a new location if that new location
    gives a better score from the scoring function.
    http://en.wikipedia.org/wiki/Hill_climbing
    """
    def __init__(self, goal_minimize=True):
        # goal_minimize: True to minimize the score function, False to maximize.
        Train.__init__(self, goal_minimize)

    def train(self, x0, funct, acceleration=1.2, step_size=1.0):
        """
        Train up to the specified maximum number of iterations using hill climbing.
        @param x0: The initial vector for long-term memory.
        @param funct: The score function. We attempt to minimize or maximize this.
        @param acceleration: The acceleration (default=1.2)
        @param step_size: The step size (default=1.0)
        @return: The trained long-term memory vector.
        """
        iteration_number = 1
        self.position = list(x0)
        self.best_score = funct(self.position)
        # Each dimension adapts its own step size.
        step_size = [step_size] * len(x0)
        # Five candidate moves per dimension: accelerate backward, step
        # backward, stay put, step forward, accelerate forward.
        candidate = [0] * 5
        candidate[0] = -acceleration
        candidate[1] = -1 / acceleration
        candidate[2] = 0
        candidate[3] = 1 / acceleration
        candidate[4] = acceleration

        while not self.should_stop(iteration_number, self.best_score):
            # Seed the per-iteration best with the worst representable value.
            # BUG FIX: sys.float_info.min is the smallest POSITIVE float, so
            # when maximizing a function whose scores are all negative no
            # candidate could ever register as an improvement; the correct
            # "worst" value for maximization is -sys.float_info.max.
            if self.goal_minimize:
                best_step_score = sys.float_info.max
            else:
                best_step_score = -sys.float_info.max
            for dimension in xrange(0, len(self.position)):
                best = -1
                for i in xrange(0, len(candidate)):
                    # Take a step
                    self.position[dimension] += candidate[i] * step_size[dimension]

                    # Obtain new trial score.
                    trial_score = funct(self.position)

                    # Step back, we only want to try movement in one dimension.
                    self.position[dimension] -= candidate[i] * step_size[dimension]

                    # Record best step taken
                    if self.better_than(trial_score, best_step_score):
                        best_step_score = trial_score
                        best = i

                if best != -1:
                    # Commit the winning move and adapt this dimension's
                    # step size by the chosen candidate factor.
                    self.best_score = best_step_score
                    self.position[dimension] += candidate[best] * step_size[dimension]
                    step_size[dimension] += candidate[best]

            if self.display_iteration:
                print("Iteration #" + str(iteration_number) + ", Score: " + str(self.best_score))
            iteration_number += 1

        if self.display_final:
            print("Finished after " + str(iteration_number) + " iterations, final score is " + str(self.best_score))
        return self.position
class TrainAnneal(Train):
"""
Train a Machine Learning Algorithm using Simulated Annealing. Simulated Annealing is a Monte Carlo algorithm
that is based on annealing in metallurgy, a technique involving heating and controlled cooling of a
material to increase the size of its crystals and reduce their defects, both are attributes of the material
that depend on its thermodynamic free energy.
The Simulated Annealing algorithm works by randomly changing a vector of doubles. This is the long term memory
of the Machine Learning algorithm. While this happens a temperature is slowly decreased. When this
temperature is higher, the Simulated Annealing algorithm is more likely to accept changes that have a higher
error (or energy) than the current state.
There are several important components to any Simul | ated Learning Algorithm:
First, the randomization technique. This is performed by the method performRandomize. To randomize
differently, override this method.
Secondly, the cooling schedule. This determines how quickly the current temperature will fall. This is
controlled by the coolingSchedule. To define a different cooling schedule, override this method.
Finally, the probability of accepting a higher-error (energy) solu | tion. This is defined by a Probability
Distribution Function (PDF) contained in calcProbability. To define a different PDF, override this method.
http://en.wikipedia.org/wiki/Simulated_annealing
"""
    def __init__(self, max_iterations=100, starting_temperature=400, ending_temperature=0.0001):
        """
        Create a simulated annealing trainer.  Annealing always minimizes,
        so the Train base class is initialised with goal_minimize=True.
        @param max_iterations: The maximum number of iterations.
        @param starting_temperature: The starting temperature.
        @param ending_temperature: The ending temperature.
        """
        Train.__init__(self, True)
        self.max_iterations = max_iterations
        self.starting_temperature = starting_temperature
        self.ending_temperature = ending_temperature
        # Number of random trials evaluated at each temperature step.
        self.cycles = 100
        # Most recent acceptance probability (reported in the iteration log).
        self.last_probability = 0
def train(self, x0, funct):
"""
Train for the specified number of iterations using simulated annealing. The temperature will be lowered
between the specified range at each iteration. You can also use the cycles property to set how many cycles
are executed at each iteration. Simulated annealing can only be used to minimize the score function.
@param x0: The initial long-term memory.
@param funct: The score function.
@return: The trained long-term memory.
"""
iteration_number = 1
self.position = list(x0)
self.best_score = funct(self.position)
current_score = self.best_score
current_position = list(x0)
while not self.should_stop(iteration_number, self.best_score):
# Clone current position, create a new array of same size.
current_temperature = self.cooling_schedule(iteration_number)
for c in xrange(0, self.cycles):
trial_position = list(current_position)
# Randomize trial position.
self.perform_randomization(trial_position)
# Obtain new trial score.
trial_score = funct(trial_position)
keep = False
if self.better_than(trial_score, current_score):
keep = True
else:
self.last_probability = self.calc_probability(current_score, trial_score, current_temperature)
if self.last_probability > np.random.uniform():
keep = True
if keep:
current_score = trial_score
current_position = list(trial_position)
if self.better_than(current_score, self.best_score):
self.best_score = current_score
self.position = list(current_position)
if self.display_iteration:
print("Iteration #" + str(iteration_number) + ", Score: " + str(self.best_score)
+ ",k=" + str(iteration_number)
+ ",kMax=" + str(self.max_iterations)
+ ",t=" + str(current_temperature) + ",prob=" + str(self.last_probability) + ","
+ str(current_score))
iteration_number += 1
if self.display_final:
|
import os

# --- PuppetDB connection -------------------------------------------------
PUPPETDB_HOST = 'localhost'
PUPPETDB_PORT = 8080
# SSL verification and optional client key/cert for PuppetDB requests.
PUPPETDB_SSL_VERIFY = True
PUPPETDB_KEY = None
PUPPETDB_CERT = None
# Request timeout towards PuppetDB — presumably seconds; confirm against
# the HTTP client that consumes this value.
PUPPETDB_TIMEOUT = 20

# Flask session signing key.  Regenerated on every process start, so
# sessions do not survive a restart and will differ between workers.
# NOTE(review): set a fixed value for multi-process deployments.
SECRET_KEY = os.urandom(24)

# --- Development server --------------------------------------------------
DEV_LISTEN_HOST = '127.0.0.1'
DEV_LISTEN_PORT = 5000
DEV_COFFEE_LOCATION = 'coffee'

# --- Application behaviour -----------------------------------------------
UNRESPONSIVE_HOURS = 2
ENABLE_QUERY = True
LOCALISE_TIMESTAMP = True
LOGLEVEL = 'info'
REPORTS_COUNT = 10
OFFLINE_MODE = False
ENABLE_CATALOG = False

# Facts used for aggregate breakdowns in the UI.
GRAPH_FACTS = ['architecture',
               'domain',
               'lsbcodename',
               'lsbdistcodename',
               'lsbdistid',
               'lsbdistrelease',
               'lsbmajdistrelease',
               'netmask',
               'osfamily',
               'puppetversion',
               'processorcount']

# (label, fact name) pairs shown in the inventory view.
INVENTORY_FACTS = [ ('Hostname', 'fqdn' ),
                    ('IP Address', 'ipaddress' ),
                    ('OS', 'lsbdistdescription'),
                    ('Architecture', 'hardwaremodel' ),
                    ('Kernel Version', 'kernelrelease' ),
                    ('Puppet Version', 'puppetversion' ), ]
|
a262222dae42a1a374fac00000000',
'010000004a823aa83595a947a91df4dffa27d24a4211c1c1b352614d5128cc6400000000d9a7f8d838220becec1802eec1430b4769d6b4699eb95fecb7d1d5c86e6613d80d106a49ffff001d1c45247d0101000000010000000000000000000000000000000000000000000000000000000000000000ffffffff0704ffff001d012cffffffff0100f2052a01000000434104313655b2aed748f03cf6e718fa87ce259f35033b5f0933bf990019f5b7a5e1725c5388686186b2bda9512d90d920fdfd998d690622f2e9e43005942f80e205ccac00000000',
'01000000fad05be324f6c411800e9195bde30549522668db30be952e523e9c49000000001d01af6c4716b28de4e5e3385a5846bac1fa90a565579065d904b354da4f8765de116a49ffff001d065c70e20101000000010000000000000000000000000000000000000000000000000000000000000000ffffffff0704ffff001d012dffffffff0100f2052a01000000434104a624f45c0b4b75bc784748ce8affe84de70556cfdb9cb69f861363699047701b9336ecc9f34eab351b28b2691bf69e5afc2e40b05cc6fa4a8ff377cff0d9dfb2ac00000000',
'010000001d1f73cbfd47c38aefe270faf05873ddaeae469eb57976b067cfb8d800000000d5273768443e9e48d86acc6a1c5fc3925e69d64f11aa17297c1c16e6e339960e5b176a49ffff001d2ac199990101000000010000000000000000000000000000000000000000000000000000000000000000ffffffff0704ffff001d014dffffffff0100f2052a010000004341040890f2990714393514c898eaa61353163aa4c2ac5be0cb78c8ffe0b3486d580c5c3ec584f303c04429ecb537144ca28def1a61e0213fbb3c15de9572f8cacdc0ac00000000',
'0100000095033bbd6e41afe1eb283ef23cacd5d72c5551a60c081f2f341698b000000000448490d4ce520ae01c822d2f2b489a3b2805416c21b558cf99cd1dfa1e29a8b0141a6a49ffff001d2bf5326c0101000000010000000000000000000000000000000000000000000000000000000000000000ffffffff0704ffff001d014effffffff0100f2052a01000000434104f435a4df01fa9a93b3051eaf12357927e0b314816edc5b76ef1f96aaf04b833bbb4ba44072dac924ef37795dcf23a2e0e525550e4ea2bbe24d6ac3b045afc100ac00000000',
'01000000e50eaf73b308c0b468bf3a8ab80d9fc6a350e6d998ec0e2869ac3da800000000ba98b85bb12baeffda12c2d2263a701e572219f3c93972e17c07b2aa71cea4731e1d6a49ffff001d208ef6990101000000010000000000000000000000000000000000000000000000000000000000000000ffffffff0704ffff001d0153ffffffff0100f2052a010000004341047bff1aa919559a9dc8f040ba59e45760f157e46bba653171e34aa45cd1e60872ab910651666fd7c5784a8eb496796e1732573ac5c33e40b5c01e4d428c4c8765ac00000000',
'01000000a0001921bc03feda5874a7954f914cbc7a8a6449e1fa40439b4c3ed9000000004206fdefe7da3b2c5cb0c8dbb12d834ef3ac8d6e2c68050eb716babe95f4169d48216a49ffff001d2a17c9a50101000000010000000000000000000000000000000000000000000000000000000000000000ffffffff0704ffff001d015cffffffff0100f2052a01000000434104ca826620e9c39691fa76854004ad10f848d0f750701bdfe5d1d367d6eb85246647d325185307bf085d68d44bf697e9e430867e1b910f5fb7ed8c38e852094c6fac00000000',
'0100000061188712afd4785d18ef15d | b57fb52dd150b56c8b547fc6bbf23ec49000000003658f907b55bf0e46ac560d0cd6ebb1d141c311c00193ad69a98b4f6b9b6b87058256a49ffff001d2608971801010000000100000000000000000000000000000000000000000000000000 | 00000000000000ffffffff0704ffff001d0172ffffffff0100f2052a0100000043410440a92015df9284b2ce38644cd761abd99566af0d998c3f67b760b1c1534f17cc44602abe879eb01fe19f01c42e8d49047bf112bf122b35fab1f16255f93fe87cac00000000',
'0100000002a8bd45fab7e40d8207ef95762e8578589a1961a9f9991aefb4477f0000000039359e15c0251a9162151f681d2b23c71e734595db271846aeed8736c2ef443f84276a49ffff001d17d2841d0101000000010000000000000000000000000000000000000000000000000000000000000000ffffffff0804ffff001d028700ffffffff0100f2052a010000004341048b5f7c6e17394fa995cd22c387de26362f26aa2bd524581d2a76d5aaf7d540b825fbab64db7a537b05ba68c90471d9ed49cf97bef6c9d4b4d7c48b37cbab3193ac00000000',
'0100000012052719601a039f27921ef35a24c82cae5f5024f326a56c8ee8762e00000000a683374124eb823197b4caab534ac172d7da016d06e35ab5465fa883f7b69c42fb286a49ffff001d08d5a80e0101000000010000000000000000000000000000000000000000000000000000000000000000ffffffff0804ffff001d028800ffffffff0100f2052a010000004341049ffcef9e42d9c8ba9394ff3a105810e8eccfb014bdb34d425a2df526be8de06c554311f92ac36904ddadc93cdef0d666aee8a9fc7b51c471836bd6d1efeb62deac00000000',
'010000001c084a379912af47ef38e75d8eec1f6f698b0cead98fe1baebe17f6e000000006f6168c5809c18ab102a28087ed22a5aac41e5c531b39b8a0975ebaf1fc044ae102d6a49ffff001d1434411f0101000000010000000000000000000000000000000000000000000000000000000000000000ffffffff0804ffff001d028a00ffffffff0100f2052a0100000043410457e68d6407f0e0a5c3d25a76954f8319b5b7b1bac58fefde3404f3deeb3f0a9d5201e6e891ce592a19bf90e6cad362df09929f6e2bace8820d279a569c3eb2b1ac00000000',
'010000007e9651bb2d6a7298248c64cea76c14c02c1603c1f2961e9f4e487a1a000000008ac11bbf709fd20c4a6adc39dc9827402a4df48149090c38bfa393f20deca655a82e6a49ffff001d373d76ae0101000000010000000000000000000000000000000000000000000000000000000000000000ffffffff0804ffff001d028c00ffffffff0100f2052a01000000434104c024196cfc6ad36800b5522ffba6dd1c4d58c96cae378bdcddb52c081d742eb219d0cabdd7ef00f0010e18aba03483767ecbcf0bbe428dc92285d7f072c68379ac00000000',
'0100000035a83bdde524407a7bcdfac3232d2bf6710f5559d532bd2c7534b8e700000000474fbb76278470f31077953b66d0ce967e1b3e2e3a4041553b82cd3fe1a2cb5aee316a49ffff001d0354b0b20101000000010000000000000000000000000000000000000000000000000000000000000000ffffffff0804ffff001d029100ffffffff0100f2052a01000000434104f3ac806f3c4afdcac540251e2cd7b61f0d60b1195bf6f7a7ce7bae5d966bcfdb17d0ba62190a162aafe7538df373b92c7037e5a8ac820e928498d341240706e4ac00000000',
'01000000933d9038fe5264f9453951d40e55c91504e1920801c85dbb5c27c81100000000b9253cf4f366a018182bab5a30a54c700db0736b540e3ad16fc1a109a81929b530346a49ffff001d19fd5aa00101000000010000000000000000000000000000000000000000000000000000000000000000ffffffff0804ffff001d02a600ffffffff0100f2052a0100000043410488f83cdf5c83e9fc59c2d92092ea32945b880f19c9da12ead521cb7874ada0b8fcb5688470ceabe8289d53c8964e77a5b738be69da305d6208c6725202de68a4ac00000000',
'010000004d1f55defafd65567a149e8cd32e1a2019b993e17f52cfb43357a79c00000000fe0c90dd69f7661425eebf7c913f0dd932691f3b1e3741105dd3600b69b9a9a0d5366a49ffff001d018cf4760101000000010000000000000000000000000000000000000000000000000000000000000000ffffffff0804ffff001d02ae00ffffffff0100f2052a01000000434104dcecccde1d125055b600ae12e2da68b11a10c9655d60e9315c013b3e04e5d32e681613765c37fee024d6bf49bd62a34a31ac021aab9ec4e67cc9fb061de6548cac00000000',
'010000006af39676eb24f1eaece7abea73ce12d06667c7c3f463de9513c54ef000000000ca7e0cb6eb209f2b422bc3c764e25c656065027dfd60224ee448d12c57fa74b785396a49ffff001d309eaf130101000000010000000000000000000000000000000000000000000000000000000000000000ffffffff0804ffff001d02b700ffffffff0100f2052a01000000434104f0418233581f77491230adffb4f086fcf029aeae6beef25d7200325e6a2904a89004b312c6215ded5eab744b9a24c16cf19423c9df8d8de02e692800ac90d234ac00000000',
'01000000c66244e034c9bfa52424ad91905b999cb6ff5d49dbe82021d59069ab00000000ca3fae9a5bdd728abb0b3e1953a589d945448dd452331c0199e3dc2b1c5935cb893c6a49ffff001d1c0a17e10101000000010000000000000000000000000000000000000000000000000000000000000000ffffffff0804ffff001d02b900ffffffff0100f2052a01000000434104e38dae71f853e6d2bd5f2b98f9e95105100f6354e65c5946da5640b6cd8ab3f56140d92934d69697b2f1ba12e8c24d817d69b348ddfac83d856918ba8c0aa441ac00000000',
'010000002dab0bfb58146516975016ad954f4d359d683e07fb648a10edcc805300000000ce3465d7e5317988c75a41ea862dc489918005c3d90f7e4590ab3ac04dc1836e28406a49ffff001d08c317350101000000010000000000000000000000000000000000000000000000000000000000000000ffffffff0804ffff001d02bd00ffffffff0100f2052a010000004341042173053be010806a7ff0cbadb4d30ee3cd7f2035a8d53c002ab0b59c218dac6ac7c8bcd0dfa096eceb093e570c03e8762ce113488897e3b3ec703075a6bcf563ac00000000',
'01000000a966418ed4f17c3c3c561f2bdfb169edceeae84cf1ac526d89918bd30000000052fa7ddc59d3574bbf404011820e1e48cfbee2aa6e8f2f5b472bbfbfab874fe9d2416a49ffff001d17e6dbf80101000000010000000000000000000000000000000000000000000000000000000000000000ffffffff0804ffff001d02c400ffffffff0100f2052a01000000434104d562908511d1d59bc645a3ba0e385616f00094b4d9b22913e170ac37e0efc7d258734bb476edc17be2c8bb7276dfb11cff10b89bc9db81cf48075a37d379d30bac00000000',
'01000000844617f4b214028227f2a12e1f51713fa9c0b5221bb2dee5bad355ae00000000dc3ebd22794574257ffbfd27b91a86dd7012b2ed308f406523074da63465cccbf4436a49ffff001d34b4a57b0 |
AdoptForm,
InterestForm,
Technology,
ErrataList,
PrivacyPolicy,
PrintOrder,
ResearchPage,
TeamPage,
Careers,
Impact,
InstitutionalPartnership,
HeroJourneyPage,
InstitutionalPartnerProgramPage,
CreatorFestPage,
PartnersPage,
WebinarPage,
MathQuizPage,
LLPHPage,
TutorMarketing,
TutorLanding,
| Subjects,
Subject)
from news.models import NewsIndex, PressIndex
from books.models import BookIndex
from shared.test_utilities import assertPathDoesNotRedirectToTrailingS | lash
class HomePageTests(WagtailPageTests):
    """Creation rules and basic responses for the site HomePage."""

    def test_cant_create_homepage_under_homepage(self):
        # A HomePage must never be nested beneath another HomePage.
        self.assertCanNotCreateAt(HomePage, HomePage)

    def test_homepage_return_correct_page(self):
        self.assertEqual(self.client.get('/').status_code, 200)

    def test_can_create_homepage(self):
        root = Page.objects.get(title="Root")
        home = HomePage(title="Hello World", slug="hello-world")
        root.add_child(instance=home)
        fetched = Page.objects.get(id=home.id)
        self.assertEqual(fetched.title, "Hello World")

    def test_allowed_subpages(self):
        # Every page type that may live directly beneath the homepage.
        allowed = {
            HigherEducation, ContactUs, AboutUsPage, GeneralPage,
            NewsIndex, PressIndex, BookIndex, Supporters, MapPage,
            Give, TermsOfService, AP, FAQ, Support, GiveForm,
            Accessibility, Licensing, CompCopy, AdoptForm, InterestForm,
            Technology, ErrataList, PrivacyPolicy, PrintOrder,
            ResearchPage, TeamPage, Careers, Impact,
            InstitutionalPartnership, HeroJourneyPage,
            InstitutionalPartnerProgramPage, CreatorFestPage,
            PartnersPage, WebinarPage, MathQuizPage, LLPHPage,
            TutorMarketing, TutorLanding, Subjects,
        }
        self.assertAllowedSubpageTypes(HomePage, allowed)
class PageTests(WagtailPageTests):
    """Creation tests for assorted content pages under a HomePage."""

    def setUp(self):
        root = Page.objects.get(title="Root")
        self.homepage = HomePage(title="Hello World", slug="hello-world")
        root.add_child(instance=self.homepage)

    def test_can_create_ipp_page(self):
        self.assertCanCreateAt(HomePage, InstitutionalPartnerProgramPage)

    def test_can_create_llph_page(self):
        page = LLPHPage(
            title="LLPH",
            heading="Heading",
            subheading="Subheading",
            signup_link_href="http://rice.edu",
            signup_link_text="Click me",
            info_link_slug="/llph-slug",
            info_link_text="Click me",
            book_heading="Book heading",
            book_description="I should accept <b>HTML</b>.")
        self.homepage.add_child(instance=page)
        self.assertCanCreateAt(HomePage, LLPHPage)
        self.assertEqual(Page.objects.get(id=page.id).title, "LLPH")

    def test_can_create_team_page(self):
        page = TeamPage(
            title="Team Page",
            header="Heading",
            subheader="Subheading",
            team_header="Our Team")
        self.homepage.add_child(instance=page)
        self.assertCanCreateAt(HomePage, TeamPage)
        # Publish a revision so the page is live before re-fetching it.
        page.save_revision().publish()
        page.save()
        self.assertEqual(Page.objects.get(id=page.id).title, "Team Page")
class ErrataListTest(WagtailPageTests):
    """An ErrataList page can be created beneath a HomePage."""

    def test_can_create_errata_list_page(self):
        root = Page.objects.get(title="Root")
        home = HomePage(title="Hello World", slug="hello-world")
        root.add_child(instance=home)
        errata = ErrataList(
            title="Errata List Template",
            correction_schedule="Some sample correction schedule text.",
            new_edition_errata_message="New edition correction text.",
            deprecated_errata_message="Deprecated errata message.",
            about_header="About our correction schedule.",
            about_text="Errata receieved from March through...",
            about_popup="Instructor and student resources...")
        home.add_child(instance=errata)
        self.assertEqual(Page.objects.get(id=errata.id).title, "Errata List Template")
class SubjectsPageTest(WagtailPageTests):
    """The Subjects index page can be created beneath a HomePage."""

    def test_can_create_subjects_page(self):
        root = Page.objects.get(title="Root")
        home = HomePage(title="Hello World", slug="hello-world")
        root.add_child(instance=home)
        subjects = Subjects(
            title="Subjects",
            heading="Testing Subjects Page",
            description="This is a Subjects page test",
            philanthropic_support="Please support us")
        home.add_child(instance=subjects)
        self.assertEqual(Page.objects.get(id=subjects.id).title, "Subjects")
class SubjectPageTest(WagtailPageTests):
    """A Subject detail page can be created beneath the Subjects index."""

    def test_can_create_subject_page(self):
        root = Page.objects.get(title="Root")
        home = HomePage(title="Hello World", slug="hello-world")
        root.add_child(instance=home)
        subjects_index = Subjects(
            title="Subjects",
            heading="Testing Subjects Page",
            description="This is a Subjects page test",
            philanthropic_support="Please support us")
        home.add_child(instance=subjects_index)
        business = Subject(
            title="Business",
            page_description="Business page",
            os_textbook_heading="OpenStax Business Textbooks",
            philanthropic_support="Please support us")
        subjects_index.add_child(instance=business)
        self.assertEqual(Page.objects.get(id=business.id).title, "Business")
class AdminPages(TestCase, WagtailTestUtils):
    """Smoke tests that the Wagtail admin endpoints respond with HTTP 200."""

    def setUp(self):
        # login() comes from WagtailTestUtils — presumably creates and signs
        # in a superuser for self.client; confirm against the mixin.
        self.login()

    @property
    def target(self):
        # Property returning a helper that GETs `path` and asserts a 200
        # response.  (Despite the inner name, no redirect is asserted here.)
        def test_redirect(path):
            response = self.client.get(path)
            self.assertEqual(response.status_code, 200)
            return response
        return test_redirect

    def test_admin_link(self):
        self.target('/admin/')

    def test_slashless_admin_link(self):
        self.target('/admin')
def test_images_link(self):
self.target('/admi |
from core import messages
from core.weexceptions import FatalException
from mako import template
from core.config import sessions_path, sessions_ext
from core.loggers import log, stream_handler
from core.module import Status
import os
import yaml
import glob
import logging
import urlparse
import atexit
import ast
# Non-module session keys that Session.print_to_user is allowed to display.
print_filters = [
    'debug',
    'channel'
]
# Non-module session keys that Session.set may assign at the top level.
set_filters = [
    'debug',
    'channel'
]
class Session(dict):
    """Weevely session state, stored as a plain dict.

    Keys are either module names (mapping to dicts holding 'stored_args')
    or simple top-level settings such as 'debug' and 'channel'.  The dict
    is persisted to YAML at interpreter exit via _session_save_atexit.
    """

    def _session_save_atexit(self):
        """Dump the session dict to its YAML file (registered via atexit)."""
        # Use a context manager so the file is flushed and closed
        # deterministically; the previous code leaked the handle.
        with open(self['path'], 'w') as sessionfile:
            yaml.dump(
                dict(self),
                sessionfile,
                default_flow_style = False
            )

    def print_to_user(self, module_filter = ''):
        """Log all stored settings, optionally restricted to a prefix."""
        for mod_name, mod_value in self.items():
            if isinstance(mod_value, dict):
                mod_args = mod_value.get('stored_args')
                # Is a module, print all the storable stored_arguments
                for argument, arg_value in mod_args.items():
                    if not module_filter or ("%s.%s" % (mod_name, argument)).startswith(module_filter):
                        log.info("%s.%s = '%s'" % (mod_name, argument, arg_value))
            else:
                # If is not a module, just print if matches with print_filters
                if any(f for f in print_filters if f == mod_name):
                    log.info("%s = '%s'" % (mod_name, mod_value))

    def get_connection_info(self):
        """Render a short template summary of the remote connection state."""
        return template.Template(messages.sessions.connection_info).render(
            url = self['url'],
            user = self['system_info']['results'].get('whoami', ''),
            host = self['system_info']['results'].get('hostname', ''),
            path = self['file_cd']['results'].get('cwd', '.')
        )

    def action_debug(self, module_argument, value):
        """Raise or lower the console log level when 'debug' changes."""
        if value:
            stream_handler.setLevel(logging.DEBUG)
        else:
            stream_handler.setLevel(logging.INFO)

    def set(self, module_argument, value):
        """Called by user to set or show the session variables"""
        # I safely evaluate the value type to avoid to save only
        # strings type. Dirty but effective.
        # TODO: the actual type of the argument could be acquired
        # from modules[module].argparser.
        try:
            value = ast.literal_eval(value)
        except Exception:
            # If is not evalued, just keep it as string
            pass

        # If action_<module_argument> function exists, trigger the action
        action_name = 'action_%s' % (module_argument.replace('.','_'))
        if hasattr(self, action_name):
            action_func = getattr(self, action_name)
            if hasattr(action_func, '__call__'):
                action_func(module_argument, value)

        if module_argument.count('.') == 1:
            # Dotted form "<module>.<argument>" updates a module's stored args.
            module_name, arg_name = module_argument.split('.')
            if arg_name not in self[module_name]['stored_args']:
                log.warn(messages.sessions.error_storing_s_not_found % ( '%s.%s' % (module_name, arg_name) ))
            else:
                self[module_name]['stored_args'][arg_name] = value
                log.info("%s.%s = '%s'" % (module_name, arg_name, value))
        else:
            # Plain form: only whitelisted top-level settings may be set.
            module_name = module_argument

            if module_name not in self or module_name not in set_filters:
                log.warn(messages.sessions.error_storing_s_not_found % (module_name))
            else:
                self[module_name] = value
                log.info("%s = %s" % (module_name, value))

                # If the channel is changed, the basic shell_php is moved
                # to IDLE and must be setup again.
                if module_name == 'channel':
                    self['shell_php']['status'] = Status.IDLE
class SessionFile(Session):
    """Session restored from an explicit session db file path."""

    def __init__(self, dbpath, volatile = False):
        """Load the YAML session at dbpath.

        :param dbpath: path of the session YAML file.
        :param volatile: when True, do not persist the session at exit.
        :raises FatalException: if the file cannot be loaded or lacks
            the mandatory 'url'/'password' entries.
        """
        try:
            # Close the file promptly instead of leaking the handle.
            # NOTE(review): yaml.load executes arbitrary constructors;
            # yaml.safe_load would be safer if session files can come
            # from untrusted sources.
            with open(dbpath, 'r') as dbfile:
                sessiondb = yaml.load(dbfile.read())
        except Exception as e:
            log.warn(
                messages.generic.error_loading_file_s_s %
                (dbpath, str(e)))
            raise FatalException(messages.sessions.error_loading_sessions)

        saved_url = sessiondb.get('url')
        saved_password = sessiondb.get('password')

        if saved_url and saved_password:
            if not volatile:
                # Register dump at exit and return
                atexit.register(self._session_save_atexit)

            self.update(sessiondb)
            return

        log.warn(
            messages.sessions.error_loading_file_s %
            (dbpath, 'no url or password'))

        raise FatalException(messages.sessions.error_loading_sessions)
class SessionURL(Session):
    """Session located (or newly created) from a target URL and password."""

    def __init__(self, url, password, volatile = False):
        """Load an existing session matching url/password, else create one.

        :param url: target agent URL (must contain a hostname).
        :param password: agent password.
        :param volatile: when True, never persist the session at exit.
        :raises FatalException: on a malformed URL.
        """
        if not os.path.isdir(sessions_path):
            os.makedirs(sessions_path)

        # Guess a generic hostfolder/dbname
        hostname = urlparse.urlparse(url).hostname
        if not hostname:
            raise FatalException(messages.generic.error_url_format)

        hostfolder = os.path.join(sessions_path, hostname)
        dbname = os.path.splitext(os.path.basename(urlparse.urlsplit(url).path))[0]

        # Check if session already exists
        sessions_available = glob.glob(
            os.path.join(
                hostfolder,
                '*%s' %
                sessions_ext))

        for dbpath in sessions_available:
            try:
                # Close the file promptly instead of leaking the handle.
                # NOTE(review): yaml.load executes arbitrary constructors;
                # prefer yaml.safe_load for files of uncertain origin.
                with open(dbpath, 'r') as dbfile:
                    sessiondb = yaml.load(dbfile.read())
            except Exception as e:
                log.warn(
                    messages.generic.error_loading_file_s_s %
                    (dbpath, str(e)))
            else:
                saved_url = sessiondb.get('url')
                saved_password = sessiondb.get('password')

                if not saved_url or not saved_password:
                    log.warn(
                        messages.generic.error_loading_file_s_s %
                        (dbpath, 'no url or password'))

                if saved_url == url and saved_password == password:
                    # Found correspondent session file.
                    # Register dump at exit and return
                    if not volatile:
                        atexit.register(self._session_save_atexit)

                    self.update(sessiondb)
                    return

        # If no session was found, create a new one with first available filename
        index = 0
        while True:
            dbpath = os.path.join(
                hostfolder, '%s_%i%s' %
                (dbname, index, sessions_ext))
            if not os.path.isdir(hostfolder):
                os.makedirs(hostfolder)

            if not os.path.exists(dbpath):
                sessiondb = {}
                sessiondb.update(
                    { 'path': dbpath,
                      'url': url,
                      'password': password,
                      'debug': False,
                      'channel' : None,
                      'default_shell' : None
                    }
                )

                # Register dump at exit and return
                if not volatile:
                    atexit.register(self._session_save_atexit)

                self.update(sessiondb)
                return
            else:
                # Filename taken: try the next index.
                index += 1

        raise FatalException(messages.sessions.error_loading_sessions)
|
from six.moves import xrange
import tensorflow as tf
from .var_layer import VarLayer
from ..tf import rescaled_laplacian
def conv(features, adj, weights):
  """Chebyshev spectral graph convolution.

  Computes sum_{k=0..K} T_k(L) @ features @ weights[k], where T_k is the
  k-th Chebyshev polynomial and L the rescaled normalized laplacian built
  from the adjacency matrix adj.  K is inferred from the weights tensor.
  """
  degree = weights.get_shape()[0].value - 1
  # Create and rescale the normalized laplacian.
  laplacian = rescaled_laplacian(adj)

  # T_0(x) = 1, so the zeroth term uses the raw features.
  tx_prev = features
  result = tf.matmul(tx_prev, weights[0])

  if degree > 0:
    # T_1(x) = x.
    tx_cur = tf.sparse_tensor_dense_matmul(laplacian, features)
    result += tf.matmul(tx_cur, weights[1])

  # Recurrence: T_k(x) = 2 * x * T_{k-1}(x) - T_{k-2}(x).
  for order in xrange(2, degree + 1):
    tx_next = 2 * tf.sparse_tensor_dense_matmul(laplacian, tx_cur) - tx_prev
    result += tf.matmul(tx_next, weights[order])
    tx_prev, tx_cur = tx_cur, tx_next

  return result
class ChebyshevGCNN(VarLayer):
  """Graph CNN layer applying Chebyshev polynomial filters of a fixed
  degree, with one adjacency matrix per example in the batch."""

  def __init__(self, in_channels, out_channels, adjs, degree, **kwargs):
    self.adjs = adjs

    # One weight matrix per polynomial order (degree + 1 in total).
    super(ChebyshevGCNN, self).__init__(
        weight_shape=[degree + 1, in_channels, out_channels],
        bias_shape=[out_channels],
        **kwargs)

  def _call(self, inputs):
    # Each example carries its own adjacency, so convolve one by one.
    results = []
    for idx in xrange(len(inputs)):
      out = conv(inputs[idx], self.adjs[idx], self.vars['weights'])
      if self.bias:
        out = tf.nn.bias_add(out, self.vars['bias'])
      results.append(self.act(out))
    return results
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.