import os, sys, array
import numpy as np
class BigFile:
def __init__(self, datadir):
self.nr_of_images, self.ndims = map(int, open(os.path.join(datadir,'shape.txt')).readline().split())
id_file = os.path.join(datadir, "id.txt")
self.names = open(id_file).read().strip().split()
assert(len(self.names) == self.nr_of_images)
self.name2index = dict(zip(self.names, range(self.nr_of_images)))
self.binary_file = os.path.join(datadir, "feature.bin")
print ("[%s] %dx%d instances loaded from %s" % (self.__class__.__name__, self.nr_of_images, self.ndims, datadir))
def read(self, requested, isname=True):
requested = set(requested)
if isname:
index_name_array = [(self.name2index[x], x) for x in requested if x in self.name2index]
else:
assert(min(requested)>=0)
assert(max(requested)<len(self.names))
index_name_array = [(x, self.names[x]) for x in requested]
if len(index_name_array) == 0:
return [], []
index_name_array.sort(key=lambda v:v[0])
sorted_index = [x[0] for x in index_name_array]
nr_of_images = len(index_name_array)
vecs = [None] * nr_of_images
offset = np.float32(1).nbytes * self.ndims
res = array.array('f')
fr = open(self.binary_file, 'rb')
fr.seek(index_name_array[0][0] * offset)
res.fromfile(fr, self.ndims)
previous = index_name_array[0][0]
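        # After reading record `previous`, the file position sits at
        # (previous + 1) * offset bytes, so a relative seek of
        # (idx - previous - 1) records lands exactly on record `idx`.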
        for idx in sorted_index[1:]:  # `idx` instead of `next` to avoid shadowing the builtin
            move = (idx - 1 - previous) * offset
            fr.seek(move, 1)
            res.fromfile(fr, self.ndims)
            previous = idx
fr.close()
return [x[1] for x in index_name_array], [ res[i*self.ndims:(i+1)*self.ndims].tolist() for i in range(nr_of_images) ]
    def read_one(self, name):
renamed, vectors = self.read([name])
return vectors[0]
def shape(self):
return [self.nr_of_images, self.ndims]
class StreamFile:
def __init__(self, datadir):
        self.feat_dir = datadir
self.nr_of_images, self.ndims = map(int, open(os.path.join(datadir,'shape.txt')).readline().split())
id_file = os.path.join(datadir, "id.txt")
self.names = open(id_file).read().strip().split()
assert(len(self.names) == self.nr_of_images)
self.name2index = dict(zip(self.names, range(self.nr_of_images)))
self.binary_file = os.path.join(datadir, "feature.bin")
print ("[%s] %dx%d instances loaded from %s" % (self.__class__.__name__, self.nr_of_images, self.ndims, datadir))
self.fr = None
self.current = 0
def open(self):
self.fr = open(os.path.join(self.feat_dir,'feature.bin'), 'rb')
self.current = 0
def close(self):
if self.fr:
self.fr.close()
self.fr = None
def __iter__(self):
return self
def next(self):
if self.current >= self.nr_of_images:
self.close()
raise StopIteration
else:
res = array.array('f')
res.fromfile(self.fr, self.ndims)
_id = self.names[self.current]
self.current += 1
            return _id, res.tolist()

    __next__ = next  # Python 3 iterator protocol alias
if __name__ == '__main__':
bigfile = BigFile('toydata/FeatureData/f1')
    imset = 'b z a a b c'.split()
renamed, vectors = bigfile.read(imset)
for name,vec in zip(renamed, vectors):
        print(name, vec)
from __future__ import division, print_function, absolute_import
import numpy as np
from numpy.testing import assert_array_almost_equal
from scipy.sparse.csgraph import breadth_first_tree, depth_first_tree,\
csgraph_to_dense, csgraph_from_dense
def test_graph_breadth_first():
csgraph = np.array([[0, 1, 2, 0, 0],
[1, 0, 0, 0, 3],
[2, 0, 0, 7, 0],
[0, 0, 7, 0, 1],
[0, 3, 0, 1, 0]])
csgraph = csgraph_from_dense(csgraph, null_value=0)
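    # expected BFS tree rooted at node 0: keeps edges 0-1, 0-2, 1-4 and 2-3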
bfirst = np.array([[0, 1, 2, 0, 0],
[0, 0, 0, 0, 3],
[0, 0, 0, 7, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0]])
for directed in [True, False]:
bfirst_test = breadth_first_tree(csgraph, 0, directed)
assert_array_almost_equal(csgraph_to_dense(bfirst_test),
bfirst)
def test_graph_depth_first():
csgraph = np.array([[0, 1, 2, 0, 0],
[1, 0, 0, 0, 3],
[2, 0, 0, 7, 0],
[0, 0, 7, 0, 1],
[0, 3, 0, 1, 0]])
csgraph = csgraph_from_dense(csgraph, null_value=0)
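    # expected DFS tree rooted at node 0: the single path 0 -> 1 -> 4 -> 3 -> 2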
dfirst = np.array([[0, 1, 0, 0, 0],
[0, 0, 0, 0, 3],
[0, 0, 0, 0, 0],
[0, 0, 7, 0, 0],
[0, 0, 0, 1, 0]])
for directed in [True, False]:
        dfirst_test = depth_first_tree(csgraph, 0, directed)
assert_array_almost_equal(csgraph_to_dense(dfirst_test),
dfirst)
def test_graph_breadth_first_trivial_graph():
csgraph = np.array([[0]])
csgraph = csgraph_from_dense(csgraph, null_value=0)
bfirst = np.array([[0]])
for directed in [True, False]:
        bfirst_test = breadth_first_tree(csgraph, 0, directed)
assert_array_almost_equal(csgraph_to_dense(bfirst_test),
bfirst)
def test_graph_depth_first_trivial_graph():
csgraph = np.array([[0]])
csgraph = csgraph_from_dense(csgraph, null_value=0)
    dfirst = np.array([[0]])
    for directed in [True, False]:
        dfirst_test = depth_first_tree(csgraph, 0, directed)
        assert_array_almost_equal(csgraph_to_dense(dfirst_test),
                                  dfirst)
from HG_Code.HG_Test import UnitTest as UT
from HG_Code import Model as mo
from HG_Code.Visualize import Visualizer
from HG_Code.SimManager import sim_manager
from HG_Code.Hunger_Grid import hunger_grid
from HG_Code.Kat import Kat
from HG_Code import hg_settings
from HG_Code import Mutate as mu
unitTest = UT.Run_Unit_Test()
unitTest.run_test()
#mo.run_model(from_lava = .02,    # START LAVA CHANCE
#             to_lava = .02,      # END LAVA CHANCE
#             from_berry = .05,   # START BERRY CHANCE
#             to_berry = .05,     # END BERRY CHANCE
#             from_mut = 10,      # START MUTATION CHANCE
#             to_mut = 10,        # END MUTATION CHANCE
#             from_gen = 33,      # START GENERATE CHANCE
#             to_gen = 33,        # END GENERATE CHANCE
#             t_name = 'Default', # TITLE OF TEST
#             frames = -1)        # -1:Don't, 0:Only Last, N:every N
#mo.run_model() #Default
mo.run_model(.02,.5,.05,.05, 10, 10, 33, 33, 'Lava World', -1)
#mo.run_model(.2,.2,.05,.01, 10, 50, 33, 33, 'Nuclear Wasteland')
#mo.run_model(.02,.5,.05,.5, 10, 10, 33, 33, 'Berry World')
#mo.run_model(.00,.00,.1,.1, 10, 10, 33, 33, "No Lava")
#mo.run_model(.1,.1,0.0,0.0, 10, 10, 33, 33, "No Berries")
#mo.run_model(.1,.1,.1,.1,10,10,33,33,"Lava & Berries")
"""Start a tcp gateway."""
import click
from mysensors.cli.helper import (
common_gateway_options,
handle_msg,
run_async_gateway,
run_gateway,
)
from mysensors.gateway_tcp import AsyncTCPGateway, TCPGateway
def common_tcp_options(func):
"""Supply common tcp gateway options."""
    func = click.option(
        "-p",
        "--port",
        default=5003,
        show_default=True,
        type=int,
        help="TCP port of the connection.",
    )(func)
func = click.option(
"-H", "--host", required=True, help="TCP address of the gateway."
)(func)
return func
@click.command(options_metavar="<options>")
@common_tcp_options
@common_gateway_options
def tcp_gateway(**kwargs):
"""Start a tcp gateway."""
gateway = TCPGateway(event_callback=handle_msg, **kwargs)
run_gateway(gateway)
@click.command(options_metavar="<options>")
@common_tcp_options
@common_gateway_options
def async_tcp_gateway(**kwargs):
"""Start an async tcp gateway."""
gateway = AsyncTCPGateway(event_callback=handle_msg, **kwargs)
run_async_gateway(gateway)
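# Hypothetical invocation sketch (the actual command name depends on how the
# CLI group registers these commands):
#
#     pymysensors tcp-gateway -H 192.168.1.2 -p 5003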
base=None, # type: TypeVar
derived_func=None, # type: str
specials=None # type: SpecialSpec
):
# type: (...) -> None
self.name = name
self.__doc__ = doc
self.is_derived = isinstance(base, TypeVar)
if base:
assert self.is_derived
assert derived_func
self.base = base
self.derived_func = derived_func
self.name = '{}({})'.format(derived_func, base.name)
else:
min_lanes = 1 if scalars else 2
lanes = decode_interval(simd, (min_lanes, MAX_LANES), 1)
self.type_set = TypeSet(
lanes=lanes,
ints=ints,
floats=floats,
bools=bools,
bitvecs=bitvecs,
specials=specials)
@staticmethod
def singleton(typ):
# type: (types.ValueType) -> TypeVar
"""Create a type variable that can only assume a single type."""
scalar = None # type: types.ValueType
if isinstance(typ, types.VectorType):
scalar = typ.base
lanes = (typ.lanes, typ.lanes)
elif isinstance(typ, types.LaneType):
scalar = typ
lanes = (1, 1)
elif isinstance(typ, types.SpecialType):
return TypeVar(typ.name, typ.__doc__, specials=[typ])
else:
assert isinstance(typ, types.BVType)
scalar = typ
lanes = (1, 1)
ints = None
floats = None
bools = None
bitvecs = None
if isinstance(scalar, types.IntType):
ints = (scalar.bits, scalar.bits)
elif isinstance(scalar, types.FloatType):
floats = (scalar.bits, scalar.bits)
elif isinstance(scalar, types.BoolType):
bools = (scalar.bits, scalar.bits)
elif isinstance(scalar, types.BVType):
bitvecs = (scalar.bits, scalar.bits)
tv = TypeVar(
typ.name, typ.__doc__,
ints=ints, floats=floats, bools=bools,
bitvecs=bitvecs, simd=lanes)
return tv
def __str__(self):
# type: () -> str
return "`{}`".format(self.name)
def __repr__(self):
# type: () -> str
if self.is_derived:
return (
'TypeVar({}, base={}, derived_func={})'
.format(self.name, self.base, self.derived_func))
else:
return (
'TypeVar({}, {})'
.format(self.name, self.type_set))
def __hash__(self):
# type: () -> int
if (not self.is_derived):
return object.__hash__(self)
return hash((self.derived_func, self.base))
def __eq__(self, other):
# type: (object) -> bool
if not isinstance(other, TypeVar):
return False
if self.is_derived and other.is_derived:
return (
self.derived_func == other.derived_func and
self.base == other.base)
else:
return self is other
def __ne__(self, other):
# type: (object) -> bool
return not self.__eq__(other)
# Supported functions for derived type variables.
# The names here must match the method names on `ir::types::Type`.
# The camel_case of the names must match `enum OperandConstraint` in
# `instructions.rs`.
LANEOF = 'lane_of'
ASBOOL = 'as_bool'
HALFWIDTH = 'half_width'
DOUBLEWIDTH = 'double_width'
HALFVECTOR = 'half_vector'
DOUBLEVECTOR = 'double_vector'
TOBITVEC = 'to_bitvec'
@staticmethod
def is_bijection(func):
# type: (str) -> bool
return func in [
TypeVar.HALFWIDTH,
TypeVar.DOUBLEWIDTH,
TypeVar.HALFVECTOR,
TypeVar.DOUBLEVECTOR]
@staticmethod
def inverse_func(func):
# type: (str) -> str
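        # Only the bijective half/double functions are invertible; lane_of,
        # as_bool and to_bitvec have no inverse (see is_bijection above).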
return {
TypeVar.HALFWIDTH: TypeVar.DOUBLEWIDTH,
TypeVar.DOUBLEWIDTH: TypeVar.HALFWIDTH,
TypeVar.HALFVECTOR: TypeVar.DOUBLEVECTOR,
TypeVar.DOUBLEVECTOR: TypeVar.HALFVECTOR
}[func]
@staticmethod
def derived(base, derived_func):
# type: (TypeVar, str) -> TypeVar
"""Create a type variable that is a function of another."""
# Safety checks to avoid over/underflows.
ts = base.get_typeset()
assert len(ts.specials) == 0, "Can't derive from special types"
if derived_func == TypeVar.HALFWIDTH:
if len(ts.ints) > 0:
assert min(ts.ints) > 8, "Can't halve all integer types"
if len(ts.floats) > 0:
assert min(ts.floats) > 32, "Can't halve all float types"
if len(ts.bools) > 0:
assert min(ts.bools) > 8, "Can't halve all boolean types"
elif derived_func == TypeVar.DOUBLEWIDTH:
if len(ts.ints) > 0:
assert max(ts.ints) < MAX_BITS,\
"Can't double all integer types."
if len(ts.floats) > 0:
assert max(ts.floats) < MAX_BITS,\
"Can't double all float types."
if len(ts.bools) > 0:
assert max(ts.bools) < MAX_BITS, "Can't double all bool types."
elif derived_func == TypeVar.HALFVECTOR:
assert min(ts.lanes) > 1, "Can't halve a scalar type"
elif derived_func == TypeVar.DOUBLEVECTOR:
assert max(ts.lanes) < MAX_LANES, "Can't double 256 lanes."
return TypeVar(None, None, base=base, derived_func=derived_func)
@staticmethod
def from_typeset(ts):
# type: (TypeSet) -> TypeVar
""" Create a type variable from a type set."""
tv = TypeVar(None, None)
tv.type_set = ts
return tv
def lane_of(self):
# type: () -> TypeVar
"""
Return a derived type variable that is the scalar lane type of this
type variable.
When this type variable assumes a scalar type, the derived type will be
the same scalar type.
"""
return TypeVar.derived(self, self.LANEOF)
def as_bool(self):
    # type: () -> TypeVar
"""
Return a derived type variable that has the same vector geometry as
this type variable, but with boolean lanes. Scalar types map to `b1`.
"""
return TypeVar.derived(self, self.ASBOOL)
def half_width(self):
# type: () -> TypeVar
"""
        Return a derived type variable that has the same number of vector
        lanes as this one, but the lanes are half the width.
"""
return TypeVar.derived(self, self.HALFWIDTH)
def double_width(self):
# type: () -> TypeVar
"""
Return a derived type variable that has the same number of vector lanes
as this one, but the lanes are double the width.
"""
return TypeVar.derived(self, self.DOUBLEWIDTH)
def half_vector(self):
# type: () -> TypeVar
"""
Return a derived type variable that has half the number of vector lanes
as this one, with the same lane type.
"""
return TypeVar.derived(self, self.HALFVECTOR)
def double_vector(self):
# type: () -> TypeVar
"""
Return a derived type variable that has twice the number of vector
lanes as this one, with the same lane type.
"""
return TypeVar.derived(self, self.DOUBLEVECTOR)
def to_bitvec(self):
# type: () -> TypeVar
"""
        Return a derived type variable that represents a flat bitvector
        with the same size as `self`.
"""
return TypeVar.derived(self, self.TOBITVEC)
def singleton_type(self):
# type: () -> types.ValueType
"""
        If the associated typeset has a single type, return it. Otherwise,
        return None.
"""
ts = self.get_typeset()
if ts.size() != 1:
return None
return ts.get_singleton()
def free_typevar(self):
reverse, astart_point, aend_point)
if direction_check == 1:
cur2 = con.cursor()
st = self._get_steptime(route_id, astart_point, aend_point)
sql2 = """select id, TIMESTAMP WITH TIME ZONE '%s' + time '%s' from ride_requests where id = %d and
earliest_start_time <= TIMESTAMP WITH TIME ZONE '%s' + time '%s' and
latest_start_time >= TIMESTAMP WITH TIME ZONE '%s' + time '%s'
"""
cur2.execute(sql2 % (timestamp, st, aid, timestamp, st, timestamp, st))
res = cur2.fetchone()
            if res is not None:
                matching_route_id, point_starttime_db = res
l.append(matching_route_id)
return l
def get_matches_for_request(self, request_id):
"""
find all matches for a specific request
"""
con = psycopg2.connect(self._dsn)
cur = con.cursor()
sql = "select id, to_char(earliest_start_time, 'YYYY-Mon-DD HH24:MI:SS'), to_char(latest_start_time, 'YYYY-Mon-DD HH24:MI:SS'), to_char(request_time, 'YYYY-Mon-DD HH24:MI:SS'), status, start_point, end_point, user_number_id from ride_requests where id=%d"
cur.execute(sql % request_id)
res = cur.fetchone()
start_time = res[1]
latest_start_time = res[2]
start_point = res[5]
end_point = res[6]
sql = "select id, location_id from pickuppoint where id=%d"
cur.execute(sql % start_point)
res_l = cur.fetchone()
location_id = res_l[1]
l = []
offer_text = ''
cur.execute(self._POSSIBLE_RIDES_STATEMENT % (start_point, end_point))
for roid, in cur.fetchall():
cur2 = con.cursor()
ride_res = self._get_ride_offer_info(roid)
if ride_res != False:
route_id, reverse, driver_name, driver_cell_phone_nr = ride_res
st = self._get_steptime(route_id, start_point, end_point)
if self._check_direction(route_id, reverse, start_point, end_point):
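                    # An offer matches when its departure at this pickup point
                    # (route start_time plus the per-leg step time) falls inside
                    # the rider's [earliest, latest] start window.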
sql2 = """SELECT start_time + time '%s'
FROM ride_offers WHERE
start_time + time '%s' >= TIMESTAMP WITH TIME ZONE '%s'
AND start_time + time '%s' <= TIMESTAMP WITH TIME ZONE '%s' AND id = %d"""
timematch = sql2 % (st, st, start_time, st, latest_start_time, roid)
cur2.execute(timematch)
amatch = cur2.fetchone()
                    if amatch is not None:
start_time_db = amatch[0]
point_starttime = self._dbtime_to_localtime(start_time_db, self.get_timezone_name(location_id))
offer_text += '%s, %s, %s\n' % (point_starttime.strftime(self._timeformat), driver_name, driver_cell_phone_nr)
l.append(roid)
cur2.close()
return l
def request_ride(self,
user_number_id,
location_id,
start_key,
end_key,
earliest_start_time,
latest_start_time,
start_date=None,
send_sms=1):
"""
This function is for requesting a ride for a route.
It starts the match process in the backend,
this may work asynchronous as it doesn't return anything.
It closes all requests for this user_id and this route_id before
earliest_start_time.
@param user_id: backend specific user-id
@param loaction_id: backaend specific id of the location
@param start_key: key of the start pickuppoint
@param end_key: key of the end pickuppoint
@param earliest_start_time: 4 digit integer HHMM
@param latest_start_time: 4 digit integer HHMM
"""
##open db connection
con = psycopg2.connect(self._dsn)
cur = con.cursor()
        ##validate parameters
if not verify_user_number_id(user_number_id, con):
raise Call2RideError("ERR_UNKNOWN_USER_ID")
## check location_id
if location_id == 0:
location_id = self._get_default_location_id(user_number_id)
lang = self._get_lang_by_user_number_id(user_number_id)
##translate start_key and end_key to ids start_point and end_point
start_point, start_name = self._point_key_info(location_id, start_key)
        end_point, end_name = self._point_key_info(location_id, end_key)
##convert times
tzname = self.get_timezone_name(location_id)
check_est = c2r_time_to_datetime(start_date, earliest_start_time, tzname, True)
start_time = c2r_time_to_datetime(start_date, earliest_start_time, tzname)
        latest_start_time = c2r_time_to_datetime(start_date, latest_start_time, tzname, False, check_est)
        ##close all requests before this one
self.close_request(user_number_id, start_point, end_point, False, start_date)
##insert into db
cur.execute(self._INSERT_REQUEST_STATEMENT % (user_number_id,
start_point,
end_point,
start_time,
latest_start_time))
con.commit()
## Find possible ride offers
offer_text = ''
cur.execute(self._POSSIBLE_RIDES_STATEMENT % (start_point, end_point))
for roid, in cur.fetchall():
cur2 = con.cursor()
ride_res = self._get_ride_offer_info(roid)
if ride_res != False:
route_id, reverse, driver_name, driver_cell_phone_nr = ride_res
st = self._get_steptime(route_id, start_point, end_point)
if self._check_direction(route_id, reverse, start_point, end_point):
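                    # Same time-window test as in get_matches_for_request: the
                    # offer's departure at this stop must fall inside the
                    # rider's [start_time, latest_start_time] window.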
sql2 = """SELECT start_time + time '%s'
FROM ride_offers WHERE
start_time + time '%s' >= TIMESTAMP WITH TIME ZONE '%s'
AND start_time + time '%s' <= TIMESTAMP WITH TIME ZONE '%s' AND id = %d"""
timematch = sql2 % (st, st, start_time, st, latest_start_time, roid)
cur2.execute(timematch)
amatch = cur2.fetchone()
                    if amatch is not None:
start_time_db = amatch[0]
point_starttime = self._dbtime_to_localtime(start_time_db, self.get_timezone_name(location_id))
offer_text += '%s, %s, %s\n' % (point_starttime.strftime(self._timeformat), driver_name, driver_cell_phone_nr)
cur2.close()
##match transport
cur.execute(self._REQ_PUBLIC_TRANSPORT_MATCH % (start_time,
latest_start_time,
start_point, end_point))
for deptime, means in cur.fetchall():
offer_text += '%s, %s\n' % (deptime.strftime(self._timeformat), means)
##match taxi
cur.execute(self._GET_TAXI_NUMBER_STATEMENT % location_id)
res = cur.fetchone()
if res is not None:
offer_text += self._t(self._SMS_REQ_TAXI_TEXT, lang) % res[0]
##send sms
sql_p = 'SELECT name from pickuppoint where id = %d'
cur.execute(sql_p % start_point)
res = cur.fetchone()
p1name = res[0]
cur.execute(sql_p % end_point)
res = cur.fetchone()
p2name = res[0]
XSMS_FOUND_TEXT = """Your next rides from %s to %s are:
%s
"""
XSMS_FOUND_TEXT = self._t(XSMS_FOUND_TEXT, lang)
smstext = XSMS_FOUND_TEXT % (p1name.decode("utf-8"), p2name.decode("utf-8"), offer_text)
xdriver_name, xdriver_cellphone = self._get_user_info(user_number_id)
if send_sms==1:
self._send_sms(xdriver_cellphone, smstext)
cur.close()
con.close()
return smstext
def close_request_route(self, user_number_id, location_id, route_key, reverse, throw=True, start_date=None):
con = psycopg2.connect(self._dsn)
cur = con.cursor()
##find route id
sql = "select r.id from routes r, user_number un where key='%s' and r.user_id=un.user_id and un.id = %d and location_id=%d"
cur.execute(sql % (route_key, user_number_id, location_id))
res = cur.fetchone()
        if res is None:
            sql = "select id from routes wher
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function, absolute_import
CSV_DEFAULT_SEP = ','
CSV_DEFAULT_LF = '\n'
def append_same_length(columns, data):
    """Return True when data has as many fields as the declared columns
    (always True when no columns are declared)."""
    if columns != []:
        return len(columns) == len(data)
    return True
class DataBuffer(object):
def __init__(self, csv_filename, columns=None, csv_sep=CSV_DEFAULT_SEP, csv_lf=CSV_DEFAULT_LF):
self._fd = open(csv_filename, "w")
self.csv_sep = csv_sep
self.csv_lf = csv_lf
if columns is None:
self._columns = []
else:
self._columns = columns
@property
def columns(self):
return self._columns
@columns.setter
def columns(self, value):
self._columns = value
self._append_columns()
def _append_columns(self):
if self.columns != []:
self.append(*self.columns)
def append(self, *data):
assert append_same_length(self.columns, data), "data and columns must have same length"
        s = self.csv_sep.join(map(str, data)) + self.csv_lf
self._fd.write(s)
def close(self):
self._fd.close()
#def to_csv(self, filename):
# pass
def __enter__(self):
return self
def __exit__(self, typ, value, traceback):
self.close()
def main():
data = DataBuffer("data.csv")
data.columns = ["a", "b", "c"]
data.append(1, 2.5, 3)
data.append(4, 5, 6)
data.append(7, 8, 9)
data.close()
def main_with_context_manager():
"""
Using a context manager (with ...) will
    automatically close the file descriptor.
"""
with DataBuffer("data.csv") as data:
data.columns = ["a", "b", "c"]
        data.append(1, 2.5, 3)
data.append(4, 5, 6)
data.append(7, 8, 9)
if __name__ == '__main__':
    main_with_context_manager()
})".format(mpc_obs.comment.x,
mpc_obs.comment.y,
x.value,
y.value))
except Exception as ex:
logging.error(str(ex))
# Don't use the new X/Y for Hand measured entries. (although call to get_observed_magnitude should have changed)
if str(mpc_obs.note1) != "H" and not skip_centroids:
mpc_obs.comment.x = x.value
mpc_obs.comment.y = y.value
try:
mag = float(mag)
    except (TypeError, ValueError):
return mpc_obs
if math.isnan(mag):
return mpc_obs
if mag > 10:
mpc_obs._band = filter_value
mpc_obs.comment.mag = mag
mpc_obs.comment.mag_uncertainty = merr
# Update the mpc record magnitude if previous value existed here.
if (mpc_obs.mag is not None or (mpc_obs.mag is None and mpc_in.comment.photometry_note[0] == "Z")) and mag > 10:
mpc_obs.mag = mag
return mpc_obs
def run(mpc_file, cor_file,
        skip_discovery=True, skip_mags=False,
        skip_centroids=False, do_compare_orbits=False):
    """
    :param mpc_file: A file containing the astrometric lines to be updated.
    :param cor_file: The base name for the updated astrometry and diagnostic files.
    :param skip_discovery: Should discovery observations be skipped?
    :param skip_mags: Should we skip recomputing the magnitude of sources?
    :param skip_centroids: Should we keep the original pixel centroids?
    :param do_compare_orbits: Should the original and recomputed orbits be
        compared? (renamed from compare_orbits so the flag does not shadow
        the compare_orbits function below)
    :return: :raise ValueError: If actions on the mpc_obs indicate this is not a valid OSSOS observation
    """
observations = mp_ephem.EphemerisReader().read(mpc_file)
logging.debug("Read in Observations: {}".format(observations))
original_obs = []
modified_obs = []
logging.info("ASTROMETRY FILE: {} --> {}.tlf".format(mpc_file, cor_file))
for mpc_in in observations:
try:
if not isinstance(mpc_in.comment, mp_ephem.ephem.OSSOSComment):
logging.info(type(mpc_in.comment))
logging.info("Skipping: {}".format(mpc_in.to_string()))
continue
if ((skip_discovery and mpc_in.discovery) or
(not skip_discovery and not mpc_in.discovery)):
logging.info("Discovery mis-match")
logging.info("Skipping: {}".format(mpc_in.to_string()))
continue
logging.info("="*220)
logging.info(" orig: {}".format(mpc_in.to_string()))
if mpc_in.comment.astrometric_level == 4:
logging.info("Already at maximum AstLevel, skipping.")
continue
if mpc_in.null_observation:
logging.info("Skipping NULL observation.")
continue
mpc_obs = remeasure(mpc_in)
logging.info("new wcs: {}".format(mpc_obs.to_string()))
if not skip_mags:
# and not mpc_obs.comment.photometry_note[0] == "Z":
mpc_mag = remeasure(recompute_mag(mpc_obs,
skip_centroids=skip_centroids),
reset_pixel_coordinates=not skip_centroids)
else:
mpc_mag = mpc_obs
sep = mpc_in.coordinate.separation(mpc_mag.coordinate)
if sep > TOLERANCE:
logging.error("Large offset: {} arc-sec".format(sep))
logging.error("orig: {}".format(mpc_in.to_string()))
logging.error(" new: {}".format(mpc_mag.to_string()))
new_comment = "BIG SHIFT HERE"
mpc_mag.comment.comment = mpc_mag.comment.comment + " " + new_comment
logging.info("new cen: {}".format(mpc_mag.to_string()))
original_obs.append(mpc_in)
modified_obs.append(mpc_mag)
logging.info("="*220)
        except Exception:
            logging.error("Skipping: {}".format(mpc_in))
optr = open(cor_file + ".tlf", 'w')
for idx in range(len(modified_obs)):
inp = original_obs[idx]
out = modified_obs[idx]
if inp != out:
optr.write(out.to_tnodb()+"\n")
optr.close()
    if not do_compare_orbits:
        return True
    try:
        compare_orbits(original_obs, modified_obs, cor_file)
except Exception as ex:
logging.error("Orbit comparison failed: {}".format(ex))
logging.info("="*220)
return True
def compare_orbits(original_obs, modified_obs, cor_file):
"""Compare the orbit fit given the oringal and modified astrometry."""
origin = orbfit.Orbfit(original_obs)
modified = orbfit.Orbfit(modified_obs)
    orbpt = open(cor_file + ".orb", 'w')
# Dump summaries of the orbits
orbpt.write("#"*80+"\n")
orbpt.write("# ORIGINAL ORBIT\n")
orbpt.write(origin.summarize()+"\n")
orbpt.write("#"*80+"\n")
orbpt.write("# MODIFIED ORBIT\n")
orbpt.write(modified.summarize()+"\n")
orbpt.write("#"*80+"\n")
# Create a report of the change in orbit parameter uncertainty
for element in ['a', 'e', 'inc', 'om', 'Node', 'T']:
oval = getattr(origin, element).value
doval = getattr(origin, "d"+element).value
mval = getattr(modified, element).value
dmval = getattr(modified, "d"+element).value
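        # Print enough decimal places to resolve the leading digit of the
        # smaller (more precise) of the two uncertainties, plus one extra.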
precision = max(int(-1*math.floor(math.log10(dmval))), int(-1*math.floor(math.log10(doval)))) + 1
precision = max(0, precision)
vdigits = 12
ddigits = 6
vpadding = " "*int(vdigits-precision)
dpadding = " "*int(ddigits-precision)
orbpt.write("{element:>5s}: "
"{oval[0]:>{vdigits}.{vdigits}}.{oval[1]:<{precision}.{precision}} {vpadding} +/- "
"{doval[0]:>{ddigits}.{ddigits}}.{doval[1]:<{precision}.{precision}} {dpadding} ==> "
"{mval[0]:>{vdigits}.{vdigits}}.{mval[1]:<{precision}.{precision}} {vpadding} +/- "
"{dmval[0]:>{ddigits}.{ddigits}}.{dmval[1]:<{precision}.{precision}}\n".format(
element=element,
dpadding=dpadding,
vpadding=vpadding,
vdigits=vdigits,
ddigits=ddigits,
oval="{:12.12f}".format(oval).split("."),
doval="{:12.12f}".format(doval).split("."),
mval="{:12.12f}".format(mval).split("."),
dmval="{:12.12f}".format(dmval).split("."),
precision=precision)
)
delta = math.fabs(oval - mval)
if delta > 3.5 * doval:
logging.warn("large delta for element {}: {} --> {}".format(element, oval, mval))
# Compute the stdev of the residuals and report the change given the new observations
orbpt.write("*"*80+"\n")
orbpt.write("Change in orbital parameters \n")
sep = "Change in scatter between initial and recalibrated obseravtions. \n"
for orb in [origin, modified]:
orbpt.write(sep)
sep = "\n ==> becomes ==> \n"
residuals = orb.residuals
dra = []
ddec = []
mags = {}
for observation in orb.observations:
if not observation.null_observation:
dra.append(observation.ra_residual)
ddec.append(observation.dec_residual)
                band = observation.band  # avoid shadowing the builtin filter()
                if band is not None:
                    if band not in mags:
                        mags[band] = []
                    try:
                        mags[band].append(float(observation.mag))
                    except (TypeError, ValueError):
                        pass
                if observation.comment.plate_uncertainty * 5.0 < \
                        ((observation.ra_residual ** 2 + observation.dec_residual ** 2) ** 0.5):
                    logging.warning("LARGE RESIDUAL ON: {}".format(observation.to_string()))
                    logging.warning("Fit residual unreasonably large.")
dra = numpy.array(dra)
ddec = numpy.array(ddec)
merr_str = ""
    for band in mags:
        mag = numpy.percentile(numpy.array(mags[band]), 50)
        mags[band] = numpy.percentile(numpy.array(mags[band]), (5, 95))
        merr = (mags[band][1] - mags[band][0]) / 6.0
        merr_str += " {}: {:8.2f} +/- {:8.2f}".format(band, mag, merr)
orbpt.write("ra_std:{:8.4} dec_std:{:8.4} mag: {}".format(dra.std(), ddec.std(), merr_st |
import os
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), '../tools'))
import files
import table
import genetics
def main(argv):
int_mass = table.integer_mass(argv[0])
lines = files.read_lines(argv[1])
    # lines[0]: the candidate peptides, lines[1]: the experimental spectrum,
    # lines[2]: the number N of leaders to keep after each trim.
    leaderboard = [([int_mass[p] for p in peptide], peptide) for peptide in lines[0].split()]
    spectrum = [int(m) for m in lines[1].split()]
    N = int(lines[2])
    print(' '.join(leader[1] for leader in genetics.trim_leaderboard(leaderboard, spectrum, N)))
if __name__ == "__main__":
main(sys.argv[1:])
from .mdl_user import *
from .mdl_club import *
from .mdl_event import *
from .mdl_receipt import *
from .mdl_budget import *
from .mdl_division import *
from .mdl_eventsignin import *
from .mdl_joinrequest import *
# -*- coding: utf-8 -*-
# Copyright 2008-2014 Jaap Karssenberg <jaap.karssenberg@gmail.com>
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
'''
This is the development documentation of zim.
B{NOTE:} There is also some generic development documentation in the
"HACKING" folder in the source distribution. Please also have a look
at that if you want to help with zim development.
In this API documentation many of the methods with names starting with
C{do_} and C{on_} are not documented. The reason is that these are
signal handlers that are not part of the external API. They act upon
a signal but should never be called directly by other objects.
Overview
========
The script C{zim.py} is a thin wrapper around the C{main()} function
defined in L{zim.main}. This main function constructs a C{Command}
object that implements a specific commandline command. The C{Command}
object then either connects to a running instance of zim, or executes
the application.
To execute the application, the command typically constructs a
C{Notebook} object, a C{PluginManager} and a C{ConfigManager}. Then
depending on the command the graphical interface is constructed, a
webserver is started or some other action is executed on the notebook.
The C{Notebook} object is found in L{zim.notebook} and implements the
API for accessing and storing pages, attachments and other data in
the notebook folder.
The notebook works together with an C{Index} object which keeps a
SQLite database of all the pages to speed up notebook access and allows
to e.g. show a list of pages in the side pane of the user interface.
Another aspect of the notebook is the parsing of the wiki text in the
pages such that it can be shown in the interface or exported to another
format. See L{zim.formats} for implementations of different parsers.
All classes related to configuration are located in L{zim.config}.
The C{ConfigManager} handles looking up config files and provides them
for all components.
Plugins are defined as sub-modules of L{zim.plugins}. The
C{PluginManager} manages the plugins that are loaded and objects that
can be extended by plugins.
The graphical user interface is implemented in the L{zim.gui} module
and it's sub-modules. The webinterface is implemented in L{zim.www}.
The graphical interface uses a background process to coordinate
between running instances, this is implemented in L{zim.ipc}.
Functionality for exporting content is implemented in L{zim.exporter}.
And search functionality can be found in L{zim.search}.
Many classes in zim have signals which allow other objects to connect
to and listen for specific events. This allows for an event driven chain
of control, which is mainly used in the graphical interface, but is
also used elsewhere. If you are not familiar with event driven programs
please refer to a Gtk manual.
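For example, a handler is connected in the usual GObject style:
C{notebook.connect('page-created', on_page_created)} (hypothetical
signal and handler names, for illustration only).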
Infrastructure classes
----------------------
All functions and objects to interact with the file system can be
found in L{zim.fs}.
For executing external applications see L{zim.applications} or
L{zim.gui.applications}.
Some generic base classes and functions can be found in L{zim.utils}
@newfield signal: Signal, Signals
@newfield emits: Emits, Emits
@newfield implementation: Implementation
'''
# New epydoc fields defined above are intended as follows:
# @signal: signal-name (param1, param2): description
# @emits: signal
# @implementation: must implement / optional for sub-classes
# Bunch of meta data, used at least in the about dialog
__version__ = '0.62'
__url__='http://www.zim-wiki.org'
__author__ = 'Jaap Karssenberg <jaap.karssenberg@gmail.com>'
__copyright__ = 'Copyright 2008 - 2014 Jaap Karssenberg <jaap.karssenberg@gmail.com>'
__license__='''\
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
'''
import os
import sys
import gettext
import logging
import locale
logger = logging.getLogger('zim')
#: This parameter can be set by ./setup.py, can be e.g. "maemo"
PLATFORM = None
########################################################################
## Note: all init here must happen before importing any other zim
## modules, so can not use zim.fs utilities etc.
## therefore ZIM_EXECUTABLE is a string, not an object
## Check executable and relative data dir
## (sys.argv[0] should always be correct, even for compiled exe)
if os.name == "nt":
# See notes in zim/fs.py about encoding expected by abspath
ZIM_EXECUTABLE = os.path.abspath(
unicode(sys.argv[0], sys.getfilesystemencoding())
)
else:
ZIM_EXECUTABLE = unicode(
os.path.abspath(sys.argv[0]),
sys.getfilesystemencoding()
)
## Initialize locale (needed e.g. for natural_sort)
locale.setlocale(locale.LC_ALL, '')
## Initialize gettext (maybe make this optional later for module use ?)
if os.name == "nt" and not os.environ.get('LANG'):
# Set locale config for gettext (other platforms have this by default)
# Using LANG because it is lowest prio - do not override other params
lang, enc = locale.getlocale()
os.environ['LANG'] = lang + '.' + enc
logging.info('Locale set to: %s', os.environ['LANG'])
_localedir = os.path.join(os.path.dirname(ZIM_EXECUTABLE), 'locale')
if not os.name == "nt":
_localedir = _localedir.encode(sys.getfilesystemencoding())
if os.path.isdir(_localedir):
# We are running from a source dir - use the locale data included there
gettext.install('zim', _localedir, unicode=True, names=('_', 'gettext', 'ngettext'))
else:
# Hope the system knows where to find the data
gettext.install('zim', None, unicode=True, names=('_', 'gettext', 'ngettext'))
########################################################################
## Now we are allowed to import sub modules
import zim.environ # initializes environment parameters
import zim.config
# Check if we can find our own data files
_file = zim.config.data_file('zim.png')
if not (_file and _file.exists()): #pragma: no cover
raise AssertionError(
'ERROR: Could not find data files in path: \n'
'%s\n'
'Try setting XDG_DATA_DIRS'
% map(str, zim.config.data_dirs())
)
def get_zim_revision():
'''Returns multiline string with bazaar revision info, if any.
Otherwise a string saying no info was found. Intended for debug
logging.
'''
try:
from zim._version import version_info
return '''\
Zim revision is:
branch: %(branch_nick)s
revision: %(revno)s %(revision_id)s
date: %(date)s''' % version_info
except ImportError:
return 'No bzr version-info found'
t symmetric difference, infinite set"""
self._helper_test_inifinite_set(self.fncs_list[2])
def test_set_symmetric_difference_infinite_empty(self):
"""intbitset - set symmetric difference, infinite vs empty"""
self._helper_test_infinite_vs_empty(self.fncs_list[2])
def test_set_difference(self):
"""intbitset - set difference, normal set"""
self._helper_test_normal_set(self.fncs_list[3])
def test_set_difference_empty(self):
"""intbitset - set difference, empty set"""
self._helper_test_empty_set(self.fncs_list[3])
def test_set_difference_infinite(self):
"""intbitset - set difference, infinite set"""
self._helper_test_inifinite_set(self.fncs_list[3])
def test_set_difference_infinite_empty(self):
"""intbitset - set difference, infinite vs empty"""
self._helper_test_infinite_vs_empty(self.fncs_list[3])
def test_set_intersection_in_place(self):
"""intbitset - set intersection, normal set in place"""
self._helper_test_normal_set(self.fncs_list[4])
def test_set_intersection_empty_in_place(self):
"""intbitset - set intersection, empty set in place"""
self._helper_test_empty_set(self.fncs_list[4])
def test_set_intersection_infinite_in_place(self):
"""intbitset - set intersection, infinite set in place"""
self._helper_test_inifinite_set(self.fncs_list[4])
def test_set_intersection_infinite_empty_in_place(self):
"""intbitset - set intersection, infinite vs empty in place"""
self._helper_test_infinite_vs_empty(self.fncs_list[4])
def test_set_union_in_place(self):
"""intbitset - set union, normal set in place"""
self._helper_test_normal_set(self.fncs_list[5])
def test_set_union_empty_in_place(self):
"""intbitset - set union, empty set in place"""
self._helper_test_empty_set(self.fncs_list[5])
def test_set_union_infinite_in_place(self):
"""intbitset - set union, infinite set in place"""
self._helper_test_inifinite_set(self.fncs_list[5])
def test_set_union_infinite_empty_in_place(self):
"""intbitset - set union, infinite vs empty in place"""
self._helper_test_infinite_vs_empty(self.fncs_list[5])
def test_set_symmetric_difference_in_place(self):
"""intbitset - set symmetric difference, normal set in place"""
self._helper_test_normal_set(self.fncs_list[6])
def test_set_symmetric_difference_empty_in_place(self):
"""intbitset - set symmetric difference, empty set in place"""
self._helper_test_empty_set(self.fncs_list[6])
def test_set_symmetric_difference_infinite_in_place(self):
"""intbitset - set symmetric difference, infinite set in place"""
self._helper_test_inifinite_set(self.fncs_list[6])
def test_set_symmetric_difference_infinite_empty_in_place(self):
"""intbitset - set symmetric difference, infinite vs empty in place"""
self._helper_test_infinite_vs_empty(self.fncs_list[6])
def test_set_difference_in_place(self):
"""intbitset - set difference, normal set in place"""
self._helper_test_normal_set(self.fncs_list[7])
def test_set_difference_empty_in_place(self):
"""intbitset - set difference, empty set in place"""
self._helper_test_empty_set(self.fncs_list[7])
def test_set_difference_infinite_in_place(self):
"""intbitset - set difference, infinite set in place"""
self._helper_test_inifinite_set(self.fncs_list[7])
def test_set_difference_infinite_empty_in_place(self):
"""intbitset - set difference, infinite vs empty in place"""
self._helper_test_infinite_vs_empty(self.fncs_list[7])
def test_list_dump(self):
"""intbitset - list dump"""
for set1 in self.sets + [[]]:
self.assertEqual(list(self.intbitset(set1)), set1)
def test_ascii_bit_dump(self):
"""intbitset - ascii bit dump"""
for set1 in self.sets + [[]]:
tot = 0
count = 0
for bit in self.intbitset(set1).strbits():
if bit == '0':
self.assertFalse(count in set1)
elif bit == '1':
self.assertFalse(count not in set1)
tot += 1
else:
self.fail()
count += 1
self.assertEqual(tot, len(set1))
def test_tuple_of_tuples(self):
"""intbitset - support tuple of tuples" | ""
for set1 in self.sets + [[]]:
tmp_tuple = tuple([(elem, ) for elem in set1])
self.assertEqual(list(self.intbitset(set1)), list(self.intbitset(tmp_tuple)))
for set1 in self.sets + [[]]:
tmp_tuple = tuple([(elem, ) for elem in set1])
            self.assertEqual(self.intbitset(set1, trailing_bits=True), self.intbitset(tmp_tuple, trailing_bits=True))
def test_marshalling(self):
"""intbitset - marshalling"""
for set1 in self.sets + [[]]:
self.assertEqual(self.intbitset(set1), self.intbitset(self.intbitset(set1).fastdump()))
for set1 in self.sets + [[]]:
self.assertEqual(self.intbitset(set1, trailing_bits=True), self.intbitset(self.intbitset(set1, trailing_bits=True).fastdump()))
def test_pickling(self):
"""intbitset - pickling"""
from six.moves import cPickle
for set1 in self.sets + [[]]:
self.assertEqual(self.intbitset(set1), cPickle.loads(cPickle.dumps(self.intbitset(set1), -1)))
for set1 in self.sets + [[]]:
self.assertEqual(self.intbitset(set1, trailing_bits=True), cPickle.loads(cPickle.dumps(self.intbitset(set1, trailing_bits=True), -1)))
def test_set_emptiness(self):
"""intbitset - tests for emptiness"""
for set1 in self.sets + [[]]:
self.assertEqual(not set(set1), not self.intbitset(set1))
def test_set_len(self):
"""intbitset - tests len()"""
for set1 in self.sets + [[]]:
intbitset1 = self.intbitset(set1)
pythonset1 = set(set1)
self.assertEqual(len(pythonset1), len(intbitset1))
intbitset1.add(76543)
pythonset1.add(76543)
self.assertEqual(len(pythonset1), len(intbitset1))
intbitset1.remove(76543)
pythonset1.remove(76543)
self.assertEqual(len(pythonset1), len(intbitset1))
def test_set_clear(self):
"""intbitset - clearing"""
for set1 in self.sets + [[]]:
intbitset1 = self.intbitset(set1)
intbitset1.clear()
self.assertEqual(list(intbitset1), [])
intbitset1 = self.intbitset(set1, trailing_bits=True)
intbitset1.clear()
self.assertEqual(list(intbitset1), [])
def test_set_repr(self):
"""intbitset - Pythonic representation"""
if False:
big_examples = self.big_examples
else:
big_examples = []
for set1 in self.sets + [[]] + big_examples:
intbitset1 = self.intbitset(set1)
intbitset = self.intbitset
self.assertEqual(intbitset1, eval(repr(intbitset1)))
for set1 in self.sets + [[]] + big_examples:
intbitset1 = self.intbitset(set1, trailing_bits=True)
self.assertEqual(intbitset1, eval(repr(intbitset1)))
def test_set_cmp(self):
"""intbitset - (non infinite) set comparison"""
for set1 in self.sets + [[]]:
for set2 in self.sets + [[]]:
for op in self.cmp_list:
self.assertEqual(op[0](self.intbitset(set1), self.intbitset(set2)), op[1](set(set1), set(set2)), "Error in comparing %s %s with comparing function %s" % (set1, set2, op[0].__name__))
def test_set_update_with_signs(self):
"""intbitset - set update with signs"""
dict1 = {10 : -1, 20 : 1, 23 : -1, 27 : 1, 33 : -1, 56 : 1, 70 : -1, 74 : 1}
for set1 in self.sets + [[]]:
intbitset1 = self.intbitset(set1)
intbitset1.update_with_signs(dict1)
        up_
import logging
import winreg
from nativeconfig.configs.base_config import BaseConfig
LOG = logging.getLogger('nativeconfig')
ERROR_NO_MORE_ITEMS = 259
ERROR_NO_MORE_FILES = 18
def traverse_registry_key(key, sub_key):
"""
Traverse registry key and yield one by one.
@raise WindowsError: If key cannot be opened (e.g. does not exist).
"""
current_key = winreg.OpenKey(key, sub_key, 0, winreg.KEY_ALL_ACCESS)
try:
i = 0
while True:
next_key = winreg.EnumKey(current_key, i)
for k in traverse_registry_key(key, r'{}\{}'.format(sub_key, next_key)):
yield k
i += 1
except OSError:
yield sub_key
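# Usage sketch, mirroring del_value_cache_free below; r'Software\MyApp' is a
# hypothetical path. Children are yielded before their parent, so each
# DeleteKey call sees an already-empty key:
#
#     for k in traverse_registry_key(winreg.HKEY_CURRENT_USER, r'Software\MyApp'):
#         winreg.DeleteKey(winreg.HKEY_CURRENT_USER, k)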
class RegistryConfig(BaseConfig):
"""
Store config in Windows Registry.
@cvar REGISTRY_KEY: Key in the registry where config will be stored.
@cvar REGISTRY_PATH: Path relative to REGISTRY_KEY that points to the config.
"""
LOG = LOG.getChild('RegistryConfig')
REGISTRY_KEY = winreg.HKEY_CURRENT_USER
def __init__(self):
        k = winreg.CreateKey(self.REGISTRY_KEY, self.REGISTRY_PATH)
winreg.CloseKey(k)
super(RegistryConfig, self).__init__()
#{ BaseConfig
def get_value_cache_free(self, name):
try:
with winreg.OpenKey(self.REGISTRY_KEY, self.REGISTRY_PATH) as app_key:
try:
                    value, value_type = winreg.QueryValueEx(app_key, name)
                    if value_type != winreg.REG_SZ:
                        raise ValueError("value must be a REG_SZ")
return value
except OSError:
pass
        except Exception:
self.LOG.exception("Unable to get \"%s\" from the registry:", name)
return None
def set_value_cache_free(self, name, raw_value):
try:
if raw_value is not None:
with winreg.OpenKey(self.REGISTRY_KEY, self.REGISTRY_PATH, 0, winreg.KEY_WRITE) as app_key:
winreg.SetValueEx(app_key, name, 0, winreg.REG_SZ, raw_value)
else:
self.del_value_cache_free(name)
        except Exception:
self.LOG.exception("Unable to set \"%s\" in the registry:", name)
def del_value_cache_free(self, name):
try:
try:
for k in traverse_registry_key(self.REGISTRY_KEY, r'{}\{}'.format(self.REGISTRY_PATH, name)):
winreg.DeleteKey(self.REGISTRY_KEY, k)
except OSError:
with winreg.OpenKey(self.REGISTRY_KEY, self.REGISTRY_PATH, 0, winreg.KEY_ALL_ACCESS) as app_key:
winreg.DeleteValue(app_key, name)
        except Exception:
self.LOG.info("Unable to delete \"%s\" from the registry:", name)
def get_array_value_cache_free(self, name):
try:
with winreg.OpenKey(self.REGISTRY_KEY, self.REGISTRY_PATH) as app_key:
value, value_type = winreg.QueryValueEx(app_key, name)
                if value_type != winreg.REG_MULTI_SZ:
raise ValueError("value must be a REG_MULTI_SZ")
return value
        except Exception:
self.LOG.info("Unable to get array \"%s\" from the registry:", name, exc_info=True)
return None
def set_array_value_cache_free(self, name, value):
try:
if value is not None:
with winreg.OpenKey(self.REGISTRY_KEY, self.REGISTRY_PATH, 0, winreg.KEY_WRITE) as app_key:
winreg.SetValueEx(app_key, name, 0, winreg.REG_MULTI_SZ, value)
else:
self.del_value_cache_free(name)
        except Exception:
self.LOG.exception("Unable to set \"%s\" in the registry:", name)
def get_dict_value_cache_free(self, name):
try:
with winreg.OpenKey(self.REGISTRY_KEY, r'{}\{}'.format(self.REGISTRY_PATH, name), 0, winreg.KEY_ALL_ACCESS) as app_key:
                v = {}
                try:
                    i = 0
                    while True:
                        # a distinct loop variable keeps the outer `name`
                        # parameter intact for the error log below
                        value_name, value, value_type = winreg.EnumValue(app_key, i)
                        if value_type != winreg.REG_SZ:
                            raise ValueError("value must be a REG_SZ")
                        if value is not None:
                            v[value_name] = value
                        i += 1
except OSError as e:
if e.winerror != ERROR_NO_MORE_ITEMS and e.winerror != ERROR_NO_MORE_FILES:
raise
else:
pass # end of keys
return v
        except Exception:
self.LOG.info("Unable to get dict '%s' from the registry:", name, exc_info=True)
return None
def set_dict_value_cache_free(self, name, value):
try:
self.del_value_cache_free(name)
if value is not None:
with winreg.CreateKey(self.REGISTRY_KEY, r'{}\{}'.format(self.REGISTRY_PATH, name)) as app_key:
for k, v in value.items():
winreg.SetValueEx(app_key, k, 0, winreg.REG_SZ, v)
        except Exception:
self.LOG.exception("Unable to set \"%s\" in the registry:", name)
#}
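# A minimal usage sketch (hypothetical registry path): subclass and set
# REGISTRY_PATH before instantiating.
#
#     class MyAppConfig(RegistryConfig):
#         REGISTRY_PATH = r'Software\MyApp'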
se.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command-line tools for authenticating via OAuth 2.0
Do the OAuth 2.0 Web Server dance for a command line application. Stores the
generated credentials in a common file that is used by other example apps in
the same directory.
"""
from __future__ import print_function
__author__ = 'jcgregorio@google.com (Joe Gregorio)'
__all__ = ['argparser', 'run_flow', 'run', 'message_if_missing']
import logging
import socket
import sys
import webbrowser
from six.moves import BaseHTTPServer
from six.moves import input
from six.moves import urllib
from oauth2client import client
from oauth2client import util
_CLIENT_SECRETS_MESSAGE = """WARNING: Please configure OAuth 2.0
To make this sample run you will need to populate the client_secrets.json file
found at:
%s
with information from the APIs Console <https://code.google.com/apis/console>.
"""
def _CreateArgumentParser():
try:
import argparse
except ImportError:
return None
parser = argparse.ArgumentParser(add_help=False)
parser.add_argument('--auth_host_name', default='localhost',
help='Hostname when running a local web server.')
parser.add_argument('--noauth_local_webserver', action='store_true',
default=False, help='Do not run a local web server.')
parser.add_argument('--auth_host_port', default=[8080, 8090], type=int,
nargs='*', help='Port web server should listen on.')
parser.add_argument('--logging_level', default='ERROR',
choices=['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'],
help='Set the logging level of detail.')
return parser
# argparser is an ArgumentParser that contains command-line options expected
# by tools.run(). Pass it in as part of the 'parents' argument to your own
# ArgumentParser.
argparser = _CreateArgumentParser()
class ClientRedirectServer(BaseHTTPServer.HTTPServer):
"""A server to handle OAuth 2.0 redirects back to localhost.
Waits for a single request and parses the query parameters
into query_params and then stops serving.
"""
query_params = {}
class ClientRedirectHandler(BaseHTTPServer.BaseHTTPRequestHandler):
"""A handler for OAuth 2.0 redirects back to localhost.
Waits for a single request and parses the query parameters
into the servers query_params and then stops serving.
"""
def do_GET(self):
"""Handle a GET request.
Parses the query parameters and prints a message
if the flow has completed. Note that we can't detect
if an error occurred.
"""
self.send_response(200)
self.send_header("Content-type", "text/html")
self.end_headers()
query = self.path.split('?', 1)[-1]
query = dict(urllib.parse.parse_qsl(query))
self.server.query_params = query
self.wfile.write("<html><head><title>Authentication Status</title></head>")
self.wfile.write("<body><p>The authentication flow has completed.</p>")
self.wfile.write("</body></html>")
def log_message(self, format, *args):
"""Do not log messages to stdout while running as command line program."""
@util.positional(3)
def run_flow(flow, storage, flags, http=None):
"""Core code for a command-line application.
The run() function is called from your application and runs through all the
steps to obtain credentials. It takes a Flow argument and attempts to open an
authorization server page in the user's default web browser. The server asks
the user to grant your application access to the user's data. If the user
  grants access, the run() function returns new credentials. The new credentials
are also stored in the Storage argument, which updates the file associated
with the Storage object.
It presumes it is run from a command-line application and supports the
following flags:
    --auth_host_name: Host name to use when running a local web server
to handle redirects during OAuth authorization.
(default: 'localhost')
--auth_host_port: Port to use when running a local web server to handle
                      redirects during OAuth authorization;
repeat this option to specify a list of values
(default: '[8080, 8090]')
(an integer)
--[no]auth_local_webserver: Run a local web server to handle redirects
during OAuth authorization.
(default: 'true')
The tools module defines an ArgumentParser the already contains the flag
definitions that run() requires. You can pass that ArgumentParser to your
ArgumentParser constructor:
parser = argparse.ArgumentParser(description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter,
parents=[tools.argparser])
flags = parser.parse_args(argv)
Args:
flow: Flow, an OAuth 2.0 Flow to step through.
storage: Storage, a Storage to store the credential in.
flags: argparse.ArgumentParser, the command-line flags.
http: An instance of httplib2.Http.request
or something that acts like it.
Returns:
Credentials, the obtained credential.
"""
logging.getLogger().setLevel(getattr(logging, flags.logging_level))
if not flags.noauth_local_webserver:
success = False
port_number = 0
for port in flags.auth_host_port:
port_number = port
try:
httpd = ClientRedirectServer((flags.auth_host_name, port),
ClientRedirectHandler)
except socket.error:
pass
else:
success = True
break
flags.noauth_local_webserver = not success
if not success:
            print('Failed to start a local webserver listening on either port 8080')
            print('or port 8090. Please check your firewall settings and locally')
print('running programs that may be blocking or using those ports.')
print()
print('Falling back to --noauth_local_webserver and continuing with')
print('authorization.')
print()
if not flags.noauth_local_webserver:
oauth_callback = 'http://%s:%s/' % (flags.auth_host_name, port_number)
else:
oauth_callback = client.OOB_CALLBACK_URN
flow.redirect_uri = oauth_callback
authorize_url = flow.step1_get_authorize_url()
if not flags.noauth_local_webserver:
webbrowser.open(authorize_url, new=1, autoraise=True)
print('Your browser has been opened to visit:')
print()
print(' ' + authorize_url)
print()
print('If your browser is on a different machine then exit and re-run this')
print('application with the command-line parameter ')
print()
print(' --noauth_local_webserver')
print()
else:
print('Go to the following link in your browser:')
print()
print(' ' + authorize_url)
print()
code = None
if not flags.noauth_local_webserver:
httpd.handle_request()
if 'error' in httpd.query_params:
sys.exit('Authentication request was rejected.')
if 'code' in httpd.query_params:
code = httpd.query_params['code']
else:
print('Failed to find "code" in the query parameters of the redirect.')
sys.exit('Try running with --noauth_local_webserver.')
else:
        code = input('Enter verification code: ').strip()
try:
credential = flow.step2_exchange(code, http=http)
except client.FlowExchangeError as e:
sys.exit('Authentication has failed: %s' % e)
storage.put(credential)
credential.set_store(storage)
print('Authentication successful.')
return credential
def message_if_missing(filename):
"""Helpful message to display if the CLIENT_SECRETS file is missing."""
return _CLIENT_SECRETS_MESSAGE % filename
try:
from oauth2client.old_run import run
from oauth2client.old_run import FLAGS
except ImportError:
def run(*args, **kwargs):
raise NotImplementedError(
            'The gflags library must be installed to use tools.run().')
import os
import subprocess
from pymongo import MongoClient
from flask import Flask, redirect, url_for, request, flash
from flask_bootstrap import Bootstrap
from flask_mongoengine import MongoEngine
from flask_modular_auth import AuthManager, current_authenticated_entity, SessionBasedAuthProvider, KeyBasedAuthProvider
from .reverse_proxy import ReverseProxied
# Initialize app
app = Flask(__name__)
app.wsgi_app = ReverseProxied(app.wsgi_app)
# Generate or load secret key
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
SECRET_FILE = os.path.join(BASE_DIR, 'secret.txt')
try:
app.secret_key = open(SECRET_FILE).read().strip()
except IOError:
try:
import random
app.secret_key = ''.join([random.SystemRandom().choice('abcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*(-_=+)') for i in range(50)])
secret = open(SECRET_FILE, 'w')
secret.write(app.secret_key)
secret.close()
    except IOError:
        raise Exception('Please create a %s file with random characters '
                        'to generate your secret key!' % SECRET_FILE)
# Load config
config_path = os.getenv('CONFIG_PATH', 'mass_flask_config.config_development.DevelopmentConfig')
app.config.from_object(config_path)
# Init db
db = MongoEngine(app)
# Init flask-bootstrap
Bootstrap(app)
# Init auth system
def setup_session_auth(user_loader):
app.session_provider = SessionBasedAuthProvider(user_loader)
auth_manager.register_auth_provider(app.session_provider)
def setup_key_based_auth(key_loader):
app.key_based_provider = KeyBasedAuthProvider(key_loader)
auth_manager.register_auth_provider(app.key_based_provider)
def unauthorized_callback():
if current_authenticated_entity.is_authenticated:
flash('You are not authorized to access this resource!', 'warning')
return redirect(url_for('mass_flask_webui.index'))
else:
return redirect(url_for('mass_flask_webui.login', next=request.url))
auth_manager = AuthManager(app, unauthorized_callback=unauthorized_callback)
# Set the version number. For the future we should probably read it from a file.
app.version = '1.0-alpha1'
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import sphinx_rtd_theme
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../src'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.autosummary', 'sphinx.ext.intersphinx']
autodoc_mock_imports = ['pint',]
intersphinx_mapping = {
'python': ('https://docs.python.org/2', None),
'numpy': ('http://docs.scipy.org/doc/numpy/', None),
'scipy': ('http://docs.scipy.org/doc/scipy/reference/', None),
'pandas': ('http://pandas-docs.github.io/pandas-docs-travis/', None),
'mdanalysis': ('http://pythonhosted.org/MDAnalysis', None),
}
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'iago'
copyright = u'2016, Guido Falk von Rudorff'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.0'
# The full version, including alpha/beta/rc tags.
release = '0.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#html_theme = 'default'
html_theme = "sphinx_rtd_theme"
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is | shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'iagodoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'iago.tex', u'iago Documentation',
u'Guido Falk von Rudorff', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'iago', u'iago Documentation',
[u'Guido Falk von Rudorff'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'iago', u'iago Documentation',
u'Guido Falk von Rudorff', 'iago', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import unittest
import requests_mock
from airflow.operators.http_operator import SimpleHttpOperator
try:
from unittest import mock
except ImportError:
import mock
class SimpleHttpOpTests(unittest.TestCase):
def setUp(self):
os.environ['AIRFLOW_CONN_HTTP_EXAMPLE'] = 'http://www.example.com'
@requests_mock.mock()
def test_response_in_logs(self, m):
"""
Test that when using SimpleHttpOperator with 'GET',
the log contains 'Example Domain' in it
"""
m.get('http://www.example.com', text='Example.com fake response')
operator = SimpleHttpOperator(
task_id='test_HTTP_op',
method='GET',
endpoint='/',
http_conn_id='HTTP_EXAMPLE',
log_response=True,
)
with mock.patch.object(operator.log, 'info') as mock_info:
operator.execute(None)
mock_info.assert_called_with('Example.com fake response')
# -*- coding:utf-8 -*-
# This code is automatically transpiled by Saklient Translator
import six
from ..client import Client
from .model import Model
from ..resources.resource import Resource
from ..resources.routerplan import RouterPlan
from ...util import Util
import saklient
str = six.text_type
# module saklient.cloud.models.model_routerplan
class Model_RouterPlan(Model):
## A class providing functionality for searching router bandwidth plans.
## @private
# @return {str}
def _api_path(self):
return "/product/internet"
## @private
# @return {str}
def _root_key(self):
return "InternetPlan"
## @private
# @return {str}
def _root_key_m(self):
return "InternetPlans"
## @private
# @return {str}
def _class_name(self):
return "RouterPlan"
## @private
# @param {any} obj
# @param {bool} wrapped=False
# @return {saklient.cloud.resources.resource.Resource}
def _create_resource_impl(self, obj, wrapped=False):
Util.validate_type(wrapped, "bool")
return RouterPlan(self._client, obj, wrapped)
## Specifies the starting offset of the next list to be fetched.
#
# @param {int} offset offset
# @return {saklient.cloud.models.model_routerplan.Model_RouterPlan} this
def offset(self, offset):
Util.validate_type(offset, "int")
return self._offset(offset)
## Specifies the maximum number of records in the next list to be fetched.
#
# @param {int} count maximum number of records
# @return {saklient.cloud.models.model_routerplan.Model_RouterPlan} this
def limit(self, count):
Util.validate_type(count, "int | ")
return self._limit(count)
## Directly specifies a filtering setting for the Web API.
#
# @param {str} key key
# @param {any} value value
# @param {bool} multiple=False Set to true when passing an array as value to perform an exact-match OR search. Normally value is a scalar and matched fuzzily.
# @return {saklient.cloud.models.model_routerplan.Model_RouterPlan}
def filter_by(self, key, value, multiple=False):
Util.validate_type(key, "str")
Util.validate_type(multiple, "bool")
return self._filter_by(key, value, multiple)
## Discards all state configured for the next request.
#
# @return {saklient.cloud.models.model_routerplan.Model_RouterPlan} this
def reset(self):
return self._reset()
## Retrieves the unique resource with the specified ID.
#
# @param {str} id
# @return {saklient.cloud.resources.routerplan.RouterPlan} resource object
def get_by_id(self, id):
Util.validate_type(id, "str")
return self._get_by_id(id)
## Executes a resource search request and retrieves the results as a list.
#
# @return {saklient.cloud.resources.routerplan.RouterPlan[]} array of resource objects
def find(self):
return self._find()
## @ignore
# @param {saklient.cloud.client.Client} client
def __init__(self, client):
super(Model_RouterPlan, self).__init__(client)
Util.validate_type(client, "saklient.cloud.client.Client")
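# --- Illustrative usage sketch (assumption; `client` stands for an
# authenticated saklient.cloud.client.Client instance). The model exposes a
# fluent query interface, so fetching the first ten plans might look like:
#
# model = Model_RouterPlan(client)
# plans = model.offset(0).limit(10).find()
# for plan in plans:
#     print(plan)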
from tile_view import *
from table import *
from game_state import *
from circ_button import *
class TableView:
GREY = (100,100,100)
BLACK = (0,0,0)
WHITE = (255,255,255)
BGCOLOR = (60,60,100)
TILE_COLOR = (90, 255, 90)
TILE_RADIUS = 30
TABLE_POS = (245, 90)
# table : Table
# pl1, pl2: Player
# game_board: list(TileView)
# pl1_view pl2_view
# marble_stack: the available marbles
def __init__(self, state: State, surface: pygame.Surface):
pl1 = state.pl1
pl2 = state.pl2
table = state.t
self.game_board = []
r = TableView.TILE_RADIUS
for i in range(-3,4):
for j in range(-3,4):
if math.fabs(i+j)<=3:
self.game_board.append(TileView(table.get(i,j),\
TableView.TABLE_POS[0], TableView.TABLE_POS[1],\
r, TableView.TILE_COLOR))#, lambda btn : print(btn.col, btn.row)))
W = surface.get_width()
H = surface.get_height()
self.pl1_view = PlayerView(pl1, (0,0))
self.pl2_view = PlayerView(pl2, (W-TableView.TILE_RADIUS*2,0))
self.marble_stack = []
self.marble_stack.append(CircleButton(int(W/2-r*3), H-r*2, r, \
TableView.WHITE, str(table.marbles[0])))
self.marble_stack.append(CircleButton(int(W/2), H-r*2, r, \
TableView.GREY, str(table.marbles[1])))
self.marble_stack.append(CircleButton(int(W/2+r*3), H-r*2, r, \
TableView.BLACK, str(table.marbles[2])))
def draw(self, surface: pygame.Surface, state: State):
surface.fill(TableView.BGCOLOR)
for tile in self.game_board:
tile.draw_button(surface,state.t.get(tile.col, tile.row))
self.pl1_view.draw(surface, state.pl1)
self.pl2_view.draw(surface, state.pl2)
for i in range(len(state.t.marbles)):
btn = self.marble_stack[i]
btn.text = str(state.t.marbles[i])
btn.draw_button(surface)
def get_pressed_tile(self, pos):
for tile in self.game_board:
if tile.pressed(pos):
return (tile.col, tile.row)
return None
def get_pressed_marble(self, pos):
for (i,marble) in enumerate(self.marble_stack):
if marble.pressed(pos):
return i
return None
class PlayerView:
def __init__(self, pl: Player, pos: (int, int)):
self.pos = pos
r = int(TableView.TILE_RADIUS/2)
self.buttons = [CircleButton(pos[0]+r*2, \
r*3, r, \
TableView.WHITE, str(pl.marbles[0]))]
self.buttons.append(CircleButton(pos[0]+r*2, \
r*6, r, \
TableView.GREY, str(pl.marbles[1])))
self.buttons.append(CircleButton(pos[0]+r*2, \
r*9, r, \
TableView.BLACK, str(pl.marbles[2])))
def draw(self, surface: pygame.Surface, player: Player):
for i in range(len(self.buttons)):
btn = self.buttons[i]
btn.text = str(player.marbles[i])
btn.draw_button(surface)
font_size = int(TableView.TILE_RADIUS*3//len(player.name))
myFont = pygame.font.SysFont("Calibri", font_size)
myText = myFont.render(player.name, 1, (0,0,0))
surface.blit(myText, (self.pos[0]+TableView.TILE_RADIUS/2,\
self.pos[1] + TableView.TILE_RADIUS/2))
import flask
import MemeRepo.db as db
import MemeRepo.funcs as fnc
from MemeRepo.config import config
def handle(code, uri):
result = db.get_file(uri)
if result == None:
return flask.render_template("error.html", msg="That file does not exist", code="400"), 400
else:
if result['owner'] == code:
db.delete_file(uri)
return 'deleted'
else:
return flask.render_template("error.html", msg="You do not own that file", code="403"), 403 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Example code to do square matrix multiplication on Android Phone."""
import tvm
from tvm import te
import os
from tvm import rpc
from tvm.contrib import util, ndk
import numpy as np
# Set to be address of tvm proxy.
proxy_host = os.environ["TVM_ANDROID_RPC_PROXY_HOST"]
proxy_port = 9090
key = "android"
# Change target configuration.
# Run `adb shell cat /proc/cpuinfo` to find the arch.
arch = "arm64"
target = "llvm -mtriple=%s-linux-android" % arch
def ngflops(N):
return 2.0 * float(N * N * N) / (10 ** 9)
dtype = "float32"
def evaluate(func, ctx, N, times):
a_np = np.random.uniform(size=(N, N)).astype(dtype)
b_np = np.random.uniform(size=(N, N)).astype(dtype)
a = tvm.nd.array(a_np, ctx)
b = tvm.nd.array(b_np, ctx)
c = tvm.nd.array(np.zeros((N, N), dtype=dtype), ctx)
time_f = func.time_evaluator(func.entry_name, ctx, number=times)
cost = time_f(a, b, c).mean
gf = ngflops(N) / cost
print("%g secs/op, %g GFLOPS" % (cost, gf))
np.testing.assert_almost_equal(c.asnumpy(), a_np.dot(b_np), decimal=2)
def test_gemm_gpu(N, times, bn, num_block, num_thread):
assert bn <= N
assert num_thread * num_thread * 16 <= N
assert num_block * num_block * 2 <= N
A = te.placeholder((N, N), name="A")
B = te.placeholder((N, N), name="Btmp")
k = te.reduce_axis((0, N), name="k")
packedB = te.compute((N, N / bn, bn), lambda x, y, z: B[x, y * bn + z], name="B")
C = te.compute(
(N, N), lambda ii, jj: te.sum(A[ii, k] * packedB[k, jj / bn, jj % bn], axis=k), name="C"
)
s = te.create_schedule(C.op)
CC = s.cache_write(C, "local")
block_x = te.thread_axis("blockIdx.x")
block_y = te.thread_axis("blockIdx.y")
thread_x = te.thread_axis("threadIdx.x")
thread_y = te.thread_axis("threadIdx.y")
thread_xz = te.thread_axis((0, 2), "vthread", name="vx")
thread_yz = te.thread_axis((0, 2), "vthread", name="vy")
pby, pbi = s[packedB].split(packedB.op.axis[0], nparts=num_thread)
pbx, pbj = s[packedB].split(packedB.op.axis[1], nparts=num_thread)
s[packedB].bind(pby, thread_y)
s[packedB].bind(pbx, thread_x)
pbz, pbk = s[packedB].split(packedB.op.axis[2], factor=8)
s[packedB].vectorize(pbk)
by, yi = s[C].split(C.op.axis[0], nparts=num_block)
bx, xi = s[C].split(C.op.axis[1], nparts=num_thread)
s[C].bind(by, block_y)
s[C].bind(bx, thread_y)
s[C].reorder(by, bx, yi, xi)
tyz, yi = s[C].split(yi, nparts=2)
ty, yi = s[C].split(yi, nparts=num_block)
txz, xi = s[C].split(xi, nparts=2)
tx, xi = s[C].split(xi, nparts=num_thread)
s[C].reorder(tyz, txz, ty, tx, yi, xi)
s[C].bind(tyz, thread_yz)
s[C].bind(txz, thread_xz)
s[C].bind(ty, block_x)
s[C].bind(tx, thread_x)
xyi, xxi = s[C].split(xi, factor=8)
s[C].reorder(tyz, txz, ty, tx, yi, xyi, xxi)
s[C].vectorize(xxi)
s[CC].compute_at(s[C], yi)
yo, xo = CC.op.axis
s[CC].reorder(k, yo, xo)
xo, xi = s[CC].split(xo, factor=8)
s[CC].vectorize(xi)
ko, ki = s[CC].split(k, factor=2)
s[CC].unroll(ki)
print(tvm.lower(s, [A, B, C], simple_mode=True))
f = tvm.build(s, [A, B, C], "opencl", target_host=target, name="gemm_gpu")
temp = util.tempdir()
path_dso = temp.relpath("gemm_gpu.so")
f.export_library(path_dso, ndk.create_shared)
# connect to the proxy
remote = rpc.connect(proxy_host, proxy_port, key=key)
ctx = remote.cl(0)
remote.upload(path_dso)
f = remote.load_module("gemm_gpu.so")
evaluate(f, ctx, N, times)
if __name__ == "__main__":
test_gemm_gpu(1024, times=5, bn=8, num_block=2, num_thread=8)
import arcpy, os, json, csv
from portal import additem, shareItem, generateToken, getUserContent, updateItem, getGroupID, deleteItem, getGroupContent
from metadata import metadata
from ESRImapservice import ESRImapservice
class csvportal(object):
def __init__(self, user, password, portal, workspace, groups=[]):
"""Connect to portal with username and password, also set the local workspace"""
self.user = user
self.password = password
self.portal = portal
self.groups = groups
self.token = generateToken(self.user, self.password, self.portal)
self.groupIDs = [getGroupID(g, self.token, self.portal) for g in self.groups]
if len(self.groupIDs) == 0:
self.userContent = getUserContent(user, '', self.token, self.portal )
else:
self.userContent = getGroupContent(self.groups[0], self.token, self.portal)
self.existingIDs = { n['title'] : n['id'] for n in self.userContent["items"]}
self.LayersFoundinMXD = []
self.ws = workspace
if workspace: arcpy.env.workspace = workspace
def updateToken(self):
"""refresh the token, might be necessary if becomes invalid"""
self.token = generateToken(self.user, self.password, self.portal)
return self.token
def uploadCsv(self, csvpath, sep=";", headerlines=1, nameCol=0, pathCol=1, urlCol=2):
"""upload every row in a csv"""
with open( csvpath , 'rb') as csvfile:
nr = 0
csv_reader = csv.reader(csvfile, dialect=csv.excel, delimiter=sep)
for n in range(headerlines): csv_reader.next()
for row in csv_reader:
line = [unicode(cell, 'latin-1') for cell in row]
name, ds, url = (line[nameCol], line[pathCol], line[urlCol])
if self.ws and os.path.dirname(ds).endswith('.sde'):
ds = os.path.join(self.ws , os.path.basename(ds) )
self.addLyr(ds, name, url, self.groupIDs)
#generate new token every 50 uses
if not nr%50 : self.token = generateToken(self.user, self.password, self.portal)
nr += 1
##TODO: DELETE layers in group and not in csv
def addLyr(self, dataSource, name, serviceUrl, groupIDs=[]):
"""Add *dataSource* to *portal* for *user* , as a item with *name*
representing a layer in *service* """
meta = metadata.metadataFromArcgis( dataSource )
author = meta.credits if len( meta.credits ) else "Stad Antwerpen"
descrip = ( "<strong>"+ meta.title +"</strong> <div><em>"+
meta.orgname + "</em></div> " + meta.description +
"\n<br/> Creatiedatum: " + meta.createDate +
"\n<br/> Publicatiedatum: " + meta.pubDate +
"\n<br/> Revisiedatum: " + meta.reviseDate +
"\n<br/> Beheer: " + meta.contacts +
"\n<br/> Contact: " + meta.eMails )
if name in self.existingIDs.keys():
self.LayersFoundinMXD.append(name)
arcpy.AddMessage( "updating " + name )
item = updateItem(self.user, self.token, self.portal, self.existingIDs[name], serviceUrl,
title=name, summary=meta.purpose, description=descrip, author=author, tags=",".join(meta.tags))
else:
arcpy.AddMessage( "adding " + name )
item = additem(self.user, self.token, self.portal, serviceUrl,
title=name, summary=meta.purpose, description=descrip, author=author, tags=",".join(meta.tags) )
if "success" in item.keys() and item["success"]:
id = item["id"]
arcpy.AddMessage( shareItem(id, self.token, self.portal, True, True, groupIDs) )
elif "success" in item.keys() and not item["success"]:
raise Exception( "Error uploading "+ name +" "+ json.dumps(item))
else:
arcpy.AddMessage("unsure of success for layer "+ name +" "+ json.dumps(result))
def delLyr(self, name):
if name in self.existingIDs.keys():
result = deleteItem(self.existingIDs[name] , self.token, self.portal, self.user)
if "success" in result.keys() and result["success"]:
arcpy.AddMessage("Deleted layer: " + name )
elif "success" in result.keys() and not result["success"]:
raise Exception( "Error deleting "+ name +" "+ json.dumps(result))
else:
arcpy.AddMessage("unsure of success for layer "+ name +" "+ json.dumps(result)) |
def DeleteValue(self, ValueName=defaultNamedNotOptArg):
'DeleteValue'
return self._oleobj_.InvokeTypes(10, LCID, 1, (24, 0), ((8, 1),),ValueName
)
def EnumKeys(self, Index=defaultNamedNotOptArg):
'EnumKeys'
# Result is a Unicode object
return self._oleobj_.InvokeTypes(11, LCID, 1, (8, 0), ((3, 1),),Index
)
def EnumValues(self, Index=defaultNamedNotOptArg):
'EnumValues'
# Result is a Unicode object
return self._oleobj_.InvokeTypes(12, LCID, 1, (8, 0), ((3, 1),),Index
)
def GetBinaryValue(self, ValueName=defaultNamedNotOptArg):
'GetBinaryValue'
return self._ApplyTypes_(2, 1, (12, 0), ((8, 1),), u'GetBinaryValue', None,ValueName
)
def GetLongValue(self, ValueName=defaultNamedNotOptArg):
'GetlongValue'
return self._oleobj_.InvokeTypes(6, LCID, 1, (3, 0), ((8, 1),),ValueName
)
def GetStringValue(self, ValueName=defaultNamedNotOptArg):
'GetStringValue'
# Result is a Unicode object
return self._oleobj_.InvokeTypes(4, LCID, 1, (8, 0), ((8, 1),),ValueName
)
# Result is of type ISpeechDataKey
def OpenKey(self, SubKeyName=defaultNamedNotOptArg):
'OpenKey'
ret = self._oleobj_.InvokeTypes(7, LCID, 1, (9, 0), ((8, 1),),SubKeyName
)
if ret is not None:
ret = Dispatch(ret, u'OpenKey', '{CE17C09B-4EFA-44D5-A4C9-59D9585AB0CD}')
return ret
def SetBinaryValue(self, ValueName=defaultNamedNotOptArg, Value=defaultNamedNotOptArg):
'SetBinaryValue'
return self._oleobj_.InvokeTypes(1, LCID, 1, (24, 0), ((8, 1), (12, 1)),ValueName
, Value)
def SetLongValue(self, ValueName=defaultNamedNotOptArg, Value=defaultNamedNotOptArg):
'SetLongValue'
return self._oleobj_.InvokeTypes(5, LCID, 1, (24, 0), ((8, 1), (3, 1)),ValueName
, Value)
def SetStringValue(self, ValueName=defaultNamedNotOptArg, Value=defaultNamedNotOptArg):
'SetStringValue'
return self._oleobj_.InvokeTypes(3, LCID, 1, (24, 0), ((8, 1), (8, 1)),ValueName
, Value)
_prop_map_get_ = {
}
_prop_map_put_ = {
}
def __iter__(self):
"Return a Python iterator for this object"
try:
ob = self._oleobj_.InvokeTypes(-4,LCID,3,(13, 10),())
except pythoncom.error:
raise TypeError("This object does not support enumeration")
return win32com.client.util.Iterator(ob, None)
class ISpeechFileStream(DispatchBaseClass):
'ISpeechFileStream Interface'
CLSID = IID('{AF67F125-AB39-4E93-B4A2-CC2E66E182A7}')
coclass_clsid = IID('{947812B3-2AE1-4644-BA86-9E90DED7EC91}')
def Close(self):
'Close'
return self._oleobj_.InvokeTypes(101, LCID, 1, (24, 0), (),)
def Open(self, FileName=defaultNamedNotOptArg, FileMode=0, DoEvents=False):
'Open'
return self._oleobj_.InvokeTypes(100, LCID, 1, (24, 0), ((8, 1), (3, 49), (11, 49)),FileName
, FileMode, DoEvents)
def Read(self, Buffer=pythoncom.Missing, NumberOfBytes=defaultNamedNotOptArg):
'Read'
return self._ApplyTypes_(2, 1, (3, 0), ((16396, 2), (3, 1)), u'Read', None,Buffer
, NumberOfBytes)
def Seek(self, Position=defaultNamedNotOptArg, Origin=0):
'Seek'
return self._ApplyTypes_(4, 1, (12, 0), ((12, 1), (3, 49)), u'Seek', None,Position
, Origin)
def Write(self, Buffer=defaultNamedNotOptArg):
'Write'
return self._oleobj_.InvokeTypes(3, LCID, 1, (3, 0), ((12, 1),),Buffer
)
_prop_map_get_ = {
# Method 'Format' returns object of type 'ISpeechAudioFormat'
"Format": (1, 2, (9, 0), (), "Format", '{E6E9C590-3E18-40E3-8299-061F98BDE7C7}'),
}
_prop_map_put_ = {
"Format": ((1, LCID, 8, 0),()),
}
def __iter__(self):
"Return a Python iterator for this object"
try:
ob = self._oleobj_.InvokeTypes(-4,LCID,3,(13, 10),())
except pythoncom.error:
raise TypeError("This object does not support enumeration")
return win32com.client.util.Iterator(ob, None)
class ISpeechGrammarRule(DispatchBaseClass):
'ISpeechGrammarRule Interface'
CLSID = IID('{AFE719CF-5DD1-44F2-999C-7A399F1CFCCC}')
coclass_clsid = None
def AddResource(self, ResourceName=defaultNamedNotOptArg, ResourceValue=defaultNamedNotOptArg):
'AddResource'
return self._oleobj_.InvokeTypes(6, LCID, 1, (24, 0), ((8, 1), (8, 1)),ResourceName
, ResourceValue)
# Result is of type ISpeechGrammarRuleState
def AddState(self):
'AddState'
ret = self._oleobj_.InvokeTypes(7, LCID, 1, (9, 0), (),)
if ret is not None:
ret = Dispatch(ret, u'AddState', '{D4286F2C-EE67-45AE-B928-28D695362EDA}')
return ret
def Clear(self):
'Clear'
return self._oleobj_.InvokeTypes(5, LCID, 1, (24, 0), (),)
_prop_map_get_ = {
"Attributes": (1, 2, (3, 0), (), "Attributes", None),
"Id": (4, 2, (3, 0), (), "Id", None),
# Method 'InitialState' returns object of type 'ISpeechGrammarRuleState'
"InitialState": (2, 2, (9, 0), (), "InitialState", '{D4286F2C-EE67-45AE-B928-28D695362EDA}'),
"Name": (3, 2, (8, 0), (), "Name", None),
}
_prop_map_put_ = {
}
def __iter__(self):
"Return a Python iterator for this object"
try:
ob = self._oleobj_.InvokeTypes(-4,LCID,3,(13, 10),())
except pythoncom.error:
raise TypeError("This object does not support enumeration")
return win32com.client.util.Iterator(ob, None)
class ISpeechGrammarRuleState(DispatchBaseClass):
'ISpeechGrammarRuleState Interface'
CLSID = IID('{D4286F2C-EE67-45AE-B928-28D695362EDA}')
coclass_clsid = None
def AddRuleTransition(self, DestinationState=defaultNamedNotOptArg, Rule=defaultNamedNotOptArg, PropertyName=u'', PropertyId=0
, PropertyValue=u'', Weight=1.0):
'AddRuleTransition'
return self._ApplyTypes_(4, 1, (24, 32), ((9, 1), (9, 1), (8, 49), (3, 49), (16396, 49), (4, 49)), u'AddRuleTransition', None,DestinationState
, Rule, PropertyName, PropertyId, PropertyValue, Weight
)
def AddSpecialTransition(self, DestinationState=defaultNamedNotOptArg, Type=defaultNamedNotOptArg, PropertyName=u'', PropertyId=0
, PropertyValue=u'', Weight=1.0):
'AddSpecialTransition'
return self._ApplyTypes_(5, 1, (24, 32), ((9, 1), (3, 1), (8, 49), (3, 49), (16396, 49), (4, 49)), u'AddSpecialTransition', None,DestinationState
, Type, PropertyName, PropertyId, PropertyValue, Weight
)
def AddWordTransition(self, DestState=defaultNamedNotOptArg, Words=defaultNamedNotOptArg, Separators=u' ', Type=1
, PropertyName=u'', PropertyId=0, PropertyValue=u'', Weight=1.0):
'AddWordTransition'
return self._ApplyTypes_(3, 1, (24, 32), ((9, 1), (8, 1), (8, 49), (3, 49), (8, 49), (3, 49), (16396, 49), (4, 49)), u'AddWordTransition', None,DestState
, Words, Separators, Type, PropertyName, PropertyId
, PropertyValue, Weight)
_prop_map_get_ = {
# Method 'Rule' returns object of type 'ISpeechGrammarRule'
"Rule": (1, 2, (9, 0), (), "Rule", '{AFE719CF-5DD1-44F2-999C-7A399F1CFCCC}'),
# Method 'Transitions' returns object of type 'ISpeechGrammarRuleStateTransitions'
"Transitions": (2, 2, (9, 0), (), "Transitions", '{EABCE657-75BC-44A2-AA7F-C56476742963}'),
}
_prop_map_put_ = {
}
def __iter__(self):
"Return a Python iterator for this object"
try:
ob = self._oleobj_.InvokeTypes(-4,LCID,3,(13, 10),())
except pythoncom.error:
raise TypeError("This object does not support enumeration")
return win32com.client.util.Iterator(ob, None)
class ISpeechGrammarRuleStateTransition(DispatchBaseClass):
'ISpeechGrammarRuleStateTransition Interface'
CLSID = IID('{CAFD1DB1-41D1-4A06-9863-E2E81DA17A9A}')
coclass_clsid = None
_prop_map_get_ = {
# Method 'NextState' returns object of type 'ISpeechGrammarRuleState'
"NextState": (8, 2, (9, 0), (), "NextState", '{D4286F2C-EE67-45AE-B928-28D695362EDA}'),
"PropertyId": (6, 2, (3, 0), (), "PropertyId", None),
"PropertyName": (5, 2, (8, 0), (), "PropertyName", None),
"PropertyValue": (7, 2, (12, 0), (), "PropertyValue", None),
# Method 'Rule' returns object of type 'ISpeechGrammarRule'
"Rule": (3, 2, (9, 0), (), "Rule", '{AFE719CF-5DD1-44F2-999C-7A399F1CFCCC}'),
"Text": (2, 2, (8, 0), (), "Text", None),
"Type": (1, 2, (3, 0), (), "Type", None),
"Weight": (4, 2, (12, 0), (), "Weight", None),
}
_prop_map_put_ = {
}
def __iter__(self):
"Return a Python iterator for this object"
try:
ob = self._oleobj_.InvokeTypes(-4,LCID,3,(13, 10),())
except pythoncom.error:
raise TypeError("This object does not support enumeration")
return win32com.client.util.Iterator(ob, None)
# -*- coding: utf-8 -*-
# Copyright (c) 2018, Frappe Technologies Pvt. Ltd. and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.utils.nestedset import NestedSet, get_root_of
class SupplierGroup(NestedSet):
nsm_parent_field = 'parent_supplier_group'
def validate(self):
if not self.parent_supplier_group:
self.parent_supplier_group = get_root_of("Supplier Group")
def on_update(self):
NestedSet.on_update(self)
self.validate_one_root()
def on_trash(self):
NestedSet.validate_if_child_exists(self)
frappe.utils.nestedset.update_nsm(self)
# -*- coding: utf-8 -*-
"""
Created on Wed Dec 9 21:31:53 2015
Create random synthetic velocity profile + linear first guesses
@author: alex
"""
import random
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
def savitzky_golay(y, window_size, order, deriv=0, rate=1):
r"""Smooth (and optionally differentiate) data with a Savitzky-Golay filter.
The Savitzky-Golay filter removes high frequency noise from data.
It has the advantage of preserving the original shape and
features of the signal better than other types of filtering
approaches, such as moving averages techniques.
Parameters
----------
y : array_like, shape (N,)
the values of the time history of the signal.
window_size : int
the length of the window. Must be an odd integer number.
order : int
the order of the polynomial used in the filtering.
Must be less than `window_size` - 1.
deriv: int
the order of the derivative to compute (default = 0 means only smoothing)
Returns
-------
ys : ndarray, shape (N)
the smoothed signal (or its n-th derivative).
Notes
-----
The Savitzky-Golay is a type of low-pass filter, particularly
suited for smoothing noisy data. The main idea behind this
approach is to make for each point a least-square fit with a
polynomial of high order over a odd-sized window centered at
the point.
Examples
--------
t = np.linspace(-4, 4, 500)
y = np.exp( -t**2 ) + np.random.normal(0, 0.05, t.shape)
ysg = savitzky_golay(y, window_size=31, order=4)
import matplotlib.pyplot as plt
plt.plot(t, y, label='Noisy signal')
plt.plot(t, np.exp(-t**2), 'k', lw=1.5, label='Original signal')
plt.plot(t, ysg, 'r', label='Filtered signal')
plt.legend()
plt.show()
References
----------
.. [1] A. Savitzky, M. J. E. Golay, Smoothing and Differentiation of
Data by Simplified Least Squares Procedures. Analytical
Chemistry, 1964, 36 (8), pp 1627-1639.
.. [2] Numerical Recipes 3rd Edition: The Art of Scientific Computing
W.H. Press, S.A. Teukolsky, W.T. Vetterling, B.P. Flannery
Cambridge University Press ISBN-13: 9780521880688
"""
import numpy as np
from math import factorial
try:
window_size = np.abs(np.int(window_size))
order = np.abs(np.int(order))
except ValueError, msg:
raise ValueError("window_size and order have to be of type int")
if window_size % 2 != 1 or window_size < 1:
raise TypeError("window_size size must be a positive odd number")
if window_size < order + 2:
raise TypeError("window_size is too small for the polynomials order")
order_range = range(order+1)
half_window = (window_size -1) // 2
# precompute coefficients
b = np.mat([[k**i for i in order_range] for k in range(-half_window, half_window+1)])
m = np.linalg.pinv(b).A[deriv] * rate**deriv * factorial(deriv)
# pad the signal at the extremes with
# values taken from the signal itself
firstvals = y[0] - np.abs( y[1:half_window+1][::-1] - y[0] )
lastvals = y[-1] + np.abs(y[-half_window-1:-1][::-1] - y[-1])
y = np.concatenate((firstvals, y, lastvals))
return np.convolve( m[::-1], y, mode='valid')
plt.close('all')
random.seed(2)
X = range(256)
Z = np.linspace(0,1000,256)
pos = 50
posVpVs = 1.7
L = np.array([pos])
vpvs = np.array([posVpVs])
for x in X[1:]:
pos += random.choice((-0.9,1)) #random.choice((-1,1))
posVpVs += random.choice((-0.02,0.02))
L=np.append(L,pos)
vpvs = np.append(vpvs,posVpVs)
L=70*L
Vp = savitzky_golay(L, 51, 3) # window size 51, polynomial order 3
A = np.array([ Z, np.ones(256)])
Vs = Vp/savitzky_golay(vpvs, 51, 3) # window size 51, polynomial order 3
w = np.linalg.lstsq(A.T,Vp)[0] # obtaining the parameters
# plotting the line
lineP = w[0]*Z+w[1]+500 # regression line
w = np.linalg.lstsq(A.T,Vs)[0] # obtaining the parameters
# plotting the line
lineS = w[0]*Z+w[1]-250 # regression line
plt.figure()
plt.hold(True)
plt.plot(L,Z,label="Random walk")
plt.plot(Vp,Z,linewidth=4,label="P wave velocity from this random walk")
plt.plot(lineP,Z,linewidth=4,label="First guess")
ax = plt.axes()
ax.set_ylim(Z[0],Z[-1])
ax.invert_yaxis()
plt.legend()
plt.figure()
plt.hold(True)
plt.plot(vpvs,Z,linewidth=4,label="Random walk vp/vs")
plt.legend()
ax = plt.axes()
ax.set_ylim(Z[0],Z[-1])
ax.invert_yaxis()
plt.figure()
plt.hold(True)
plt.plot(Vs,Z,linewidth=4,label="S wave velocity from random vp/vs")
plt.plot(lineS,Z,linewidth=4,label="First guess")
plt.legend()
ax = plt.axes()
ax.set_ylim(Z[0],Z[-1])
ax.invert_yaxis()
# Save profiles
np.savetxt("dataExample/realProfileP.txt",np.dstack((Z,Vp))[0])
np.savetxt("dataExample/realProfileS.txt",np.dstack((Z,Vs))[0])
np.savetxt("dataExample/firstGuessP.txt",np.dstack((Z,lineP))[0])
np.savetxt("dataExample/firstGuessS.txt",np.dstack((Z,lineS))[0])
#####################################################################
coordShotsX=[300,500]
coordShotsY=[400]
coordShotsZ=[650]
coordStatsX=[200,300,400,500,600]
coordStatsY=[200,300,400,500,600]
coordStatsZ=[200,300,400,500,600]
Xshots=[]
Yshots=[]
Zshots=[]
Xstats=[]
Ystats=[]
Zstats=[]
#Open a file in write mode:
fo = open("dataExample/coordShots.txt", "w+")
for coordX in coordShotsX:
for coordY in coordShotsY:
for coordZ in coordShotsZ:
Xshots.append(coordX)
Yshots.append(coordY)
Zshots.append(coordZ)
fo.write(str(coordX)+" "+str(coordY)+" "+str(coordZ)+"\n")
# Close opened file
fo.close()
#Open a file in write mode:
fo = open("dataExample/coordStats.txt", "w+")
for coordX in coordStatsX:
for coordY in coordStatsY:
for coordZ in coordStatsZ:
Xstats.append(coordX)
Ystats.append(coordY)
Zstats.append(coordZ)
fo.write(str(coordX)+" "+str(coordY)+" "+str(coordZ)+"\n")
# Close opened file
fo.close()
fig = plt.figure()
ax = fig.gca(projection='3d') #Axes3D(fig)
ax.hold(True)
ax.scatter(Xstats,Ystats,Zstats,zdir='z',s=20,c='b')
if (len(coordShotsX) > 3):
ax.scatter(Xshots,Yshots,Zshots,zdir='z',s=20,c='r',marker='^')
else:
ax.scatter(Xshots,Yshots,Zshots,zdir='z',s=200,c='r',marker='^')
ax.set_xlim3d(min(min(Xshots),min(Xstats))-100,max(max(Xshots),max(Xstats))+100)
ax.set_ylim3d(min(min(Yshots),min(Ystats))-100,max(max(Yshots),max(Ystats))+100)
ax.set_zlim3d(min(min(Zshots),min(Zstats))-100,max(max(Zshots),max(Zstats))+100)
plt.rc('text', usetex=True)
plt.rc('font', family='serif')
ax.set_xlabel('X (m)')
ax.set_ylabel('Y (m)')
ax.set_zlabel('Z (m)')
ax.set_title('Geometry')
ax.invert_zaxis()
def get(self, spec, new=False):
"""Find a repo that contains the supplied spec's package.
Raises UnknownPackageError if not found.
"""
return self.repo_for_pkg(spec).get(spec)
def get_pkg_class(self, pkg_name):
"""Find a class for the spec's package and return the class object."""
return self.repo_for_pkg(pkg_name).get_pkg_class(pkg_name)
@_autospec
def dump_provenance(self, spec, path):
"""Dump provenance information for a spec to a particular path.
This dumps the package file and any associated patch files.
Raises UnknownPackageError if not found.
"""
return self.repo_for_pkg(spec).dump_provenance(spec, path)
def dirname_for_package_name(self, pkg_name):
return self.repo_for_pkg(pkg_name).dirname_for_package_name(pkg_name)
def filename_for_package_name(self, pkg_name):
return self.repo_for_pkg(pkg_name).filename_for_package_name(pkg_name)
def exists(self, pkg_name):
"""Whether package with the give name exists in the path's repos | .
Note that virtual packages do not "exist".
"""
return any(repo.exists(pkg_name) for repo in self.repos)
def is_virtual(self, pkg_name):
"""True if the package with this name is virtual, False otherwise."""
return pkg_name in self.provider_index
def __contains__(self, pkg_name):
return self.exists(pkg_name)
class Repo(object):
"""Class representing a package repository in the filesystem.
Each package repository must have a top-level configuration file
called `repo.yaml`.
Currently, `repo.yaml` must define:
`namespace`:
A Python namespace where the repository's packages should live.
"""
def __init__(self, root, namespace=repo_namespace):
"""Instantiate a package repository from a filesystem path.
Arguments:
root The root directory of the repository.
namespace A super-namespace that will contain the repo-defined
namespace (this is generally just `spack.pkg`). The
super-namespace is Spack's way of separating repositories
from other python namespaces.
"""
# Root directory, containing _repo.yaml and package dirs
# Allow roots to be spack-relative by starting with '$spack'
self.root = canonicalize_path(root)
# super-namespace for all packages in the Repo
self.super_namespace = namespace
# check and raise BadRepoError on fail.
def check(condition, msg):
if not condition:
raise BadRepoError(msg)
# Validate repository layout.
self.config_file = os.path.join(self.root, repo_config_name)
check(os.path.isfile(self.config_file),
"No %s found in '%s'" % (repo_config_name, root))
self.packages_path = os.path.join(self.root, packages_dir_name)
check(os.path.isdir(self.packages_path),
"No directory '%s' found in '%s'" % (repo_config_name, root))
# Read configuration and validate namespace
config = self._read_config()
check('namespace' in config, '%s must define a namespace.'
% os.path.join(root, repo_config_name))
self.namespace = config['namespace']
check(re.match(r'[a-zA-Z][a-zA-Z0-9_.]+', self.namespace),
("Invalid namespace '%s' in repo '%s'. "
% (self.namespace, self.root)) +
"Namespaces must be valid python identifiers separated by '.'")
# Set up 'full_namespace' to include the super-namespace
if self.super_namespace:
self.full_namespace = "%s.%s" % (
self.super_namespace, self.namespace)
else:
self.full_namespace = self.namespace
# Keep name components around for checking prefixes.
self._names = self.full_namespace.split('.')
# These are internal cache variables.
self._modules = {}
self._classes = {}
self._instances = {}
# Map that goes from package name to corresponding file stat
self._fast_package_checker = None
# Index of virtual dependencies, computed lazily
self._provider_index = None
# Index of tags, computed lazily
self._tag_index = None
# make sure the namespace for packages in this repo exists.
self._create_namespace()
def _create_namespace(self):
"""Create this repo's namespace module and insert it into sys.modules.
Ensures that modules loaded via the repo have a home, and that
we don't get runtime warnings from Python's module system.
"""
parent = None
for l in range(1, len(self._names) + 1):
ns = '.'.join(self._names[:l])
if ns not in sys.modules:
module = SpackNamespace(ns)
module.__loader__ = self
sys.modules[ns] = module
# Ensure the namespace is an attribute of its parent,
# if it has not been set by something else already.
#
# This ensures that we can do things like:
# import spack.pkg.builtin.mpich as mpich
if parent:
modname = self._names[l - 1]
setattr(parent, modname, module)
else:
# no need to set up a module
module = sys.modules[ns]
# but keep track of the parent in this loop
parent = module
def real_name(self, import_name):
"""Allow users to import Spack packages using Python identifiers.
A python identifier might map to many different Spack package
names due to hyphen/underscore ambiguity.
Easy example:
num3proxy -> 3proxy
Ambiguous:
foo_bar -> foo_bar, foo-bar
More ambiguous:
foo_bar_baz -> foo_bar_baz, foo-bar-baz, foo_bar-baz, foo-bar_baz
"""
if import_name in self:
return import_name
options = possible_spack_module_names(import_name)
options.remove(import_name)
for name in options:
if name in self:
return name
return None
def is_prefix(self, fullname):
"""True if fullname is a prefix of this Repo's namespace."""
parts = fullname.split('.')
return self._names[:len(parts)] == parts
def find_module(self, fullname, path=None):
"""Python find_module import hook.
Returns this Repo if it can load the module; None if not.
"""
if self.is_prefix(fullname):
return self
namespace, dot, module_name = fullname.rpartition('.')
if namespace == self.full_namespace:
if self.real_name(module_name):
return self
return None
def load_module(self, fullname):
"""Python importer load hook.
Tries to load the module; raises an ImportError if it can't.
"""
if fullname in sys.modules:
return sys.modules[fullname]
namespace, dot, module_name = fullname.rpartition('.')
if self.is_prefix(fullname):
module = SpackNamespace(fullname)
elif namespace == self.full_namespace:
real_name = self.real_name(module_name)
if not real_name:
raise ImportError("No module %s in %s" % (module_name, self))
module = self._get_pkg_module(real_name)
else:
raise ImportError("No module %s in %s" % (fullname, self))
module.__loader__ = self
sys.modules[fullname] = module
if namespace != fullname:
parent = sys.modules[namespace]
if not hasattr(parent, module_name):
setattr(parent, module_name, module)
return module
def _read_config(self):
"""Check for a YAML config file in this db's root directory."""
try:
with open(self.config_file) as reponame_file:
.path.join(origin_directory,quality_directory,
mode_directory,site_directory,
camera_directory, filter_directory,
field_directory)
self.output_directory = output_directory
self.catalog_directory = origin_directory.replace('images','catalog0')
if os.path.isdir(self.output_directory) == False:
os.makedirs(self.output_directory)
if os.path.isdir(self.catalog_directory) == False:
os.makedirs(self.catalog_directory)
self.logger.info('Successfully built the output directory : '+self.output_directory)
self.logger.info('Successfully built the catalog directory : '+self.catalog_directory)
except:
self.logger.error('I can not construct the output directory!')
def find_or_construct_the_output_directory(self):
try :
flag = os.path.isdir(self.output_directory)
if flag == True:
self.logger.info('Successfully found the output directory : '+self.output_directory)
else :
os.makedirs(self.output_directory)
self.logger.info('Successfully mkdir the output directory : '+self.output_directory)
except:
self.logger.error('I cannot find or mkdir the output directory!')
def find_WCS_offset(self):
try:
self.x_new_center,self.y_new_center,self.x_shift,self.y_shift = xycorr(os.path.join(self.template_directory,self.template_name), self.data, 0.4)
self.update_image_wcs()
self.x_shift = int(self.x_shift)
self.y_shift = int(self.y_shift)
self.logger.info('Successfully found the WCS correction')
except:
self.x_shift = 0
self.y_shift = 0
self.logger.error('I failed to find a WCS correction')
def generate_sextractor_catalog(self, config):
"""
extracting a catalog from a WCS-recalibrated (!) image
calling it through sewpy to obtain an astropy
compliant output with logging...
"""
try:
extractor_parameters=['X_IMAGE','Y_IMAGE','BACKGROUND',
'ELLIPTICITY','FWHM_WORLD','X_WORLD',
'Y_WORLD','MAG_APER','MAGERR_APER']
extractor_config={'DETECT_THRESH':2.5,
'ANALYSIS_THRESH':2.5,
'FILTER':'Y',
'DEBLEND_NTHRESH':32,
'DEBLEND_MINCOUNT':0.005,
'CLEAN':'Y',
'CLEAN_PARAM':1.0,
'PIXEL_SCALE':self.camera.pix_scale,
'SATUR_LEVEL':self.camera.ADU_high,
'PHOT_APERTURES':10,
'DETECT_MINAREA':7,
'GAIN':self.camera.gain,
'SEEING_FWHM':self.header_seeing,
'BACK_FILTERSIZE':3}
sew = sewpy.SEW(workdir=os.path.join(self.image_directory,'sewpy'),
sexpath=config['sextractor_path'],
params=extractor_parameters,
config=extractor_config)
sewoutput = sew(os.path.join(self.image_directory,self.image_name))
#APPEND JD, ATTEMPTING TO CALIBRATE MAGNITUDES..
catalog=sewoutput['table']
tobs=Time([self.header['DATE-OBS']],format='isot',scale='utc')
calibration_pars={'gp':[1.0281267,29.315002],'ip':[1.0198562,28.13711],'rp':[1.020762,28.854443]}
if self.filter!=None:
calmag=catalog['MAG_APER']*calibration_pars[self.filter][0]+calibration_pars[self.filter][1]
calmag[np.where(catalog['MAG_APER']==99.)]=99.
catalog['MAG_APER_CAL']=calmag
catalog['FILTER']=[self.filter]*len(calmag)
catalog['JD']=np.ones(len(catalog))*tobs.jd
#APPEND JD AND CALIBRATED MAGNITUDES...
#ROUGH CALIBRATION TO VPHAS+
#gmag=instmag*1.0281267+29.315002
#imag=instmag*1.0198562+28.13711
#rmag=instmag*1.020762+28.854443
self.compute_stats_from_catalog(catalog)
self.catalog = catalog
#ascii.write(catalog,os.path.join('./',catname))
self.logger.info('Sextractor catalog successfully produced')
except:
self.logger.error('I cannot produce the Sextractor catalog!')
def create_image_control_region(self):
w = wcs.WCS(self.header)
py,px = w.wcs_world2pix(self.x_center_thumbnail_world,
self.y_center_thumbnail_world,1)
py = int(py)
px = int(px)
try:
self.thumbnail=self.data[px-self.thumbnail_box_size/2:px+self.thumbnail_box_size/2,py-self.thumbnail_box_size/2:py+self.thumbnail_box_size/2]
self.logger.info('Thumbnail successfully produced around the good position')
except:
self.thumbnail=np.zeros((self.thumbnail_box_size,self.thumbnail_box_size))
self.logger.info('Thumbnail successfully produced around the center of the image')
def compute_stats_from_catalog(self,catalog):
try:
self.sky_level=np.median(catalog['BACKGROUND'])
self.sky_level_std=np.std(catalog['BACKGROUND'])
self.sky_minimum_level=np.percentile(catalog['BACKGROUND'],1)
self.sky_maximum_level=np.max(catalog['BACKGROUND'])
self.number_of_stars=len(catalog)
self.ellipticity=np.median(catalog['ELLIPTICITY'])
self.seeing=np.median(catalog['FWHM_WORLD']*3600)
self.logger.info('Image quality statistics well updated')
except:
self.logger.error('For some reason, I can not update the image quality statistics!')
def extract_header_statistics(self):
desired_quantities = [ key for key,value in self.__dict__.items() if 'header' in key]
for quantity in desired_quantities :
try:
dictionnary_key = quantity.replace('header_','')
setattr(self, quantity, self.header[self.camera.header_dictionnary[dictionnary_key]])
except:
pass
self.logger.info('Successfully obtained image header_quality statistics')
def assess_image_quality(self):
try:
self.check_background()
self.check_Moon()
self.check_Nstars()
self.check_ellipticity()
self.check_seeing()
self.logger.info('Quality flags well produced')
except:
self.logger.error('I can not assess the image quality, no quality flags produced!')
def check_background(self):
if self.sky_level:
if self.sky_level > self.quantity_limits.sky_background_median_limit:
self.quality_flags.append('High sky background')
else:
self.quality_flags.append('No sky level measured!')
if self.sky_level_std :
if self.sky_level_std > self.quantity_limits.sky_background_std_limit:
self.quality_flags.append('High sky background variations')
else:
self.quality_flags.append('No sky level variations measured!')
if self.sky_minimum_level:
#!/usr/bin/env python
#-*- coding:utf-8 -*-
#
# Copyright (C) 2016 Oladimeji Fayomi, University of Waikato.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Name: NetlinkProcessor.py
# Author: Oladimeji Fayomi
# Created: 25 May 2016
# Last Modified: 17 August 2016
# Version: 1.0
# Description: Listens to netlink messages and notifies the RheaFlow
# application of important netlink messages.
import socket
from pyroute2 import IPDB
import eventlet
from datetime import datetime
from log import log
try:
import cPickle as pickle
except:
import pickle
server_addr = ('127.0.0.1', 55651)
class NetlinkClient(object):
def __init__(self):
self.neighbours = []
self.unresolvedneighbours = []
self.ip = IPDB(ignore_rtables=[254])
self.ip_uuid = self.ip.register_callback(self.callback)
self.server = eventlet.listen(('127.0.0.1', 55652))
self.socket = None
self.serve = True
self.pool = eventlet.GreenPool()
self.not_connect = True
def callback(self, ipdb, msg, action):
if action == 'RTM_NEWNEIGH':
self.add_neighbour(msg)
if action == 'RTM_DELNEIGH':
self.remove_neighbour(msg)
if action == 'RTM_NEWLINK':
self.notify(['ifaceTable', self.ifaceTable(ipdb)])
if action == 'RTM_DELLINK':
self.notify(['ifaceTable', self.ifaceTable(ipdb)])
if action == 'RTM_NEWADDR':
log.info("RTM_NEWADDR happened at %s", str(datetime.now()))
self.notify(['ifaceTable', self.ifaceTable(ipdb)])
if action == 'RTM_DELADDR':
log.info("RTM_DELADDR happened at %s", str(datetime.now()))
self.notify(['ifaceTable', self.ifaceTable(ipdb)])
def add_neighbour(self, msg):
attributes = msg['attrs']
ip_addr = attributes[0][1]
if attributes[1][0] == 'NDA_LLADDR':
mac_addr = attributes[1][1]
iface_index = msg['ifindex']
host = {'ipaddr': ip_addr, 'mac_addr': mac_addr,
'ifindex': iface_index}
if host not in self.neighbours:
self.notify(['add_neigh', host])
self.neighbours.append(host)
if ip_addr in self.unresolvedneighbours:
self.unresolvedneighbours = list(filter(lambda x: x !=
ip_addr,
self.unresolvedneighbours)
)
else:
if ip_addr not in self.unresolvedneighbours:
self.unresolvedneighbours.append(ip_addr)
self.notify(['unresolved', self.unresolvedneighbours])
def remove_neighbour(self, msg):
attributes = msg['attrs']
ip_addr = attributes[0][1]
if attributes[1][0] == 'NDA_LLADDR':
mac_addr = attributes[1][1]
iface_index = msg['ifindex']
host = {'ipaddr': ip_addr, 'mac_addr': mac_addr,
'ifindex': iface_index}
self.notify(['remove_neigh', host])
self.neighbours = list(filter(
lambda x: x != host, self.neighbours))
def notify(self, rheamsg):
notification = pickle.dumps(rheamsg)
if self.socket is not None:
self.socket.send(notification)
recv = self.socket.recv(8192)
def ifaceTable(self, ipdb):
ifaces = ipdb.by_name.keys()
table = []
for iface in ifaces:
mac_addr = ipdb.interfaces[iface]['address']
ip_addresses = ipdb.interfaces[iface]['ipaddr']
ifindex = ipdb.interfaces[iface]['index']
state = ipdb.interfaces[iface]['operstate']
table.append({'ifname': iface, 'mac-address': mac_addr,
'IP-Addresses': [x for x in ip_addresses],
'ifindex': ifindex,
'state': state})
return table
def neighbourtable(self):
return self.neighbours
def returnunresolvedhost(self):
return self.unresolvedneighbours
def process_requests(self, ipdb, request):
if request[0] == 'ifaceTable':
res = self.ifaceTable(ipdb)
result = ['ifaceTable', res]
return pickle.dumps(result)
if request[0] == 'neighbourtable':
res = self.neighbourtable()
result = ['neighbourtable', res]
return pickle.dumps(result)
if request[0] == 'get_unresolved':
res = self.returnunresolvedhost()
result = ['unresolved', res]
return pickle.dumps(result)
def handle_request(self, sock):
is_active = True
while is_active:
received = sock.recv(8192)
if len(received) != 0:
request = pickle.loads(received)
response = self.process_requests(self.ip, request)
sock.send(response)
if len(received) == 0:
is_active = False
sock.close()
sock.close()
def try_connect(self):
while self.not_connect:
try:
self.socket = eventlet.connect(('127.0.0.1', 55651))
except socket.error as e:
pass
else:
self.not_connect = False
def serve_forever(self):
while self.serve:
nl_sock, address = self.server.accept()
self.pool.spawn_n(self.handle_request, nl_sock)
log.info("Rhea has contacted us")
self.try_connect()
if __name__ == "__main__":
nlclient = NetlinkClient()
nlclient.serve_forever()
|
# -*- coding: utf-8 -*-
from django.urls import re_path
from django.contrib.auth import views as auth_views
from django.utils.translation import gettext_lazy as _
from django.urls import reverse_lazy
from . import forms
from . import views
app_name = "auth"
urlpatterns = [
# Sign in / sign out
re_path(
_(r"^connexion/$"),
views.LoginView.as_view(
template_name="public/auth/login.html", authentication_form=forms.AuthenticationForm
),
name="login",
),
re_path(_(r"^deconnexion/$"), auth_views.LogoutView.as_view(next_page="/"), name="logout"),
re_path(_(r"^bienvenue/$"), | views.UserLoginLandingRedirectView.as_view(), name="landing"),
# Parameters & personal data
re_path(
_(r"^donnees-personnelles/$"),
views.UserPersonalDataUpdateView.as_view(),
name="personal_data",
),
re_path(_(r"^parametres/$"), views.UserParametersUpdateView.as_view(), name="parameters"),
# Password change
re_path(_(r"^mot-de-passe/$"), views.UserPasswordChangeView.as_view(), name="password_change"),
# Password reset
re_path(
_(r"^mot-de-passe/reinitialisation/$"),
auth_views.PasswordResetView.as_view(
template_name="public/auth/password_reset_form.html",
email_template_name="emails/auth/password_reset_registered_email.html",
subject_template_name="emails/auth/password_reset_registered_email_subject.txt",
form_class=forms.PasswordResetForm,
success_url=reverse_lazy("public:auth:password_reset_done"),
),
name="password_reset",
),
re_path(
_(r"^mot-de-passe/reinitialisation/termine/$"),
auth_views.PasswordResetDoneView.as_view(
template_name="public/auth/password_reset_done.html"
),
name="password_reset_done",
),
re_path(
_(r"^reinitialisation/(?P<uidb64>[0-9A-Za-z_\-]+)/(?P<token>[0-9A-Za-z]+-[0-9A-Za-z]+)/$"),
auth_views.PasswordResetConfirmView.as_view(
template_name="public/auth/password_reset_confirm.html",
success_url=reverse_lazy("public:auth:password_reset_complete"),
),
name="password_reset_confirm",
),
re_path(
_(r"^reinitialisation/termine/$"),
auth_views.PasswordResetCompleteView.as_view(
template_name="public/auth/password_reset_complete.html"
),
name="password_reset_complete",
),
]
# This stores all the dialogue related stuff
import screen
class Dialogue(object):
"""Stores the dialogue tree for an individual NPC"""
def __init__(self, npc):
super(Dialogue, self).__init__()
self.npc = npc
self.game = npc.game
self.root = None
self.currentNode = None
def setRootNode(self, node):
self.root = node
def resetCurrentNode(self):
self.currentNode = self.root
def beginConversation(self):
self.resetCurrentNode()
self.runNextNode()
def runNextNode(self):
if self.currentNode is None:
return
# Grab all the DialogueChoices that should be shown
availableChoices = []
for (choice, predicate, child) in self.currentNode.choices:
if predicate is not None:
if predicate():
availableChoices.append((choice, child))
else:
availableChoices.append((choice, child))
npcName = None
if self.game.player.notebook.isNpcKnown(self.npc):
npcName = self.npc.firstName + " " + self.npc.lastName
choiceTexts = [choice.choiceText for (choice, child) in availableChoices]
screen.printDialogueChoices(self.game.screen, self.game.player,
choiceTexts, npcName)
choiceIdx = self.game.getDialogueChoice(len(choiceTexts)) - 1
self.game.draw()
(choice, nextNode) = availableChoices[choiceIdx]
response = ""
response += choice.response
if choice.responseFunction is not None:
response = choice.responseFunction(self.npc, response)
self.game.printDescription(response, npcName)
self.currentNode = nextNode
self.runNextNode()
class DialogueNode(object):
"""A single node of the dialogue tree"""
def __init__(self):
super(DialogueNode, self).__init__()
self.choices = []
def addChoice(self, choice, choicePredicate=None, childNode=None):
self.choices.append((choice, choicePredicate, childNode))
class DialogueChoice(object):
"""Stores the choice/function pair"""
def __init__(self, choiceText, response, responseFunction=None):
super(DialogueChoice, self).__init__()
self.choiceText = choiceText
self.response = response
self.responseFunction = responseFunction
def callResponseFunction(self, npcArgument, response):
        if self.responseFunction is not None:
            self.responseFunction(npcArgument, response)
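# A minimal usage sketch (assumes a hypothetical `npc` object carrying a
# `game` attribute, as Dialogue.__init__ expects):
#
#   root = DialogueNode()
#   root.addChoice(DialogueChoice("Hello!", "Hi there."))
#   dialogue = Dialogue(npc)
#   dialogue.setRootNode(root)
#   dialogue.beginConversation()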
import threading
__author__ = "Piotr Gawlowicz"
__copyright__ = "Copyright (c) 2015, Technische Universitat Berlin"
__version__ = "0.1.0"
__email__ = "gawlowicz@tkn. | tu-berlin.de"
class Timer(object):
def __init__(self, handler_):
assert callable(handler_)
super().__init__()
self._handler = handler_
self._event = threading.Event()
        self._thread = None
def start(self, interval):
"""interval is in seconds"""
if self._thread:
self.cancel()
self._event.clear()
self._thread = threading.Thread(target=self._timer, args=[interval])
        self._thread.daemon = True
self._thread.start()
def cancel(self):
if (not self._thread) or (not self._thread.is_alive()):
return
self._event.set()
# self._thread.join()
self._thread = None
def is_running(self):
return self._thread is not None
def _timer(self, interval):
# Avoid cancellation during execution of self._callable()
cancel = self._event.wait(interval)
if cancel:
return
self._handler()
class TimerEventSender(Timer):
    # The timeout handler is called in the timer thread's context, so to
    # move execution onto the application's event thread we post the event
    # to the application instead of handling it directly.
def __init__(self, app, ev_cls):
super(TimerEventSender, self).__init__(self._timeout)
self._app = app
self._ev_cls = ev_cls
def _timeout(self):
self._app.send_event(self._ev_cls())
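# Minimal usage sketch for Timer:
#
#   t = Timer(lambda: print("fired"))
#   t.start(5)    # invoke the handler once, five seconds from now
#   t.cancel()    # or cancel it before it fires
#
# TimerEventSender additionally assumes an application object exposing
# send_event() (e.g. a Ryu-style app); the event instance is then handled
# on the application's own event thread rather than the timer thread.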
from flask import Flask
from flask import render_template
import euclid
import queue_constants
import ast
import redis
import threading
REDIS_ADDRESS = "localhost"
REDIS_PORT = 6379
REDIS_DB = 0
app = Flask(__name__)
@app.route("/")
def monitor():
queue = redis.StrictRedis(host=REDIS_ADDRESS, port=REDIS_PORT, db=REDIS_DB)
    nstatus = []
status = ast.literal_eval(queue.get(queue_constants.NODE_KEY).decode())
for s in status:
nstatus.append({"name":s, "status":status[s]["status"]})
return render_template('monitor.html', status=nstatus)
if __name__ == "__main__":
euclidThread = threading.Thread(target=euclid.main)
    euclidThread.daemon = True
euclidThread.start()
app.run(host='0.0.0.0')
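# The monitor() view above assumes NODE_KEY holds a Python dict literal
# (it is parsed with ast.literal_eval) mapping node names to dicts with a
# "status" entry. A hypothetical seeding snippet:
#
#   queue = redis.StrictRedis(host=REDIS_ADDRESS, port=REDIS_PORT, db=REDIS_DB)
#   queue.set(queue_constants.NODE_KEY, repr({"node-1": {"status": "up"}}))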
"""
Core
====
AudioSignals
------------
.. autoclass:: nussl.core.AudioSignal
:members:
    :autosummary:
Masks
-----
.. automodule:: nussl.core.masks
:members:
:autosummary:
Constants
------------
.. automodule:: nussl.core.constants
:members:
:autosummary:
External File Zoo
-----------------
.. automodule:: nussl.core.efz_utils
:members:
:autosummary:
General utilities
-----------------
.. automodule:: nussl.core.utils
:members:
:autosummary:
Audio effects
-------------
.. automodule:: nussl.core.effects
:members:
:autosummary:
Mixing
------
.. automodule:: nussl.core.mixing
:members:
:autosummary:
Playing and embedding audio
---------------------------
.. automodule:: nussl.core.play_utils
:members:
:autosummary:
Checkpoint migration (backwards compatibility)
----------------------------------------------
.. automodule:: nussl.core.migration
:members:
:autosummary:
"""
from .audio_signal import AudioSignal, STFTParams
from . import constants
from . import efz_utils
from . import play_utils
from . import utils
from . import mixing
from . import masks
__all__ = [
'AudioSignal',
'STFTParams',
'constants',
'efz_utils',
'play_utils',
'utils',
    'mixing',
'masks',
]
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Author: S.H.
Version: 0.1
Date: 2015-01-17
Description:
Scan ip:
74.125.131.0/24
74.125.131.99-125
74.125.131.201
Only three format above.
Read ip form a ip.txt, and scan all port(or a list port).
"""
import os
import io
import socket
fileOpen = open("ip.txt", 'r')
fileTemp = open("temp.txt", 'a')
for line in fileOpen.readlines():
    line = line.strip()
    if not line:
        continue
    if line.find("-") != -1:
        base = line[:line.index("-")]
        ip = [int(a) for a in base.split(".")]
        b = int(line[line.index("-")+1:])
        for i in range(ip[3], b+1):
            fileTemp.write(str(ip[0])+"."+str(ip[1])+"."+str(ip[2])+"."+str(i)+"\n")
    elif line.find("/") != -1:
        base = line[:line.index("/")]
        ip = [int(a) for a in base.split(".")]
        for i in range(256):  # only /24 networks are supported (see docstring)
            fileTemp.write(str(ip[0])+"."+str(ip[1])+"."+str(ip[2])+"."+str(i)+"\n")
    else:
        fileTemp.write(line + "\n")
fileTemp.close()
fileOpen.close()
# print("process is here.")
f = open("temp.txt", 'r')
print("===Scan Starting===")
for line in f.readlines():
    hostIP = socket.gethostbyname(line.strip())
# print(hostIP)
# for port in range(65535):
portList = [80, 8080]
for port in portList:
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        sock.settimeout(2)  # keep connect_ex from hanging on unreachable hosts
        result = sock.connect_ex((hostIP, port))
if result == 0:
print("Port {} is OPEN on:\t\t\t {}".format(port, hostIP))
else:
print("Port {} is NOT open on {}".format(port, hostIP))
sock.close()
f.close()
os.remove("temp.txt")
print("===Scan Complement===")
ting for the
normalization terms and scaling), allowing for universal comparison (beyond
this software package)
Parameters
----------
emp_cov : 2D ndarray (n_features, n_features)
Maximum Likelihood Estimator of covariance
precision : 2D ndarray (n_features, n_features)
The precision matrix of the covariance model to be tested
Returns
-------
sample mean of the log-likelihood
"""
p = precision.shape[0]
log_likelihood_ = - np.sum(emp_cov * precision) + fast_logdet(precision)
log_likelihood_ -= p * np.log(2 * np.pi)
log_likelihood_ /= 2.
return log_likelihood_
def empirical_covariance(X, assume_centered=False):
"""Computes the Maximum likelihood covariance estimator
Parameters
----------
X : ndarray, shape (n_samples, n_features)
Data from which to compute the covariance estimate
assume_centered : boolean
If True, data will not be centered before computation.
Useful when working with data whose mean is almost, but not exactly
zero.
If False, data will be centered before computation.
Returns
-------
covariance : 2D ndarray, shape (n_features, n_features)
Empirical covariance (Maximum Likelihood Estimator).
"""
X = np.asarray(X)
if X.ndim == 1:
X = np.reshape(X, (1, -1))
if X.shape[0] == 1:
warnings.warn("Only one sample available. "
"You may want to reshape your data array")
if assume_centered:
covariance = np.dot(X.T, X) / X.shape[0]
else:
covariance = np.cov(X.T, bias=1)
if covariance.ndim == 0:
covariance = np.array([[covariance]])
return covariance
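# A quick doctest-style sanity check: with assume_centered=False the result
# matches numpy's biased covariance estimate.
#
#   >>> X = np.random.RandomState(0).randn(100, 3)
#   >>> np.allclose(empirical_covariance(X), np.cov(X.T, bias=1))
#   True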
class EmpiricalCovariance(BaseEstimator):
"""Maximum likelihood covariance estimator
Read more in the :ref:`User Guide <covariance>`.
Parameters
----------
store_precision : bool
Specifies if the estimated precision is stored.
assume_centered : bool
If True, data are not centered before computation.
Useful when working with data whose mean is almost, but not exactly
zero.
If False (default), data are centered before computation.
Attributes
----------
location_ : array-like, shape (n_features,)
Estimated location, i.e. the estimated mean.
covariance_ : 2D ndarray, shape (n_features, n_features)
Estimated covariance matrix
precision_ : 2D ndarray, shape (n_features, n_features)
Estimated pseudo-inverse matrix.
(stored only if store_precision is True)
Examples
--------
>>> import numpy as np
>>> from sklearn.covariance import EmpiricalCovariance
>>> from sklearn.datasets import make_gaussian_quantiles
>>> real_cov = np.array([[.8, .3],
... [.3, .4]])
>>> rng = np.random.RandomState(0)
>>> X = rng.multivariate_normal(mean=[0, 0],
... cov=real_cov,
... size=500)
>>> cov = EmpiricalCovariance().fit(X)
>>> cov.covariance_
array([[0.7569..., 0.2818...],
[0.2818..., 0.3928...]])
>>> cov.location_
array([0.0622..., 0.0193...])
"""
def __init__(self, store_precision=True, assume_centered=False):
self.store_precision = store_precision
self.assume_centered = assume_centered
def _set_covariance(self, covariance):
"""Saves the covariance and precision estimates
Storage is done accordingly to `self.store_precision`.
Precision stored only if invertible.
Parameters
----------
covariance : 2D ndarray, shape (n_features, n_features)
Estimated covariance matrix to be stored, and from which precision
is computed.
"""
covariance = check_array(covariance)
# set covariance
self.covariance_ = covariance
# set precision
if self.store_precision:
self.precision_ = linalg.pinvh(covariance)
else:
self.precision_ = None
def get_precision(self):
"""Getter for the precision matrix.
Returns
-------
precision_ : array-like
The precision matrix associated to the current covariance object.
"""
if self.store_precision:
precision = self.precision_
else:
precision = linalg.pinvh(self.covariance_)
return precision
def fit(self, X, y=None):
"""Fits the Maximum Likelihood Estimator covariance model
according to the given training data and parameters.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training data, where n_samples is the number of samples and
n_features is the number of features.
y
not used, present for API consistence purpose.
Returns
-------
self : object
"""
X = check_array(X)
if self.assume_centered:
self.location_ = np.zeros(X.shape[1])
else:
self.location_ = X.mean(0)
covariance = empirical_covariance(
X, assume_centered=self.assume_centered)
self._set_covariance(covariance)
return self
def score(self, X_test, y=None):
"""Computes the log-likelihood of a Gaussian data set with
`self.covariance_` as an estimator of its covariance matrix.
Parameters
----------
X_test : array-like, shape = [n_samples, n_features]
Test data of which we compute the likelihood, where n_samples is
the number of samples and n_features is the number of features.
            X_test is assumed to be drawn from the same distribution as
            the data used in fit (including centering).
y
not used, present for API consistence purpose.
Returns
-------
res : float
The likelihood of the data set with `self.covariance_` as an
estimator of its covariance matrix.
"""
# compute empirical covariance of the test set
        test_cov = empirical_covariance(
X_test - self.location_, assume_centered=True)
# compute log likelihood
res = log_likelihood(test_cov, self.get_precision())
return res
def error_norm(self, comp_cov, norm='frobenius', scaling=True,
squared=True):
"""Computes the Mean Squared Error between two covariance estimators.
(In the sense of the Frobenius norm).
Parameters
----------
comp_cov : array-like, shape = [n_features, n_features]
The covariance to compare with.
norm : str
The type of norm used to compute the error. Available error types:
- 'frobenius' (default): sqrt(tr(A^t.A))
- 'spectral': sqrt(max(eigenvalues(A^t.A))
where A is the error ``(comp_cov - self.covariance_)``.
scaling : bool
If True (default), the squared error norm is divided by n_features.
If False, the squared error norm is not rescaled.
squared : bool
Whether to compute the squared error norm or the error norm.
If True (default), the squared error norm is returned.
If False, the error norm is returned.
Returns
-------
The Mean Squared Error (in the sense of the Frobenius norm) between
`self` and `comp_cov` covariance estimators.
"""
# compute the error
error = comp_cov - self.covariance_
# compute the error norm
if norm == "frobenius":
squared_norm = np.sum(error ** 2)
elif norm == "spectral":
squared_norm = np.amax(linalg.svdvals(np.dot(error.T, error)))
else:
raise NotImplementedError(
"Only spectral and frobenius norms are implemented")
# optionally scale the error norm
if scaling:
squared_norm = squared_norm / error.shape[0]
        # finally get either the squared norm or the norm
        if squared:
            result = squared_norm
        else:
            result = np.sqrt(squared_norm)
        return result
"""
LsVarRun - command ``ls -lnL /var/run``
=======================================
The ``ls -lnL /var/run`` command provides information for the listing of the
``/var/run`` directory.
Sample input is shown in the Examples. See ``FileListing`` class for
additional information.
Sample directory list::
total 20
drwx--x---. 2 0 984 40 May 15 09:29 openvpn
drwxr-xr-x. 2 0 0 40 May 15 09:30 plymouth
drwxr-xr-x. 2 0 0 40 May 15 09:29 ppp
drwxr-xr-x. 2 75 75 40 May 15 09:29 radvd
-rw-r--r--. 1 0 0 5 May 15 09:30 rhnsd.pid
drwxr-xr-x. 2 0 0 60 May 30 09:31 rhsm
drwx------. 2 32 32 40 May 15 09:29 rpcbind
-r--r--r--. 1 0 0 0 May 17 16:26 rpcbind.lock
Examples:
>>> "rhnsd.pid" in ls_var_run
False
>>> "/var/run" in ls_var_run
True
>>> ls_var_run.dir_entry('/var/run', 'openvpn')['type']
'd'
"""
from insights.specs import Specs
from .. import FileListing
from .. import parser
@parser(Specs.ls_var_run)
class LsVarRun(FileListing):
"""Parses output of ``ls -lnL /var/run`` command."""
pass
e.compile(r"I\(([^)]+)\)")
_BOLD = re.compile(r"B\(([^)]+)\)")
_MODULE = re.compile(r"M\(([^)]+)\)")
_URL = re.compile(r"U\(([^)]+)\)")
_CONST = re.compile(r"C\(([^)]+)\)")
DEPRECATED = b" (D)"
pp = PrettyPrinter()
display = Display()
def rst_ify(text):
''' convert symbols like I(this is in italics) to valid restructured text '''
try:
t = _ITALIC.sub(r'*' + r"\1" + r"*", text)
t = _BOLD.sub(r'**' + r"\1" + r"**", t)
t = _MODULE.sub(r':ref:`module_docs/' + r"\1 <\1>" + r"`", t)
t = _URL.sub(r"\1", t)
t = _CONST.sub(r'``' + r"\1" + r"``", t)
except Exception as e:
raise AnsibleError("Could not process (%s) : %s" % (str(text), str(e)))
return t
def html_ify(text):
''' convert symbols like I(this is in italics) to valid HTML '''
t = html_escape(text)
t = _ITALIC.sub("<em>" + r"\1" + "</em>", t)
t = _BOLD.sub("<b>" + r"\1" + "</b>", t)
t = _MODULE.sub("<span class='module'>" + r"\1" + "</span>", t)
t = _URL.sub("<a href='" + r"\1" + "'>" + r"\1" + "</a>", t)
t = _CONST.sub("<code>" + r"\1" + "</code>", t)
return t
def rst_fmt(text, fmt):
''' helper for Jinja2 to do format strings '''
return fmt % (text)
def rst_xline(width, char="="):
''' return a restructured text line of a given length '''
return char * width
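# Example of the markup conversion performed by the helpers above:
#
#   >>> rst_ify("I(italic), B(bold) and C(code)")
#   '*italic*, **bold** and ``code``'
#   >>> html_ify("B(bold)")
#   '<b>bold</b>'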
def write_data(text, output_dir, outputname, module=None):
''' dumps module output to a file or the screen, as requested '''
if output_dir is not None:
if module:
outputname = outputname % module
if not os.path.exists(output_dir):
os.makedirs(output_dir)
fname = os.path.join(output_dir, outputname)
fname = fname.replace(".py", "")
with open(fname, 'wb') as f:
f.write(to_bytes(text))
else:
print(text)
def get_plugin_info(module_dir, limit_to=None, verbose=False):
'''
Returns information about plugins and the categories that they belong to
:arg module_dir: file system path to the top of the plugin directory
:kwarg limit_to: If given, this is a list of plugin names to
generate information for. All other plugins will be ignored.
:returns: Tuple of two dicts containing module_info, categories, and
aliases and a set listing deprecated modules:
:module_info: mapping of module names to information about them. The fields of the dict are:
:path: filesystem path to the module
:deprecated: boolean. True means the module is deprecated otherwise not.
:aliases: set of aliases to this module name
:metadata: The modules metadata (as recorded in the module)
:doc: The documentation structure for the module
:examples: The module's examples
:returndocs: The module's returndocs
:categories: maps category names to a dict. The dict contains at
least one key, '_modules' which contains a list of module names in
that category. Any other keys in the dict are subcategories with
the same structure.
'''
categories = dict()
    module_info = defaultdict(dict)
# * windows powershell modules have documentation stubs in python docstring
# format (they are not executed) so skip the ps1 format files
    # * One glob level for every module level that we're going to traverse
files = (
glob.glob("%s/*.py" % module_dir) +
glob.glob("%s/*/*.py" % module_dir) +
glob.glob("%s/*/*/*.py" % module_dir) +
glob.glob("%s/*/*/*/*.py" % module_dir)
)
for module_path in files:
# Do not list __init__.py files
if module_path.endswith('__init__.py'):
continue
# Do not list blacklisted modules
module = os.path.splitext(os.path.basename(module_path))[0]
if module in plugin_docs.BLACKLIST['MODULE'] or module == 'base':
continue
# If requested, limit module documentation building only to passed-in
# modules.
if limit_to is not None and module.lower() not in limit_to:
continue
deprecated = False
if module.startswith("_"):
if os.path.islink(module_path):
# Handle aliases
source = os.path.splitext(os.path.basename(os.path.realpath(module_path)))[0]
module = module.replace("_", "", 1)
aliases = module_info[source].get('aliases', set())
aliases.add(module)
# In case we just created this via get()'s fallback
module_info[source]['aliases'] = aliases
continue
else:
# Handle deprecations
module = module.replace("_", "", 1)
deprecated = True
#
# Regular module to process
#
category = categories
# Start at the second directory because we don't want the "vendor"
mod_path_only = os.path.dirname(module_path[len(module_dir):])
module_categories = []
# build up the categories that this module belongs to
for new_cat in mod_path_only.split('/')[1:]:
if new_cat not in category:
category[new_cat] = dict()
category[new_cat]['_modules'] = []
module_categories.append(new_cat)
category = category[new_cat]
category['_modules'].append(module)
        # the category we will use in links (so list_of_all_plugins can point
        # to plugins/action_plugins/*)
        primary_category = ''
        if module_categories:
            primary_category = module_categories[0]
# use ansible core library to parse out doc metadata YAML and plaintext examples
doc, examples, returndocs, metadata = plugin_docs.get_docstring(module_path, verbose=verbose)
# save all the information
module_info[module] = {'path': module_path,
'deprecated': deprecated,
'aliases': set(),
'metadata': metadata,
'doc': doc,
'examples': examples,
'returndocs': returndocs,
'categories': module_categories,
'primary_category': primary_category,
}
# keep module tests out of becoming module docs
if 'test' in categories:
del categories['test']
return module_info, categories
def generate_parser():
''' generate an optparse parser '''
p = optparse.OptionParser(
version='%prog 1.0',
usage='usage: %prog [options] arg1 arg2',
description='Generate module documentation from metadata',
)
p.add_option("-A", "--ansible-version", action="store", dest="ansible_version", default="unknown", help="Ansible version number")
p.add_option("-M", "--module-dir", action="store", dest="module_dir", default=MODULEDIR, help="Ansible library path")
p.add_option("-P", "--plugin-type", action="store", dest="plugin_type", default='module', help="The type of plugin (module, lookup, etc)")
p.add_option("-T", "--template-dir", action="store", dest="template_dir", default="hacking/templates", help="directory containing Jinja2 templates")
p.add_option("-t", "--type", action='store', dest='type', choices=['rst'], default='rst', help="Document type")
p.add_option("-o", "--output-dir", action="store", dest="output_dir", default=None, help="Output directory for module files")
p.add_option("-I", "--includes-file", action="store", dest="includes_file", default=None, help="Create a file containing list of processed modules")
p.add_option("-l", "--limit-to-modules", '--limit-to', action="store", dest="limit_to", default=None,
help="Limit building module documentation to comma-separated list of plugins. Specify non-existing plugin name for no plugins.")
p.add_option('-V', action='version', help='Show version number and exit')
    p.add_option('-v', '--verbose', dest='verbosity', default=0, action="count",
                 help="verbose mode (increase number of 'v's for more)")
    return p
"""tuple sub-class which holds weak references to objects"""
import weakref
class WeakTuple( tuple ):
"""tuple sub-class holding weakrefs to items
The weak reference tuple is intended to allow you
to store references to a list of objects without
needing to manage weak references directly.
For the most part, the WeakTuple operates just
like a tuple object, in that it allows for all
of the standard tuple operations. The difference
is that the WeakTuple class only stores weak
references to its items. As a result, adding
an object to the tuple does not necessarily mean
that it will still be there later on during
execution (if the referent has been garbage
collected).
Because WeakTuple's are static (their membership
doesn't change), they will raise ReferenceError
when a sub-item is missing rather than skipping
missing items as does the WeakList. This can
occur for basically _any_ use of the tuple.
"""
    def __new__( cls, sequence=() ):
        """Create the tuple, storing weak references to the items

        Note: a tuple's contents are fixed in __new__ (tuples are
        immutable), so the wrapping must happen here; an __init__
        override could not alter the stored items.
        """
        items = [ item() if isinstance( item, weakref.ReferenceType ) else item
                  for item in sequence ]
        return super( WeakTuple, cls ).__new__( cls, map( weakref.ref, items ))
def valid( self ):
"""Explicit validity check for the tuple
Checks whether all references can be resolved,
basically just sees whether calling list(self)
raises a ReferenceError
"""
try:
list( self )
return 1
except weakref.ReferenceError:
return 0
def wrap( self, item ):
"""Wrap an individual item in a weak-reference
If the item is already a weak reference, we store
a reference to the original item. We use approximately
the same weak reference callback mechanism as the
standard weakref.WeakKeyDictionary object.
"""
if isinstance( item, weakref.ReferenceType ):
item = item()
return weakref.ref( item )
def unwrap( self, item ):
"""Unwrap an individual item
This is a fairly trivial operation at the moment,
it merely calls the item with no arguments and
returns the result.
"""
ref = item()
if ref is None:
raise weakref.ReferenceError( """%s instance no longer valid (item %s has been collected)"""%( self.__class__.__name__, item))
return ref
def __iter__( self ):
"""Iterate over the tuple, yielding strong references"""
index = 0
while index < len(self):
yield self[index]
index += 1
def __getitem__( self, index ):
"""Get the item at the given index"""
return self.unwrap(super (WeakTuple,self).__getitem__( index ))
def __getslice__( self, start, stop ):
"""Get the items in the range start to stop"""
return map(
self.unwrap,
super (WeakTuple,self).__getslice__( start, stop)
)
def __contains__( self, item ):
"""Return boolean indicating whether the item is in the tuple"""
for node in self:
if item is node:
return 1
return 0
def count( self, item ):
"""Return integer count of instances of item in tuple"""
count = 0
for node in self:
if item is node:
count += 1
return count
def index( self, item ):
"""Return integer index of item in tuple"""
count = 0
for node in self:
if item is node:
return count
count += 1
return -1
def __add__(self, other):
"""Return a new path with other as tail"""
return tuple(self) + other
def __eq__( self, sequence ):
"""Compare the tuple to another (==)"""
return list(self) == sequence
def __ge__( self, sequence ):
"""Compare the tuple to another (>=)"""
return list(self) >= sequence
    def __gt__( self, sequence ):
"""Compare the tuple to another (>)"""
return list(self) > sequence
def __le__( self, sequence ):
"""Compare the tuple to another (<=)"""
return list(self) <= sequence
def __lt__( self, sequence ):
"""Compare the tuple to another (<)"""
        return list(self) < sequence
def __ne__( self, sequence ):
"""Compare the tuple to another (!=)"""
return list(self) != sequence
def __repr__( self ):
"""Return a code-like representation of the weak tuple"""
return """%s( %s )"""%( self.__class__.__name__, super(WeakTuple,self).__repr__())
list:
fields_to_strip += self._exclude_attributes_by_policy(
request.context, obj_list[0])
collection = {self._collection:
[self._filter_attributes(
request.context, obj,
fields_to_strip=fields_to_strip)
for obj in obj_list]}
pagination_links = pagination_helper.get_links(obj_list)
if pagination_links:
collection[self._collection + "_links"] = pagination_links
# Synchronize usage trackers, if needed
resource_registry.resync_resource(
request.context, self._resource, request.context.tenant_id)
return collection
def _item(self, request, id, do_authz=False, field_list=None,
parent_id=None):
"""Retrieves and formats a single element of the requested entity."""
kwargs = {'fields': field_list}
action = self._plugin_handlers[self.SHOW]
if parent_id:
kwargs[self._parent_id_name] = parent_id
obj_getter = getattr(self._plugin, action)
obj = obj_getter(request.context, id, **kwargs)
# Check authz
# FIXME(salvatore-orlando): obj_getter might return references to
# other resources. Must check authZ on them too.
if do_authz:
policy.enforce(request.context,
action,
obj,
pluralized=self._collection)
return obj
@db_api.retry_db_errors
def index(self, request, **kwargs):
"""Returns a list of the requested entity."""
parent_id = kwargs.get(self._parent_id_name)
# Ensure policy engine is initialized
policy.init()
return self._items(request, True, parent_id)
@db_api.retry_db_errors
def show(self, request, id, **kwargs):
"""Returns detailed information about the requested entity."""
try:
# NOTE(salvatore-orlando): The following ensures that fields
# which are needed for authZ policy validation are not stripped
# away by the plugin before returning.
field_list, added_fields = self._do_field_list(
api_common.list_args(request, "fields"))
parent_id = kwargs.get(self._parent_id_name)
# Ensure policy engine is initialized
policy.init()
return {self._resource:
self._view(request.context,
self._item(request,
id,
do_authz=True,
field_list=field_list,
parent_id=parent_id),
fields_to_strip=added_fields)}
except oslo_policy.PolicyNotAuthorized:
# To avoid giving away information, pretend that it
# doesn't exist
msg = _('The resource could not be found.')
raise webob.exc.HTTPNotFound(msg)
def _emulate_bulk_create(self, obj_creator, request, body, parent_id=None):
objs = []
try:
for item in body[self._collection]:
kwargs = {self._resource: item}
if parent_id:
kwargs[self._parent_id_name] = parent_id
fields_to_strip = self._exclude_attributes_by_policy(
request.context, item)
objs.append(self._filter_attributes(
request.context,
obj_creator(request.context, **kwargs),
fields_to_strip=fields_to_strip))
return objs
# Note(salvatore-orlando): broad catch as in theory a plugin
# could raise any kind of exception
except Exception:
with excutils.save_and_reraise_exception():
for obj in objs:
obj_deleter = getattr(self._plugin,
self._plugin_handlers[self.DELETE])
try:
kwargs = ({self._parent_id_name: parent_id}
if parent_id else {})
obj_deleter(request.context, obj['id'], **kwargs)
except Exception:
# broad catch as our only purpose is to log the
# exception
LOG.exception(_LE("Unable to undo add for "
"%(resource)s %(id)s"),
{'resource': self._resource,
'id': obj['id']})
# TODO(salvatore-orlando): The object being processed when the
# plugin raised might have been created or not in the db.
# We need a way for ensuring that if it has been created,
# it is then deleted
def create(self, request, body=None, **kwargs):
self._notifier.info(request.context,
self._resource + '.create.start',
body)
return self._create(request, body, **kwargs)
@db_api.retry_db_errors
def _create(self, request, body, **kwargs):
"""Creates a new instance of the requested entity."""
parent_id = kwargs.get(self._parent_id_name)
body = Controller.prepare_request_body(request.context,
body, True,
self._resource, self._attr_info,
allow_bulk=self._allow_bulk)
action = self._plugin_handlers[self.CREATE]
# Check authz
if self._collection in body:
# Have to account for bulk create
items = body[self._collection]
else:
items = [body]
# Ensure policy engine is initialized
policy.init()
# Store requested resource amounts grouping them by tenant
# This won't work with multiple resources. However because of the
# current structure of this controller there will hardly be more than
# one resource for which reservations are being made
request_deltas = collections.defaultdict(int)
for item in items:
self._validate_network_tenant_ownership(request,
item[self._resource])
policy.enforce(request.context,
action,
item[self._resource],
pluralized=self._collection)
if 'tenant_id' not in item[self._resource]:
# no tenant_id - no quota check
continue
tenant_id = item[self._resource]['tenant_id']
request_deltas[tenant_id] += 1
# Quota enforcement
reservations = []
try:
for (tenant, delta) in request_deltas.items():
reservation = quota.QUOTAS.make_reservation(
request.context,
tenant,
{self._resource: delta},
self._plugin)
                reservations.append(reservation)
except n_exc.QuotaResourceUnknown as e:
            # We don't want to quota this resource
LOG.debug(e)
def notify(create_result):
# Ensure usage trackers for all resources affected by this API
# operation are marked as dirty
with request.context.session.begin():
# Commit the reservation(s)
for reservation in reservations:
quota.QUOTAS.commit_reservation(
request.context, reservation.reservation_id)
resource_registry.set_resources_dirty(request.context)
notifier_method = self._resource + '.create.end'
self._notifier.info(request.context,
notifier_method,
create_result)
            registry.notify(self._resource, e
# -*-coding: utf-8 -*-
import colander
from . import (
SelectInteger,
ResourceSchema,
BaseForm,
BaseSearchForm,
)
from ..resources.leads_offers import LeadsOffersResource
from ..models.lead_offer import LeadOffer
from ..models.currency import Currency
from ..models.supplier import Supplier
from ..models.service import Service
from ..lib.qb.leads_offers import LeadsOffersQueryBuilder
from ..lib.utils.security_utils import get_auth_employee
class _LeadOfferSchema(ResourceSchema):
service_id = colander.SchemaNode(
SelectInteger(Service),
)
supplier_id = colander.SchemaNode(
SelectInteger(Supplier),
)
currency_id = colander.SchemaNode(
SelectInteger(Currency),
)
price = colander.SchemaNode(
colander.Money(),
)
status = colander.SchemaNode(
colander.String(),
)
descr = colander.SchemaNode(
colander.String(),
validator=colander.Length(max=255),
)
class LeadOfferForm(BaseForm):
_schema = _LeadOfferSchema
def submit(self, lead_offer=None):
if not lead_offer:
lead_offer = LeadOffer(
                resource=LeadsOffersResource.create_resource(
get_auth_employee(self.request)
)
)
lead_offer.service_id = self._controls.get('service_id')
lead_offer.currency_id = self._controls.get('currency_id')
        lead_offer.supplier_id = self._controls.get('supplier_id')
lead_offer.price = self._controls.get('price')
lead_offer.status = self._controls.get('status')
lead_offer.descr = self._controls.get('descr')
return lead_offer
class LeadOfferSearchForm(BaseSearchForm):
_qb = LeadsOffersQueryBuilder
# -*- coding: utf-8 -*-
# Copyright 2016 Yelp Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from behave import given
from behave import when
from steps.util import create_consumer_group
from steps.util import create_random_group_id
from steps.util import create_random_topic
from steps.util import initialize_kafka_offsets_topic
from steps.util import produce_example_msg
PRODUCED_MSG_COUNT = 82
CONSUMED_MSG_COUNT = 39
@given(u'we have an existing kafka cluster with a topic')
def step_impl1(context):
context.topic = create_random_topic(1, 1)
@given(u'we have a kafka consumer group')
def step_impl2(context):
context.group = create_random_group_id()
context.client = create_consumer_group(
context.topic,
context.group,
)
@when(u'we produce some number of messages into the topic')
def step_impl3(context):
produce_example_msg(context.topic, num_messages=PRODUCED_MSG_COUNT)
context.msgs_produced = PRODUCED_MSG_COUNT
@when(u'we consume some number of messages from the topic')
def step_impl4(context):
context.group = create_random_group_id()
context.client = create_consumer_group(
context.topic,
context.group,
num_messages=CONSUMED_MSG_COUNT,
)
context.msgs_consumed = CONSUMED_MSG_COUNT
@given(u'we have initialized kafka offsets storage')
def step_impl5(context):
initialize_kafka_offsets_topic()
@given(u'we have an existing kafka cluster')
def step_impl6(context):
pass
@given(u'we have an existing kafka cluster with multiple topics')
def step_impl7(context):
context.topic = []
context.topic.append(create_random_topic(1, 1, 'abcde'))
context.topic.append(create_random_topic(1, 1, 'abcd'))
'''This file will take arguments from the command line; if none are found it
will look for a .bot file, and if that isn't found it will prompt the user for auth
tokens. With this information the masterbot will connect to its own twitch channel and await a !connect command'''
#Author MrYevral
#check for .bot file in current directory
import os
import sys
def getBotInfo():
if len(sys.argv) > 2:
if sys.argv[2] == '-c':
newBotFile()
else:
print "incorrect use of | flags please use -c for creating a bot"
'''for file in os.listdir("."):
if file.endswith(".bot"):
print file'''
#!/usr/bin/python
#
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This code example deactivates all active placements. To determine which
placements exist, run get_all_placements.py."""
__author__ = 'api.shamjeff@gmail.com (Jeff Sham)'
# Locate the client library. If module was installed via "setup.py" script, then
# the following two lines are not needed.
import os
import sys
sys.path.insert(0, os.path.join('..', '..', '..', '..'))
# Import appropriate classes from the client library.
from adspygoogle import DfpClient
from adspygoogle.dfp import DfpUtils
# Initialize client object.
client = DfpClient(path=os.path.join('..', '..', '..', '..'))
# Initialize appropriate service.
placement_service = client.GetService('PlacementService', version='v201306')
# Create query.
values = [{
'key': 'status',
'value': {
'xsi_type': 'TextValue',
'value': 'ACTIVE'
}
}]
query = 'WHERE status = :status'
# Get placements by statement.
placements = DfpUtils.GetAllEntitiesByStatementWithService(
placement_service, query=query, bind_vars=values)
for placement in placements:
    print ('Placement with id \'%s\', name \'%s\', and status \'%s\' will be '
'deactivated.' % (placement['id'], placement['name'],
placement['status']))
print 'Number of placements to be deactivated: %s' % len(placements)
# Perform action.
result = placement_service.PerformPlacementAction(
{'type': 'DeactivatePlacements'}, {'query': query, 'values': values})[0]
# Display results.
if result and int(result['numChanges']) > 0:
    print 'Number of placements deactivated: %s' % result['numChanges']
else:
print 'No placements were deactivated.'
# -*- coding: utf-8 *-*
from pyglet.window import key
from pyglet import clock
from . import util, physicalobject
from . import resources
class Ship(physicalobject.PhysicalObject):
"""A class for the player"""
def __init__(self, thrust_image=None, *args, **kwargs):
super().__init__(*args, **kwargs)
# Set some easy-to-tweak constants
# play values
self.rotate_speed = 170.0
self.bullet_speed = 500.0
self.thrust_acc = 500
self.friction = 0.95
self.bullet_duration = 0.6
self.thrust = False
self.thrust_image = thrust_image
self.normal_image = self.image
self.bullets = set() # FIXME: bullet by OOT
def on_key_press(self, symbol, modifiers):
if symbol == key.SPACE:
self.shoot()
elif symbol == key.LEFT:
self.turn(-1)
elif symbol == key.RIGHT:
self.turn(1)
elif symbol == key.UP:
self.set_thrust(True)
def on_key_release(self, symbol, modifiers):
if symbol in (key.LEFT, key.RIGHT):
self.turn(0)
elif symbol == key.UP:
self.set_thrust(False)
def update(self, dt):
super().update(dt)
if self.thrust and self.thrust_image:
self.image = self.thrust_image
else:
self.image = self.normal_image
# update velocity
if self.thrust:
acc = util.angle_to_vector(self.rotation)
for i in (0,1):
self.vel[i] += acc[i] * self.thrust_acc * dt
# add friction
for i in (0,1):
self.vel[i] *= (1 - self.friction * dt)
for bullet in set(self.bullets):
if bullet.update(dt):
self.bullets.remove(bullet)
return False
def set_thrust(self, on):
self.thrust = on
if on:
resources.thrust_sound.seek(0)
resources.thrust_sound.play()
else:
resources.thrust_sound.pause()
def turn(self, clockwise):
self.rotation_speed = clockwise * self.rotate_speed
def shoot(self):
resources.bullet_sound.play()
forward = util.angle_to_vector(self.rotation)
bullet_pos = [self.x + self.radius * forward[0], self.y + self.radius * forward[1]]
bullet_vel = [self.vel[0] + self.bullet_speed * forward[0], self.vel[1] + self.bullet_speed * forward[1]]
bullet = physicalobject.PhysicalObject(lifespan=self.bullet_duration, vel=bullet_vel, x=bullet_pos[0], y=bullet_pos[1],
                                               img=resources.shot_image, batch=self.batch, group=self.group, screensize=self.screensize)
self.bullets.add(bullet)
def destroy(self):
# check invulnerability
if self.opacity != 255:
return
explosion = super().destroy()
        self.rotation = -90
self.x = self.screensize[0] / 2
self.y = self.screensize[1] / 2
self.vel = [0, 0]
self.set_thrust(False)
self.visible = True
return explosion
def normal_mode(self, dt):
self.opacity = 255
def invulnerable(self, time):
# be invulnerable for a brief time
self.opacity = 128
clock.schedule_once(self.normal_mode, time)
from django.contrib import admin
from freemix.exhibit import models
class CanvasAdmin(admin.ModelAdmin):
list_display = ('title', 'description')
search_fields = ('title', 'description',)
admin.site.register(models.Canvas, CanvasAdmin)
class ExhibitAdmin(admin.ModelAdmin):
list_display = ('slug', 'owner',)
    search_fields = ('slug', 'title', 'description', 'owner__username')
admin.site.register(models.Exhibit, ExhibitAdmin)
class ThemeAdmin(admin.ModelAdmin):
list_display = ('title', 'description')
search_fields = ('title', 'description',)
admin.site.register(models.Theme, ThemeAdmin)
import os
requirements = ["numpy", "scipy", "pandas",
"matplotlib", "peakutils", "uncertainties",
"pyqtgraph"]
for package in requirements:
os.system("pip install " + package | )
will compare with.
| If a string, it can be a single command,
| multiple commands separated by commas, or
| a filepath location of a file with multiple
| commands, each on its own line.
@type commands: str or list
@param confirmed: integer value of the number of **seconds** to
| confirm the commit for, if requested.
@type confirmed: int
@param comment: string that the user wants to comment the commit
| with. Will show up in the 'show system commit' log.
@type comment: str
@param at_time: string designating the time at which the commit
| should happen. Can be in one of two Junos approved
| formats.
        @type at_time: str
@param synchronize: boolean set to true if desiring a commit
| synchronize operation.
@type synchronize: bool
@param req_format: string to specify the response format. Accepts
| either 'text' or 'xml'
@type req_format: str
@returns: The reply from the device.
@rtype: str
"""
# ncclient doesn't support a truly blank commit, so if nothing is
# passed, use 'annotate system' to make a blank commit
if not commands:
commands = 'annotate system ""'
clean_cmds = []
for cmd in clean_lines(commands):
clean_cmds.append(cmd)
# try to lock the candidate config so we can make changes.
self.lock()
        self._session.load_configuration(action='set', config=clean_cmds)
results = ""
# confirmed and commit at are mutually exclusive. commit confirm
# takes precedence.
if confirmed:
results = self._session.commit(confirmed=True,
timeout=str(confirmed),
comment=comment,
synchronize=synchronize)
else:
results = self._session.commit(comment=comment, at_time=at_time,
synchronize=synchronize)
self.unlock()
if results:
if req_format == 'xml':
return results
# commit() DOES NOT return a parse-able xml tree, so we
# convert it to an ElementTree xml tree.
results = ET.fromstring(results.tostring)
out = ''
for i in results.iter():
# the success message is just a tag, so we need to get it
# specifically.
if i.tag == 'commit-check-success':
out += 'configuration check succeeds\n'
elif i.tag == 'commit-success':
out += 'commit complete\n'
elif i.tag == 'ok':
out += 'commit complete\n'
# this is for normal output with a tag and inner text, it will
# strip the inner text and add it to the output.
elif i.text is not None:
if i.text.strip() + '\n' != '\n':
out += i.text.strip() + '\n'
# this is for elements that don't have inner text,
# it will add the tag to the output.
elif i.text is None:
if i.tag + '\n' != '\n':
out += i.tag + '\n'
return out
return False
@check_instance
def commit_check(self, commands="", req_format="text"):
""" Execute a commit check operation.
Purpose: This method will take in string of multiple commands,
                 | and perform a 'commit check' on the device to ensure
| the commands are syntactically correct. The response can
| be formatted as text or as xml.
@param commands: A string, filepath, or list of multiple commands
| that the device will compare with.
@type commands: str or list
@param req_format: The desired format of the response, defaults to
| 'text', but also accepts 'xml'
@type req_format: str
@returns: The reply from the device.
@rtype: str
"""
if not commands:
raise InvalidCommandError('No commands specified')
clean_cmds = []
for cmd in clean_lines(commands):
clean_cmds.append(cmd)
self.lock()
self._session.load_configuration(action='set', config=clean_cmds)
# conn.validate() DOES NOT return a parse-able xml tree, so we
# convert it to an ElementTree xml tree.
results = ET.fromstring(self._session.validate(
source='candidate').tostring)
# release the candidate configuration
self.unlock()
if req_format == "xml":
return ET.tostring(results)
out = ""
# we have to parse the elementTree object, and get the text
# from the xml.
for i in results.iter():
# the success message is just a tag, so we need to get it
# specifically.
if i.tag == 'commit-check-success':
out += 'configuration check succeeds\n'
# this is for normal output with a tag and inner text, it will
# strip the inner text and add it to the output.
elif i.text is not None:
if i.text.strip() + '\n' != '\n':
out += i.text.strip() + '\n'
# this is for elements that don't have inner text, it will add the
# tag to the output.
elif i.text is None:
if i.tag + '\n' != '\n':
out += i.tag + '\n'
return out
@check_instance
def compare_config(self, commands="", req_format="text"):
""" Execute a 'show | compare' against the specified commands.
Purpose: This method will take in string of multiple commands,
                 | and perform a 'show | compare' on the device to show the
| differences between the active running configuration and
| the changes proposed by the passed commands parameter.
@param commands: A string, filepath, or list of multiple commands
| that the device will compare with.
@type commands: str or list
@param req_format: The desired format of the response, defaults to
| 'text', but also accepts 'xml'
@type req_format: str
@returns: The reply from the device.
@rtype: str
"""
if not commands:
raise InvalidCommandError('No commands specified')
clean_cmds = [cmd for cmd in clean_lines(commands)]
self.lock()
self._session.load_configuration(action='set', config=clean_cmds)
out = self._session.compare_configuration()
self.unlock()
if req_format.lower() == "xml":
return out
return out.xpath(
'configuration-information/configuration-output')[0].text
def connect(self):
""" Establish a connection to the device.
Purpose: This method is used to make a connection to the junos
| device. The internal property conn_type is what
                 | determines the type of connection we make to the device.
| - 'paramiko' is used for operational commands (to allow
| pipes in commands)
| - 'scp' is used for copying files
                 | - 'shell' is used to send shell commands
| - 'root' is used when logging into the device as root, and
| wanting to send operational commands
| - 'ncclient' is used for the rest (commit, compare_config,
| commit_check)
@returns: None
@rtype: None
"""
if self.conn_type == 'paramiko':
self._session = paramiko.SSHClient()
            # These
from iliasCorrector import db
def _split_ident(ident):
data = ident.split('_')
matr = int(data[-1])
last = data[0]
first = ' '.join(data[1:-2])
return first, last, matr
class Exercise(db.Model):
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(80), unique=True)
path = db.Column(db.String(256), unique=True)
submissions = db.relationship('Submission', backref='exercise', lazy='dynamic')
@property
def num_corrected(self):
return len(self.submissions.filter(Submission.grade != None).all())
@property
def num_submissions(self):
return len(self.submissions.all())
@property
def num_to_correct(self):
return len(self.submissions.filter_by(grade=None).all())
def __repr__(self):
return '<Exercise {}>'.format(self.name)
class Submission(db.Model):
id = db.Column(db.Integer, primary_key=True)
grade = db.Column(db.Float)
exercise_id = db.Column(db.Integer, db.ForeignKey('exercise.id'))
student_ident = db.Column(db.String(256))
files = db.relationship('File', backref='submission', lazy='dynamic')
remarks = db.Column(db.Text)
def __repr__(self):
return '<Submission of {} for exercise {}>'.format(self.student_ident,
self.exercise)
@property
def first_name(self):
return _split_ident(self.student_ident)[0]
@property
def last_name(self):
return _split_ident(self.student_ident)[1]
    @property
def student(self):
return '{}, {}'.format(self.last_name, self.first_name)
@property
    def matriculation_number(self):
return _split_ident(self.student_ident)[2]
class File(db.Model):
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(80))
path = db.Column(db.String(256))
submission_id = db.Column(db.Integer, db.ForeignKey('submission.id'))
def __repr__(self):
return '<File {}>'.format(self.name)
import numpy as np
tansig = lambda n: 2 / (1 + np.exp(-2 * n)) - 1
sigmoid = lambda n: 1 / (1 + np.exp(-n))
hardlim = lambda n: 1 if n >= 0 else 0
purelin = lambda n: n
relu = lambda n: np.fmax(0, n)
square_error = lambda x, y: np.sum(0.5 * (x - y)**2)
sig_prime = lambda z: sigmoid(z) * (1 - sigmoid(z))
relu_prime = lambda z: np.where(z > 0, 1.0, 0.0)  # 1 for z > 0, else 0
softmax = lambda n: np.exp(n)/np.sum(np.exp(n))
softmax_prime = lambda n: softmax(n) * (1 - softmax(n))
cross_entropy = lambda x, y: -np.dot(x, np.log(y))
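# Quick sanity checks for the definitions above:
#
#   >>> bool(np.isclose(softmax(np.array([1.0, 2.0, 3.0])).sum(), 1.0))
#   True
#   >>> hardlim(-0.5), hardlim(0.5)
#   (0, 1)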
import pytest
def test_zero_division():
with pytest.raises(ZeroDivisionError):
1 / 0
def test_recursion_depth():
with pytest.raises(RuntimeError) as excinfo:
def f():
f()
f()
    assert 'maximum recursion depth exceeded' in str(excinfo.value)
from __future__ import unicode_literals
from django.db import models
from db.models import Bin
class CollectionEntry(models.Model):
bin_obj = models.ForeignKey(Bin, related_name='requested_bins')
    fullness = models.IntegerField()
date_added = models.DateTimeField(auto_now_add=True)
# -*- coding: utf-8 -*-
import unittest
from cwr.grammar.factory.config import rule_options
__author__ = 'Bernardo Martínez Garrido'
__license__ = 'MIT'
__status__ = 'Development'
class TestConfigOptions(unittest.TestCase):
def setUp(self):
self._rule = rule_options
def test_zero_options(self):
line = '()'
result = self._rule.parseString(line)
self.assertEqual(1, len(result))
        self.assertEqual('', result[0])
def test_one_options(self):
line = '(option2)'
result = self._rule.parseString(line)
self.assertEqual(1, len(result))
self.assertEqual('option2', result[0])
def test_two_options(self):
line = '(option1, option2)'
result = self._rule.parseString(line)
self.assertEqual(2, len(result))
        self.assertEqual('option1', result[0])
self.assertEqual('option2', result[1])
"""
Operations:
P - login
G - list - list the dir's content
G - read - reads a file
G - info - infos about a file
P - write - writes a file
P - mkdir - makes a dir
P - copy - to=...
P - move - to=...
P - delete - DELETE
P - logout
"""
import canister
import bottle
import os.path
import json
import sys
import shutil
import time
# a callback that can be replaced to determine whether a resource is allowed access or not
# by default, read access is authorized and write access forbidden
def authorized(write, path):
#return not write
return True
root = os.getcwd()
app = bottle.Bottle()
app.install(canister.Canister())
def fullpath(path):
global root
fp = os.path.join(root, path.strip('/'))
fp = os.path.abspath(fp)
if not fp.startswith(root):
raise Exception('Path forbidden: ' + path)
else:
return fp
@app.get('<path:path>')
def get(path='', hidden=False, jstree=False):
path = path.strip('/')
if not authorized(False, path):
return bottle.Response(status=401) # TODO
raise Exception('Unauthorized path: ' + path)
global root
fpath = fullpath(path)
print(fpath)
if os.path.isfile(fpath):
return bottle.static_file(path, root=root)
elif os.path.isdir(fpath):
        files = os.listdir(fpath)
listing = []
for name in files:
if not hidden and name[0] == '.':
continue
p = os.path.join(fpath, name)
item = {
'name': name,
'type': 'dir' if os.path.isdir(p) else 'file',
'is_directory': os.path.isdir(p),
                'size': os.path.getsize(p),
'last_modified': time.ctime(os.path.getmtime(p))
}
listing.append( item )
listing.sort(key=lambda x: x['name'])
bottle.response.content_type = 'application/json'
return json.dumps(listing)
else:
raise Exception('No such path: ' + path)
@app.post('<path:path>')
def post(path, cmd, to=None):
if not authorized(True, path):
return bottle.Response(status=401)
raise Exception('Unauthorized path: ' + path)
# TODO: exceptions might reveal real paths
fpath = fullpath(path)
app.log.debug('Full path: %s' % fpath)
cmd = cmd.lower()
    if cmd == 'set':
        content = bottle.request.body.read()
        file = open(fpath, mode='wb')
        file.write(content)
        file.close()
    elif cmd == 'upload':
        for name, up in bottle.request.files.items():
            # up is a bottle FileUpload; write its raw stream to disk
            file = open(os.path.join(fpath, name), mode='wb')
            file.write(up.file.read())
            file.close()
elif cmd == 'mkdir':
# os.mkdir
# build dirs recursively
os.makedirs(fpath, exist_ok=True)
elif cmd == 'move':
if not to:
raise Exception('Missing destination ("to=...")')
fto = fullpath(to)
shutil.move(fpath, fto)
elif cmd == 'rename':
if not to:
raise Exception('Missing destination ("to=...")')
        os.rename(fpath, fullpath(to))
elif cmd == 'copy':
if not to:
raise Exception('Missing destination ("to=...")')
fto = fullpath(to)
shutil.copy(fpath, fto)
else:
raise Exception('Unknown command: %s' % cmd)
@app.delete('<path:path>')
def delete(path):
if not authorized(True, path):
raise Exception('Unauthorized path: ' + path) # TODO: return response instead
fpath = fullpath(path)
shutil.rmtree(fpath)
if __name__ == '__main__':
print(sys.argv)
args = sys.argv
#if len(args) != 2:
# print('Usage: %s <path-to-serve>' % os.path.basename(args[0]))
#root = os.path.abspath(args[1])
#root = os.getcwd()
import webfs
app.mount('@admin', webfs.app)
print('Serving: ' + root)
app.run(debug=True, host='0.0.0.0')
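# Example interactions with the routes above, assuming the default bottle
# port (8080):
#
#   GET    /some/dir              -> JSON listing of the directory
#   GET    /some/file.txt         -> raw file contents
#   POST   /new/dir?cmd=mkdir     -> create the directory tree
#   POST   /a?cmd=move&to=/b      -> move /a to /b
#   DELETE /some/dir              -> recursive delete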
"""
-------------------------------------------------------------------------
AIOpening - special.py
useful functions
created: 2017/09/01 in PyCharm
(c) 2017 Sven - ducandu GmbH
-------------------------------------------------------------------------
"""
import numpy as np
import tensorflow as tf
def weighted_sample(weights, objects):
"""
Return a random item from objects, with the weighting defined by weights (which must sum to 1).
"""
    # An array of the weights, cumulatively summed.
cs = np.cumsum(weights)
# Find the index of the first weight over a random value.
idx = sum(cs < np.random.rand())
return objects[min(idx, len(objects) - 1)]
def to_one_hot(ind, dim):
ret = np.zeros(dim)
ret[ind] = 1
return ret
def to_one_hot_batch(inds, dim):
ret = np.zeros((len(inds), dim))
ret[np.arange(len(inds)), inds] = 1
    return ret
def from_one_hot(v):
return np.nonzero(v)[0][0]
def from_one_hot_batch(v):
if len(v) == 0:
return []
return np.nonzero(v)[1]
def new_tensor(name, n_dim, dtype):
return tf.placeholder(dtype=dtype, shape=[None] * n_dim, name=name)
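# Usage sketch for the helpers above:
#
#   >>> to_one_hot(2, 4)
#   array([0., 0., 1., 0.])
#   >>> int(from_one_hot(to_one_hot(2, 4)))
#   2
#   >>> weighted_sample([0.1, 0.9], ["a", "b"])   # "b" about 90% of the time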
#
# LMirror is Copyright (C) 2010 Robert Collins <robertc@robertcollins.net>
#
# LMirror is free software: you can redistribute it and/or modify it under the
# terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
# PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program. If not, see <http://www.gnu.org/licenses/>.
#
# In the LMirror source tree the file COPYING.txt contains the GNU General Public
# License version 3.
#
"""Tests for the commands command."""
from l_mirror.commands import commands
from l_mirror.ui.model import UI
from l_mirror.tests import ResourcedTestCase
class TestCommandCommands(ResourcedTestCase):
def get_test_ui_and_cmd(self):
ui = UI()
cmd = commands.commands(ui)
ui.set_command(cmd)
return ui, cmd
def test_shows_a_table_of_commands(self):
ui, cmd = self.get_test_ui_and_cmd()
cmd.execute()
self.assertEqual(1, len(ui.outputs))
self.assertEqual('table', ui.outputs[0][0])
self.assertEqual(('command', 'description'), ui.outputs[0][1][0])
command_names = [row[0] for row in ui.outputs[0][1]]
summaries = [row[1] for row in ui.outputs[0][1]]
self.assertTrue('help' in command_names)
self.assertTrue(
'Get help on a command.' in summaries)
import tensorflow as tf
from tensorflow.python.ops.rnn_cell import DropoutWrapper, RNNCell, LSTMStateTuple
from my.tensorflow import exp_mask, flatten
from my.tensorflow.nn import linear, softsel, double_linear_logits
class SwitchableDropoutWrapper(DropoutWrapper):
def __init__(self, cell, is_train, input_keep_prob=1.0, output_keep_prob=1.0,
seed=None):
super(SwitchableDropoutWrapper, self).__init__(cell, input_keep_prob=input_keep_prob, output_keep_prob=output_keep_prob,
seed=seed)
self.is_train = is_train
def __call__(self, inputs, state, scope=None):
outputs_do, new_state_do = super(SwitchableDropoutWrapper, self).__call__(inputs, state, scope=scope)
tf.get_variable_scope().reuse_variables()
outputs, new_state = self._cell(inputs, state, scope)
outputs = tf.cond(self.is_train, lambda: outputs_do, lambda: outputs)
if isinstance(state, tuple):
new_state = state.__class__(*[tf.cond(self.is_train, lambda: new_state_do_i, lambda: new_state_i)
for new_state_do_i, new_state_i in zip(new_state_do, new_state)])
else:
new_state = tf.cond(self.is_train, lambda: new_state_do, lambda: new_state)
        return outputs, new_state
class TreeRNNCell(RNNCell):
def __init__(self, cell, input_size, reduce_func):
self._cell = cell
self._input_size = input_size
        self._reduce_func = reduce_func
def __call__(self, inputs, state, scope=None):
"""
:param inputs: [N*B, I + B]
:param state: [N*B, d]
:param scope:
:return: [N*B, d]
"""
with tf.variable_scope(scope or self.__class__.__name__):
d = self.state_size
x = tf.slice(inputs, [0, 0], [-1, self._input_size]) # [N*B, I]
mask = tf.slice(inputs, [0, self._input_size], [-1, -1]) # [N*B, B]
B = tf.shape(mask)[1]
prev_state = tf.expand_dims(tf.reshape(state, [-1, B, d]), 1) # [N, B, d] -> [N, 1, B, d]
mask = tf.tile(tf.expand_dims(tf.reshape(mask, [-1, B, B]), -1), [1, 1, 1, d]) # [N, B, B, d]
# prev_state = self._reduce_func(tf.tile(prev_state, [1, B, 1, 1]), 2)
prev_state = self._reduce_func(exp_mask(prev_state, mask), 2) # [N, B, d]
prev_state = tf.reshape(prev_state, [-1, d]) # [N*B, d]
return self._cell(x, prev_state)
@property
def state_size(self):
return self._cell.state_size
@property
def output_size(self):
return self._cell.output_size
class NoOpCell(RNNCell):
def __init__(self, num_units):
self._num_units = num_units
def __call__(self, inputs, state, scope=None):
return state, state
@property
def state_size(self):
return self._num_units
@property
def output_size(self):
return self._num_units
class MatchCell(RNNCell):
def __init__(self, cell, input_size, q_len):
self._cell = cell
self._input_size = input_size
# FIXME : This won't be needed with good shape guessing
self._q_len = q_len
@property
def state_size(self):
return self._cell.state_size
@property
def output_size(self):
return self._cell.output_size
def __call__(self, inputs, state, scope=None):
"""
:param inputs: [N, d + JQ + JQ * d]
:param state: [N, d]
:param scope:
:return:
"""
with tf.variable_scope(scope or self.__class__.__name__):
c_prev, h_prev = state
x = tf.slice(inputs, [0, 0], [-1, self._input_size])
q_mask = tf.slice(inputs, [0, self._input_size], [-1, self._q_len]) # [N, JQ]
qs = tf.slice(inputs, [0, self._input_size + self._q_len], [-1, -1])
qs = tf.reshape(qs, [-1, self._q_len, self._input_size]) # [N, JQ, d]
x_tiled = tf.tile(tf.expand_dims(x, 1), [1, self._q_len, 1]) # [N, JQ, d]
h_prev_tiled = tf.tile(tf.expand_dims(h_prev, 1), [1, self._q_len, 1]) # [N, JQ, d]
f = tf.tanh(linear([qs, x_tiled, h_prev_tiled], self._input_size, True, scope='f')) # [N, JQ, d]
a = tf.nn.softmax(exp_mask(linear(f, 1, True, squeeze=True, scope='a'), q_mask)) # [N, JQ]
q = tf.reduce_sum(qs * tf.expand_dims(a, -1), 1)
z = tf.concat(1, [x, q]) # [N, 2d]
return self._cell(z, state)
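# Input-packing sketch for MatchCell (hypothetical tensors, mirroring the
# tf.slice calls above): the caller flattens the question into each step's
# input as [x | q_mask | reshaped qs], e.g.
#   inputs = tf.concat(1, [x, q_mask, tf.reshape(qs, [-1, JQ * d])])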
class AttentionCell(RNNCell):
def __init__(self, cell, memory, mask=None, controller=None, mapper=None, input_keep_prob=1.0, is_train=None):
"""
Early fusion attention cell: uses the (inputs, state) to control the current attention.
:param cell:
:param memory: [N, M, m]
:param mask:
:param controller: (inputs, prev_state, memory) -> memory_logits
"""
self._cell = cell
self._memory = memory
self._mask = mask
self._flat_memory = flatten(memory, 2)
self._flat_mask = flatten(mask, 1)
if controller is None:
controller = AttentionCell.get_linear_controller(True, is_train=is_train)
self._controller = controller
if mapper is None:
mapper = AttentionCell.get_concat_mapper()
elif mapper == 'sim':
mapper = AttentionCell.get_sim_mapper()
self._mapper = mapper
@property
def state_size(self):
return self._cell.state_size
@property
def output_size(self):
return self._cell.output_size
def __call__(self, inputs, state, scope=None):
with tf.variable_scope(scope or "AttentionCell"):
memory_logits = self._controller(inputs, state, self._flat_memory)
sel_mem = softsel(self._flat_memory, memory_logits, mask=self._flat_mask) # [N, m]
new_inputs, new_state = self._mapper(inputs, state, sel_mem)
return self._cell(new_inputs, state)
@staticmethod
def get_double_linear_controller(size, bias, input_keep_prob=1.0, is_train=None):
def double_linear_controller(inputs, state, memory):
"""
:param inputs: [N, i]
:param state: [N, d]
:param memory: [N, M, m]
:return: [N, M]
"""
rank = len(memory.get_shape())
_memory_size = tf.shape(memory)[rank-2]
tiled_inputs = tf.tile(tf.expand_dims(inputs, 1), [1, _memory_size, 1])
if isinstance(state, tuple):
tiled_states = [tf.tile(tf.expand_dims(each, 1), [1, _memory_size, 1])
for each in state]
else:
tiled_states = [tf.tile(tf.expand_dims(state, 1), [1, _memory_size, 1])]
# [N, M, d]
in_ = tf.concat(2, [tiled_inputs] + tiled_states + [memory])
out = double_linear_logits(in_, size, bias, input_keep_prob=input_keep_prob,
is_train=is_train)
return out
return double_linear_controller
@staticmethod
def get_linear_controller(bias, input_keep_prob=1.0, is_train=None):
def linear_controller(inputs, state, memory):
rank = len(memory.get_shape())
_memory_size = tf.shape(memory)[rank-2]
tiled_inputs = tf.tile(tf.expand_dims(inputs, 1), [1, _memory_size, 1])
if isinstance(state, tuple):
tiled_states = [tf.tile(tf.expand_dims(each, 1), [1, _memory_size, 1])
for each in state]
else:
tiled_states = [tf.tile(tf.expand_dims(state, 1), [1, _memory_size, 1])]
# [N, M, d]
in_ = tf.concat(2, [tiled_inputs] + tiled_states + [memory])
out = linear(in_, 1, bias, squeeze=True, input_keep_prob=input_keep_prob, is_train=is_train)
return out
        return linear_controller
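    # The class above references get_concat_mapper() and get_sim_mapper(), but
    # the file is cut off here. A minimal sketch of what they could look like
    # (assumed signatures, not the original implementations):
    @staticmethod
    def get_concat_mapper():
        def concat_mapper(inputs, state, sel_mem):
            # pass the selected memory alongside the inputs; leave state alone
            return tf.concat(1, [inputs, sel_mem]), state
        return concat_mapper
    @staticmethod
    def get_sim_mapper():
        def sim_mapper(inputs, state, sel_mem):
            # also expose elementwise similarity features; assumes inputs and
            # memory vectors share the same dimensionality
            return tf.concat(1, [inputs, sel_mem, inputs * sel_mem, inputs - sel_mem]), state
        return sim_mapper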
pache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Implementation of an fake image service"""
import copy
import datetime
from nova import exception
from nova import flags
from nova import log as logging
from nova import utils
LOG = logging.getLogger('nova.image.fake')
FLAGS = flags.FLAGS
class _FakeImageService(object):
"""Mock (fake) image service for unit testing."""
def __init__(self):
self.images = {}
# NOTE(justinsb): The OpenStack API can't upload an image?
# So, make sure we've got one..
timestamp = datetime.datetime(2011, 01, 01, 01, 02, 03)
# NOTE(bcwaldon): was image '123456'
image1 = {'id': '155d900f-4e14-4e4c-a73d-069cbf4541e6',
'name': 'fakeimage123456',
'created_at': timestamp,
'updated_at': timestamp,
'deleted_at': None,
'deleted': False,
'status': 'active',
'is_public': False,
'container_format': 'raw',
'disk_format': 'raw',
'properties': {'kernel_id': FLAGS.null_kernel,
'ramdisk_id': FLAGS.null_kernel,
'architecture': 'x86_64'}}
# NOTE(bcwaldon): was image 'fake'
image2 = {'id': 'a2459075-d96c-40d5-893e-577ff92e721c',
'name': 'fakeimage123456',
'created_at': timestamp,
'updated_at': timestamp,
'deleted_at': None,
'deleted': False,
'status': 'active',
'is_public': True,
'container_format': 'ami',
'disk_format': 'ami',
'properties': {'kernel_id': FLAGS.null_kernel,
'ramdisk_id': FLAGS.null_kernel}}
# NOTE(bcwaldon): was image '2'
image3 = {'id': '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6',
'name': 'fakeimage123456',
'created_at': timestamp,
'updated_at': timestamp,
'deleted_at': None,
'deleted': False,
'status': 'active',
'is_public': True,
'container_format': None,
'disk_format': None,
'properties': {'kernel_id': FLAGS.null_kernel,
'ramdisk_id': FLAGS.null_kernel}}
# NOTE(bcwaldon): was image '1'
image4 = {'id': 'cedef40a-ed67-4d10-800e-17455edce175',
'name': 'fakeimage123456',
'created_at': timestamp,
'updated_at': timestamp,
'deleted_at': None,
'deleted': False,
'status': 'active',
'is_public': True,
'container_format': 'ami',
'disk_format': 'ami',
'properties': {'kernel_id': FLAGS.null_kernel,
'ramdisk_id': FLAGS.null_kernel}}
# NOTE(bcwaldon): was image '3'
image5 = {'id': 'c905cedb-7281-47e4-8a62-f26bc5fc4c77',
'name': 'fakeimage123456',
'created_at': timestamp,
'updated_at': timestamp,
'deleted_at': None,
'deleted': False,
'status': 'active',
'is_public': True,
'container_format': 'ami',
'disk_format': 'ami',
'properties': {'kernel_id':
'155d900f-4e14-4e4c-a73d-069cbf4541e6',
'ramdisk_id': None}}
# NOTE(sirp): was image '6'
image6 = {'id': 'a440c04b-79fa-479c-bed1-0b816eaec379',
'name': 'fakeimage6',
'created_at': timestamp,
'updated_at': timestamp,
'deleted_at': None,
'deleted': False,
'status': 'active',
'is_public': False,
'container_format': 'ova',
'disk_format': 'vhd',
'properties': {'kernel_id': FLAGS.null_kernel,
'ramdisk_id': FLAGS.null_kernel,
'architecture': 'x86_64',
'auto_disk_config': 'False'}}
# NOTE(sirp): was image '7'
image7 = {'id': '70a599e0-31e7-49b7-b260-868f441e862b',
'name': 'fakeimage7',
'created_at': timestamp,
'updated_at': timestamp,
'deleted_at': None,
'deleted': False,
'status': 'active',
'is_public': False,
'container_format': 'ova',
'disk_format': 'vhd',
'properties': {'kernel_id': FLAGS.null_kernel,
'ramdisk_id': FLAGS.null_kernel,
'architecture': 'x86_64',
'auto_disk_config': 'True'}}
self.create(None, image1)
self.create(None, image2)
self.create(None, image3)
self.create(None, image4)
self.create(None, image5)
self.create(None, image6)
self.create(None, image7)
self._imagedata = {}
super(_FakeImageService, self).__init__()
#TODO(bcwaldon): implement optional kwargs such as limit, sort_dir
def index(self, context, **kwargs):
"""Returns list of images."""
        retval = []
for img in self.images.values():
retval += [dict([(k, v) for k, v in img.iteritems()
if k in ['id', 'name']])]
        return retval
#TODO(bcwaldon): implement optional kwargs such as limit, sort_dir
def detail(self, context, **kwargs):
"""Return list of detailed image information."""
return copy.deepcopy(self.images.values())
def get(self, context, image_id, data):
metadata = self.show(context, image_id)
data.write(self._imagedata.get(image_id, ''))
return metadata
def show(self, context, image_id):
"""Get data about specified image.
Returns a dict containing image data for the given opaque image id.
"""
image = self.images.get(str(image_id))
if image:
return copy.deepcopy(image)
LOG.warn('Unable to find image id %s. Have images: %s',
image_id, self.images)
raise exception.ImageNotFound(image_id=image_id)
def show_by_name(self, context, name):
"""Returns a dict containing image data for the given name."""
images = copy.deepcopy(self.images.values())
for image in images:
if name == image.get('name'):
return image
raise exception.ImageNotFound(image_id=name)
def create(self, context, metadata, data=None):
"""Store the image data and return the new image id.
        :raises: Duplicate if the image already exists.
"""
image_id = str(metadata.get('id', utils.gen_uuid()))
metadata['id'] = image_id
if image_id in self.images:
raise exception.Duplicate()
self.images[image_id] = copy.deepcopy(metadata)
if data:
self._imagedata[image_id] = data.read()
return self.images[image_id]
def update(self, context, image_id, metadata, data=None):
"""Replace the contents of the given image with the new data.
:raises: ImageNotFound if the image does not exist.
"""
if not self.images.get(image_id):
raise exception.ImageNotFound(image_id=image_id)
self.images[image_id] = copy.deepcopy(metadata)
def delete(self, context, image_id):
"""Delete the |
"""Internal module for Python 2 backwards compatibility."""
import sys
if sys.version_info[0] < 3:
from urlparse import parse_qs, urlparse
from itertools import imap, izip
from string import letters as ascii_letters
from Queue import Queue
try:
from cStringIO import StringIO as BytesIO
except ImportError:
from StringIO import StringIO as BytesIO
iteritems = lambda x: x.iteritems()
    iterkeys = lambda x: x.iterkeys()
itervalues = lambda x: x.itervalues()
nativestr = lambda x: \
x if isinstance(x, str) else x.encode('utf-8', 'replace')
u = lambda x: x.decode()
b = lambda x: x
next = lambda x: x.next()
byte_to_chr = lambda x: x
unichr = unichr
xrange = xrange
basestring = basestring
unicode = unicode
bytes = str
long = long
else:
    from urllib.parse import parse_qs, urlparse
from io import BytesIO
from string import ascii_letters
from queue import Queue
iteritems = lambda x: iter(x.items())
iterkeys = lambda x: iter(x.keys())
itervalues = lambda x: iter(x.values())
byte_to_chr = lambda x: chr(x)
nativestr = lambda x: \
x if isinstance(x, str) else x.decode('utf-8', 'replace')
u = lambda x: x
b = lambda x: x.encode('iso-8859-1') if not isinstance(x, bytes) else x
next = next
unichr = chr
imap = map
izip = zip
xrange = range
basestring = str
unicode = str
bytes = bytes
long = int
try: # Python 3
from queue import LifoQueue, Empty, Full
except ImportError:
from Queue import Empty, Full
try: # Python 2.6 - 2.7
from Queue import LifoQueue
except ImportError: # Python 2.5
from Queue import Queue
# From the Python 2.7 lib. Python 2.5 already extracted the core
        # methods to aid implementing different queue organisations.
class LifoQueue(Queue):
"Override queue methods to implement a last-in first-out queue."
def _init(self, maxsize):
self.maxsize = maxsize
self.queue = []
def _qsize(self, len=len):
return len(self.queue)
def _put(self, item):
self.queue.append(item)
def _get(self):
return self.queue.pop()
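# Minimal usage sketch (illustrative): the shims above give version-agnostic
# dict iteration and a LIFO queue on both Python 2 and 3.
if __name__ == '__main__':
    assert list(iterkeys({'a': 1})) == ['a']
    q = LifoQueue()
    q.put(1)
    q.put(2)
    assert q.get() == 2  # last in, first out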
import platform
import urllib
import subprocess
from progressbar import ProgressBar
class Downloader(object):
WINDOWS_DOWNLOAD_URL = "http://cache.lego.com/downloads/ldd2.0/installer/setupLDD-PC-4_3_8.exe"
MAC_DOWNLOAD_URL = "http://cache.lego.com/downloads/ldd2.0/installer/setupLDD-MAC-4_3_8.zip"
PB = None
@classmethod
def download_ldd(cls):
if platform.system() == "Darwin":
urllib.urlretrieve(cls.MAC_DOWNLOAD_URL, "ldd.zip", reporthook=cls.download_progress)
elif platform.system() == "Windows":
urllib.urlretrieve(cls.WINDOWS_DOWNLOAD_URL, "ldd.exe")
# subprocess.Popen("ldd.exe")
@classmethod
def download_progress(cls, count, block_size, total_size):
if not cls.PB:
cls.PB = ProgressBar(maxval=total_size).start()
        cls.PB.update(count * block_size)
class Installer(object):
    @classmethod
def install(cls):
pass
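# Usage sketch (illustrative): fetch the platform-appropriate LDD installer,
# then hand off to the (currently stubbed) installer step.
if __name__ == "__main__":
    Downloader.download_ldd()
    Installer.install()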
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import mock
from nose.tools import ok_
from crontabber.app import CronTabber
from socorro.unittest.cron.jobs.base import IntegrationTestBase
from socorro.unittest.cron.setup_configman import (
get_config_manager_for_crontabber,
)
from socorro.schemas import CRASH_REPORT_JSON_SCHEMA_AS_STRING
class TestUploadCrashReportJSONSchemaCronApp(IntegrationTestBase):
def _setup_config_manager(self):
return get_config_manager_for_crontabber(
jobs='socorro.cron.jobs.upload_crash_report_json_schema.'
'UploadCrashReportJSONSchemaCronApp|30d',
)
@mock.patch('boto.connect_s3')
def test_run(self, connect_s3):
key = mock.MagicMock()
connect_s3().get_bucket().get_key.return_value = None
connect_s3().get_bucket().new_key.return_value = key
with self._setup_config_manager().context() as config:
tab = CronTabber(config)
tab.run_all()
information = self._load_structure()
        app_name = 'upload-crash-report-json-schema'
ok_(information[app_name])
ok_(not information[app_name]['last_error'])
ok_(information[app_name]['last_success'])
key.set_contents_from_string.assert_called_with(
CRASH_REPORT_JSON_SCHEMA_AS_STRING
        )
#! /usr/bin/env python
# encoding: utf-8
# WARNING! Do not edit! https://waf.io/book/index.html#_obtaining_the_waf_file
import os,re,traceback,sys
from waflib import Utils,ansiterm
if not os.environ.get('NOSYNC',False):
if sys.stdout.isatty()and id(sys.stdout)==id(sys.__stdout__):
sys.stdout=ansiterm.AnsiTerm(sys.stdout)
if sys.stderr.isatty()and id(sys.stderr)==id(sys.__stderr__):
sys.stderr=ansiterm.AnsiTerm(sys.stderr)
import logging
LOG_FORMAT=os.environ.get('WAF_LOG_FORMAT','%(asctime)s %(c1)s%(zone)s%(c2)s %(message)s')
HOUR_FORMAT=os.environ.get('WAF_HOUR_FORMAT','%H:%M:%S')
zones=[]
verbose=0
colors_lst={'USE':True,'BOLD':'\x1b[01;1m','RED':'\x1b[01;31m','GREEN':'\x1b[32m','YELLOW':'\x1b[33m','PINK':'\x1b[35m','BLUE':'\x1b[01;34m','CYAN':'\x1b[36m','GREY':'\x1b[37m','NORMAL':'\x1b[0m','cursor_on':'\x1b[?25h','cursor_off':'\x1b[?25l',}
indicator='\r\x1b[K%s%s%s'
try:
unicode
except NameError:
unicode=None
def enable_colors(use):
if use==1:
if not(sys.stderr.isatty()or sys.stdout.isatty()):
use=0
if Utils.is_win32 and os.name!='java':
term=os.environ.get('TERM','')
else:
term=os.environ.get('TERM','dumb')
if term in('dumb','emacs'):
use=0
if use>=1:
os.environ['TERM']='vt100'
colors_lst['USE']=use
try:
get_term_cols=ansiterm.get_term_cols
except AttributeError:
def get_term_cols():
return 80
get_term_cols.__doc__="""
Returns the console width in characters.
:return: the number of characters per line
:rtype: int
"""
def get_color(cl):
if colors_lst['USE']:
return colors_lst.get(cl,'')
return''
class color_dict(object):
def __getattr__(self,a):
return get_color(a)
def __call__(self,a):
return get_color(a)
colors=color_dict()
re_log=re.compile(r'(\w+): (.*)',re.M)
class log_filter(logging.Filter):
def __init__(self,name=''):
logging.Filter.__init__(self,name)
def filter(self,rec):
global verbose
rec.zone=rec.module
if rec.levelno>=logging.INFO:
return True
m=re_log.match(rec.msg)
if m:
rec.zone=m.group(1)
rec.msg=m.group(2)
if zones:
return getattr(rec,'zone','')in zones or'*'in zones
elif not verbose>2:
return False
return True
class log_handler(logging.StreamHandler):
def emit(self,record):
try:
try:
self.stream=record.stream
except AttributeError:
if record.levelno>=logging.WARNING:
record.stream=self.stream=sys.stderr
else:
record.stream=self.stream=sys.stdout
self.emit_override(record)
self.flush()
except(KeyboardInterrupt,SystemExit):
raise
except:
self.handleError(record)
def emit_override(self,record,**kw):
self.terminator=getattr(record,'terminator','\n')
stream=self.stream
if unicode:
msg=self.formatter.format(record)
fs='%s'+self.terminator
try:
if(isinstance(msg,unicode)and getattr(stream,'encoding',None)):
fs=fs.decode(stream.encoding)
try:
stream.write(fs%msg)
except UnicodeEncodeError:
stream.write((fs%msg).encode(stream.encoding))
else:
stream.write(fs%msg)
except UnicodeError:
stream.write((fs%msg).encode('utf-8'))
else:
logging.StreamHandler.emit(self,record)
class formatter(logging.Formatter):
def __init__(self):
logging.Formatter.__init__(self,LOG_FORMAT,HOUR_FORMAT)
def format(self,rec):
try:
msg=rec.msg.decode('utf-8')
except Exception:
msg=rec.msg
use=colors_lst['USE']
if(use==1 and rec.stream.isatty())or use==2:
c1=getattr(rec,'c1',None)
if c1 is None:
c1=''
if rec.levelno>=logging.ERROR:
c1=colors.RED
elif rec.levelno>=logging.WARNING:
c1=colors.YELLOW
elif rec.levelno>=logging.INFO:
c1=colors.GREEN
c2=getattr(rec,'c2',colors.NORMAL)
msg='%s%s%s'%(c1,msg,c2)
else:
msg=re.sub(r'\r(?!\n)|\x1B\[(K|.*?(m|h|l))','',msg)
if rec.levelno>=logging.INFO:
if rec.args:
return msg%rec.args
return msg
rec.msg=msg
rec.c1=colors.PINK
rec.c2=colors.NORMAL
return logging.Formatter.format(self,rec)
log=None
def debug(*k,**kw):
global verbose
if verbose:
k=list(k)
k[0]=k[0].replace('\n',' ')
global log
log.debug(*k,**kw)
def error(*k,**kw):
global log,verbose
log.error(*k,**kw)
if verbose>2:
st=traceback.extract_stack()
if st:
st=st[:-1]
buf=[]
for filename,lineno,name,line in st:
buf.append(' File %r, line %d, in %s'%(filename,lineno,name))
if line:
buf.append(' %s'%line.strip())
if buf:log.error('\n'.join(buf))
def warn(*k,**kw):
global log
log.warn(*k,**kw)
def info(*k,**kw):
global log
log.info(*k,**kw)
def init_log():
global log
log=logging.getLogger('waflib')
log.handlers=[]
log.filters=[]
hdlr=log_handler()
hdlr.setFormatter(formatter())
log.addHandler(hdlr)
log.addFilter(log_filter())
log.setLevel(logging.DEBUG)
def make_logger(path,name):
logger=logging.getLogger(name)
hdlr=logging.FileHandler(path,'w')
formatter=logging.Formatter('%(message)s')
hdlr.setFormatter(formatter)
logger.addHandler(hdlr)
logger.setLevel(logging.DEBUG)
return logger
def make_mem_logger(name,to_log,size=8192):
	from logging.handlers import MemoryHandler
logger=logging.getLogger(name)
hdlr=MemoryHandler(size,target=to_log)
formatter=logging.Formatter('%(message)s')
hdlr.setFormatter(formatter)
logger.addHandler(hdlr)
logger.memhandler=hdlr
logger.setLevel(logging.DEBUG)
return logger
def free_logger(logger):
try:
for x in logger.handlers:
x.close()
logger.removeHandler(x)
except Exception:
pass
def pprint(col,msg,label='',sep='\n'):
global info
	info('%s%s%s %s',colors(col),msg,colors.NORMAL,label,extra={'terminator':sep})
#!/usr/bin/python
#
# $Id: gen-data-queryperf.py,v 1.1.10.1 2003/05/15 05:07:21 marka Exp $
#
# Contributed by Stephane Bortzmeyer <bortzmeyer@nic.fr>
#
# "A small tool which may be useful with contrib/queryperf. This script
# can generate files of queries, both with random names (to test the
# behaviour with NXdomain) and with domains from a real zone file."
#
import sys
import getopt
import random
import re
ldh = []
# Letters
for i in range(97, 123):  # 'a'..'z' inclusive
ldh.append(chr(i))
# Digits
for i in range(48, 58):  # '0'..'9' inclusive
ldh.append(chr(i))
# Hyphen
ldh.append('-')
maxsize=10
tld='org'
num=4
percent_random = 0.3
gen = None
zone_file = None
domains = {}
domain_ns = "^([a-z0-9-]+)(\.([a-z0-9-\.]+|)|)( +IN|) +NS"
domain_ns_re = re.compile(domain_ns, re.IGNORECASE)
def gen_random_label():
label = ""
for i in range(gen.randint(1, maxsize)):
label = label + gen.choice(ldh)
return label
def make_domain(label):
return "www." + label + "." + tld + " A"
def usage():
sys.stdout.write("Usage: " + sys.argv[0] | + " [-n number] " + \
"[-p percent-random] [-t TLD]\n")
sys.stdout.write(" [-m MAXSIZE] [-f zone-file]\n")
try:
optlist, args = getopt.getopt(sys.argv[1:], "hp:f:n:t:m:",
["help", "percentrandom=", "zonefile=",
| "num=", "tld=",
"maxsize="])
for option, value in optlist:
if option == "--help" or option == "-h":
usage()
sys.exit(0)
elif option == "--number" or option == "-n":
num = int(value)
elif option == "--maxsize" or option == "-m":
maxsize = int(value)
elif option == "--percentrandom" or option == "-p":
percent_random = float(value)
elif option == "--tld" or option == "-t":
tld = str(value)
elif option == "--zonefile" or option == "-f":
zone_file = str(value)
else:
error("Unknown option " + option)
except getopt.error, reason:
sys.stderr.write(sys.argv[0] + ": " + str(reason) + "\n")
usage()
sys.exit(1)
if len(args) <> 0:
usage()
sys.exit(1)
gen = random.Random()
if zone_file:
file = open(zone_file)
line = file.readline()
while line:
domain_line = domain_ns_re.match(line)
if domain_line:
domain = domain_line.group(1)
domains[domain] = 1
line = file.readline()
file.close()
for i in range(num):
if zone_file:
if gen.random() < percent_random:
print make_domain(gen_random_label())
else:
print make_domain(gen.choice(domains.keys()))
else:
print make_domain(gen_random_label())
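# Example invocation (illustrative), matching the options parsed above:
#   python gen-data-queryperf.py -n 1000 -p 0.5 -t org -f example.org.zone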
import numpy as np
from gpaw import KohnShamConvergenceError
class SCFLoop:
"""Self-consistent field loop.
converged: Do we have a self-consistent solution?
"""
def __init__(self, eigenstates=0.1, energy=0.1, density=0.1, maxiter=100,
fixdensity=False, niter_fixdensity=None):
self.max_eigenstates_error = max(eigenstates, 1e-20)
self.max_energy_error = energy
self.max_density_error = max(density, 1e-20)
self.maxiter = maxiter
self.fixdensity = fixdensity
if niter_fixdensity is None:
niter_fixdensity = 2
self.niter_fixdensity = niter_fixdensity
if fixdensity:
self.fix_density()
self.reset()
def fix_density(self):
self.fixdensity = True
self.niter_fixdensity = 10000000
self.max_density_error = np.inf
def reset(self):
self.energies = []
self.eigenstates_error = None
self.energy_error = None
self.density_error = None
self.converged = False
def run(self, wfs, hamiltonian, density, occupations):
if self.converged:
return
for iter in range(1, self.maxiter + 1):
wfs.eigensolver.iterate(hamiltonian, wfs)
occupations.calculate(wfs)
# XXX ortho, dens, wfs?
energy = hamiltonian.get_energy(occupations)
self.energies.append(energy)
self.check_convergence(density, wfs.eigensolver)
yield iter
if self.converged:
break
if iter > self.niter_fixdensity:
density.update(wfs)
hamiltonian.update(density)
else:
hamiltonian.npoisson = 0
# Don't fix the density in the next step:
self.niter_fixdensity = 0
def check_convergence(self, density, eigensolver):
"""Check convergence of eigenstates, energy | and density."""
if self.converged:
            return True
self.eigenstates_error = eigensolver.error
if len(self.energies) < 3:
self.energy_error = self.max_energy_error
else:
self.energy_error = np.ptp(self.energies[-3:])
self.density_error = density.mixer.get_charge_sloshing()
if self.density_error is None:
self.density_error = 1000000.0
self.converged = (
self.eigenstates_error < self.max_eigenstates_error and
self.energy_error < self.max_energy_error and
self.density_error < self.max_density_error)
return self.converged
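# Usage sketch (hypothetical objects): run() is a generator, so the caller
# drives the SCF iterations and can report progress between steps, e.g.
#   scf = SCFLoop(eigenstates=1e-8, energy=1e-6, density=1e-5, maxiter=50)
#   for niter in scf.run(wfs, hamiltonian, density, occupations):
#       print(niter, scf.energies[-1])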
from netfilterqueue import NetfilterQueue
from dpkt import ip, icmp, tcp, udp
from scapy.all import *
import socket
def print_and_accept(pkt):
data=pkt.get_payload()
res = ip.IP(data)
    res2 = IP(data)
i = ICMP(data)
t = TCP(data)
u = UDP(data)
print "SOURCE IP: %s\tDESTINATION IP: %s" % (socket.inet_ntoa(res.src),socket.inet_ntoa(res.dst))
print res2.show2()
resp=srp1(Ether(dst="ff:ff:ff:ff:ff:ff")/ARP(pdst='192.168.0.34'),iface="eth0",timeout=2)
print resp.dst
eth_dst = resp.src
eth_src = resp.dst
eth = Ether(src=eth_src, dst=eth_dst)
eth.type = 2048
    # forward the parsed IP packet once, with the rewritten Ethernet header
    sendp(eth/res2, iface="eth0")
pkt.accept()
nfqueue = NetfilterQueue()
nfqueue.bind(6, print_and_accept)
try:
nfqueue.run()
except KeyboardInterrupt, ex:
print ex
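# Packets only reach queue 6 if a matching netfilter rule diverts them there,
# for example (illustrative):
#   iptables -A OUTPUT -p icmp -j NFQUEUE --queue-num 6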
#!/usr/bin/env python
import csv
# create an empty list that will be filled with the rows of data from the CSV as dictionaries
csv_content = []
# open and loop through each line of the csv file to populate our data file
with open('aaj1945_DataS1_Egg_shape_by_species_v2.csv') as csv_file:
    csv_reader = csv.DictReader(csv_file)
lineNo = 0
    for row in csv_reader:  # process each row of the csv file
csv_content.append(row)
if lineNo < 3: # print out a few lines of data for our inspection
print(row)
lineNo += 1
# create some empty lists that we will fill with values for each column of data
order = []
family = []
species = []
asymmetry = []
ellipticity = []
avglength = []
# for each row of data in our dataset write a set of values into the lists of column values
for item in csv_content:
order.append(item['\ufeffOrder'])
family.append(item['Family'])
species.append(item['Species'])
# deal with issues
try:
asymmetry.append(float(item['Asymmetry']))
except:
asymmetry.append(-9999)
try:
ellipticity.append(float(item['Ellipticity']))
except:
ellipticity.append(-9999)
try:
avglength.append(float(item['AvgLength (cm)']))
except:
avglength.append(-9999)
print()
print()
# Calculate and print some statistics
mean_asymmetry = sum(asymmetry)/len(asymmetry)
print("Mean Asymmetry: ", str(mean_asymmetry))
mean_ellipticity = sum(ellipticity)/len(ellipticity)
print("Mean Ellipticity: ", str(mean_ellipticity))
mean_avglength = sum(avglength)/len(avglength)
print("Mean Average Length: ", str(mean_avglength))
# What's wrong with these results? What would you do next to fix the problem?
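# One possible next step (a sketch): the -9999 sentinels appended above for
# unparseable values get averaged in with real measurements and drag the
# means far off; filtering them out first gives statistics over valid data.
valid_asymmetry = [v for v in asymmetry if v != -9999]
valid_ellipticity = [v for v in ellipticity if v != -9999]
valid_avglength = [v for v in avglength if v != -9999]
if valid_asymmetry:
    print("Mean Asymmetry (valid rows only): ", sum(valid_asymmetry) / len(valid_asymmetry))
if valid_ellipticity:
    print("Mean Ellipticity (valid rows only): ", sum(valid_ellipticity) / len(valid_ellipticity))
if valid_avglength:
    print("Mean Average Length (valid rows only): ", sum(valid_avglength) / len(valid_avglength))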
################################################################################
# Copyright 2015 Samuel Gongora Garcia (s.gongoragarcia@gmail.com)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
################################################################################
# Author: s.gongoragarcia[at]gmail.com
################################################################################
class Read_predict_data:
def __init__(self, index_satellite):
from os import getcwd, chdir
index_satellite = index_satellite + 1
directorio_script = getcwd()
# predict routine
self.open_predict(directorio_script)
self.open_files_predict(index_satellite)
chdir(directorio_script)
def open_predict(self, directorio_script):
from os import chdir, listdir, getcwd
chdir(directorio_script + '/results/predict')
self.files_predict = listdir(getcwd())
self.files_predict.sort()
def open_files_predict(self, index_satellite):
for i in range(index_satellite):
self.open_file_predict(self.files_predict[i])
def open_file_predict(self, name):
self.predict_simulation_time = []
self.predict_alt_satellite = []
self.predict_az_satellite = []
import csv
with open(name) as tsv:
for line in csv.reader(tsv, delimiter = "\t"):
if float(line[1]) >= 0:
linea0 = float(line[0])
self.predict_simulation_time.append(linea0)
self.predict_alt_satellite.append(float(line[1]))
                    self.predict_az_satellite.append(float(line[2]))
from django.conf.urls import include, url
from django.contrib import admin
urlpatterns = [
    url(r'^admin/', include(admin.site.urls)),
url(r'^external/', include('external.urls')),
url(r'^dev/', include('dev.urls')),
]
self.uri = None
self.rs_uri = None
self.version = None
self.sessions_enabled = False
self.fake_hostname_uri = None
self.server_status = None
def setup(self):
assert not self.initialized
self.setup_sync_cx()
self.setup_auth_and_uri()
self.setup_version()
self.setup_v8()
self.server_status = self.sync_cx.admin.command("serverStatus")
self.initialized = True
def setup_sync_cx(self):
"""Get a synchronous PyMongo MongoClient and determine SSL config."""
host = os.environ.get("DB_IP", "localhost")
port = int(os.environ.get("DB_PORT", 27017))
connectTimeoutMS = 100
serverSelectionTimeoutMS = 100
socketTimeoutMS = 10000
try:
client = connected(
pymongo.MongoClient(
host,
port,
username=db_user,
password=db_password,
directConnection=True,
connectTimeoutMS=connectTimeoutMS,
socketTimeoutMS=socketTimeoutMS,
serverSelectionTimeoutMS=serverSelectionTimeoutMS,
tlsCAFile=CA_PEM,
ssl=True,
)
)
self.mongod_started_with_ssl = True
except pymongo.errors.ServerSelectionTimeoutError:
try:
client = connected(
pymongo.MongoClient(
host,
port,
username=db_user,
password=db_password,
directConnection=True,
connectTimeoutMS=connectTimeoutMS,
socketTimeoutMS=socketTimeoutMS,
serverSelectionTimeoutMS=serverSelectionTimeoutMS,
tlsCAFile=CA_PEM,
tlsCertificateKeyFile=CLIENT_PEM,
)
)
self.mongod_started_with_ssl = True
self.mongod_validates_client_cert = True
except pymongo.errors.ServerSelectionTimeoutError:
client = connected(
pymongo.MongoClient(
host,
port,
username=db_user,
password=db_password,
directConnection=True,
connectTimeoutMS=connectTimeoutMS,
socketTimeoutMS=socketTimeoutMS,
serverSelectionTimeoutMS=serverSelectionTimeoutMS,
)
)
response = client.admin.command("ismaster")
self.sessions_enabled = "logicalSessionTimeoutMinutes" in response
self.is_mongos = response.get("msg") == "isdbgrid"
if "setName" in response:
self.is_replica_set = True
self.rs_name = str(response["setName"])
self.w = len(response["hosts"])
self.hosts = set([partition_node(h) for h in response["hosts"]])
host, port = self.primary = partition_node(response["primary"])
self.arbiters = set([partition_node(h) for h in response.get("arbiters", [])])
self.secondaries = [
partition_node(m)
for m in response["hosts"]
if m != self.primary and m not in self.arbiters
]
elif not self.is_mongos:
self.is_standalone = True
# Reconnect to found primary, without short timeouts.
if self.mongod_started_with_ssl:
client = connected(
                pymongo.MongoClient(
host,
port,
username=db_user,
password=db_password,
directConnection=True,
tlsCAFile=CA_PEM,
tlsCertificateKeyFile=CLIENT_PEM,
)
)
        else:
client = connected(
pymongo.MongoClient(
host,
port,
username=db_user,
password=db_password,
directConnection=True,
ssl=False,
)
)
self.sync_cx = client
self.host = host
self.port = port
def setup_auth_and_uri(self):
"""Set self.auth and self.uri."""
if db_user or db_password:
if not (db_user and db_password):
sys.stderr.write("You must set both DB_USER and DB_PASSWORD, or neither\n")
sys.exit(1)
self.auth = True
uri_template = "mongodb://%s:%s@%s:%s/admin"
self.uri = uri_template % (db_user, db_password, self.host, self.port)
# If the hostname 'server' is resolvable, this URI lets us use it
# to test SSL hostname validation with auth.
self.fake_hostname_uri = uri_template % (db_user, db_password, "server", self.port)
else:
self.uri = "mongodb://%s:%s/admin" % (self.host, self.port)
self.fake_hostname_uri = "mongodb://%s:%s/admin" % ("server", self.port)
if self.rs_name:
self.rs_uri = self.uri + "?replicaSet=" + self.rs_name
def setup_version(self):
"""Set self.version to the server's version."""
self.version = Version.from_client(self.sync_cx)
def setup_v8(self):
"""Determine if server is running SpiderMonkey or V8."""
if self.sync_cx.server_info().get("javascriptEngine") == "V8":
self.v8 = True
@property
def storage_engine(self):
try:
return self.server_status.get("storageEngine", {}).get("name")
except AttributeError:
# Raised if self.server_status is None.
return None
def supports_transactions(self):
if self.storage_engine == "mmapv1":
return False
if self.version.at_least(4, 1, 8):
return self.is_mongos or self.is_replica_set
if self.version.at_least(4, 0):
return self.is_replica_set
return False
def require(self, condition, msg, func=None):
def make_wrapper(f):
@wraps(f)
def wrap(*args, **kwargs):
if condition():
return f(*args, **kwargs)
raise SkipTest(msg)
return wrap
if func is None:
def decorate(f):
return make_wrapper(f)
return decorate
return make_wrapper(func)
def require_auth(self, func):
"""Run a test only if the server is started with auth."""
        return self.require(lambda: self.auth, "Server must be started with auth", func=func)
def require_version_min(self, *ver):
"""Run a test only if the server version is at least ``version``."""
other_version = Version(*ver)
return self.require(
lambda: self.version >= other_version,
"Server version must be at least %s" % str(other_version),
)
def require_version_max(self, *ver):
"""Run a test only if the server version is at most ``version``."""
other_version = Version(*ver)
return self.require(
lambda: self.version <= other_version,
"Server version must be at most %s" % str(other_version),
)
def require_replica_set(self, func):
"""Run a test only if the client is connected to a replica set."""
return self.require(
lambda: self.is_replica_set, "Not connected to a replica set", func=func
)
def require_transactions(self, func):
"""Run a test only if the deployment might support transactions.
*Might* because this does not test the FCV.
"""
return self.require(self.supports_transactions, "Transactions are not supported", func=func)
def create_user(self, dbname, user, pwd=None, roles=None, **kwargs):
kwargs["writeConcern"] = {"w": |
import os
import random
import string
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait
TEST_APP_URL = os.getenv('TEST_APP_URL')
TEST_CLIENT_URL = os.getenv('TEST_CLIENT_URL')
E2E_ARGS = os.getenv('E2E_ARGS')
TEST_URL = TEST_CLIENT_URL if E2E_ARGS == 'client' else TEST_APP_URL
def random_string(length=8):
return ''.join(random.choice(string.ascii_letters) for x in range(length))
def register(selenium, user):
selenium.get(f'{TEST_URL}/register')
selenium.implicitly_wait(1)
username = selenium.find_element_by_id('username')
username.send_keys(user.get('username'))
email = selenium.find_element_by_id('email')
email.send_keys(user.get('email'))
password = selenium.find_element_by_id('password')
password.send_keys(user.get('password'))
password_conf = selenium.find_element_by_id('confirm-password')
password_conf.send_keys(user.get('password_conf'))
submit_button = selenium.find_element_by_tag_name('button')
submit_button.click()
def login(selenium, user):
selenium.get(f'{TEST_URL}/login')
selenium.implicitly_wait(1)
email = selenium.find_element_by_id('email')
email.send_keys(user.get('email'))
password = selenium.find_element_by_id('password')
password.send_keys(user.get('password'))
submit_button = selenium.find_element_by_tag_name('button')
submit_button.click()
def register_valid_user(selenium):
user_name = random_string()
user = {
'username': user_name,
'email': f'{user_name}@example.com',
'password': 'p@ssw0rd',
'password_conf': 'p@ssw0rd',
}
register(selenium, user)
WebDriverWait(selenium, 15).until(EC.url_changes(f"{TEST_URL}/register"))
return user
def register_valid_user_and_logout(selenium):
user_name = random_string()
user = {
'username': user_name,
'email': f'{user_name}@example.com',
'password': 'p@ssw0rd',
'password_conf': 'p@ssw0rd',
}
register(selenium, user)
WebDriverWait(selenium, 15).until(EC.url_changes(f"{TEST_URL}/register"))
user_menu = selenium.find_element_by_class_name('nav-items-user-menu')
logout_link = user_menu.find_elements_by_class_name('nav-item')[2]
logout_link.click()
selenium.implicitly_wait(1)
return user
def login_valid_user(selenium, user):
login(selenium, user)
WebDriverWait(selenium, 10).until(EC.url_changes(f"{TEST_URL}/login"))
return user
def assert_navbar(selenium, user):
nav = selenium.find_element_by_id('nav').text
assert 'Register' not in nav
assert 'Login' not in nav
assert 'Dashboard' in nav
assert 'Workouts' in nav
assert 'Statistics' in nav
assert 'Add a workout' in nav
assert user['username'] in nav
assert 'Logout' in nav
UL_CATEGORY_LI = '//ul[@class="category"]/li'
H2_A_TITLELINK = './h2/a[@class="titlelink"]'
SPAN_A_TITLELINK = './span/a[@class="titlelink"]'
DIV_BODYFIELD_P = '//div[contains(@class,"bodyfield")]/p'
CATEGORY_H2_XPATH = [ UL_CATEGORY_LI, H2_A_TITLELINK ]
BODYFIELD_SPAN_XPATH = [ DIV_BODYFIELD_P, SPAN_A_TITLELINK ]
"""Mapping of relative URL (fo | r EOPSS pages) to the xpath needed
to extract documents (1st xpath for s | ection, 2nd xpath for document link)
"""
MASSGOV_DICT = {
'homeland-sec/grants/docs/':
[
UL_CATEGORY_LI,
'./h2/span/a[@class="titlelink"]'
],
'homeland-sec/grants/hs-grant-guidance-and-policies.html':
BODYFIELD_SPAN_XPATH,
'homeland-sec/grants/standard-documents.html':
[
'//div[contains(@class,"bodyfield")]/ul/li',
SPAN_A_TITLELINK
],
'law-enforce/grants/': CATEGORY_H2_XPATH,
'law-enforce/grants/2017-muni-public-safety-staffing-grant.html':
BODYFIELD_SPAN_XPATH,
'law-enforce/grants/le-grants-public-records.html':
BODYFIELD_SPAN_XPATH,
'justice-and-prev/grants/': CATEGORY_H2_XPATH,
'justice-and-prev/grants/bgp/': CATEGORY_H2_XPATH,
'hwy-safety/grants/': CATEGORY_H2_XPATH,
'hwy-safety/grants/ffy-2017-traffic-enforcement-grant-program.html':
BODYFIELD_SPAN_XPATH,
'hwy-safety/grants/ffy2017-hsd-grant-opportunities.html':
BODYFIELD_SPAN_XPATH,
'hwy-safety/grants/ffy-2017-step.html': BODYFIELD_SPAN_XPATH,
'hwy-safety/grants/highway-safety-grants-public-records.html':
BODYFIELD_SPAN_XPATH
}
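# Usage sketch (assumes lxml and a fetched page; the file name is
# illustrative): the first xpath selects each section node, the second,
# relative xpath pulls the document link out of that section.
# from lxml import html
# tree = html.parse('law-enforce-grants.html')
# section_xpath, link_xpath = MASSGOV_DICT['law-enforce/grants/']
# for section in tree.xpath(section_xpath):
#     for link in section.xpath(link_xpath):
#         print(link.get('href'), link.text_content())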
            state._commit(dict_, to_load)
if post_load and context.invoke_all_eagers:
post_load.add_state(state, False)
return instance
if mapper.polymorphic_map and not _polymorphic_from and not refresh_state:
# if we are doing polymorphic, dispatch to a different _instance()
# method specific to the subclass mapper
def ensure_no_pk(row):
identitykey = (
identity_class,
tuple_getter(row),
identity_token,
)
if not is_not_primary_key(identitykey[1]):
return identitykey
else:
return None
_instance = _decorate_polymorphic_switch(
_instance,
context,
mapper,
result,
path,
polymorphic_discriminator,
adapter,
ensure_no_pk,
)
return _instance
def _load_subclass_via_in(context, path, entity):
mapper = entity.mapper
zero_idx = len(mapper.base_mapper.primary_key) == 1
if entity.is_aliased_class:
q, enable_opt, disable_opt = mapper._subclass_load_via_in(entity)
else:
q, enable_opt, disable_opt = mapper._subclass_load_via_in_mapper
def do_load(context, path, states, load_only, effective_entity):
orig_query = context.query
q2 = q._with_lazyload_options(
(enable_opt,) + orig_query._with_options + (disable_opt,),
path.parent,
cache_path=path,
)
if orig_query._populate_existing:
q2.add_criteria(lambda q: q.populate_existing())
q2(context.session).params(
primary_keys=[
state.key[1][0] if zero_idx else state.key[1]
for state, load_attrs in states
]
).all()
return do_load
def _populate_full(
context,
row,
state,
dict_,
isnew,
load_path,
loaded_instance,
populate_existing,
populators,
):
if isnew:
# first time we are seeing a row with this identity.
state.runid = context.runid
for key, getter in populators["quick"]:
dict_[key] = getter(row)
if populate_existing:
for key, set_callable in populators["expire"]:
dict_.pop(key, None)
if set_callable:
state.expired_attributes.add(key)
else:
for key, set_callable in populators["expire"]:
if set_callable:
state.expired_attributes.add(key)
for key, populator in populators["new"]:
populator(state, dict_, row)
for key, populator in populators["delayed"]:
populator(state, dict_, row)
elif load_path != state.load_path:
# new load path, e.g. object is present in more than one
# column position in a series of rows
state.load_path = load_path
# if we have data, and the data isn't in the dict, OK, let's put
# it in.
        for key, getter in populators["quick"]:
if key not in dict_:
dict_[key] = getter(row)
# otherwise treat like an "already seen" row
for key, populator in populators["existing"]:
populator(state, dict_, row)
# TODO: allow "existing" populator to know this is
# a new path for the state:
# populator(state, dict_, row, new_path=True)
else:
        # have already seen rows with this identity in this same path.
for key, populator in populators["existing"]:
populator(state, dict_, row)
# TODO: same path
# populator(state, dict_, row, new_path=False)
def _populate_partial(
context, row, state, dict_, isnew, load_path, unloaded, populators
):
if not isnew:
to_load = context.partials[state]
for key, populator in populators["existing"]:
if key in to_load:
populator(state, dict_, row)
else:
to_load = unloaded
context.partials[state] = to_load
for key, getter in populators["quick"]:
if key in to_load:
dict_[key] = getter(row)
for key, set_callable in populators["expire"]:
if key in to_load:
dict_.pop(key, None)
if set_callable:
state.expired_attributes.add(key)
for key, populator in populators["new"]:
if key in to_load:
populator(state, dict_, row)
for key, populator in populators["delayed"]:
if key in to_load:
populator(state, dict_, row)
for key, populator in populators["eager"]:
if key not in unloaded:
populator(state, dict_, row)
return to_load
def _validate_version_id(mapper, state, dict_, row, getter):
if mapper._get_state_attr_by_column(
state, dict_, mapper.version_id_col
) != getter(row):
raise orm_exc.StaleDataError(
"Instance '%s' has version id '%s' which "
"does not match database-loaded version id '%s'."
% (
state_str(state),
mapper._get_state_attr_by_column(
state, dict_, mapper.version_id_col
),
getter(row),
)
)
def _decorate_polymorphic_switch(
instance_fn,
context,
mapper,
result,
path,
polymorphic_discriminator,
adapter,
ensure_no_pk,
):
if polymorphic_discriminator is not None:
polymorphic_on = polymorphic_discriminator
else:
polymorphic_on = mapper.polymorphic_on
if polymorphic_on is None:
return instance_fn
if adapter:
polymorphic_on = adapter.columns[polymorphic_on]
def configure_subclass_mapper(discriminator):
try:
sub_mapper = mapper.polymorphic_map[discriminator]
except KeyError:
raise AssertionError(
"No such polymorphic_identity %r is defined" % discriminator
)
else:
if sub_mapper is mapper:
return None
elif not sub_mapper.isa(mapper):
return False
return _instance_processor(
sub_mapper,
context,
result,
path,
adapter,
_polymorphic_from=mapper,
)
polymorphic_instances = util.PopulateDict(configure_subclass_mapper)
getter = result._getter(polymorphic_on)
def polymorphic_instance(row):
discriminator = getter(row)
if discriminator is not None:
_instance = polymorphic_instances[discriminator]
if _instance:
return _instance(row)
elif _instance is False:
identitykey = ensure_no_pk(row)
if identitykey:
raise sa_exc.InvalidRequestError(
"Row with identity key %s can't be loaded into an "
"object; the polymorphic discriminator column '%s' "
"refers to %s, which is not a sub-mapper of "
"the requested %s"
% (
identitykey,
polymorphic_on,
mapper.polymorphic_map[discriminator],
mapper,
)
)
else:
return None
else:
return instance_fn(row)
else:
identitykey = ensure_no_pk(row)
if identitykey:
raise sa_exc.InvalidRequestError(
"Row with identity key %s can't be loaded into an "
"object; the polymorphic discriminator column '%s' is "
"NULL" % (identitykey, polymorphic_on)
)
else:
return None
return polymorphic_instance
class PostLoad(object):
"""Track loaders and states for "p |
# -*- encoding: utf-8 -*-
from __future__ import unicode_literals
from datetime import (
datetime,
timedelta,
)
from dateutil.relativedelta import relativedelta
from base.tests.model_maker import clean_and_save
from booking.models import (
Booking,
Category,
Location,
)
def get_alpe_d_huez():
return Booking.objects.get(title='Alpe D Huez')
def make_booking(start_date, end_date, title, **kwargs):
defaults = dict(
start_date=start_date,
end_date=end_date,
title=title,
)
defaults.update(kwargs)
return clean_and_save(Booking(**defaults))
def make_booking_in_past(start_date, end_date, title):
"""Save a booking without cleaning (validating) the data."""
b = Booking(**dict(
start_date=start_date,
end_date=end_date,
title=title,
))
b.save()
return b
def next_weekday(d, weekday):
"""Find the date for the next weekday.
Copied from:
http://stackoverflow.com/questions/6558535/python-find-the-date-for-the-first-monday-after-a-given-a-date
"""
days_ahead = weekday - d.weekday()
    if days_ahead <= 0:  # Target day already happened this week
days_ahead += 7
return d + timedelta(days_ahead)
def demo_data():
# set-up some dates
today = datetime.today().date()
# 1st week last month starting Saturday
first_prev_month = today + relativedelta(months=-1, day=1)
start_date = next_weekday(first_prev_month, 5)
end_date = start_date + timedelta(days=7)
make_booking_in_past(start_date, end_date, 'Tignes')
# 2nd week last month
    make_booking_in_past(end_date, end_date + timedelta(days=7), 'Meribel')
# 1st week this month starting Saturday
first_this_month = today + relativedelta(day=1)
start_date = next_weekday(first_this_month, 5)
make_booking_in_past(start_date, start_date + timedelta(days=3), 'Whistler')
# later this month starting Tuesday
start_date = next_weekday(first_this_month + timedelta(days=10), 1)
make_booking_in_past(start_date, start_date + timedelta(days=3), 'Dorset')
# span this and next month
start_date = datetime(today.year, today.month, 27).date()
first_next_month = today + relativedelta(months=+1, day=1)
end_date = datetime(first_next_month.year, first_next_month.month, 2).date()
make_booking_in_past(start_date, end_date, 'Devon')
# next month
start_date = next_weekday(first_next_month + timedelta(days=3), 2)
end_date = next_weekday(start_date, 5)
make_booking(start_date, end_date, 'Alpe D Huez')
make_booking(end_date, end_date + timedelta(days=4), 'Cornwall')
# misc
Category.objects.create_category('Meeting')
Location.objects.create_location('Community Centre')
# Rest Imports
from rest_framework import status
# Local Imports
from ava_core.abstract.test_data import AvaCoreTestData
from ava_core.integration.integration_ldap.models import LDAPIntegrationAdapter
# Implementation
class LDAPIntegrationAdapterTestData(AvaCoreTestData):
"""
Test data for LDAPIntegrationAdapter
"""
@staticmethod
def init_requirements():
# Import the required model and data
from ava_core.gather.gather_ldap.models import LDAPGatherHistory
from ava_core.gather.gather_ldap.test_data import LDAPGatherHistoryTestData
# Check that requirements haven't already been created.
# True - Create necessary requirements.
if LDAPGatherHistory.objects.count() == 0:
LDAPGatherHistoryTestData.init_requirements()
model = LDAPGatherHistory.objects.create(**LDAPGatherHistoryTestData.get_data('standard'))
model.save()
model = LDAPGatherHistory.objects.create(**LDAPGatherHistoryTestData.get_data('unique'))
model.save()
# Import the required model and data
from ava_core.integration.integration_abstract.models import IntegrationAdapter
from ava_core.integration.integration_abstract.test_data import IntegrationAdapterTestData
# Check that requirements haven't already been created.
# True - Create necessary requirements.
if IntegrationAdapter.objects.count() == 0:
IntegrationAdapterTestData.init_requirements()
model = IntegrationAdapter.objects.create(**IntegrationAdapterTestData.get_data('standard'))
model.save()
model = IntegrationAdapter.objects.create(**IntegrationAdapterTestData.get_data('unique'))
model.save()
# Store self information
model = LDAPIntegrationAdapter
url = '/example'
standard = {
'ldap_password': 'standard_char',
'salt': 'standard_char',
'dump_dn': 'standard_char',
'ldap_user': 'standard_char',
'ldap_integration_history': '/example/1/',
'integrationadapter_ptr': 'default',
'server': 'standard_char',
}
unique = {
'ldap_password': 'unique_char',
'salt': 'unique_char',
'dump_dn': 'unique_char',
'ldap_user': 'unique_char',
'ldap_integration_history': '/example/2/',
'integrationadapter_ptr': 'default',
'server': 'unique_char',
}
missing_ldap_password = {
'salt': 'standard_char',
'dump_dn': 'standard_char',
'ldap_user': 'standard_char',
'ldap_integration_history': '/example/1/',
'integrationadapter_ptr': 'default',
'server': 'standard_char',
}
modified_ldap_password = {
'ldap_password': 'modified_char',
'salt': 'standard_char',
'dump_dn': 'standard_char',
'ldap_user': 'standard_char',
'ldap_integration_history': '/example/1/',
'integrationadapter_ptr': 'default',
'server': 'standard_char',
}
modified_salt = {
'ldap_password': 'standard_char',
'salt': 'modified_char',
'dump_dn': 'standard_char',
'ldap_user': 'standard_char',
'ldap_integration_history': '/example/1/',
'integrationadapter_ptr': 'default',
'server': 'standard_char',
}
missing_salt = {
'ldap_password': 'standard_char',
'dump_dn': 'standard_char',
'ldap_user': 'standard_char',
'ldap_integration_history': '/example/1/',
'integrationadapter_ptr': 'default',
'server': 'standard_char',
}
modified_dump_dn = {
'ldap_password': 'standard_char',
'salt': 'standard_char',
'dump_dn': 'modified_char',
'ldap_user': 'standard_char',
'ldap_integration_history': '/example/1/',
'integrationadapter_ptr': 'default',
'server': 'standard_char',
}
missing_dump_dn = {
'ldap_password': 'standard_char',
'salt': 'standard_char',
'ldap_user': 'standard_char',
'ldap_integration_history': '/example/1/',
'integrationadapter_ptr': 'default',
'server': 'standard_char',
}
missing_ldap_user = {
'ldap_password': 'standard_char',
'salt': 'standard_char',
'dump_dn': 'standard_char',
'ldap_integration_history': '/example/1/',
'integrationadapter_ptr': 'default',
'server': 'standard_char',
}
modified_ldap_user = {
'ldap_password': 'standard_char',
'salt': 'standard_char',
'dump_dn': 'standard_char',
'ldap_user': 'modified_char',
'ldap_integration_history': '/example/1/',
        'integrationadapter_ptr': 'default',
'server': 'standard_char',
}
modified_ldap_integration_history = {
'ldap_password': 'standard_char',
'salt': 'standard_char',
'dump_dn': 'standard_char',
'ldap_user': 'standard_char',
'ldap_integration_history': '/example/2/',
'integrationadapter_ptr': 'default',
'server': 'standard_char',
}
missing_ldap_integration_history = {
'ldap_password': 'standard_char',
        'salt': 'standard_char',
'dump_dn': 'standard_char',
'ldap_user': 'standard_char',
'integrationadapter_ptr': 'default',
'server': 'standard_char',
}
modified_integrationadapter_ptr = {
'ldap_password': 'standard_char',
'salt': 'standard_char',
'dump_dn': 'standard_char',
'ldap_user': 'standard_char',
'ldap_integration_history': '/example/1/',
'integrationadapter_ptr': 'default',
'server': 'standard_char',
}
missing_integrationadapter_ptr = {
'ldap_password': 'standard_char',
'salt': 'standard_char',
'dump_dn': 'standard_char',
'ldap_user': 'standard_char',
'ldap_integration_history': '/example/1/',
'server': 'standard_char',
}
missing_server = {
'ldap_password': 'standard_char',
'salt': 'standard_char',
'dump_dn': 'standard_char',
'ldap_user': 'standard_char',
'ldap_integration_history': '/example/1/',
'integrationadapter_ptr': 'default',
}
modified_server = {
'ldap_password': 'standard_char',
'salt': 'standard_char',
'dump_dn': 'standard_char',
'ldap_user': 'standard_char',
'ldap_integration_history': '/example/1/',
'integrationadapter_ptr': 'default',
'server': 'modified_char',
}
"""Group Anagrams
Given an array of strings, group anagrams together.
Example:
Input:
["eat", "tea", "tan", "ate", "nat", "bat"]
Output:
[
["ate","eat","tea"],
["nat","tan"],
["bat"]
]
Note:
All inputs will be in lowercase.
The order of your output does not matter.
"""
from typing import List
class Solution:
    def groupAnagrams(self, strs: List[str]) -> List[List[str]]:
store = {}
for item in strs:
key = ''.join(sorted(item))
if key in store:
store[key].append(item)
else:
store[key] = [item]
        return list(store.values())
if __name__ == '__main__':
cases = [
(
["eat", "tea", "tan", "ate", "nat", "bat"],
[
["ate", "eat", "tea"],
[ | "nat", "tan"],
["bat"]
]
),
] # yapf: disable
for case in cases:
for S in [Solution]:
result = S().groupAnagrams(case[0])
for l in case[1]:
for item in l:
found = False
for ll in result:
if item in ll:
found = True
assert found
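# Complexity note: for n words of maximum length k, building the sorted-word
# keys costs O(n * k log k) time overall, and the buckets use O(n * k) space.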
class Content(object):
def __init__(self, type_=None, value=None):
self._type = None
self._value = None
if type_ is not None:
self.type = type_
        if value is not None:
self.value = value
@property
def type(self):
return self._type
@type.setter
def type(self, value):
self._type = value
@property
def value(self):
return self._value
@value.setter
def value(self, value):
self._value = value
def get(self):
content = {}
if self.type is not None:
content["type"] = self.type
        if self.value is not None:
content["value"] = self.value
return content
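# Usage sketch (illustrative values): build a Content and serialize it to the
# plain-dict shape that get() produces.
if __name__ == '__main__':
    content = Content(type_="text/plain", value="Hello, World!")
    assert content.get() == {"type": "text/plain", "value": "Hello, World!"}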
"""
Implement the command-line tool interface.
"""
from __future__ import unicode_literals
import argparse
import os
import sys
import diff_cover
from diff_cover.diff_reporter import GitDiffReporter
from diff_cover.git_diff import GitDiffTool
from diff_cover.git_path import GitPathTool
from diff_cover.violations_reporter import (
XmlCoverageReporter, Pep8QualityReporter,
PyflakesQualityReporter, PylintQualityReporter
)
from diff_cover.report_generator import (
HtmlReportGenerator, StringReportGenerator,
HtmlQualityReportGenerator, StringQualityReportGenerator
)
from lxml import etree
import six
COVERAGE_XML_HELP = "XML coverage report"
HTML_REPORT_HELP = "Diff coverage HTML output"
COMPARE_BRANCH_HELP = "Branch to compare"
VIOLATION_CMD_HELP = "Which code quality tool to use"
INPUT_REPORTS_HELP = "Pep8, pyflakes or pylint reports to use"
OPTIONS_HELP = "Options to be passed to the violations tool"
QUALITY_REPORTERS = {
'pep8': Pep8QualityReporter,
'pyflakes': PyflakesQualityReporter,
'pylint': PylintQualityReporter
}
import logging
LOGGER = logging.getLogger(__name__)
def parse_coverage_args(argv):
"""
Parse command line arguments, returning a dict of
valid options:
{
'coverage_xml': COVERAGE_XML,
'html_report': None | HTML_REPORT
}
where `COVERAGE_XML` is a path, and `HTML_REPORT` is a path.
The path strings may or may not exist.
"""
parser = argparse.ArgumentParser(description=diff_cover.DESCRIPTION)
parser.add_argument(
'coverage_xml',
type=str,
help=COVERAGE_XML_HELP,
nargs='+'
)
parser.add_argument(
'--html-report',
type=str,
default=None,
help=HTML_REPORT_HELP
)
parser.add_argument(
'--compare-branch',
type=str,
default='origin/master',
help=COMPARE_BRANCH_HELP
)
return vars(parser.parse_args(argv))
def parse_quality_args(argv):
"""
Parse command line arguments, returning a dict of
valid options:
{
'violations': pep8 | pyflakes | pylint
'html_report': None | HTML_REPORT
}
where `HTML_REPORT` is a path.
"""
parser = argparse.ArgumentParser(
description=diff_cover.QUALITY_DESCRIPTION
)
parser.add_argument(
'--violations',
type=str,
help=VIOLATION_CMD_HELP,
required=True
)
parser.add_argument(
'--html-report',
type=str,
default=None,
help=HTML_REPORT_HELP
)
parser.add_argument(
'--compare-branch',
type=str,
default='origin/master',
help=COMPARE_BRANCH_HELP
)
parser.add_argument(
'input_reports',
type=str,
nargs="*",
default=[],
help=INPUT_REPORTS_HELP
)
parser.add_argument(
'--options',
type=str,
nargs='?',
default=None,
help=OPTIONS_HELP
)
return vars(parser.parse_args(argv))
def generate_coverage_report(coverage_xml, compare_branch, html_report=None):
"""
Generate the diff coverage report, using kwargs from `parse_args()`.
"""
diff = GitDiffReporter(compare_branch, git_diff=GitDiffTool())
xml_roots = [etree.parse(xml_root) for xml_root in coverage_xml]
coverage = XmlCoverageReporter(xml_roots)
# Build a report generator
if html_report is not None:
reporter = HtmlReportGenerator(coverage, diff)
with open(html_report, "wb") as output_file:
reporter.generate_report(output_file)
reporter = StringReportGenerator(coverage, diff)
output_file = sys.stdout if six.PY2 else sys.stdout.buffer
# Generate the report
reporter.generate_report(output_file)
def generate_quality_report(tool, compare_branch, html_report=None):
"""
Generate the quality report, using kwargs from `parse_args()`.
"""
diff = GitDiffReporter(compare_branch, git_diff=GitDiffTool())
if html_report is not None:
reporter = HtmlQualityReportGenerator(tool, diff)
output_file = open(html_report, "wb")
else:
reporter = StringQualityReportGenerator(tool, diff)
output_file = sys.stdout if six.PY2 else sys.stdout.buffer
reporter.generate_report(output_file)
def main():
"""
Main entry point for the tool, used by setup.py
"""
progname = sys.argv[0]
# Init the path tool to work with the current directory
try:
cwd = os.getcwdu()
except AttributeError:
cwd = os.getcwd()
GitPathTool.set_cwd(cwd)
if progname.endswith('diff-cover'):
arg_dict = parse_coverage_args(sys.argv[1:])
generate_coverage_report(
arg_dict['coverage_xml'],
arg_dict['compare_branch'],
html_report=arg_dict['html_report'],
)
elif progname.endswith('diff-quality'):
arg_dict = parse_quality_args(sys.argv[1:])
tool = arg_dict['violations']
user_options = arg_dict.get('options')
if user_options:
user_options = user_options[1:-1] # Strip quotes
reporter_class = QUALITY_REPORTERS.get(tool)
if reporter_class is not None:
# If we've been given pre-generated reports,
# try to open the files
input_reports = []
for path in arg_dict['input_reports']:
try:
input_reports.append(open(path, 'rb'))
except IOError:
LOGGER.warning("Could not load '{0}'".format(path))
try:
reporter = reporter_class(tool, input_reports, user_options=user_options)
generate_quality_report(
reporter,
arg_dict['compare_branch'],
arg_dict['html_report']
)
# Close any reports we opened
finally:
for file_handle in input_reports:
                    file_handle.close()
else:
LOGGER.error("Quality tool not recognized: '{0}'".format(tool))
exit(1)
if __name__ == "__main__":
main()
"""
Support the OwnTracks platform.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/device_tracker.owntracks/
"""
import json
import logging
import threading
from collections import defaultdict
import homeassistant.components.mqtt as mqtt
from homeassistant.const import STATE_HOME
from homeassistant.util import convert, slugify
DEPENDENCIES = ['mqtt']
REGIONS_ENTERED = defaultdict(list)
MOBILE_BEACONS_ACTIVE = defaultdict(list)
BEACON_DEV_ID = 'beacon'
LOCATION_TOPIC = 'owntracks/+/+'
EVENT_TOPIC = 'owntracks/+/+/event'
_LOGGER = logging.getLogger(__name__)
LOCK = threading.Lock()
CONF_MAX_GPS_ACCURACY = 'max_gps_accuracy'
def setup_scanner(hass, config, see):
"""Setup an OwnTracks tracker."""
max_gps_accuracy = config.get(CONF_MAX_GPS_ACCURACY)
def validate_payload(payload, data_type):
"""Validate OwnTracks payload."""
try:
data = json.loads(payload)
except ValueError:
# If invalid JSON
_LOGGER.error('Unable to parse payload as JSON: %s', payload)
return None
if not isinstance(data, dict) or data.get('_type') != data_type:
_LOGGER.debug('Skipping %s update for following data '
'because of missing or malformatted data: %s',
data_type, data)
return None
if max_gps_accuracy is not None and \
convert(data.get('acc'), float, 0.0) > max_gps_accuracy:
_LOGGER.debug('Skipping %s update because expected GPS '
'accuracy %s is not met: %s',
data_type, max_gps_accuracy, data)
return None
if convert(data.get('acc'), float, 1.0) == 0.0:
            _LOGGER.debug('Skipping %s update because GPS accuracy '
'is zero',
data_type)
return None
return data
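    # An illustrative payload that would pass validate_payload (the field
    # names are the ones consumed above and in _parse_see_args; the values
    # are made up):
    #   {"_type": "location", "lat": 52.52, "lon": 13.40, "acc": 10, "batt": 92}
    # A zero 'acc', or one above max_gps_accuracy when that option is set,
    # causes the update to be skipped.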
def owntracks_location_update(topic, payload, qos):
"""MQTT message received."""
# Docs on available data:
# http://owntracks.org/booklet/tech/json/#_typelocation
data = validate_payload(payload, 'location')
if not data:
return
dev_id, kwargs = _parse_see_args(topic, data)
# Block updates if we're in a region
with LOCK:
if REGIONS_ENTERED[dev_id]:
                _LOGGER.debug(
                    "location update ignored - inside region %s",
                    REGIONS_ENTERED[dev_id][-1])
return
see(**kwargs)
see_beacons(dev_id, kwargs)
def owntracks_event_update(topic, payload, qos):
"""MQTT event (geofences) received."""
# Docs on available data:
# http://owntracks.org/booklet/tech/json/#_typetransition
data = validate_payload(payload, 'transition')
if not data:
return
if data.get('desc') is None:
_LOGGER.error(
"Location missing from `Entering/Leaving` message - "
"please turn `Share` on in OwnTracks app")
return
# OwnTracks uses - at the start of a beacon zone
# to switch on 'hold mode' - ignore this
location = slugify(data['desc'].lstrip("-"))
if location.lower() == 'home':
location = STATE_HOME
dev_id, kwargs = _parse_see_args(topic, data)
def enter_event():
"""Execute enter event."""
zone = hass.states.get("zone.{}".format(location))
with LOCK:
if zone is None and data.get('t') == 'b':
# Not a HA zone, and a beacon so assume mobile
beacons = MOBILE_BEACONS_ACTIVE[dev_id]
if location not in beacons:
beacons.append(location)
_LOGGER.info("Added beacon %s", location)
else:
# Normal region
regions = REGIONS_ENTERED[dev_id]
if location not in regions:
regions.append(location)
_LOGGER.info("Enter region %s", location)
_set_gps_from_zone(kwargs, location, zone)
see(**kwargs)
see_beacons(dev_id, kwargs)
def leave_event():
"""Execute leave event."""
with LOCK:
regions = REGIONS_ENTERED[dev_id]
if location in regions:
regions.remove(location)
new_region = regions[-1] if regions else None
if new_region:
# Exit to previous region
zone = hass.states.get("zone.{}".format(new_region))
_set_gps_from_zone(kwargs, new_region, zone)
_LOGGER.info("Exit to %s", new_region)
see(**kwargs)
see_beacons(dev_id, kwargs)
else:
_LOGGER.info("Exit to GPS")
# Check for GPS accuracy
if not ('acc' in data and
max_gps_accuracy is not None and
data['acc'] > max_gps_accuracy):
see(**kwargs)
see_beacons(dev_id, kwargs)
else:
_LOGGER.info("Inaccurate GPS reported")
beacons = MOBILE_BEACONS_ACTIVE[dev_id]
if location in beacons:
beacons.remove(location)
_LOGGER.info("Remove beacon %s", location)
if data['event'] == 'enter':
enter_event()
elif data['event'] == 'leave':
leave_event()
else:
_LOGGER.error(
'Misformatted mqtt msgs, _type=transition, event=%s',
data['event'])
return
def see_beacons(dev_id, kwargs_param):
"""Set active beacons to the current location."""
kwargs = kwargs_param.copy()
# the battery state applies to the tracking device, not the beacon
kwargs.pop('battery', None)
for beacon in MOBILE_BEACONS_ACTIVE[dev_id]:
kwargs['dev_id'] = "{}_{}".format(BEACON_DEV_ID, beacon)
kwargs['host_name'] = beacon
see(**kwargs)
mqtt.subscribe(hass, LOCATION_TOPIC, owntracks_location_update, 1)
mqtt.subscribe(hass, EVENT_TOPIC, owntracks_event_update, 1)
return True
def _parse_see_args(topic, data):
"""Parse the OwnTracks location parameters, into the format see expects."""
parts = topic.split('/')
dev_id = slugify('{}_{}'.format(parts[1], parts[2]))
host_name = parts[1]
kwargs = {
'dev_id': dev_id,
'host_name': host_name,
'gps': (data['lat'], data['lon'])
}
if 'acc' in data:
kwargs['gps_accuracy'] = data['acc']
if 'batt' in data:
kwargs['battery'] = data['batt']
return dev_id, kwargs
def _set_gps_from_zone(kwargs, location, zone):
"""Set the see parameters from the zone parameters."""
if zone is not None:
kwargs['gps'] = (
zone.attributes['latitude'],
zone.attributes['longitude'])
kwargs['gps_accuracy'] = zone.attributes['radius']
kwargs['location_name'] = location
return kwargs
|
# Consider a row of n coins of values v1 . . . vn, where n is even.
# We play a game against an opponent by alternating turns. In each turn,
# a player selects either the first or last coin from the row, removes it
# from the row permanently, and receives the value of the coin. Determine the
# maximum possible amount of money we can definitely win if we move first.
# Note: The opponent is as clever as the user.
# http://www.geeksforgeeks.org/dynamic-programming-set-31-optimal-strategy-for-a-game/
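# Recurrence implemented below (a restatement of the formula on the linked
# page), with F(i, j) the best total we can guarantee from coins[i..j]:
#   F(i, j) = max(coins[i] + min(F(i+2, j), F(i+1, j-1)),
#                 coins[j] + min(F(i+1, j-1), F(i, j-2)))
# The inner min() models the opponent handing us the worse follow-up state.
# Worked example for [8, 15, 3, 7]: F(0,1)=15, F(1,2)=15, F(2,3)=7, so
# F(0,3) = max(8 + min(7, 15), 7 + min(15, 15)) = max(15, 22) = 22,
# i.e. take the 7 first, which guarantees us the 15 on our next turn.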
def find_max_val_recur(coins, l, r):
    if l + 1 == r:
        return max(coins[l], coins[r])
    if l == r:
        return coins[l]
    left_choose = coins[l] + min(find_max_val_recur(coins, l + 1, r - 1), find_max_val_recur(coins, l + 2, r))
    right_choose = coins[r] + min(find_max_val_recur(coins, l + 1, r - 1), find_max_val_recur(coins, l, r - 2))
    return max(left_choose, right_choose)
coin_map = {}
def find_max_val_memo(coins, l, r):
    if l + 1 == r:
        return max(coins[l], coins[r])
    if l == r:
        return coins[l]
    if (l, r) in coin_map:
        return coin_map[(l, r)]
    left_choose = coins[l] + min(find_max_val_memo(coins, l + 1, r - 1), find_max_val_memo(coins, l + 2, r))
    right_choose = coins[r] + min(find_max_val_memo(coins, l + 1, r - 1), find_max_val_memo(coins, l, r - 2))
    max_val = max(left_choose, right_choose)
    coin_map[(l, r)] = max_val
    return max_val
def find_max_val_bottom_up(coins):
coins_len = len(coins)
table = [[0] * coins_len for i in range(coins_len + 1)]
for gap in range(coins_len):
i = 0
for j in range(gap,coins_len):
# Here x is value of F(i+2, j), y is F(i+1, j-1) and
# z is F(i, j-2) in above recursive formula
x = table[i+2][j] if (i+2) <= j else 0
y = table[i+1][j-1] if (i+1) <= (j-1) else 0
z = table[i][j-2] if i <= (j-2) else 0
table[i][j] = max(coins[i] + min(x,y),coins[j] + min(y,z))
i += 1
return table[0][coins_len - 1]
if __name__=="__main__":
coins = [8,15,3,7]
print(find_max_val_bottom_up(coins))
# Yith Library Server is a password storage server.
# Copyright (C) 2012-2013 Yaco Sistemas
# Copyright (C) 2012-2013 Alejandro Blanco Escudero <alejandro.b.e@gmail.com>
# Copyright (C) 2012-2015 Lorenzo Gil Sanchez <lorenzo.gil.sanchez@gmail.com>
#
# This file is part of Yith Library Server.
#
# Yith Library Server is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Yith Library Server is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Yith Library Server. If not, see <http://www.gnu.org/licenses/>.
import logging
from deform import Button, Form, ValidationFailure
from pyramid.i18n import get_locale_name
from pyramid.httpexceptions import HTTPFound
from pyramid.renderers import render_to_response
from pyramid.view import view_config
from yithlibraryserver.email import send_email_to_admins
from yithlibraryserver.i18n import TranslationString as _
from yithlibraryserver.schemas import ContactSchema
log = logging.getLogger(__name__)
@view_config(route_name='home', renderer='templates/home.pt')
def home(request):
return {}
@view_config(route_name='contact', renderer='templates/contact.pt')
def contact(request):
button1 = Button('submit', _('Send message'))
button1.css_class = 'btn-primary'
button2 = Button('cancel', _('Cancel'))
button2.css_class = 'btn-default'
form = Form(ContactSchema(), buttons=(button1, button2))
if 'submit' in request.POST:
controls = request.POST.items()
try:
appstruct = form.validate(controls)
except ValidationFailure as e:
return {'form': e.render()}
context = {'link': request.route_url('contact')}
context.update(appstruct)
subject = ("%s sent a message from Yith's contact form"
% appstruct['name'])
result = send_email_to_admins(
request,
'yithlibraryserver:templates/email_contact',
context,
subject,
extra_headers={'Reply-To': appstruct['email']},
)
if result is None:
log.error(
'%s <%s> tried to send a message from the contact form but no '
'admin emails were configured. Message: %s' % (
appstruct['name'],
appstruct['email'],
appstruct['message'],
)
            )
        request.session.flash(
_('Thank you very much for sharing your opinion'),
'info',
)
return HTTPFound(location=request.route_path('home'))
elif 'cancel' in request.POST:
return HTTPFound(location=request.route_path('home'))
initial = {}
if request.user is not None:
initial['name'] = request.user.first_name
if request.user.email_verified:
initial['email'] = request.user.email
return {'form': form.render(initial)}
@view_config(route_name='tos', renderer='templates/tos.pt')
def tos(request):
return {}
@view_config(route_name='faq', renderer='string')
def faq(request):
# We don't want to mess up the gettext .po file
# with a lot of strings which don't belong to the
# application interface.
#
# We consider the FAQ as application content
    # so we simply use a different template for each
# language. When a new locale is added to the
# application it needs to translate the .po files
# as well as this template
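    # For example (illustrative): with locale 'en' this renders
    # yithlibraryserver:templates/faq-en.pt, with 'es' faq-es.pt, and so on.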
locale_name = get_locale_name(request)
template = 'yithlibraryserver:templates/faq-%s.pt' % locale_name
return render_to_response(template, {}, request=request)
@view_config(route_name='credits', renderer='templates/credits.pt')
def credits(request):
return {}
# -*- coding: utf-8 -*-
# Generated by Django 1.11.12 on 2019-03-05 14:58
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('database', '0020_daymetdata'),
]
operations = [
migrations.CreateModel(
name='PublicProjects',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('List', models.TextField(blank=True)),
],
),
        migrations.RemoveField(
model_name='daymetdata',
name='id',
),
migrations.AddField(
model_name='userprofile',
name='privateProjectList',
field=models.TextField(blank=True),
),
migrations.AlterField(
            model_name='daymetdata',
name='user',
field=models.ForeignKey(default=None, on_delete=django.db.models.deletion.CASCADE, primary_key=True, serialize=False, to=settings.AUTH_USER_MODEL),
preserve_default=False,
),
]
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from typing import Dict, Optional
import attr
import papermill as pm
from airflow.lineage.entities import File
from airflow.models import BaseOperator
from airflow.utils.decorators import apply_defaults
@attr.s(auto_attribs=True)
class NoteBook(File):
"""
Jupyter notebook
"""
type_hint: Optional[str] = "jupyter_notebook"
parameters: Optional[Dict] = {}
meta_schema: str = __name__ + '.NoteBook'
class PapermillOperator(BaseOperator):
"""
Executes a jupyter notebook through papermill that is annotated with parameters
:param input_nb: input notebook (can also be a NoteBook or a File inlet)
:type input_nb: str
:param output_nb: output notebook (can also be a NoteBook or File outlet)
:type output_nb: str
    :param parameters: the notebook parameters to set
:type parameters: dict
"""
supports_lineage = True
@apply_defaults
def __init__(self,
input_nb: Optional[str] = None,
output_nb: Optional[str] = None,
                 parameters: Optional[Dict] = None,
*args, **kwargs) -> None:
super().__init__(*args, **kwargs)
if input_nb:
self.inlets.append(NoteBook(url=input_nb,
parameters=parameters))
if output_nb:
self.outlets.append(NoteBook(url=output_nb))
def execute(self, context):
if not self.inlets or not self.outlets:
raise ValueError("Input notebook or output notebook is not specified")
for i in range(len(self.inlets)):
pm.execute_notebook(self.inlets[i].url, self.outlets[i].url,
parameters=self.inlets[i].parameters,
progress_bar=False, report_mode=True)
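# Usage sketch (illustrative; the DAG object, task_id and notebook paths are
# assumptions, not part of this module):
#
#   run_report = PapermillOperator(
#       task_id='run_report',
#       input_nb='/tmp/input.ipynb',
#       output_nb='/tmp/report.ipynb',
#       parameters={'msgs': 'hello'},
#       dag=dag,
#   )
#
# The input notebook becomes a NoteBook inlet and the output a NoteBook
# outlet, which is exactly what execute() pairs up and runs via papermill.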
# coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import logging
import os
import sys
import threading
from setproctitle import setproctitle as set_process_title
from pants.goal.run_tracker import RunTracker
from pants.logging.setup import setup_logging
from pants.pantsd.process_manager import ProcessManager
from pants.pantsd.util import clean_global_runtime_state
class _StreamLogger(object):
"""A sys.{stdout,stderr} replacement that pipes output to a logger."""
def __init__(self, logger, log_level):
"""
:param logging.Logger logger: The logger instance to emit writes to.
:param int log_level: The log level to use for the given logger.
"""
self._logger = logger
self._log_level = log_level
def write(self, msg):
for line in msg.rstrip().splitlines():
self._logger.log(self._log_level, line.rstrip())
def flush(self):
return
class PantsDaemon(ProcessManager):
"""A daemon that manages PantsService instances."""
JOIN_TIMEOUT_SECONDS = 1
LOG_NAME = 'pantsd.log'
class StartupFailure(Exception): pass
class RuntimeFailure(Exception): pass
def __init__(self, build_root, work_dir, log_level, log_dir=None, services=None,
metadata_base_dir=None):
"""
:param string build_root: The pants build root.
:param string work_dir: The pants work directory.
:param int log_level: The log level to use for daemon logging.
:param string log_dir: The directory to use for file-based logging via the daemon. (Optional)
    :param tuple services: A tuple of PantsService instances to launch/manage. (Optional)
"""
    super(PantsDaemon, self).__init__(name='pantsd', metadata_base_dir=metadata_base_dir)
self._logger = logging.getLogger(__name__)
self._build_root = build_root
self._work_dir = work_dir
self._log_level = log_level
self._log_dir = log_dir or os.path.join(work_dir, self.name)
self._services = services or ()
self._socket_map = {}
# N.B. This Event is used as nothing more than a convenient atomic flag - nothing waits on it.
self._kill_switch = threading.Event()
@property
def is_killed(self):
return self._kill_switch.is_set()
def set_services(self, services):
self._services = services
def set_socket_map(self, socket_map):
self._socket_map = socket_map
def shutdown(self, service_thread_map):
"""Gracefully terminate all services and kill the main PantsDaemon loop."""
for service, service_thread in service_thread_map.items():
self._logger.info('terminating pantsd service: {}'.format(service))
service.terminate()
service_thread.join()
self._logger.info('terminating pantsd')
self._kill_switch.set()
@staticmethod
def _close_fds():
"""Close pre-fork stdio streams to avoid output in the pants process that launched pantsd."""
for fd in (sys.stdin, sys.stdout, sys.stderr):
file_no = fd.fileno()
fd.flush()
fd.close()
os.close(file_no)
def _setup_logging(self, log_level):
"""Reinitialize logging post-fork to clear all handlers, file descriptors, locks etc.
This must happen first thing post-fork, before any further logging is emitted.
"""
    # Re-initialize the child's logging locks post-fork to avoid potential deadlocks if pre-fork
# threads have any locks acquired at the time of fork.
logging._lock = threading.RLock() if logging.thread else None
for handler in logging.getLogger().handlers:
handler.createLock()
# Invoke a global teardown for all logging handlers created before now.
logging.shutdown()
# Reinitialize logging for the daemon context.
setup_logging(log_level, console_stream=None, log_dir=self._log_dir, log_name=self.LOG_NAME)
# Close out pre-fork file descriptors.
self._close_fds()
# Redirect stdio to the root logger.
sys.stdout = _StreamLogger(logging.getLogger(), logging.INFO)
sys.stderr = _StreamLogger(logging.getLogger(), logging.WARN)
self._logger.debug('logging initialized')
def _setup_services(self, services):
for service in services:
self._logger.info('setting up service {}'.format(service))
service.setup()
def _run_services(self, services):
"""Service runner main loop."""
if not services:
self._logger.critical('no services to run, bailing!')
return
service_thread_map = {service: threading.Thread(target=service.run) for service in services}
# Start services.
for service, service_thread in service_thread_map.items():
self._logger.info('starting service {}'.format(service))
try:
service_thread.start()
except (RuntimeError, service.ServiceError):
self.shutdown(service_thread_map)
raise self.StartupFailure('service {} failed to start, shutting down!'.format(service))
# Monitor services.
while not self.is_killed:
for service, service_thread in service_thread_map.items():
if not service_thread.is_alive():
self.shutdown(service_thread_map)
raise self.RuntimeFailure('service failure for {}, shutting down!'.format(service))
else:
# Avoid excessive CPU utilization.
service_thread.join(self.JOIN_TIMEOUT_SECONDS)
def _write_named_sockets(self, socket_map):
"""Write multiple named sockets using a socket mapping."""
for socket_name, socket_info in socket_map.items():
self.write_named_socket(socket_name, socket_info)
def _run(self):
"""Synchronously run pantsd."""
# Switch log output to the daemon's log stream from here forward.
self._setup_logging(self._log_level)
self._logger.info('pantsd starting, log level is {}'.format(self._log_level))
# Purge as much state as possible from the pants run that launched us.
clean_global_runtime_state()
# Set the process name in ps output to 'pantsd' vs './pants compile src/etc:: -ldebug'.
set_process_title('pantsd [{}]'.format(self._build_root))
# Write service socket information to .pids.
self._write_named_sockets(self._socket_map)
# Enter the main service runner loop.
self._setup_services(self._services)
self._run_services(self._services)
def pre_fork(self):
"""Pre-fork() callback for ProcessManager.daemonize()."""
# Teardown the RunTracker's SubprocPool pre-fork.
RunTracker.global_instance().shutdown_worker_pool()
# TODO(kwlzn): This currently aborts tracking of the remainder of the pants run that launched
# pantsd.
def post_fork_child(self):
"""Post-fork() child callback for ProcessManager.daemonize()."""
self._run()
from ethereum.slogging import get_logger
log = get_logger('eth.block_creation')
from ethereum.block import Block, BlockHeader
from ethereum.common import mk_block_from_prevstate, validate_header, \
verify_execution_results, validate_transaction_tree, \
set_execution_results, add_transactions, post_finalize
from ethereum.consensus_strategy import get_consensus_strategy
from ethereum.messages import apply_transaction
from ethereum.state import State
from ethereum.utils import sha3, encode_hex
import rlp
# Applies the block-level state transition function
def apply_block(state, block):
# Pre-processing and verification
snapshot = state.snapshot()
cs = get_consensus_strategy(state.config)
try:
# Start a new block context
cs.initialize(state, block)
# Basic validation
assert validate_header(state, block.header)
assert cs.check_seal(state, block.header)
assert cs.validate_uncles(state, block)
assert validate_transaction_tree(state, block)
# Process transactions
for tx in block.transactions:
apply_transaction(state, tx)
# Finalize (incl paying block rewards)
cs.finalize(state, block)
# Verify state root, tx list root, receipt root
assert verify_execution_results(state, block)
# Post-finalize (ie. add the block header to the state for now)
post_finalize(state, block)
except (ValueError, AssertionError) as e:
state.revert(snapshot)
raise e
return state
# Creates a candidate block on top of the given chain
def make_head_candidate(chain, txqueue=None,
parent=None,
timestamp=None,
coinbase='\x35'*20,
extra_data='moo ha ha says the laughing cow.',
min_gasprice=0):
log.info('Creating head candidate')
    if parent is None:
        temp_state = State.from_snapshot(chain.state.to_snapshot(root_only=True), chain.env)
else:
temp_state = chain.mk_poststate_of_blockhash(parent.hash)
cs = get_consensus_strategy(chain.env.config)
# Initialize a block with the given parent and variables
blk = mk_block_from_prevstate(chain, temp_state, timestamp, coinbase, extra_data)
# Find and set the uncles
blk.uncles = cs.get_uncles(chain, temp_state)
blk.header.uncles_hash = sha3(rlp.encode(blk.uncles))
# Call the initialize state transition function
cs.initialize(temp_state, blk)
# Add transactions
add_transactions(temp_state, blk, txqueue, min_gasprice)
# Call the finalize state transition function
cs.finalize(temp_state, blk)
# Set state root, receipt root, etc
set_execution_results(temp_state, blk)
log.info('Created head candidate successfully')
return blk, temp_state
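# Usage sketch (illustrative; `chain` and `txqueue` stand for an existing
# Chain instance and a transaction queue, neither defined in this module):
#
#   blk, post_state = make_head_candidate(chain, txqueue=txqueue)
#
# The returned candidate block still lacks a valid seal; the consensus
# engine's sealing step (cf. check_seal in apply_block) happens afterwards.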
from django import forms
from .models import PassType, Registration
class SignupForm(forms.ModelForm):
pass_type = forms.ModelChoiceField(
queryset=PassType.objects.filter(active=True),
widget=forms.widgets.RadioSelect(),
)
class Meta:
model = Registration
fields = (
"first_name",
"last_name",
"email",
"residing_country",
"dance_role",
"pass_type",
"workshop_partner_name",
"workshop_partner_email",
"lunch",
)
widgets = {
"dance_role": forms.widgets.RadioSelect(),
"lunch": forms.widgets.RadioSelect(),
}
class Media:
css = {"all": ("css/forms.css",)}
email_repeat = forms.EmailField()
agree_to_terms = forms.BooleanField(required=False)
def __init__(self, *args, **kwargs):
super(SignupForm, self).__init__(*args, **kwargs)
self.fields["pass_type"].empty_label = None
self.fields["lunch"].empty_label = None
def clean_workshop_partner_email(self):
"""
        Take care of uniqueness constraint ourselves
"""
email = self.cleaned_data.get("workshop_partner_email")
qs = Registration.objects.filter(workshop_partner_email=email).exists()
if email and qs:
raise forms.ValidationError("Workshop parter already taken.")
return email
def clean_agree_to_terms(self):
data = self.cleaned_data["agree_to_terms"]
if data is False:
raise forms.ValidationError("You must agree to the terms.")
        return data
def clean(self):
cleaned_data = super().clean()
email = cleaned_data.get("email")
email_repeat = cleaned_data.get("email_repeat")
ws_partner_email = cleaned_data.get("workshop_partner_email")
if email != email_repeat:
raise forms.ValidationError("Ensure email verfication matches.")
if email and ws_partner_email and email == ws_partner_email:
raise forms.ValidationError("You can't partner with yourself.")
# -*- coding: utf-8 -*-
{
'name': "Better validation for Attendance",
'summary': """
Short (1 phrase/line) summary of the module's purpose, used as
subtitle on modules listing or apps.openerp.com""",
'description': """
Long description of module's purpose
""",
'author': "Jörn Mankiewicz",
'website': "http://www.yourcompany.com",
# Categories can be used to filter modules in modules listing
    # Check https://github.com/odoo/odoo/blob/master/openerp/addons/base/module/module_data.xml
# for the full list
'category': 'Uncategorized',
'version': '8.0.0.1',
# any module necessary for this one to work correctly
'depends': ['base','hr_attendance','hr_timesheet_improvement'],
# always loaded
'data': [
# 'security/ir.model.access.csv',
'views/hr_attendance.xml',
],
# only loaded in demonstration mode
'demo': [
'demo.xml',
],
}
import sys
import networkx as nx
def main(graphml):
g = nx.read_graphml(graphml)
    nx.write_graphml(g, graphml)
if __name__ == '__main__':
main(sys.argv[1])
import numpy as np
# Example taken from : http://cs231n.github.io/python-numpy-tutorial/#numpy
x = np.array([[1,2],[3,4]])
print np.sum(x) # Compute sum of all elements; prints "10"
print np.sum(x, axis=0) # Compute sum of each column; prints "[4 6]"
print np.sum(x, axis=1) # Compute sum of each row; prints "[3 7]"
# This file is part of pybootchartgui.
# pybootchartgui is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# pybootchartgui is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with pybootchartgui. If not, see <http://www.gnu.org/licenses/>.
import cairo
import draw
def render(writer, res, options, filename):
handlers = {
"png": (lambda w, h: cairo.Ima | geSurface(cairo.FORMAT_ARGB32, w, h), \
lambda sfc: sfc.write_to_png(filename)),
"pdf": (lambda w, h: cairo.PDFSurface(filename, w, h), lambda sfc: 0),
"svg": (lambda w, h: cairo.S | VGSurface(filename, w, h), lambda sfc: 0)
}
if options.format is None:
fmt = filename.rsplit('.', 1)[1]
else:
fmt = options.format
if not (fmt in handlers):
writer.error ("Unknown format '%s'." % fmt)
return 10
make_surface, write_surface = handlers[fmt]
(w, h) = draw.extents (1.0, *res)
w = max (w, draw.MIN_IMG_W)
surface = make_surface (w, h)
ctx = cairo.Context (surface)
draw.render (ctx, options, 1.0, *res)
write_surface (surface)
writer.status ("bootchart written to '%s'" % filename)
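# Adding another backend (an illustrative sketch, not in the original file)
# is one more `handlers` entry; pycairo also provides a PostScript surface:
#   "ps": (lambda w, h: cairo.PSSurface(filename, w, h), lambda sfc: 0)
# The second element of each pair is the post-render step: the vector
# surfaces write themselves out when finished, so only the PNG backend
# needs an explicit write_to_png call.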
#!/usr/bin/env python
import httplib
try:
import simplejson as json
except ImportError:
import json
import os
import sys
from urlparse import urljoin
try:
import requests
except ImportError:
raise ImportError('Missing dependency "requests". Do ``pip install requests``.')
try:
import yaml
except ImportError:
    raise ImportError('Missing dependency "pyyaml". Do ``pip install pyyaml``.')
# ST2 configuration
ST2_CONFIG_FILE = './config.yaml'
ST2_API_BASE_URL = 'http://localhost:9101/v1'
ST2_AUTH_BASE_URL = 'http://localhost:9100'
ST2_USERNAME = None
ST2_PASSWORD = None
ST2_AUTH_TOKEN = None
ST2_AUTH_PATH = 'tokens'
ST2_WEBHOOKS_PATH = 'webhooks/st2/'
ST2_TRIGGERS_PATH = 'triggertypes/'
ST2_TRIGGERTYPE_PACK = 'sensu'
ST2_TRIGGERTYPE_NAME = 'event_handler'
ST2_TRIGGERTYPE_REF = '.'.join([ST2_TRIGGERTYPE_PACK, ST2_TRIGGERTYPE_NAME])
REGISTERED_WITH_ST2 = False
OK_CODES = [httplib.OK, httplib.CREATED, httplib.ACCEPTED, httplib.CONFLICT]
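# Expected layout of ST2_CONFIG_FILE (the keys are the ones read in the
# __main__ block below; the values shown are illustrative):
#   st2_username: admin
#   st2_password: secret
#   st2_api_base_url: http://localhost:9101/v1
#   st2_auth_base_url: http://localhost:9100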
def _create_trigger_type():
try:
url = _get_st2_triggers_url()
payload = {
'name': ST2_TRIGGERTYPE_NAME,
'pack': ST2_TRIGGERTYPE_PACK,
'description': 'Trigger type for sensu event handler.'
}
# sys.stdout.write('POST: %s: Body: %s\n' % (url, payload))
headers = {}
headers['Content-Type'] = 'application/json; charset=utf-8'
if ST2_AUTH_TOKEN:
headers['X-Auth-Token'] = ST2_AUTH_TOKEN
post_resp = requests.post(url, data=json.dumps(payload), headers=headers)
except:
sys.stderr.write('Unable to register trigger type with st2.')
raise
else:
status = post_resp.status_code
if status not in OK_CODES:
sys.stderr.write('Failed to register trigger type with st2. HTTP_CODE: %d\n' %
status)
            raise Exception('Failed to register trigger type with st2.')
else:
sys.stdout.write('Registered trigger type with st2.\n')
def _get_auth_url():
return urljoin(ST2_AUTH_BASE_URL, ST2_AUTH_PATH)
def _get_auth_token():
global ST2_AUTH_TOKEN
auth_url = _get_auth_url()
try:
resp = requests.post(auth_url, json.dumps({'ttl': 5 * 60}),
auth=(ST2_USERNAME, ST2_PASSWORD))
except:
raise Exception('Cannot get auth token from st2. Will try unauthed.')
else:
ST2_AUTH_TOKEN = resp.json()['token']
def _register_with_st2():
global REGISTERED_WITH_ST2
try:
url = urljoin(_get_st2_triggers_url(), ST2_TRIGGERTYPE_REF)
# sys.stdout.write('GET: %s\n' % url)
if not ST2_AUTH_TOKEN:
_get_auth_token()
if ST2_AUTH_TOKEN:
get_resp = requests.get(url, headers={'X-Auth-Token': ST2_AUTH_TOKEN})
else:
get_resp = requests.get(url)
if get_resp.status_code != httplib.OK:
_create_trigger_type()
else:
body = json.loads(get_resp.text)
if len(body) == 0:
_create_trigger_type()
except:
raise
else:
REGISTERED_WITH_ST2 = True
def _get_st2_triggers_url():
url = urljoin(ST2_API_BASE_URL, ST2_TRIGGERS_PATH)
return url
def _get_st2_webhooks_url():
url = urljoin(ST2_API_BASE_URL, ST2_WEBHOOKS_PATH)
return url
def _post_event_to_st2(url, body):
headers = {}
headers['X-ST2-Integration'] = 'sensu.'
headers['Content-Type'] = 'application/json; charset=utf-8'
if ST2_AUTH_TOKEN:
headers['X-Auth-Token'] = ST2_AUTH_TOKEN
try:
sys.stdout.write('POST: url: %s, body: %s\n' % (url, body))
r = requests.post(url, data=json.dumps(body), headers=headers)
except:
sys.stderr.write('Cannot connect to st2 endpoint.')
else:
status = r.status_code
if status not in OK_CODES:
sys.stderr.write('Failed posting sensu event to st2. HTTP_CODE: %d\n' % status)
else:
sys.stdout.write('Sent sensu event to st2. HTTP_CODE: %d\n' % status)
def main(args):
body = {}
body['trigger'] = ST2_TRIGGERTYPE_REF
body['payload'] = json.loads(sys.stdin.read().strip())
_post_event_to_st2(_get_st2_webhooks_url(), body)
if __name__ == '__main__':
try:
if not os.path.exists(ST2_CONFIG_FILE):
sys.stderr.write('Configuration file not found. Exiting.\n')
sys.exit(1)
with open(ST2_CONFIG_FILE) as f:
config = yaml.safe_load(f)
ST2_USERNAME = config['st2_username']
ST2_PASSWORD = config['st2_password']
ST2_API_BASE_URL = config['st2_api_base_url']
ST2_AUTH_BASE_URL = config['st2_auth_base_url']
if not REGISTERED_WITH_ST2:
_register_with_st2()
except:
sys.stderr.write('Failed registering with st2. Won\'t post event.\n')
else:
main(sys.argv)
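# Invocation sketch (illustrative): sensu pipes the event JSON to this
# handler on stdin, so a manual smoke test could look like
#   echo '{"client": "web-1", "check": "cpu"}' | python st2_handler.py
# with config.yaml present in the working directory. The JSON body and the
# st2_handler.py name are made up; any object parseable by json.loads() is
# forwarded as the trigger payload.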
# Copyright (C) 2012 Nippon Telegraph and Telephone Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# vim: tabstop=4 shiftwidth=4 softtabstop=4
import unittest
import logging
from nose.tools import eq_
from ryu.ofproto.ofproto_common import *
LOG = logging.getLogger('test_ofproto_common')
class TestOfprotCommon(unittest.TestCase):
""" Test case for ofprotp_common
"""
    def test_struct_ofp_header(self):
eq_(OFP_HEADER_PACK_STR, '!BBHI')
eq_(OFP_HEADER_SIZE, 8)
def test_define_constants(self):
eq_(OFP_TCP_PORT, 6633)
eq_(OFP_SSL_PORT, 6633)
    for obj in filtro:
hijos = ComposicionFamiliar.objects.filter(persona = obj.productor,familia = '3').count()
lista_hijos.append(hijos)
hijas = ComposicionFamiliar.objects.filter(persona = obj.productor,familia = '4').count()
lista_hijas.append(hijas)
sumatoria = hijos + hijas
lista_sumatoria.append(sumatoria)
result = []
    # mean, median, standard deviation, minimum and maximum
promedios = [np.mean(lista_hijos),np.mean(lista_hijas),np.mean(lista_sumatoria)]
mediana = [np.median(lista_hijos),np.median(lista_hijas),np.median(lista_sumatoria)]
desviacion = [np.std(lista_hijos),np.std(lista_hijas),np.std(lista_sumatoria)]
minimo = [min(lista_hijos),min(lista_hijas),min(lista_sumatoria)]
maximo = [max(lista_hijos),max(lista_hijas),max(lista_sumatoria)]
    # append everything to the result list
result.append(promedios)
result.append(mediana)
result.append(desviacion)
result.append(minimo)
result.append(maximo)
    # chart: parents' educational level in the families
ESCOLARIDAD_CHOICES = (
(1,'Ninguno'),(2,'Primaria Incompleta'),(3,'Primaria'),
(4,'Secundaria Incompleta'),(5,'Secundaria'),(6,'Técnico'),
(7,'Universitario'),(8,'Profesional'))
escolaridad = collections.OrderedDict()
for obj in ESCOLARIDAD_CHOICES:
madre = filtro.filter(productor__composicionfamiliar__familia = '2',
productor__composicionfamiliar__escolaridad = obj[0]).distinct('productor__composicionfamiliar').count()
padre = filtro.filter(productor__composicionfamiliar__familia = '1',
productor__composicionfamiliar__escolaridad = obj[0]).distinct('productor__composicionfamiliar').count()
        # sons --------------------
hijos_5_12 = filtro.filter(productor__composicionfamiliar__familia = '3',
productor__composicionfamiliar__escolaridad = obj[0],
productor__composicionfamiliar__edad__range = (5,12)).distinct('productor__composicionfamiliar').count()
hijos_13_18 = filtro.filter(productor__composicionfamiliar__familia = '3',
productor__composicionfamiliar__escolaridad = obj[0],
productor__composicionfamiliar__edad__range = (13,18)).distinct('productor__composicionfamiliar').count()
hijos_19 = filtro.filter(productor__composicionfamiliar__familia = '3',
productor__composicionfamiliar__escolaridad = obj[0],
productor__composicionfamiliar__edad__range = (19,100)).distinct('productor__composicionfamiliar').count()
        # daughters --------------------
hijas_5_12 = filtro.filter(productor__composicionfamiliar__familia = '4',
productor__composicionfamiliar__escolaridad = obj[0],
productor__composicionfamiliar__edad__range = (5,12)).distinct('productor__composicionfamiliar').count()
hijas_13_18 = filtro.filter(productor__composicionfamiliar__familia = '4',
productor__composicionfamiliar__escolaridad = obj[0],
productor__composicionfamiliar__edad__range = (13,18)).distinct('productor__composicionfamiliar').count()
hijas_19 = filtro.filter(productor__composicionfamiliar__familia = '4',
productor__composicionfamiliar__escolaridad = obj[0],
productor__composicionfamiliar__edad__range = (19,100)).distinct('productor__composicionfamiliar').count()
escolaridad[obj[1]] = (madre,padre,
hijos_5_12,hijos_13_18,hijos_19,
hijas_5_12,hijas_13_18,hijas_19)
#--------------------------------------------------------------------------------
SI_NO_CHOICES = ((1,'Si'),(2,'No'))
FAMILIA_CHOICES = ((1,'Padre'),(2,'Madre'),(3,'Hijo'),(4,'Hija'),(5,'Hermano'),
(6,'Hermana'),(7,'Sobrino'),(8,'Sobrina'),(9,'Abuelo'),
(10,'Abuela'),(11,'Cuñado'),(12,'Cuñada'),(13,'Yerno'),
(14,'Nuera'),(15,'Otro'),)
list_participacion = []
for obj in FAMILIA_CHOICES:
total = filtro.filter(productor__composicionfamiliar__familia = obj[0]).distinct(
'productor__composicionfamiliar').count()
si_participa = filtro.filter(productor__composicionfamiliar__familia = obj[0],
productor__composicionfamiliar__participacion = '1').distinct(
'productor__composicionfamiliar').count()
promedio = total / float(productores)
promedio = round(promedio, 2)
list_participacion.append((obj[1],saca_porcentajes(si_participa,total,False),promedio))
return render(request, template, locals())
def georeferencia(request,template="granos_basicos/monitoreos/georeferencia.html"):
filtro = _queryset_filtrado(request)
productores = filtro.distinct('productor').count()
lista_mapa = filtro.values('nombre_parcela','latitud','longitud')
mapa = []
    for obj in lista_mapa:
if obj['latitud'] != None and obj['longitud'] != None:
            mapa.append((obj['nombre_parcela'],obj['latitud'],obj['longitud']))
return render(request, template, locals())
def caracteristicas_parcela(request,template="granos_basicos/monitoreos/caracteristicas_parcela.html"):
filtro = _queryset_filtrado(request)
productores = filtro.distinct('productor').count()
lista_parcela = []
lista_inclinado = []
lista_plano = []
    # plot age and arable layer depth
parcela = filtro.values('edad_parcela','profundidad_capa')
for obj in parcela:
if obj['edad_parcela'] != None and obj['profundidad_capa'] != None:
lista_parcela.append((obj['edad_parcela'],obj['profundidad_capa']))
    # age of the plots
menor_5 = filtro.filter(edad_parcela__range = (0,5)).count()
edad_6_20 = filtro.filter(edad_parcela__range = (5.1,20)).count()
mayor_20 = filtro.filter(edad_parcela__range = (20.1,100)).count()
for obj in filtro:
        # % sloped area > 60%
area = DistribucionPendiente.objects.filter(monitoreo = obj,seleccion = '1').values_list('inclinado',flat = True)
for x in area:
if x >= 60:
inclinado = DistribucionPendiente.objects.filter(monitoreo = obj,seleccion = '2').values_list('inclinado','monitoreo__profundidad_capa')
lista_inclinado.append(inclinado)
        # % flat area > 60%
area1 = DistribucionPendiente.objects.filter(monitoreo = obj,seleccion = '1').values_list('plano',flat = True)
for y in area1:
if y >= 60:
plano = DistribucionPendiente.objects.filter(monitoreo = obj,seleccion = '2').values_list('plano','monitoreo__profundidad_capa')
lista_plano.append(plano)
    # water access
SI_NO_CHOICES = ((1,'Si'),(2,'No'))
acceso_agua = {}
conteo_si = 0
for obj in SI_NO_CHOICES:
conteo = filtro.filter(acceso_agua = obj[0]).count()
acceso_agua[obj[1]] = conteo
    # water source
fuente_agua = {}
conteo_si = filtro.filter(acceso_agua = 1).count()
for obj in ACCESO_AGUA_CHOICES:
conteo = filtro.filter(fuente_agua__icontains = obj[0]).count()
fuente_agua[obj[1]] = saca_porcentajes(conteo,conteo_si,False)
return render(request, template, locals())
def ciclo_productivo(request,template="granos_basicos/monitoreos/ciclo_productivo.html"):
filtro = _queryset_filtrado(request)
productores = filtro.distinct('productor').count()
    # sowing
fecha_siembra = filtro.values_list('datosmonitoreo__fecha_siembra',flat = True)
lista_siembra = []
for obj in fecha_siembra:
if obj != None:
x = obj.isocalendar()[1]
lista_siembra.append(x)
l_siembra = sorted(lista_siembra)
dic_siembra = collections.OrderedDict()
for v in l_siembra:
count = l_siembra.count(v)
dic_siembra[v] = count
    # harvest
fecha_cosecha = filtro.values_list('datosmonitoreo__fecha_cosecha',flat = True)
lista_cosecha = []
for obj in fecha_cosecha:
if obj != None:
x = obj.isocalendar()[1]
lista_cosecha.append(x)
l_cosecha = sorted(lista_cosecha)
dic_cosecha = collections.OrderedDict()
for v in l_cosecha:
count = l_cosecha.count(v)
dic_cosecha[v] = count
return render(request, template, locals())
def uso_suelo(request,template="granos_basicos/monitoreos/uso_suelo.html"):
filtro = _queryset_filtrado(request)
productores = filtro.distinct('productor').count()
USO_SUELO_CHOICES = ((1,'Área Total'),(2,'Cultivos Anuales (GB)'),(3,'Cultivos perennes'),
(4,'Tacotales'),(5,'Potreros'),(6,'Pasto de Corte'))
total = filtro.filter(productor__usosuelo__uso = '1').aggregate(total = Sum('productor__usosuelo__cantidad'))['total']
uso_suelo = collections.OrderedDict()
for obj in USO_SUELO_CHOICES:
        # table 1
        familias
        cblacklist = IPFilter.current().blacklist_ips
assert u'1.1.0.0' in cblacklist
assert u'1.1.0.1' in cblacklist
assert u'1.1.1.0' in cblacklist
assert u'1.2.0.0' not in cblacklist
class RestrictedCourseTest(CacheIsolationTestCase):
"""Test RestrictedCourse model. """
ENABLED_CACHES = ['default']
def test_unicode_values(self):
course_id = CourseLocator('abc', '123', 'doremi')
restricted_course = RestrictedCourse.objects.create(course_key=course_id)
assert six.text_type(restricted_course) == six.text_type(course_id)
def test_restricted_course_cache_with_save_delete(self):
course_id = CourseLocator('abc', '123', 'doremi')
RestrictedCourse.objects.create(course_key=course_id)
# Warm the cache
with self.assertNumQueries(1):
RestrictedCourse.is_restricted_course(course_id)
RestrictedCourse.is_disabled_access_check(course_id)
# it should come from cache
with self.assertNumQueries(0):
RestrictedCourse.is_restricted_course(course_id)
RestrictedCourse.is_disabled_access_check(course_id)
assert not RestrictedCourse.is_disabled_access_check(course_id)
        # add a new course so the cache entry is invalidated and the db is hit again
new_course_id = CourseLocator('def', '123', 'doremi')
RestrictedCourse.objects.create(course_key=new_course_id, disable_access_check=True)
with self.assertNumQueries(1):
RestrictedCourse.is_restricted_course(new_course_id)
RestrictedCourse.is_disabled_access_check(new_course_id)
# it should come from cache
with self.assertNumQueries(0):
RestrictedCourse.is_restricted_course(new_course_id)
RestrictedCourse.is_disabled_access_check(new_course_id)
assert RestrictedCourse.is_disabled_access_check(new_course_id)
        # deleting an object also invalidates the cache, so the next
        # is_restricted_course check hits the db again
abc = RestrictedCourse.objects.get(course_key=new_course_id)
abc.delete()
with self.assertNumQueries(1):
RestrictedCourse.is_restricted_course(new_course_id)
# it should come from cache
with self.assertNumQueries(0):
RestrictedCourse.is_restricted_course(new_course_id)
class CountryTest(TestCase):
"""Test Country model. """
def test_unicode_values(self):
country = Country.objects.create(country='NZ')
assert six.text_type(country) == 'New Zealand (NZ)'
class CountryAccessRuleTest(CacheIsolationTestCase):
"""Test CountryAccessRule model. """
ENABLED_CACHES = ['default']
def test_unicode_values(self):
course_id = CourseLocator('abc', '123', 'doremi')
country = Country.objects.create(country='NZ')
restricted_course1 = RestrictedCourse.objects.create(course_key=course_id)
access_rule = CountryAccessRule.objects.create(
restricted_course=restricted_course1,
rule_type=CountryAccessRule.WHITELIST_RULE,
country=country
)
assert six.text_type(access_rule) == u'Whitelist New Zealand (NZ) for {course_key}'.format(course_key=course_id)
course_id = CourseLocator('def', '123', 'doremi')
restricted_course1 = RestrictedCourse.objects.create(course_key=course_id)
access_rule = CountryAccessRule.objects.create(
restricted_course=restricted_course1,
rule_type=CountryAccessRule.BLACKLIST_RULE,
country=country
)
assert six.text_type(access_rule) == u'Blacklist New Zealand (NZ) for {course_key}'.format(course_key=course_id)
def test_unique_together_constraint(self):
"""
Course with specific country can be added either as whitelist or blacklist
trying to add with both types will raise error
"""
course_id = CourseLocator('abc', '123', 'doremi')
country = Country.objects.create(country='NZ')
        restricted_course1 = RestrictedCourse.objects.create(course_key=course_id)
CountryAccessRule.objects.create(
restricted_course=restricted_course1,
            rule_type=CountryAccessRule.WHITELIST_RULE,
country=country
)
with pytest.raises(IntegrityError):
CountryAccessRule.objects.create(
restricted_course=restricted_course1,
rule_type=CountryAccessRule.BLACKLIST_RULE,
country=country
)
def test_country_access_list_cache_with_save_delete(self):
course_id = CourseLocator('abc', '123', 'doremi')
country = Country.objects.create(country='NZ')
restricted_course1 = RestrictedCourse.objects.create(course_key=course_id)
course = CountryAccessRule.objects.create(
restricted_course=restricted_course1,
rule_type=CountryAccessRule.WHITELIST_RULE,
country=country
)
# Warm the cache
with self.assertNumQueries(1):
CountryAccessRule.check_country_access(course_id, 'NZ')
with self.assertNumQueries(0):
CountryAccessRule.check_country_access(course_id, 'NZ')
# Deleting an object will invalidate the cache
course.delete()
with self.assertNumQueries(1):
CountryAccessRule.check_country_access(course_id, 'NZ')
class CourseAccessRuleHistoryTest(TestCase):
"""Test course access rule history. """
def setUp(self):
super(CourseAccessRuleHistoryTest, self).setUp() # lint-amnesty, pylint: disable=super-with-arguments
self.course_key = CourseLocator('edx', 'DemoX', 'Demo_Course')
self.restricted_course = RestrictedCourse.objects.create(course_key=self.course_key)
self.countries = {
'US': Country.objects.create(country='US'),
'AU': Country.objects.create(country='AU')
}
def test_course_access_history_no_rules(self):
self._assert_history([])
self.restricted_course.delete()
self._assert_history_deleted()
def test_course_access_history_with_rules(self):
# Add one rule
us_rule = CountryAccessRule.objects.create(
restricted_course=self.restricted_course,
country=self.countries['US'],
rule_type=CountryAccessRule.WHITELIST_RULE
)
self._assert_history([('US', 'whitelist')])
# Add another rule
au_rule = CountryAccessRule.objects.create(
restricted_course=self.restricted_course,
country=self.countries['AU'],
rule_type=CountryAccessRule.BLACKLIST_RULE
)
self._assert_history([
('US', 'whitelist'),
('AU', 'blacklist')
])
# Delete the first rule
us_rule.delete()
self._assert_history([('AU', 'blacklist')])
# Delete the second rule
au_rule.delete()
self._assert_history([])
def test_course_access_history_delete_all(self):
# Create a rule
CountryAccessRule.objects.create(
restricted_course=self.restricted_course,
country=self.countries['US'],
rule_type=CountryAccessRule.WHITELIST_RULE
)
# Delete the course (and, implicitly, all the rules)
self.restricted_course.delete()
self._assert_history_deleted()
def test_course_access_history_change_message(self):
# Change the message key
self.restricted_course.enroll_msg_key = 'embargo'
self.restricted_course.access_msg_key = 'embargo'
self.restricted_course.save()
# Expect a history entry with the changed keys
self._assert_history([], enroll_msg='embargo', access_msg='embargo')
def _assert_history(self, country_rules, enroll_msg='default', access_msg='default'):
"""Check the latest history entry.
Arguments:
country_rules (list): List of rules, each of which are tuples
of the form `(country_code, rule_type)`.
Keyword Arguments:
            enroll_msg (str):
import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 32 , FREQ = 'D', seed = 0, trendtype = "Lag1Trend", cycle_length = 7, transform = "Anscombe", sigma = 0.0, exog_count = 100, ar_order = 0);
#!/usr/bin/python3
#import nltk
#import pattern.en
from nltk import word_tokenize
from nltk import pos_tag
from nltk.corpus import wordnet
#import nltk.fuf.linearizer
from nltk.stem.wordnet import WordNetLemmatizer as wnl
from re import sub
import string
import random
from .genderPredictor import genderPredictor
#nltk.download()
# nltk downloads: maxent_ne_chunker, maxent_treebank_pos_tagger, punkt, wordnet
# install numpy
# WordNetLemmatizer().lemmatize(word,'v')
class Extrapolate:
def __init__(self):
self.sent_syns = []
print("Setting up Gender Predictor: ")
self.gp = genderPredictor.genderPredictor()
accuracy = self.gp.trainAndTest()
# print("Accuracy:", accuracy)
# print ('Most Informative Features')
feats = self.gp.getMostInformativeFeatures(10)
# for feat in feats:
# print (feat)
def change_gender(self, pnoun, gender):
pnlist = [(("her", "F"), ("him", "M")),
(("she", "F"), ("he", "M")),
(("hers", "F"), ("his", "M")),
(("herself", "F"), ("himself", "M"))]
for pair in pnlist:
for pi, p in enumerate(pair):
if p[0] == pnoun and p[1] != gender:
return pair[(pi-1)%2][0]
else:
return pnoun
def find_synonyms(self, w, wpos):
syn_words = []
synsets = wordnet.synsets(w, pos=wpos)
for s in synsets:
for l in s.lemmas():
syn_words.append(l.name())
return syn_words
def replace_synonyms(self, o_sent, n_sent):
o_tagged = pos_tag(word_tokenize(o_sent))
n_tagged = pos_tag(word_tokenize(n_sent))
for n in n_tagged:
for sdx, syn_list in enumerate(self.sent_syns):
for syn in syn_list:
if (n[0] == syn):
n_sent = sub(r"\b%s\b" %n[0], o_tagged[sdx][0], n_sent)
return n_sent
def replace_proper_nouns(self, o_sent, n_sent):
proper_nouns = []
p_pnouns = []
o_tagged = pos_tag(word_tokenize(o_sent))
n_tagged = pos_tag(word_tokenize(n_sent))
# print("\nTransforming the output:")
# print("Input sentence:", o_sent)
# print("Found sentence:", n_sent)
# print("Input sentence tagged:", o_tagged)
# print("Found sentence tagged:", n_tagged)
for o in o_tagged:
if o[1] == 'NNP' and o not in proper_nouns:
proper_nouns.append(o)
for n in n_tagged:
if (n[1] == 'PRP' or n[1] == 'PRP$' or n[1] == 'NNP') and n not in p_pnouns:
p_pnouns.append(n)
# print("")
if (len(proper_nouns) == 1) and (len(p_pnouns) > 0):
n_sent = sub(r"\b%s\b" %p_pnouns[0][0] , proper_nouns[0][0], n_sent, 1)
gender = self.gp.classify(proper_nouns[0][0])
# print(proper_nouns[0][0], "is classified as", gender)
for pnoun in p_pnouns:
n_pnoun = self.change_gender(pnoun[0], gender)
n_sent = sub(r"\b%s\b" %pnoun[0] , n_pnoun, n_sent)
elif len(proper_nouns) < 1:
print("No proper nouns to replace")
else:
print("Not yet implemented, :P")
return n_sent
def transform(self, o_sent, n_sent):
n_sent = self.replace_proper_nouns(o_sent, n_sent)
n_sent = self.replace_synonyms(o_sent, n_sent)
return(n_sent)
def strip_pos_copy(self, tag):
new_tag = []
for item in tag:
new_tag.append(item[0])
return new_tag
def extrapolate(self, sent):
# tags the part of speech in each word
tagged = pos_tag(word_tokenize(sent))
tag_list = []
for item in tagged:
tag_list.append(list(item))
# puts nouns and verbs in their base form
for idx, item in enumerate(tag_list):
if item[1][0] == 'V':
tag_list[idx][0] = wnl().lemmatize(item[0],'v')
elif item[1] == 'NN' or item[1] == 'NNS':
tag_list[idx][0] = wnl().lemmatize(item[0],'n')
synonyms = [[] for i in range(len(tag_list))]
        # finds synonyms for each noun, verb, adj in tag_list -> puts in corresponding index in synonyms
for idx, item in enumerate(tag_list):
if item[1][0] == 'V':
synonyms[idx] = self.find_synonyms(item[0], wordnet.VERB)
#for v in synonyms[idx]:
# v = en.verb.past(v)
elif item[1] == 'NN' or item[1] == 'NNS':
synonyms[idx] = self.find_synonyms(item[0], wordnet.NOUN)
elif item[1][0] == 'J':
synonyms[idx] = self.find_synonyms(item[0], wordnet.ADJ)
# gets rid of duplicates
for si, s in enumerate(synonyms):
s = list(set(s))
# print(tag_list[si][0], ": ", s)
self.sent_syns = synonyms
search_sent = []
# creates a list of similar sentences to search for
for idx, item in enumerate(tag_list):
# looks for synonyms at the corresponding index
for s in synonyms[idx]:
temp = sub(r"\b%s\b" %item[0], s, sent)
search_sent.append(temp)
# will get rid of duplicates once i make it hashable
search_sent = list(set(search_sent))
# print("\nSample list of synonymous sentences:")
# for i in range(min(len(search_sent), 20)):
# print(search_sent[i])
return search_sent
if __name__ == '__main__':
#list of pretend sentences to search through
sent_list = []
sent_list.append("She danced with the prince and they fall in love.")
sent_list.append("The emperor realized he was swindled but continues the parade anyway.")
sent_list.append("He and his wife were very poor.")
sent_list.append("She promised anything if he would get it for her. ")
sent_list.append("The bears came home and frightened her and she ran away.")
sent_list.append("They came upon a house made of sweets and they ate some. ")
sent_list.append("He climbed the beanstalk and found a giant there who had gold coins that he wanted. ")
sent_list.append("The rats follow him and he led them into the harbor and they die.")
sent_list.append("He begged to be spared and told him about his poor father.")
sent_list.append("The two were married and live happily everafter.")
sent_list.append("The good fairies made another spell so that she would only sleep for 100 years and a prince would awaken her. ")
sent_list.append("The stepmother ordered her to be killed but the huntsman spared her life.")
sent_list.append("The wolf fell into it and died.")
sent_list.append("A fairy granted her wish and gave her a seed to plant. ")
sent_list.append("He decided to run away and came across a cottage. ")
#instantiating extrapolate class, TAKES NOTHING
e = Extrapolate()
#expected input from storytellingbot
o_sent = "Elsa took a sharp sword to slay the monster"
print("\nInput:" + o_sent)
#o_sent = input("Enter a sentence: ")
search_sent = e.extrapolate(o_sent)
index = 6
    #index = random.randint(0, len(sent_list)-1)
print("\nTest index: "+ str(index+1))
#index = int(input("Enter a number between 1 and "+str(len(sent_list))+": "))-1
#print(sent_list[index])
output = e.transform(o_sent, sent_list[index])
print(output)
#this would be the post
#############################################################################
##
## Copyright (C) 2015 The Qt Company Ltd.
## Contact: http://www.qt.io/licensing
##
## This file is part of Qt Creator.
##
## Commercial License Usage
## Licensees holding valid commercial Qt licenses may use this file in
## accordance with the commercial license agreement provided with the
## Software or, alternatively, in accordance with the terms contained in
## a written agreement between you and The Qt Company. For licensing terms and
## conditions see http://www.qt.io/terms-conditions. For further information
## use the contact form at http://www.qt.io/contact-us.
##
## GNU Lesser General Public License Usage
## Alternatively, this file may be used under the terms of the GNU Lesser
## General Public License version 2.1 or version 3 as published by the Free
## Software Foundation and appearing in the file LICENSE.LGPLv21 and
## LICENSE.LGPLv3 included in the packaging of this file. Please review the
## following information to ensure the GNU Lesser General Public License
## requirements will be met: https://www.gnu.org/licenses/lgpl.html and
## http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
##
## In addition, as a special exception, The Qt Company gives you certain additional
## rights. These rights are described in The Qt Company LGPL Exception
## version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
##
#############################################################################
source("../../shared/qtcreator.py")
# entry of test
def main():
startApplication("qtcreator" + SettingsPath)
if not startedWithoutPluginError():
return
# create qt quick application
checkedTargets, projectName = createNewQtQuickApplication(tempDir(), "SampleApp")
# build it - on all build configurations
availableConfigs = iterateBuildConfigs(len(checkedTargets))
if not availableConfigs:
test.fatal("Haven't found a suitable Qt version - leaving without building.")
for kit, config in availableConfigs:
selectBuildConfig(len(checkedTargets), kit, config)
# try to compile
test.log("Testing build configuration: " + config)
clickButton(waitForObject(":*Qt Creator.Build Project_Core::Internal::FancyToolButton"))
waitForCompile()
# check output if build successful
ensureChecked(waitForObject(":Qt Creator_CompileOutput_Core::Internal::OutputPaneToggleButton"))
waitFor("object.exists(':*Qt Creator.Cancel Build_QToolButton')", 20000)
cancelBuildButton = findObject(':*Qt Creator.Cancel Build_QToolButton')
waitFor("not cancelBuildButton.enabled", 30000)
compileOutput = waitForObject(":Qt Creator.Compile Output_Core::OutputWindow")
if not test.verify(compileSucceeded(compileOutput.plainText),
"Verifying building of simple qt quick application."):
test.log(compileOutput.plainText)
# exit qt creator
invokeMenuItem("File", "Exit")
from __future__ import print_function
import fileinput
from datetime import datetime
for line in fileinput.input():
line = line.rstrip()
# This condition removes CDX header lines
i | f line[0] is ' ':
continue
# Extract just the timestamp from line
timestamp = line.split(' ', 2)[1]
# Datetiem format in CDX is 20121125005312
date_object = datetime.strptime(timestamp, '%Y%m%d%H%M%S')
    # print(date_object.strftime('%Y-%m-%d'))
print(date_object.strftime('%Y-%m-%d %H:%M:%S'))
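# A worked example (hypothetical CDX line; fields are space-separated and the
# second field is the 14-digit timestamp):
#   org,example)/ 20121125005312 http://example.org/ text/html 200 ...
# prints: 2012-11-25 00:53:12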
from redbreast.core.utils import *
import pytest
from uliweb import manage, functions
import os
def test_import():
a = CommonUtils.get_class('redbreast.core.spec.TaskSpec')
assert str(a) == "<class 'redbreast.core.spec.task.TaskSpec'>"
def test_import_not_exist():
a = CommonUtils.get_class('redbreast.core.spec.NotExistSpec')
assert str(a) == 'None'
def test_import_error():
with pytest.raises(ImportError):
a = CommonUtils.get_class('not.exist.module.NotExistSpec')
class TestUtilInProject(object):
def setup(self):
locate_dir = os.path.dirname(__file__)
os.chdir(locate_dir)
os.chdir('test_project')
import shutil
shutil.rmtree('database.db', ignore_errors=True)
manage.call('uliweb syncdb')
manage.call('uliweb syncspec')
from uliweb.manage import make_simple_application
app = make_simple_application(apps_dir='./apps')
def teardown(self):
import shutil
shutil.rmtree('database.db', ignore_errors=True)
def test_import(self):
maps = {
'simple_task' : 'redbreast.core.spec.task.SimpleTask',
'join_task' : 'redbreast.core.spec.task.JoinTask',
'choice_task' : 'redbreast.core.spec.task.ChoiceTask',
'split_task' : 'redbreast.core.spec.task.SplitTask',
            'multichocie_task' : 'redbreast.core.spec.task.MultiChoiceTask',
            'auto_simple_task' : 'redbreast.core.spec.task.AutoSimpleTask',
            'auto_join_task' : 'redbreast.core.spec.task.AutoJoinTask',
            'auto_choice_task' : 'redbreast.core.spec.task.AutoChoiceTask',
            'auto_split_task' : 'redbreast.core.spec.task.AutoSplitTask',
            'auto_multichoice_task' : 'redbreast.core.spec.task.AutoMultiChoiceTask',
}
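        # The loop below exercises both lookup paths: the registered alias and
        # the bare class name with the dotted path stripped.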
for spec in maps:
a = CommonUtils.get_spec(spec)
assert str(a) == "<class '%s'>" % maps[spec]
spec1 = maps[spec].replace("redbreast.core.spec.task.", "")
b = CommonUtils.get_spec(spec1)
assert str(b) == "<class '%s'>" % maps[spec]
# Authors: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Denis Engemann <denis.engemann@gmail.com>
# Martin Luessi <mluessi@nmr.mgh.harvard.edu>
# Eric Larson <larson.eric.d@gmail.com>
# Cathy Nangini <cnangini@gmail.com>
# Mainak Jas <mainak@neuro.hut.fi>
# Jona Sassenhagen <jona.sassenhagen@gmail.com>
#
# License: Simplified BSD
import os.path as op
import warnings
import numpy as np
from numpy.testing import assert_raises
from mne import read_events, Epochs, pick_types, read_cov
from mne.channels import read_layout
from mne.io import read_raw_fif
from mne.utils import slow_test, run_tests_if_main
from mne.viz.evoked import _butterfly_onselect, plot_compare_evokeds
from mne.viz.utils import _fake_click
# Set our plotters to test mode
import matplotlib
matplotlib.use('Agg') # for testing don't use X server
warnings.simplefilter('always') # enable b/c these tests throw warnings
base_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data')
evoked_fname = op.join(base_dir, 'test-ave.fif')
raw_fname = op.join(base_dir, 'test_raw.fif')
cov_fname = op.join(base_dir, 'test-cov.fif')
event_name = op.join(base_dir, 'test-eve.fif')
event_id, tmin, tmax = 1, -0.1, 0.1
n_chan = 6
layout = read_layout('Vectorview-all')
def _get_raw():
"""Get raw data."""
return read_raw_fif(raw_fname, preload=False, add_eeg_ref=False)
def _get_events():
"""Get events."""
return read_events(event_name)
def _get_picks(raw):
"""Get picks."""
return pick_types(raw.info, meg=True, eeg=False, stim=False,
ecg=False, eog=False, exclude='bads')
def _get_epochs():
"""Get epochs."""
raw = _get_raw()
raw.add_proj([], remove_existing=True)
events = _get_events()
picks = _get_picks(raw)
# Use a subset of channels for plotting speed
picks = picks[np.round(np.linspace(0, len(picks) - 1, n_chan)).astype(int)]
picks[0] = 2 # make sure we have a magnetometer
epochs = Epochs(raw, events[:5], event_id, tmin, tmax, picks=picks,
baseline=(None, 0), add_eeg_ref=False)
epochs.info['bads'] = [epochs.ch_names[-1]]
return epochs
def _get_epochs_delayed_ssp():
"""Get epochs with delayed SSP."""
raw = _get_raw()
events = _get_events()
picks = _get_picks(raw)
reject = dict(mag=4e-12)
epochs_delayed_ssp = Epochs(raw, events[:10], event_id, tmin, tmax,
picks=picks, baseline=(None, 0),
proj='delayed', reject=reject,
add_eeg_ref=False)
return epochs_delayed_ssp
@slow_test
def test_plot_evoked():
"""Test plotting of evoked."""
import matplotlib.pyplot as plt
evoked = _get_epochs().average()
with warnings.catch_warnings(record=True):
fig = evoked.plot(proj=True, hline=[1], exclude=[], window_title='foo')
# Test a click
ax = fig.get_axes()[0]
line = ax.lines[0]
_fake_click(fig, ax,
[line.get_xdata()[0], line.get_ydata()[0]], 'data')
_fake_click(fig, ax,
[ax.get_xlim()[0], ax.get_ylim()[1]], 'data')
# plot with bad channels excluded & spatial_colors & zorder
evoked.plot(exclude='bads')
evoked.plot(exclude=evoked.info['bads'], spatial_colors=True, gfp=True,
zorder='std')
# test selective updating of dict keys is working.
evoked.plot(hline=[1], units=dict(mag='femto foo'))
evoked_delayed_ssp = _get_epochs_delayed_ssp().average()
evoked_delayed_ssp.plot(proj='interactive')
evoked_delayed_ssp.apply_proj()
assert_raises(RuntimeError, evoked_delayed_ssp.plot,
proj='interactive')
evoked_delayed_ssp.info['projs'] = []
assert_raises(RuntimeError, evoked_delayed_ssp.plot,
proj='interactive')
assert_raises(RuntimeError, evoked_delayed_ssp.plot,
proj='interactive', axes='foo')
plt.close('all')
# test GFP only
evoked.plot(gfp='only')
assert_raises(ValueError, evoked.plot, gfp='foo')
evoked.plot_image(proj=True)
# plot with bad channels excluded
evoked.plot_image(exclude='bads', cmap='interactive')
evoked.plot_image(exclude=evoked.info['bads']) # does the same thing
plt.close('all')
evoked.plot_topo() # should auto-find layout
_butterfly_onselect(0, 200, ['mag', 'grad'], evoked)
plt.close('all')
cov = read_cov(cov_fname)
cov['method'] = 'empirical'
evoked.plot_white(cov)
evoked.plot_white([cov, cov])
# plot_compare_evokeds: test condition contrast, CI, color assignment
plot_compare_evokeds(evoked.copy().pick_types(meg='mag'))
evoked.rename_channels({'MEG 2142': "MEG 1642"})
assert len(plot_compare_evokeds(evoked)) == 2
colors = dict(red='r', blue='b')
linestyles = dict(red='--', blue='-')
red, blue = evoked.copy(), evoked.copy()
red.data *= 1.1
blue.data *= 0.9
plot_compare_evokeds([red, blue], picks=3) # list of evokeds
plot_compare_evokeds([[red, evoked], [blue, evoked]],
picks=3) # list of lists
# test picking & plotting grads
contrast = dict()
contrast["red/stim"] = list((evoked.copy(), red))
contrast["blue/stim"] = list((evoked.copy(), blue))
# test a bunch of params at once
plot_compare_evokeds(contrast, colors=colors, linestyles=linestyles,
picks=[0, 2], vlines=[.01, -.04], invert_y=True,
truncate_yaxis=False, ylim=dict(mag=(-10, 10)),
styles={"red/stim": {"linewidth": 1}})
assert_raises(ValueError, plot_compare_evokeds,
contrast, picks='str') # bad picks: not int
assert_raises(ValueError, plot_compare_evokeds, evoked, picks=3,
colors=dict(fake=1)) # 'fake' not in conds
assert_raises(ValueError, plot_compare_evokeds, evoked, picks=3,
styles=dict(fake=1)) # 'fake' not in conds
assert_raises(ValueError, plot_compare_evokeds, [[1, 2], [3, 4]],
picks=3) # evoked must contain Evokeds
assert_raises(ValueError, plot_compare_evokeds, evoked, picks=3,
styles=dict(err=1)) # bad styles dict
assert_raises(ValueError, plot_compare_evokeds, evoked, picks=3,
gfp=True) # no single-channel GFP
assert_raises(TypeError, plot_compare_evokeds, evoked, picks=3,
ci='fake') # ci must be float or None
contrast["red/stim"] = red
contrast["blue/stim"] = blue
plot_compare_evokeds(contrast, picks=[0], colors=['r', 'b'],
ylim=dict(mag=(1, 10)))
# Hack to test plotting of maxfiltered data
evoked_sss = evoked.copy()
evoked_sss.info['proc_history'] = [dict(max_info=None)]
evoked_sss.plot_white(cov)
evoked_sss.plot_white(cov_fname)
plt.close('all')
evoked.plot_sensors() # Test plot_sensors
plt.close('all')
run_tests_if_main()
"""n-body simulator to derive TDV+TTV diagrams of planet-moon configurations.
Credit for part of the source is given to
https://github.com/akuchling/50-examples/blob/master/gravity.rst
Creative Commons Attribution-NonCommercial-ShareAlike 3.0 Unported License
"""
import numpy
import math
import matplotlib.pylab as plt
from modified_turtle import Turtle
from phys_const import *
class Body(Turtle):
"""Subclass of Turtle representing a gravitationally-acting body"""
name = 'Body'
vx = vy = 0.0 # velocities in m/s
px = py = 0.0 # positions in m
def attraction(self, other):
"""(Body): (fx, fy) Returns the force exerted upon this body by the other body"""
# Distance of the other body
sx, sy = self.px, self.py
ox, oy = other.px, other.py
dx = (ox-sx)
dy = (oy-sy)
d = math.sqrt(dx**2 + dy**2)
# Force f and direction to the body
f = G * self.mass * other.mass / (d**2)
theta = math.atan2(dy, dx)
# direction of the force
fx = math.cos(theta) * f
fy = math.sin(theta) * f
return fx, fy
def loop(bodies, orbit_duration):
"""([Body]) Loops and updates the positions of all the provided bodies"""
# Calculate the duration of our simulation: One full orbit of the outer moon
seconds_per_day = 24*60*60
timesteps_per_day = 1000
timestep = seconds_per_day / timesteps_per_day
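    # 86400 s/day over 1000 steps/day -> 86.4 s of simulated time per step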
total_steps = int(orbit_duration / 3600 / 24 * timesteps_per_day)
#print total_steps, orbit_duration / 24 / 60 / 60
for body in bodies:
body.penup()
body.hideturtle()
for step in range(total_steps):
for body in bodies:
if body.name == 'planet':
# Add current position and velocity to our list
tdv_list.append(body.vx)
ttv_list.append(body.px)
force = {}
for body in bodies:
# Add up all of the forces exerted on 'body'
total_fx = total_fy = 0.0
for other in bodies:
# Don't calculate the body's attraction to itself
if body is other:
continue
fx, fy = body.attraction(other)
total_fx += fx
total_fy += fy
# Record the total force exerted
force[body] = (total_fx, total_fy)
        # Update velocities based on the force
for body in bodies:
fx, fy = force[body]
body.vx += fx / body.mass * timestep
body.vy += fy / body.mass * timestep
# Update positions
body.px += body.vx * timestep
body.py += body.vy * timestep
#body.goto(body.px*SCALE, body.py*SCALE)
#body.dot(3)
def run_sim(R_star, transit_duration, bodies):
"""Run 3-body sim and convert results to TTV + TDV values in [minutes]"""
# Run 3-body sim for one full orbit of the outermost moon
loop(bodies, orbit_duration)
# Move resulting data from lists to numpy arrays
    ttv_array = numpy.array(ttv_list)
    tdv_array = numpy.array(tdv_list)
# Zeropoint correction
middle_point = numpy.amin(ttv_array) + numpy.amax(ttv_array)
ttv_array = numpy.subtract(ttv_array, 0.5 * middle_point)
    ttv_array = numpy.divide(ttv_array, 1000) # convert m to km
# Compensate for barycenter offset of planet at start of simulation:
planet.px = 0.5 * (gravity_firstmoon + gravity_secondmoon)
stretch_factor = 1 / ((planet.px / 1000) / numpy.amax(ttv_array))
ttv_array = numpy.divide(ttv_array, stretch_factor)
# Convert to time units, TTV
ttv_array = numpy.divide(ttv_array, R_star)
ttv_array = numpy.multiply(ttv_array, transit_duration * 60 * 24) # minutes
# Convert to time units, TDV
oldspeed = (2 * R_star / transit_duration) * 1000 / 24 / 60 / 60 # m/sec
newspeed = oldspeed - numpy.amax(tdv_array)
difference = (transit_duration - (transit_duration * newspeed / oldspeed)) * 24 * 60
conversion_factor = difference / numpy.amax(tdv_array)
tdv_array = numpy.multiply(tdv_array, conversion_factor)
return ttv_array, tdv_array
"""Main routine"""
# Set variables and constants. Do not change these!
G = 6.67428e-11 # Gravitational constant G
SCALE = 5e-07 # [px/m] Only needed for plotting during nbody-sim
tdv_list = []
ttv_list = []
R_star = 6.96 * 10**5 # [km], solar radius
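# Central transit (impact parameter b = 0): the geometric term
# sqrt((1+R_jup/R_sun)^2 - b^2) of Kipping's Eq. (C1) degenerates to
# (1 + R_jup/R_sun) in the expression below.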
transit_duration = (2*pi/sqrt(G*(M_sun+M_jup)/a_jup**3)*R_sun/(pi*a_jup)*sqrt((1+R_jup/R_sun)**2))/60/60/24 # transit duration without a moon, Eq. (C1) Kipping (2009b, MNRAS), for q = 0
print(transit_duration)
planet = Body()
planet.name = 'planet'
planet.mass = M_jup
#semimajor_axis = 1. * AU #[m]
semimajor_axis = a_jup
stellar_mass = M_sun
radius_hill = semimajor_axis * (planet.mass / (3 * (stellar_mass))) ** (1./3)
# Define parameters
firstmoon = Body()
firstmoon.mass = M_gan
firstmoon.px = 0.4218 * 10**9
secondmoon = Body()
secondmoon.mass = M_gan
secondmoon.px = 0.48945554 * 10**9
thirdmoon = Body()
thirdmoon.mass = M_gan
thirdmoon.px = 0.59293316 * 10**9
fourthmoon = Body()
fourthmoon.mass = M_gan
fourthmoon.px = 0.77696224 * 10**9
fithmoon = Body()
fithmoon.mass = M_gan
fithmoon.px = 1.23335068 * 10**9
# Calculate start velocities
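# Vis-viva: v = sqrt(G*M*(2/r - 1/a)); for the circular orbits assumed here
# a = r, so each expression below reduces to v = sqrt(G*M/r).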
firstmoon.vy = math.sqrt(G * planet.mass * (2 / firstmoon.px - 1 / firstmoon.px))
secondmoon.vy = math.sqrt(G * planet.mass * (2 / secondmoon.px - 1 / secondmoon.px))
thirdmoon.vy = math.sqrt(G * planet.mass * (2 / thirdmoon.px - 1 / thirdmoon.px))
fourthmoon.vy = math.sqrt(G * planet.mass * (2 / fourthmoon.px - 1 / fourthmoon.px))
fithmoon.vy = math.sqrt(G * planet.mass * (2 / fithmoon.px - 1 / fithmoon.px))
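# Counter-velocity for the planet so the system's total linear momentum stays
# near zero; note that only the two innermost moons enter this balance.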
planet.vy = (-secondmoon.vy * secondmoon.mass - firstmoon.vy * firstmoon.mass) / planet.mass
# Calculate planet displacement. This holds for circular orbits
gravity_firstmoon = (firstmoon.mass / planet.mass) * firstmoon.px
gravity_secondmoon = (secondmoon.mass / planet.mass) * secondmoon.px
gravity_thirdmoon = (thirdmoon.mass / planet.mass) * thirdmoon.px
gravity_fourthmoon = (fourthmoon.mass / planet.mass) * fourthmoon.px
gravity_fithmoon = (fithmoon.mass / planet.mass) * fithmoon.px
planet.px = 0.5 * (gravity_firstmoon + gravity_secondmoon + gravity_thirdmoon + gravity_fourthmoon + gravity_fithmoon)
# Use the outermost moon to calculate the length of one full orbit duration
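# Kepler's third law: T = 2*pi*sqrt(a^3 / (G*(m_moon + m_planet)))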
orbit_duration = math.sqrt((4 * math.pi**2 *fithmoon.px ** 3) / (G * (fithmoon.mass + planet.mass)))
orbit_duration = orbit_duration * 1.002
# Run simulation. Make sure to add/remove the moons you want to simulate!
ttv_array, tdv_array = run_sim(
R_star,
transit_duration,
[planet, firstmoon, secondmoon, thirdmoon, fourthmoon, fithmoon])
# Output information
print('TTV amplitude =', numpy.amax(ttv_array),
      '[min] =', numpy.amax(ttv_array) * 60, '[sec]')
print('TDV amplitude =', numpy.amax(tdv_array),
      '[min] =', numpy.amax(tdv_array) * 60, '[sec]')
ax = plt.axes()
plt.plot(ttv_array, tdv_array, color = 'k')
plt.rc('font', **{'family': 'serif', 'serif': ['Computer Modern']})
plt.rc('text', usetex=True)
plt.tick_params(axis='both', which='major', labelsize = 16)
plt.xlabel('transit timing variation [minutes]', fontsize = 16)
plt.ylabel('transit duration variation [minutes]', fontsize = 16)
ax.tick_params(direction='out')
plt.ylim([numpy.amin(tdv_array) * 1.2, numpy.amax(tdv_array) * 1.2])
plt.xlim([numpy.amin(ttv_array) * 1.2, numpy.amax(ttv_array) * 1.2])
plt.plot((0, 0), (numpy.amax(tdv_array) * 10., numpy.amin(tdv_array) * 10.), 'k', linewidth=0.5)
plt.plot((numpy.amin(ttv_array) * 10., numpy.amax(ttv_array) * 10.), (0, 0), 'k', linewidth=0.5)
# Fix axes for comparison with eccentric moon
plt.xlim(-0.11, +0.11)
plt.ylim(-0.8, +0.8)
plt.annotate(r"5:4:3:2:1", xy=(-0.105, +0.7), size=16)
plt.savefig("fig_system_22.eps", bbox_inches = 'tight')
import platform
import socket
import sys
from mule_local.JobGeneration import *
from mule.JobPlatformResources import *
from . import JobPlatformAutodetect
import multiprocessing
# Underscore defines symbols to be private
_job_id = None
def _whoami(depth=1):
"""
String of function name to recycle code
https://www.oreilly.com/library/view/python-cookbook/0596001673/ch14s08.html
Returns
-------
string
Return function name
"""
return sys._getframe(depth).f_code.co_name
def p_gen_script_info(jg : JobGeneration):
global _job_id
return """#
# Generating function: """+_whoami(2)+"""
# Platform: """+get_platform_id()+"""
# Job id: """+_job_id+"""
#
"""
def get_platform_autodetect():
"""
Returns
-------
bool
True if current platform matches, otherwise False
"""
return JobPlatformAutodetect.autodetect()
def get_platform_id():
"""
Return platform ID
Returns
-------
string
unique ID of platform
"""
return "ppeixoto_usp_gnu"
def get_platform_resources():
"""
Return information about hardware
"""
h = JobPlatformResources()
h.num_cores_per_node = multiprocessing.cpu_count()
h.num_nodes = 1
# TODO: So far, we only assume a single socket system as a fallback
h.num_cores_per_socket = h.num_cores_per_node
return h
def jobscript_setup(jg : JobGeneration):
"""
Setup data to generate job script
"""
global _job_id
_job_id = jg.runtime.getUniqueID(jg.compile)
return
def jobscript_get_header(jg : JobGeneration):
"""
    These headers typically contain information on e.g. job execution, number of compute nodes, etc.
Returns
-------
string
multiline text for scripts
"""
content = """#! /bin/bash
"""+p_gen_script_info(jg)+"""
"""
return content
def jobscript_get_exec_prefix(jg : JobGeneration):
"""
Prefix before executable
Returns
-------
string
multiline text for scripts
"""
p = jg.parallelization
content = ""
content += jg.runtime.get_jobscript_plan_exec_prefix(jg.compile, jg.runtime)
if jg.compile.threading != 'off':
content += """
export OMP_NUM_THREADS="""+str(p.num_threads_per_rank)+"""
export OMP_DISPLAY_ENV=VERBOSE
"""
if p.core_oversubscription:
raise Exception("Not supported with this script!")
else:
        if p.core_affinity is not None:
            content += "\necho \"Affinity: "+str(p.core_affinity)+"\"\n"
            if p.core_affinity == 'compact':
                content += "source $MULE_ROOT/platforms/bin/setup_omp_places.sh nooversubscription close\n"
                #content += "\nexport OMP_PROC_BIND=close\n"
            elif p.core_affinity == 'scatter':
                content += "\nexport OMP_PROC_BIND=spread\n"
            else:
                raise Exception("Affinity '"+str(p.core_affinity)+"' not supported")
content += "\n"
return content
def jobscript_get_exec_command(jg : JobGeneration):
"""
    Executable command to run the compiled program
Returns
-------
string
multiline text for scripts
"""
    p = jg.parallelization
content = """
"""+p_gen_script_info(jg)+"""
# mpiexec ... would be here without a line break
EXEC=\""""+jg.compile.getProgramPath()+"""\"
PARAMS=\""""+jg.runtime.getRuntimeOptions()+"""\"
echo \"${EXEC} ${PARAMS}\"
"""
if jg.compile.sweet_mpi == 'enable':
content += 'mpiexec -n '+str(p.num_ranks)+' '
content += "$EXEC $PARAMS || exit 1"
content += "\n"
content += "\n"
return content
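# A sketch of the snippet this generates with sweet_mpi enabled and, say,
# 4 ranks (program path and runtime options are placeholders):
#   EXEC="<program path>"
#   PARAMS="<runtime options>"
#   echo "${EXEC} ${PARAMS}"
#   mpiexec -n 4 $EXEC $PARAMS || exit 1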
def jobscript_get_exec_suffix(jg : JobGeneration):
"""
    Suffix after the executable command
Returns
-------
string
multiline text for scripts
"""
content = ""
content += jg.runtime.get_jobscript_plan_exec_suffix(jg.compile, jg.runtime)
return content
def jobscript_get_footer(jg : JobGeneration):
"""
Footer at very end of job script
Returns
-------
string
multiline text for scripts
"""
content = """
"""+p_gen_script_info(jg)+"""
"""
return content
def jobscript_get_compile_command(jg : JobGeneration):
"""
Compile command(s)
This is separated here to put it either
* into the job script (handy for workstations)
or
* into a separate compile file (handy for clusters)
Returns
-------
string
multiline text with compile command to generate executable
"""
content = """
SCONS="scons """+jg.compile.getSConsParams()+' -j 4"'+"""
echo "$SCONS"
$SCONS || exit 1
"""
return content