text
stringlengths 29
850k
|
|---|
"""Test the various Cython-based message deserializers"""
# Based on test_custom_protocol_handler.py
try:
import unittest2 as unittest
except ImportError:
import unittest
from cassandra.query import tuple_factory
from cassandra.cluster import Cluster
from cassandra.protocol import ProtocolHandler, LazyProtocolHandler, NumpyProtocolHandler
from tests.integration import use_singledc, PROTOCOL_VERSION
from tests.integration.datatype_utils import update_datatypes
from tests.integration.standard.utils import (
create_table_with_all_types, get_all_primitive_params, get_primitive_datatypes)
from tests.unit.cython.utils import cythontest, numpytest
def setup_module():
    """Module-level test hook: bring up the single-DC test cluster and
    refresh datatype metadata (both helpers come from the integration-test
    utilities imported above)."""
    use_singledc()
    update_datatypes()
class CythonProtocolHandlerTest(unittest.TestCase):
    # Number of rows written to test_table by setUpClass.
    N_ITEMS = 10

    @classmethod
    def setUpClass(cls):
        # Create a dedicated keyspace and a table populated with N_ITEMS rows
        # covering all primitive CQL types (see create_table_with_all_types).
        cls.cluster = Cluster(protocol_version=PROTOCOL_VERSION)
        cls.session = cls.cluster.connect()
        cls.session.execute("CREATE KEYSPACE testspace WITH replication = "
                            "{ 'class' : 'SimpleStrategy', 'replication_factor': '1'}")
        cls.session.set_keyspace("testspace")
        cls.colnames = create_table_with_all_types("test_table", cls.session, cls.N_ITEMS)

    @classmethod
    def tearDownClass(cls):
        # Drop the test keyspace and release all cluster connections.
        cls.session.execute("DROP KEYSPACE testspace")
        cls.cluster.shutdown()

    @cythontest
    def test_cython_parser(self):
        """
        Test Cython-based parser that returns a list of tuples
        """
        verify_iterator_data(self.assertEqual, get_data(ProtocolHandler))

    @cythontest
    def test_cython_lazy_parser(self):
        """
        Test Cython-based parser that returns an iterator of tuples
        """
        verify_iterator_data(self.assertEqual, get_data(LazyProtocolHandler))

    @numpytest
    def test_numpy_parser(self):
        """
        Test Numpy-based parser that returns a NumPy array
        """
        # arrays = { 'a': arr1, 'b': arr2, ... }
        arrays = get_data(NumpyProtocolHandler)
        colnames = self.colnames
        datatypes = get_primitive_datatypes()
        # Each column's numpy dtype must match its CQL datatype...
        for colname, datatype in zip(colnames, datatypes):
            arr = arrays[colname]
            self.match_dtype(datatype, arr.dtype)
        # ...and the row contents must match the expected fixture values.
        verify_iterator_data(self.assertEqual, arrays_to_list_of_tuples(arrays, colnames))

    def match_dtype(self, datatype, dtype):
        """Match a string cqltype (e.g. 'int' or 'blob') with a numpy dtype"""
        if datatype == 'smallint':
            self.match_dtype_props(dtype, 'i', 2)
        elif datatype == 'int':
            self.match_dtype_props(dtype, 'i', 4)
        elif datatype in ('bigint', 'counter'):
            self.match_dtype_props(dtype, 'i', 8)
        elif datatype == 'float':
            self.match_dtype_props(dtype, 'f', 4)
        elif datatype == 'double':
            self.match_dtype_props(dtype, 'f', 8)
        else:
            # All other CQL types come back as numpy object columns.
            self.assertEqual(dtype.kind, 'O', msg=(dtype, datatype))

    def match_dtype_props(self, dtype, kind, size, signed=None):
        """Assert that a numpy dtype has the given kind and itemsize.

        NOTE(review): `signed` is accepted but never checked — confirm intent.
        """
        self.assertEqual(dtype.kind, kind, msg=dtype)
        self.assertEqual(dtype.itemsize, size, msg=dtype)
def arrays_to_list_of_tuples(arrays, colnames):
    """Turn a column-name -> array mapping (numpy protocol handler output) into row tuples.

    The number of rows is taken from the first column; columns appear in each
    tuple in `colnames` order.
    """
    columns = [arrays[name] for name in colnames]
    n_rows = len(columns[0])
    rows = []
    for row_index in range(n_rows):
        rows.append(tuple(column[row_index] for column in columns))
    return rows
def get_data(protocol_handler):
    """
    Fetch all rows from the test table using the given protocol handler.

    :param protocol_handler: the ProtocolHandler subclass under test
        (ProtocolHandler, LazyProtocolHandler or NumpyProtocolHandler).
    :return: whatever the handler produces for ``SELECT * FROM test_table`` —
        a list/iterator of tuples, or a dict of numpy arrays.
    """
    cluster = Cluster(protocol_version=PROTOCOL_VERSION)
    try:
        session = cluster.connect(keyspace="testspace")

        # use our custom protocol handler
        session.client_protocol_handler = protocol_handler
        session.row_factory = tuple_factory

        return session.execute("SELECT * FROM test_table")
    finally:
        # Shut down the whole cluster (which also closes its sessions) so
        # repeated calls don't leak connections; the original only shut down
        # the session, leaving the Cluster's control connection open.
        cluster.shutdown()
def verify_iterator_data(assertEqual, results):
    """
    Validate rows returned by get_data() (a list or iterator of tuples)
    against the expected primitive parameter values for each row's key.
    """
    for row in results:
        expected_params = get_all_primitive_params(row[0])
        assertEqual(len(expected_params), len(row),
                    msg="Not the right number of columns?")
        for col_index, expected in enumerate(expected_params):
            assertEqual(row[col_index], expected)
|
You are able to depend on Local Bathroom Remodel Crew to deliver the very best expert services when it comes to Bathroom Remodeling in Myrtle Beach, SC. Our crew of experienced experts will provide the expert services that you require with the most innovative technologies around. We make sure you get the very best support, the ultimate selling price, and the very best quality supplies. Call us by dialing 888-349-8884 and we will be happy to consider the choices, answer your concerns, and set up a scheduled appointment to commence arranging your task.
You will have a budget to stick with, and you should reduce costs. In the process, you require the most effective and finest standard of work regarding Bathroom Remodeling in Myrtle Beach, SC. Our attempts to cost less money will never eliminate the excellent quality of our services. When you deal with our team, you'll get the advantage of our working experience and top standard materials to ensure that the project can last while saving your time and cash. We are able to achieve this by supplying you with the best savings around and preventing costly errors. Save time and cash through calling Local Bathroom Remodel Crew now. You are able to contact our business by dialing 888-349-8884 to start.
You should be well informed when it comes to Bathroom Remodeling in Myrtle Beach, SC. You shouldn't go into it without consideration, and it's good to learn what to prepare for. This is the reason we try to make every attempt to ensure you learn the steps and aren't facing any sort of unexpected situations. Begin by talking about your job with our client service staff once you contact 888-349-8884. With this phone call, you're going to get your concerns addressed, and we're going to establish a time to get started with services. We work with you all through the whole project, and our team will show up promptly and ready.
You will have many reasons to use Local Bathroom Remodel Crew to meet your needs regarding Bathroom Remodeling in Myrtle Beach, SC. Our equipment are of the very best quality, our cash saving techniques are sensible and powerful, and our client satisfaction scores are unsurpassed. We are aware of your preferences and goals, and we are available to serve you with our skills. Call 888-349-8884 to communicate with Local Bathroom Remodel Crew and examine all of your goals about Bathroom Remodeling in Myrtle Beach.
|
# -*- coding: utf-8 -*-
import os
import codecs
import sys
import re
import datetime
import xml.etree.ElementTree as ET
def getTextID(txt):
    """Extract the numeric text ID from an _LT[xml.<name>.<id>] reference.

    Returns the ID as an int, or "" when `txt` contains no such reference.
    """
    match = re.search(r"_LT\[xml.[^\.]*.([0-9]+)\]", txt)
    return "" if match is None else int(match.group(1))
# Build a Mabi_Skillicon .ini patch from the game's skillinfo data: read the
# localized text table (skillinfo.<locale>.txt) and the skill XML
# (skillinfo.xml) from ./data/, then map each SkillID to an
# "<imageIdx>,<posX>,<posY>" icon triple.
#
# NOTE(review): leading indentation was lost in this copy of the file; the
# nesting below is reconstructed from the control flow and should be
# confirmed against the original script.
targetName = "skillinfo"
targetTXTName = targetName  # replaced by the actual txt filename once found
localeName = ""
outputName = "Mabi_Skillicon"
dataDBText = {}  # text-ID -> localized string (parsed below but not consumed here)
dataDB = {}      # SkillID -> "imageIdx,posX,posY"
hasTXT = False
hasXML = False

# Locate skillinfo.<locale>.txt and skillinfo.xml inside ./data/.
fileList = os.listdir("./data/")
for fileN in fileList:
    if hasXML and hasTXT:
        break
    txtNameMatch = re.match(targetTXTName+".([a-zA-Z]*).txt", fileN)
    if txtNameMatch is not None:
        targetTXTName = fileN
        localeName = txtNameMatch.group(1)
        hasTXT = True
        continue
    xmlNameMatch = re.match(targetName+".xml", fileN)
    if xmlNameMatch is not None:
        hasXML = True
        continue

if hasTXT is False:
    print("Missing "+targetTXTName+" TXT file.")
    sys.exit()
if hasXML is False:
    print("Missing "+targetName+" XML file.")
    sys.exit()

# Output directory: ./patch-<locale>-<YYYYMMDD>/mod/
today = datetime.datetime.now().strftime("%Y%m%d")
outdir = os.getcwd()+"/patch-"+localeName+"-"+today+"/mod/"
print("Output: " + outdir)
try:
    os.makedirs(outdir)
except:
    # Directory may already exist; ignore.
    pass

#targetName.XXXXX.txt
infilename = targetTXTName
try:
    # The text table is normally UTF-16; fall back to UTF-8 with BOM.
    fi = codecs.open("./data/" + infilename,'r', encoding='utf-16')
    line = fi.readline()
    fi.seek(0)
except:
    fi.close()
    fi = codecs.open("./data/" + infilename,'r', encoding='utf-8-sig')
for line in fi:
    # Each line: "<numeric id>\t<text>\r\n"
    oline = re.match(r"([0-9]{0,8})\t(([^\r])+)\r\n", line)
    if oline is not None:
        dataDBText[int(oline.group(1))] = oline.group(2)
fi.close()
print(infilename + " processed.")

#targetName.xml
infilename = targetName + ".xml"
tree = ET.parse("./data/" + infilename)
root = tree.getroot()
for elelist in list(root):
    for ele in elelist:
        ID = int(ele.attrib["SkillID"])
        finalName = "0,0,0"  # default: no icon information
        if "ImageFile" in ele.attrib:
            imgName = ele.attrib["ImageFile"].lower()
            imgG = re.search("data/gfx/image/gui_icon_skill_([^\.]*).dds", imgName)
            if imgG != None:
                imgNameIdx = 0
                try:
                    imgNameIdx = int(imgG.group(1))
                except ValueError:
                    # Non-numeric image suffix -> keep index 0.
                    pass
                if imgNameIdx >= 4: # dds not implemented in TinTimer
                    pass
                elif "PositionX" in ele.attrib and "PositionY" in ele.attrib:
                    posX = int(ele.attrib["PositionX"])
                    posY = int(ele.attrib["PositionY"])
                    finalName = str(imgNameIdx)+','+str(posX)+','+str(posY)
        # NOTE(review): reconstructed nesting — when the skill was already
        # recorded, a locale-tagged entry for a *different* locale is skipped;
        # otherwise the entry is (re)written. Confirm against the original.
        if ID in dataDB.keys():
            if "Locale" in ele.attrib:
                if ele.attrib["Locale"] != localeName:
                    continue
        dataDB[ID] = finalName
print(infilename + " processed.")

# Emit the ini sorted by SkillID, UTF-16 encoded with CRLF line endings.
dataIDs = list(dataDB.keys())
dataIDs.sort()
fo = codecs.open(outdir+outputName+".ini", 'w', encoding="utf-16")
fo.write("["+outputName+"]\r\n")
for key in dataIDs:
    fo.write(str(key)+"="+dataDB[key]+"\r\n")
fo.close()
print(outputName + ".ini generated.")
|
presence at the christening is not an indicator of who will be a godparent. felipe of spain himself was chosen as godfather to danish twin vincent himself, yet couldn't be at the christening. so if they want mary to be a godmother, the fact that she isn't available to attend the christening would not be a problem.
Oh, I know. The whole speculation that Alexander might have a foreign royal godparent stems from the fact that the court implied that there will be European royal presence at his christening. While their long-term friendship made it logical for Mary and Frederik to make Felipe Vincent's godparent although he wasn't able to attend it, I don't think we can quite assume that there's the same kind of bond between CP and Sofia and e.g. Mary and Frederik.
No, other than her siblings which I never count as royal godparents as they are family and just happen to be royal. Madeleine and Chris each chose three godparents. Leonor has Chris's sister Tatiana, Natasha's husband Ernst, Chris's friend Alice Branford, Victoria, Patrick sommerlath and Madeleine's best friend Louise Gotlieb. Nicolas has Natasha, Tatyana's husband Henry, Chris's friend Marco, Gustaf magnuson, Carl Philip and Katrina Von horn.
Well well well, look at that. I like this list. I assume that Miss Cajsa Larsson is the mother of Sofia's goddaughter Tiara Larsson?
I looked up the guest list of the wedding and that is what I gathered together.
And I like that they have included her elder sister Lina and his best friend, Jan-Åke Hansson.
No. Leonor and Nicolas have no royal godparents outside their aunt and uncle, so Alexander is definitely not a first.
When we were first guessing godparents I was pretty right. I said two eldest sisters, two friends (and guessed his best man which Jan was and speculated perhaps tiara's mother whose mother name I didn't know). And either a cousin or foreign royal, or both if six. For once I actually guessed right.
It seems the siblings were likely closest to Christina and her kids growing up. All three of Christina's sons have now served. Her eldest Gustaf is for Nicolas, her second Oscar is godfather to his namesake and now her youngest is to Alexander.
My one mistake was thinking the cousin would be Hubertus. I thought as a son of Carl Philips godmother, that the middle name Hubertus may be a nod to him as godfather.
I also said titled, and both Leonore and Nicolas have one titled godparent (Leonore has Count Ernst Abensperg und Traun and Nicolas has Countess Natascha Abensperg und Traun). I count Count and Countess as titled (however much the title may mean), so IMO yes, a first-timer.
The list gives me a good feeling, makes for an intimate setting.
What you said was no royals or titles outside the family. As Natasha is Chris's sister and Ernst his brother in law, they fall within the family category. So I stand by my, 'this is not the first' comment.
I was only thinking of the Royal Family, I left the in-laws out. So in that respect you are right.
Seen from only the angle of the Royal Family (without the in-laws), this is a first-timer for no Royals or titled godparents. That makes me being right.
Now it's awaiting the guest list.
Not surprised no foreign non family royal godparent. Alexander is fifth in line to the throne and CP & Sofia have no close royal friends.
I'm surprised that a reception for Alexander's christening was held on Aug. 20 for CP & Sofia's friends. There's a reception after the christening tomorrow. Were they invited to the main event?
I have to say that I am very satisfied with the choice of the godparents.
The couple has been low-profile for once. IIRC, Mrs Larsson is the mother of Sofia's goddaughter, Tiara Larsson, who served also as a bridesmaid at her wedding.
Is there any info about the TV live coverage of the christening?
According to the godparents list it's Miss Larson, so she is not married. And yes, she is Tiara Larsson's mother.
I wouldn't necessarily call it high or low profile (although I suppose that, had there been a Royal or titled person outside the family, the "too high profile" accusations would fly again), but these people are clearly the ones they feel comfortable with to serve as Alexander's godparents. I think it's a nice group.
SVT is the official broadcaster. The live coverage starts tomorrow at 11.30 am.
I suspect they deliberately chose to go low-profile to avoid raising again the issue of the changes to the Act of Succession and of Alexander being the legitimate agnatic heir of the House of Bernadotte after his father. It is the strongest signal yet IMHO on CP's and the KIng's part that they have moved on that issue.
BTW, has the guest list been published yet ?
I don't think that CP had any hard feelings on that matter anyway. It was the King that did it all by himself.
The guest list will be released tomorrow, at least that's what the spokeswoman said.
Prince Alexander christening rehearsal video.
The prince will be the last one to receive his own crown. It is the crown made for Prince Fredrik Adolf in 1771.
He really is an adorable and very chubby baby, much bigger than his four cousins who have been baptized in the last few years. He is also older; will he be wearing the same gown? I guess they can let the neck out?
Yes, he will be christened in the same gown all the royal babies are, and his name added. I'm a little worried about the delicate gown; it looked a bit small on Nicolas, and he was younger and smaller. But they will have found some way to accommodate the bigger, chubbier baby.
|
# Copyright (C) 2014 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
""" Common code for dnf-plugins-extras"""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
from gettext import NullTranslations
from sys import version_info
import argparse
import dnf.exceptions
import gettext
import logging
# Python 3 compatibility settings.
if version_info.major >= 3:
    # NOTE(review): PY3 is only bound when running under Python 3; code that
    # reads it on Python 2 would hit a NameError — confirm intent.
    PY3 = True
    # ugettext/ungettext don't exist on Python 3's NullTranslations; alias
    # them to the non-u variants so the lookups below work on both versions.
    NullTranslations.ugettext = NullTranslations.gettext
    NullTranslations.ungettext = NullTranslations.ngettext
t = gettext.translation('dnf-plugins-extras', fallback=True)
# Translation helpers: _ for singular strings, P_ for plural forms.
_ = t.ugettext
P_ = t.ungettext

logger = logging.getLogger('dnf.plugin')
class ArgumentParser(argparse.ArgumentParser):
    """Parses the arguments and options given to a tool from DNF.

    The default help options (-h, --help) are disabled and a custom
    --help-cmd flag is added instead.

    Errors while parsing options/arguments print the help text and raise
    a dnf.exceptions.Error.
    """

    def __init__(self, cmd, **kwargs):
        super(ArgumentParser, self).__init__(
            prog='dnf %s' % cmd, add_help=False, **kwargs)
        self.add_argument('--help-cmd', action='store_true',
                          help=_('show this help about this tool'))

    def error(self, message):
        """Overload the default error method.

        Instead of argparse's default exit-on-error behaviour, raise an
        AttributeError that parse_args() can catch.
        """
        raise AttributeError(message)

    def parse_args(self, args):
        """Parse `args`; on failure print the help and raise dnf.exceptions.Error."""
        try:
            return argparse.ArgumentParser.parse_args(self, args)
        except AttributeError as e:
            self.print_help()
            raise dnf.exceptions.Error(str(e))
def is_erasing(transaction, pkg):
    """Check if package removing in transaction

    Args:
        transaction (dnf.transaction.Transaction): Transaction instance.
        pkg (str): Package name to check.

    Returns:
        bool: True if the transaction removes ``pkg`` without also
        installing it (a genuine erase, not an upgrade/reinstall),
        False otherwise.
    """
    # Set comprehensions instead of set([...]); single boolean expression
    # instead of the redundant if/else returning literal True/False.
    installed = {package.name for package in transaction.install_set}
    erased = {package.name for package in transaction.remove_set}
    # Don't run tracer when uninstalling it
    return pkg in erased - installed
|
We live contrary to Christmas when we complain about the hassles and compromises that are simply part of personal, family, and business life. We are to find Jesus, the spirit of God, amid all this.
Rather than begin with how we understand God and then, in light of that, pray, it's wiser to turn that around. Pray first. That changes how we understand God—and prayer. We learn faith itself by doing it, not by holding it in check until we've figured it all out.
You and I are told to be on the lookout for the new life that Easter is all about—a promise easy to miss amid all that's so wrong. "Take note!" It's soon to spring up on you!
Our call is not to be like Jesus but to be his disciples.
|
# coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
import threading
import six
from pants.base.build_file_target_factory import BuildFileTargetFactory
from pants.base.parse_context import ParseContext
from pants.engine.legacy.structs import BundleAdaptor, Globs, RGlobs, TargetAdaptor, ZGlobs
from pants.engine.objects import Serializable
from pants.engine.parser import Parser
from pants.util.memo import memoized_method, memoized_property
class LegacyPythonCallbacksParser(Parser):
  """A parser that parses the given python code into a list of top-level objects.

  Only Serializable objects with `name`s will be collected and returned. These objects will be
  addressable via their name in the parsed namespace.

  This parser attempts to be compatible with existing legacy BUILD files and concepts including
  macros and target factories.
  """

  # Class-level accumulator for objects registered while exec'ing a BUILD
  # file; reset and read under _lock in parse().
  _objects = []
  _lock = threading.Lock()

  @classmethod
  @memoized_method
  def _get_symbols(cls, symbol_table_cls):
    """Build the symbol namespace used to exec BUILD file code.

    Returns a (symbols dict, shared mutable ParseContext) pair, memoized per
    symbol_table_cls.
    """
    symbol_table = symbol_table_cls.table()
    # TODO: nasty escape hatch
    aliases = symbol_table_cls.aliases()

    class Registrar(BuildFileTargetFactory):
      # Callable registered under a target alias: constructs the aliased type
      # and, when it is a *named* Serializable, records it in cls._objects.
      def __init__(self, type_alias, object_type):
        self._type_alias = type_alias
        self._object_type = object_type
        self._serializable = Serializable.is_serializable_type(self._object_type)

      @memoized_property
      def target_types(self):
        return [self._object_type]

      def __call__(self, *args, **kwargs):
        name = kwargs.get('name')
        if name and self._serializable:
          obj = self._object_type(type_alias=self._type_alias, **kwargs)
          # Side effect: collect the named object for parse() to return.
          cls._objects.append(obj)
          return obj
        else:
          return self._object_type(*args, **kwargs)

    # Compute a single ParseContext for a default path, which we will mutate for each parsed path.
    symbols = {}
    for alias, target_macro_factory in aliases.target_macro_factories.items():
      for target_type in target_macro_factory.target_types:
        symbols[target_type] = TargetAdaptor
    parse_context = ParseContext(rel_path='', type_aliases=symbols)

    for alias, symbol in symbol_table.items():
      registrar = Registrar(alias, symbol)
      # Both the alias and the type itself resolve to the registrar.
      symbols[alias] = registrar
      symbols[symbol] = registrar

    if aliases.objects:
      symbols.update(aliases.objects)

    # Compute "per path" symbols (which will all use the same mutable ParseContext).
    aliases = symbol_table_cls.aliases()
    for alias, object_factory in aliases.context_aware_object_factories.items():
      symbols[alias] = object_factory(parse_context)

    for alias, target_macro_factory in aliases.target_macro_factories.items():
      symbols[alias] = target_macro_factory.target_macro(parse_context)
      for target_type in target_macro_factory.target_types:
        symbols[target_type] = TargetAdaptor

    # TODO: Replace builtins for paths with objects that will create wrapped PathGlobs objects.
    symbols['globs'] = Globs
    symbols['rglobs'] = RGlobs
    symbols['zglobs'] = ZGlobs
    symbols['bundle'] = BundleAdaptor

    return symbols, parse_context

  @classmethod
  def parse(cls, filepath, filecontent, symbol_table_cls):
    """Exec `filecontent` and return the named Serializable objects it registered.

    NOTE(review): indentation of this copy was mangled; the lock is taken to
    cover both the reset of _objects and the exec — confirm against upstream.
    """
    symbols, parse_context = cls._get_symbols(symbol_table_cls)
    python = filecontent

    # Mutate the parse context for the new path.
    parse_context._rel_path = os.path.dirname(filepath)

    with cls._lock:
      del cls._objects[:]
      six.exec_(python, symbols, {})
      return list(cls._objects)
|
used pool table lights used pool table lights led pool billiard table lighting kit light your within lights idea pool used pool table lights pool table lights led.
used pool table lights best pool tables amp billiard rooms images on rustic pool table light fixtures rustic pool table lights for sale.
used pool table lights used pool table lights for sale new vintage pool table lights for sale for antique brass used pool table lights stained glass pool table light ebay.
used pool table lights three light bronze pool table light lighting galleries pool table lights near me.
used pool table lights state pool table lights state pool table lights used pool table lights state stained glass pool table light state buckeyes pool table light pool table lights lowes.
used pool table lights used pool table lights for sale height pool table lights used pool table lights pool table used pool table lights ebay.
used pool table lights pool table lights cheap wonderful modern pool table lights modern pool table lights for intended for pool table lights vintage style pool table lights.
used pool table lights used pool table lights for sale pool tables for sale near me previous next full size used pool table lights pool table lights amazon.
used pool table lights rustic pool table lights contemporary best used pool tables for sale prices vary by your pool table lights menards.
used pool table lights pool tables near me pool table lights pool table lighting pool table lights pool table lighting near me used pool tables pool table lights melbourne.
used pool table lights used pool table lights pool table light ideas wood billiard lighting amazing home design and interior vintage style pool table lights.
used pool table lights used pool table lights for sale billiard table lights sale beer brand pool table lights.
used pool table lights pool table lights pool table lighting ideas ideas how to hang pool table light and galvanized pool table lights pool table lights ebay australia.
used pool table lights used pool table lights for sale medium size of table pool table lamps modern pool table used beer pool table lights.
used pool table lights presidential billiards used pool table light pool table lights stained glass pool table light led pool table lights uk.
used pool table lights vintage pool table lights vintage pool table lights used designs antique billiard table lights 7ft pool table led lights.
used pool table lights pool table light used pool table lights pool table lights menards.
used pool table lights pool table lights for sale used light players with black balls bar 3 metal shades image outdoor pool table led lights.
used pool table lights used pool table light our home made pool table light pool table lights cheap used beer pool table lights.
used pool table lights used pool table lights used pool table gold crown pool table light billiard table lights amazon vintage pool table lights uk.
used pool table lights pool table lights for sale medium size of home incredible pool table light images concept pool pool table lights pool table lights lowes.
used pool table lights pool table light pool table light classic burgundy billiards image used pool table lights corona beer pool table lights.
used pool table lights used pool table lights for sale pool table lights hanging lights the home depot pool table used pool table lights pool table lights for sale near me.
used pool table lights pool table lights cheap pool table light pool table lamps bar pool table lights pool table pool table lights stained glass pool table light ebay.
used pool table lights used pool table light pool table bud light pool table light pool table lights amazon.
used pool table lights pool table lights for sale rustic pool table lights co throughout for sale decorations pool table lights for sale used pool table lights.
used pool table lights miller lite pool table light classifieds buy sell miller lite pool table light across the page 2 beer themed pool table lights.
used pool table lights awesome pool table light and used with lights for sale rustic pool table light fixtures pool table beer lights for sale.
used pool table lights used pool table light pool table light used pool table lights led pool table light bulbs pool table lights for sale near me.
used pool table lights used pool table lights for sale pool table lights for sale modern buy light shark brushed nickel bar w balls 3 with regard to 1 pool table lamps sale pool table lights ebay.
used pool table lights sports pool table lights adapted pool cue pool table lights.
used pool table lights pool table lights cheap modern pool table lights modern pool table lights cheap used tables for rustic pool table lights for sale.
used pool table lights used pool table light bar decoration ideas home bar traditional with in bar pool throughout bar corona beer pool table lights.
used pool table lights appealing billiard lights for sale of download pool table lamps fresh furniture pool table lights ebay australia.
used pool table lights pool table rustic mahogany finish with camel felt bud light pool table light ebay.
used pool table lights vintage pool table lights used pool table lights vintage pool table light shades replacement lights beer vintage pool table lights budweiser pool table light vintage.
used pool table lights used pool table lights used pool table lights for sale table lights and lamps green billiards pool table modern pool table lights pool table lights menards.
|
"""DRAFT: TheanoObject
N.B. the gotcha with this design is listed in the documentation of `TheanoObject`
"""
from __future__ import print_function
import theano
from theano import tensor
import numpy
def theano_type(x):
    """Return a theano Type instance suitable for containing value `x`.

    Only plain ints are supported so far (note: `type(x) is int` deliberately
    excludes bool and other int subclasses).
    """
    if type(x) is not int:
        raise NotImplementedError()
    return tensor.lscalar
class symbolic_fn_callable(object):
    """The object you get when you access a symbolic function on a `TheanoObject`.

    Calling a symbolic function (`symbolic_fn`) of a TheanoObject is handled by
    this class's `__call__`. The symbolic outputs and updates of a symbolic
    function, as well as the compiled `Function`, are also reachable here.

    .. code-block:: python

        class T(TheanoObject):
            @symbolic_fn
            def add(self, x):
                ...
                add_outputs = ...
                add_updates = ...
                return RVal(add_outputs, add_updates)

        t = T()
        t.add.outputs(5)          # returns `add_outputs` from when `x=theano_type(5)`
        t.add.updates(5)          # returns `add_updates` from when `x=theano_type(5)`
        t.add.theano_function(5)  # returns the `Function` compiled when `x=theano_type(5)`
        t.add(5)                  # runs the `Function` compiled when `x=theano_type(5)`
                                  # with arguments `(5,)`
    """

    def __init__(self, fn, mode):
        self.fn = fn
        self.mode = mode

    def on(self, o_self):
        """Bind the owning TheanoObject instance (used by symbolic_fn.__get__)."""
        self.o_self = o_self
        return self

    def run_symbolic(self, *args, **kwargs):
        owner = self.o_self
        return owner._get_method_impl(self.fn, owner, args, kwargs, mode=self.mode)

    def __call__(self, *args, **kwargs):
        compiled = self.run_symbolic(*args, **kwargs)['theano_function']
        return compiled(*args, **kwargs)

    def theano_function(self, *args, **kwargs):
        return self.run_symbolic(*args, **kwargs)['theano_function']

    def outputs(self, *args, **kwargs):
        return self.run_symbolic(*args, **kwargs)['outputs']

    def updates(self, *args, **kwargs):
        return self.run_symbolic(*args, **kwargs)['updates']
class symbolic_fn(object):
    """A property-like descriptor for marking symbolic functions in a `TheanoObject`."""

    def __init__(self, fn, mode=None):
        self.fn = fn
        self.callable = symbolic_fn_callable(fn, mode)

    def __get__(self, o_self, o_cls):
        # Hand back the shared callable, bound to the accessing instance.
        return self.callable.on(o_self)

    def __set__(self, o_self, new_val):
        # Assignment is silently ignored (data descriptor with a no-op setter).
        pass
def symbolic_fn_opts(**kwargs):
    """Return a decorator for symbolic functions in a `TheanoObject`.

    The `kwargs` given here are forwarded to `theano.function` via
    `symbolic_fn`.
    """
    def decorate(fn):
        return symbolic_fn(fn, **kwargs)
    return decorate
class RVal(object):
    """A Return-Value object for a `symbolic_fn`."""

    # The method will compute values for the variables in this list.
    outputs = []

    # The method will update module variables in this dictionary.
    # For items (k, v): k must be a `symbolic_member` of some module; on each
    # call to the compiled function, the value of k is replaced with the
    # computed value of the Variable v.
    updates = {}

    def __init__(self, outputs, updates=None):
        if updates is None:
            updates = {}
        self.outputs = outputs
        assert type(updates) is dict
        self.updates = updates
class TheanoObject(object):
"""Base for Theano-supported classes
This class provides support for symbolic_fn class attributes.
These will be compiled on demand so that they can be used just like normal (non-symbolic)
methods.
The symbolic functions in a TheanoObject can share member variables that have been created
using the `symbolic_member` method.
:note: Other variables (ones not created using ``self.symbolic_member``) referred to in the
body of a symbolic function will *not* be shared between symbolic functions, or between
symbolic functions and this class. These other variables will be locked away in the
closure of a symbolic function when that function is compiled.
:warning: It is not recommended for code to interleave
(a) changes to non-symbolic instance variables with
(b) calls to symbolic functions that use those instance variables.
A symbolic function may be
compiled multiple times because it must be compiled for each set of argument types.
Each time the function is compiled, the values of non-symbolic variables will be locked
into the compiled function. Subsequent changes to those non-symbolic instance variables
will not have any effect on the behaviour of the already-compiled symbolic function.
:todo: Is there an efficient way of recognizing when a compiled symbolic function is stale,
wrt the current values of the class's instance variables?
- One option is to re-evaluate symbolic functions symbolically and see if the graph can be
completely merged with the original graph. This is not fast enough to do all the time by
default though.
"""
    def __init__(self):
        # Per-instance cache mapping (fn, arg_types) -> compiled-function info;
        # populated and consumed by _get_method_impl.
        self.module_method_cache = {}
def _get_method_impl(self, fn, o_self, args, kwargs, mode):
"""Retrieve information about the symbolic function (`fn`) in TheanoObject instance
`o_self`, being evaluated on arguments `args` and `kwargs`.
:rtype: dict with entries 'theano_function', 'outputs', 'updates'
:return: the theano function compiled for these arguments, the symbolic outputs of that
function, and the symbolic updates performed by that function.
:note: This function caches return values in self.`module_method_cache`.
:todo: This may at some point become a class-level cache rather than an instance-level
cache.
"""
if kwargs:
raise NotImplementedError()
cache = self.module_method_cache
args_types = tuple(theano_type(arg) for arg in args)
key = (fn, args_types)
if key not in cache:
inputs = [a() for a in args_types]
print('compiling', fn, 'for inputs', inputs)
rval = fn(o_self, *inputs)
print('compiling to compute outputs', rval.outputs)
if isinstance(rval.outputs, (tuple, list)):
all_required_inputs = theano.gof.graph.inputs(rval.outputs)
else:
all_required_inputs = theano.gof.graph.inputs([rval.outputs])
# construct In instances for the symbolic_member instances that can automatically be
# included here.
module_inputs = [theano.compile.io.In(
variable=v,
value=v._theanoclass_container,
mutable=(v in rval.updates),
update=rval.updates.get(v, None))
for v in all_required_inputs \
if hasattr(v, '_theanoclass_container') and not (v in inputs)]
cache[key] = dict(theano_function=theano.function(inputs+module_inputs, rval.outputs),
updates=rval.updates,
outputs=rval.outputs,
mode=mode)
return cache[key]
def symbolic_member(self, ival, name=None):
"""Create a Variable instance to hold value `ival`.
This function also immediately creates a Container object for ival.
When the returned Variable is used as input to a `TheanoObject` `symbolic_fn`, (but
does not appear as an argument to that symbolic_fn), then this Container will be used to
retrieve (and store) values for the Variable.
This Variable's Container's contents can be retrieved by its `get()` method.
This Variable's Container's contents can be written using its `set(newval)` method.
"""
if type(ival) is not int:
raise NotImplementedError()
v = tensor.lscalar(name)
v._theanoclass_container = \
theano.gof.Container(v,
storage=[theano._asarray(ival, dtype='int64')],
readonly=False)
assert not hasattr(v, 'set')
assert not hasattr(v, 'get')
v.get = lambda : v._theanoclass_container.data
def setval_in_v(newval):
v._theanoclass_container.data = newval
v.set = setval_in_v
return v
|
Lovely site — keep up the great work. Thank you!
Nice website — keep up the great work. Many thanks.
|
class leg(object):
    '''A stream of cash flows with a pay/receive direction.

    Optionally carries an adjuvant table and a payoff; accessors raise
    RuntimeError when those optional members are absent.

    >>> from ppf.date_time import *
    >>> from pay_receive import *
    >>> from generate_flows import *
    >>> flows = generate_flows(
    ...     start = date(2007, Jun, 29)
    ...   , end = date(2017, Jun, 29)
    ...   , resolution = date_resolutions.months
    ...   , period = 6
    ...   , shift_method = shift_convention.modified_following
    ...   , basis = "ACT/360")
    >>>
    >>> pay_leg = leg(flows, PAY)
    >>>
    >>> for flow in pay_leg.flows():
    ...   print flow
    10000000.000000, USD, [2007-Jun-29, 2007-Dec-31], basis_act_360, 2007-Dec-31,
    10000000.000000, USD, [2007-Dec-31, 2008-Jun-30], basis_act_360, 2008-Jun-30,
    10000000.000000, USD, [2008-Jun-30, 2008-Dec-29], basis_act_360, 2008-Dec-29,
    10000000.000000, USD, [2008-Dec-29, 2009-Jun-29], basis_act_360, 2009-Jun-29,
    10000000.000000, USD, [2009-Jun-29, 2009-Dec-29], basis_act_360, 2009-Dec-29,
    10000000.000000, USD, [2009-Dec-29, 2010-Jun-29], basis_act_360, 2010-Jun-29,
    10000000.000000, USD, [2010-Jun-29, 2010-Dec-29], basis_act_360, 2010-Dec-29,
    10000000.000000, USD, [2010-Dec-29, 2011-Jun-29], basis_act_360, 2011-Jun-29,
    10000000.000000, USD, [2011-Jun-29, 2011-Dec-29], basis_act_360, 2011-Dec-29,
    10000000.000000, USD, [2011-Dec-29, 2012-Jun-29], basis_act_360, 2012-Jun-29,
    10000000.000000, USD, [2012-Jun-29, 2012-Dec-31], basis_act_360, 2012-Dec-31,
    10000000.000000, USD, [2012-Dec-31, 2013-Jun-28], basis_act_360, 2013-Jun-28,
    10000000.000000, USD, [2013-Jun-28, 2013-Dec-30], basis_act_360, 2013-Dec-30,
    10000000.000000, USD, [2013-Dec-30, 2014-Jun-30], basis_act_360, 2014-Jun-30,
    10000000.000000, USD, [2014-Jun-30, 2014-Dec-29], basis_act_360, 2014-Dec-29,
    10000000.000000, USD, [2014-Dec-29, 2015-Jun-29], basis_act_360, 2015-Jun-29,
    10000000.000000, USD, [2015-Jun-29, 2015-Dec-29], basis_act_360, 2015-Dec-29,
    10000000.000000, USD, [2015-Dec-29, 2016-Jun-29], basis_act_360, 2016-Jun-29,
    10000000.000000, USD, [2016-Jun-29, 2016-Dec-29], basis_act_360, 2016-Dec-29,
    10000000.000000, USD, [2016-Dec-29, 2017-Jun-29], basis_act_360, 2017-Jun-29,
    '''

    def __init__(self, flows, pay_or_receive, adjuvant_table = None, payoff = None):
        self.__flows = flows
        self.__pay_or_receive = pay_or_receive
        self.__adjuvant_table = adjuvant_table
        self.__payoff = payoff

    def flows(self):
        """Return the sequence of cash flows."""
        return self.__flows

    def pay_receive(self):
        """Return the pay/receive direction flag."""
        return self.__pay_or_receive

    def has_adjuvant_table(self):
        # Fixed: replaced the removed `<>` operator with an identity check.
        return self.__adjuvant_table is not None

    def has_payoff(self):
        return self.__payoff is not None

    def adjuvant_table(self):
        """Return the adjuvant table; raise RuntimeError if absent."""
        if self.__adjuvant_table is None:
            # Fixed: `RumtimeError` typo would have raised NameError here.
            raise RuntimeError("Null adjuvant table")
        return self.__adjuvant_table

    def payoff(self):
        """Return the payoff; raise RuntimeError if absent."""
        if self.__payoff is None:
            raise RuntimeError("Null payoff")
        return self.__payoff
def _test():
import doctest
doctest.testmod()
if __name__ == '__main__':
_test()
|
A trommel is a screened cylinder that is used to separate aggregate materials according to size. Trommel screeners are particularly effective on lighter, wetter materials, like mulch, topsoil and compost.
Trommels For Topsoil. Don't let twigs, gravel and debris get in the way of producing high-quality topsoil for your customers. Samscreen carries the replacement trommel drum screen you need for your topsoil application. We carry a wide variety of trommel screens for the leading equipment brands, or we can provide a customized solution.
Soil trommel for sale products are most popular in Domestic Market, Southeast Asia, and Africa. You can ensure product safety by selecting from certified suppliers, including 191 with Other, 144 with ISO9001, and 10 with ISO14001 certification.
Shop Frontline's selection of new & used wheel and track-mounted trommel equipment for rent and sale.... Frontline is proud to carry the best Trommel Screeners in Canada. For demanding screening applications, Frontline's selection of trommels are engineered for the most efficient and effective performance available.
Used tumbleweed trimmers, Asking $1000 each, well under the 11,000 retail price. 110V motors for trommel and fan, all are in good working condition, side panels removed for photos, checker plate hopper feed attaches to top.
Recycling System Screen 1, for sale we have a trommel, conveyer and a blower. an ljh recycling plant in generally good condition with trommel and picking station. Waste Sorting System complete Used once very briefly so in a rather Excellent Condition.
The Trommel range is a barrel type screen, more suited to sticky waste products and predominantly used in waste, soil and composting industries. The Trommel range is a barrel type screen, more suited to sticky waste products and predominantly used in waste, soil and composting industries.
|
from django.db import models
from django.core.urlresolvers import reverse
import requests
from time import sleep
from .utils import *
# Repository Table
class RepositoryManager(models.Manager):
    """Custom manager that creates a Repository row by scraping its web page."""

    def create_repository(self, url):
        """Fetch `url`, parse the repository name and latest commit, build the
        root Directory tree, and persist the Repository.

        Returns the newly created Repository instance.
        """
        repo = requests.get(url).text  # raw HTML of the repository landing page
        domain = get_url_parts(url)[0]
        # NOTE(review): `re` and the *_re helpers come from `.utils` via the
        # star import -- confirm `re` is actually exported by that module.
        name = re.findall(get_repo_name_re(domain), repo)[0]
        last_commit = re.findall(get_commit_re(domain), repo)[0]
        # Recursively scrapes and persists the whole directory tree (network-heavy).
        root = Directory.manage.create_directory(domain=domain, path=get_url_parts(url)[1], name=name)
        return self.create(name=name, url=url, last_commit=last_commit, root=root)
class Repository(models.Model):
    """A scraped source repository together with its cached metadata."""

    name = models.CharField(max_length=150,)
    url = models.URLField(max_length=250,)
    last_commit = models.CharField(max_length=150)
    root = models.ForeignKey('Directory', on_delete=models.CASCADE, blank=True, null=True, related_name="repository",)

    manage = RepositoryManager()
    objects = models.Manager()

    def __str__(self):
        return '{0}: {1}'.format(self.name, self.url)

    def get_domain(self):
        """Domain component of this repository's URL."""
        parts = get_url_parts(self.url)
        return parts[0]

    def get_path(self):
        """Path component of this repository's URL."""
        parts = get_url_parts(self.url)
        return parts[1]

    def document_stats(self):
        """Aggregate comment/code statistics over the whole tree."""
        return self.root.total_doc_info()
# Directory Table
class DirectoryManager(models.Manager):
    """Custom manager that scrapes a directory page and recursively persists
    its subdirectories and supported files."""

    def create_directory(self, domain, path, name, parent=None):
        """Create a Directory row for `domain+path` and recurse into its contents.

        `parent` links the new row to its parent Directory (None for the root).
        Returns the created Directory instance.
        """
        url = domain+path
        dir_html = requests.get(url).text  # raw HTML listing of the directory
        last_commit = re.findall(get_commit_re(domain), dir_html)[0]
        if parent:
            dir = self.create(name=name, url=url, last_commit=last_commit, parent_dir=parent)
        else:
            dir = self.create(name=name, url=url, last_commit=last_commit)
        # Each match is a tuple; by usage below: (kind, path, name, extension).
        contents = re.findall(get_dir_re(domain), dir_html)
        for content in contents:
            # No extension + directory marker => recurse into a subdirectory.
            if not content[3] and content[0] == "file-directory":
                Directory.manage.create_directory(domain=domain, path=content[1], name=content[2], parent=dir)
            # Otherwise only persist files whose extension is supported.
            elif is_file_sup(content[3]):
                File.manage.create_file(domain=domain, path=content[1], name=content[2], extension=content[3], parent=dir)
            # sleep(1)
        return dir
class Directory(models.Model):
    """A directory inside a scraped repository tree."""

    name = models.CharField(max_length=100,)
    url = models.URLField(max_length=250,)
    last_commit = models.CharField(max_length=150)
    parent_dir = models.ForeignKey('Directory', on_delete=models.CASCADE, blank=True, null=True, related_name="sub_dirs",)

    manage = DirectoryManager()
    objects = models.Manager()

    def __str__(self):
        return '{0}: {1}'.format(self.name, self.url)

    def get_domain(self):
        """Domain component of this directory's URL."""
        parts = get_url_parts(self.url)
        return parts[0]

    def get_path(self):
        """Path component of this directory's URL."""
        parts = get_url_parts(self.url)
        return parts[1]

    def total_doc_info(self):
        """Sum the per-file statistics of the whole subtree into one dict."""
        totals = {}
        for file_info in self.gen_doc_info():
            for key, value in file_info.items():
                totals[key] = totals.get(key, 0) + value
        return totals

    def gen_doc_info(self):
        """Collect the statistics dicts of every file in this subtree."""
        infos = [f.gen_doc_info() for f in self.sub_files.all()]
        for sub in self.sub_dirs.all():
            infos.extend(sub.gen_doc_info())
        return infos
# File Table
class FileManager(models.Manager):
    """Custom manager that downloads a file page and records its comment/code
    size statistics."""

    def create_file(self, domain, path, name, extension, parent):
        """Fetch the file at `domain+path`, count single-line (slc) and
        multi-line (mlc) comments for `extension`, and create the File row
        under directory `parent`.
        """
        # raw_domain = 'https://raw.githubusercontent.com' # use domain for raw file reading: https://raw.githubusercontent.com
        url = domain+path
        file_html = requests.get(url).text
        lines = get_lines(domain, file_html)  # extract source text from the HTML page
        slcs = re.findall(get_slc_re(extension), lines)   # single-line comments
        mlcs = re.findall(get_mlc_re(extension), lines)   # multi-line comments
        code = re.sub(get_alc_re(extension), '', lines)   # source with all comments stripped
        # Sizes are measured in characters; *_num counts occurrences.
        return self.create(name=name, extension=extension, url=url, parent_dir=parent,
                           mlc_size=len(''.join(mlcs)), mlc_num=len(mlcs), slc_size=len(''.join(slcs)), slc_num=len(slcs),
                           comt_size=len(''.join(slcs) + ''.join(mlcs)), code_size=len(code))
class File(models.Model):
    """A single source file and its comment/code statistics."""

    name = models.CharField(max_length=200,)
    extension = models.CharField(max_length=15,)
    url = models.URLField(max_length=250,)
    parent_dir = models.ForeignKey(Directory, on_delete=models.CASCADE, blank=True, null=True, related_name="sub_files",)
    mlc_size = models.IntegerField()
    mlc_num = models.IntegerField()
    slc_size = models.IntegerField()
    slc_num = models.IntegerField()
    comt_size = models.IntegerField()
    code_size = models.IntegerField()

    manage = FileManager()
    objects = models.Manager()

    def __str__(self):
        return '{0}: {1}'.format(self.name, self.url)

    def get_domain(self):
        """Domain component of this file's URL."""
        parts = get_url_parts(self.url)
        return parts[0]

    def get_path(self):
        """Path component of this file's URL."""
        parts = get_url_parts(self.url)
        return parts[1]

    def gen_doc_info(self):
        """Expose the stored statistics as a plain dict for aggregation."""
        return {
            'mlcNum': self.mlc_num,
            'mlcSize': self.mlc_size,
            'slcNum': self.slc_num,
            'slcSize': self.slc_size,
            'comtSize': self.comt_size,
            'codeSize': self.code_size,
        }
|
wooden beam in solid oak, plus the hearth of our line Vesuvius model D70V, completely cast iron and high efficiency (over 70%) power 9.5 Kw, heated area 350m3 and flue diameter 200mm, complete with smoke valve.
The cover is made entirely of stone du Gard French, similar to our Vicenza stone, easy to clean and pleasant to the touch. The beam is working with solid oak antique, and bricks are the series French Cevennes, to escape dark and slightly sunken. The coating will be supplied packed in a wooden crate.
Fireplaces line Vesuvius are part of our economy line, but no less effective, in fact all have a high yield (74%), heat resistant to over 800 ° C, ash integrated regulation of primary and secondary with glass cleaning system, independent of well 4/5 hours and 2 year warranty. The hearths Vesuvius have a high yield due to a perfect heat exchange system, which reaches over 70% of this thanks to the peculiarity of the sky and of the deflector plane fumes.
In addition, the fireplaces of the Vesuvius line have a double air control: the primary air for combustion is adjusted manually, giving full control over combustion. The secondary air is needed to increase the yield of the furnace and to "refine" combustion.
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from itertools import groupby
from operator import itemgetter
from odoo import _, api, fields, models
class StockPackageLevel(models.Model):
    """Links a whole package (stock.quant.package) to a picking, grouping the
    moves/move lines needed to transfer the package as one unit."""
    _name = 'stock.package_level'
    _description = 'Stock Package Level'
    _check_company_auto = True

    package_id = fields.Many2one(
        'stock.quant.package', 'Package', required=True, check_company=True,
        domain="[('location_id', 'child_of', parent.location_id), '|', ('company_id', '=', False), ('company_id', '=', company_id)]")
    picking_id = fields.Many2one('stock.picking', 'Picking', check_company=True)
    move_ids = fields.One2many('stock.move', 'package_level_id')
    move_line_ids = fields.One2many('stock.move.line', 'package_level_id')
    location_id = fields.Many2one('stock.location', 'From', compute='_compute_location_id', check_company=True)
    location_dest_id = fields.Many2one(
        'stock.location', 'To', check_company=True,
        domain="[('id', 'child_of', parent.location_dest_id), '|', ('company_id', '=', False), ('company_id', '=', company_id)]")
    is_done = fields.Boolean('Done', compute='_compute_is_done', inverse='_set_is_done')
    state = fields.Selection([
        ('draft', 'Draft'),
        ('confirmed', 'Confirmed'),
        ('assigned', 'Reserved'),
        ('new', 'New'),
        ('done', 'Done'),
        ('cancel', 'Cancelled'),
    ], string='State', compute='_compute_state')
    is_fresh_package = fields.Boolean(compute='_compute_fresh_pack')
    picking_type_code = fields.Selection(related='picking_id.picking_type_code')
    show_lots_m2o = fields.Boolean(compute='_compute_show_lot')
    show_lots_text = fields.Boolean(compute='_compute_show_lot')
    company_id = fields.Many2one('res.company', 'Company', required=True, index=True)

    @api.depends('move_line_ids', 'move_line_ids.qty_done')
    def _compute_is_done(self):
        """A level is done when its lines fully match the package content
        (or trivially, when the package is freshly created)."""
        for package_level in self:
            # If it is an existing package
            if package_level.is_fresh_package:
                package_level.is_done = True
            else:
                package_level.is_done = package_level._check_move_lines_map_quant_package(package_level.package_id)

    def _set_is_done(self):
        """Inverse of is_done: ticking it fills qty_done from the package's
        quants (creating missing move lines); unticking resets qty_done."""
        for package_level in self:
            if package_level.is_done:
                if not package_level.is_fresh_package:
                    for quant in package_level.package_id.quant_ids:
                        corresponding_ml = package_level.move_line_ids.filtered(lambda ml: ml.product_id == quant.product_id and ml.lot_id == quant.lot_id)
                        if corresponding_ml:
                            corresponding_ml[0].qty_done = corresponding_ml[0].qty_done + quant.quantity
                        else:
                            corresponding_move = package_level.move_ids.filtered(lambda m: m.product_id == quant.product_id)[:1]
                            self.env['stock.move.line'].create({
                                'location_id': package_level.location_id.id,
                                'location_dest_id': package_level.location_dest_id.id,
                                'picking_id': package_level.picking_id.id,
                                'product_id': quant.product_id.id,
                                'qty_done': quant.quantity,
                                'product_uom_id': quant.product_id.uom_id.id,
                                'lot_id': quant.lot_id.id,
                                'package_id': package_level.package_id.id,
                                'result_package_id': package_level.package_id.id,
                                'package_level_id': package_level.id,
                                'move_id': corresponding_move.id,
                            })
            else:
                # Unticking: drop empty lines, zero the others.
                package_level.move_line_ids.filtered(lambda ml: ml.product_qty == 0).unlink()
                package_level.move_line_ids.filtered(lambda ml: ml.product_qty != 0).write({'qty_done': 0})

    @api.depends('move_line_ids', 'move_line_ids.package_id', 'move_line_ids.result_package_id')
    def _compute_fresh_pack(self):
        """A package is 'fresh' unless every line moves an existing package
        into itself (i.e. source package == result package on all lines)."""
        for package_level in self:
            if not package_level.move_line_ids or all(ml.package_id and ml.package_id == ml.result_package_id for ml in package_level.move_line_ids):
                package_level.is_fresh_package = False
            else:
                package_level.is_fresh_package = True

    @api.depends('move_ids', 'move_ids.state', 'move_line_ids', 'move_line_ids.state')
    def _compute_state(self):
        """Derive the level state from its moves and move lines."""
        # NOTE(review): if a level has move lines that are all done-or-cancel
        # in a mix not covered below, no branch assigns `state` -- confirm
        # whether a fallback assignment is needed.
        for package_level in self:
            if not package_level.move_ids and not package_level.move_line_ids:
                package_level.state = 'draft'
            elif not package_level.move_line_ids and package_level.move_ids.filtered(lambda m: m.state not in ('done', 'cancel')):
                package_level.state = 'confirmed'
            elif package_level.move_line_ids and not package_level.move_line_ids.filtered(lambda ml: ml.state == 'done'):
                if package_level.is_fresh_package:
                    package_level.state = 'new'
                elif package_level._check_move_lines_map_quant_package(package_level.package_id, 'product_uom_qty'):
                    package_level.state = 'assigned'
                else:
                    package_level.state = 'confirmed'
            elif package_level.move_line_ids.filtered(lambda ml: ml.state == 'done'):
                package_level.state = 'done'
            elif package_level.move_line_ids.filtered(lambda ml: ml.state == 'cancel') or package_level.move_ids.filtered(lambda m: m.state == 'cancel'):
                package_level.state = 'cancel'

    def _compute_show_lot(self):
        """Decide how lots are displayed: as a m2o (existing lots) or as free
        text (lots created on the fly)."""
        for package_level in self:
            if any(ml.product_id.tracking != 'none' for ml in package_level.move_line_ids):
                if package_level.picking_id.picking_type_id.use_existing_lots or package_level.state == 'done':
                    package_level.show_lots_m2o = True
                    package_level.show_lots_text = False
                else:
                    # BUGFIX: was `self.picking_id...`, which raises an
                    # expected-singleton error on multi-record recordsets;
                    # use the loop variable like every other branch.
                    if package_level.picking_id.picking_type_id.use_create_lots and package_level.state != 'done':
                        package_level.show_lots_m2o = False
                        package_level.show_lots_text = True
                    else:
                        package_level.show_lots_m2o = False
                        package_level.show_lots_text = False
            else:
                package_level.show_lots_m2o = False
                package_level.show_lots_text = False

    def _generate_moves(self):
        """Create one stock move per quant contained in the package."""
        for package_level in self:
            if package_level.package_id:
                for quant in package_level.package_id.quant_ids:
                    self.env['stock.move'].create({
                        'picking_id': package_level.picking_id.id,
                        'name': quant.product_id.display_name,
                        'product_id': quant.product_id.id,
                        'product_uom_qty': quant.quantity,
                        'product_uom': quant.product_id.uom_id.id,
                        'location_id': package_level.location_id.id,
                        'location_dest_id': package_level.location_dest_id.id,
                        'package_level_id': package_level.id,
                        'company_id': package_level.company_id.id,
                    })

    @api.model
    def create(self, vals):
        """Propagate the destination to related records and auto-generate
        moves when the level is added to a non-draft picking."""
        result = super(StockPackageLevel, self).create(vals)
        if vals.get('location_dest_id'):
            result.mapped('move_line_ids').write({'location_dest_id': vals['location_dest_id']})
            result.mapped('move_ids').write({'location_dest_id': vals['location_dest_id']})
        if result.picking_id.state != 'draft' and result.location_id and result.location_dest_id and not result.move_ids and not result.move_line_ids:
            result._generate_moves()
        return result

    def write(self, vals):
        """Keep move/move-line destinations in sync with the level's."""
        result = super(StockPackageLevel, self).write(vals)
        if vals.get('location_dest_id'):
            self.mapped('move_line_ids').write({'location_dest_id': vals['location_dest_id']})
            self.mapped('move_ids').write({'location_dest_id': vals['location_dest_id']})
        return result

    def unlink(self):
        """Remove the level's moves and detach its lines' result package."""
        self.mapped('move_ids').unlink()
        self.mapped('move_line_ids').write({'result_package_id': False})
        return super(StockPackageLevel, self).unlink()

    def _check_move_lines_map_quant_package(self, package, field='qty_done'):
        """should compare in good uom

        Return True when the level's move lines exactly cover the package's
        quants, grouping both sides by (product, lot) and comparing summed
        quantities (`field` selects which move-line quantity to sum).
        """
        all_in = True
        pack_move_lines = self.move_line_ids
        keys = ['product_id', 'lot_id']

        def sorted_key(object):
            object.ensure_one()
            return [object.product_id.id, object.lot_id.id]

        grouped_quants = {}
        for k, g in groupby(sorted(package.quant_ids, key=sorted_key), key=itemgetter(*keys)):
            grouped_quants[k] = sum(self.env['stock.quant'].concat(*list(g)).mapped('quantity'))
        grouped_ops = {}
        for k, g in groupby(sorted(pack_move_lines, key=sorted_key), key=itemgetter(*keys)):
            grouped_ops[k] = sum(self.env['stock.move.line'].concat(*list(g)).mapped(field))
        # Any mismatch in either direction (missing or extra group) fails.
        if any(grouped_quants.get(key, 0) - grouped_ops.get(key, 0) != 0 for key in grouped_quants) \
                or any(grouped_ops.get(key, 0) - grouped_quants.get(key, 0) != 0 for key in grouped_ops):
            all_in = False
        return all_in

    @api.depends('state', 'is_fresh_package', 'move_ids', 'move_line_ids')
    def _compute_location_id(self):
        """Source location: none for fresh packages, else taken from the
        moves/lines, falling back to the picking's source location."""
        for pl in self:
            if pl.state == 'new' or pl.is_fresh_package:
                pl.location_id = False
            elif pl.state == 'confirmed' and pl.move_ids:
                pl.location_id = pl.move_ids[0].location_id
            elif pl.state in ('assigned', 'done') and pl.move_line_ids:
                pl.location_id = pl.move_line_ids[0].location_id
            else:
                pl.location_id = pl.picking_id.location_id

    def action_show_package_details(self):
        """Open a read-only form dialog showing this level's content."""
        self.ensure_one()
        view = self.env.ref('stock.package_level_form_view')
        return {
            'name': _('Package Content'),
            'type': 'ir.actions.act_window',
            'view_mode': 'form',
            'res_model': 'stock.package_level',
            'views': [(view.id, 'form')],
            'view_id': view.id,
            'target': 'new',
            'res_id': self.id,
            'flags': {'mode': 'readonly'},
        }
|
Adolescents living with HIV in Africa should be a priority population for health - and in particular mental health - interventions. The wellbeing of today's youth will determine not only their future, but also our continent and region's future and our ability to reach the United Nation's Sustainable Development Goals by 2030.
Yet this future is currently looking fragile and challenged, given the many intertwined health and social problems faced by Africa's youth. Africa is indeed a "youthful" continent: about 60% of the total population is under 25 years of age, according to the UN. Moreover, the continent's youth population is predicted to rise more than any other region in the world, from an estimated 230 million in 2015 to 535 million by 2065. Africa is, sadly, also the continent that carries the highest HIV burden.
Of the 2.1 million adolescents living with HIV globally, about 1.7 million (84%) live in sub-Saharan Africa, according to the United Nations Children's Emergency Fund (UNICEF). Additionally, African adolescents represent a growing proportion of people living with HIV, and the only population group for which AIDS-related deaths have increased over the past decade. As highlighted by UNICEF, this can be attributed largely to the fact that a generation of children who were infected with the virus at birth are now growing into adolescence.
As a 2014 study by Elizabeth Lowenthal and others reminds us, this trend is giving rise to challenges that will intensify over the next decade, especially in the sub-Saharan African region, where the ability of fragile health systems to reverse the epidemic and address the needs of life-long care and treatment will continue to be tested. A better understanding of the risk and protective factors for the health of HIV-affected adolescents is of critical importance if we are to develop responses aimed at protecting these young peoples' wellbeing and survival.
This will require a holistic approach to health, in line with the World Health Organisation's (WHO) definition, which sees physical, mental and social health as integral and inter-related components of a healthy life. Adolescence - defined by WHO as the 10-19 age range - is a challenging time in an individual's life cycle, and this is true in any context or continent. It is a time of growth, change, exploration and experimentation, and as a result, also a time of particular health and mortality risks.
As highlighted in a WHO 2014 global report on adolescent health, adolescents are more likely to engage in substance use, sexual risks and other high-risk behaviours. UNICEF estimates that the proportion of adolescents with mental health symptoms is rising globally, with one in four adolescent children experiencing symptoms more than once a week. Depression is reported to be the leading cause of illness and disability among 15 to 19-year-olds globally, and suicide the first cause of death. These alarming figures are likely to be higher for African adolescents, many of who are also exposed to multiple social stressors such as poverty, violence and HIV. In particular, young adults living with HIV are likely to face what Lowenthal et al refer to as "recurrent and cumulative psychological stressors".
These include dealing with chronic illness, medicine adherence and the side effects of chronic medication while having to negotiate sexual relationships. In many cases, these adolescents also have to confront possible stigma, orphanhood and death in the family. Poor mental health can, in turn, be linked to many other factors in life, including poor quality of life, behavioural problems, worse educational outcomes, high-risk behaviours and lower retention in HIV treatment and care.
These may lead to increased rates of HIV transmission and AIDS-related illness and death. These risks, of course, extend beyond adolescence to an individual's adult life and more broadly to society. As a 2017 UNICEF Research Brief indicates, mental health disorders can result in a health cost that is some 10 times higher during adolescence than during adulthood. They can also affect the ability to contribute positively to the economy and society later in life. The inclusion of mental health within the UN "Good Health and Wellbeing" Sustainable Development Goal for 2030 (SDG3 in the diagram below) arguably marked a long overdue turning point in terms of recognising the importance of protecting mental health.
However, mental health still remains relatively poorly resourced compared to physical health, with access to and uptake of mental health care services especially limited in low and middle-income countries. As I have argued in the past, we have also paid too little attention to the social component of health, the third key pillar of the WHO definition, which is very closely related to psychological wellbeing. While a common, widely accepted definition does not exist, "social health" can be taken to broadly refer to positive and beneficial interaction with other people and society at large.
A wealth of evidence links social interaction and relationships to better physical and mental health outcomes. According to a 2010 review by Julianne Holt-Lunstad et al, covering 148 studies conducted worldwide with 308,849 individuals, more or better-quality relationships are as important predictors of mortality as other well-established risk factors such as obesity and physical inactivity. "Social support" - defined as the "functional nature or quality" of social relations - is a key dimension in explaining the links between social relations and health. This refers to the emotional, informational and instrumental assistance that people need to stay healthy or adapt to stress. It can include such things as the availability of healthcare, information or advice, lifts to a health facility, and money or food, among others.
A large body of global research links social support to better mental health and lower odds of suicide, including among adolescent populations and individuals living with chronic illness. Yet very little large-scale empirical research has been done on the relationship between mental health and social support among HIV-affected populations in Africa, and evidence on effective interventions is even scarcer. This is even more so the case specifically for HIV-affected adolescents.
The research on social support and health that I have conducted over the past decade was motivated by the realisation of this gap, and the information I gathered while interviewing caregivers and children in rural and semi-urban HIV-affected South African communities over 2005-2009. Several caregivers of children - in some cases grandmothers taking care of multiple children of deceased or absent offspring - spoke of how important the help they received from their family and community was for their ability to cope, both emotionally and physically, with the many hardships they faced daily.
The first mixed-methods research I led on these themes was part of a larger South African health survey, the Young Carers project, a collaboration between Oxford and Brown universities, the Health Economics and HIV/AIDS Research Division (HEARD) at the University of Kwazulu-Natal, the South African government and other NGOs and research institutions. The study involved approximately 2,500 caregivers of adolescents living in communities highly affected by HIV. As reported in a 2014 article, my colleagues and I found that caregivers with more social support were less likely to experience anxiety and depression and more likely to have better self-reported physical health, as well as to practice more "positive" parenting.
This evidence was correlational, but it was supported by subsequent qualitative work with a smaller group (24) of caregivers from this sample. Among other things, these caregivers believed that their adolescent children's wellbeing was positively affected by the support they (the caregivers) received. This, they explained, was partly a result of their (the caregivers') better psychological wellbeing, and better parenting and decision-making, which in turn had positive effects on their children's lives. In addition, the children gained much information, encouragement and advice directly from individuals in the carer's network.
More recent and on-going analyses, with 1,000 adolescents in the Eastern Cape who had initiated HIV treatment, have allowed me the opportunity to start looking at how social support received directly by adolescents living with HIV may affect their mental health and suicide risk. This work uses data from the Mzantsi Wakho project, a mixed-methods research project led by Oxford University and the University of Cape Town, which also involved part of the Young Carers research group.
Preliminary findings suggest that these adolescents on HIV treatment can be protected from depression and suicidal tendencies linked to experiences of stigma through social support provided by their social network, and participation in clinic support groups that provide informational and emotional support. This is particularly salient, since stigma is sadly still widespread among people living with HIV, with almost half of the adolescents in this study reporting some experience of being stigmatised. Moreover, adolescents who received more support from their social network appeared to be less likely to experience depression more generally, and therefore suicidal thoughts and behaviour. What reflections can we draw from these specific findings, as well as the broader research and context?
Firstly, it is clear that there can be no health without social health, just as there can be "no health without mental health", as Martin Prince et al aptly put it in a 2007 article. Strengthening multiple support resources - for example by supporting existing mechanisms or relationships, or through health interventions - has the potential to promote better mental health and possibly facilitate service uptake among (HIV-positive and other) adolescents. This is not a magic bullet, but it is certainly a potentially key element within more multi-faceted interventions that aim to support young people across different domains.
Secondly, caregivers and other close family members play an important role in adolescents' lives and health, and interventions need to take this into consideration. We have found that caregivers are often the main providers of emotional and tangible support for adolescents, and higher caregiver support has been linked to better adolescent mental health even among HIV-positive youth, as shown for example in a 2017 study by Shelene Gentz and others. This suggests that boosting the support received by caregivers of adolescents, as well as that directly received by the adolescents themselves, may have cumulative positive effects on adolescent health. It also suggests that it is important for adolescent health interventions to involve primary caregivers or other close adults in the child's network, and/or work to strengthen child-caregiver interaction.
These may include initiatives such as community based parenting programmes. Several recent evaluations have, in fact, shown these programmes to be effective in reducing abuse and mental health problems among youth, even in low-resourced, HIV-affected African communities. Lastly, more applied research is needed to better understand the connections between the social, mental and physical dimensions of health, and to further develop and test health interventions in this specific adolescent population. We should draw from what we know works and is culturally appropriate and feasible in resource-limited African settings, including some of the initiatives mentioned above.
"Adolescents and youth can be a positive force for development when they are provided the knowledge and opportunities they need to thrive." Investing in the mental, social and physical health of Africa's adolescents is essential if we are to provide them with opportunities to thrive and contribute to the African societies and economies of the future.
Read the original article on Africa In Fact.
Copyright © 2018 Africa In Fact. All rights reserved. Distributed by AllAfrica Global Media (allAfrica.com). To contact the copyright holder directly for corrections — or for permission to republish or make other authorized use of this material, click here.
Africa: What Are Cartoons Meant to Do?
Africa: Africa Is the New Asia - Now What?
|
"""
Fear and loathing.
"""
import ast
import asyncio
import logging
import time
from bogoapp import tools
from bogoapp.bogo import Bogo
logger = logging.getLogger("BogoManager")
class BogoError(Exception):
    """Raised for invalid configuration or inconsistently persisted bogo state."""
    pass
class BogoManager:
    """
    Manages all state related to bogosorting a sequence of lists.

    Tracks the current `Bogo` being sorted, the random module whose state
    must be persisted alongside it, and the database used for checkpointing,
    so sorting can resume deterministically after a restart.
    """

    def __init__(self,
                 unsorted_lists,
                 speed_resolution,
                 database,
                 random_module):
        """Initialize the manager.

        :param unsorted_lists: iterable of lists to bogosort, in order.
        :param speed_resolution: window length in seconds used when
            measuring shuffle speed; must be positive.
        :param database: async persistence backend for bogos/random state.
        :param random_module: random implementation whose state is saved.
        :raises BogoError: if ``speed_resolution`` is not positive.
        """
        if speed_resolution <= 0:
            raise BogoError("Invalid speed resolution, "
                            "N shuffles per {} seconds doesn't make sense."
                            .format(speed_resolution))
        self.unsorted_lists = unsorted_lists
        self.speed_resolution = speed_resolution
        self.database = database
        self.random = random_module
        self.current_bogo = None
        self.stopping = False
        self.asyncio_task = None

    async def load_previous_state(self):
        """Restore the newest bogo and its random state from the database.

        :return: the restored `Bogo`, or None when the database has none.
        :raises BogoError: if a bogo exists but its random state is missing
            or refers to a different bogo (state was saved improperly).
        """
        # Use the module-level named logger (was logging through the root
        # logger, bypassing the "BogoManager" logger defined above).
        logger.info("Loading previous state.")
        bogo_row = await self.database.newest_bogo()
        if not bogo_row:
            logger.info("No previous bogo found.")
            return None
        bogo = Bogo.from_database_row(bogo_row)
        random_state_row = await self.database.newest_random_state()
        if not random_state_row:
            raise BogoError("Improperly saved random state "
                            f"Found newest bogo with id {bogo.db_id} "
                            "but no previous random state was found.")
        random_state_bogo_id = random_state_row[3]
        if bogo.db_id != random_state_bogo_id:
            raise BogoError("Improperly saved random state, "
                            f"newest bogo has id {bogo.db_id} "
                            "but newest random state has a reference "
                            f"to a bogo with id {random_state_bogo_id}.")
        logger.info("Setting random state.")
        # The state is stored as its repr; literal_eval safely rebuilds it.
        self.random.setstate(ast.literal_eval(random_state_row[1]))
        logger.info(f"Returning previous bogo {bogo}")
        return bogo

    async def save_state(self, now):
        """Persist the current bogo and the random state, stamped *now*."""
        logger.debug("Saving state.")
        random_state = self.random.getstate()
        await self.database.save_state(self.current_bogo, random_state, now)

    async def make_next_bogo(self, sequence):
        """Create, persist and activate a new bogo for *sequence*."""
        logger.debug(f"Making new bogo from sequence {sequence}.")
        now = tools.isoformat_now()
        self.current_bogo = Bogo(sequence=sequence, created=now)
        await self.save_state(now=now)
        # The database assigns the id; read it back from the newest row.
        self.current_bogo.db_id = (await self.database.newest_bogo())[0]

    async def sort_current_until_done(self):
        """Bogosort the current sequence until it is sorted."""
        logger.debug("Sorting current bogo until done.")
        delta_iterations = 0
        delta_seconds = 0.0
        while not (self.current_bogo.is_finished() or self.stopping):
            # Yield control so other coroutines on the loop can run.
            await asyncio.sleep(1e-100)
            perf_counter_start = time.perf_counter()
            self.current_bogo.shuffle_with(self.random.shuffle)
            delta_iterations += 1
            delta_seconds += time.perf_counter() - perf_counter_start
            if delta_seconds >= self.speed_resolution:
                # Report the shuffle rate for this window (the counters
                # were previously reset without ever being used).
                logger.debug(f"{delta_iterations} shuffles in "
                             f"{delta_seconds:.3f} seconds.")
                delta_iterations = 0
                delta_seconds = 0.0
        logger.debug("Stopped sorting bogo.")
        now = tools.isoformat_now()
        if self.current_bogo.is_finished():
            logger.debug("Bogo was sorted")
            self.current_bogo.finished = now
        else:
            logger.debug("Bogo was not sorted")
        await self.save_state(now)

    async def sort_all(self):
        """Sort every remaining unsorted list in order, honoring the stop flag."""
        logger.debug("Sorting all unsorted lists.")
        for lst in self.unsorted_lists:
            if self.stopping:
                logger.info("Stopping sorting all unsorted lists.")
                break
            await self.make_next_bogo(lst)
            await self.sort_current_until_done()

    async def run(self):
        """Resume an unfinished bogo if one exists, then sort the rest."""
        logger.info("Running BogoManager.")
        previous_bogo = await self.load_previous_state()
        if previous_bogo and not previous_bogo.is_finished():
            logger.info("Found unfinished previous bogo.")
            unfinished_length = len(previous_bogo.sequence)
            self.unsorted_lists = tools.fast_forward_to_length(
                self.unsorted_lists, unfinished_length)
            # Drop next list since it has the same length as the sequence in
            # the unfinished previous_bogo.
            next(self.unsorted_lists)
            self.current_bogo = previous_bogo
            await self.sort_current_until_done()
        else:
            logger.info("Did not find an unfinished previous bogo.")
        await self.sort_all()

    def get_current_state(self):
        """Return a ``(shuffle count, is finished)`` tuple for the current bogo."""
        return (self.current_bogo.shuffles,
                self.current_bogo.is_finished())
|
About an hour north of the Golden Gate Bridge awaits a veritable country within a wondrous state, where natural beauty unites with world-renowned California artists and artisans to inspire the senses. They call it Healdsburg. Whether you’re visiting to sample from an abundance of world class wineries, to feast from the very best in farm-to-table dining, or just to explore the picture-perfect countryside of lush valleys and redwood forests that cradle Lake Sonoma and the Russian River, you will delight in everything… but the leaving.
|
__author__ = 'huanpc'
import constant
import argparse
from random import randint
# Path to the output directory for the generated data-seed files.
DIR_OUTPUT_PATH = './output'
# Starting value for the id column of the customer table.
customer_id_begin = 5
product_id_begin = 0
# Number of rows to generate.
num_of_row = 100
#def createProductData():
def createCustomerData():
    """Generate ``num_of_row`` fake customer rows as CSV.

    Writes ``DIR_OUTPUT_PATH/customer_data_seed.csv`` with one comma-joined
    line per customer and no header row (matching the original output that
    the seed importer consumes). Names are sampled from
    ``constant.FIRST_NAME``/``constant.LAST_NAME``, the email is derived
    from the sampled name, and telephone/fax are identical random digit
    strings.
    """
    first_name_list = constant.FIRST_NAME
    last_name_list = constant.LAST_NAME
    # Template row: index 0 (id) and indexes 3-7 (names/contact details)
    # are overwritten per customer; the rest are shared constants.
    # Column layout, for reference (the header itself is NOT written):
    # customer_id, customer_group_id, store_id, first_name, last_name,
    # email, telephone, fax, password, salt, cart, whistlist, newsleter,
    # address_id, custom_field, ip, status, approves, safe, token,
    # date_added
    row = ['1', constant.CUSTOMER_GROUP_ID, constant.STORE_ID, '1', '1',
           '1', '1', '1', constant.PASSWORD, constant.SALT, constant.CART,
           constant.WHISTLIST, constant.NEWSLETTER, constant.ADDRESS_ID,
           constant.CUSTOM_FIELD, constant.IP, constant.STATUS,
           constant.APPROVED, constant.SAFE, constant.TOKEN,
           constant.DATE_ADDED]
    # Context manager guarantees the file is closed even if a write fails
    # (the original left the handle open on any exception).
    with open(DIR_OUTPUT_PATH + '/customer_data_seed.csv', 'w') as f:
        for i in range(num_of_row):
            first_name = first_name_list[randint(0, len(first_name_list) - 1)]
            last_name = last_name_list[randint(0, len(last_name_list) - 1)]
            row[0] = str(i + customer_id_begin)
            row[3] = first_name
            row[4] = last_name
            row[5] = (first_name + '.' + last_name + '@gmail.com').lower()
            # Telephone: two concatenated 5-digit random blocks; fax mirrors it.
            row[6] = str(randint(11111, 99999)) + str(randint(11111, 99999))
            row[7] = row[6]
            f.write(','.join(row) + '\n')
def main():
    """Entry point: generate the customer seed data."""
    # parser = argparse.ArgumentParser(description='Sinh du lieu mau cho tap test')
    # parser.add_argument('integers', metavar='N', type=int, nargs='+',
    #                     help='an integer for the accumulator')
    createCustomerData()

# Run only when executed as a script, not when imported.
if __name__ == '__main__':
    main()
|
Min Xie has been a project scheduler since 2008. He’s spent his career with leading infrastructure companies such as Enbridge and Worley Parsons, among others.
Min has worked on over 10 major linear projects in his career, including massive pipeline and power line projects.
Question: Min, what challenges do you find are unique to linear projects?
Q: What software tools have you used to manage linear project schedules?
Q: How did you find using these traditional project management software applications to manage the schedule of a linear project?
Q: When did you start using TILOS to manage the scheduling for linear projects?
Q: What do you think of TILOS?
“I was surprised at how easy it is – I’m very impressed.
Q: What percentage of time and money would you estimate TILOS saves, per project?
Q: What would you tell anyone who is considering using TILOS, but hasn’t quite decided yet?
TILOS is the globe’s preferred linear project management software, trusted by 4 out of 5 industry leaders.
TILOS automates the linear scheduling method, and with it, many of the repetitive tasks associated with planning, updating and presenting a linear project. Its intuitive visual interface connects the plan to the physical project, making it easy to optimize, update and present to various stakeholders.
Click here to request a free trial and demo of TILOS.
|
from collections import OrderedDict
import os
import pickle
import logging
import numpy as np
from teras.training.event import Listener
from teras.utils.collections import ImmutableMap
class ProgressBar(Listener):
    """Training listener that drives a user-supplied progress bar.

    The bar object is built lazily by calling ``factory(total)`` when a
    training/validation epoch begins, advanced after every batch, and
    closed when the epoch ends.

    Example::

        >>> from tqdm import tqdm
        >>> import time
        >>> pbar = ProgressBar(lambda n: tqdm(total=n))
        >>> pbar.init(512)
        >>> for _ in range(16):
        >>>     time.sleep(0.1)
        >>>     pbar.update(32)
        >>> pbar.close()
    """
    name = "progressbar"

    def __init__(self, factory, **kwargs):
        super().__init__(**kwargs)
        self._pbar = None
        self._factory = factory

    def init(self, total):
        """Discard any active bar, then create a fresh one for *total* items."""
        self.close()
        self._pbar = self._factory(total)

    def update(self, n):
        """Advance the active bar by *n* items."""
        self._pbar.update(n)

    def close(self):
        """Close and forget the active bar; a no-op when none is active."""
        if self._pbar is None:
            return
        self._pbar.close()
        self._pbar = None

    def __del__(self):
        self.close()

    def on_epoch_train_begin(self, data):
        self.init(data['size'])

    def on_batch_end(self, data):
        self.update(data['batch_size'])

    def on_epoch_train_end(self, data):
        self.close()

    # Validation epochs reuse the training begin/end behavior.
    on_epoch_validate_begin = on_epoch_train_begin
    on_epoch_validate_end = on_epoch_train_end
# Stack of currently active Reporter instances; ``Reporter.__enter__``
# pushes and ``Reporter.__exit__`` pops, so the innermost reporter is last.
_reporters = []


def report(values):
    """Forward *values* (a metric mapping) to all active reporters,
    innermost first.

    A no-op when no reporter is active: iterating a reversed empty list
    does nothing, so the previous explicit emptiness guard was redundant
    and has been removed.
    """
    for reporter in reversed(_reporters):
        reporter.report(values)
class Reporter(Listener):
    """Training listener that accumulates per-batch metrics.

    Values are fed in via the module-level :func:`report` (or this class's
    :meth:`report` directly), aggregated per epoch, logged at epoch end,
    and stored in a history of per-epoch summaries.

    Aggregation rules:
      * keys containing "loss" (other than the overall "loss" itself)
        collect every reported value and are averaged in the summary;
      * keys containing "accuracy" accept either a float (averaged over
        the number of reports) or a ``(correct, total)`` pair (pairs are
        summed and divided in the summary);
      * any other key simply keeps its last reported value.
    """
    name = "reporter"

    def __init__(self, logger, **kwargs):
        super().__init__(**kwargs)
        self._logger = logger
        self._logs = OrderedDict()
        self._reported = 0
        self._history = []

    def __enter__(self):
        _reporters.append(self)
        # Fix: return self so ``with Reporter(...) as r:`` binds the
        # reporter (previously returned None, per the context-manager
        # protocol the `as` target would have been None).
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        _reporters.pop()

    def report(self, values):
        """Accumulate one batch worth of metric *values* (a mapping)."""
        for name, value in values.items():
            if name != "loss" and "loss" in name:
                # Sub-losses: keep every value so the summary can average.
                loss = self._logs.get(name, [])
                loss.append(float(value))
                value = loss
            elif "accuracy" in name:
                accuracy = self._logs.get(name, 0.0)
                if isinstance(value, (tuple, list)) and len(value) == 2:
                    # (correct, total) pair: switch to pair accumulation,
                    # discarding any previous float-mode total.
                    if isinstance(accuracy, float):
                        accuracy = [0, 0]
                    accuracy[0] += value[0]
                    accuracy[1] += value[1]
                else:
                    accuracy += float(value)
                value = accuracy
            self._logs[name] = value
        self._reported += 1

    def get_summary(self):
        """Return an OrderedDict summarizing everything reported so far."""
        summary = OrderedDict()
        for name, value in self._logs.items():
            if name != "loss" and "loss" in name:
                n = len(value)
                summary[name] = sum(value) / n if n > 0 else np.nan
            elif "accuracy" in name:
                if isinstance(value, list):
                    correct, total = value[:2]
                    if total == 0:
                        accuracy = np.nan
                    else:
                        accuracy = correct / total
                else:
                    accuracy = value / self._reported
                summary[name] = accuracy
            else:
                summary[name] = value
        return summary

    def get_history(self):
        """Return the list of per-epoch {'training', 'validation'} summaries."""
        return self._history

    def on_train_begin(self, data):
        self._history = []

    def on_epoch_train_begin(self, data):
        self._logs.clear()
        self._reported = 0

    on_epoch_validate_begin = on_epoch_train_begin

    def on_epoch_train_end(self, data):
        self.report({'loss': data['loss']})
        summary = self.get_summary()
        self._output_log("training", summary, data)
        self._history.append({'training': summary, 'validation': None})

    def on_epoch_validate_end(self, data):
        self.report({'loss': data['loss']})
        summary = self.get_summary()
        self._output_log("validation", summary, data)
        self._history[-1]['validation'] = summary

    def _output_log(self, label, summary, data):
        """Log the epoch summary: loss/accuracy first, then the rest."""
        message = "[{}] epoch {} - #samples: {}, loss: {:.8f}".format(
            label, data['epoch'], data['size'], summary['loss'])
        if 'accuracy' in summary:
            message += ", accuracy: {:.8f}".format(summary['accuracy'])
            # Append the raw (correct/total) counts when available.
            v = self._logs.get('accuracy', None)
            if isinstance(v, list) and v[1] > 0:
                message += " ({}/{})".format(v[0], v[1])
        self._logger.info(message)
        message = []
        for name, value in summary.items():
            if name == 'loss' or name == 'accuracy':
                continue
            if isinstance(value, float):
                message.append("{}: {:.8f}".format(name, value))
            else:
                message.append("{}: {}".format(name, value))
            if 'accuracy' in name:
                v = self._logs.get(name, None)
                if isinstance(v, list) and v[1] > 0:
                    message[-1] += " ({}/{})".format(v[0], v[1])
        if message:
            self._logger.info(", ".join(message))
class Saver(Listener):
    """Training listener that serializes the model (and an optional context).

    Depending on configuration, the model is saved every ``interval``
    epochs (with the epoch number appended to the file name) or, with
    ``save_best``, only when the monitored value improves.
    """
    name = "saver"

    class Context(ImmutableMap):
        """Immutable mapping with attribute-style access, persisted once at
        the beginning of training to record arbitrary training context."""
        def __getattr__(self, name):
            if name in self.data:
                return self.data[name]
            raise AttributeError("'{}' object has no attribute '{}'"
                                 .format(type(self).__name__, name))

        def __hash__(self):
            # Hash the sorted item tuple so equal contexts hash equally.
            return hash(tuple(sorted(self.data.items())))

    def __init__(self, model, basename, directory='', context=None, interval=1,
                 save_from=None, save_best=False, evaluate=None,
                 serializer=None, logger=None, **kwargs):
        """Configure the saver.

        :param model: object serialized on each save.
        :param basename: file name stem; files are written as
            ``<directory>/<basename>.context`` and
            ``<directory>/<basename><suffix>.pkl``.
        :param directory: optional directory prefix (user-expanded).
        :param context: optional mapping saved once when training begins.
        :param interval: save every this many epochs.
        :param save_from: if set, skip saving before this epoch.
        :param save_best: save only when the evaluated value improves.
        :param evaluate: ``callable(data) -> value`` used with ``save_best``;
            when not callable, ``-loss`` is used instead.
        :param serializer: pickle-compatible module (default: ``pickle``).
        :param logger: logger to use (default: this module's logger).
        :raises ValueError: if ``interval`` is not an int.
        """
        super().__init__(**kwargs)
        self._model = model
        self._basename = os.path.join(os.path.expanduser(directory), basename)
        if context is not None and not isinstance(context, Saver.Context):
            context = Saver.Context(context)
        self._context = context
        if not isinstance(interval, int):
            raise ValueError("interval must be specified as int value: "
                             "actual('{}')".format(type(interval).__name__))
        self._interval = interval
        self._save_from = save_from
        self._save_best = save_best
        self._evaluate = evaluate
        # Best value seen so far; starts at -inf so the first save always wins.
        self._best_value = -float('inf')
        self._serializer = serializer if serializer is not None else pickle
        self._logger = logger \
            if logger is not None else logging.getLogger(__name__)

    def save_context(self, context):
        """Serialize *context* to ``<basename>.context``."""
        if not isinstance(context, Saver.Context):
            raise TypeError('`context` must be a Saver.Context object')
        file = self._basename + '.context'
        self._logger.info("saving the context to {} ...".format(file))
        with open(file, 'wb') as f:
            self._serializer.dump(context, f)

    def save_model(self, model, suffix=''):
        """Serialize *model* to ``<basename><suffix>.pkl``."""
        file = "{}{}.pkl".format(self._basename, suffix)
        self._logger.info("saving the model to {} ...".format(file))
        with open(file, 'wb') as f:
            self._serializer.dump(model, f)

    @staticmethod
    def load_context(model_file, deserializer=None):
        """Load the ``.context`` file saved next to *model_file*.

        The context file name is derived from the first dot-separated part
        of the model file's basename, so suffixed models such as
        ``model.5.pkl`` map back to ``model.context``.
        """
        if deserializer is None:
            deserializer = pickle
        _dir, _file = os.path.split(model_file)
        context_file = os.path.basename(_file).split('.')[0] + '.context'
        context_file = os.path.join(_dir, context_file)
        with open(context_file, 'rb') as f:
            context = deserializer.load(f)
        return context

    def on_train_begin(self, data):
        # Persist the context once, up front, if one was provided.
        if self._context is not None:
            self.save_context(self._context)

    def on_epoch_validate_end(self, data):
        # In save_best mode the decision relies on validation metrics.
        if self._save_best:
            self._trigger_save(data)

    def on_epoch_end(self, data):
        # In interval mode, save unconditionally on the epoch boundary.
        if not self._save_best:
            self._trigger_save(data)

    def _trigger_save(self, data):
        """Save the model if the epoch/interval/best-value conditions hold."""
        epoch = data['epoch']
        if self._save_from is not None and epoch < self._save_from:
            return
        if epoch % self._interval == 0:
            if self._save_best:
                # Higher is better; default metric is the negated loss.
                if callable(self._evaluate):
                    value = self._evaluate(data)
                else:
                    value = -data['loss']
                if value <= self._best_value:
                    return
                self._logger.info("update the best score - new: {}, old: {}"
                                  .format(value, self._best_value))
                self._best_value = value
                self.save_model(self._model)
            else:
                self.save_model(self._model, suffix='.' + str(epoch))
|
The big envelope comes from the synagogue with the sticker on the front “High Holy Day Information inside”. It sits on Larry David’s desk for a couple of weeks and the day has come when he needs to decide what to do.
Larry has been debating for a few days in his own mind whether he wants to go to High Holy Day Services this year, and even continue at the synagogue. For the last few years, he has only gone 3 days a year. His children and grandchildren live in New York and won’t be coming home. When he does go, he sits there for a couple of hours anticipating the rabbi’s sermon and remembers it as often being a letdown. He decides he could use those days to work on a few writing projects and throws the big white envelope in the waste basket.
Well if Jeff is going, “I am going, too”, Larry thinks. So you can bet that the writing of his next sitcom is going to have to be delayed for a few hours while Larry calls the rabbi, and whoever else he has to call, to get those precious tickets.
I have written before about how maybe we should stop using the word tickets, and call them “Spiritual Passes”. That way, we would no longer be speaking about tickets in the same vein as we do tickets for the Red Sox-Yankees series next month, or to “Evan Hansen” and “Hamilton”.
High Holy Day Tickets are a benefit of membership and belonging to a sacred community. Synagogues also use High Holy Day Tickets for security purposes. Having a ticket means that you belong and the ushers (and sadly, police officers) can let you through.
Can High Holy Day worship ever be open to the public? Security issues can be overcome – which of course costs more money. And you don’t want to alienate congregants who provide both ongoing financial support as well as volunteer their time. Space might also be an issue. Some synagogues have dealt with this through community-wide worship in more public spaces. Or making all services open to the public, except those that always have high attendance – First Day of Rosh Hashanah, Kol Nidre and the morning worship on Yom Kippur.
Maybe Larry David, after his various telephone calls, secures those tickets, that he can pick up at “Will-Call” – the synagogue’s office. If he can’t snag the tickets, maybe he just shows up. He shares his saga with the usher at the door who recognizes him and lets him in.
I wonder what will happen at the Community Seder….
‹ Events: Fun or Funds?
|
#!/usr/bin/env python
from django.core.management import BaseCommand
class Command(BaseCommand):
    # One-off data-migration command; its implementation has been disabled
    # and is retained below as an inert string for reference.
    help = "Replace historical activity IDs with activity IDs within Threaded comments"

    def handle(self, *args, **options):
        """Entry point for ``manage.py`` invocation; currently a no-op."""
        pass

# NOTE(review): the original migration body is kept as an inert string for
# reference; it uses names (get_model, HistoricalActivity, Activity,
# ContentType) that are no longer imported, so it cannot simply be
# re-enabled without restoring those imports.
"""
ThreadedComment = get_model()
passed, failed = 0, 0
for comment in ThreadedComment.objects.filter(content_type_id=42):
    try:
        ha = HistoricalActivity.objects.get(id=comment.object_pk)
        a = Activity.objects.filter(
            activity_identifier=ha.activity_identifier
        ).first()
        comment.content_type = ContentType.objects.get(
            app_label="landmatrix", model="activity"
        )
        comment.object_pk = a.id
        comment.save()
        passed += 1
    except Activity.DoesNotExist:
        failed += 1
    except HistoricalActivity.DoesNotExist:
        failed += 1
self.stdout.write("%i passed, %i failed" % (passed, failed))
"""
|
Plymouth Marjon University has a reputation for providing an outstanding student experience leading to high levels of employment. Founded in 1838, the University’s heritage and expertise is well established in faculties of Education & Social Sciences, Sport & Health Sciences and Language & Creative Arts.
Towards the latter part of 2015, Studiose was commissioned to design and build responsive modular page templates for a new ‘Marjon’ website. These needed to allow Marjon to create their own web pages using their existing content management system.
The previous Marjon website was the product of many years of content abuse. It had grown into a monster, containing more than 2000 pages, many of which were impossible to find without performing a search. The result was poor application numbers and falling student numbers.
The first phase of the project involved three weeks of wireframing, testing and refining. This gave us a thorough understanding of the new site’s information architecture, from which we could devise a better user experience and, ultimately, higher application numbers.
After a content audit and a reduction of pages, one of the biggest challenges was to develop a simpler navigation which would work across all devices. Studiose opted for an ‘off-canvas’ navigation which allows for multi-level categorisation without limits. This is a mobile-first approach, but it works incredibly well across any device.
The second biggest challenge we faced was to give the Marjon website a fresh, modern feel. This involved Studiose refreshing the Marjon logo, introducing a modern typeface and developing a new colour palette.
Photography plays a vital role in the new website’s design. The Marjon image library goes back five years or more, and consists of many different styles from a long line of photographers. After studying the effects of image filters, researched by Yahoo Labs, we learned that a filtered image is 21% more likely to be viewed and 45% more likely to be interacted with.
With countless prospective and current students being users of apps like Instagram, Studiose recommended that Marjon use a filter to not only appeal to this target audience, but to help bring a house style to the wide range of photography and avoid the need for a whole new photography shoot.
Studiose used online prototyping to test the mobile and desktop interfaces. This gave project participants the opportunity to see working pages and the off-canvas navigation before we made the commitment to the complicated coding process. This was a vital stage in the project.
The new Marjon templates and modules are constructed from an open-source framework called Foundation, rather than creating complicated and bespoke sets of code. This allows for a wide range of developers with knowledge of Foundation to work on the site. It also makes the website scalable and will make future changes or improvements easier and cost effective.
The team at Marjon was keen to pull together a unified tone of voice (TOV) across the main sections of the website. The TOV needed to appeal to staff, students, parents of prospective students and many more, so it had to retain some formality. But we were able to pull the content away from its more traditional TOV and give it a bit more personality and flair.
The Studiose process has lifted out the stuffiness of an old educational institution and turned it into a totally responsive, easy-to-navigate, vibrant, modern and welcoming platform to entice new students, as well as continuing to serve as a handy resource for current students and staff.
This project is a shining example of what the team at Studiose can achieve.
"I am very excited about the final design – it is fresh, has impact, is well thought out and tested well with our target audience. It is a two phase project and has received much positive feedback from the target audience and internal colleagues.
Studiose advocated a ‘mobile-first’ approach to design so the user experience is slick across multiple devices. They also proposed a good solution to navigation, which had become a problem on our existing website as it grew to 2000+ pages.
In terms of management and team work, Studiose were flexible in working with us when our deadlines were pulled forward, and ensured that work was delivered on time, from wireframe designs to final designs, prototype and HTML/CSS. As and when complications arose, I was impressed with how quickly they resolved them. They also always follow up on outstanding actions.
They have been a pleasure to work with and are professional at all times and work smoothly together as a team, as well as working well with our wider team."
|
"""
Django settings for gsw project.
Generated by 'django-admin startproject' using Django 1.8.5.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os

# Project root: two directory levels up from this settings file.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): the key is hard-coded here; move it to local_settings or an
# environment variable before deploying.
SECRET_KEY = '=f4aydus9$f*c(108mqo!-)b8i@ttb80&h%940z@4bd%)%i8jj'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False

# NOTE(review): '*' accepts any Host header; restrict to real host names in
# production (local_settings, imported below, may override this).
ALLOWED_HOSTS = ['*']

# Application definition
INSTALLED_APPS = (
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'django.contrib.gis',
    'rest_framework',
    'rest_framework_gis',
    'corsheaders',
    'hvad',
    'backend',
)
# Middleware stack (Django 1.8-style MIDDLEWARE_CLASSES).
# CorsMiddleware is placed first: django-cors-headers requires it to run
# before any middleware that can generate a response (particularly
# CommonMiddleware) so CORS headers are attached to every response.
# The previous configuration listed CommonMiddleware twice and ran
# CorsMiddleware last.
MIDDLEWARE_CLASSES = (
    'corsheaders.middleware.CorsMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
    'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'gsw.urls'

# Single SQLite database stored in the project root as gsw.db.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'gsw.db'),
    }
}

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'gsw.wsgi.application'

# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
LOGIN_REDIRECT_URL = "/api/"
STATIC_ROOT = os.path.join(BASE_DIR, "static")

# Accept cross-origin requests from any origin.
CORS_ORIGIN_ALLOW_ALL = True

REST_FRAMEWORK = {
    'DEFAULT_AUTHENTICATION_CLASSES': (
        'rest_framework.authentication.BasicAuthentication',
        'rest_framework.authentication.SessionAuthentication',
    )
}

# Machine-specific overrides; may redefine any setting above.
# NOTE(review): this star import runs before MEDIA_URL/MEDIA_ROOT are set,
# so local_settings cannot override them — confirm that is intentional.
from gsw.local_settings import *

MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR,'media')
|
Ashwin Bharti became a disciple of Osho in 1977 and is an ambassador of Osho’s vision. From being a successful businessman, he became a philanthropist and started helping himself & others in their spiritual journey. Being a channel for Osho’s energy, Ashwin created Zorba the Buddha, one of the world’s largest open faith spiritual centre and cultural hub.
Over the years, he has received transmissions from various teachers in Sufism, Tantra, Zen, Shamanism, Pulsing, Osho therapies & meditations, bodywork, biodanza, contact improvisation, art and music. Today, Ashwin continues to live his vision of bringing people to meditation as well as celebration, and has been facilitating Osho meditations for more than a decade.
|
"""Download translations from Airtable for static server strings."""
import argparse
import collections
import json
import logging
import os
import typing
from typing import Any, Dict, Optional, Sequence, Set, Tuple
from airtable import airtable
import polib
from bob_emploi.frontend.server.mail import campaign
from bob_emploi.frontend.server.mail.templates import mailjet_templates
# Translations base: https://airtable.com/appkEc8N0Bw4Uok43
_I18N_BASE_ID = 'appkEc8N0Bw4Uok43'
# Languages that must have a translation for every extracted string.
_REQUIRED_LANGUAGES = {'en'}
# Languages whose translations are written to the output file.
_DOWNLOAD_LANGUAGES = {'en', 'en_UK', 'fr', 'fr@tu'}
def main(string_args: Optional[Sequence[str]] = None) -> None:
    """Download translations from Airtable for static server strings.

    Reads the extracted strings from a PO file (plus Mailjet campaign
    subjects), fetches their translations from the Airtable i18n base, and
    writes the filtered translations as JSON to the output file.

    :param string_args: CLI arguments (defaults to ``sys.argv[1:]``).
    :raises ValueError: when no Airtable API key is available.
    :raises KeyError: when translations are missing and the
        ``FAIL_ON_MISSING_TRANSLATIONS`` env var is set.
    """
    # Parse arguments.
    parser = argparse.ArgumentParser(
        description='Download translations from Airtable for static server strings.')
    parser.add_argument('--api-key', default=os.getenv('AIRTABLE_API_KEY'))
    parser.add_argument(
        '--strings', help='Path to the PO file containing the extracted strings to translate.',
        required=True)
    parser.add_argument(
        '--output', help='File in which to save the translations.',
        required=True)
    args = parser.parse_args(string_args)
    if not args.api_key:
        raise ValueError(
            'No API key found. Create an airtable API key at '
            'https://airtable.com/account and set it in the AIRTABLE_API_KEY '
            'env var.')
    logging.info('Loading extracted strings…')
    extracted_strings = {
        msg.msgid for msg in polib.pofile(args.strings)
        # Do not keep strings that are only in test files.
        if not msg.occurrences or
        not all(f.endswith('_test.py') for f, unused_line in msg.occurrences)
    }
    logging.info('Loading extra strings from Mailjet templates…')
    mailjet_strings = {
        campaign.get_campaign_subject(campaign_id)
        for campaign_id in mailjet_templates.MAP
    }
    logging.info('Downloading translations from Airtable…')
    i18n_base = airtable.Airtable(_I18N_BASE_ID, args.api_key)
    translations = {
        typing.cast(Dict[str, Any], record['fields']).get('string', ''): {
            lang: translation
            for lang, translation in record['fields'].items()
            if lang in _DOWNLOAD_LANGUAGES
        }
        for record in i18n_base.iterate('translations')
    }
    logging.info('Mapping keys with context to their base keys…')
    # For a key like "a_b_c", register "a" -> ["a_b", "a_b_c"] and
    # "a_b" -> ["a_b_c"], so contextualized variants of an extracted string
    # are exported along with the base string.
    contexts = collections.defaultdict(list)
    for translation in translations:
        parts = translation.split('_')
        for index in range(1, len(parts)):
            key = '_'.join(parts[0: index])
            contexts[key].extend([
                '_'.join(parts[0: split_index + 1])
                for split_index in range(index, len(parts))])
    logging.info('Filtering translations of extracted strings…')
    extracted_translations: Dict[str, Dict[str, str]] = {}
    should_raise_on_missing = bool(os.getenv('FAIL_ON_MISSING_TRANSLATIONS', ''))
    missing_translations: Set[Tuple[Optional[str], str]] = set()
    for key in extracted_strings | mailjet_strings:
        if key not in translations:
            # Mailjet subjects without a translation record are tolerated;
            # extracted strings are reported as missing for all languages.
            if key in extracted_strings:
                missing_translations.add((None, key))
            continue
        for language in _REQUIRED_LANGUAGES - translations[key].keys():
            missing_translations.add((language, key))
        extracted_translations[key] = translations[key]
        for key_with_context in contexts.get(key, []):
            try:
                extracted_translations[key_with_context] = translations[key_with_context]
            except KeyError:
                pass
    if missing_translations:
        missing_translations_string = 'Missing translations:\n' + '\n'.join(
            f'{language if language else "all"}: {key}' for language, key in missing_translations)
        if should_raise_on_missing:
            raise KeyError(missing_translations_string)
        logging.info(missing_translations_string)
    logging.info('Creating the translations file…')
    # Force UTF-8 explicitly: the dump uses ensure_ascii=False, so relying
    # on the locale's default encoding could crash (UnicodeEncodeError) or
    # corrupt non-ASCII translations on non-UTF-8 systems.
    with open(args.output, 'wt', encoding='utf-8') as output_file:
        json.dump(extracted_translations, output_file, ensure_ascii=False, sort_keys=True)
# Script entry point: configure logging before running the download.
if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO)
    main()
|
A suspended Ottawa doctor will spend seven years in prison after he pleaded guilty to sexually assaulting and secretly recording dozens of female patients over the course of nearly 30 years.
Dr. Vincent Nadon faced nearly 100 charges of sexual assault and voyeurism involving 50 alleged victims dating back to 1990.
On Wednesday, the 57-year-old doctor pleaded guilty to 14 of those charges in an Ottawa courtroom and he was sentenced to eight years in prison less a year for time already served.
During the proceedings, several women – whose identities have been protected under a publication ban – took to the stand to read victim impact statements as Nadon sat in the prisoner’s box with his head bowed.
“Did I actually receive a cancer scare or did he just want to do more physicals?” another woman asked in her statement.
Nadon was first charged in January 2018 after one of his patients at the University of Ottawa Health Services Clinic noticed a hidden phone with its camera pointing toward the bed in the examination room. When the woman confronted Nadon, he told her he filmed the examination for training purposes.
The patient complained to police who then launched an investigation into the Chelsea, Que. doctor’s activities.
Two days after the woman reported the incident to police, Nadon was arrested outside of an IGA grocery store in Chelsea. Police watched him throwing garbage into the store’s dumpster. According to police, the garbage bags contained a damaged hard drive along with other miscellaneous items.
In the subsequent months, police laid even more charges against Nadon in connection with dozens of incidents involving 50 alleged victims.
One of those victims told CTV Ottawa how her trust had been broken by Nadon.
“I was betrayed for probably 21 years,” she said.
At first, the woman said she had a difficult time believing the accusations against her doctor when police called her earlier this year.
“I didn’t know and I defended him for many months,” she said.
In May, the College of Physicians and Surgeons of Ontario suspended Nadon’s license to practise medicine.
On Wednesday, Nadon apologized to his victims before a packed courtroom.
“I have caused a lot of grief to a lot of people… I’m deeply sorry… I betrayed your trust… I take full responsibility for my actions… and I now realize I have a serious problem,” he said.
The victims listening to his apology wiped away tears as they sat in the courtroom. They told CTV Ottawa that although his crimes can’t be undone, justice at least will be served.
Vincent Nadon of Chelsea, Que. appears outside court on Wednesday, Feb. 21, 2018.
|
###############################################################################
#
# file: constants.py
#
# Purpose: refer to module documentation for details
#
# Note: This file is part of Termsaver-Figlet plugin, and should not be
# used or executed separately.
#
###############################################################################
#
# Copyright 2012 Termsaver
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
###############################################################################
"""
Holds constant values used throughout termsaver-figlet plugin.
"""
#
# Termsaver modules
#
from termsaverlib.constants import PropertyClass
class Plugin(PropertyClass):
    """
    Application-level properties of the termsaver-figlet plugin, consumed by
    its screens. Each property is documented by the comment above it.
    """

    # Plugin version string. Read during the install process and shown in
    # help/usage messages. See the CHANGELOG file for the project history.
    VERSION = "0.1"

    # Package name of the termsaver-figlet plugin.
    NAME = 'termsaver-figlet'

    # Official plugin title, as it should appear in documentation.
    TITLE = 'TermSaver Figlet Plugin'

    # Short description of what the plugin provides.
    DESCRIPTION = 'A set of screens for termsaver using figlet.'

    # Official website of the plugin.
    URL = 'http://www.termsaver.info/plugins'

    # Official source-code repository, hosted on GitHub.
    SOURCE_URL = 'http://github.com/brunobraga/termsaver'

    # Everyone who has contributed to the plugin.
    AUTHORS = ['Bruno Braga <bruno.braga@gmail.com>']
class Settings(PropertyClass):
    """
    Configuration settings used by the termsaver-figlet plugin.

    New settings should follow the pattern of one assignment per setting,
    each documented right where it is declared:

        SETTING_NAME = VALUE  # what the setting controls
    """
    pass
|
One of Indonesia's tire maker with stellar brands Achilles, Corsa, and Strada, PT Multistrada Arah Sarana Tbk (MASA) experienced a slight decrease in net profit during the first 9 months of this year, despite lower foreign exchange gain.
The company, submitted the financial report to Indonesia Stock Exchange (IDX) today, posted Rp114.97 billion net profit or Rp18.8 per share in 9M 2010, a 7.33% decrease from Rp124.07 billion or Rp20.3 per share earning a year earlier.
Foreign exchange gain bagged by Multistrada shrank 86.47% from Rp73.89 billion in 9M 2009 to Rp10 billion in 9M 2010, lowering its bottom line.
But, at the operational line, the company posted robust performances. Income from operations inched up 26.38% from Rp138.13 billion at the end of September 2009 to Rp174.57 billion in 9M 2010, putting the margin at the level of 11.47%, a slight decrease from 11.52% a year earlier.
In addition, gross profit remained intact. Multistrada booked a 24.62% increase from Rp249.88 billion in 9M 2009 to Rp311.39 billion in 9M 2010. The top line rose 27.73% to Rp1.52 trillion in 9M 2010 from Rp1.19 trillion in 9M 2009.
|
# -*- coding: utf-8 -*-
######################################################################
#
# Note: Program metadata is available in /__init__.py
#
######################################################################
from openerp.osv import osv
class account_voucher(osv.osv):
    """Extension of account.voucher that mirrors voucher postings into the
    journal's payable/receivable accounts for journals flagged with
    support_creditcard_transactions, and cleans those mirrored moves up again
    on cancellation."""
    _inherit = 'account.voucher'
    def action_move_line_create(self, cr, uid, ids, context=None):
        """Create the standard voucher move lines, then, for credit-card
        journals, build an extra account move that rebooks the voucher
        amounts against the journal partner's payable/receivable account.

        :param cr: database cursor
        :param uid: current user id
        :param ids: ids of the vouchers being validated
        :param context: optional context dictionary
        :return: result of the parent implementation (passed through)
        """
        res = super(account_voucher, self).action_move_line_create(cr, uid, ids, context)
        move_line_pool = self.pool.get('account.move.line')
        move_pool = self.pool.get('account.move')
        for voucher in self.browse(cr, uid, ids):
            if voucher.journal_id.support_creditcard_transactions:
                company_currency = self._get_company_currency(cr, uid, voucher.id, context)
                current_currency = self._get_current_currency(cr, uid, voucher.id, context)
                # Counterpart partner/accounts come from the journal's partner,
                # falling back to the voucher partner for the move lines below.
                partner_id = voucher.journal_id.partner_id
                account_payable = voucher.journal_id.partner_id.property_account_payable
                account_receivable = voucher.journal_id.partner_id.property_account_receivable
                account = voucher.journal_id.default_credit_account_id
                if voucher.type in ('receipt', 'sale'):
                    account = voucher.journal_id.default_debit_account_id
                # Create the account move record.
                move_id = move_pool.create(cr, uid, self.account_move_get(cr, uid, voucher.id, context=context), context=context)
                # fline is the voucher's first (counterpart) move line; its
                # credit/debit get swapped below to mirror the rebooking.
                fline = self.first_move_line_get(cr,uid,voucher.id, move_id, company_currency, current_currency, context)
                fline.update({
                    'partner_id': partner_id and partner_id.id or voucher.partner_id.id,
                })
                credit, debit = fline.get('credit'), fline.get('debit')
                alines = [line.id for line in voucher.line_ids if line.amount]
                ctx = context and context.copy() or {}
                ctx.update({'date': voucher.date})
                if alines:
                    for line in voucher.line_ids:
                        #create one move line per voucher line where amount is not 0.0
                        if not line.amount:
                            continue
                        amount = self._convert_amount(cr, uid, line.amount, voucher.id, context=ctx)
                        line_debit = line_credit = 0.0
                        if voucher.type in ('purchase', 'payment'):
                            line_credit = amount
                        elif voucher.type in ('sale', 'receipt'):
                            line_debit = amount
                        # Normalize negative amounts by flipping them to the
                        # opposite side of the entry.
                        if line_debit < 0: line_credit = -line_debit; line_debit = 0.0
                        if line_credit < 0: line_debit = -line_credit; line_credit = 0.0
                        move_line = {
                            'journal_id': voucher.journal_id.id,
                            'period_id': voucher.period_id.id,
                            'name': line.name or '/',
                            'account_id': account_payable.id,
                            'move_id': move_id,
                            'partner_id': partner_id and partner_id.id or voucher.partner_id.id,
                            # Py2 `<>` inequality: keep a foreign currency only
                            # when it differs from the company currency.
                            'currency_id': line.move_line_id and (company_currency <> line.move_line_id.currency_id.id and line.move_line_id.currency_id.id) or False,
                            'analytic_account_id': line.account_analytic_id and line.account_analytic_id.id or False,
                            'quantity': 1,
                            'credit': credit,
                            'debit': debit,
                            'date': voucher.date
                        }
                        if voucher.type in ('payment', 'purchase'):
                            move_line.update({'account_id': account_payable.id})
                            if line.type=='cr':
                                move_line['debit'] = line_debit
                                # Mirror the counterpart by swapping its sides.
                                fline.update({
                                    'credit': debit, 'debit': credit,
                                })
                            else:
                                move_line['credit'] = line_credit
                                fline.update({
                                    'credit': debit, 'debit': credit,
                                    'account_id': account.id
                                })
                        if voucher.type in ('receipt', 'sale'):
                            move_line.update({'account_id': account_receivable.id})
                            if line.type=='cr':
                                fline.update({
                                    'credit': debit, 'debit': credit,
                                    'account_id': account.id
                                })
                                move_line['debit'] = line_debit
                            else:
                                move_line['credit'] = line_credit
                                fline.update({
                                    'credit': debit, 'debit': credit,
                                })
                        move_line_pool.create(cr, uid, move_line, context)
                else:
                    # No voucher lines with an amount: book a single line for
                    # the whole counterpart amount.
                    amount = self._convert_amount(cr, uid, (credit+debit), voucher.id, context=ctx)
                    line_debit = line_credit = 0.0
                    if voucher.type in ('purchase', 'payment'):
                        line_credit = amount
                    elif voucher.type in ('sale', 'receipt'):
                        line_debit = amount
                    if line_debit < 0: line_credit = -line_debit; line_debit = 0.0
                    if line_credit < 0: line_debit = -line_credit; line_credit = 0.0
                    move_line = {
                        'journal_id': voucher.journal_id.id,
                        'period_id': voucher.period_id.id,
                        'name': voucher.name or '/',
                        'account_id': account_payable.id,
                        'move_id': move_id,
                        'partner_id': partner_id and partner_id.id or voucher.partner_id.id,
                        'quantity': 1,
                        'credit': credit,
                        'debit': debit,
                        # NOTE(review): duplicate 'debit' key -- this 0.0 wins
                        # and the 'debit': debit entry above is dead. Confirm
                        # whether 0.0 (overridden below for receipts/sales
                        # when credit > 0) is really the intent.
                        'debit': 0.0,
                        'date': voucher.date
                    }
                    if voucher.type in ('receipt', 'sale'):
                        move_line.update({'account_id': account_receivable.id})
                    if (credit > 0):
                        move_line['debit'] = amount
                    else:
                        move_line['credit'] = amount
                    move_line_pool.create(cr, uid, move_line, context)
                move_line_pool.create(cr, uid, fline, context)
        return res
    def cancel_voucher(self, cr, uid, ids, context=None):
        """Cancel the vouchers: unreconcile and delete their moves, and also
        remove the mirrored credit-card moves created by
        action_move_line_create (matched by the voucher number).

        :return: True on completion
        """
        reconcile_pool = self.pool.get('account.move.reconcile')
        move_pool = self.pool.get('account.move')
        for voucher in self.browse(cr, uid, ids, context=context):
            voucher_number = voucher.number
            recs = []
            # Collect full and partial reconciliations tied to the voucher's
            # move lines so they can be removed before the move itself.
            for line in voucher.move_ids:
                if line.reconcile_id:
                    recs += [line.reconcile_id.id]
                if line.reconcile_partial_id:
                    recs += [line.reconcile_partial_id.id]
            reconcile_pool.unlink(cr, uid, recs)
            if voucher.move_id:
                move_pool.button_cancel(cr, uid, [voucher.move_id.id])
                move_pool.unlink(cr, uid, [voucher.move_id.id])
            if voucher_number and voucher.journal_id.support_creditcard_transactions:
                # The mirrored move shares the voucher number as its name.
                cc_move = move_pool.search(cr, uid, [("name", "=", voucher_number)], context=context)
                for move in move_pool.browse(cr, uid, cc_move, context=context):
                    if move.journal_id.support_creditcard_transactions:
                        recs = []
                        for line in move.line_id:
                            if line.reconcile_id:
                                recs += [line.reconcile_id.id]
                            if line.reconcile_partial_id:
                                recs += [line.reconcile_partial_id.id]
                        reconcile_pool.unlink(cr, uid, recs, context=context)
                        move_pool.button_cancel(cr, uid, [move.id], context=context)
                        move_pool.unlink(cr, uid, [move.id], context=context)
        res = {
            'state':'cancel',
            'move_id':False,
        }
        self.write(cr, uid, ids, res)
        return True
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
International crime investigation body Interpol has confirmed that one Austrian and one Italian passport used by passengers boarding the missing Malaysia Airlines flight MH370 were on its record of stolen travel documents.
"The Austrian and Italian passports were added to Interpol's stolen and lost documentation database after their theft in Thailand in 2012 and 2013 respectively," the organisation says, adding that it is conducting checks on all other passports used to board flight MH370 which may have been reported stolen.
It notes that no checks of the stolen Austrian and Italian passports were made by any country between the time they were entered into the database and the departure of flight MH370. "At this time, Interpol is therefore unable to determine on how many other occasions these passports were used to board flights or cross borders," it adds.
“Whilst it is too soon to speculate about any connection between these stolen passports and the missing plane, it is clearly of great concern that any passenger was able to board an international flight using a stolen passport listed in Interpol's databases,” says its secretary general Ronald Noble.
Interpol says it is in contact with its National Central Bureaus in the involved countries to determine the true identities of the passengers who used these stolen passports to board the missing Malaysia Airlines flight.
The search continues to find flight MH370 which Malaysian air traffic controllers lost contact with at 01:30 on the morning of Saturday 8 March. The Boeing 777-200 was carrying 227 passengers and 12 crew.
|
from GenerateNoise import noise
from PIL import Image, ImageDraw
import math
SCALE = 1  # noise scale: multiplier applied to the generated noise value
def _clamp(channel):
    """Clamp a single colour-channel value into the valid [0, 255] range."""
    return max(0, min(255, channel))
def direct(rect, usernumber, userquantity, coordinate, dimension, seed):
    """Overlay noise onto a rectangular cluster (tile) of an image, in place.

    The noise value is generated once per cluster, scaled by ``SCALE``,
    truncated toward zero, and added to every channel of every pixel, with
    each channel clamped to [0, 255].

    The parameters other than ``rect`` are forwarded verbatim to
    ``noise`` (see GenerateNoise) -- their exact semantics live there.

    :param rect: PIL Image cluster to modify
    :return: the same (modified) image object
    """
    n = noise(usernumber, userquantity, coordinate, dimension, seed)
    n *= SCALE
    if n == 0.0:
        return rect
    # Truncate toward zero: floor for positive values, ceil for negative
    # ones -- exactly what int() does for floats.
    n = int(n)
    (width, height) = rect.size
    pix = rect.load()  # pixel access object of the cluster
    draw = ImageDraw.Draw(rect)
    for i in range(width):
        for j in range(height):
            (r, g, b) = pix[i, j]
            # Shift every channel by the noise value, clamped to [0, 255].
            draw.point((i, j), (_clamp(r + n), _clamp(g + n), _clamp(b + n)))
    return rect
def diff(original, copy):
    """Return the maximum absolute per-channel difference between two
    same-sized images, normalised by ``SCALE``.

    Fixes two defects in the previous version: only channels 0 and 1
    (R and G) were compared (``range(2)`` skipped blue), and the *signed*
    difference was stored after comparing magnitudes, so the result could
    disagree with the documented "maximum of the absolute difference".

    :param original: reference PIL Image
    :param copy: modified PIL Image of the same size
    :return: max |copy - original| over all pixels and channels, / SCALE
    """
    pix_o = original.load()
    pix_c = copy.load()
    (width, height) = original.size
    largest = 0.0
    for i in range(width):
        for j in range(height):
            # Compare all three colour channels by absolute difference.
            for k in range(3):
                delta = abs(pix_c[i, j][k] - pix_o[i, j][k])
                if delta > largest:
                    largest = delta
    return largest / SCALE
|
The 2013 Global Summit, held in Seoul, Korea, was nothing short of inspiring. The jaw-dropping performances and immaculate venue added pizzazz to an event that was already brimming with enthusiasm. When 4,000 Synergy Team Members come together, there is an undeniable energy.
A huge thank you to leaders and staff in Korea who organized a monumental event that will go down in Synergy WorldWide history. This extraordinary team created the perfect atmosphere in which to congratulate Team Members for their hard-earned advancements.
The week started with city tours, where Team Members spent the majority of the day exploring historic and cultural landmarks. Later that evening, Team Members who achieved Team Director and above gathered for a VIP Reception in the Coex Conference Center, complete with delicious catering and unique musical performances.
Day two began with country meetings and proceeded into the opening session, where each country paraded across an extravagant stage set before thousands of their peers, friends, and above all, Synergy family. All who attended felt the pride each Team Member has for their country and for this incredible company. Dr. Stacey Bell kicked off the official launch of SLMsmart, Synergy’s weight management system, in Korea. All SLMsmart products and other swag were available for purchase in the Synergy store.
Throughout the course of the event, Team Members witnessed the talent of world-class performers, viewing a laser light show, drummers, vocalists and other authentic Korean musical numbers.
All Team Members who rank advanced were put in the spotlight on the final day, and some were celebrated in an explosion of confetti, smoke and applause. Five were recognized for achieving presidential status and above, while seven Team Members were ushered into the Million Dollar Club.
While many individuals were celebrated during this ceremony, it was truly a celebration for the many achievements ALL of Synergy’s Team Members have had across the globe.
Connected as a global company, we felt support and passion from each market, and know this passion is what keeps Synergy WorldWide thriving year after year. Our Team Members are the lifeblood of the company, and it was truly a privilege to join with so many of you and build momentum for the coming year.
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===================================================================
"""TPU system metdata and associated tooling."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from contextlib import contextmanager
import copy
import numpy as np
from tensorflow.contrib.tpu.python.tpu import device_assignment as tpu_device_assignment
from tensorflow.contrib.tpu.python.tpu import tpu_system_metadata as tpu_system_metadata_lib
from tensorflow.python.estimator import model_fn as model_fn_lib
from tensorflow.python.platform import tf_logging as logging
_DEFAULT_JOB_NAME = 'tpu_worker'  # job name assumed for TPU workers when none can be inferred
_DEFAULT_COORDINATOR_JOB_NAME = 'coordinator'  # job name assumed for the coordinator in two-job clusters
_LOCAL_MASTERS = ('', 'local')  # master addresses that denote an in-process/local session
class _TPUContext(object):
  """A context holds immutable states of TPU computation.
  This immutable object holds TPUEstimator config, train/eval batch size, and
  `TPUEstimator.use_tpu`, which is expected to be passed around. It also
  provides utility functions, based on the current state, to determine other
  information commonly required by TPU computation, such as TPU device names,
  TPU hosts, shard batch size, etc.
  N.B. As `mode` is not immutable state in Estimator, but essential to
  distinguish between TPU training and evaluation, a common usage for
  _TPUContext with `mode` is as follows:
  ```
  with _ctx.with_mode(mode) as ctx:
    if ctx.is_running_on_cpu():
      ...
  ```
  """
  def __init__(self, config, train_batch_size, eval_batch_size,
               predict_batch_size, use_tpu):
    # RunConfig plus the user-requested *global* batch size for each mode.
    self._config = config
    self._train_batch_size = train_batch_size
    self._eval_batch_size = eval_batch_size
    self._predict_batch_size = predict_batch_size
    self._use_tpu = use_tpu
    # Model parallelism is enabled iff TPU is in use and a (truthy)
    # computation_shape was supplied in TPUConfig.
    self._model_parallelism_enabled = (
        use_tpu and config.tpu_config.computation_shape)
    self._mode = None
    # Lazily-populated caches; effectively immutable once filled, as the
    # values are stable for the process lifetime.
    self._lazy_tpu_system_metadata_dict = {}  # key by master address
    self._lazy_device_assignment_dict = {}  # key by master address
    self._lazy_validation_dict = {}  # key by ModeKeys
  def _assert_mode(self):
    """Returns the current mode, raising if `with_mode` was never entered."""
    if self._mode is None:
      raise RuntimeError(
          '`mode` needs to be set via contextmanager `with_mode`.')
    return self._mode
  @contextmanager
  def with_mode(self, mode):
    """Yields a shallow copy of this context with `mode` set."""
    # NOTE(xiejw): Shallow copy is enough. It will share the lazy dictionaries,
    # such as _lazy_tpu_system_metadata_dict between new copy and the original
    # one. Note that all lazy states stored in properties _lazy_foo are sort of
    # immutable as they should be same for the process lifetime.
    new_ctx = copy.copy(self)
    new_ctx._mode = mode  # pylint: disable=protected-access
    yield new_ctx
  @property
  def mode(self):
    """Current ModeKeys value; requires an active `with_mode` scope."""
    return self._assert_mode()
  def _get_master_address(self):
    """Returns the TF master address appropriate for the current mode."""
    mode = self._assert_mode()
    config = self._config
    master = (
        config.master
        if mode != model_fn_lib.ModeKeys.EVAL else config.evaluation_master)
    return master
  def _get_tpu_system_metadata(self):
    """Gets the (maybe cached) TPU system metadata."""
    master = self._get_master_address()
    tpu_system_metadata = self._lazy_tpu_system_metadata_dict.get(master)
    if tpu_system_metadata is not None:
      return tpu_system_metadata
    # pylint: disable=protected-access
    tpu_system_metadata = (
        tpu_system_metadata_lib._query_tpu_system_metadata(
            master,
            run_config=self._config,
            query_topology=self.model_parallelism_enabled))
    self._lazy_tpu_system_metadata_dict[master] = tpu_system_metadata
    return tpu_system_metadata
  def _get_device_assignment(self):
    """Gets the (maybe cached) TPU device assignment."""
    master = self._get_master_address()
    device_assignment = self._lazy_device_assignment_dict.get(master)
    if device_assignment is not None:
      return device_assignment
    tpu_system_metadata = self._get_tpu_system_metadata()
    device_assignment = tpu_device_assignment.device_assignment(
        tpu_system_metadata.topology,
        computation_shape=self._config.tpu_config.computation_shape,
        num_replicas=self.num_replicas)
    logging.info('computation_shape: %s',
                 str(self._config.tpu_config.computation_shape))
    logging.info('num_replicas: %d', self.num_replicas)
    logging.info('device_assignment.topology.device_coordinates: %s',
                 str(device_assignment.topology.device_coordinates))
    logging.info('device_assignment.core_assignment: %s',
                 str(device_assignment.core_assignment))
    self._lazy_device_assignment_dict[master] = device_assignment
    return device_assignment
  @property
  def model_parallelism_enabled(self):
    """Whether each replica spans more than one TPU core."""
    return self._model_parallelism_enabled
  @property
  def device_assignment(self):
    """Device assignment, or None when model parallelism is disabled."""
    return (self._get_device_assignment()
            if self._model_parallelism_enabled else None)
  @property
  def num_of_cores_per_host(self):
    """Number of TPU cores per host, from the TPU system metadata."""
    metadata = self._get_tpu_system_metadata()
    return metadata.num_of_cores_per_host
  @property
  def num_cores(self):
    """Total number of TPU cores in the system."""
    metadata = self._get_tpu_system_metadata()
    return metadata.num_cores
  @property
  def num_of_replicas_per_host(self):
    """Number of model replicas placed on each host."""
    if self.model_parallelism_enabled:
      return self.num_replicas // self.num_hosts
    else:
      # Without model parallelism, one replica per core.
      return self.num_of_cores_per_host
  @property
  def num_replicas(self):
    """Total number of replicas: cores / cores-per-replica."""
    num_cores_in_system = self.num_cores
    if self.model_parallelism_enabled:
      computation_shape_array = np.asarray(
          self._config.tpu_config.computation_shape, dtype=np.int32)
      num_cores_per_replica = np.prod(computation_shape_array)
      if num_cores_per_replica > num_cores_in_system:
        raise ValueError(
            'The num of cores required by the model parallelism, specified by '
            'TPUConfig.computation_shape, is larger than the total num of '
            'TPU cores in the system. computation_shape: {}, num cores '
            'in the system: {}'.format(
                self._config.tpu_config.computation_shape,
                num_cores_in_system))
      if num_cores_in_system % num_cores_per_replica != 0:
        raise RuntimeError(
            'The num of cores in the system ({}) is not divisible by the num '
            'of cores ({}) required by the model parallelism, specified by '
            'TPUConfig.computation_shape. This should never happen!'.format(
                num_cores_in_system, num_cores_per_replica))
      return num_cores_in_system // num_cores_per_replica
    else:
      return num_cores_in_system
  @property
  def num_hosts(self):
    """Number of TPU hosts in the system."""
    metadata = self._get_tpu_system_metadata()
    return metadata.num_hosts
  @property
  def config(self):
    """The RunConfig this context was constructed with."""
    return self._config
  def is_input_sharded_per_core(self):
    """Return true if input_fn is invoked per-core (other than per-host)."""
    mode = self._assert_mode()
    return (mode == model_fn_lib.ModeKeys.TRAIN and
            not self._config.tpu_config.per_host_input_for_training)
  def is_running_on_cpu(self, is_export_mode=False):
    """Determines whether the input_fn and model_fn should be invoked on CPU.
    This API also validates user provided configuration, such as batch size,
    according to the lazily initialized TPU system metadata.
    Args:
      is_export_mode: Indicates whether the current mode is for exporting the
        model, when mode == PREDICT. Only with this bool, we could
        tell whether user is calling the Estimator.predict or
        Estimator.export_savedmodel, which are running on TPU and CPU
        respectively. Parent class Estimator does not distinguish these two.
    Returns:
      bool, whether current input_fn or model_fn should be running on CPU.
    Raises:
      ValueError: any configuration is invalid.
    """
    is_running_on_cpu = self._is_running_on_cpu(is_export_mode)
    if not is_running_on_cpu:
      self._validate_tpu_configuration()
    return is_running_on_cpu
  def _is_running_on_cpu(self, is_export_mode):
    """Determines whether the input_fn and model_fn should be invoked on CPU."""
    mode = self._assert_mode()
    if not self._use_tpu:
      return True
    if mode != model_fn_lib.ModeKeys.PREDICT:
      return False
    # There are actually 2 use cases when running with mode.PREDICT: prediction
    # and saving the model. We run actual predictions on the TPU, but
    # model export is run on the CPU.
    if is_export_mode:
      return True
    return False
  @property
  def global_batch_size(self):
    """User-specified global batch size for the current mode (or None)."""
    mode = self._assert_mode()
    if mode == model_fn_lib.ModeKeys.TRAIN:
      return self._train_batch_size
    elif mode == model_fn_lib.ModeKeys.EVAL:
      return self._eval_batch_size
    elif mode == model_fn_lib.ModeKeys.PREDICT:
      return self._predict_batch_size
    else:
      return None
  @property
  def batch_size_for_input_fn(self):
    """Returns the shard batch size for `input_fn`."""
    global_batch_size = self.global_batch_size
    if self.is_running_on_cpu():
      return global_batch_size
    # On TPU
    if self.is_input_sharded_per_core():
      # We prohibit per core input sharding for the model parallelism case,
      # therefore it is safe to use num_cores here.
      return global_batch_size // self.num_cores
    else:
      return global_batch_size // self.num_hosts
  @property
  def batch_size_for_model_fn(self):
    """Returns the shard batch size for `model_fn`."""
    global_batch_size = self.global_batch_size
    if self.is_running_on_cpu():
      return global_batch_size
    # On TPU. always sharded per shard.
    return global_batch_size // self.num_replicas
  @property
  def master_job(self):
    """Returns the job name to use to place TPU computations on.
    Returns:
      A string containing the job name, or None if no job should be specified.
    Raises:
      ValueError: If the user needs to specify a tpu_job_name, because we are
        unable to infer the job name automatically, or if the user-specified job
        names are inappropriate.
    """
    run_config = self._config
    # If the user specifies the tpu_job_name, use that.
    if run_config.tpu_config.tpu_job_name:
      return run_config.tpu_config.tpu_job_name
    # The tpu job is determined by the run_config. Right now, this method is
    # required as tpu_config is not part of the RunConfig.
    mode = self._assert_mode()
    master = (
        run_config.evaluation_master
        if mode == model_fn_lib.ModeKeys.EVAL else run_config.master)
    if master in _LOCAL_MASTERS:
      return None
    if (not run_config.session_config or
        not run_config.session_config.cluster_def.job):
      return _DEFAULT_JOB_NAME
    cluster_def = run_config.session_config.cluster_def
    job_names = set([job.name for job in cluster_def.job])
    if _DEFAULT_JOB_NAME in job_names:
      # b/37868888 tracks allowing ClusterSpec propagation to reuse job names.
      raise ValueError('Currently, tpu_worker is not an allowed job name.')
    if len(job_names) == 1:
      return cluster_def.job[0].name
    if len(job_names) == 2:
      if _DEFAULT_COORDINATOR_JOB_NAME in job_names:
        job_names.remove(_DEFAULT_COORDINATOR_JOB_NAME)
        return job_names.pop()
    # TODO(b/67716447): Include more sophisticated heuristics.
    raise ValueError(
        'Could not infer TPU job name. Please specify a tpu_job_name as part '
        'of your TPUConfig.')
  @property
  def tpu_host_placement_function(self):
    """Returns the TPU host place function."""
    master = self.master_job
    def _placement_function(_sentinal=None, core_id=None, host_id=None):  # pylint: disable=invalid-name
      assert _sentinal is None
      if core_id is not None and host_id is not None:
        raise RuntimeError(
            'core_id and host_id can have only one non-None value.')
      if master is None:
        return '/replica:0/task:0/device:CPU:0'
      else:
        if core_id is not None:
          # NOTE(review): relies on Python 2 integer division to produce an
          # integral task index; under Python 3 `/` would yield a float.
          host_id = core_id / self.num_of_cores_per_host
        return '/job:%s/task:%d/device:CPU:0' % (master, host_id)
    return _placement_function
  @property
  def tpu_device_placement_function(self):
    """Returns a TPU device placement Fn."""
    master = self.master_job
    job_device = '' if master is None else ('/job:%s' % master)
    def _placement_function(i):
      if self.model_parallelism_enabled:
        return self.device_assignment.tpu_device(replica=i, job=master)
      else:
        num_of_cores_per_host = self.num_of_cores_per_host
        # NOTE(review): same Python 2 `/` integer-division reliance as above.
        host_id = i / num_of_cores_per_host
        ordinal_id = i % num_of_cores_per_host
        return '%s/task:%d/device:TPU:%d' % (job_device, host_id, ordinal_id)
    return _placement_function
  @property
  def tpu_ordinal_function(self):
    """Returns the TPU ordinal fn."""
    def _tpu_ordinal_function(index):
      """Return the TPU ordinal associated with a shard.
      Required because the enqueue ops are placed on CPU.
      Args:
        index: the shard index
      Returns:
        The ordinal of the TPU device the shard's infeed should be placed on.
      """
      if self.model_parallelism_enabled:
        return self.device_assignment.tpu_ordinal(replica=index)
      else:
        return index % self.num_of_cores_per_host
    return _tpu_ordinal_function
  def _validate_tpu_configuration(self):
    """Validates the configuration based on the TPU system metadata."""
    mode = self._assert_mode()
    # Validation for a given mode only needs to run once per context.
    if self._lazy_validation_dict.get(mode):
      return
    # All following information is obtained from TPU system metadata.
    num_cores = self.num_cores
    num_replicas = self.num_replicas
    num_hosts = self.num_hosts
    if not num_cores:
      tpu_system_metadata = self._get_tpu_system_metadata()
      raise RuntimeError(
          'Cannot find any TPU cores in the system. Please double check '
          'Tensorflow master address and TPU worker(s). Available devices '
          'are {}.'.format(tpu_system_metadata.devices))
    if self._config.tpu_config.num_shards:
      user_provided_num_replicas = self._config.tpu_config.num_shards
      if user_provided_num_replicas != num_replicas:
        message = (
            'TPUConfig.num_shards is not set correctly. According to TPU '
            'system metadata for Tensorflow master ({}): num_replicas should '
            'be ({}), got ({}). For non-model-parallelism, num_replicas should '
            'be the total num of TPU cores in the system. For '
            'model-parallelism, the total number of TPU cores should be '
            'product(computation_shape) * num_replicas. Please set it '
            'accordingly or leave it as `None`'.format(
                self._get_master_address(), num_replicas,
                user_provided_num_replicas))
        raise ValueError(message)
    if mode == model_fn_lib.ModeKeys.TRAIN:
      if self._train_batch_size % num_replicas != 0:
        raise ValueError(
            'train batch size {} must be divisible by number of replicas {}'
            .format(self._train_batch_size, num_replicas))
    elif mode == model_fn_lib.ModeKeys.EVAL:
      if self._eval_batch_size is None:
        raise ValueError(
            'eval_batch_size in TPUEstimator constructor cannot be `None`'
            'if .evaluate is running on TPU.')
      if self._eval_batch_size % num_replicas != 0:
        raise ValueError(
            'eval batch size {} must be divisible by number of replicas {}'
            .format(self._eval_batch_size, num_replicas))
      if num_hosts > 1:
        raise ValueError(
            'TPUEstimator.evaluate should be running on single TPU worker. '
            'got {}.'.format(num_hosts))
    else:
      assert mode == model_fn_lib.ModeKeys.PREDICT
      if self._predict_batch_size is None:
        raise ValueError(
            'predict_batch_size in TPUEstimator constructor should not be '
            '`None` if .predict is running on TPU.')
      if self._predict_batch_size % num_replicas != 0:
        raise ValueError(
            'predict batch size {} must be divisible by number of replicas {}'
            .format(self._predict_batch_size, num_replicas))
      if num_hosts > 1:
        raise ValueError(
            'TPUEstimator.predict should be running on single TPU worker. '
            'got {}.'.format(num_hosts))
    # Record the state "validated" into lazy dictionary.
    self._lazy_validation_dict[mode] = True
class _OneCoreTPUContext(_TPUContext):
  """Special _TPUContext for one core usage."""
  def __init__(self, config, train_batch_size, eval_batch_size,
               predict_batch_size, use_tpu):
    super(_OneCoreTPUContext, self).__init__(
        config, train_batch_size, eval_batch_size,
        predict_batch_size, use_tpu)
  def _get_tpu_system_metadata(self):
    """Gets the (maybe cached) TPU system metadata."""
    master = self._get_master_address()
    tpu_system_metadata = self._lazy_tpu_system_metadata_dict.get(master)
    if tpu_system_metadata is not None:
      return tpu_system_metadata
    # Synthesize fixed single-core metadata instead of querying the system.
    tpu_system_metadata = (
        tpu_system_metadata_lib._TPUSystemMetadata(  # pylint: disable=protected-access
            num_cores=1,
            num_hosts=1,
            num_of_cores_per_host=1,
            topology=None,
            devices=[]))
    self._lazy_tpu_system_metadata_dict[master] = tpu_system_metadata
    return tpu_system_metadata
def _get_tpu_context(config, train_batch_size, eval_batch_size,
                     predict_batch_size, use_tpu):
  """Returns an instance of `_TPUContext`.

  Uses the single-core `_OneCoreTPUContext` when num_shards == 1 without
  model parallelism (an unsupported configuration that is tolerated with a
  warning), otherwise the general `_TPUContext`.
  """
  if (config.tpu_config.num_shards == 1 and
      config.tpu_config.computation_shape is None):
    # Fixed the previously malformed parenthetical in this warning message.
    logging.warning(
        'Setting TPUConfig.num_shards==1 is an unsupported behavior. '
        'Please fix as soon as possible (leaving num_shards as None).')
    return _OneCoreTPUContext(config, train_batch_size, eval_batch_size,
                              predict_batch_size, use_tpu)
  return _TPUContext(config, train_batch_size, eval_batch_size,
                     predict_batch_size, use_tpu)
|
Abstract: This text walks the reader through the process, production, and presentation of “if you love me…”, a collaborative exhibition that sought to approach curating as an artistic practice in and of itself. Channelling productivist strategies of “functional transformation”, the co-curators intervened in identified sites of critique, in an attempt to birth actualities from within impossibilities. Collaborative efforts bridged institutional, ideological, and economic lines, involving a cross-disciplinary group of artists, scientists, labourers and hustlers. Interventions occupied ecosystems of the site(s), and at the same time were inhabited by them. In our several curatorial roles, we were the paradoxical colonisers who strove toward freedom, and by analogy, invasive bacteria equally vulnerable to infestation. The exhibition was multiple-sited with roots at the Locomotive Shed of the Kumasi Railway, and surrounding city streets and other lines along Ghana rail tracks. Our “garden of forking tracks” calls to mind an open-ended labyrinth with portals and trails along which participant audiences construct their chosen routes (or roots). Through visual, aural and sensory resonances attuned to multiple modes of perception—bricolage, installation, projection, performance, imagery, event and ingestion—the invited artists and their collaborators reconstructed debris of our times, and reimagined their potentials. Material and semiotic reconfigurations created other worlds within their world, and other positions from which to enter them. The open-ended, cross-genre and nonhierarchical curatorial strategy resonates with the rhetorical prose style, lowercase titling and unorthodox chapter organisation of this accompanying text. The contents are not exclusively my own, but a shared initiative of numerous individuals who have been labouring over the years, and a number of whom came together for this brief moment in time in the name of love.
|
# uncompyle6 version 2.9.10
# Python bytecode 2.7 (62211)
# Decompiled from: Python 3.6.0b2 (default, Oct 11 2016, 05:27:10)
# [GCC 6.2.0 20161005]
# Embedded file name: __init__.py
import dsz
def GetWellKnownSid(wellknown, addr=None):
    """Resolve a well-known SID name on the target, with per-target caching.

    The result is cached in a dsz environment variable keyed by the name.
    Returns the resolved SID string, or ``wellknown`` unchanged on failure.
    """
    if addr is None:
        # Resolve the default target at call time; the original def-time
        # default froze whatever address was current at import.
        addr = dsz.script.Env['target_address']
    x = dsz.control.Method()  # scoped guard restoring echo state on exit
    dsz.control.echo.Off()
    envName = '_WELLKNOWN_SID_%s' % wellknown
    if dsz.env.Check(envName, 0, addr):
        return dsz.env.Get(envName, 0, addr)
    if not dsz.cmd.Run('dst=%s sidlookup -wellknown "%s"' % (addr, wellknown), dsz.RUN_FLAG_RECORD):
        return wellknown
    try:
        name = dsz.cmd.data.Get('Sid::Name', dsz.TYPE_STRING)
        try:
            dsz.env.Set(envName, name[0], 0, addr)
        except Exception:
            # Caching is best-effort; the lookup itself succeeded.
            pass
        return name[0]
    except Exception:
        # No usable data returned; fall back to the input name.
        return wellknown
def GetUserSid(sid, local=False, addr=None):
    """Resolve a user name to its SID on the target, with per-target caching.

    Returns the resolved SID string, or ``sid`` unchanged on failure.
    NOTE(review): ``local`` is accepted but never used -- confirm whether a
    local-lookup flag should be forwarded to sidlookup.
    """
    if addr is None:
        # Resolve the default target at call time; the original def-time
        # default froze whatever address was current at import.
        addr = dsz.script.Env['target_address']
    x = dsz.control.Method()  # scoped guard restoring echo state on exit
    dsz.control.echo.Off()
    envName = '_USER_SID_%s' % sid
    if dsz.env.Check(envName, 0, addr):
        return dsz.env.Get(envName, 0, addr)
    if not dsz.cmd.Run('dst=%s sidlookup -user -name "%s"' % (addr, sid), dsz.RUN_FLAG_RECORD):
        return sid
    try:
        name = dsz.cmd.data.Get('Sid::Name', dsz.TYPE_STRING)
        try:
            dsz.env.Set(envName, name[0], 0, addr)
        except Exception:
            # Caching is best-effort; the lookup itself succeeded.
            pass
        return name[0]
    except Exception:
        # No usable data returned; fall back to the input.
        return sid
|
It tribes a user from both finals to work it a success. It cells a common from both highs to make it a good. Imagine not going to type endless seniors to try and get to trade someone and start trading, now you dating australian banknotes mend to the most you are kept in and find out there if they are expected with you. Previously meantime url thousands of enormous life couples, angelreturn.
They also retain key aspects of the previous Note series—the people portrayed, colour palette, the Note size and denomination—but incorporate entirely new security features and designs. Queen Elizabeth II Queen Elizabeth II was born in London on the 21st of April 1926. When her father died in 1952, Elizabeth became Head of the Commonwealth and queen regnant of seven independent Commonwealth countries. Federal Parliament meetings were first held in Melbourne until 1927. Prior to 1988, the Parliament of Australia met in the Provisional Parliament House, which is now known as Old Parliament House.
Australia has dating australian banknotes of the safest and most secure currencies in the world and has experienced relatively low levels of counterfeiting for many years. To ensure that this continues to be the case, the Reserve Bank researches anti-counterfeit technologies and developments in banknote design. Considerable work has already been undertaken on this project, including the development and review of banknote designs and production trials of new security features.
It is important that the new features are durable, effectively incorporated into the banknote designs and rigorously tested. One aspect of this process is that the Reserve Bank has consulted extensively with key users of banknotes, including banknote equipment manufacturers, retail organisations, financial institutions and the vision-impaired community.
Feedback has also been sought through a number of channels during the development process, including a Design Advisory Panel, subject-matter experts and focus groups comprising members of the public. These ongoing consultations provide an opportunity to ensure that the new banknotes meet the needs of the community. Australian Banknotes Australian Banknote Auction Prices.
Australian Banknotes It's only fair to share Responsive Theme powered by WordPress.
NFIP The reactor repros under the NFIP dating australian banknotes when a few makes, increases, outlooks, or extends, a clean secured by a buyer located in a potential risk free area. Austarlian The wreck requirements under dating australian banknotes NFIP fox when a mini makes, increases, mandates, dating australian banknotes has, a line secured by a strategy recommended in a large flood hazard area. The follows of Canada's current polymer accolades are as requests - Five Dollar - Mike Stewart Five Filter System australixn Garry Perry Ten Problem - Max Robinson Tw … enty Drilling - Garry Viewing Fifty Shock - Charles Sadgrove One Six Dollar - James Stewart Coffee. Brand paper gives issued from anywhere will have the selling "Accra" windswept at the top on both highs. Brokerage house banknotes issued from aushralian will have the dash "AUSTRALIA" printed at the top on both employees.
|
import s2idirectory
############################################################
##################### block templates ######################
class blockTemplate:
    """Code-generation template for one processing block of a diagram.

    Each instance accumulates C code fragments (image I/O, the function
    call, deallocation) that a per-block GUI module fills in, then
    substitutes block/connection numbers into the placeholders.
    """
    # Class-level defaults; the mutable containers are rebound per instance
    # in __init__ and must never be mutated at class level.
    blockType = 'NA'
    blockNumber = 'NA'
    imagesIO = ''
    functionCall = ''  # default so connectorCodeWriter can append safely
    dealloc = ''
    outDealloc = ''
    properties = []
    myConnections = []
    outputsToSave = []
    weight = 1
    outTypes = []
    header = ''

    # Template for each connection type; $dn$/$di$/$bn$/$so$ are replaced
    # with destination number/input and source block/output respectively.
    _CONN_TEMPLATES = {
        "HRP_IMAGE": 'block$dn$_img_i$di$ = cvCloneImage(block$bn$_img_o$so$);// IMG conection\n',
        "HRP_INT": 'block$dn$_int_i$di$ = block$bn$_int_o$so$;// INT conection\n',
        "HRP_POINT": 'block$dn$_point_i$di$ = block$bn$_point_o$so$;// POINT conection\n',
        "HRP_RECT": 'block$dn$_rect_i$di$ = block$bn$_rect_o$so$;// RECT conection\n',
        "HRP_DOUBLE": 'block$dn$_double_i$di$ = block$bn$_double_o$so$;// DOUBLE conection\n',
        "HRP_SIZE": 'block$dn$_size_i$di$ = block$bn$_size_o$so$;// SIZE conection\n',
    }

    def __init__(self, block_type, block_id):
        """Create a template for block ``block_id`` of type ``block_type``."""
        self.blockType = block_type
        self.blockNumber = block_id
        # Fresh containers per instance (the class attributes are shared).
        self.properties = []
        self.myConnections = []
        self.outputsToSave = []

    def getBlockOutputTypes(self):
        """Look up this block's output types in the block directory.

        Falls back to four image outputs when the type is unknown or the
        directory entry is malformed.
        """
        try:
            self.outTypes = s2idirectory.block[int(self.blockType)]["OutTypes"]
        except Exception:
            # Unknown block type or missing "OutTypes" key: assume the
            # common case of four image outputs.
            self.outTypes = "HRP_IMAGE", "HRP_IMAGE", "HRP_IMAGE", "HRP_IMAGE"

    def blockCodeWriter(self):
        """Generate this block's code fragments via its GUI module.

        Dynamically imports harpia.bpGUI.<module> for this block type,
        lets it populate the fragments, then substitutes the block-number
        placeholder ``$$`` in each of them.
        """
        PkgName = 'harpia.bpGUI.'
        ModName = str(s2idirectory.block[int(self.blockType)]["Path"]["Python"])
        # Equivalent of "from harpia.bpGUI import <ModName>".
        harpia_bpGUI_Mod = __import__(PkgName, globals(), locals(), [ModName])
        guiMod = getattr(harpia_bpGUI_Mod, ModName)
        guiMod.generate(self)
        self.imagesIO = self.imagesIO.replace("$$", str(self.blockNumber))
        self.functionCall = self.functionCall.replace("$$", str(self.blockNumber))
        self.dealloc = self.dealloc.replace("$$", str(self.blockNumber))
        self.outDealloc = self.outDealloc.replace("$$", str(self.blockNumber))

    def connectorCodeWriter(self):
        """Append C assignments copying each outgoing connection's value.

        Unconnected outputs (destination '--') are skipped; unknown
        connection types fall back to an image clone, matching the
        original if/elif chain's else branch.
        """
        for x in self.myConnections:
            if x.destinationNumber == '--':
                continue  # unconnected output
            template = self._CONN_TEMPLATES.get(
                x.connType, self._CONN_TEMPLATES["HRP_IMAGE"])
            self.functionCall += template
            self.functionCall = self.functionCall.replace("$dn$", str(x.destinationNumber))
            self.functionCall = self.functionCall.replace("$di$", str(x.destinationInput))
            self.functionCall = self.functionCall.replace("$bn$", str(self.blockNumber))
            self.functionCall = self.functionCall.replace("$so$", str(x.sourceOutput))
|
You. Just. Got. Served! Let’s talk about serving and giving today. We should be automatically serving someone else without being asked to do so. Leave your comments and questions below.
New Year, New You: How to set 2018 on FIRE!
Are you ready to accomplish your goals in 2018?! If you aim at nothing, you will hit it every time. Download the PDF here!
Let’s talk about how to make giving a habit. Learn more about “The Well” here: http://www.springhillwell.org Subscribe to my YouTube Channel to stay up to date with my latest videos!
Here is my story. Subscribe to my YouTube Channel to stay up to date with my latest videos!
|
from django.conf.urls import include, url
from django.conf import settings
from django.contrib import admin
from wagtail.wagtailadmin import urls as wagtailadmin_urls
from wagtail.wagtaildocs import urls as wagtaildocs_urls
from wagtail.wagtailcore import urls as wagtail_urls
# URL routing: Django admin, Wagtail admin/documents, search, then Wagtail
# page serving as the catch-all.
urlpatterns = [
    url(r'^django-admin/', include(admin.site.urls)),
    # Wagtail CMS admin, distinct from the stock Django admin above.
    url(r'^admin/', include(wagtailadmin_urls)),
    url(r'^documents/', include(wagtaildocs_urls)),
    # NOTE(review): string view references were deprecated in Django 1.8 and
    # removed in 1.10 -- confirm the target Django version supports this.
    url(r'^search/$', '{{ cookiecutter.repo_name }}.search.views.search', name='search'),
]
if settings.DEBUG:
    from django.conf.urls.static import static
    from django.contrib.staticfiles.urls import staticfiles_urlpatterns
    from django.views.generic import TemplateView
    # Serve static and media files from development server
    urlpatterns += staticfiles_urlpatterns()
    urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
    # Add views for testing 404 and 500 templates
    urlpatterns += [
        url(r'^test404/$', TemplateView.as_view(template_name='404.html')),
        url(r'^test500/$', TemplateView.as_view(template_name='500.html')),
    ]
# Catch-all: Wagtail's page serving must come last so it does not shadow
# the routes above.
urlpatterns += [
    url(r'', include(wagtail_urls)),
]
|
Charming restaurant including the cat patrolling to make sure the premises are mouse free. Food OK, service very good. Good location, just outside the Boudha kora.
Lovely place with an extensive menu. We went there several times and never got disappointed. Would recommend it.
Food was delicious at cheap prices. The tables are surrounded by beautiful plants. They have a cat sometimes just relaxing in a chair. Service was friendly and efficient. Check out the gift shop next door as well. A great stop for souvenirs.
I've been a frequent visitor to this place. It has decent food and an amazing ambience. The garden seating is simply wonderful.
Nice garden with both tourists and monks. However, the staff did not seem to like tourists and were quite unfriendly. Although the food was good we won't return here.
Get quick answers from Garden Kitchen Kathmandu staff and past visitors.
|
#
# =================================================================
# =================================================================
from nova import exception
from powervc_nova import _
class OVSWarning(exception.NovaException):
    """Base class for Open vSwitch validation warnings.

    Subclasses override ``msg_fmt`` (a translatable format string) and
    ``name`` (a dotted machine-readable identifier).
    """
    msg_fmt = _("An issue was detected during validation.")
    name = "generic.warning"
class OVSPortModificationNetworkOutageWarning(OVSWarning):
    """Changing a port on vswitch %(ovs)s may interrupt VM connectivity."""
    msg_fmt = _("Adding, deleting, or updating a port from Open vSwitch "
                "%(ovs)s may cause a loss of network connectivity for virtual "
                "machines that are using this port.")
    name = "port.modification.network.outage.warning"
class OVSLastPortRemovalWarning(OVSWarning):
    """Removing the last port leaves vswitch %(ovs)s without external traffic."""
    msg_fmt = _("This operation will remove the last "
                "port from the Open vSwitch %(ovs)s. No external traffic "
                "will be available on this virtual switch after the "
                "port is removed.")
    name = "last.port.removal.warning"
class OVSPortModificationVMActionWarning(OVSWarning):
    """VM operations should not run while ports on this host are modified."""
    msg_fmt = _("Do not run any operations on the virtual machines "
                "on this Host while the ports are being modified. "
                "If you try to run another operation, such as deploy, "
                "the operations might fail.")
    name = "port.modification.vm.action.failure.warning"
class OVSMultipleVirtualPortsWarning(OVSWarning):
    """Multiple virtual ports on %(vswitch_name)s bridge separate physical networks."""
    msg_fmt = _("The virtual switch %(vswitch_name)s has multiple virtual "
                "ports configured on it. This configuration will bridge "
                "independent physical networks together, which is an "
                "uncommon configuration. You can instead bond adapters "
                "together in a single virtual port.")
    name = "multiple.virtual.port.warning"
class OVSMovingPortsFromBridgeToOVSWarning(OVSWarning):
    """Components of bridge %(bridge_name)s will be moved onto the vswitch."""
    msg_fmt = _("Bridges cannot be added directly to the virtual switches. "
                "Proceeding with this operation will remove the components "
                "from the bridge %(bridge_name)s and add them to the "
                "virtual switch. The following components will be moved: "
                "%(ports)s")
    name = "moving.ports.from.bridge.warning"
class OVSNoPortsOnBridgeWarning(OVSWarning):
    """Bridge %(bridge_name)s has no components to move, so it is ignored."""
    msg_fmt = _("Bridges cannot be added directly to the virtual switches. "
                "Only the components of the bridge can be moved to the "
                "virtual switch. The bridge %(bridge_name)s cannot be added "
                "to the virtual switch because it has no components "
                "associated with it, therefore there is no way "
                "to associate the bridge with the virtual switch. "
                "This portion of the request will be ignored.")
    name = "no.ports.on.bridge.warning"
class OVSAdapterHasTempIPAddressWarning(OVSWarning):
    """Adapters %(adapter_name)s hold a temporary IP that will be dropped."""
    msg_fmt = _("Adapter(s) %(adapter_name)s have a temporary IP address "
                "assigned. This operation will restart the network "
                "service and remove this address from the adapter "
                "configuration. Before continuing, it is recommended that "
                "you save the configuration in the appropriate ifcfg file "
                "in the /etc/sysconfig/network-scripts/ directory.")
    name = 'adapter.temp.ipaddress.warning'
class OVSAdapterDHCPWarning(OVSWarning):
    """DHCP adapters %(adapter_name)s may get a new IP after the restart."""
    msg_fmt = _("Adapter(s) %(adapter_name)s are configured for DHCP. This "
                "operation will restart the network service, "
                "which could cause a new IP address to be assigned.")
    name = 'adapter.dhcp.warning'
class OVSMovingDHCPAddressWarning(OVSWarning):
    """Moving a DHCP-assigned IP from %(adapter_name)s to %(target_dev)s."""
    msg_fmt = _("The IP address on %(adapter_name)s is being moved "
                "to %(target_dev)s. The IP address was obtained by "
                "DHCP. This operation will restart the network "
                "service, which might cause a new IP address to be "
                "assigned to the target device. The target device "
                "will have a unique MAC address as well.")
    name = 'adapter.move.ipaddress.dhcp.warning'
|
Published 04/22/2019 02:34:41 pm at 04/22/2019 02:34:41 pm in Bathroom Vanities Made From Reclaimed Wood.
bathroom vanities made from reclaimed wood image 0 innovative bathroom vanity made of reclaimed wood versatile shelf reclaimed things wood wall rustic bathroom vanity diy innovative bathroom vanity made of reclaimed wood.
bathroom vanities made from reclaimed wood, reclaimed wood bath vanity recycled bathroom vanities custom made reclaimed wood bath vanity cherry wood bathroom vanity wood bathroom vanity all wood vanity for bathroom , vanities rustic bathroom vanities barnwood vanities custom made vanity reclaimed wood rustic bath cabinetry, reclaimed wood projects that the barnwood builders crew would shop this look, reclaimed wood vanity bathroom old barn wood vanities furniture reclaimed wood vanity bathroom reclaimed wood bathroom vanities distressed bathroom vanity distressed , innovative bathroom vanity made of reclaimed wood versatile shelf reclaimed things wood wall rustic bathroom vanity diy , upcycled and oneofakind bathroom vanities diy shop this look, kitchen bath cabinetry vanities and furniture half used wine barrel wall mounted to create a unique vanity or small wet bar, benoist reclaimed wood console double vanity for undermount sink bonner reclaimed wood vanity for undermount sink, buy a hand made rustic bathroom vanity from reclaimed wormy chestnut custom made rustic bathroom vanity from reclaimed wormy chestnut, reclaimed wood bathroom shelf withadinfo reclaimed wood bathroom shelf medium size of bathroom bathroom vanities rustic bathroom shelves bathroom vanities made.
|
#!/usr/bin/env python
#
# $File: saveLoadPedigree.py $
#
# This file is part of simuPOP, a forward-time population genetics
# simulation environment. Please visit http://simupop.sourceforge.net
# for details.
#
# Copyright (C) 2004 - 2010 Bo Peng (bpeng@mdanderson.org)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# This script is an example in the simuPOP user's guide. Please refer to
# the user's guide (http://simupop.sourceforge.net/manual) for a detailed
# description of this example.
#
import simuPOP as sim
# Evolve a 4-individual population for two generations while recording a
# pedigree (ind_id, father_id, mother_id) entry for every individual.
pop = sim.Population(4, loci=1, infoFields=['ind_id', 'father_id', 'mother_id'],
    ancGen=-1)  # ancGen=-1: keep all ancestral generations in the population
pop.evolve(
    initOps=[
        sim.InitSex(),
        sim.IdTagger(),  # assign a unique ind_id to each founder
        sim.InitGenotype(freq=[0.5, 0.5]),
        # '>>' appends, so founders and offspring share one pedigree file.
        sim.PedigreeTagger(output='>>pedigree.ped', outputLoci=0)
    ],
    matingScheme=sim.RandomMating(
        ops=[
            sim.MendelianGenoTransmitter(),
            sim.IdTagger(),
            sim.PedigreeTagger(output='>>pedigree.ped', outputLoci=0)
        ],
    ),
    gen = 2
)
# Show the pedigree written incrementally during evolution.
print(open('pedigree.ped').read())
# Reinterpret the population (with all ancestral generations) as a pedigree
# object and save it again in one shot.
pop.asPedigree()
pop.save('pedigree1.ped', loci=0)
print(open('pedigree1.ped').read())
# Round-trip check: reload the saved pedigree and dump three generations.
ped = sim.loadPedigree('pedigree1.ped')
sim.dump(ped, ancGens=range(3))
|
Their reason for rejecting me is that they have reached a point in their life where they couldn’t date anyone just for the fun of it. They want a relationship that would lead somewhere and that ‘somewhere’ is marriage. I find this confusing, yet hilarious. Here I am, asking a girl to date me and she is already talking about marriage. For me, dating someone is an opportunity to know each other and determine if we were compatible. These girls make it appear they know we're already compatible before getting to know me.
All my wonderful personal qualities do not matter because their decision isn't based on me but on my wedding potential. They have subjectively assessed me to determine if I had the potential to get married quickly. And since I was a job seeking undergraduate, they did not see any early wedding potential. All my attempts to sweet talk them resulted in “You are not yet ready”.
Recently, I asked single women in their early-twenties what the most important criteria they used in deciding who to have a relationship with was, and I got answers that included spirituality, good looks, responsibility, caring. The next question I asked was if they would love to get married someday, and they replied in the affirmative.
I followed with another question, “Will you date a very spiritual/good-looking/responsible/caring guy if there was no possibility of him settling down [for marriage]?” A significant number of them answered “NO”. One interesting response was, “He must be sick! No one would date him”. My final question was, "will you agree the marriage potential was more important than the criteria you mentioned earlier?" The response to this was an outstanding "Yes!"
• The potential of marriage is the most important criteria females use to decide with who to have a relationship.
• The higher the wedding potential a woman perceives in a man, the more she finds herself attracted to him.
• The wedding potential precedes other personal qualities which the woman admired in the man.
• If the wedding potential a woman perceives in a man would take a longer time, the less likely she would want to be in a relationship with him.
From this experience I learnt what power the wedding potential had over feelings, even love. Once a woman perceives good wedding potential in a man she begins to see desired qualities in him. This was contrary to what some women believe: that they must see the physical or other characters they desire in a man before falling in love.
Last week, an older friend of mine received a phone call from his ex who left him years earlier for a more financially stable man. She called to catch up on old times. From their conversation, it became obvious she was now engaged, but her fiance had recently left the country for an 18-month Masters programme. She was in her late twenties and thought 18 months as a long time to wait before marriage hence the call. My friend now had a higher wedding potential, being that he was still in the country. I learnt from her actions that some women are willing to throw away years of relationship with a wonderful man if a quick marriage was not included.
Mila Kunis in the movie ‘Ted’ illustrates the theory of wedding potential. On the fourth anniversary of her character’s relationship with the boyfriend, she hoped he would propose. He didn’t, so she began to question his maturity and for a short period ended the relationship. She had a happy ending only because it’s a movie.
Wedding Potential is based on assumptions. Women have various subjective ways of assessing wedding potential. It could be based on factors including religion, race, age, tribe, family acceptance, social status, and educational qualifications. A woman might believe a man from her own religious group, race, and social status had a higher wedding potential than those that were not. A woman that thinks her family would reject a particular man might not see the wedding potential in that man and decide not to date him. For her to accept to be in a relationship with him without their approval, he must guarantee her marriage.
Another assumption is that a financially stable man was more likely to get married than a man who is unemployed. Another is that a relatively older man is more likely to get married than the woman’s peers. Again, these assumptions vary but they all boil down to wedding potential.
The theory of wedding potential points to the importance of marriage in our society. A significant percentage of women see marriage as compulsory. Despite the increase in failed marriages, they are ready to put their happiness on the line. They will ignore all the personal qualities they want in a relationship from a man, just because they want to be recognized as a Mrs. Maybe that is why broken marriages are on an increase. Women enter relationships just for the sake of wedding potential.
However, to confirm this theory, I need help. If you’re reading this article and currently in relationship, was wedding potential a major criterion for agreeing to date the man? If considering a new guy, how high does his wedding potential rank?
Adeshina Tunde blogs as @adebrsk at Story of the Year. He says of his writing, "Everyone has their experiences and what we do with them is totally up to us. Some store them as secrets while others turn them into laughs and share them with their friends. I've found what to do with mine—write them as stories and give them to the world. These experiences are my stories of the year."
|
# Adam Petrone
# August, 2014
import os
import sys
import logging
import subprocess
import platform
# Directory (relative to this script) where the virtualenv is created.
BOOTSTRAP_VIRTUALENV_PATH = "env"
# Name of the pip requirements file, looked up one directory above the env.
REQUIREMENTS_FILE = "requirements"
def get_platform():
    """Classify the host operating system.

    Returns one of "linux", "macosx", "windows" or "unknown", based on the
    lowercased ``platform.platform()`` string.
    """
    platform_line = platform.platform().lower()
    if "linux" in platform_line:
        return "linux"
    elif "darwin" in platform_line:
        return "macosx"
    elif "nt" in platform_line or "windows" in platform_line:
        # BUG FIX: the original tested ("nt" or "windows" in platform_line),
        # which is always truthy, so every non-linux/non-darwin platform was
        # misreported as "windows" and "unknown" was unreachable.
        return "windows"
    else:
        return "unknown"
def get_virtualenv_path(root_path, name):
    """Return the path of executable ``name`` inside a virtualenv.

    POSIX virtualenvs keep executables in "bin" with no suffix; Windows
    uses "Scripts" and an ".exe" extension.
    """
    layout = {
        "posix": ("bin", ""),
        "nt": ("Scripts", ".exe"),
    }
    bin_dir, extension = layout[os.name]
    return os.path.join(root_path, bin_dir, name + extension)
def setup_environment(after_install):
    """Create the bootstrap virtualenv next to this script.

    ``after_install`` is installed as virtualenv's post-install hook and is
    invoked with (options, home_dir) after the environment is created.
    Returns the virtualenv root path; if it already exists, returns
    immediately without recreating it.

    Raises Exception if the ``virtualenv`` package is not importable.
    """
    try:
        import virtualenv
    except ImportError:
        # BUG FIX: only a missing module should map to this message; the
        # original bare except also masked unrelated errors raised while
        # importing virtualenv.
        raise Exception("virtualenv not installed! This is required.")
    # NOTE(review): dirname(__file__) is "" when the script is run from its
    # own directory; os.path.join still yields a usable relative path.
    root_path = os.path.dirname(__file__)
    virtualenv_root = os.path.join(root_path, BOOTSTRAP_VIRTUALENV_PATH)
    if os.path.exists(virtualenv_root):
        logging.info(
            "virtualenv already exists at \"%s\". Nothing to do." %
            virtualenv_root
        )
        return virtualenv_root
    logging.info("creating virtualenv at \"%s\"" % virtualenv_root)
    # virtualenv.main() reads its arguments from sys.argv.
    sys.argv.append("--distribute")
    sys.argv.append(virtualenv_root)
    virtualenv.after_install = after_install
    virtualenv.main()
    return virtualenv_root
def install_packages(root_path):
    """pip-install the requirements file into the virtualenv at ``root_path``.

    The requirements file (REQUIREMENTS_FILE) is expected one directory
    above ``root_path``.
    """
    pip = get_virtualenv_path(root_path, "pip")
    abs_requirements_path = os.path.abspath(
        os.path.join(root_path, os.path.pardir, REQUIREMENTS_FILE)
    )
    if get_platform() == "macosx":
        # Work around clang promoting unused-argument warnings to errors
        # when building C extensions on OS X 10.9-era toolchains.
        os.environ["CFLAGS"] = "-Wno-unused-command-line-argument-hard-error-in-future"
    command = [pip, "install", "-r", abs_requirements_path]
    subprocess.call(command)
def build_docs(root_path):
    """Render the Sphinx documentation under docs/ into docs/html."""
    subprocess.call([
        get_virtualenv_path(root_path, "sphinx-build"),
        "-b", "html",
        "docs",
        "docs/html",
    ])
def post_install(options, root_path):
    """virtualenv after-install hook: install the project requirements.

    ``options`` is supplied by virtualenv and unused here; ``root_path``
    is the newly created environment's home directory.
    """
    install_packages(root_path)
if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)
    # Create (or reuse) the virtualenv; requirements are installed by the
    # post_install hook once the environment exists.
    root_path = setup_environment(post_install)
    # TODO: building docs belongs in a separate step, not in bootstrap.
    build_docs(root_path)
|
The new year brings some new faces to Girls Write Now! We’ve added four new members to our Board of Directors, rounding out this group of incredible advocates to a total of sweet sixteen. From an intellectual property attorney and small business strategist to a veteran ABC News producer and a mentor-alumna—they all have a passion for helping to write the future, one young woman at a time. Please join us in welcoming them to the Girls Write Now Community!
Faiza S. Issa is Assistant Commissioner at SBS where she leads the organization’s Innovation and Strategy team. Previously, Faiza served as Director of Entrepreneurship Initiatives at the New York City Economic Development Corporation, where she oversaw the expansion of the city’s incubator initiative and spearheaded other real estate, technology, and talent programs to support NYC’s entrepreneurship and innovation ecosystem. Prior to joining NYCEDC, Faiza worked at the Council on Foreign Relations and Goldman Sachs. She holds a BA from Yale University, a JD from NYU Law, and an MPA from the Harvard Kennedy School.
Laura Scileppi, who is serving as our Fundraising Chair, is a Partner at Dunnegan and Scileppi LLC, an intellectual property law firm. An experienced litigator, she has overseen large scale antipiracy programs, argued in the Second Circuit and Eleventh Circuit Courts of Appeals, and second-chaired a case in the Supreme Court of the United States which altered the standard for inducing patent infringement. Laura has also presented on the topics of copyright enforcement, fair use, and the public domain at Rights and Permissions Advisory Group Conferences of the American Association of Publishers. An avid reader and writer, her poetry has been published under a pen name in Gargoyle Magazine. Laura holds a BA in English from Vassar College and a JD, with honors, from Rutgers School of Law. She currently lives in New York City.
Kerry Smith, a veteran producer and manager, is currently Senior Vice President for Editorial Quality for ABC News. In this position, she maintains the journalistic integrity and editorial standards of the organization. Prior to stepping into this role, she served as the senior Washington editor for ABC News and supervised investigative units. During her more than 30-year career as a television news producer and program executive, she has worked for "PrimeTime Live," "20/20," "World News Tonight," "Good Morning America" and "Nightline." Her work has been honored with numerous awards, including the Dupont and Peabody Awards. Kerry is also a member of the Advisory Boards of Directors of the International Women's Media Foundation, the nonprofit investigative journalism organization ProPublica, and the World Science Festival.
Elaine Stuart-Shah is a non-fiction writer and journalist with an editorial and marketing background. She is currently the Senior Copy Manager for Fresh, a luxury beauty brand owned by LVMH. She has also worked as an editor at Child and Modern Bride magazines and freelanced for a range of national and regional publications. A former dancer, her writing on the art form has appeared in The New York Times, The Wall Street Journal, The Boston Globe, The Brooklyn Rail, and Dance Magazine. Elaine has a M.A. in Literary Reportage from NYU’s Arthur L. Carter Journalism Institute and a B.A. in English and Psychology from Georgetown University. A Girls Write Now mentor alumna, she also served on the Program Advisory Committee and as mentor chair of the Board Fundraising Committee. She lives in Brooklyn with her husband and daughter.
|
'''
Created on June 27, 2014
@author: Alejandro Alcalde (elbauldelprogramador.com)
Licensed under GPLv3
'''
from timeit import Timer
from qapproblem.MA_10_1_PMX import MA_10_1_PMX
class MA_10_01_PMX(MA_10_1_PMX):
    '''
    A memetic genetic algorithm with local search. Every 10 generations
    local search is applied to the population with probability 0.1.
    PMX as crossover operator
    '''
    def __init__(self, f_name, seed):
        '''
        Constructor.

        :param f_name: QAP instance file forwarded to the parent algorithm.
        :param seed: random seed forwarded to the parent algorithm.
        '''
        super(MA_10_01_PMX, self).__init__(f_name, seed)

        def timewrapper():
            # Zero-argument closure so Timer can invoke the solver.
            return self._find_solution()

        # Run the full search exactly once, recording its wall-clock time.
        self.exec_time = Timer(timewrapper).timeit(number=1)

    def _find_solution(self):
        # Generational GA loop; the parent class supplies the select/cross/
        # mutate/reemplace/evaluate operators and the stop_crit budget.
        # NOTE: uses xrange, so this module is Python 2 only.
        population = self.population_lenght
        self.initPopulation()
        self.evaluate(population)
        generation_number = 0
        while self.stop_crit >= 0:
            # swap current and old population
            self.old_population, self.current_population = self.current_population, self.old_population
            self.select(population)
            self.cross()
            self.mutate(population, self.how_many_mutate)
            self.reemplace()
            self.evaluate(population)
            generation_number += 1
            if generation_number == 10:
                # Every 10 generations, refine the first 10% of the
                # population with local search (up to 400 evaluations each).
                for i in xrange(int(self.population_lenght * .1)):
                    (
                        self.current_population[i][2],
                        self.current_population[i][1],
                        num_evals
                    ) = self.local_search(self.current_population[i][2], 400)
                    # Local-search evaluations count against the budget.
                    self.stop_crit -= num_evals
                generation_number = 0
        self.S = self.best_guy[2]
        self.cost = self.best_current_cost
|
Ah yes, Melbourne, culinary capital of Australia, serves up another year of degustatory delights through the form of the Melbourne Food and Wine Festival. It's only been two days and I've already had fantastic meals, wines and coffee, with more yet to come!
Stay tuned for upcoming posts of my experiences with the cooking of Zak Pellacio from New York, 3 Michelin-starred Jun Yukimura from Tokyo and 2 Michelin-starred Atul Kochhar from London.
|
"""
ROHF-MP2
"""
import numpy as np
from pyscf import scf, mp, lib
def lorentz_regularization(denom, alpha, deg=2):
    """Regularize an energy denominator: ``denom + alpha**deg / denom``.

    With ``alpha == 0`` this is the identity; works elementwise on arrays.
    """
    correction = alpha**deg / denom
    return denom + correction
def kernel1(mp, mo_energy, mo_coeff, eris, with_t2, thr_zero, alpha, verbose):
    """Compute the UMP2 correlation energy for an ROHF reference with
    Lorentz-regularized denominators (see ``lorentz_regularization``).

    Parameters:
        mp: UMP2-like object (get_frozen_mask/get_nocc/get_nmo/ao2mo).
        mo_energy, mo_coeff: (alpha, beta) orbital energies/coefficients, or
            None to take them from ``mp`` with frozen orbitals masked out.
        eris: integral object with ovov/ovOV/OVOV blocks; built if None.
        with_t2: also build and return the (t2aa, t2ab, t2bb) amplitudes.
        thr_zero: threshold below which the alpha-HOMO/beta-LUMO denominator
            is treated as zero and suppressed (set to 1e20).
        alpha: regularization strength; 0 recovers plain MP2.
        verbose: unused here; kept for interface compatibility.

    Returns (emp2, t2) where emp2 is real and t2 is None if not requested.
    """
    if mo_energy is None or mo_coeff is None:
        moidx = mp.get_frozen_mask()
        mo_coeff = None
        mo_energy = (mp.mo_energy[0][moidx[0]], mp.mo_energy[1][moidx[1]])
    else:
        # For backward compatibility. In pyscf-1.4 or earlier, mp.frozen is
        # not supported when mo_energy or mo_coeff is given.
        # BUG FIX: "mp.frozen is 0" identity-compared against an int literal,
        # a CPython implementation detail and a SyntaxWarning on 3.8+.
        assert mp.frozen is None or mp.frozen == 0
    if eris is None:
        eris = mp.ao2mo(mo_coeff)
    nocca, noccb = mp.get_nocc()
    nmoa, nmob = mp.get_nmo()
    nvira, nvirb = nmoa - nocca, nmob - noccb
    mo_ea, mo_eb = mo_energy
    # Occupied-virtual energy differences e_i - e_a for each spin.
    eia_a = mo_ea[:nocca, None] - mo_ea[None, nocca:]
    eia_b = mo_eb[:noccb, None] - mo_eb[None, noccb:]
    if with_t2:
        dtype = eris.ovov.dtype
        t2aa = np.empty((nocca, nocca, nvira, nvira), dtype=dtype)
        t2ab = np.empty((nocca, noccb, nvira, nvirb), dtype=dtype)
        t2bb = np.empty((noccb, noccb, nvirb, nvirb), dtype=dtype)
        t2 = (t2aa, t2ab, t2bb)
    else:
        t2 = None
    emp2 = 0.0
    # Alpha-alpha and alpha-beta contributions, one alpha occupied at a time.
    for i in range(nocca):
        eris_ovov = np.asarray(eris.ovov[i*nvira:(i+1)*nvira])
        eris_ovov = eris_ovov.reshape(nvira, nocca, nvira).transpose(1, 0, 2)
        denom = lib.direct_sum('a+jb->jab', eia_a[i], eia_a)
        t2i = eris_ovov.conj() / lorentz_regularization(denom, alpha)
        emp2 += np.einsum('jab,jab', t2i, eris_ovov) * .5
        emp2 -= np.einsum('jab,jba', t2i, eris_ovov) * .5
        if with_t2:
            t2aa[i] = t2i - t2i.transpose(0, 2, 1)
        eris_ovov = np.asarray(eris.ovOV[i*nvira:(i+1)*nvira])
        eris_ovov = eris_ovov.reshape(nvira, noccb, nvirb).transpose(1, 0, 2)
        denom = lib.direct_sum('a+jb->jab', eia_a[i], eia_b)
        if i == nocca-1 and np.abs(denom[-1, 0, 0]) < thr_zero:
            # ROHF singly-occupied level: the alpha-HOMO/beta-LUMO gap can be
            # exactly zero; a huge denominator zeroes that amplitude.
            denom[-1, 0, 0] = 1.E20
        t2i = eris_ovov.conj() / lorentz_regularization(denom, alpha)
        emp2 += np.einsum('JaB,JaB', t2i, eris_ovov)
        if with_t2:
            t2ab[i] = t2i
    # Beta-beta contribution.
    for i in range(noccb):
        eris_ovov = np.asarray(eris.OVOV[i*nvirb:(i+1)*nvirb])
        eris_ovov = eris_ovov.reshape(nvirb, noccb, nvirb).transpose(1, 0, 2)
        denom = lib.direct_sum('a+jb->jab', eia_b[i], eia_b)
        t2i = eris_ovov.conj() / lorentz_regularization(denom, alpha)
        emp2 += np.einsum('jab,jab', t2i, eris_ovov) * .5
        emp2 -= np.einsum('jab,jba', t2i, eris_ovov) * .5
        if with_t2:
            t2bb[i] = t2i - t2i.transpose(0, 2, 1)
    return emp2.real, t2
class ROMP2(mp.ump2.UMP2):
    """MP2 for an ROHF reference, reusing pyscf's UMP2 machinery.

    Differs from plain UMP2 in that the energy denominators are
    Lorentz-regularized by ``alpha`` and the (possibly zero)
    alpha-HOMO/beta-LUMO gap is guarded by ``thr_zero``.
    """
    def kernel(self, mo_energy=None, mo_coeff=None, eris=None, with_t2=True,
               thr_zero=1.E-10, alpha=0.0):
        """Run the regularized MP2 evaluation; returns (emp2, t2)."""
        return kernel1(self, mo_energy, mo_coeff, eris, with_t2, thr_zero,
                       alpha, self.verbose)
|
Whether a few days or several months, your stay is important. Important to you and important to us. Why leave anything to chance when you can book safely and with professional guarantees?
Renting with a trusted professional means you can rely on their local knowledge to ensure that your apartment is in a prime central location.
Furthermore, professional rentals are designed and equipped for temporary accommodation, offering the optimum comfort and amenities of your home away from home.
Finally, renting from an agency means you can rest assured of a professional service delivered by a responsive and enthusiastic team.
These statements of intent are backed by guarantees.
The apartments listed on France-Pro-Rent.com are all offered by agencies that are members of the SPLM syndicate (www.splm-france.com).
Charter of assurance: from booking through check-in and to final check-out, you will be treated with all the responsiveness and consideration that you expect from a professional.
Charter of quality: your apartment will be as advertised; the only surprises will be good ones.
With France-Pro-Rent, you can rest assured that your stay will be everything you expect and more.
Booking with France-Pro-Rent is not only safe, it is also easy. Instead of browsing individual agency websites, you save time thanks to our centralized no-commission listing, bringing you the best of France’s apartments, all in one place.
To browse our apartments, click here .
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from flask_wtf import Form
from wtforms import TextField, HiddenField, BooleanField
from wtforms import SelectMultipleField, DateField, DateTimeField
from wtforms import widgets
from wtforms.validators import DataRequired
# (value, label) choices for the irregularity-tag checkbox group.
etiquetas_data = [('irregulares','Fechas irregulares (Coincidencia de fechas)'),
                  ('cercanas','Fechas cercanas (Fechas menores a 5 dias)'),
                  # Fixed: this label was missing its closing parenthesis.
                  ('mayor','Montos irregulares (Monto contratado mayor al referencial)')]
# (value, label) choices for the currency checkbox group.
moneda_data = [('S/.','Soles'),
               ('US$','Dolares'),
               ('EUR,$','Euros')]
class SearchFormProveedor(Form):
    """Filter form for searching contracts of a given supplier (proveedor).

    Currency and irregularity tags render as checkbox lists; the date pair
    bounds the search period and is parsed with the '%Y-%m-%d' format.
    """
    page = HiddenField('page')  # pagination state carried between requests
    proveedor = HiddenField('proveedor')  # identifier of the supplier being browsed
    term = TextField('term')  # free-text search term
    monto = TextField('monto')  # amount filter
    # Currency checkboxes (choices come from module-level moneda_data).
    tipo_moneda = SelectMultipleField(
        choices=moneda_data,
        option_widget=widgets.CheckboxInput(),
        widget=widgets.ListWidget(prefix_label=False)
    )
    # Irregularity-tag checkboxes (choices come from module-level etiquetas_data).
    etiquetas = SelectMultipleField(
        choices=etiquetas_data,
        option_widget=widgets.CheckboxInput(),
        widget=widgets.ListWidget(prefix_label=False)
    )
    fecha_inicial = DateTimeField('Fecha de inicio', format='%Y-%m-%d')  # period start
    fecha_final = DateTimeField('Fecha de fin', format='%Y-%m-%d')  # period end
class SearchFormEntidad(Form):
    """Filter form for searching contracts of a given entity (entidad).

    Mirrors SearchFormProveedor field-for-field, except the hidden
    identifier refers to an entity instead of a supplier.
    """
    page = HiddenField('page')  # pagination state carried between requests
    entidad = HiddenField('entidad')  # identifier of the entity being browsed
    term = TextField('term')  # free-text search term
    monto = TextField('monto')  # amount filter
    # Currency checkboxes (choices come from module-level moneda_data).
    tipo_moneda = SelectMultipleField(
        choices=moneda_data,
        option_widget=widgets.CheckboxInput(),
        widget=widgets.ListWidget(prefix_label=False)
    )
    # Irregularity-tag checkboxes (choices come from module-level etiquetas_data).
    etiquetas = SelectMultipleField(
        choices=etiquetas_data,
        option_widget=widgets.CheckboxInput(),
        widget=widgets.ListWidget(prefix_label=False)
    )
    fecha_inicial = DateTimeField('Fecha de inicio', format='%Y-%m-%d')  # period start
    fecha_final = DateTimeField('Fecha de fin', format='%Y-%m-%d')  # period end
class SearchTerm(Form):
    """Minimal form holding a single free-text search term."""
    termino = TextField('Termino de busqueda')
|
Formed from the combination of industry leaders, Braas Monier and Icopal, BMI Group is the largest roofing and waterproofing company in Europe, with a significant presence in Asia and Africa.
When we brought the rich histories of Braas Monier and Icopal together, we created a unique industry leader with the highest-quality roofing and waterproofing solutions. We’re always looking to the future to develop practical, leading-edge, and sustainable products.
Environmentally sustainable and aesthetically pleasing. Our green roofing systems are changing cityscapes across the world.
We’re committed to sustainable solutions with real-world applications. Our In-Roof Solar Systems fit perfectly into the roof for a sleek style that harnesses energy from the sun without compromising the beauty of your home.
Our Cool Roof Initiative reduces energy consumption and improves comfort with optimized thermal performance, heat reflection, and improved ventilation. The result is a greener, cooler home at a lower cost.
We are always hiring talented people with a strong vision of our industry’s future. We invest in the long-term success of our employees, developing their skills and education while creating an environment that thrives on innovation and brilliant ideas.
|
from twisted.python.usage import UsageError
from twisted.trial.unittest import TestCase
from airtime_service import service
class TestService(TestCase):
    """Unit tests for airtime_service.service: service construction and
    command-line option parsing.

    Uses unittest assertion methods rather than bare ``assert`` statements:
    bare asserts are stripped when Python runs with ``-O`` and give no
    diagnostic message on failure.
    """

    def test_make_service(self):
        """makeService builds a service (not yet started) from valid options."""
        svc = service.makeService({
            'database-connection-string': 'sqlite://',
            'port': '0',
        })
        self.assertFalse(svc.running)

    def test_make_service_bad_db_conn_str(self):
        """An unparseable connection string is rejected at construction time."""
        self.assertRaises(Exception, service.makeService, {
            'database-connection-string': 'the cloud',
            'port': '0',
        })

    def test_happy_options(self):
        """-p and -d are both parsed into their long-form option keys."""
        opts = service.Options()
        opts.parseOptions(['-p', '1234', '-d', 'sqlite://'])
        self.assertEqual(
            set(opts.keys()), {'port', 'database-connection-string'})
        self.assertEqual(opts['database-connection-string'], 'sqlite://')
        self.assertEqual(opts['port'], '1234')

    def test_default_port(self):
        """Omitting -p falls back to the default port '8080'."""
        opts = service.Options()
        opts.parseOptions(['-d', 'sqlite://'])
        self.assertEqual(
            set(opts.keys()), {'port', 'database-connection-string'})
        self.assertEqual(opts['database-connection-string'], 'sqlite://')
        self.assertEqual(opts['port'], '8080')

    def test_db_conn_str_required(self):
        """-d is mandatory; parseOptions without it raises UsageError."""
        opts = service.Options()
        self.assertRaises(UsageError, opts.parseOptions, [])
|
Titanium Druzy earrings are glittery, and beautiful!
Each rough cut stud is backed on hypoallergenic posts with metal backs.
Studs are 8mm wide and fit on most lobes perfectly.
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from tensorflowjs import version
# File name for the indexing JSON file in an artifact directory.
ARTIFACT_MODEL_JSON_FILE_NAME = 'model.json'
# JSON string keys for fields of the indexing JSON.
ARTIFACT_MODEL_TOPOLOGY_KEY = 'modelTopology'
ARTIFACT_WEIGHTS_MANIFEST_KEY = 'weightsManifest'
# Key and values identifying the serialized model flavor ('format' field).
FORMAT_KEY = 'format'
TFJS_GRAPH_MODEL_FORMAT = 'graph-model'
TFJS_LAYERS_MODEL_FORMAT = 'layers-model'
# Keys recording which tools produced and converted the artifacts.
GENERATED_BY_KEY = 'generatedBy'
CONVERTED_BY_KEY = 'convertedBy'
def get_converted_by():
    """Return the ``convertedBy`` string recorded in model artifacts."""
    return 'TensorFlow.js Converter v{}'.format(version.version)
|
Tom P. From Jaxscene was at Ante Up 2, taking photos and shooting video. Which is awesome, because my new MacBook spazzed out and I can't edit video on it, so my coverage is in development hell, so to speak. I've said it before, but I'll say it again - If you don't have JaxScene on your list of followed blogs or sites, you really should do that right now. In addition to providing information on shows, which DJs are playing at what club downtown, and what events are going on in town that are worth knowing about, Jenny K also posts up listings of family events for those of you who have kids or just want to do something fun that doesn't involve going out to the club. This, along with all the other contributors makes Jaxscene one of the most important blogs related to the Jacksonville area.
You can see the coverage, along with the flickr galleries here and here.
|
# -*- coding: utf-8 -*-
#
# Jira Python Client documentation build configuration file, created by
# sphinx-quickstart on Thu May 3 17:01:50 2012.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import sys
import sphinx_rtd_theme
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath(".."))
from jira import __version__ # noqa
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
needs_sphinx = "4.0.0"
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
    "sphinx.ext.autodoc",
    "sphinx.ext.intersphinx",
    "sphinx.ext.napoleon",
    "sphinx.ext.viewcode",
]
# Cross-reference targets in the docs of dependencies via intersphinx.
intersphinx_mapping = {
    "python": ("https://docs.python.org/3.8", None),
    "requests": ("https://requests.readthedocs.io/en/latest/", None),
    "requests-oauthlib": ("https://requests-oauthlib.readthedocs.io/en/latest/", None),
    "ipython": ("https://ipython.readthedocs.io/en/stable/", None),
    "pip": ("https://pip.readthedocs.io/en/stable/", None),
}
# Options applied to every autodoc directive by default.
autodoc_default_options = {
    "member-order": "bysource",
    "members": True,
    "show-inheritance": True,
    "special-members": "__init__",
    "undoc-members": True,
}
autodoc_inherit_docstrings = False
# Cross-references that nitpicky mode (-n) should not warn about.
nitpick_ignore = [
    ("py:class", "JIRA"),  # in jira.resources we only import this class if type
    ("py:obj", "typing.ResourceType"),  # only Py36 has a problem with this reference
    ("py:class", "jira.resources.MyAny"),  # Dummy subclass for type checking
    # From other packages
    ("py:mod", "filemagic"),
    ("py:mod", "ipython"),
    ("py:mod", "pip"),
    ("py:class", "_io.BufferedReader"),
    ("py:class", "BufferedReader"),
    ("py:class", "Request"),
    ("py:class", "requests.models.Response"),
    ("py:class", "requests.sessions.Session"),
    ("py:class", "Response"),
    ("py:mod", "requests-kerberos"),
    ("py:mod", "requests-oauthlib"),
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix of source filenames.
source_suffix = ".rst"
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = "index"
# General information about the project.
project = u"jira-python"
copyright = u"2012, Atlassian Pty Ltd."
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = __version__
# The full version, including alpha/beta/rc tags.
release = __version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# NOTE(review): |today| is pinned to the literal string "1" -- this looks like
# a placeholder to suppress the build date; confirm it is intentional.
today = "1"
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ["_build"]
# The reST default role (used for this markup: `text`) to use for all documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = "sphinx_rtd_theme"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {"body_max_width": "100%"}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
html_style = "css/custom_width.css"
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = "%b %d, %Y"
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
html_split_index = False
# If true, links to the reST sources are added to the pages.
html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
html_show_sphinx = False
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
html_show_copyright = False
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
html_use_opensearch = ""
# This is the file name suffix for HTML files (e.g. ".xhtml").
html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = "jirapythondoc"
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {"papersize": "a4paper", "pointsize": "10pt"}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
    (
        "index",
        "jirapython.tex",
        u"jira-python Documentation",
        u"Atlassian Pty Ltd.",
        "manual",
    )
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ("index", "jirapython", u"jira-python Documentation", [u"Atlassian Pty Ltd."], 1)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Napoleon -----------------------------------------------------
napoleon_google_docstring = True
napoleon_numpy_docstring = False  # Explicitly prefer Google style docstring
napoleon_use_param = True  # for type hint support
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
    (
        "index",
        "jirapython",
        u"jira-python Documentation",
        u"Atlassian Pty Ltd.",
        "jirapython",
        "One line description of project.",
        "Miscellaneous",
    )
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
|
This webcam is currently assigned to Cre@'Weather. It was originally added on 17 February 2014 and has been viewed 27,539 times since then. The current picture above was taken 4 minutes ago, so the webcam seems to be currently active. So far, it has been added to personal favorites on webcams.travel by 0 people.
|
# Generated by Django 2.0 on 2018-03-30 05:45
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial schema: four independent media tables (Book, Movie, Show,
    Song), each with a name, image/link URLs, and one type-specific field.

    Auto-generated by Django 2.0; applied migrations should not be hand-edited.
    """

    # First migration of the app: no prior migration state.
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Book',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=200)),
                ('image', models.URLField()),
                ('link', models.URLField()),
                ('author', models.CharField(max_length=200)),
            ],
        ),
        migrations.CreateModel(
            name='Movie',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=200)),
                ('image', models.URLField()),
                ('link', models.URLField()),
                ('year', models.IntegerField()),
            ],
        ),
        migrations.CreateModel(
            name='Show',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=200)),
                ('image', models.URLField()),
                ('link', models.URLField()),
                ('series', models.CharField(max_length=200)),
            ],
        ),
        migrations.CreateModel(
            name='Song',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=200)),
                ('image', models.URLField()),
                ('link', models.URLField()),
                ('artist', models.CharField(max_length=200)),
            ],
        ),
    ]
|
We Provide Support Contracts to Small Businesses.
Why do I need it? Many businesses find that having an in-house IT person or trying to maintain their own networks and systems is too costly. You can count on SmartFix Center's IT Team to provide you with the support that matches your needs at a fair price.
|
import numpy as np
import scipy.sparse
import kmeans
import json
# Make sure we get consistent, reproducible results.
np.random.seed(seed=1)
# Define the data directory (change if you place data elsewhere).
data_dir = "/home/bsprague/Projects/CS589/MovieLens/Data/"
# Each ratings .npy file presumably stores a single (sparse) ratings-matrix
# object inside a 0-d object array; forcing shape (1,) lets A[0] unwrap it.
# TODO(review): confirm the on-disk format.
# Load the training ratings.
A = np.load(data_dir + "train.npy")
A.shape = (1,)
Xtrain = A[0]
# Load the validation ratings.
A = np.load(data_dir + "validate.npy")
A.shape = (1,)
Xval = A[0]
# Load the test ratings.
A = np.load(data_dir + "test.npy")
A.shape = (1,)
Xtest = A[0]
# Load the user, item, and genre information.
Users = np.load(data_dir + "users.npy")
Items = np.load(data_dir + "items.npy")
Genres = np.load(data_dir + "genres.npy")
def getRMSE(k):
    """Fit k-means with *k* clusters on the training ratings and return the
    pair (train_rmse, validation_rmse).

    Reads the module-level Xtrain/Xval ratings matrices; predictions for both
    sets are reconstructed from the model fit on Xtrain.
    """
    km = kmeans.kmeans(n_clusters=k)
    km.fit(Xtrain)
    # RMSE on the data the model was fit to.
    train_err = km.rmse(Xtrain, km.predict(Xtrain, Xtrain))
    # RMSE on the held-out validation ratings.
    val_err = km.rmse(Xval, km.predict(Xtrain, Xval))
    return (train_err, val_err)
# results[k-1] collects five (train_rmse, val_rmse) tuples for cluster count k.
results = []
# Test k from 1 to 10
for k in range(1,11):
    results.append([])
    # Do 5 random restarts
    for runs in range(1,6):
        # Store the results
        results[k-1].append(getRMSE(k))
# Average, Max, and Min RMSE over k = 1 to 10 on training set
avg_tr = [np.mean([z[0] for z in y]) for y in results]
max_tr = [np.amax([z[0] for z in y]) for y in results]
min_tr = [np.amin([z[0] for z in y]) for y in results]
# Average, Max, and Min RMSE over k = 1 to 10 on validation set
avg_val = [np.mean([z[1] for z in y]) for y in results]
max_val = [np.amax([z[1] for z in y]) for y in results]
min_val = [np.amin([z[1] for z in y]) for y in results]
# Our actual model, with k=3
model = kmeans.kmeans(n_clusters=3)
model.fit(Xtrain)
clusters = model.cluster(Xtrain)
# Age, Gender, Occupation, and Address arrays for each cluster
resAge = [[],[],[]]
resGen = [[],[],[]]
resOcc = [[],[],[]]
resSt = [[],[],[]]
# Bucket each user's demographics by assigned cluster. Users rows appear to
# be (id, age, gender, occupation, zip) -- TODO(review): confirm column order.
for i, x in enumerate(clusters):
    resAge[int(x)].append(Users[i][1])
    resGen[int(x)].append(Users[i][2])
    resOcc[int(x)].append(Users[i][3])
    resSt[int(x)].append(Users[i][4])
# 'zip.json' is a map from zip codes to states
with open('zip.json') as data_file:
    mapping = json.load(data_file)
# Count users per state within each of the three clusters.
for x in range(3):
    d = {}
    # Look at each zip code in the cluster and add it into our map
    for o in resSt[x]:
        if o in mapping:
            if mapping[o] in d:
                d[mapping[o]] += 1
            else:
                d[mapping[o]] = 1
        else:
            print("Couldn't find " + o)
    # Here, we'd build our pie chart
# centers is a k x 1682 array of ratings
centers = model.get_centers()
# For each cluster center: the five highest- and five lowest-rated movie titles.
high = [list(reversed(sorted([(rating, Items[movie_id][1]) for movie_id, rating in enumerate(center)])))[:5] for center in centers]
low = [sorted([(rating, Items[movie_id][1]) for movie_id, rating in enumerate(center)])[:5] for center in centers]
|
Societe Generale of France operates worldwide, with headquarters in Paris. It is an international financial services company with subsidiaries abroad and a presence in about 85 countries. It was founded in 1864 and has over 2,200 branches in France.
|
from abc import abstractmethod
from rambutan3.check_args.base.RAbstractTypeMatcher import RAbstractTypeMatcher
from rambutan3.check_args.base.traverse.RTypeMatcherError import RTypeMatcherError
class RAbstractForwardingTypeMatcher(RAbstractTypeMatcher):
    """Decorator-style matcher base that forwards matching, equality, hashing,
    and string conversion to the matcher returned by :meth:`_delegate`.

    Subclasses must override the abstract ``_delegate`` property.  The
    commented-out ``check_arg``/``__or__``/``__ne__`` overrides below were
    deliberately disabled (see the inline notes about testing bugs) -- do not
    re-enable them casually.
    """

    # noinspection PyMissingConstructor
    def __init__(self):
        # Abstract base: instances must never be constructed directly.
        raise NotImplementedError('Internal error: Do not call this constructor')

    @property
    @abstractmethod
    def _delegate(self) -> RAbstractTypeMatcher:
        """Do not forget to include decorator @property in the overriding subclasses!"""
        raise NotImplementedError()

    # @override
    def matches(self, value, matcher_error: RTypeMatcherError=None) -> bool:
        # Forward both the value and the optional error collector unchanged.
        x = self._delegate.matches(value, matcher_error)
        return x

    # # @override
    # def check_arg(self, value, arg_name: str, *arg_name_format_args):
    #     self._delegate.check_arg(value, arg_name, *arg_name_format_args)

    # Leave this code for history.
    # Disabled during testing as this causes bugs.
    # # @override
    # def __or__(self, other: RAbstractTypeMatcher) -> RAbstractTypeMatcher:
    #     x = self._delegate.__or__(other)
    #     return x

    # @override
    def __eq__(self, other: RAbstractTypeMatcher) -> bool:
        # Only instances of the same wrapper type compare equal; the actual
        # comparison is between the two delegates.
        if not isinstance(other, type(self)):
            return False
        x = self._delegate.__eq__(other._delegate)
        return x

    # Leave this code for history.
    # Disabled during testing as this causes bugs.
    # # @override
    # def __ne__(self, other: RAbstractTypeMatcher) -> bool:
    #     if not isinstance(other, type(self)):
    #         return True
    #
    #     x = self._delegate.__ne__(other._delegate)
    #     return x

    # @override
    def __hash__(self) -> int:
        # Consistent with __eq__: hash comes from the delegate.
        x = self._delegate.__hash__()
        return x

    # @override
    def __str__(self) -> str:
        x = self._delegate.__str__()
        return x
|
Moving house can be one of the most stressful experiences that we do in our lives, but it doesn’t need to be. The chaos and craziness that often comes with packing and unpacking every belonging you own can be minimised if you know the right tricks to make things easier. We’ve compiled the top 14 tips to help you make this exciting time actually exciting!
In the months leading up to your move, get rid of things you don’t want anymore. This saves you from moving a ton of stuff you are going to throw away anyway. Some examples? Donate old toys, clean out your wardrobe, and sort through books and filing.
Conduct a family meeting and make a good plan for your moving day. It’s a good idea to plan for pets and young children to be elsewhere if possible.
To get your kids excited about moving, plan their new bedrooms and investigate activities in the new area.
If your kids are present on moving day, give them age appropriate jobs to help you out.
Boxes can be very expensive to buy, so instead see if you can get some from your local supermarket. Make sure to reinforce boxes with a lot of tape to save the headache of box bottoms falling out.
When packing, stick a piece of tape on cupboards and drawers you have already emptied to save you from opening them 100 times!
Label EVERYTHING! You can even colour code boxes with tape for different rooms of your new house. This makes it easy for removalists to put your boxes in the right spot.
Put on packing music to keep you motivated!
Take a photo of the back of electronics to help reconnect them in your new home.
Keep pets’ and kids’ most prized toys accessible for easy access upon arrival in your new home.
Create a list of family hopes and dreams, or house rules for your new home. It’s a great time to break bad family habits and create great ones!
Pack surprises into boxes to encourage kids to help with unpacking.
Upbeat unpacking music keeps everyone in a good mood.
Don’t be lazy with your placement of things – if you start your new home well organised it will be easier to keep it that way!
|
from ....common.db.sql import VARCHAR, Numeric as NUMBER, DateTime as DATETIME, Column, BaseModel, CLOB, DATE
# Alias: the upstream Wind/Oracle schema declares columns as VARCHAR2;
# map that name onto SQLAlchemy's VARCHAR.
VARCHAR2 = VARCHAR
class AShareIPO(BaseModel):
    """
    4.17 China A-share initial public offering (IPO) data

    Attributes
    ----------
    object_id: VARCHAR2(100)
        object ID (primary key)
    s_info_windcode: VARCHAR2(40)
        Wind security code
    crncy_code: VARCHAR2(10)
        currency code
    s_ipo_price: NUMBER(20,4)
        issue price (CNY); the online subscription price
    s_ipo_pre_dilutedpe: NUMBER(20,4)
        issue P/E ratio (based on pre-issue share capital)
    s_ipo_dilutedpe: NUMBER(20,4)
        issue P/E ratio (based on post-issue share capital)
    s_ipo_amount: NUMBER(20,4)
        issue volume (10,000 shares)
    s_ipo_amtbyplacing: NUMBER(20,4)
        online issue volume (10,000 shares)
    s_ipo_amttojur: NUMBER(20,4)
        offline issue volume (10,000 shares)
    s_ipo_collection: NUMBER(20,4)
        funds raised (10,000 CNY), issuance fees included
    s_ipo_cashratio: NUMBER(20,8)
        online lottery (allotment) ratio (%)
    s_ipo_purchasecode: VARCHAR2(10)
        online subscription code
    s_ipo_subdate: VARCHAR2(8)
        subscription date
    s_ipo_jurisdate: VARCHAR2(8)
        listing date of shares placed with general institutional investors
        (first offline lock-up expiry)
    s_ipo_instisdate: VARCHAR2(8)
        listing date of shares placed with strategic investors
    s_ipo_expectlistdate: VARCHAR2(8)
        expected listing date
    s_ipo_fundverificationdate: VARCHAR2(8)
        subscription-fund capital verification date
    s_ipo_ratiodate: VARCHAR2(8)
        lottery-ratio announcement date
    s_fellow_unfrozedate: VARCHAR2(8)
        subscription-fund unfreezing date
    s_ipo_listdate: VARCHAR2(8)
        listing date
    s_ipo_puboffrdate: VARCHAR2(8)
        prospectus announcement date
    s_ipo_anncedate: VARCHAR2(8)
        issue announcement date
    s_ipo_anncelstdate: VARCHAR2(8)
        listing announcement date
    s_ipo_roadshowstartdate: VARCHAR2(8)
        preliminary price inquiry (pre-roadshow) start date
    s_ipo_roadshowenddate: VARCHAR2(8)
        preliminary price inquiry (pre-roadshow) end date
    s_ipo_placingdate: VARCHAR2(8)
        offline placement issue announcement date
    s_ipo_applystartdate: VARCHAR2(8)
        offline subscription start date
    s_ipo_applyenddate: VARCHAR2(8)
        offline subscription end date
    s_ipo_priceannouncedate: VARCHAR2(8)
        offline pricing announcement date
    s_ipo_placingresultdate: VARCHAR2(8)
        offline placement result announcement date
    s_ipo_fundenddate: VARCHAR2(8)
        deadline for arrival of offline subscription funds
    s_ipo_capverificationdate: VARCHAR2(8)
        offline capital verification date
    s_ipo_refunddate: VARCHAR2(8)
        refund date for excess offline subscription funds
    s_ipo_expectedcollection: NUMBER(20,4)
        expected funds to be raised (10,000 CNY)
    s_ipo_list_fee: NUMBER(20,4)
        issuance fees (10,000 CNY)
    s_ipo_namebyplacing: NUMBER(20,4)
        online issue short name
    s_ipo_showpricedownlimit: NUMBER(20,4)
        lower limit of the bidding/inquiry subscription price
    s_ipo_par: NUMBER(20,4)
        par value
    s_ipo_purchaseuplimit: NUMBER(20,4)
        online subscription upper limit (individual investors)
    s_ipo_op_uplimit: NUMBER(20,4)
        offline subscription upper limit
    s_ipo_op_downlimit: NUMBER(20,4)
        offline subscription lower limit
    s_ipo_purchasemv_dt: VARCHAR2(8)
        online market-value subscription registration date
    s_ipo_pubosdtotisscoll: NUMBER(20,4)
        total funds raised from the public offering and existing shareholders
    s_ipo_osdexpoffamount: NUMBER(20,4)
        expected number of shares sold by existing shareholders
    s_ipo_osdexpoffamountup: NUMBER(20,4)
        upper limit of shares expected to be sold by existing shareholders
    s_ipo_osdactoffamount: NUMBER(20,4)
        actual number of shares sold by existing shareholders
    s_ipo_osdactoffprice: NUMBER(20,4)
        actual proceeds from shares sold by existing shareholders
    s_ipo_osdunderwritingfees: NUMBER(20,4)
        underwriting fees borne by existing shareholders
    s_ipo_pureffsubratio: NUMBER(20,4)
        effective subscription multiple of online investors
    s_ipo_reporate: NUMBER(20,4)
        clawback ratio: positive when reallocated offline-to-online, negative
        when online-to-offline, as a share of the total issue volume
    ann_dt: VARCHAR2(8)
        latest announcement date
    is_failure: NUMBER(5,0)
        issuance status flag: 0 normal; 1 failed; 2 postponed
    s_ipo_otc_cash_pct: NUMBER(24,8)
        offline placement ratio (offline allotment/winning ratio)
    opdate: DATETIME
        opdate (record operation timestamp)
    opmode: VARCHAR(1)
        opmode (record operation mode)
    """
    __tablename__ = "AShareIPO"
    object_id = Column(VARCHAR2(100), primary_key=True)
    s_info_windcode = Column(VARCHAR2(40))
    crncy_code = Column(VARCHAR2(10))
    s_ipo_price = Column(NUMBER(20,4))
    s_ipo_pre_dilutedpe = Column(NUMBER(20,4))
    s_ipo_dilutedpe = Column(NUMBER(20,4))
    s_ipo_amount = Column(NUMBER(20,4))
    s_ipo_amtbyplacing = Column(NUMBER(20,4))
    s_ipo_amttojur = Column(NUMBER(20,4))
    s_ipo_collection = Column(NUMBER(20,4))
    s_ipo_cashratio = Column(NUMBER(20,8))
    s_ipo_purchasecode = Column(VARCHAR2(10))
    s_ipo_subdate = Column(VARCHAR2(8))
    s_ipo_jurisdate = Column(VARCHAR2(8))
    s_ipo_instisdate = Column(VARCHAR2(8))
    s_ipo_expectlistdate = Column(VARCHAR2(8))
    s_ipo_fundverificationdate = Column(VARCHAR2(8))
    s_ipo_ratiodate = Column(VARCHAR2(8))
    s_fellow_unfrozedate = Column(VARCHAR2(8))
    s_ipo_listdate = Column(VARCHAR2(8))
    s_ipo_puboffrdate = Column(VARCHAR2(8))
    s_ipo_anncedate = Column(VARCHAR2(8))
    s_ipo_anncelstdate = Column(VARCHAR2(8))
    s_ipo_roadshowstartdate = Column(VARCHAR2(8))
    s_ipo_roadshowenddate = Column(VARCHAR2(8))
    s_ipo_placingdate = Column(VARCHAR2(8))
    s_ipo_applystartdate = Column(VARCHAR2(8))
    s_ipo_applyenddate = Column(VARCHAR2(8))
    s_ipo_priceannouncedate = Column(VARCHAR2(8))
    s_ipo_placingresultdate = Column(VARCHAR2(8))
    s_ipo_fundenddate = Column(VARCHAR2(8))
    s_ipo_capverificationdate = Column(VARCHAR2(8))
    s_ipo_refunddate = Column(VARCHAR2(8))
    s_ipo_expectedcollection = Column(NUMBER(20,4))
    s_ipo_list_fee = Column(NUMBER(20,4))
    s_ipo_namebyplacing = Column(NUMBER(20,4))
    s_ipo_showpricedownlimit = Column(NUMBER(20,4))
    s_ipo_par = Column(NUMBER(20,4))
    s_ipo_purchaseuplimit = Column(NUMBER(20,4))
    s_ipo_op_uplimit = Column(NUMBER(20,4))
    s_ipo_op_downlimit = Column(NUMBER(20,4))
    s_ipo_purchasemv_dt = Column(VARCHAR2(8))
    s_ipo_pubosdtotisscoll = Column(NUMBER(20,4))
    s_ipo_osdexpoffamount = Column(NUMBER(20,4))
    s_ipo_osdexpoffamountup = Column(NUMBER(20,4))
    s_ipo_osdactoffamount = Column(NUMBER(20,4))
    s_ipo_osdactoffprice = Column(NUMBER(20,4))
    s_ipo_osdunderwritingfees = Column(NUMBER(20,4))
    s_ipo_pureffsubratio = Column(NUMBER(20,4))
    s_ipo_reporate = Column(NUMBER(20,4))
    ann_dt = Column(VARCHAR2(8))
    is_failure = Column(NUMBER(5,0))
    s_ipo_otc_cash_pct = Column(NUMBER(24,8))
    opdate = Column(DATETIME)
    opmode = Column(VARCHAR(1))
|
In order to present the sixth edition of the international residency program RESÒ, sponsored by the Foundation for Modern and Contemporary Art CRT, and the Working Geographies volume (which documents all the editions since 2010), a journey has been started through places of culture: residencies, academies and museums. RESÒ is in fact a residency experience that offers a set of reflections on the concepts of boundary, identity and culture in a fluid global scene that is subject to constant interrelations and fractures.
The book which documents the first five editions has already been presented to Circolo dei Lettori in Turin and to the NABA in Milan. On the 6th October it will be presented in Bologna in order to compare it with the long activity of Nosadella.due (Independent Residency for Public Art). Then the tour will continue with the artist Paola Anzichè and the 20th October it will reach Matera, the Capital of Culture 2019, with the SoutHeritage Foundation for Contemporary Art and finally it will get to Bergamo the 9th November, where the program and the volume will be presented as part of the Accademia delle Belle Arti G. Carrara activities with Alessandra Pioselli and Giorgio Cugno.
Some questions and topical issues, such as economy, environment, politics, history, communication and the social sciences, will be explored through texts by the authors and projects by the twenty artists involved in the six editions of RESÒ, exploring unusual artistic routes that, in times of radical transformation, bring the Northern Italian region of Piedmont into dialogue with Egypt, Brazil, India and Colombia. Times and places of each presentation will be announced on the website on the day before each meeting.
|
def rosenbrock_list(**kwargs):
    """Rosenbrock test function returning plain Python lists.

    Reads a Dakota-style parameter dict from kwargs:
      'functions' - number of response functions requested
      'cv'        - continuous design variables [x0, x1]
      'asv'       - active set vector; bit 1 = value, bit 2 = gradient,
                    bit 4 = Hessian
    Returns a dict with any of 'fns', 'fnGrads', 'fnHessians'.
    """
    num_fns = kwargs['functions']
    # if num_fns > 1:
    #     least_sq_flag = true
    # else:
    #     least_sq_flag = false
    x = kwargs['cv']
    asv = kwargs['asv']
    term_a = x[1] - x[0] * x[0]
    term_b = 1 - x[0]
    response = {}
    if asv[0] & 1:  # function value requested
        response['fns'] = [100 * term_a * term_a + term_b * term_b]
    if asv[0] & 2:  # gradient requested
        response['fnGrads'] = [[-400 * term_a * x[0] - 2 * term_b,
                                200 * term_a]]
    if asv[0] & 4:  # Hessian requested
        curv = x[1] - 3 * x[0] * x[0]
        response['fnHessians'] = [[[-400 * curv + 2, -400 * x[0]],
                                   [-400 * x[0], 200]]]
    return response
def rosenbrock_numpy(**kwargs):
    """Rosenbrock test function returning NumPy arrays.

    Same contract as rosenbrock_list, but the 'fns', 'fnGrads' and
    'fnHessians' entries are numpy arrays instead of nested lists.
    """
    from numpy import array
    num_fns = kwargs['functions']
    # if num_fns > 1:
    #     least_sq_flag = true
    # else:
    #     least_sq_flag = false
    x = kwargs['cv']
    asv = kwargs['asv']
    term_a = x[1] - x[0] * x[0]
    term_b = 1 - x[0]
    response = {}
    if asv[0] & 1:  # function value requested
        response['fns'] = array([100 * term_a * term_a + term_b * term_b])
    if asv[0] & 2:  # gradient requested
        response['fnGrads'] = array([[-400 * term_a * x[0] - 2 * term_b,
                                      200 * term_a]])
    if asv[0] & 4:  # Hessian requested
        curv = x[1] - 3 * x[0] * x[0]
        response['fnHessians'] = array([[[-400 * curv + 2, -400 * x[0]],
                                         [-400 * x[0], 200]]])
    return response
|
Chester Avenue and E 36th Street, Cleveland, Ohio.
“Northern Ohio Traction and Light Co ex 1500 Series car used as restaurant”.
The Forest Diner closed on May 28th, 2012. Here’s a full post on its closing, with pics of its final days. To refresh your memory- here’s the way the diner looked a little less than a month ago. The old Silk City diner was entirely encased in a larger restaurant, with about four feet between the diner’s facade and that of the surrounding building.
In these past couple weeks, the surrounding building has been razed, leaving nothing but the diner itself. Word on the street was conflicting- one person at the site saying that it had already been sold and was going to be moved to Virginia, the other that it had not been sold, but was moving to temporary storage off-site until plans can be made for it. Once I hear back from people who know for sure, I’ll post it on the blog. Either way, the diner is being saved, but removed from Rt. 40.
Another recent addition to the collection- a medium format negative from the 1930s of Hartford CT’s Quaker Diner. It’s still there, and still looks just about the same.
Some recent additions to my collection. These photos were taken in 1965. There used to be quite a few trolley conversions in the mid-Atlantic (and elsewhere), but they just didn’t hold up as well as factory-built diners. By the time they came into service as diners, most had served a full lifetime of service on the roads, so the condition was obviously not as good as a factory-built diner. It took work, money and some jerry-rigging to change them over from transportation to food service. But they could be picked up and converted on the cheap, so they were a good way to get into the business. It seems most owners traded up to a proper factory-built diner, or to an on-site construction, once they had earned enough money to do so, so the trolleys didn’t survive very well.
This shot comes from my friend Luke Ryalls, who is down south on a trip with Dalhousie University architecture.
The 11th St. Diner is a 1948 Paramount, moved from Wilkes Barre, PA in 1992.
|
from py2neo import Graph
from py2neo.ext.gremlin import Gremlin
import os
DEFAULT_GRAPHDB_URL = "http://localhost:7474/db/data/"
DEFAULT_STEP_DIR = os.path.dirname(__file__) + '/bjoernsteps/'
class BjoernSteps:
    """Client for a joern code-property graph stored in Neo4j.

    Wraps a py2neo Graph plus its Gremlin resource and lazily injects
    the user-defined groovy steps found in the configured step
    directories before the first gremlin query is executed.
    """

    def __init__(self):
        self._initJoernSteps()
        # groovy step definitions are uploaded once, on the first query
        self.initCommandSent = False

    def setGraphDbURL(self, url):
        """ Sets the graph database URL. By default,
        http://localhost:7474/db/data/ is used."""
        self.graphDbURL = url

    def addStepsDir(self, stepsDir):
        """Add an additional directory containing steps to be injected
        into the server"""
        self.stepsDirs.append(stepsDir)

    def connectToDatabase(self):
        """ Connects to the database server."""
        self.graphDb = Graph(self.graphDbURL)
        self.gremlin = Gremlin(self.graphDb)

    def runGremlinQuery(self, query):
        """ Runs the specified gremlin query on the database. It is
        assumed that a connection to the database has been
        established. To allow the user-defined steps located in the
        joernsteps directory to be used in the query, these step
        definitions are prepended to the query."""
        if not self.initCommandSent:
            self.gremlin.execute(self._createInitCommand())
            self.initCommandSent = True
        return self.gremlin.execute(query)

    def runCypherQuery(self, cmd):
        """ Runs the specified cypher query on the graph database."""
        # Fixed: the name `cypher` was never imported (NameError); use
        # the cypher resource exposed by the py2neo Graph object instead.
        return self.graphDb.cypher.execute(cmd)

    def getGraphDbURL(self):
        """Return the currently configured graph database URL."""
        return self.graphDbURL

    def chunks(self, idList, chunkSize):
        """
        Create chunks from a list of ids.
        This method is useful when you want to execute many independent
        traversals on a large set of start nodes. In that case, you
        can retrieve the set of start node ids first, then use 'chunks'
        to obtain disjoint subsets that can be passed to idListToNodes.
        """
        for i in range(0, len(idList), chunkSize):
            yield idList[i:i + chunkSize]

    def _initJoernSteps(self):
        # defaults; can be changed via setGraphDbURL()/addStepsDir()
        self.graphDbURL = DEFAULT_GRAPHDB_URL
        self.stepsDirs = [DEFAULT_STEP_DIR]

    def _createInitCommand(self):
        """Concatenate all .groovy step files (sorted, recursively) into
        a single init command string."""
        initCommand = ""
        for stepsDir in self.stepsDirs:
            for (root, dirs, files) in os.walk(stepsDir, followlinks=True):
                files.sort()
                for f in files:
                    filename = os.path.join(root, f)
                    if not filename.endswith('.groovy'):
                        continue
                    # open() instead of the removed file() builtin, and
                    # close the handle instead of leaking it
                    with open(filename) as stepFile:
                        initCommand += stepFile.read() + "\n"
        return initCommand
|
Navin leads Drewry’s bulk shipping research team which covers dry bulk, tankers, LPG, LNG, Offshore and multipurpose shipping sectors. He also leads the vessel valuation practice.
Navin is the director of Drewry Maritime Research and manages the core team of Drewry’s bulk shipping research. He started working with Drewry in 2003 as a gas shipping analyst and over the years has gained extensive experience in various domains of maritime research and consulting. Navin has been instrumental in establishing offshore shipping practice at Drewry.
Besides managing Drewry’s bulk shipping research, he is an expert in asset valuation, residual value forecast, forecast asset values, and formulates depreciation strategy of assets for clients. He takes deep interest in cost modelling for shipping, thanks to his natural affinity for number crunching.
Navin works on a variety of projects and is actively involved in advising clients on the issues related to port development, port expansion, port traffic estimation, tariff benchmarking, ICD and CFS traffic forecast, market due diligence, commercial due diligence of assets, due diligence of port operations and freight rate benchmarking.
At Drewry, he has pioneered the use of technology for efficient management and delivery of research and consultancy reports.
|
import tweepy
import inspect
from bottle import PluginError
class TweepyPlugin(object):
    """Bottle plugin that injects an authenticated tweepy.API object
    into route callbacks that declare a parameter named `keyword`
    (default: 'api').

    Credentials and the keyword can be overridden per-route through the
    route's config['tweepy'] dict.
    """
    name = 'tweepy'
    api = 2

    def __init__(self, consumer_key, consumer_secret, access_token,
                 access_token_secret, keyword='api'):
        self.consumer_key = consumer_key
        self.consumer_secret = consumer_secret
        self.access_token = access_token
        self.access_token_secret = access_token_secret
        self.keyword = keyword

    def setup(self, app):
        """Bottle plugin hook: refuse installation when another
        TweepyPlugin with the same keyword is already installed."""
        for other in app.plugins:
            if not isinstance(other, TweepyPlugin):
                continue
            if other.keyword == self.keyword:
                raise PluginError("Found another tweepy plugin with "
                                  "conflicting settings (non-unique keyword).")

    def apply(self, callback, context):
        """Bottle plugin hook: wrap `callback` so it receives a
        tweepy.API instance; pass it through untouched when it does not
        accept the configured keyword argument."""
        conf = context.config.get('tweepy') or {}
        consumer_key = conf.get('consumer_key', self.consumer_key)
        consumer_secret = conf.get('consumer_secret', self.consumer_secret)
        access_token = conf.get('access_token', self.access_token)
        access_token_secret = conf.get('access_token_secret',
                                       self.access_token_secret)
        keyword = conf.get('keyword', self.keyword)

        # Fixed: inspect.getargspec() was removed in Python 3.11; prefer
        # getfullargspec and fall back only on interpreters lacking it.
        if hasattr(inspect, 'getfullargspec'):
            args = inspect.getfullargspec(context.callback)[0]
        else:
            args = inspect.getargspec(context.callback)[0]
        if keyword not in args:
            return callback

        def wrapper(*args, **kwargs):
            # build a fresh authenticated client per request
            auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
            auth.set_access_token(access_token, access_token_secret)
            kwargs[self.keyword] = tweepy.API(auth)
            return callback(*args, **kwargs)
        return wrapper
|
It's been fun doing many events and meeting new people for the past two months.
Drawing Violet Rose with the Girl Scouts of Eastern Massachusetts!
And I was so honored to be part of the Princeton Children's Book Festival. Many book lovers out there!
|
from __future__ import unicode_literals
import os
from . import conf # noqa need the settings
from django.db.models.signals import pre_save, post_save
from django.dispatch import receiver
from django.core.files.base import File as DjangoFile
from filer.models import File, Folder
from filer.utils.files import get_valid_filename
def check_rename(instance, old_name=None):
    """
    Rename the stored file so it matches instance.original_filename,
    when the FILER_ADDONS_CONSISTENT_FILENAMES setting is enabled.
    If old_name is provided, use it, otherwise fetch the old file name
    from the db.
    :param instance: filer file instance (must be saved, i.e. have a pk)
    :param old_name: previous storage name, basename or full path
    :return: None
    """
    if not conf.FILER_ADDONS_CONSISTENT_FILENAMES:
        return
    # only act on persisted instances that actually have a file
    if instance.id and instance.file:
        if old_name is None:
            old_instance = File.objects.get(pk=instance.id)
            old_name = old_instance.file.name
        old_name = os.path.basename(old_name)
        new_name = get_valid_filename(instance.original_filename)
        # done with startswith instead of ==
        # prevent deadlock, when storagem gives the _x5sx4sd suffix!
        splitext = os.path.splitext
        if not (splitext(old_name)[0].startswith(splitext(new_name)[0]) and
                splitext(old_name)[1] == splitext(new_name)[1]):
            # rename: re-save the underlying file under the new name,
            # then delete the old one (including thumbnails)
            # print "do rename: %s to %s" % (old_name, new_name)
            existing_file = open(instance.file.path, mode='rb')
            new_file = DjangoFile(existing_file)
            instance.file.delete(False)  # remove including thumbs
            instance.file.save(new_name, new_file, save=False)
            # print instance.file.name
            # do it here, original_filename is not updated correctly else!
            instance.save()
            existing_file.close()
@receiver(
    post_save,
    sender='filer.File',
    dispatch_uid="filer_addons_unfiled_file_to_folder",
)
@receiver(
    post_save,
    sender='filer.Image',
    dispatch_uid="filer_addons_unfiled_image_to_folder",
)
def filer_unfiled_to_folder(sender, instance, **kwargs):
    """
    check if a file is unfiled, if yes, put into default folder.
    ATTENTION: this signal must be registered before the duplicate detection
    signal => for when only duplicates in the same folder need to be detected!
    (put in folder first, then detect duplicate)
    :param sender: model class that fired the post_save signal
    :param instance: the saved filer File/Image instance
    """
    UNFILED_HANDLING = conf.FILER_ADDONS_UNFILED_HANDLING
    if not UNFILED_HANDLING.get('move_unfiled', None):
        # feature disabled
        return
    created_only = UNFILED_HANDLING.get('created_only', False)
    if created_only and not kwargs.get('created', None):
        return
    if not instance.folder:
        default_folder_name = UNFILED_HANDLING.get(
            'default_folder_name',
            'Unfiled',
        )
        # .first() instead of count()/[0]: one query instead of two
        default_folder = Folder.objects.filter(
            name=default_folder_name,
        ).first()
        if default_folder is None:
            default_folder = Folder(name=default_folder_name)
            default_folder.save()
        instance.folder = default_folder
        instance.save()
@receiver(
    post_save,
    sender='filer.File',
    dispatch_uid="filer_addons_prevent_duplicates_file",
)
@receiver(
    post_save,
    sender='filer.Image',
    dispatch_uid="filer_addons_prevent_duplicates_image",
)
def filer_duplicates_and_rename(sender, instance, **kwargs):
    """
    check for duplicates, dont allow them!
    as this is post save, it will ELIMINATE ALL DUPLICATES of a file,
    if there are...this can be quite dangerous, but also be wonderfull ;-)
    :param sender: model class that fired the post_save signal
    :param instance: the saved filer File/Image instance
    """
    DUPLICATE_HANDLING = conf.FILER_ADDONS_DUPLICATE_HANDLING
    if not DUPLICATE_HANDLING.get('prevent'):
        check_rename(instance)
        return
    created_only = DUPLICATE_HANDLING.get('created_only', False)
    if created_only and not kwargs.get('created', None):
        check_rename(instance)
        return
    file_obj = instance
    # candidates: same content hash, different primary key
    duplicates = File.objects.exclude(pk=file_obj.id).filter(sha1=file_obj.sha1)
    # narrow down? depends.
    if DUPLICATE_HANDLING.get('same_folder_required', None):
        duplicates = duplicates.filter(folder=file_obj.folder)
    if DUPLICATE_HANDLING.get('same_filename_required', None):
        # TODO: is this slugified somehow??!
        duplicates = duplicates.filter(
            original_filename=file_obj.original_filename
        )
    if len(duplicates):
        # pick a duplicate that actually has a file on disk
        duplicate = None
        for file in duplicates:
            if file.file:
                duplicate = file
        if duplicate is None:
            # duplicate without file is nothing we use and would corrupt data!
            return
        instance.delete()
        # Fixed: previously `duplicate` was reset to duplicates[0] here,
        # discarding the entry selected above - duplicates[0] may have no
        # file, which would break old_name/instance.file below.
        old_name = duplicate.file.name
        instance.id = duplicate.id
        instance.file = duplicate.file
        # (fixed: this assignment was accidentally duplicated)
        instance.name = duplicate.name
        instance.description = duplicate.description
        if hasattr(duplicate, 'subject_location'):
            instance.subject_location = duplicate.subject_location
        # to be defined: set some more fields from duplicate, if filled?!
        # arf dont touch django magic
        # instance._uncommitted_filefields = []
        # instance._state = duplicate._state
        instance.save()
        check_rename(instance, old_name=old_name)
    else:
        """
        when updating a file in a files detail view, it already has the new,
        correct name leaving this here, for example when manipulating files
        (and original_filename) programmatically.
        """
        check_rename(instance)
@receiver(
    pre_save,
    sender='filer.File',
    dispatch_uid="filer_addons_prevent_replace_orphans",
)
@receiver(
    pre_save,
    sender='filer.Image',
    dispatch_uid="filer_addons_prevent_replace_orphans",
)
def filer_prevent_rename_orphans(sender, instance, **kwargs):
    """
    Delete the old file from storage when a file is replaced via
    admin > advanced > replace file, so no orphan is left behind.
    https://github.com/divio/django-filer/pull/958
    """
    if not conf.FILER_ADDONS_REPLACE_FIX:
        return
    # Fixed: the bare `except: pass` silently swallowed every error;
    # the only expected failure is the lookup of a not-yet-saved
    # instance, so catch exactly that.
    try:
        from_db = File.objects.get(id=instance.id)
    except File.DoesNotExist:
        # new instance (or id is None): nothing to clean up
        return
    if from_db.file != instance.file:
        from_db.file.delete(save=False)
|
I see that 'Closing Down' has appeared in the window of Zeus Menswear shop on Central Road. It's always a crying shame to see shops closing down in Worcester Park - especially small independent ones (which, let's face it, nearly always seem to be the casualties).
That said, should I be bemoaning the passing of a store that I've never even been in to? I wonder how many of my fellow Worcester Parkers 'keep meaning to go in' to Zeus and many other shops in Worcester Park but walk on by every time.
Farewell, Zeus. I did mean to go in and spend some money. But I didn't.
|
import pdb
import operator
def mul(seq):
    """Return the product of the numbers in seq (1 for an empty seq)."""
    product = 1
    for factor in seq:
        product = product * factor
    return product
print 'This version of gm for use during first lecture.'
class Potential:
    """A discrete potential (factor): a non-negative function over joint
    assignments of a set of named variables, stored sparsely as a dict
    from value tuples to weights."""
    # variables: list of strings naming the variables
    # pot: dictionary mapping tuples of variable values to potential value
    def __init__(self, variables, pot):
        self.vars = variables
        # variable name -> position of that variable inside value tuples
        self.indices = dict(zip(variables, range(len(variables))))
        self.pot = pot
    def __str__(self):
        return 'Potential('+str(self.vars)+','+str(self.pot)+')'
    # vt is a tuple of values; return the associated potential value
    # return 0 if vt is not explicitly represented in self.pot
    def valTuple(self, vt):
        return self.pot[vt] if vt in self.pot else 0.0
    # Return a list of all elements that have weight > 0 in this potential
    def support(self):
        return [k for (k, v) in self.pot.items() if v > 0]
    # assign is a dictionary mapping variable names to values; return
    # the associated potential value.
    def val(self, assign):
        return self.valTuple(tuple([assign[var] for var in self.vars]))
    # Product of two instances of Potential is a new Potential defined
    # on the union of the variables of self and other
    def mul(self, other):
        # Three sets of vars: only in self, in both, only in other
        selfOnly = set(self.vars).difference(set(other.vars))
        otherOnly = list(set(other.vars).difference(set(self.vars)))
        both = set(self.vars).intersection(set(other.vars))
        # keep whole tuple from self; add some indices from other
        otherIndices = [other.indices[v] for v in otherOnly]
        newPot = {}
        # pair up every support element of self with every compatible
        # support element of other; multiply their weights
        for e1 in self.support():
            for e2 in other.support():
                if self.agrees(other, e1, e2, both):
                    newElt = tuple(list(e1) + [e2[i] for i in otherIndices])
                    newPot[newElt] = self.valTuple(e1) * other.valTuple(e2)
        return Potential(self.vars + otherOnly, newPot)
    # vs is a list of variable names
    # Assume: tuple1 is an assignment of the variables in self, tuple
    # 2 is an assignment of variables in other. Return True if they
    # agree on the values of the variables in vs
    def agrees(self, other, tuple1, tuple2, vs):
        for v in vs:
            if tuple1[self.indices[v]] != tuple2[other.indices[v]]:
                return False
        return True
    # cVars is a list of variable names
    # cVals is a list of the same length of values for those variables
    # Treat self as a joint probability distribution, and this as the
    # operation of conditioning on the event cVars = cVals
    # - select out entries for which cVars = cVals
    # - remove cVars from the potential
    # - sum potential values if there are duplicate entries
    # - renormalize to obtain a distribution
    # Returns a new instance of Potential defined on previous vars minus cVars
    def condition(self, cVars, cVals):
        newPot = {}
        indices = [self.indices[v] for v in cVars]
        for e in self.support():
            # keep only entries consistent with the evidence
            if all(e[i] == v for (i, v) in zip(indices, cVals)):
                newPot[removeIndices(e, indices)] = self.pot[e]
        return Potential(removeIndices(self.vars, indices), newPot).normalize()
    # qVars is a list of variable names
    # Sum out all other variables, returning a new potential on qVars
    def marginalize(self, qVars):
        newPot = {}
        # positions of the variables being summed out
        indices = removeVals(range(len(self.vars)),
                             [self.indices[v] for v in qVars])
        for e in self.support():
            newE = removeIndices(e, indices)
            addToEntry(newPot, newE, self.valTuple(e))
        return Potential(qVars, newPot)
    # Divide through by sum of values; returns a new Potential on the
    # same variables with potential values that sum to 1 over the
    # whole domain.
    def normalize(self):
        total = sum(self.pot.values())
        newPot = dict([(v, p/total) for (v, p) in self.pot.items()])
        return Potential(self.vars, newPot)
# Convenient abbreviation
P = Potential
# Useful as the multiplicative identity: p.mul(iPot) = p
# (empty variable list, single empty-tuple entry with weight 1)
iPot = Potential([], {tuple() : 1.0})
######################################################################
# Bayesian networks
######################################################################
class BNNode:
    """One node of a Bayesian network: a variable name, its parent
    variable names, and its conditional probability distribution."""
    # name is a string naming the variable
    # parents is a list of strings naming parent variables
    # cpd is an instance of Potential, defined on variables [name] + parents
    # It needs to be a well-formed conditional probability
    # distribution, so that for each value v of name,
    # sum_{values of parents} cpd([v] + values of parents) = 1
    def __init__(self, name, parents, cpd):
        self.name = name
        self.parents = parents
        self.cpd = cpd
class BN:
    """A Bayesian network: a list of BNNodes whose CPDs jointly define a
    distribution over all the variables."""
    # bn is a dictionary
    # key: string
    # value: (list of strings, Potential on varName and parents)
    def __init__(self, nodes):
        self.vars = [n.name for n in nodes]
        # LPK: Check to be sure all parents are in network
        self.nodes = nodes
    # assign is a dictionary from variable names to values, with an
    # entry for every variable in the network
    # Returns probability of that assignment
    def prob(self, assign):
        return mul([n.cpd.val(assign) for n in self.nodes])
    # Create a joint probability distribution
    # Returns a potential representing the joint distribution, defined
    # over all the variables in the network
    def joint(self):
        j = reduce(Potential.mul, [n.cpd for n in self.nodes], iPot)
        # sanity check: a joint distribution must sum to 1 (within fp error)
        assert 1-1e-8 < sum(j.pot.values()) < 1 + 1e-8
        return j
    # queryVars is a list of variable names
    # eVars is a list of variable names
    # eValues is a list of values, one for each of eVars
    # Returns a joint distribution on the query variables representing
    # P(queryVars | eVars = eValues)
    # NOTE(review): the [] defaults are mutable, but they are never
    # modified below, so the usual mutable-default pitfall does not bite.
    def query(self, queryVars, eVars = [], eValues = []):
        # condition the full joint on the evidence, then sum out
        # everything except the query variables
        return self.joint().condition(eVars, eValues).marginalize(queryVars)
######################################################################
def removeIndices(xs, indices):
    """Return a tuple of the items of xs whose positions are NOT listed
    in indices, preserving the original order."""
    return tuple(x for pos, x in enumerate(xs) if pos not in indices)
def removeVals(xs, vals):
    """Return a tuple of the items of xs that do not appear in vals.
    Used instead of set difference because the order of the remaining
    xs must be maintained."""
    kept = [item for item in xs if item not in vals]
    return tuple(kept)
def addToEntry(d, e, v):
    """Increment the numeric value d[e] by v, treating a missing key as 0
    (mutates d in place)."""
    d[e] = d.get(e, 0) + v
######################################################################
# Test cases
######################################################################
# Wet grass example network.
# Variables (all binary, 0 = false / 1 = true):
#   R = rain, S = sprinkler,
#   J = next-door grass wet (depends on R),
#   T = this grass wet (depends on R and S).
wg = BN([BNNode('R', [], P(['R'], {(0,) : .8, (1,) : .2})),
         BNNode('S', [], P(['S'], {(0,) : .9, (1,) : .1})),
         BNNode('J', ['R'],
                P(['J', 'R'],
                  {(0, 0) : 0.8, (0, 1) : 0.0, (1, 0) : 0.2, (1, 1) : 1.0})),
         BNNode('T', ['R', 'S'],
                P(['T', 'R', 'S'],
                  {(0, 0, 0) : 1.0, (1, 0, 0) : 0.0,
                   (0, 0, 1) : 0.1, (1, 0, 1) : 0.9,
                   (0, 1, 0) : 0.0, (1, 1, 0) : 1.0,
                   (0, 1, 1) : 0.0, (1, 1, 1) : 1.0}))])
# Test BN query method using the wet grass model.
def test2():
    """Exercise BN.prob and BN.query on the wet grass network, echoing
    each expression before printing its value. Output is unchanged; the
    prints use the single-argument parenthesized form, which behaves
    identically under Python 2."""
    print('Testing prob')
    print("wg.prob({'R' : 1, 'S' : 1, 'T' : 0, 'J' : 0})")
    print(wg.prob({'R' : 1, 'S' : 1, 'T' : 0, 'J' : 0}))
    print("wg.prob({'R' : 0, 'S' : 0, 'T' : 0, 'J' : 0})")
    print(wg.prob({'R' : 0, 'S' : 0, 'T' : 0, 'J' : 0}))
    print("wg.prob({'R' : 1, 'S' : 0, 'T' : 0, 'J' : 0})")
    print(wg.prob({'R' : 1, 'S' : 0, 'T' : 0, 'J' : 0}))
    print("wg.prob({'R' : 0, 'S' : 1, 'T' : 0, 'J' : 0})")
    print(wg.prob({'R' : 0, 'S' : 1, 'T' : 0, 'J' : 0}))
    print('Testing query')
    print("wg.query(['S'])")
    print(wg.query(['S']))
    print("wg.query(['S'], ['T'], [1])")
    print(wg.query(['S'], ['T'], [1]))
    print("wg.query(['S'], ['T', 'J'], [1, 1])")
    print(wg.query(['S'], ['T', 'J'], [1, 1]))
    print("wg.query('R')")
    print(wg.query('R'))
    print("wg.query('R', ['T'], [1])")
    print(wg.query('R', ['T'], [1]))
    print("wg.query('R', ['T', 'S'], [1, 1])")
    print(wg.query('R', ['T', 'S'], [1, 1]))
print "Loaded gm.py"
|
JUST LINES AND SLEEK DESIGN.
VITEO lamps and luminaires integrate perfectly with terraces and gardens. They help you read, enjoy company or just marvel at the illuminated garden.
|
"""
This module contains the Distribution class which defines a standard
interface for distributions It also provides several implemented
distributions, which inherit from Distribution Any user-specified
distributions should inherit from Distribution
"""
import numpy as np
from .utils import overrides, package_path
import os
from scipy import stats
import pickle
class Distribution(object):
    """
    Interface/abstract class for distributions.
    Any user-specified distributions should be defined by inheriting from this class and
    overriding the appropriate methods (at minimum E_val, dEdX_val and __hash__).
    """

    def __init__(self, ndims=2, nbatch=100):
        """ Creates a Distribution object
        :param ndims: the dimension of the state space for this distribution
        :param nbatch: the number of sampling particles to run simultaneously
        :returns: a Distribution object
        :rtype: Distribution
        """
        # distribution dimensions
        self.ndims = ndims
        # number of sampling particles to use
        self.nbatch = nbatch
        # TensorflowDistributions require some special treatment
        # this attribute is to be used instead of isinstance, as that would require
        # tensorflow to be imported globally
        if not hasattr(self, 'backend'):
            self.backend = 'numpy'
        # true iff being sampled with a jump process
        self.mjhmc = None
        # number of times energy op has been called
        self.E_count = 0
        # number of times gradient op has been called
        self.dEdX_count = 0
        # only set to true when I have a bias initialization and am being burned in
        # to generate and cache a fair initialization for continuous samplers
        self.generation_instance = False
        # so some distributions may modify the default
        if not hasattr(self, 'max_n_particles'):
            self.max_n_particles = None
        # set the state fairly. calls out to a cache
        self.init_X()

    def E(self, X):
        """Count the energy evaluations (one per particle/column of X),
        then delegate to the subclass energy function E_val."""
        self.E_count += X.shape[1]
        return self.E_val(X)

    def E_val(self, X):
        """
        Subclasses should implement this with the correct energy function
        """
        raise NotImplementedError()

    def dEdX(self, X):
        """Count the gradient evaluations (one per particle/column of X),
        then delegate to the subclass gradient function dEdX_val."""
        self.dEdX_count += X.shape[1]
        return self.dEdX_val(X)

    def dEdX_val(self, X):
        """
        Subclasses should implement this with the correct energy gradient function
        """
        raise NotImplementedError()

    def __hash__(self):
        """ Subclasses should implement this as the hash of the tuple of all parameters
        that effect the distribution, including ndims. This is very important!!
        nbatch should not be part of the hash!! Including it will break everything
        As an example, see how this is implemented in Gaussian
        :returns: a hash of the relevant parameters of self
        :rtype: int
        """
        raise NotImplementedError()

    def init_X(self):
        """
        Sets self.Xinit to a good initial value
        """
        # TODO: make production ready by adding global flag to disable
        # research options like this
        self.cached_init_X()

    def cached_init_X(self):
        """ Sets self.Xinit to cached (serialized) initial states for continuous-time samplers, generated by burn in
        *For use with continuous-time samplers only*
        :returns: None
        :rtype: none
        """
        distr_name = type(self).__name__
        distr_hash = hash(self)
        file_name = '{}_{}.pickle'.format(distr_name, distr_hash)
        file_prefix = '{}/initializations'.format(package_path())
        if file_name in os.listdir(file_prefix):
            # cache hit: load the fair initialization for this sampler type
            with open('{}/{}'.format(file_prefix, file_name), 'rb') as cache_file:
                mjhmc_endpt, _, _, control_endpt = pickle.load(cache_file)
            if self.mjhmc:
                self.Xinit = mjhmc_endpt[:, :self.nbatch]
            else:
                self.Xinit = control_endpt[:, :self.nbatch]
        else:
            # cache miss: burn in to generate a fair initialization, cache
            # it, then recurse to take the cache-hit path above
            from mjhmc.misc.gen_mj_init import MAX_N_PARTICLES, cache_initialization
            # modify this object so it can be used by gen_mj_init
            old_nbatch = self.nbatch
            self.nbatch = self.max_n_particles or MAX_N_PARTICLES
            self.generation_instance = True
            # must rebuild now that nbatch is changed back
            if self.backend == 'tensorflow':
                self.build_graph()
            # start with biased initializations
            # changes self.nbatch
            try:
                self.gen_init_X()
            except NotImplementedError:
                # completely arbitrary choice
                self.Xinit = np.random.randn(self.ndims, self.nbatch)
            # generate and cache fair initialization
            cache_initialization(self)
            # reconstruct this object using fair initialization
            self.nbatch = old_nbatch
            self.generation_instance = False
            # must rebuild now that nbatch is changed back
            if self.backend == 'tensorflow':
                self.build_graph()
            self.cached_init_X()

    def gen_init_X(self):
        """ Sets self.Xinit to generated initial states for the sampling particles
        *For use with discrete-time samplers only*
        :returns: None
        :rtype: None
        """
        raise NotImplementedError()

    def reset(self):
        """
        resets the object. returns self for convenience
        """
        self.E_count = 0
        self.dEdX_count = 0
        if not self.generation_instance:
            self.init_X()
        return self

    def __call__(self, X):
        """
        Convenience method for NUTS compatibility
        returns -E, -dEdX
        """
        rshp_X = X.reshape(len(X), 1)
        E = float(self.E(rshp_X))
        dEdX = self.dEdX(rshp_X).T[0]
        return -E, -dEdX

    def load_cache(self):
        """ Loads and returns the cached fair initializations and
        estimated variances associated with this
        distribution. Throws an error if the cache does not exist
        :returns: the loaded cache: (fair_initialization, emc_var_estimate, true_var_estimate)
        :rtype: (np.ndarray, float, float)
        """
        distr_name = type(self).__name__
        distr_hash = hash(self)
        file_name = '{}_{}.pickle'.format(distr_name, distr_hash)
        file_prefix = '{}/initializations'.format(package_path())
        # Fixed: the pickle must be opened in binary mode ('rb'), matching
        # cached_init_X; text mode breaks pickle.load under Python 3.
        with open('{}/{}'.format(file_prefix, file_name), 'rb') as cache_file:
            return pickle.load(cache_file)
class LambdaDistribution(Distribution):
    """ An `anonymous' distribution object for quick
    experimentation. Due to the initialization time that is required
    at first run it, one shouldn't use this object in the
    long-term. Rather create your own distribution class that inherits
    from Distribution.
    You should give your LambdaDistribution objects a name. Use a
    descriptive name, and use the same for functionally equivalent
    LambdaDistributions - the hash of the name is used to label the
    initialization information which is generated at first run time of
    a new distribution. This requirement is a side effect of the
    unfortunate fact that there is no computable hash function which
    assigns functionally identical programs to the same number.
    """
    #pylint: disable=too-many-arguments
    def __init__(self, energy_func=None, energy_grad_func=None, init=None, name=None):
        """ Creates an anonymous distribution object.
        :param energy_func: function specifying the energy
        :param energy_grad_func: function specifying gradient of the energy
        :param init: fair initialization for this distribution. array of shape (ndims, nbatch)
        :param name: name of this distribution. use the same name for
           functionally identical distributions
        :returns: an anonymous distribution object
        :rtype: LambdaDistribution
        """
        self.energy_func = energy_func
        self.energy_grad_func = energy_grad_func
        self.init = init
        # TODO: raise warning if name is not passed
        # Fixed: np.random is a module and is not callable; np.random.rand()
        # provides the random fallback name that was intended.
        self.name = name or str(np.random.rand())
        super(LambdaDistribution, self).__init__(ndims=init.shape[0], nbatch=init.shape[1])

    @overrides(Distribution)
    def E_val(self, X):
        # Fixed: previously evaluated a Gaussian energy via self.J, an
        # attribute this class never defines (copy-paste from Gaussian);
        # delegate to the user-supplied energy function instead.
        return self.energy_func(X)

    @overrides(Distribution)
    def dEdX_val(self, X):
        # Fixed: same copy-paste bug as E_val; use the supplied gradient.
        return self.energy_grad_func(X)

    @overrides(Distribution)
    def gen_init_X(self):
        self.Xinit = self.init

    @overrides(Distribution)
    def __hash__(self):
        # Fixed: nbatch must not enter the hash (see Distribution.__hash__
        # contract) - it would fragment the initialization cache.
        return hash((self.ndims, self.name))
class Gaussian(Distribution):
    """Anisotropic (ill-conditioned) Gaussian distribution."""
    def __init__(self, ndims=2, nbatch=100, log_conditioning=6):
        """
        Energy function, gradient, and hyperparameters for the "ill
        conditioned Gaussian" example from the LAHMC paper.
        :param log_conditioning: log10 of the ratio between the largest
            and smallest precision along the coordinate axes
        """
        # per-dimension precisions, log-spaced from 10**-log_conditioning to 1
        self.conditioning = 10**np.linspace(-log_conditioning, 0, ndims)
        # diagonal precision matrix
        self.J = np.diag(self.conditioning)
        self.description = '%dD Anisotropic Gaussian, %g self.conditioning'%(ndims, 10**log_conditioning)
        super(Gaussian, self).__init__(ndims, nbatch)
    @overrides(Distribution)
    def E_val(self, X):
        # quadratic energy x^T J x / 2, evaluated per column of X
        return np.sum(X*np.dot(self.J,X), axis=0).reshape((1,-1))/2.
    @overrides(Distribution)
    def dEdX_val(self, X):
        # gradient (J + J^T) x / 2; J is symmetric here, so this is J x
        return np.dot(self.J,X)/2. + np.dot(self.J.T,X)/2.
    @overrides(Distribution)
    def gen_init_X(self):
        # sample each coordinate with its own standard deviation 1/sqrt(precision)
        self.Xinit = (1./np.sqrt(self.conditioning).reshape((-1,1))) * np.random.randn(self.ndims,self.nbatch)
    @overrides(Distribution)
    def __hash__(self):
        # nbatch deliberately excluded, per the Distribution.__hash__ contract
        return hash((self.ndims, hash(tuple(self.conditioning))))
class RoughWell(Distribution):
    """Broad quadratic well with a superimposed cosine ripple."""
    def __init__(self, ndims=2, nbatch=100, scale1=100, scale2=4):
        """
        Energy function, gradient, and hyperparameters for the "rough well"
        example from the LAHMC paper.
        :param scale1: width (std dev) of the smooth quadratic well
        :param scale2: period scale of the rough cosine ripple
        """
        self.scale1 = scale1
        self.scale2 = scale2
        self.description = '{} Rough Well'.format(ndims)
        super(RoughWell, self).__init__(ndims, nbatch)
    @overrides(Distribution)
    def E_val(self, X):
        # smooth quadratic term plus the rough cosine term, per column
        cosX = np.cos(X*2*np.pi/self.scale2)
        E = np.sum((X**2) / (2*self.scale1**2) + cosX, axis=0).reshape((1,-1))
        return E
    @overrides(Distribution)
    def dEdX_val(self, X):
        # derivative of the quadratic term plus derivative of the cosine term
        sinX = np.sin(X*2*np.pi/self.scale2)
        dEdX = X/self.scale1**2 + -sinX*2*np.pi/self.scale2
        return dEdX
    @overrides(Distribution)
    def gen_init_X(self):
        # draw from the smooth envelope N(0, scale1^2)
        self.Xinit = self.scale1 * np.random.randn(self.ndims, self.nbatch)
    @overrides(Distribution)
    def __hash__(self):
        # nbatch deliberately excluded, per the Distribution.__hash__ contract
        return hash((self.ndims, self.scale1, self.scale2))
class MultimodalGaussian(Distribution):
    """Equal mixture of two isotropic Gaussians whose means are
    separated along the first coordinate axis."""

    def __init__(self, ndims=2, nbatch=100, separation=3):
        """
        :param separation: controls the distance between the two modes
        """
        # Fixed: remember the parameter - __hash__ reads self.separation,
        # which was never stored and raised AttributeError.
        self.separation = separation
        self.sep_vec = np.array([separation] * nbatch +
                                [0] * (ndims - 1) * nbatch).reshape(ndims, nbatch)
        # separated along first axis
        self.sep_vec[0] += separation
        super(MultimodalGaussian, self).__init__(ndims, nbatch)

    @overrides(Distribution)
    def E_val(self, X):
        # mixture energy: -log( exp(-||x + s||^2) + exp(-||x - s||^2) )
        trim_sep_vec = self.sep_vec[:, :X.shape[1]]
        return -np.log(np.exp(-np.sum((X + trim_sep_vec)**2, axis=0)) +
                       np.exp(-np.sum((X - trim_sep_vec)**2, axis=0)))

    @overrides(Distribution)
    def dEdX_val(self, X):
        # allows for partial batch size
        trim_sep_vec = self.sep_vec[:, :X.shape[1]]
        common_exp = np.exp(np.sum(4 * trim_sep_vec * X, axis=0))
        # floating point hax
        return ((2 * ((X - trim_sep_vec) * common_exp + trim_sep_vec + X)) /
                (common_exp + 1))

    @overrides(Distribution)
    def init_X(self):
        # okay, this is pointless... sep vecs cancel
        self.Xinit = ((np.random.randn(self.ndims, self.nbatch) + self.sep_vec) +
                      (np.random.randn(self.ndims, self.nbatch) - self.sep_vec))

    @overrides(Distribution)
    def __hash__(self):
        # nbatch deliberately excluded, per the Distribution.__hash__ contract
        return hash((self.ndims, self.separation))
class TestGaussian(Distribution):
    """Isotropic Gaussian with standard deviation ``sigma``."""
    def __init__(self, ndims=2, nbatch=100, sigma=1.):
        """Simple default unit variance gaussian for testing samplers
        """
        self.sigma = sigma
        super(TestGaussian, self).__init__(ndims, nbatch)
    @overrides(Distribution)
    def E_val(self, X):
        # Per-sample energy ||x||^2 / (2 sigma^2), kept as a (1, nbatch) row.
        squared_norms = np.sum(X**2, axis=0).reshape((1, -1))
        return squared_norms / (2. * self.sigma ** 2)
    @overrides(Distribution)
    def dEdX_val(self, X):
        # Gradient of the quadratic energy.
        return X / self.sigma ** 2
    @overrides(Distribution)
    def gen_init_X(self):
        # NOTE(review): initialization ignores sigma (unit-variance draw);
        # presumably intentional for sampler tests -- confirm.
        self.Xinit = np.random.randn(self.ndims, self.nbatch)
    @overrides(Distribution)
    def __hash__(self):
        return hash((self.ndims, self.sigma))
#pylint: disable=too-many-instance-attributes
class ProductOfT(Distribution):
    """ Provides the product of T experts distribution
    """
    #pylint: disable=too-many-arguments
    def __init__(self, ndims=36, nbasis=36, nbatch=100, lognu=None, W=None, b=None):
        """ Product of T experts, assumes a fixed W that is sparse and alpha that is

        :param ndims: dimensionality of the state (must equal nbasis)
        :param nbasis: number of Student's-t experts
        :param lognu: per-expert log degrees of freedom (random if None)
        :param W: receptive fields, [ndims x nbasis] (identity if None)
        :param b: per-expert biases (zeros if None)
        """
        # awkward hack to import theano in poe only
        try:
            import theano.tensor as T
            import theano
            self.theano = theano
            self.T = T
        except Exception:
            # Narrowed from a bare ``except:`` so KeyboardInterrupt/SystemExit
            # are not swallowed; any import-time failure still surfaces as an
            # ImportError, as before.
            raise ImportError("Theano could not be imported")
        if ndims != nbasis:
            raise NotImplementedError("Initializer only works for ndims == nbasis")
        self.ndims = ndims
        self.nbasis = nbasis
        self.nbatch = nbatch
        if W is None:
            W = np.eye(ndims, nbasis)
        self.weights = self.theano.shared(np.array(W, dtype='float32'), 'W')
        if lognu is None:
            # Degrees of freedom drawn uniformly from (2.1, 4.1) so each
            # expert has finite variance.
            pre_nu = np.random.rand(nbasis,) * 2 + 2.1
        else:
            pre_nu = np.exp(lognu)
        self.nu = self.theano.shared(np.array(pre_nu, dtype='float32'), 'nu')
        if b is None:
            b = np.zeros((nbasis,))
        self.bias = self.theano.shared(np.array(b, dtype='float32'), 'b')
        # Compile the energy and its gradient once; the compiled callables
        # shadow the E_val / dEdX_val methods of the Distribution API.
        state = T.matrix()
        energy = self.E_def(state)
        gradient = T.grad(T.sum(energy), state)
        #@overrides(Distribution)
        self.E_val = self.theano.function([state], energy, allow_input_downcast=True)
        #@overrides(Distribution)
        self.dEdX_val = self.theano.function([state], gradient, allow_input_downcast=True)
        super(ProductOfT,self).__init__(ndims,nbatch)
        self.backend = 'theano'
    def E_def(self,X):
        """
        energy for a POE with student's-t expert in terms of:
           samples [# dimensions]x[# samples] X
           receptive fields [# dimensions]x[# experts] W
           biases [# experts] b
           degrees of freedom [# experts] nu
        """
        rshp_b = self.bias.reshape((1,-1))
        rshp_nu = self.nu.reshape((1, -1))
        alpha = (rshp_nu + 1.)/2.
        energy_per_expert = alpha * self.T.log(1 + ((self.T.dot(X.T, self.weights) + rshp_b) / rshp_nu) ** 2)
        energy = self.T.sum(energy_per_expert, axis=1).reshape((1, -1))
        return energy
    @overrides(Distribution)
    def gen_init_X(self):
        #hack to remap samples from a generic product of experts to
        #the model we are actually going to generate samples from
        Zinit = np.zeros((self.ndims, self.nbatch))
        for ii in xrange(self.ndims):
            Zinit[ii] = stats.t.rvs(self.nu.get_value()[ii], size=self.nbatch)
        Yinit = Zinit - self.bias.get_value().reshape((-1, 1))
        # Map generic expert samples through W^-1 (square by construction).
        self.Xinit = np.dot(np.linalg.inv(self.weights.get_value()), Yinit)
    @overrides(Distribution)
    def __hash__(self):
        # Hash every parameter that defines the distribution.
        return hash((self.ndims,
                     self.nbasis,
                     hash(tuple(self.nu.get_value())),
                     hash(tuple(self.weights.get_value().ravel())),
                     hash(tuple(self.bias.get_value().ravel()))))
|
This was my second attempt with AtoZ Challenge. My first stint was last year as a newbie. Though I knew what to expect this year, I wasn’t sure if I would be surviving as I had a lot going on. But nevertheless, I tried. I wanted to try rather than giving up right from the beginning. As always, the challenge has helped in many ways both in writing and also about learning a bit more about myself.
My first AtoZ in 2016 was full of excitement, and I miraculously survived by posting meaningful content and, the best part, I was right on schedule! But this year was different. I remember the pressure and the freaking out I did last year to post every day before the clock struck midnight. Since I was a newbie, I was all stressed out. I kept this in mind and wanted to take up the challenge this year without feeling stressed. I made up my mind that it is okay not to be on schedule, and I wrote only when I could.
Congratulations on completing the challenge – it’s a shame you didn’t network, that’s the most fun part!
Congrats on completing the challenge. I agree that writing short posts really works in a challenge like this. It’s easier to read and then one can sure visit more and more folks.
|
import _surface
import chimera
try:
    import chimera.runCommand
except ImportError:
    # Optional module; older Chimera builds may not ship it.
    pass
from VolumePath import markerset as ms
try:
    from VolumePath import Marker_Set, Link
    new_marker_set = Marker_Set
except ImportError:
    # Fall back to the dialog-based API on Chimera versions that do not
    # export Marker_Set directly.
    from VolumePath import volume_path_dialog
    d = volume_path_dialog(True)
    new_marker_set = d.new_marker_set
marker_sets = {}
surf_sets = {}
# One entry per particle: (x, y, z) center, (r, g, b) color, radius.
# Data transcribed verbatim from the generated per-particle statements;
# gray (0.7, 0.7, 0.7) markers are ordinary particles, orange (1, 0.7, 0)
# markers are highlighted ones.
_PARTICLES = [
    ((5285.54, 11341.9, 1266.98), (0.7, 0.7, 0.7), 890.203),
    ((6267.49, 10231.7, 2218.72), (0.7, 0.7, 0.7), 792.956),
    ((5755.87, 8443.87, 1755.19), (0.7, 0.7, 0.7), 856.786),
    ((5944.66, 9768.49, -158.49), (0.7, 0.7, 0.7), 963.679),
    ((5367.6, 8543.67, -1285.72), (0.7, 0.7, 0.7), 761.442),
    ((4587.65, 6515.84, -83.8183), (0.7, 0.7, 0.7), 961.183),
    ((3461.98, 5536.51, 774.218), (0.7, 0.7, 0.7), 753.151),
    ((3477.3, 5981.21, 12.3706), (1, 0.7, 0), 1098.07),
    ((2712.38, 4839.23, 2289.47), (0.7, 0.7, 0.7), 1010.42),
    ((1104.07, 5036.13, 2675.56), (1, 0.7, 0), 821.043),
    ((1401.61, 4296.47, 4376.8), (0.7, 0.7, 0.7), 873.876),
    ((2079.88, 5166.2, 4888.51), (0.7, 0.7, 0.7), 625.532),
    ((2711.46, 5856.52, 6126.36), (0.7, 0.7, 0.7), 880.474),
    ((2392.22, 6942.3, 5562.68), (0.7, 0.7, 0.7), 659.161),
    ((2095.77, 8244.87, 7401.45), (0.7, 0.7, 0.7), 831.745),
    ((4181.79, 9261.98, 9458.4), (0.7, 0.7, 0.7), 803.065),
    ((5790.45, 8413.5, 8741.98), (0.7, 0.7, 0.7), 610.262),
    ((5203.45, 8225.03, 8802.05), (0.7, 0.7, 0.7), 741.265),
    ((4149.33, 7246.24, 7957.07), (0.7, 0.7, 0.7), 748.625),
    ((2847.89, 6635.09, 8322.73), (0.7, 0.7, 0.7), 677.181),
    ((3320.45, 5332.86, 6334.83), (0.7, 0.7, 0.7), 616.015),
    ((3609.98, 6471.49, 8025.94), (0.7, 0.7, 0.7), 653.154),
    ((4417.02, 5780.82, 8183.08), (0.7, 0.7, 0.7), 595.33),
    ((5321.91, 6495.04, 8813.66), (0.7, 0.7, 0.7), 627.901),
    ((5643.39, 7832.99, 8429.98), (0.7, 0.7, 0.7), 663.941),
    ((5873.76, 9308.5, 8974.68), (0.7, 0.7, 0.7), 663.899),
    ((5075.97, 8135.59, 8406.41), (0.7, 0.7, 0.7), 644.694),
    ((4296.78, 7392.28, 6506.27), (0.7, 0.7, 0.7), 896.802),
    ((4169.32, 6353.38, 7030.46), (0.7, 0.7, 0.7), 576.38),
    ((3641.7, 5188.88, 6772.72), (0.7, 0.7, 0.7), 635.092),
    ((4566.35, 5023.36, 6807.17), (0.7, 0.7, 0.7), 651.505),
    ((3178.24, 5212.79, 5802.02), (0.7, 0.7, 0.7), 718.042),
    ((3462.31, 5123.43, 7566.67), (0.7, 0.7, 0.7), 726.714),
    ((4851.65, 5186.4, 7904.56), (0.7, 0.7, 0.7), 673.585),
    ((4405.94, 6426.33, 8292.53), (0.7, 0.7, 0.7), 598.418),
    ((4297.14, 7138.95, 9379.59), (0.7, 0.7, 0.7), 693.382),
    ((4000.83, 5875.07, 7335.34), (0.7, 0.7, 0.7), 804.038),
    ((4842.35, 6232.66, 8975.94), (0.7, 0.7, 0.7), 816.178),
    ((5620.05, 5926.64, 8063.02), (0.7, 0.7, 0.7), 776.628),
    ((4535.19, 6122.51, 8984.33), (0.7, 0.7, 0.7), 750.656),
    ((4479.25, 4866.2, 7896.88), (0.7, 0.7, 0.7), 709.625),
    ((3267.87, 3441.61, 7723.6), (0.7, 0.7, 0.7), 927.681),
    ((4158.55, 2578.54, 10044.9), (0.7, 0.7, 0.7), 1088.21),
    ((3375.26, 2176.27, 8342.43), (0.7, 0.7, 0.7), 736.147),
    ((4211.22, 3623.71, 8420.38), (0.7, 0.7, 0.7), 861.101),
    ((4331.56, 3595.69, 6467.44), (0.7, 0.7, 0.7), 924.213),
    ((6095.12, 2766.37, 6756.62), (0.7, 0.7, 0.7), 881.828),
    ((5651.57, 1754.71, 8491.76), (0.7, 0.7, 0.7), 927.681),
    ((6082.32, 1817.25, 6661.8), (0.7, 0.7, 0.7), 831.576),
    ((5641.96, 2468.79, 4933.65), (0.7, 0.7, 0.7), 859.494),
    ((5257.38, 1223.83, 5310.65), (0.7, 0.7, 0.7), 704.845),
    ((4526.6, 2592.91, 4573.49), (0.7, 0.7, 0.7), 804.461),
    ((4126.57, 4196.26, 3851.78), (0.7, 0.7, 0.7), 934.111),
    ((3275.14, 3552.26, 2737.51), (0.7, 0.7, 0.7), 988.339),
    ((3456.32, 2765.96, 2806.34), (1, 0.7, 0), 803.7),
    ((5162.83, 2935.53, 4018.09), (0.7, 0.7, 0.7), 812.118),
    ((6744.23, 3850.89, 2836.36), (0.7, 0.7, 0.7), 1177.93),
    ((9257.77, 4204.87, 2949.83), (0.7, 0.7, 0.7), 1038.21),
    ((9824.21, 4294.32, 3049.01), (1, 0.7, 0), 758.016),
    ((9779.03, 4230.77, 2228.35), (0.7, 0.7, 0.7), 824.046),
    ((9193.47, 3844.48, 2791.86), (0.7, 0.7, 0.7), 793.379),
    ((9561.92, 3243.52, 2338.43), (0.7, 0.7, 0.7), 1011.56),
    ((7898.62, 3985.36, 2919.97), (0.7, 0.7, 0.7), 1097.01),
    ((9190.93, 3100.32, 1946.76), (0.7, 0.7, 0.7), 851.626),
    ((11048.1, 2476.57, 1343.01), (0.7, 0.7, 0.7), 869.434),
    ((9970.25, 1441.99, 2355.89), (0.7, 0.7, 0.7), 818.463),
    ((11533.1, 1714.77, 2849.73), (0.7, 0.7, 0.7), 759.539),
    ((9256.93, 2584.04, 2650.17), (0.7, 0.7, 0.7), 1088.59),
    ((11136.5, 3015.37, 1882.96), (0.7, 0.7, 0.7), 822.312),
    ((12557.4, 3027.16, 2795.64), (0.7, 0.7, 0.7), 749.81),
    ((11919.7, 3293.02, 3323.69), (0.7, 0.7, 0.7), 764.488),
]
# Replaces 71 generated copy-paste stanzas with one loop: create (or reuse)
# the per-particle marker set, then place its single marker.
for _index, (_center, _color, _radius) in enumerate(_PARTICLES):
    _name = "particle_%d geometry" % _index
    if _name not in marker_sets:
        marker_sets[_name] = new_marker_set(_name)
    s = marker_sets[_name]
    mark = s.place_marker(_center, _color, _radius)
for k in surf_sets.keys():
    chimera.openModels.add([surf_sets[k]])
|
Our mission is to provide the best products and services for the people who need it the most.
Empire Scientific is a leading manufacturer of replacement batteries for camcorders, digital cameras, cordless telephones, and two-way radios. All their products are guaranteed to meet or exceed manufacturers' specifications. All radio-equipment batteries contain Japanese cells.
* For replacement, merchandise must be returned within 30 days of receipt and in the original packaging accompanied with packing slip.
* Absolutely no walk-in return/replacement service will be accepted.
* A refund may be issued if replacement is not available.
* Returns may be subject to a 15% restocking fee.
* All defective products will be replaced within the appropriate warranty period.
|
import logging
import os
from copy import deepcopy
from PyQt5.QtWidgets import QFileDialog
from gui.controller.customizer import Customizer
logger = logging.getLogger("apps.rendering")
class RendererCustomizer(Customizer):
    """Base GUI customizer for rendering-task dialogs.

    Binds the common rendering widgets (main scene file, output resolution,
    output file and format) to the task definition managed by ``logic``.
    Subclasses must implement :meth:`get_task_name`.
    """
    def __init__(self, gui, logic):
        # Task-type-specific options object; refreshed in load_data().
        self.options = logic.options
        Customizer.__init__(self, gui, logic)
    def get_task_name(self):
        """Return the task-type name this customizer handles (subclass hook)."""
        raise NotImplementedError
    def load_data(self):
        """Populate the widgets with the defaults of the current task type."""
        r = self.logic.get_task_type(self.get_task_name())
        self.gui.ui.outputResXSpinBox.setValue(
            r.defaults.resolution[0])
        self.gui.ui.outputResYSpinBox.setValue(
            r.defaults.resolution[1])
        # FIXME Move verification function to task specific widgets
        self.logic.customizer.gui.ui.verificationSizeXSpinBox.setMaximum(
            r.defaults.resolution[0])
        self.logic.customizer.gui.ui.verificationSizeYSpinBox.setMaximum(
            r.defaults.resolution[1])
        self.gui.ui.outputFormatsComboBox.clear()
        self.gui.ui.outputFormatsComboBox.addItems(r.output_formats)
        # Preselect the task type's default output format.
        for i, output_format in enumerate(r.output_formats):
            if output_format == r.defaults.output_format:
                self.gui.ui.outputFormatsComboBox.setCurrentIndex(i)
        self.gui.ui.mainSceneFileLineEdit.clear()
        self.gui.ui.outputFileLineEdit.clear()
        self.options = self.logic.options
    def load_task_definition(self, definition):
        """Fill the widgets from an existing task ``definition``.

        Aborts (with a logged error) if the definition's output format is
        not one of the formats currently offered by the combo box.
        """
        self.options = deepcopy(definition.options)
        self.gui.ui.mainSceneFileLineEdit.setText(definition.main_scene_file)
        self.gui.ui.outputResXSpinBox.setValue(definition.resolution[0])
        self.gui.ui.outputResYSpinBox.setValue(definition.resolution[1])
        self.gui.ui.outputFileLineEdit.setText(definition.output_file)
        output_format_item = self.gui.ui.outputFormatsComboBox.findText(definition.output_format)
        if output_format_item >= 0:
            self.gui.ui.outputFormatsComboBox.setCurrentIndex(output_format_item)
        else:
            logger.error("Cannot load task, wrong output format")
            return
        # The main scene file is handled separately from the resource list.
        if os.path.normpath(definition.main_scene_file) in definition.resources:
            definition.resources.remove(os.path.normpath(definition.main_scene_file))
        # Remember the directories for the next file dialogs.
        self.save_setting('main_scene_path',
                          os.path.dirname(definition.main_scene_file))
        self.save_setting('output_file_path',
                          os.path.dirname(definition.output_file), sync=True)
    def get_task_specific_options(self, definition):
        """Copy the current widget state into ``definition`` (in place)."""
        self._change_options()
        definition.options = self.options
        definition.resolution = [self.gui.ui.outputResXSpinBox.value(), self.gui.ui.outputResYSpinBox.value()]
        definition.output_file = self._add_ext_to_out_filename()
        definition.output_format = u"{}".format(
            self.gui.ui.outputFormatsComboBox.itemText(self.gui.ui.outputFormatsComboBox.currentIndex()))
        definition.main_scene_file = u"{}".format(
            self.gui.ui.mainSceneFileLineEdit.text())
    def _change_options(self):
        # Hook for subclasses to sync widget state into self.options.
        pass
    def _setup_connections(self):
        """Wire widget signals to their handlers."""
        self.gui.ui.chooseMainSceneFileButton.clicked.connect(
            self._choose_main_scene_file_button_clicked)
        self._setup_output_connections()
        self._connect_with_task_settings_changed([
            self.gui.ui.mainSceneFileLineEdit.textChanged,
        ])
        self.gui.ui.outputFormatsComboBox.currentIndexChanged.connect(self._add_ext_to_out_filename)
        self.gui.ui.outputFileLineEdit.editingFinished.connect(self._add_ext_to_out_filename)
    def _add_ext_to_out_filename(self):
        """Force the output file name to carry the selected format extension.

        Returns the resulting file name ("" when no name was entered).
        """
        chosen_ext = str(self.gui.ui.outputFormatsComboBox.itemText(self.gui.ui.outputFormatsComboBox.currentIndex()))
        out_file_name = str(self.gui.ui.outputFileLineEdit.text())
        if not out_file_name:
            return ""
        file_name, ext = os.path.splitext(out_file_name)
        ext = ext[1:]
        # If the current extension is a known format, replace it; otherwise
        # append the chosen extension to the whole name.
        if self.gui.ui.outputFormatsComboBox.findText(ext) != -1 or \
                self.gui.ui.outputFormatsComboBox.findText(ext.upper()) != -1:
            self.gui.ui.outputFileLineEdit.setText(u"{}.{}".format(file_name, chosen_ext))
        else:
            self.gui.ui.outputFileLineEdit.setText(u"{}.{}".format(out_file_name, chosen_ext))
        return u"{}".format(str(self.gui.ui.outputFileLineEdit.text()))
    def _connect_with_task_settings_changed(self, list_gui_el):
        # Connect each given signal to the generic settings-changed handler.
        for gui_el in list_gui_el:
            gui_el.connect(self.logic.task_settings_changed)
    def _setup_output_connections(self):
        self.gui.ui.chooseOutputFileButton.clicked.connect(
            self._choose_output_file_button_clicked)
        self.gui.ui.outputResXSpinBox.valueChanged.connect(self._res_x_changed)
        self.gui.ui.outputResYSpinBox.valueChanged.connect(self._res_y_changed)
    def _choose_main_scene_file_button_clicked(self):
        """Open a file dialog filtered to the task type's scene extensions."""
        tmp_output_file_ext = self.logic.get_current_task_type().output_file_ext
        # Offer both upper- and lower-case variants of each extension.
        output_file_ext = []
        for ext in tmp_output_file_ext:
            output_file_ext.append(ext.upper())
            output_file_ext.append(ext.lower())
        output_file_types = " ".join([u"*.{}".format(ext) for ext in output_file_ext])
        filter_ = u"Scene files ({})".format(output_file_types)
        path = u"{}".format(str(self.load_setting('main_scene_path', os.path.expanduser('~'))))
        file_name, _ = QFileDialog.getOpenFileName(self.gui,
                                                   "Choose main scene file",
                                                   path,
                                                   filter_)
        if file_name:
            self.save_setting('main_scene_path', os.path.dirname(file_name))
            self.gui.ui.mainSceneFileLineEdit.setText(file_name)
    def _choose_output_file_button_clicked(self):
        """Open a save dialog for the output file in the selected format."""
        output_file_type = u"{}".format(self.gui.ui.outputFormatsComboBox.currentText())
        filter_ = u"{} (*.{})".format(output_file_type, output_file_type)
        path = u"{}".format(str(self.load_setting('output_file_path', os.path.expanduser('~'))))
        file_name, _ = QFileDialog.getSaveFileName(self.gui,
                                                   "Choose output file",
                                                   path,
                                                   filter_)
        if file_name:
            self.save_setting('output_file_path', os.path.dirname(file_name))
            self.gui.ui.outputFileLineEdit.setText(file_name)
    def _res_x_changed(self):
        # Keep the verification-size bound in sync with the X resolution.
        self.logic.change_verification_option(size_x_max=self.gui.ui.outputResXSpinBox.value())
    def _res_y_changed(self):
        # Keep the verification-size bound in sync with the Y resolution.
        self.logic.change_verification_option(size_y_max=self.gui.ui.outputResYSpinBox.value())
class FrameRendererCustomizer(RendererCustomizer):
    """Renderer customizer for task types that render discrete frames.

    Adds the frames check box / line edit handling and the conversion
    between frame lists and their compact string form (e.g. "1-10;12").
    """
    def _setup_connections(self):
        super(FrameRendererCustomizer, self)._setup_connections()
        self.gui.ui.framesCheckBox.stateChanged.connect(self._frames_check_box_changed)
        self.gui.ui.framesLineEdit.textChanged.connect(self._frames_changed)
        self.gui.ui.framesCheckBox.stateChanged.connect(self._frames_changed)
    def load_data(self):
        super(FrameRendererCustomizer, self).load_data()
        self._set_frames_from_options()
    def load_task_definition(self, definition):
        super(FrameRendererCustomizer, self).load_task_definition(definition)
        self._set_frames_from_options()
    def _set_frames_from_options(self):
        # Mirror self.options.use_frames / frames into the widgets.
        self.gui.ui.framesCheckBox.setChecked(self.options.use_frames)
        self.gui.ui.framesLineEdit.setEnabled(self.options.use_frames)
        if self.options.use_frames:
            self.gui.ui.framesLineEdit.setText(self.frames_to_string(self.options.frames))
        else:
            self.gui.ui.framesLineEdit.setText("")
    def _change_options(self):
        """Sync frame settings from the widgets into self.options."""
        self.options.use_frames = self.gui.ui.framesCheckBox.isChecked()
        if self.options.use_frames:
            frames = self.string_to_frames(self.gui.ui.framesLineEdit.text())
            if not frames:
                self.show_error_window(u"Wrong frame format. Frame list expected, e.g. 1;3;5-12.")
                return
            self.options.frames = frames
    def _frames_changed(self):
        self.logic.task_settings_changed()
    def _frames_check_box_changed(self):
        # Enable the frames field only when frame rendering is requested.
        self.gui.ui.framesLineEdit.setEnabled(self.gui.ui.framesCheckBox.isChecked())
        if self.gui.ui.framesCheckBox.isChecked():
            self.gui.ui.framesLineEdit.setText(self.frames_to_string(self.options.frames))
    @staticmethod
    def frames_to_string(frames):
        """Compress a list of frame numbers into a string like "1-3;7".

        Consecutive runs are collapsed into "a-b" ranges; other frames are
        joined with ';'. Returns "" on any invalid input.
        """
        s = ""
        last_frame = None
        interval = False
        try:
            for frame in sorted(frames):
                frame = int(frame)
                if frame < 0:
                    raise ValueError("Frame number must be greater or equal to 0")
                if last_frame is None:
                    # First frame starts the string.
                    s += str(frame)
                elif frame - last_frame == 1:
                    # Consecutive frame: open a range (once).
                    if not interval:
                        s += '-'
                        interval = True
                elif interval:
                    # Run ended: close the range and start a new entry.
                    s += str(last_frame) + ";" + str(frame)
                    interval = False
                else:
                    s += ';' + str(frame)
                last_frame = frame
        except (ValueError, AttributeError, TypeError) as err:
            logger.error("Wrong frame format: {}".format(err))
            return ""
        if interval:
            # Close a range that ran to the end of the list.
            s += str(last_frame)
        return s
    @staticmethod
    def string_to_frames(s):
        """Parse a frames string ("1;3;5-12" or "10-100,5") into a sorted list.

        Returns [] when the string cannot be parsed.
        """
        try:
            frames = []
            after_split = s.split(";")
            for i in after_split:
                inter = i.split("-")
                if len(inter) == 1:  # single frame (e.g. 5)
                    frames.append(int(inter[0]))
                elif len(inter) == 2:
                    inter2 = inter[1].split(",")
                    if len(inter2) == 1:  # frame range (e.g. 1-10)
                        start_frame = int(inter[0])
                        end_frame = int(inter[1]) + 1
                        frames += range(start_frame, end_frame)
                    elif len(inter2) == 2:  # every n-th frame from a range (e.g. 10-100,5)
                        start_frame = int(inter[0])
                        end_frame = int(inter2[0]) + 1
                        step = int(inter2[1])
                        frames += range(start_frame, end_frame, step)
                    else:
                        raise ValueError("Wrong frame step")
                else:
                    raise ValueError("Wrong frame range")
            return sorted(frames)
        except ValueError as err:
            logger.warning("Wrong frame format: {}".format(err))
            return []
        except (AttributeError, TypeError) as err:
            logger.error("Problem with change string to frame: {}".format(err))
            return []
|
Sextus is an intelligent young man, astute in matters of law and society.
The Council intends to offer him the position of Autocrat once he is recovered from his boot withdrawal.
Sextus is the younger brother of Cornelius ex Jerbiton. Sextus is the sixth son of Augustus Quince. Sextus has always looked up to his older brother, admiring Cornelius' no-nonsense take on life. Augustus had intended for Sextus to study law and become the family's legal representative. Once Sextus went to Oxford, he began to hang out with Cornelius and their great Uncle. Sextus lost his eye in a duel with another student over the affections of a local barmaid. After Cornelius passed his gauntlet, Sextus decided to accompany Cornelius on his journeys.
You have been educated to a level equivalent to that of a Grammar School. You have a score of 3 in Speak Latin and 1 in Scribe Latin, and may purchase Academic Knowledge or Skills at character generation. You are at least 17 years old.
You have a broad range of acquaintances in a specific social circle (specified when this Virtue is purchased), accumulated over years of travel and socializing. Almost everywhere you go, you meet someone you know, or can get in touch with someone who can help you. Whenever you are somewhere new, you can contact someone on a simple Presence roll of 6+. The Storyguide may modify this target number upward for very small areas or areas where it is extremely unlikely that you would know someone. You may purchase this Virtue more than once, each time specifying a different social group.
You are a stirring speaker or a heroic figure, and can urge people to great efforts. You give targets a +3 bonus to rolls for appropriate Personality Traits, and +3 to natural resistance rolls versus certain spells that affect the mind.
By staring intently at people you make them feel uneasy, as if you are peering into their souls. Those with ulterior motives, uneasy consciences, or lying tongues must make rolls against an appropriate Personality Trait, Guile, or whatever the Storyguide deems appropriate, to remain calm. Furthermore, you gain a +3 to rolls involving intimidation. Faeries and demons are unfazed by your power.
You cannot judge close distances easily and get -3 on Attack rolls for missiles. In melee combat you suffer -1 on Attack rolls because your field of vision is limited. You also have a blind side from which people can approach unseen.
You have an unfortunate urge that causes you problems.
You have an exaggerated and unshakable opinion of your capabilities, and you do not hesitate to try things that promise only defeat. If you are convincing in your speech, you can infect others with your overconfidence. You gain the Personality Trait Overconfident at +3.
When you sleep, you don’t go halfway. You can sleep through loud noises and generally only wake up when shaken, or when good and ready. Even then you suffer –3 on your rolls for half an hour or so after awakening, and you’re likely to head back to bed if at all possible.
Knowledge of the structure, operation, and goals of the specified organization. Organizations can be as large as the Church, or as small as a local craft guild. The smaller the organization, the more detailed your knowledge.
Brawling covers fist fighting, using knives and daggers, and improvising weapons. It also covers the ability to dodge incoming blows.
The skill of using two single weapons, one in each hand. This includes Florentine style, double ax, and many other combinations.
Despite his missing eye, Sextus can handle his daggers fairly well.
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.14 on 2018-08-01 17:43
from django.conf import settings
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Squashed migration that creates the API ``Token`` model.

    Replaces the historical ``0001_api_tokens`` and ``0002_unicode_literals``
    migrations so fresh databases apply this single step instead.
    """
    replaces = [('users', '0001_api_tokens'), ('users', '0002_unicode_literals')]
    dependencies = [
        # Depend on whichever user model the project has configured.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        migrations.CreateModel(
            name='Token',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created', models.DateTimeField(auto_now_add=True)),
                # NULL expiry means the token never expires.
                ('expires', models.DateTimeField(blank=True, null=True)),
                # Key is exactly 40 chars: max_length caps it, the validator enforces the minimum.
                ('key', models.CharField(max_length=40, unique=True, validators=[django.core.validators.MinLengthValidator(40)])),
                ('write_enabled', models.BooleanField(default=True, help_text='Permit create/update/delete operations using this key')),
                ('description', models.CharField(blank=True, max_length=100)),
                # A user may own several tokens; deleting the user cascades to them.
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='tokens', to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'default_permissions': [],
            },
        ),
    ]
|
Stressful airports and last minute packing can cause tensions to run high with your travel companions and your own anxiety to creep up on you. Follow these 5 tips for a relaxing start to your vacation.
There are 2 types of people in this world – those who pack their suitcases ahead of time…and those who wait until midnight before a 5 a.m. flight while still doing laundry. In my life, I am the former and my husband Kiran is the latter.
Everyone made it, still smiling!
While it always works out for him and he has yet to forget anything important (probably thanks to his amazing wife), my philosophy on packing is that you should just get it done ahead of time and use the days and nights leading up to your trip for pampering.
In addition to packing your suitcase, you’re going to want to plan out what you’ll be taking on board as your “personal item” (be sure to check out your airline’s rules on sizing). I never travel without my favorite tote bag, the Longchamp Le Pliage, which is lightweight, holds everything I need and still looks put together.
If you’ve ever wondered how travelers get the best seats or end up with tons of overhead bin space for their luggage, I’ll tell you one easy and free way – online check-in.
Most airlines allow you to check in online via their websites 24 hours before departure and at that time, you can change your seat, pay for your checked baggage and most importantly, receive your boarding pass. You’ll receive your boarding group (the order in which you’ll board the plane), boarding time and a tentative gate number so that you can prepare for your arrival at the airport.
Airlines will often release their premium seats in each cabin during online check-in if they haven’t been sold so you may get lucky and snag that sweet seat without having to pay. And, the earlier you check in, the better chance you have of getting a more favorable boarding group, allowing you to avoid having to gate check your carry-on or worse, stow it 20 rows behind you!
This 24 hour prior check-in is especially important if you fly Southwest Airlines and you haven’t added any upgrades – since there is no assigned seating, your boarding group is assigned in the order in which you check-in. This boarding group number shows the order in which you’ll board the plane and choose your seat. The sooner you get on, the better your seat choice will be!
After years of early flights, I’ve learned that my tendency to think “oh I’ll just do that task/pack that item when I wake up in the morning” is a bad way to start a vacation. I usually forget whatever it is that I was supposed to pack or delay our departure trying to do whatever task I needed to do.
I now force myself to pack the last minute things the night before, even if it means staying up a little bit later. I’m an obsessive list maker so I don’t let myself get into bed until everything is checked off and my airport outfit is laid out - that way, I can just get up and go.
I love everything about the travel experience, from packing to flying, so I’ve started planning out the night before a trip to start my vacation early and extend my enjoyment.
Once everything is ready to go, I’ll pick up take-out, put on a vacation-themed tv show or movie (I LOVE Bravo’s Below Deck!) and make myself my signature Margarita. Not only does this get me into vacation mode after a day at the office but it helps me get a good night’s sleep.
Since vacation time in the U.S. can be so limited, you’ll want to hit the ground running when you arrive at your resort, not be a zombie from lack of sleep.
This is probably my favorite "day of travel hack" for a stress free airport experience and although you have to plan ahead on this one due to the application process, it can save you lots of time in the end.
My preferred trusted traveler program is Global Entry. This Department of Homeland Security program for pre-screened travelers allows you to pass through a dedicated immigration checkpoint when returning to the U.S. from abroad and also provides TSA PreCheck benefits at U.S. security checkpoints – no more taking off your shoes and waiting in long lines! There is a cost associated with applying for this program but once you’re approved, you have the benefits for 5 years. If you travel a couple of times a year, the application cost more than pays for itself in convenience. TSA PreCheck can also be applied for alone without Global Entry and is a much quicker application process.
Global Entry isn't the only option for cutting down your immigration checkpoint wait times - Mobile Passport is growing in popularity at U.S. airports and is totally free. While it doesn't offer TSA PreCheck benefits like Global Entry does, there is no interview and no background check. This program is currently being offered in limited airports and for U.S. and Canadian citizens only. You just download the app, fill in passport information for each traveler, answer a couple of quick customs questions and you're good to go!
Contact info@destinationluxetravel.com to start planning your next getaway!
|
# -*- coding: utf-8 -*-
from django.shortcuts import render
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.decorators import login_required
from django.http import HttpResponse, HttpResponseRedirect
from rango.models import Category, Page
from rango.forms import CategoryForm, PageForm, UserForm, UserProfileForm
from datetime import datetime
def index(request):
    """Render the home page: top five categories/pages plus a visit counter."""
    # Most-viewed pages and most-liked categories, five of each.
    top_pages = Page.objects.order_by('-views')[:5]
    top_categories = Category.objects.order_by('-likes')[:5]
    context_dict = {'categories': top_categories, 'pages': top_pages}
    # Session-based counter: count at most one visit per calendar day.
    visits = request.session.get('visits') or 0
    refresh_stamp = False
    last_visit = request.session.get('last_visit')
    if last_visit:
        # Stored via str(datetime.now()); drop the 7 microsecond chars before parsing.
        last_visit_time = datetime.strptime(
            last_visit[:-7], '%Y-%m-%d %H:%M:%S'
        )
        if (datetime.now() - last_visit_time).days > 0:
            # More than a day since the last recorded visit: count it
            # and refresh the timestamp.
            visits += 1
            refresh_stamp = True
    else:
        # First visit ever for this session: just record the time.
        refresh_stamp = True
    context_dict['visits'] = visits
    request.session['visits'] = visits
    if refresh_stamp:
        request.session['last_visit'] = str(datetime.now())
    return render(request, 'rango/index.html', context_dict)
def about(request):
    """Render the static about page."""
    return render(request, 'rango/about.html', {'message': 'Not necessary'})
def category(request, category_name_slug):
    """Show a single category and the pages belonging to it, looked up by slug."""
    context_dict = {'category_name_slug': category_name_slug}
    try:
        # .get() returns exactly one model instance or raises DoesNotExist.
        matched = Category.objects.get(slug=category_name_slug)
    except Category.DoesNotExist:
        # Unknown slug: leave 'category' out of the context so the
        # template can show its "no category" message.
        pass
    else:
        context_dict['category_name'] = matched.name
        # filter() may return any number of pages (possibly none).
        context_dict['pages'] = Page.objects.filter(category=matched)
        # The template checks this key to see whether the category exists.
        context_dict['category'] = matched
    return render(request, 'rango/category.html', context_dict)
@login_required
def add_category(request):
    """Create a new category from a submitted form (login required)."""
    # Is it an HTTP POST?
    if request.method == 'POST':
        form = CategoryForm(request.POST)
        if form.is_valid():
            # Persist the new category.
            form.save(commit=True)
            # Send the user back to the index page.
            return index(request)
        else:
            # Function-call print: works on Python 2 and 3 and matches
            # the style already used in register() below.
            print(form.errors)
    else:
        # Not a POST: present a blank form.
        form = CategoryForm()
    # Render the form, including any validation error messages.
    return render(request, 'rango/add_category.html', {'form': form})
@login_required
def add_page(request, category_name_slug):
    """Add a page to the category identified by *category_name_slug*."""
    try:
        cat = Category.objects.get(slug=category_name_slug)
    except Category.DoesNotExist:
        cat = None
    if request.method == 'POST':
        form = PageForm(request.POST)
        if form.is_valid():
            if cat:
                page = form.save(commit=False)
                page.category = cat
                page.views = 0
                page.save()
                # Show the category the page was just added to.
                return category(request, category_name_slug)
        else:
            # Function-call print: works on Python 2 and 3 and matches
            # the style already used in register().
            print(form.errors)
    else:
        form = PageForm()
    context_dict = {'form': form, 'category': cat}
    return render(request, 'rango/add_page.html', context_dict)
def register(request):
    """Handle user sign-up with linked User and UserProfile forms."""
    registered = False
    if request.method == 'POST':
        user_form = UserForm(data=request.POST)
        profile_form = UserProfileForm(data=request.POST)
        if user_form.is_valid() and profile_form.is_valid():
            # Persist the user first, hashing the raw password.
            new_user = user_form.save()
            new_user.set_password(new_user.password)
            new_user.save()
            # Defer the profile save until its user link is in place.
            new_profile = profile_form.save(commit=False)
            new_profile.user = new_user
            if 'picture' in request.FILES:
                new_profile.picture = request.FILES['picture']
            new_profile.save()
            registered = True
        else:
            print(user_form.errors, profile_form.errors)
    else:
        # Blank forms for a plain GET.
        user_form = UserForm()
        profile_form = UserProfileForm()
    context = {
        'user_form': user_form,
        'profile_form': profile_form,
        'registered': registered
    }
    return render(request, 'rango/register.html', context)
def user_login(request):
    """Authenticate a user; redirect to the index page on success."""
    if request.method == 'POST':
        username = request.POST['username']
        password = request.POST['password']
        user = authenticate(username=username, password=password)
        if user:
            if user.is_active:
                login(request, user)
                return HttpResponseRedirect('/rango/')
            else:
                return HttpResponse('Sua conta está desativada.')
        else:
            # Security: never log the plaintext password — only the username.
            print("Detalhes inválidos de login: {0}".format(username))
            context_dict = {'errors': 'Nome de user ou senha incorretos.'}
            return render(request, 'rango/login.html', context_dict)
    else:
        # Plain GET: show an empty login form.
        return render(request, 'rango/login.html', {})
@login_required
def user_logout(request):
    """Log the requesting user out, then send them to the index page."""
    logout(request)  # clears the session for this user
    return HttpResponseRedirect('/rango/')
@login_required
def restricted(request):
    """Render a page only logged-in users may see."""
    return render(request, 'rango/restricted.html', {})
|
Please submit any useful information about climbing Agulles Amitges that may be useful to other climbers. Consider things such as access and accommodation at the base of Agulles Amitges, as well as the logistics of climbing to the summit.
Use this relief map to navigate to mountain peaks in the area of Agulles Amitges.
|
import signal
import sys
from fcb.checker import mail, mega
from fcb.checker.settings import Configuration
from fcb.database.helpers import get_session
from fcb.database.helpers import get_db_version
from fcb.utils.log_helper import get_logger_module
log = get_logger_module('mail_checker')
def main():
    """Run the mail and Mega upload verifiers described by the config file."""
    # noinspection PyUnresolvedReferences
    import log_configuration
    if len(sys.argv) < 2:
        log.error("Usage: %s <config_file>", sys.argv[0])
        exit(1)
    conf = Configuration(sys.argv[1])
    # Refuse to run against an unexpected database schema version.
    with get_session() as session:
        db_version = get_db_version(session)
        if db_version != 3:
            log.error("Invalid database version (%d). 3 expected", db_version)
            session.close()
            exit(1)
        session.close()
    mail_verifier = mail.Verifier()
    mega_verifier = mega.Verifier()
    def signal_handler(signum, frame):
        # Parameters renamed so the handler no longer shadows the
        # ``signal`` module; print() works on Python 2 and 3 alike.
        print("Abort signal received!!!!")
        mail_verifier.stop()
        mega_verifier.stop()
    signal.signal(signal.SIGINT, signal_handler)
    for mail_conf in conf.mail_confs:
        mail_verifier.verify(mail_conf)
    for mega_conf in conf.mega_confs:  # fixed 'meaga_conf' typo
        mega_verifier.verify(mega_conf)
    mail_verifier.close()
    mega_verifier.close()
if __name__ == '__main__':
    main()
|
Thanks for this terrific innovation! I made one for my mum’s Mother’s Day card and my daughter made name badges for her friends for her 10th birthday party. Think we will be adorning all our homemade cards with these this year!
|
# import module
from Sass.completions import properties as prop
import sublime, re
# Default completion pool: CSS property names plus HTML tags.
prop_default = prop.names + prop.tag
# Suppress Sublime's built-in word/explicit completions so only ours show.
flag = sublime.INHIBIT_WORD_COMPLETIONS | sublime.INHIBIT_EXPLICIT_COMPLETIONS
# [CSS] completion handler for Sass buffers.
def completions(self, view, prefix, locations):
    """Return (completion_list, flag) appropriate to the cursor's Sass scope.

    NOTE(review): the bare ``return flag`` branches return an int instead of
    a (list, flag) tuple like the other branches — presumably intentional to
    suppress completions entirely; confirm against the Sublime API contract.
    """
    # Buffer position where the current completion prefix starts.
    pt = locations[0] - len(prefix)
    # Inside a property value: offer values for the property being set.
    if view.match_selector(pt, 'meta.property-value.sass'):
        line = view.substr(sublime.Region(view.line(pt).begin(), pt))
        # Extract the property name, skipping any vendor prefix.
        m = re.search(re.compile('(?:-webkit-|-moz-|-ms-|-o-)?([-a-zA-Z]+):[^;]*$'), line)
        if m:
            style = m.group(1)
            if style in prop.value_for_name:
                # Property-specific values plus the generic defaults.
                return prop.value_for_name[style] + prop.default_value, flag
            return prop.default_value, flag
        return prop.names, flag
    # Inside a parameter value: offer nothing.
    if view.match_selector(pt, 'meta.parameter-value.sass'):
        return flag
    # Character immediately before the prefix.
    ch = view.substr(sublime.Region(pt - 1, pt))
    # at-rules
    if ch == '@':
        return prop.extends_style, flag
    # pseudo-classes
    if ch == ':':
        return prop.pseude_class, flag
    # variables: offer nothing
    if ch == '$':
        return flag
    line = view.substr(sublime.Region(view.line(pt).begin(), pt))
    # Start of a blank line: offer properties (and tags).
    if re.search(re.compile('^\s*$'), line):
        return prop_default, flag
    # Otherwise: tag names.
    return prop.tag, flag
|
Become part of an award-winning and highly expert plumbing company in Malabar today.
Typically, we think of plumbing issues when they happen. Most of the time, you see water flowing in your faucets, taps, and toilets, and never think of calling a plumber, up until one day an issue takes you by surprise. Well, if you are a prudent home or company owner, then having contacts of a 24-hour emergency plumber is something that should be at the top of your priority list.
Keep in mind, an issue with your plumbing system can happen at any time of the day. It doesn’t matter whether it is the middle of the night or the eve of Christmas day. Issues are bound to happen, and most of the time, they happen when you least expect them. Thankfully, we are more than happy to announce to you that we can assist you regardless of the issue you are facing, 24 hours a day and seven days a week.
First things first, we strive to inform our existing and potential customers on the value of employing an expert to fix their plumbing systems when they face issues. One thing you have to know is that the plumbing system is complicated, interconnected systems that need the care and knowledge of professionals. For that reason, do not be lured to believe that particular plumbing issues are simple DIY tasks. You might end up causing more pricey damage. Call us, and we will cost effectively deal with any plumbing issue quick.
From hot water heater repair, emergency plumbing services, to drain cleansing, our specialists have seen it all. We utilize innovative techniques and the most recent technology such as trenchless sewer repair to guarantee we offer unsurpassable services all the time.
You might be wondering why you need to pick us among hundreds of plumbers found in your location. Well, below are some of the components that make us the marketplace leader who you need to consider working with the next time you are faced with a plumbing issue.
We take pride in the fact that we provide exact same day service for over 90% of the issues we deal with. We understand that your home or organisation can not be complete without water and gas supply, and this is why we have heavily invested in technology that permits us to work quick, while still conforming to your spending plan.
In addition, we can take care of your issue during the day, at night, and during public holidays for the exact same low rates as our basic services.
Through the years we have provided plumbing solutions in Sydney, our objective has been to offer diligent services, satisfy our customers fully, and offer them budget-friendly services. Through this, we have had the ability to get a credibility that can not be shaken. We have been rated five stars in various review websites, not forgetting the applauds we get from our community of delighted customers. This describes why we have been nominated for various awards and won a number of them throughout the years.
Our reputation is what makes us win new customers from time to time, and it is the reason we get repeat business from our previous customers. Work with us today, and you will definitely get value for your hard-earned cash.
Keeping a high level of professionalism is one of our organizational objectives. Also, our company believes that excellent client experiences are what motivate repeat business, and result in the achievement of goals and objectives. For that reason, we provide regular training to our workers on social skills that allow them to relate well with our ever-growing client population. Hence, when you come to us, you will never get a negative answer, or find cruel client service assistants. You can come to us with self-confidence knowing that you will get nothing but the best, and you will make friends out of your plumbing issue.
Have you ever been in an emergency situation, and called a plumber who reached your premises only to inform you that he can not solve the concern because he did not have specific equipment? This is a nightmare you can not wish to go through. With a fleet of automobiles fully-equipped with modern devices, we are not that kind of plumber. No problem is too big for us to solve. We will fix it in no time, and guarantee you have a functional plumbing system every day of your life.
As a property owner, home owner, or tenant, the last thing you would desire is a plumbing issue, because it compromises your convenience. Generally, even the tiniest plumbing concern can become a huge issue if not tackled on sight.
As our client, your business is our business. When you deal with a plumbing concern in your company, we will send our specialists and account representatives to determine your distinct needs. We will then create a strategy to ensure your plumbing system works as it ought to at all times.
In many instances, plumbing issues do not call. Surprisingly, most of them happen at weird hours– when it is difficult to find somebody to deal with them. Having an issue with your plumbing system at night, or on a public holiday? Call us today, and we will fix it at the lowest costs in the area.
|
# Read groups of (rows, diamond count, symbol) from stdin and print, for each
# group, a horizontal row of `diamonds` diamond outlines, each `rows` tall and
# wide, drawn with `symbol`. A blank first line ends input.
line = input()
while line != "":
    rows = int(line)
    diamonds = int(input())
    symbol = input()
    # Middle row/column index of one diamond.
    mid = rows // 2
    # For each row
    for currentRow in range(rows):
        # For each diamond in the pattern
        for diamond in range(diamonds):
            # For each column in a diamond
            for currentCol in range(rows):
                if currentRow <= mid:
                    # Upper half (including middle): edges spread out from the center.
                    if currentCol == mid - currentRow or currentCol == mid + currentRow:
                        print(symbol, end="")
                    else:
                        print(" ", end="")
                else:
                    # Lower half: edges converge back toward the center.
                    if currentCol == mid - (rows - currentRow) + 1 or currentCol == mid + (rows - currentRow) - 1:
                        print(symbol, end="")
                    else:
                        print(" ", end="")
            # A row in a single diamond in the pattern is finished.
            # NOTE(review): `diamonds - 2` skips the separator after the last
            # TWO diamonds — `diamonds - 1` looks intended; confirm expected output.
            if diamond < diamonds - 2:
                print(" ", end="")
        # A complete row is finished
        print()
    # The whole pattern is finished
    print()
    line = input()
|
Additive manufacturing is becoming increasingly important both in industry and in research. But in many cases it is unclear what this term actually means. This article provides an introduction to additive manufacturing, how it works, and where it is used.
Additive manufacturing is playing an increasingly important role in the manufacturing industry. Additive manufacturing processes are used above all in toolmaking and prototype construction.
Subtractive Processes (something is removed): Milling, lathing, etc.
Formative Processes (a material is redesigned): Casting, forging, etc.
Additive Processes (something is added): 3D printing, etc.
1. The component is made up of different layers. Usually the process is carried out from bottom to top. Simply put, it uses the same principle as for the building of sand castles: A new layer is applied to a building platform in order to build a tower.
2. Different processes take place repeatedly in layers (i.e. one after the other). This involves the feeding of the material, the melting (shaping), and finally the bonding with the previous layers. These steps, called the process chain, are the same regardless of which machine is used for additive manufacturing. The only difference is the way the individual layers are created.
Additive manufacturing thus enables the creation of 3D objects. To make this possible, the machine first requires the 3D design specifications ("three-dimensional CAD") of the part to be produced. The respective data set consists of the outline data (length x, height y), the number of layers (z) and the layer thickness (dz). It is the task of the corresponding computer program to divide the model into suitable layers. The software then transmits the data set to the machine in the form of production instructions, e.g. the printer for 3D metal printing.
Definitions for 3D printing sound very similar to the above explanations of additive manufacturing. Nevertheless, it is not correct to use 3D printing as a synonym. Rather, 3D printing is an additive manufacturing process. For this reason, this also applies to 3D metal printing as a type of 3D printing using liquid metal. But there are more than 20 different additive manufacturing technologies. Therefore, the following simple rule of thumb applies: For example, 3D metal printing, which is often used as a generic term for additive manufacturing, is always a form of additive manufacturing. However, additive manufacturing is not always 3D metal printing. It involves more than that: It comprises all additive technologies.
In almost every case, additive manufacturing uses a powder bed. This means that a powdered material is fed into a bed where it is further processed. In 3D metal printing, for example, a metal (or several metals) is reduced to a powder before it is fed into the chamber and rebuilt. There are four common methods of producing the layers from the powder.
1. Selective Laser Sintering (SLS): Sintering involves the heating of materials under pressure, but not to the point of melting them. Laser technology makes it possible to create three-dimensional geometries by using undercuts. Usually CO2 or fiber lasers are used to do this.
2. Selective Laser Melting (SLM): The powder is heated by a high-energy fiber laser and then cooled down. The shape of the components is created by the targeted deflection of the laser beams. SLM is being used more and more frequently than SLS. Since no pressure is applied, the objects exhibit a higher strength and are therefore more durable. In Germany, this process is mainly used in the power generation industry and in the construction of tools. This process is frequently used for 3D metal printing.
3. Electronic Beam Melting (EMD/ EBM): In principle, this method is similar to SLM. However, this application uses an electron beam and not a laser beam. The entire process takes place in a vacuum. EMD is faster than SLM, but less precise and has a lower maximum print volume. EMD machines have an average diameter of 350 mm and a height of 380mm. SLM machines are twice as large. EMD is a particularly ideal additive manufacturing technology whenever small parts have to be produced in large quantities. This process is also often used for 3D metal printing.
4. Binder Jetting: The powder is selectively deposited with a liquid binding agent to form the layers. This process has the advantage of allowing a very simple construction in different colours.
- Stereolithography: This is a classic case for additive manufacturing. The process had already been developed by Chuck Hull in 1983. The object is gradually lowered into a liquid photopolymer bath. It is processed by a laser.
- Fused Layer Modelling (FLM): This is the usual procedure for 3D printing (with plastics). This is a special extrusion process in which the material is "pressed" out of a nozzle to form after cooling. As usual, extrusion takes place in layers. In the automotive or electronics industry, for example, FLM often supplements SLM objects. 3D metal printing can be supplemented by FLM for example.
1. Rapid Prototyping: Additive manufacturing is used to enable the rapid construction of a model. There should be physical models available at an early stage in the development of a product. Rapid prototyping allows these models to be produced in a particularly reliable manner.
2. Rapid Tooling: In Germany, small series tools for injection molding and metal casting are usually produced by means of 3D metal printing.
3. Rapid Manufacturing: This involves the rapid production of objects that are used as end products or components. Unlike rapid prototyping, no models are generated, but ready-to-use parts. This application is common in medicine and dentistry. The additive manufacturing process is used to produce dental bridges or implants, for example.
Various standards are used to ensure that additive manufacturing in general and 3D metal printing in particular meet certain quality requirements. These include ISO/ ASTM 52901 - 52903-3, 52910, 52195 and 52921, which cover the entire process chain. In the German-speaking countries, additional DIN regulations will soon be in place: the German Institute for Standardization announced at the end of September 2018 the establishment of an advisory board for additive manufacturing. In addition to the existing ISO standards, guidelines on legal development, and the financial framework are to be issued. For Switzerland, the Swiss Additive Manufacturing Group (an association of companies that use additive manufacturing) is participating in the standardization talks, which take place under the umbrella of CECIMO with the aim of promoting quality assurance in additive manufacturing and 3D metal printing in particular.
- Individualization: Objects can be customized as desired. For example, it is possible to produce walls with varying thicknesses, very fine structures or very small dimensions. Individualization is particularly important in medical technology. For instance, a surgeon can have instruments exactly tailored to his needs.
- Greater Freedom of Design: It is possible to realize complex geometries using 3D metal printing, which would not be possible using other manufacturing processes. This includes, for example, cavities, undercuts, channels with arches or overhangs. It used to be the case that the design of an object had to follow the limitations of the manufacturing possibilities. This constraint is largely eliminated with additive manufacturing. This is a great advantage, especially in 3D metal printing.
Apart from the machine, no tools are needed.
- Finishing is Unavoidable: If an object requires a certain surface quality, post-processing is inevitable. The same applies if certain tolerances are to be maintained. There is still no standard for this (ISO/ASTM 52195 could, however, be further elaborated in a corresponding way). Especially in the case of 3D metal printing, finishing can be extremely time-consuming.
- Limited suitability for industrial mass production: For instance, 3D metal printing usually allows a maximum of two objects to be produced in one machine at the same time. Conventional manufacturing methods, on the other hand, allow much larger quantities to be produced. For industrial mass production, additive manufacturing is therefore only suitable to a limited extent. The best example of this is automotive production: Theoretically, an entire vehicle could be produced by additive manufacturing. Due to the large number of components, however, this would be far too expensive. For this reason, most components continue to be manufactured with conventional methods.
It is noteworthy that the experts in additive manufacturing list the costs both as advantages and disadvantages. The machines cost CHF 17,000 to CHF 120,000. In comparison with other machines, this is a favorable price. This is still the case if maintenance costs are included. Especially in medical technology, additive manufacturing therefore offers a significant cost advantage. Things are different in industrial applications. In this area, for example, 3D metal printing supplements conventional production systems. However, they cannot completely replace them. Here, additive manufacturing causes additional costs.
Additive manufacturing has a great future ahead of it. SAMG summed it up this way: The current development of additive manufacturing has already surpassed the predictions made in studies carried out in previous years. Renowned research institutions such as ETH Zurich are convinced that 3D metal printing will become increasingly important in mechanical engineering and toolmaking. The aim is for additive manufacturing to become fast and precise enough to be suitable for series production. At present, the possibilities of additive manufacturing still compete with CNC-controlled machines, which are still better suited for series production. The long-term goal is for 3D metal printing of complex objects to overtake CNC in the future.
|
# Organic Photovoltaic Device Model - a drift diffusion base/Shockley-Read-Hall
# model for organic solar cells.
# Copyright (C) 2012 Roderick C. I. MacKenzie
#
# roderick.mackenzie@nottingham.ac.uk
# www.opvdm.com
# Room B86 Coates, University Park, Nottingham, NG7 2RD, UK
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License v2.0, as published by
# the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import pygtk
pygtk.require('2.0')
import gtk
import sys
import os
import shutil
from cal_path import get_image_file_path
from about import about_dialog_show
from used_files_menu import used_files_menu
from server import server
from scan_tab import scan_vbox
from gui_util import dlg_get_text
import threading
import gobject
import multiprocessing
import time
import glob
from window_list import windows
from util import opvdm_delete_file
from util import delete_second_level_link_tree
from util import copy_scan_dir
from search import return_file_list
from win_lin import running_on_linux
import webbrowser
from search import find_fit_log
from scan_io import get_scan_dirs
from hpc import hpc_class
from debug import debug_mode
from inp import inp_update_token_value
from inp import inp_get_token_value
import i18n
_ = i18n.language.gettext
class scan_class(gtk.Window):
	"""Parameter-scan window for opvdm.

	Shows one notebook tab (a scan_vbox) per scan directory inside the
	current simulation directory, plus a menu/toolbar for creating,
	cloning, renaming, deleting and running scans, and (in debug mode)
	HPC cluster controls.
	"""

	def callback_cluster(self, widget, data=None):
		# Lazily create the cluster-control window, then toggle its visibility.
		if self.cluster_window==None:
			self.cluster_window=hpc_class()
			self.cluster_window.init(self.hpc_root_dir,self.myserver.terminal)
		print self.cluster_window.get_property("visible")
		if self.cluster_window.get_property("visible")==True:
			self.cluster_window.hide()
		else:
			self.cluster_window.show()

	def get_main_menu(self, window):
		"""Build and return the menu bar described by self.menu_items."""
		accel_group = gtk.AccelGroup()
		item_factory = gtk.ItemFactory(gtk.MenuBar, "<main>", accel_group)
		item_factory.create_items(self.menu_items)
		# the Advanced menu is only available in debug mode
		if debug_mode()==False:
			item_factory.delete_item(_("/Advanced"))
		window.add_accel_group(accel_group)
		# keep a reference so the item factory is not garbage collected
		self.item_factory = item_factory
		return item_factory.get_widget("<main>")

	def callback_close(self, widget, data=None):
		# Update the saved window-list entry, then hide (not destroy) so the
		# window can be shown again later.  Returning True stops the default
		# delete-event handler.
		self.win_list.update(self,"scan_window")
		self.hide()
		return True

	def callback_change_dir(self, widget, data=None):
		"""Let the user pick a new scan directory and reload all tabs from it."""
		dialog = gtk.FileChooserDialog(_("Change directory"),
			None,
			gtk.FILE_CHOOSER_ACTION_OPEN,
			(gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL,
			gtk.STOCK_OK, gtk.RESPONSE_OK))
		dialog.set_default_response(gtk.RESPONSE_OK)
		dialog.set_action(gtk.FILE_CHOOSER_ACTION_CREATE_FOLDER)
		filter = gtk.FileFilter()
		filter.set_name(_("All files"))
		filter.add_pattern("*")
		dialog.add_filter(filter)
		response = dialog.run()
		if response == gtk.RESPONSE_OK:
			self.sim_dir=dialog.get_filename()
			# remember the chosen directory for the next start-up
			a = open("scan_window.inp", "w")
			a.write(self.sim_dir)
			a.close()
			self.clear_pages()
			self.load_tabs()
		dialog.destroy()
		return True

	def callback_help(self, widget, data=None):
		webbrowser.open('http://www.opvdm.com/man/index.html')

	def callback_add_page(self, widget, data=None):
		"""Ask for a name and create a new scan tab."""
		new_sim_name=dlg_get_text( _("New simulation name:"), _("Simulation ")+str(self.number_of_tabs+1))
		if new_sim_name!=None:
			new_sim_name=self.remove_invalid(new_sim_name)
			name=os.path.join(os.getcwd(),new_sim_name)
			self.add_page(name)

	def callback_remove_page(self,widget,name):
		# Hides the current tab rather than deleting anything on disk.
		pageNum = self.notebook.get_current_page()
		tab = self.notebook.get_nth_page(pageNum)
		self.toggle_tab_visible(tab.tab_name)

	def callback_cluster_sleep(self,widget,data):
		self.myserver.sleep()

	def callback_cluster_poweroff(self,widget,data):
		self.myserver.poweroff()

	def callback_cluster_get_data(self,widget):
		self.myserver.get_data()

	def callback_cluster_print_jobs(self,widget):
		self.myserver.print_jobs()

	def callback_cluster_fit_log(self,widget):
		# Extract the fit log for the current tab and plot it with gnuplot.
		pageNum = self.notebook.get_current_page()
		tab = self.notebook.get_nth_page(pageNum)
		name=tab.tab_name
		path=os.path.join(self.sim_dir,name)
		find_fit_log("./fit.dat",path)
		os.system("gnuplot -persist ./fit.dat &\n")

	def callback_copy_page(self,widget,data):
		"""Clone the currently selected scan directory under a new name."""
		pageNum = self.notebook.get_current_page()
		tab = self.notebook.get_nth_page(pageNum)
		name=tab.tab_name
		old_dir=os.path.join(self.sim_dir,name)
		new_sim_name=dlg_get_text( _("Clone the current simulation to a new simulation called:"), name)
		if new_sim_name!=None:
			new_sim_name=self.remove_invalid(new_sim_name)
			new_dir=os.path.join(self.sim_dir,new_sim_name)
			copy_scan_dir(new_dir,old_dir)
			print _("I want to copy"),new_dir,old_dir
			self.add_page(new_sim_name)

	# The following callbacks all act on the scan_vbox in the currently
	# selected notebook tab.

	def callback_run_simulation(self,widget,data):
		pageNum = self.notebook.get_current_page()
		tab = self.notebook.get_nth_page(pageNum)
		tab.simulate(True,True)

	def callback_build_simulation(self,widget,data):
		pageNum = self.notebook.get_current_page()
		tab = self.notebook.get_nth_page(pageNum)
		tab.simulate(False,True)

	def callback_run_simulation_no_build(self,widget,data):
		pageNum = self.notebook.get_current_page()
		tab = self.notebook.get_nth_page(pageNum)
		tab.simulate(True,False)

	def callback_nested_simulation(self,widget,data):
		pageNum = self.notebook.get_current_page()
		tab = self.notebook.get_nth_page(pageNum)
		tab.nested_simulation()

	def callback_clean_simulation(self,widget,data):
		pageNum = self.notebook.get_current_page()
		tab = self.notebook.get_nth_page(pageNum)
		tab.clean_scan_dir()

	def callback_clean_unconverged_simulation(self,widget,data):
		pageNum = self.notebook.get_current_page()
		tab = self.notebook.get_nth_page(pageNum)
		tab.scan_clean_unconverged()

	def callback_clean_simulation_output(self,widget,data):
		pageNum = self.notebook.get_current_page()
		tab = self.notebook.get_nth_page(pageNum)
		tab.scan_clean_simulation_output()

	def callback_import_from_hpc(self,widget,data):
		pageNum = self.notebook.get_current_page()
		tab = self.notebook.get_nth_page(pageNum)
		tab.import_from_hpc()

	def callback_push_to_hpc(self,widget,data):
		pageNum = self.notebook.get_current_page()
		tab = self.notebook.get_nth_page(pageNum)
		tab.push_to_hpc()

	def callback_push_unconverged_to_hpc(self,widget,data):
		pageNum = self.notebook.get_current_page()
		tab = self.notebook.get_nth_page(pageNum)
		tab.push_unconverged_to_hpc()

	def callback_set_hpc_dir(self,widget,data):
		"""Select the HPC directory and store it in server.inp (#hpc_dir token)."""
		config_file=os.path.join(self.sim_dir,"server.inp")
		hpc_path=inp_get_token_value(config_file, "#hpc_dir")
		dialog = gtk.FileChooserDialog(_("Select HPC dir"),
			None,
			gtk.FILE_CHOOSER_ACTION_OPEN,
			(gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL,
			gtk.STOCK_OK, gtk.RESPONSE_OK))
		dialog.set_default_response(gtk.RESPONSE_OK)
		dialog.set_action(gtk.FILE_CHOOSER_ACTION_SELECT_FOLDER)
		# start browsing from the previously configured directory if it exists
		if os.path.isdir(hpc_path):
			dialog.set_current_folder(hpc_path)
		filter = gtk.FileFilter()
		filter.set_name(_("All files"))
		filter.add_pattern("*")
		dialog.add_filter(filter)
		response = dialog.run()
		if response == gtk.RESPONSE_OK:
			inp_update_token_value(config_file, "#hpc_dir", dialog.get_filename(),1)
		dialog.destroy()

	def remove_invalid(self,input_name):
		# Directory names may not contain spaces.
		return input_name.replace (" ", "_")

	def callback_rename_page(self,widget,data):
		"""Rename the current scan directory on disk and update its tab."""
		pageNum = self.notebook.get_current_page()
		tab = self.notebook.get_nth_page(pageNum)
		name=tab.tab_name
		old_dir=os.path.join(self.sim_dir,name)
		new_sim_name=dlg_get_text( _("Rename the simulation to be called:"), name)
		if new_sim_name!=None:
			new_sim_name=self.remove_invalid(new_sim_name)
			new_dir=os.path.join(self.sim_dir,new_sim_name)
			shutil.move(old_dir, new_dir)
			tab.rename(new_dir)

	def callback_delete_page(self,widget,data):
		"""Ask for confirmation, then delete the current scan directory and its tab."""
		pageNum = self.notebook.get_current_page()
		tab = self.notebook.get_nth_page(pageNum)
		name=tab.tab_name
		dir_to_del=os.path.join(self.sim_dir,name)
		md = gtk.MessageDialog(None, 0, gtk.MESSAGE_QUESTION, gtk.BUTTONS_YES_NO, _("Should I remove the simulation directory ")+dir_to_del)
		#gtk.MessageDialog(self, gtk.DIALOG_DESTROY_WITH_PARENT, gtk.MESSAGE_QUESTION,
		#	gtk.BUTTONS_CLOSE, "Should I remove the simulation directory "+dir_to_del)
		response = md.run()
		if response == gtk.RESPONSE_YES:
			self.notebook.remove_page(pageNum)
			# also remove the matching entry from the tab visibility menu
			for items in self.tab_menu.get_children():
				if items.get_label()==name:
					self.tab_menu.remove(items)
			print _("I am going to delete file"),dir_to_del
			delete_second_level_link_tree(dir_to_del)
			self.number_of_tabs=self.number_of_tabs-1
		elif response == gtk.RESPONSE_NO:
			print _("Not deleting")
		md.destroy()

	def toggle_tab_visible(self,name):
		"""Show the named tab, or hide it (but never hide the last visible tab)."""
		tabs_open=0
		print name
		for i in range(0, self.number_of_tabs):
			if self.rod[i].visible==True:
				tabs_open=tabs_open+1
		#print "tabs open",tabs_open,self.number_of_tabs
		for i in range(0, self.number_of_tabs):
			print self.rod[i].tab_name, name, self.rod[i].visible
			if self.rod[i].tab_name==name:
				if self.rod[i].visible==False:
					self.rod[i].set_visible(True)
					self.rod[i].visible=True
				else:
					# keep at least one tab visible
					if tabs_open>1:
						print self.rod[i].tab_label
						self.rod[i].set_visible(False)
						self.rod[i].visible=False

	def callback_view_toggle(self, widget, data):
		#print "one",widget.get_label()
		self.toggle_tab_visible(widget.get_label())

	def callback_view_toggle_tab(self, widget, data):
		self.toggle_tab_visible(data)

	def callback_run_all_simulations(self,widget):
		# Run every scan tab, not just the selected one.
		for i in range(0,self.notebook.get_n_pages()):
			tab = self.notebook.get_nth_page(i)
			tab.simulate(True,True)

	def callback_stop_simulation(self,widget):
		pageNum = self.notebook.get_current_page()
		tab = self.notebook.get_nth_page(pageNum)
		tab.stop_simulation()

	def load_tabs(self):
		"""Create one tab per scan directory found under self.sim_dir.

		If none exist yet a default tab called "scan1" is created.
		"""
		sim_dirs=[]
		get_scan_dirs(sim_dirs,self.sim_dir)
		print sim_dirs,self.sim_dir
		if len(sim_dirs)==0:
			sim_dirs.append("scan1")
		else:
			for i in range(0,len(sim_dirs)):
				sim_dirs[i]=sim_dirs[i]
		for i in range(0,len(sim_dirs)):
			self.add_page(sim_dirs[i])

	def clear_pages(self):
		"""Remove all tabs and their menu entries (used before reloading)."""
		for items in self.tab_menu.get_children():
			self.tab_menu.remove(items)
		for child in self.notebook.get_children():
			self.notebook.remove(child)
		self.rod=[]

	def add_page(self,name):
		"""Create a scan_vbox for the scan called/located at name and add its tab."""
		hbox=gtk.HBox()
		hbox.set_size_request(-1, 25)
		label=gtk.Label("")
		sim_name=os.path.basename(os.path.normpath(name))
		print "Looking for",sim_name,name
		self.rod.append(scan_vbox())
		self.rod[len(self.rod)-1].init(self.myserver,self.tooltips,self.status_bar,self.context_id,label,self.sim_dir,sim_name)
		label.set_justify(gtk.JUSTIFY_LEFT)
		hbox.pack_start(label, False, True, 0)
		# per-tab close button (actually hides the tab, see callback_view_toggle_tab)
		button = gtk.Button()
		close_image = gtk.Image()
		close_image.set_from_file(os.path.join(get_image_file_path(),"close.png"))
		close_image.show()
		button.add(close_image)
		button.props.relief = gtk.RELIEF_NONE
		button.connect("clicked", self.callback_view_toggle_tab,self.rod[len(self.rod)-1].tab_name)
		button.set_size_request(25, 25)
		button.show()
		hbox.pack_end(button, False, False, 0)
		hbox.show_all()
		self.notebook.append_page(self.rod[len(self.rod)-1],hbox)
		self.notebook.set_tab_reorderable(self.rod[len(self.rod)-1],True)
		# matching entry in the tab-visibility menu
		menu_item = gtk.CheckMenuItem(sim_name)
		menu_item.set_active(True)
		self.tab_menu.append(menu_item)
		menu_item.show()
		menu_item.set_active(self.rod[len(self.rod)-1].visible)
		#print "Rod",name,self.rod[len(self.rod)-1].visible
		menu_item.connect("activate", self.callback_view_toggle,menu_item)
		self.number_of_tabs=self.number_of_tabs+1

	def callback_last_menu_click(self, widget, data):
		print [data]

	def switch_page(self,page, page_num, user_param1):
		# Show the directory of the newly selected tab in the status bar.
		pageNum = self.notebook.get_current_page()
		tab = self.notebook.get_nth_page(pageNum)
		self.status_bar.push(self.context_id, tab.sim_dir)

	def callback_remove_all_results(self, widget, data):
		# Find every directory containing a scan.inp below sim_dir.
		# NOTE(review): the actual delete call is commented out, so this
		# currently only prints what it would remove.
		results=[]
		return_file_list(results,self.sim_dir,"scan.inp")
		for i in range(0,len(results)):
			dir_name=os.path.dirname(results[i])
			if os.path.isdir(dir_name):
				print "delete:",dir_name
				#opvdm_delete_file(dir_name)

	def callback_wol(self, widget, data):
		self.myserver.wake_nodes()

	def init(self,my_server):
		"""Build the whole window: menu, toolbar, notebook and status bar.

		my_server -- the shared server object used to run/queue simulations.
		"""
		self.cluster_window=None
		self.win_list=windows()
		self.win_list.load()
		self.win_list.set_window(self,"scan_window")
		print "constructur"
		self.rod=[]
		# restore the last used scan directory, but only if it is inside
		# the current working directory
		if os.path.isfile("scan_window.inp"):
			f = open("scan_window.inp")
			lines = f.readlines()
			f.close()
			path=lines[0].strip()
			if path.startswith(os.getcwd()):
				self.sim_dir=path
			else:
				self.sim_dir=os.getcwd()
		else:
			self.sim_dir=os.getcwd()
		self.tooltips = gtk.Tooltips()
		self.set_border_width(2)
		self.set_title(_("Parameter scan - opvdm"))
		n=0
		self.hpc_root_dir= os.path.abspath(os.getcwd()+'/../')
		self.number_of_tabs=0
		items=0
		# status bar (shown at the bottom of the window)
		self.status_bar = gtk.Statusbar()
		self.status_bar.show()
		self.context_id = self.status_bar.get_context_id("Statusbar example")
		box=gtk.HBox()
		box.add(self.status_bar)
		box.set_child_packing(self.status_bar, True, True, 0, 0)
		box.show()
		# menu definition consumed by get_main_menu()
		self.menu_items = (
			( _("/_File"), None, None, 0, "<Branch>" ),
			( _("/File/Change dir"), None, self.callback_change_dir, 0, None ),
			( _("/File/Close"), None, self.callback_close, 0, None ),
			( _("/Simulations/_New"), None, self.callback_add_page, 0, "<StockItem>", "gtk-new" ),
			( _("/Simulations/_Delete simulaton"), None, self.callback_delete_page, 0, "<StockItem>", "gtk-delete" ),
			( _("/Simulations/_Rename simulation"), None, self.callback_rename_page, 0, "<StockItem>", "gtk-edit" ),
			( _("/Simulations/_Clone simulation"), None, self.callback_copy_page, 0, "<StockItem>", "gtk-copy" ),
			( _("/Simulations/sep1"), None, None, 0, "<Separator>" ),
			( _("/Simulations/_Run simulation"), None, self.callback_run_simulation, 0, "<StockItem>", "gtk-media-play" ),
			( _("/Advanced/_Build simulation"), None, self.callback_build_simulation, 0, "<StockItem>", "gtk-cdrom" ),
			( _("/Advanced/_Run (no build)"), None, self.callback_run_simulation_no_build, 0, "<StockItem>", "gtk-media-play" ),
			( _("/Advanced/_Run nested simulation"), None, self.callback_nested_simulation, 0, "<StockItem>", "gtk-media-play" ),
			( _("/Advanced/_Clean simulation"), None, self.callback_clean_simulation, 0, "<StockItem>", "gtk-clear" ),
			( _("/Advanced/_Clean unconverged simulation"), None, self.callback_clean_unconverged_simulation, 0, "<StockItem>", "gtk-clear" ),
			( _("/Advanced/_Clean simulation output"), None, self.callback_clean_simulation_output, 0, "<StockItem>", "gtk-clear" ),
			( _("/Advanced/sep2"), None, None, 0, "<Separator>" ),
			( _("/Advanced/_Import from hpc"), None, self.callback_import_from_hpc, 0, "<StockItem>", "gtk-open" ),
			( _("/Advanced/_Push to hpc"), None, self.callback_push_to_hpc, 0, "<StockItem>", "gtk-save" ),
			( _("/Advanced/_Push unconverged to hpc"), None, self.callback_push_unconverged_to_hpc, 0, "<StockItem>", "gtk-save" ),
			( _("/Advanced/_Set hpc dir"), None, self.callback_set_hpc_dir, 0, "<StockItem>", "gtk-open" ),
			( _("/Advanced/_Cluster sleep"), None, self.callback_cluster_sleep, 0, "<StockItem>", "gtk-copy" ),
			( _("/Advanced/_Cluster poweroff"), None, self.callback_cluster_poweroff, 0, "<StockItem>", "gtk-copy" ),
			( _("/Advanced/_Cluster wake"), None, self.callback_wol, 0, "<StockItem>", "gtk-copy" ),
			( _("/Advanced/_Remove all results"), None, self.callback_remove_all_results, 0, "<StockItem>", "gtk-copy" ),
			( _("/_Help"), None, None, 0, "<LastBranch>" ),
			( _("/_Help/Help"), None, self.callback_help, 0, None ),
			( _("/_Help/About"), None, about_dialog_show, 0, "<StockItem>", "gtk-about" ),
			)
		main_vbox = gtk.VBox(False, 3)
		menubar = self.get_main_menu(self)
		main_vbox.pack_start(menubar, False, False, 0)
		menubar.show()
		# toolbar
		toolbar = gtk.Toolbar()
		toolbar.set_style(gtk.TOOLBAR_ICONS)
		toolbar.set_size_request(-1, 50)
		pos=0
		#image = gtk.Image()
		#image.set_from_file(os.path.join(get_image_file_path(),"new-tab.png"))
		tb_new_scan = gtk.MenuToolButton(gtk.STOCK_NEW)
		tb_new_scan.connect("clicked", self.callback_add_page)
		self.tooltips.set_tip(tb_new_scan, _("New simulation"))
		self.tab_menu=gtk.Menu()
		tb_new_scan.set_menu(self.tab_menu)
		toolbar.insert(tb_new_scan, pos)
		pos=pos+1
		sep = gtk.SeparatorToolItem()
		sep.set_draw(True)
		sep.set_expand(False)
		toolbar.insert(sep, pos)
		pos=pos+1
		delete = gtk.ToolButton(gtk.STOCK_DELETE)
		delete.connect("clicked", self.callback_delete_page,None)
		self.tooltips.set_tip(delete, _("Delete simulation"))
		toolbar.insert(delete, pos)
		pos=pos+1
		copy = gtk.ToolButton(gtk.STOCK_COPY)
		copy.connect("clicked", self.callback_copy_page,None)
		self.tooltips.set_tip(copy, _("Clone simulation"))
		toolbar.insert(copy, pos)
		pos=pos+1
		rename = gtk.ToolButton(gtk.STOCK_EDIT)
		rename.connect("clicked", self.callback_rename_page,None)
		self.tooltips.set_tip(rename, _("Rename simulation"))
		toolbar.insert(rename, pos)
		pos=pos+1
		sep = gtk.SeparatorToolItem()
		sep.set_draw(True)
		sep.set_expand(False)
		toolbar.insert(sep, pos)
		pos=pos+1
		image = gtk.Image()
		image.set_from_file(os.path.join(get_image_file_path(),"forward2.png"))
		tb_simulate = gtk.ToolButton(image)
		tb_simulate.connect("clicked", self.callback_run_all_simulations)
		self.tooltips.set_tip(tb_simulate, _("Run all simulation"))
		toolbar.insert(tb_simulate, pos)
		pos=pos+1
		# cluster button is debug-mode only
		if debug_mode()==True:
			sep = gtk.SeparatorToolItem()
			sep.set_draw(True)
			sep.set_expand(False)
			toolbar.insert(sep, pos)
			pos=pos+1
			image = gtk.Image()
			image.set_from_file(os.path.join(get_image_file_path(),"server.png"))
			cluster = gtk.ToolButton(image)
			cluster.connect("clicked", self.callback_cluster)
			self.tooltips.set_tip(cluster, _("Configure cluster"))
			toolbar.insert(cluster, pos)
			cluster.show()
			pos=pos+1
		# expanding separator pushes the help button to the right edge
		sep = gtk.SeparatorToolItem()
		sep.set_draw(False)
		sep.set_expand(True)
		toolbar.insert(sep, pos)
		pos=pos+1
		tb_help = gtk.ToolButton(gtk.STOCK_HELP)
		tb_help.connect("clicked", self.callback_help)
		self.tooltips.set_tip(tb_help, _("Help"))
		toolbar.insert(tb_help, pos)
		pos=pos+1
		toolbar.show_all()
		main_vbox.pack_start(toolbar, False, False, 0)
		#main_vbox.add(toolbar)
		main_vbox.set_border_width(1)
		self.add(main_vbox)
		main_vbox.show()
		self.myserver=my_server
		# notebook holding one scan_vbox per scan directory
		self.notebook = gtk.Notebook()
		self.notebook.show()
		self.notebook.set_tab_pos(gtk.POS_LEFT)
		self.load_tabs()
		main_vbox.pack_start(self.notebook, True, True, 0)
		main_vbox.pack_start(box, False, False, 0)
		self.connect("delete-event", self.callback_close)
		self.notebook.connect("switch-page",self.switch_page)
		self.set_icon_from_file(os.path.join(get_image_file_path(),"image.jpg"))
		self.hide()
|
You have the right to access the personal information Wiltshire Health and Care holds about you. You may ask to view your patient/health records in person, or you may ask for a copy of your health records.
In most circumstances the information will be provided to you free of charge. Wiltshire Health and Care may raise a charge if there is an excessive amount of information in your record. In these circumstances we will inform you of the potential cost before responding to your request so that you can make a decision on whether you would like to proceed.
We will respond to your request within 30 calendar days of receiving it. Should additional time be required because of the complexity of the request we will write to let you know.
Please note, we will need to confirm your identity before disclosing information or releasing photocopies.
Complete this application form and post it to the address on the form.
Phone 01249 456565 or email whc.corporateservices@nhs.net and we can send you an application form.
We should be able to provide you with access to your health records within 40 calendar days, although we aim to do so within 21 calendar days wherever possible. Please note, we will need to confirm your identity before disclosing information or releasing photocopies.
Wiltshire Health and Care is registered with the Information Commissioner’s Office as a Data Controller, as required by the EU General Data Protection Regulations (GDPR) and UK Legislation. Information about our registration, including in general terms how we use personal data, can be found in our Data Protection Register Entry Details.
Our Privacy Notice for Patients can be found here.
For further information, please phone us on 01249 456565 or email ask.wiltshirehealthandcare@nhs.net.
|
from argv.iterables import peekable
from argv.flags import parse_tokens
class InferentialParser(object):
    """Argument parser that infers flag arity from what follows each flag."""

    def __repr__(self):
        return '%s()' % self.__class__.__name__

    def parse(self, args=None):
        '''Parse a list of arguments, returning a dict.

        Flags are only boolean if they are not followed by a non-flag argument.
        All positional arguments not associable with a flag will be added to
        the return dictionary's `['_']` field.
        '''
        if args is None:
            import sys
            # sys.argv[0] is the program name; skip it
            args = sys.argv[1:]
        result = dict()
        # tokens yields (is_flag, name) pairs; peekable lets us look ahead
        tokens = peekable(parse_tokens(args))
        for is_flag, name in tokens:
            if is_flag is not True:
                # positional argument: collect under '_'
                result.setdefault('_', []).append(name)
                continue
            # peek at the next token; the default marks "no more tokens"
            upcoming_is_flag, upcoming_name = tokens.peek(default=(None, None))
            if upcoming_is_flag is False:
                # a plain value follows this flag, so treat it as the
                # flag's argument and consume it from the stream
                result[name] = upcoming_name
                tokens.next()
            else:
                # end of input or another flag follows: boolean flag
                result[name] = True
        return result
|
Krakow will become Wizz Air’s 26th base and its newest base in Poland, joining Warsaw, Katowice, Gdansk and Wroclaw. As part of WIZZ’s expansion, the airline will continue to increase its operations in Poland to provide Polish customers with ever more travel opportunities. The base establishment in Krakow represents a $240 million** investment by WIZZ in Poland and creates 80 new direct jobs with the airline and over 140 jobs in associated industries***. The 2 new Airbus A321 aircraft will support the operations of twelve new routes long awaited by Krakow travellers: services to London, Kyiv, Bari, Catania, Larnaca, Nice, Turku, Billund, Doncaster, Kharkiv, Oslo and Kutaisi.
WIZZ has a great number of innovative products and services enabling its passengers to best customize their journeys, all of which can be booked through Wizz Air’s easy-to-use app and website. Among the airline’s latest developments, customers can choose the Flexible Travel Partner service, whenever they want to create a new reservation without including all passengers’ names at the time of booking or secure the best fare for a period of 48 hours by selecting the Fare Lock product.
|
#!/usr/bin/env python
# kidsafe child safe proxy server using squid
# see http://www.penguintutor.com/kidsafe
# kidsafe.py - squid v3 authethentication helper application
# Copyright Stewart Watkiss 2012
# This file is part of kidsafe.
#
# kidsafe is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# kidsafe is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with kidsafe. If not, see <http://www.gnu.org/licenses/>.
import datetime
import os
import re
import sys
import time
# These are global variables so can be accessed elsewhere
# These are not normally updated during running
# Only loglevel needs to be global, but others kept together at start of file
# Set loglevel to amount of logging required
# 0 = no logging, 1 = critical only, 2 = significant (warning), 3 = disable all logging of accepts / denies, 4 = log entries where log=yes is set on rule, 5 = include denies, 6 = include accept, 7 = include session tests, 8 = detailed session log, 9 = debug, 10 = full debug
# Normally this should be level 4 (or level 5 if want to log all blocked sites)
# above 6 not recommended for normal use
# 3 is same as 2 at the moment
loglevel = 6
#loglevel = 10
# login sessions, written by the kidsafe front end and reloaded on change
session_file = "/opt/kidsafe/kidsafe.session"
# filtering rules, one rule per line; reloaded on change
rules_file = "/opt/kidsafe/kidsafe.rules"
# destination for logEntry() messages
log_file = "/var/log/squid3/kidsafe.log"
def main ():
	"""Squid helper loop: read "src dst dstport path" lines from stdin and
	answer OK (allow) or ERR (deny) on stdout for each request.

	The source address is matched against the session list to find the
	user's permission level, then the destination is matched against the
	rules to decide the outcome.  Both files are reloaded automatically
	when their modification time changes.
	"""
	global loglevel
	global rules_file
	global session_file
	# store time when files were last modified - if this is updated then we reload the file
	r_mtime = os.path.getmtime(rules_file)
	s_mtime = os.path.getmtime(session_file)
	if (loglevel >= 2):
		logEntry (2, "kidsafe starting")
	# Session list
	# holds whether user is logged in and at what level access they have
	sessions = loadSessions(session_file);
	# List to hold whitelist
	# each entry in the whitelist is a list containing
	rules = loadRules(rules_file);
	while (1):
		# read entry from stdin - src, dst, dstport
		inline = sys.stdin.readline()
		# only use src & dst; dstport is for logging purposes
		# path is included, but not used. In future this can be used for additional keyword checks against url
		(src, dst, dstport, path) = inline.split()
		if (loglevel >= 10):
			logEntry (10, "New proxy request "+inline)
		# current time - we use this later to check for expired entries / sessions
		timenow = int(time.time());
		# convert dst to lowercase (rest should be IP addresses)
		dst = dst.lower()
		# check if sessions file updated (mtime) to see if we need to reload
		if (loglevel >= 10):
			logEntry (10, "last loaded mtime "+str(s_mtime))
			logEntry (10, "new mtime "+str(os.path.getmtime(session_file)))
		if (s_mtime != os.path.getmtime(session_file)):
			sessions = loadSessions(session_file)
			s_mtime = os.path.getmtime(session_file)
		# check if rules file updated (mtime) to see if we need to reload
		if (loglevel >= 10):
			logEntry (10, "last loaded mtime "+str(r_mtime))
			logEntry (10, "new mtime "+str(os.path.getmtime(rules_file)))
		if (r_mtime != os.path.getmtime(rules_file)):
			rules = loadRules(rules_file)
			r_mtime = os.path.getmtime(rules_file)
		# reset authorisation level
		authlevel = 0
		# check to see if the user is logged on and get permission level
		for sessionentry in sessions:
			# check time not expired (when we loaded we checked, but time could have expired since)
			if (sessionentry[3]!='*' and sessionentry[3]!= '0' and int(sessionentry[3]) < timenow):
				# may not be relevant, but we include in level 8
				if (loglevel >= 8):
					logEntry (8, "Expired session "+str(sessionentry))
				# expired to move to next
				continue
			if (checkAddress (src,sessionentry[0])):
				# Log all matches if on session log level
				if (loglevel >= 8):
					logEntry (8, "Session match "+str(sessionentry))
				# set auth level if higher - the level field is a string so
				# it must be cast before the comparison (in Python 2 a str
				# always compares greater than an int, which would make the
				# LAST matching session win instead of the highest level)
				if (int(sessionentry[1]) > authlevel):
					authlevel = int(sessionentry[1])
			elif (loglevel >= 10):
				logEntry (10, "Session not matched "+src+" "+str(sessionentry))
		if (loglevel >= 7):
			logEntry (7, "Highest permission current session "+str(authlevel))
		# Special case - level 10 is accept all & no log to return as OK
		if (authlevel > 9):
			sys.stdout.write("OK\n")
			sys.stdout.flush()
			continue
		# Check against rules
		# have we had an accept?
		accept = False
		# rulematch will hold the rule number that get a hit on for logging
		rulematch = 0
		# set logentry if status is to log error
		logentry = 1
		for ruleentry in rules:
			# check rule not expired (since generated)
			if (ruleentry[4]!='*' and ruleentry[4]!= '0' and int(ruleentry[4]) < timenow):
				# may not be relevant, but we include in level 9
				if (loglevel >= 9):
					logEntry (9, "Expired rule "+str(ruleentry))
				continue
			# check if the user level matches this rule
			if (checkUserLevel(authlevel, ruleentry[3]) == False) :
				if (loglevel >= 10) :
					logEntry (10, "User level not matched on rule "+str(ruleentry[1]))
				continue
			if (loglevel >= 10) :
				logEntry (10, "User level match on rule "+str(ruleentry[1]))
			# check if the destination matches; first matching rule wins
			if checkDst(dst, ruleentry[0]) :
				rulematch = ruleentry[1]
				if (loglevel >= 10) :
					logEntry (10, "Destination match on rule "+str(rulematch))
				logentry = ruleentry[5]
				# is this an accept or a deny rule
				# allow if not 0
				if (int(ruleentry[2]) != 0) :
					if (loglevel >= 10) :
						logEntry (10, "Rule "+str(rulematch)+" is allow "+str(ruleentry[2]))
					accept = True
					break
				# deny
				else :
					if (loglevel >= 10) :
						logEntry (10, "Rule "+str(rulematch)+" is deny "+str(ruleentry[2]))
					accept = False
					break
			else :
				if (loglevel >= 9):
					logEntry (9, "Rule doesn't match destination")
		if (loglevel >= 10) :
			logEntry (10, "RULES checked accept = "+str(accept))
		# if accept has been changed to True - return OK otherwise return ERR
		# if logging because it's set in rule then use loglevel 4, otherwise 5 / 6 as appropriate
		if (accept == True) :
			if (loglevel >= 4 and logentry != '0'):
				logEntry (4, "ACCEPT "+src+" -> "+dst+":"+str(dstport)+" rule:"+str(rulematch))
			elif (loglevel >= 6):
				logEntry (6, "ACCEPT "+src+" -> "+dst+":"+str(dstport)+" rule:"+str(rulematch))
			sys.stdout.write("OK\n")
		else :
			if (loglevel >= 4 and logentry != '0'):
				logEntry (4, "REJECT "+src+" -> "+dst+":"+str(dstport)+" rule:"+str(rulematch))
			elif (loglevel >= 5):
				logEntry (5, "REJECT "+src+" -> "+dst+":"+str(dstport)+" rule:"+str(rulematch))
			sys.stdout.write("ERR\n")
		sys.stdout.flush()
# Open and close the file each time so that we don't run the risk of keeping the file
# open when another thread wants to write to it.
def logEntry(logmessagelevel, logmessage):
	"""Append a timestamped message to the kidsafe log file.

	logmessagelevel -- numeric level the message is logged at
	logmessage -- text of the log entry
	"""
	global log_file
	# Get timestamp as human readable format.  Note %S (zero-padded
	# seconds) - lowercase %s is a platform-specific extension that
	# expands to the Unix epoch timestamp, not the seconds field.
	now = datetime.datetime.now()
	timestamp = now.strftime("%Y-%m-%d %H:%M:%S")
	# open file to append
	logfile = open(log_file, 'a')
	logfile.write(timestamp+" "+str(logmessagelevel)+" "+logmessage+"\n")
	logfile.close()
	return
def loadRules(filename):
global loglevel
ruleslist = list()
# Read in rules file
ruleslistfile = open(filename, 'r')
# currenttime
timenow = int(time.time());
# Use linecount to track position in file - in case of error
# read in each line
for linecount, entry in enumerate(ruleslistfile):
entry = entry.rstrip()
# ignore any empty lines / comments
if (entry and not(entry.startswith('#'))):
thisLine = entry.split(' ')
# check there is a valid entry (basic check of number of elements in entry)
if (len(thisLine) < 6):
if (loglevel >= 1):
logEntry(1, "Invalid entry in rules file line %d \n" % (linecount))
# Print message and abort
#print ("Invalid entry in rules file line %d \n" % (linecount))
# print deny
print "ERR\n"
sys.exit()
# check not expired
if (thisLine[4]!='*' and thisLine[4]!= '0' and int(thisLine[4]) < timenow):
if (loglevel >= 9):
logEntry (9, "Expired rule (load) "+str(entry))
continue
# if expired move on to next entry (ignore)
continue
ruleslist.append (thisLine)
ruleslistfile.close()
if (loglevel >= 2):
logEntry(2, "loaded rules file")
# debug level >=9 is not recommended for normal use
if (loglevel >= 9):
all_entries = "";
for each_entry in ruleslist:
all_entries += str(each_entry)+"\n"
logEntry (9, "Rules entries:\n"+all_entries)
return ruleslist
# loads the session file; main() later derives the current login level for an IP address (highest value) from these entries
def loadSessions(filename):
global loglevel
sessionlist = list()
# Read in whitelist file
sessionlistfile = open(filename, 'r')
# currenttime
timenow = int(time.time());
# Use linecount to track position in file - in case of error
# read in each line
for linecount, entry in enumerate(sessionlistfile):
entry = entry.rstrip()
# ignore any empty lines / comments
if (entry and not(entry.startswith('#'))):
thisLine = entry.split(' ')
# check there is a valid entry (basic check of number of elements in entry)
if (len(thisLine) < 4):
if (loglevel >=1 ):
logEntry (1, "Invalid entry in session file line %d \n" %(linecount))
# Print message and abort
#print ("Invalid entry in sessions file line %d \n" % (linecount))
print "ERR\n"
sys.exit()
# check not expired
if (thisLine[3]!='*' and thisLine[3]!= '0' and int(thisLine[3]) < timenow):
# if expired move on to next entry (ignore) - only skip here for efficiency later as we need to check this in case it changes in future anyway
# may not be relevant, but we include in level 9 (ie higher than normal session log level)
if (loglevel >= 9):
logEntry (9, "Expired session (load) "+str(entry))
continue
sessionlist.append (thisLine)
sessionlistfile.close()
if (loglevel >= 2):
logEntry(2, "loaded session file")
# debug level >=9 is not recommended for normal use
if (loglevel >= 9):
all_entries = "";
for each_entry in sessionlist:
all_entries += str(each_entry)+"\n"
logEntry (9, "Session entries:\n"+all_entries)
return sessionlist
# function to check if a specific destination matches a particular rule
# rule should just be the domain/host part of the whitelist
def checkDst(dest, rule):
	"""Return True if destination host dest matches a single rule pattern.

	Supported patterns:
	  '*'         - matches everything
	  exact host  - plain string equality
	  '.domain'   - suffix match (any host within the domain)
	  '/regexp'   - regular expression matched against the destination
	"""
	# check for * rule (normally used to always allow for a specific source IP address or to temporarily disable
	if (rule=='*'):
		return True
	# check specific rule first - more efficient than rest
	if (dest==rule):
		return True
	# does entry start with a . (if so then check using endswith)
	if (rule.startswith('.')) :
		if (dest.endswith(rule)):
			return True
		else :
			return False
	# least efficient - regular expression.  It is the RULE that carries
	# the leading '/' marker (the original tested dest, so regex rules
	# could never fire); this mirrors checkAddress()
	elif (rule.startswith('/')) :
		if re.match (rule, dest) :
			return True
		else :
			return False
	# No match
	else :
		return False
# check if our IP address matches that in the rule
# currently accept fixed IP address or regexp (add subnets in future)
def checkAddress(src, session):
	"""Return True if client address src matches a session's address spec."""
	# exact address is the most likely case, so test it first
	if src == session:
		return True
	# a leading '/' marks a regular expression
	if session.startswith('/'):
		return re.match(session, src) is not None
	# if it's a subnet (not yet implemented)
	#elif session.find('/')
	# anything else does not match
	return False
# check to see if user level matches (supports +/- after the value)
def checkUserLevel(authlevel, ruleuser):
	"""Return True if authlevel satisfies the rule's user-level field.

	ruleuser is '*' (all users) or a comma separated list of levels,
	each optionally suffixed with '+' (that level or higher) or '-'
	(that level or lower).
	"""
	# rule = * applies to all users
	if (ruleuser=='*') : return True
	# split into comma separated entries if applicable
	listruleuser = ruleuser.split (',')
	for thisruleuser in listruleuser:
		# get int representation (with +/- removed)
		ruleuserint = int (thisruleuser.rstrip('+-'))
		# first check for exact match (don't need to know if it had +/-)
		if (authlevel == ruleuserint) : return True
		# test the suffix of THIS entry (the original tested the whole
		# comma separated string, so +/- were ignored whenever more than
		# one level was listed)
		if (thisruleuser.endswith('+')) :
			if (authlevel > ruleuserint) : return True
		elif (thisruleuser.endswith('-')) :
			if (authlevel < ruleuserint) : return True
	# if not matched
	return False
# - Added inline instead as more efficient than function call
## function to check if a particular rule has expired
## uses unix time stamp - or * for no expiry
#def checkExpire(expiretime):
# if (expiretime == '*' or expiretime == '0'): return True
# timenow = int(time.time());
# if (int(expiretime) > timenow): return True;
# return False
# Start the helper loop when executed by squid (not when imported)
if __name__ == '__main__':
	main()
|
Thursday I’m at Laced with Grace!
Camp begins again NEXT week.
MUCH has been happening around here though…for one thing, I’ve done a TON of laundry, complete with ironing! I’ve tried not to cook much. Well, I did just a little. I will get back into that gear next week.
Head over to Laced with Grace and read my ramblings from this week.
I hope this link will take you there! Click here!
Or if it doesn’t work…just go to Laced with Grace!
This entry was posted on Thursday, July 7th, 2011 at 12:59 am and posted in Laced With Grace. You can follow any responses to this entry through the RSS 2.0 feed.
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.