text
stringlengths 29
850k
|
|---|
from django.db import models
from django.db.models.signals import post_save
from django.conf import settings
from django.utils import timezone
from django.core import validators
from django.core.mail import send_mail
from django.contrib.auth.models import AbstractBaseUser, UserManager, PermissionsMixin
from django.utils.translation import ugettext_lazy as _
import os
import binascii
def generate_auth_token():
    """Return a random 30-character lowercase-hex authentication token.

    Used as the callable default for ``User.authtoken`` (max_length=48),
    so each new row gets a fresh token.

    Bug fix: ``binascii.b2a_hex`` returns ``bytes`` on Python 3, which
    would be stored/compared as ``b'...'`` by a CharField; decode to str.
    """
    return binascii.b2a_hex(os.urandom(15)).decode('ascii')
class User(AbstractBaseUser, PermissionsMixin):
    """ We had to copy this from contrib.auth.models because we need email to be unique. """
    username = models.CharField(_('username'), max_length=30,
                                unique=True,
                                help_text=_('<strong>Required!</strong><br>Your username can be composed of letters, digits, and the symbols "@", ".", "+", "-", and "_".'),
                                validators=[
                                    validators.RegexValidator(r'^[\w.@+-]+$', _('Enter a valid username.'), 'invalid')
                                ])
    # Unlike django.contrib.auth's stock User, email is unique -- the reason this class exists.
    email = models.EmailField(_('email address'),
                              unique=True,
                              help_text=_('<strong>Required!</strong>'),
                              validators=[
                                  validators.validate_email
                              ])
    # Token used by the client app to authenticate; the default is a callable,
    # so every new row receives a freshly generated random token.
    authtoken = models.CharField(_('auth token'), max_length=48,
                                 unique=True,
                                 help_text=_('The authentication token used to log into the client app'),
                                 default=generate_auth_token)
    first_name = models.CharField(_('first name'), max_length=30, blank=True)
    last_name = models.CharField(_('last name'), max_length=30, blank=True)
    is_staff = models.BooleanField(_('staff status'),
                                   default=False,
                                   help_text=_('Designates whether the user can log into this admin site.'))
    is_active = models.BooleanField(_('active'),
                                    default=True,
                                    help_text=_('Designates whether this user should be treated as active. Unselect this instead of deleting accounts.'))
    date_joined = models.DateTimeField(_('date joined'), default=timezone.now)

    objects = UserManager()

    # Users authenticate with their username; email is additionally required
    # when creating a user via createsuperuser/manager helpers.
    USERNAME_FIELD = 'username'
    REQUIRED_FIELDS = ['email']

    class Meta:
        verbose_name = _('user')
        verbose_name_plural = _('users')

    def get_full_name(self):
        """
        Returns the first_name plus the last_name, with a space in between.
        """
        full_name = '%s %s' % (self.first_name, self.last_name)
        return full_name.strip()

    def get_short_name(self):
        "Returns the short name for the user."
        return self.first_name

    def email_user(self, subject, message, from_email=None, **kwargs):
        """
        Sends an email to this User.
        """
        send_mail(subject, message, from_email, [self.email], **kwargs)
class UserProfile(models.Model):
    """One-to-one companion record for a User, holding MyBB forum linkage."""
    user = models.OneToOneField(settings.AUTH_USER_MODEL, unique=True, related_name='profile')
    # MyBB login key for the linked forum account.
    mybb_loginkey = models.CharField(max_length=100)
    # MyBB user id; null until the account has been linked.
    mybb_uid = models.IntegerField(null=True)
def create_user_profile(sender, instance, created, **kwargs):
    """post_save handler: attach an empty UserProfile to each newly created User."""
    if not created:
        return
    UserProfile.objects.create(user=instance)


# This is supposed to use settings.USER_AUTH_MODEL but it doesn't seem to work
post_save.connect(create_user_profile, sender=User)
|
Although the Monte Carlo Casino – or “Le Casino” – is the best known casino in Monte Carlo, there are three other casinos in the principality, each with a distinctive character.
Le Café de Paris, on the Place du Casino, is reminiscent of Monte Carlo at the end of the nineteenth century, with a Belle Époque style for which both the casino and the famous café itself are famous. The casino’s decor is inspired by the various historic Grand Prix.
Le Café de Paris has 1,200 slot machines and offers a total of 15 table games, including American roulette, baccarat, blackjack, chemin de fer, craps, electronic roulette, and punto banco. Slot machines are available for play from 10:00 AM and table games start after 5:00 PM. Players must be at least 21 years old.
Of the Monte Carlo casinos, the circus-themed Sun Casino is most similar in look and feel to a Las Vegas casino. Located within the Monte-Carlo Grand Hotel on 27 Avenue de Spelugues, it features 450 slot machines and 27 table games, including baccarat, American roulette, blackjack, chemin de fer, craps and punto banco. There is no admission fee, and players must be 18 years or older. Smart casual dress is appreciated, but no strict dress code is enforced.
Blackjack, craps and American roulette are offered after 5:00 PM on weekdays and after 4:00 PM on weekends. The slot machines are available for play every day after 11:00 AM.
Monte Carlo Sporting Club & Casino on the Avenue Princess Grace is open only in the summer months, from June to September. It has a futuristic, airy design and features 62 slot machines, 24 table games – including roulette, baccarat, banque a tout va, blackjack, chemin de fer, craps, English roulette, French roulette and punto banco – and a private salon for high-rolling gambling. Gaming begins each day in summer at 10:00 PM. Players must be 21 years or older, and a tie and jacket dress code is enforced.
In addition to its gaming facilities, Le Sporting Monte-Carlo hosts concerts and live performances in its Salle d’Etoile. It also has a Cuban smoking parlour, restaurants and a nightclub.
For details of some of the main table games played at the Monte Carlo casinos, see Monte Carlo roulette and Monte Carlo roulette betting, and Monte Carlo card games.
|
import re
from functools import reduce
from pyparsing import (
infixNotation,
opAssoc,
Optional,
Literal,
CharsNotIn,
ParseException,
)
from logging import Filter, _levelNames
import six
from django.apps import apps
from django.db import models
from django.conf import settings
from awx.main.utils.common import get_search_fields
__all__ = ['SmartFilter', 'ExternalLoggerEnabled']
class FieldFromSettings(object):
    """
    Descriptor that reads its value from Django settings by default.

    An explicitly assigned value is stored per-instance (in the owner's
    ``settings_override`` dict) and takes precedence over the value in
    settings. Assigning ``None`` clears any per-instance override so the
    settings value becomes visible again.
    """

    def __init__(self, setting_name):
        # Name of the Django setting this field mirrors.
        self.setting_name = setting_name

    def __get__(self, instance, type=None):
        # Per-instance override wins over the global setting.
        if self.setting_name in getattr(instance, 'settings_override', {}):
            return instance.settings_override[self.setting_name]
        return getattr(settings, self.setting_name, None)

    def __set__(self, instance, value):
        if value is None:
            # BUG FIX: previously this popped the literal key 'instance'
            # instead of the setting's own key, so overrides were never
            # actually cleared.
            if hasattr(instance, 'settings_override'):
                instance.settings_override.pop(self.setting_name, None)
        else:
            if not hasattr(instance, 'settings_override'):
                instance.settings_override = {}
            instance.settings_override[self.setting_name] = value
class ExternalLoggerEnabled(Filter):
    """Logging filter that consults (database-backed) settings to decide
    whether each record should be shipped to the external log aggregator."""

    # Prevents recursive logging loops from swamping the server
    LOGGER_BLACKLIST = (
        # loggers that may be called in process of emitting a log
        'awx.main.utils.handlers',
        'awx.main.utils.formatters',
        'awx.main.utils.filters',
        'awx.main.utils.encryption',
        'awx.main.utils.log',
        # loggers that may be called getting logging settings
        'awx.conf'
    )

    # Each field falls back to the named setting unless explicitly overridden
    # via __init__ kwargs (see FieldFromSettings).
    lvl = FieldFromSettings('LOG_AGGREGATOR_LEVEL')
    enabled_loggers = FieldFromSettings('LOG_AGGREGATOR_LOGGERS')
    enabled_flag = FieldFromSettings('LOG_AGGREGATOR_ENABLED')

    def __init__(self, **kwargs):
        # Only the FieldFromSettings attributes declared above may be
        # overridden via kwargs; anything else is rejected loudly.
        super(ExternalLoggerEnabled, self).__init__()
        for field_name, field_value in kwargs.items():
            if not isinstance(ExternalLoggerEnabled.__dict__.get(field_name, None), FieldFromSettings):
                raise Exception('%s is not a valid kwarg' % field_name)
            if field_value is None:
                continue
            setattr(self, field_name, field_value)

    def filter(self, record):
        """
        Uses the database settings to determine if the current
        external log configuration says that this particular record
        should be sent to the external log aggregator

        False - should not be logged
        True - should be logged
        """
        # Logger exceptions
        for logger_name in self.LOGGER_BLACKLIST:
            if record.name.startswith(logger_name):
                return False
        # General enablement
        if not self.enabled_flag:
            return False
        # Level enablement
        if record.levelno < _levelNames[self.lvl]:
            # logging._levelNames -> logging._nameToLevel in python 3
            # NOTE(review): _levelNames exists only in Python 2's logging
            # module -- confirm this code path is never run under Python 3.
            return False
        # Logger type enablement
        loggers = self.enabled_loggers
        if not loggers:
            return False
        if record.name.startswith('awx.analytics'):
            # analytics loggers are matched on the leaf name (e.g. 'job_events')
            base_path, headline_name = record.name.rsplit('.', 1)
            return bool(headline_name in loggers)
        else:
            # all other loggers are matched on the root name (e.g. 'awx')
            if '.' in record.name:
                base_name, trailing_path = record.name.split('.', 1)
            else:
                base_name = record.name
            return bool(base_name in loggers)
def string_to_type(t):
    """Coerce the string *t* to None/bool/int/float when it spells one;
    otherwise return it unchanged."""
    literal_values = {u'null': None, u'true': True, u'false': False}
    if t in literal_values:
        return literal_values[t]
    if re.match(r'[-+]?[0-9]+$', t):
        return int(t)
    if re.match(r'[-+]?[0-9]+\.[0-9]+$', t):
        return float(t)
    return t
def get_model(name):
    # Resolve a model class from the 'main' app registry by its name.
    return apps.get_model('main', name)
class SmartFilter(object):
    """Translate a smart-search filter string (e.g.
    ``name=foo and ansible_facts__a__b=c``) into a Host queryset,
    using pyparsing to build the boolean expression tree."""

    # Relationship whose lookups are rewritten into JSON __contains queries.
    SEARCHABLE_RELATIONSHIP = 'ansible_facts'

    class BoolOperand(object):
        """A single ``key=value`` term; ``self.result`` holds the matching
        Host queryset."""

        def __init__(self, t):
            kwargs = dict()
            k, v = self._extract_key_value(t)
            k, v = self._json_path_to_contains(k, v)

            Host = get_model('host')
            search_kwargs = self._expand_search(k, v)
            if search_kwargs:
                kwargs.update(search_kwargs)
                # OR together one __icontains Q per searchable field
                q = reduce(lambda x, y: x | y, [models.Q(**{u'%s__icontains' % _k: _v}) for _k, _v in kwargs.items()])
                self.result = Host.objects.filter(q)
            else:
                # detect loops and restrict access to sensitive fields
                # this import is intentional here to avoid a circular import
                from awx.api.filters import FieldLookupBackend
                FieldLookupBackend().get_field_from_lookup(Host, k)
                kwargs[k] = v
                self.result = Host.objects.filter(**kwargs)

        def strip_quotes_traditional_logic(self, v):
            """Strip surrounding double quotes from traditional (non-JSON) values."""
            if type(v) is six.text_type and v.startswith('"') and v.endswith('"'):
                return v[1:-1]
            return v

        def strip_quotes_json_logic(self, v):
            """Strip surrounding double quotes, except the literal '"null"',
            which must stay quoted to distinguish it from JSON null."""
            if type(v) is six.text_type and v.startswith('"') and v.endswith('"') and v != u'"null"':
                return v[1:-1]
            return v

        '''
        TODO: We should be able to express this in the grammar and let
              pyparsing do the heavy lifting.
        TODO: separate django filter requests from our custom json filter
              request so we don't process the key any. This could be
              accomplished using a whitelist or introspecting the
              relationship refered to to see if it's a jsonb type.
        '''
        def _json_path_to_contains(self, k, v):
            """Rewrite an ``ansible_facts__a__b[]__c=v`` lookup into an
            ``ansible_facts__contains`` query with a nested dict/list value."""
            if not k.startswith(SmartFilter.SEARCHABLE_RELATIONSHIP):
                v = self.strip_quotes_traditional_logic(v)
                return (k, v)

            # Strip off leading relationship key
            if k.startswith(SmartFilter.SEARCHABLE_RELATIONSHIP + '__'):
                strip_len = len(SmartFilter.SEARCHABLE_RELATIONSHIP) + 2
            else:
                strip_len = len(SmartFilter.SEARCHABLE_RELATIONSHIP)
            k = k[strip_len:]

            pieces = k.split(u'__')

            assembled_k = u'%s__contains' % (SmartFilter.SEARCHABLE_RELATIONSHIP)
            assembled_v = None

            last_v = None
            last_kv = None

            for i, piece in enumerate(pieces):
                new_kv = dict()
                if piece.endswith(u'[]'):
                    # trailing [] marks this path element as a JSON array
                    new_v = []
                    new_kv[piece[0:-2]] = new_v
                else:
                    new_v = dict()
                    new_kv[piece] = new_v

                if last_kv is None:
                    assembled_v = new_kv
                elif type(last_v) is list:
                    last_v.append(new_kv)
                elif type(last_v) is dict:
                    # BUG FIX: dict.keys() is not subscriptable on Python 3;
                    # materialize it as a list (each dict holds a single key).
                    last_kv[list(last_kv.keys())[0]] = new_kv

                last_v = new_v
                last_kv = new_kv

            v = self.strip_quotes_json_logic(v)
            if type(last_v) is list:
                last_v.append(v)
            elif type(last_v) is dict:
                # BUG FIX: same Python 3 dict.keys() indexing fix as above.
                last_kv[list(last_kv.keys())[0]] = v

            return (assembled_k, assembled_v)

        def _extract_key_value(self, t):
            """Split a pyparsing token group into ``(key, value)``, handling
            optionally quoted keys and values."""
            t_len = len(t)

            k = None
            v = None

            # key
            # "something"=
            v_offset = 2
            # NOTE(review): assumes a quoted key parses into at least three
            # tokens ('"', key, '"'); confirm the grammar cannot yield '""='.
            if t_len >= 2 and t[0] == "\"" and t[2] == "\"":
                k = t[1]
                v_offset = 4
            # something=
            else:
                k = t[0]

            # value
            # ="something"
            if t_len > (v_offset + 2) and t[v_offset] == "\"" and t[v_offset + 2] == "\"":
                v = u'"' + six.text_type(t[v_offset + 1]) + u'"'
                #v = t[v_offset + 1]
            # empty ""
            elif t_len > (v_offset + 1):
                v = u""
            # no ""
            else:
                v = string_to_type(t[v_offset])

            return (k, v)

        def _expand_search(self, k, v):
            """If *k* is a ``search``/``<relation>__search`` lookup, expand
            it into per-field kwargs over the model's search fields;
            otherwise return None."""
            if 'search' not in k:
                return None

            model, relation = None, None
            if k == 'search':
                model = get_model('host')
            elif k.endswith('__search'):
                relation = k.split('__')[0]
                try:
                    model = get_model(relation)
                except LookupError:
                    raise ParseException('No related field named %s' % relation)

            search_kwargs = {}
            if model is not None:
                search_fields = get_search_fields(model)
                for field in search_fields:
                    if relation is not None:
                        k = '{0}__{1}'.format(relation, field)
                    else:
                        k = field
                    search_kwargs[k] = v
            return search_kwargs

    class BoolBinOp(object):
        """Base boolean combinator: folds the child operand querysets
        pairwise via execute_logic (operands sit at even indices)."""
        def __init__(self, t):
            self.result = None
            i = 2
            while i < len(t[0]):
                if not self.result:
                    self.result = t[0][0].result
                right = t[0][i].result
                self.result = self.execute_logic(self.result, right)
                i += 2

    class BoolAnd(BoolBinOp):
        def execute_logic(self, left, right):
            # queryset intersection
            return left & right

    class BoolOr(BoolBinOp):
        def execute_logic(self, left, right):
            # queryset union
            return left | right

    @classmethod
    def query_from_string(cls, filter_string):
        '''
        Parse *filter_string* into a boolean expression of key=value terms
        and return the resulting Host queryset.

        Raises RuntimeError if the string cannot be parsed.

        TODO:
         * handle values with " via: a.b.c.d="hello\"world"
         * handle keys with " via: a.\"b.c="yeah"
         * handle key with __ in it
        '''
        filter_string_raw = filter_string
        filter_string = six.text_type(filter_string)

        # Atoms are runs of characters that are not whitespace, parens,
        # '=', or '"'; whitespace chars are collected from the input itself
        # so unusual unicode spaces are handled too.
        unicode_spaces = list(set(six.text_type(c) for c in filter_string if c.isspace()))
        unicode_spaces_other = unicode_spaces + [u'(', u')', u'=', u'"']

        atom = CharsNotIn(unicode_spaces_other)
        atom_inside_quotes = CharsNotIn(u'"')
        atom_quoted = Literal('"') + Optional(atom_inside_quotes) + Literal('"')
        EQUAL = Literal('=')

        grammar = ((atom_quoted | atom) + EQUAL + Optional((atom_quoted | atom)))
        grammar.setParseAction(cls.BoolOperand)

        boolExpr = infixNotation(grammar, [
            ("and", 2, opAssoc.LEFT, cls.BoolAnd),
            ("or", 2, opAssoc.LEFT, cls.BoolOr),
        ])

        try:
            res = boolExpr.parseString('(' + filter_string + ')')
        except ParseException:
            raise RuntimeError(u"Invalid query %s" % filter_string_raw)

        if len(res) > 0:
            return res[0].result

        raise RuntimeError("Parsing the filter_string %s went terribly wrong" % filter_string)
|
I feel robbed of your humor bcuz I’m on the west coast. I remember going to your shows in the 80’s. You are hilarious. Maybe this summer for our 25th anniversary we can get to Ptown.
This podcast will be GREAT!
You are the best. Love your videos, watch them often!
Love your first outing re your podcast. Can’t wait for you to get it up and running. It’s one more thing that will take your time so hope once you get it going, you can sustain it. Thanks for your humor and putting things into perspective. Keep up the good work and hope to see you in one of the clubs soon doing your new show.
Hi Kate. Love ya. So glad you’re doing this. Can’t wait until your next podcast. We’re in big trouble girl and we need your guidance and your wisdom. RESIST!!!!!
Sylvia (yes, I’m still here trying to figure it all out.)
return to normal? What’s that?
Love your humor! Keep it up!!!
I’ve seen your shows in P-Town. I’ve got all your books and re-read them whenever I need a lift or can’t sleep. They calm my mind. I also watch and re-watch all of your videos. Now I can add your podcasts – Yay!
Always enjoy what you have to say.Keep ’em coming!
Kate in long form, how can I not like that?! Curious where we can find the podcast other than here? I’d like to subscribe.
It’s about damn time! Can’t wait for more. Oh and BTW, re: Krista Tippet…as Maude would say, “God’ll get you for that Walter”. Thanks Kate.
My wife and I enjoy looking for your latest video for comic relief. Now we can listen to the podcast.
|
# Auto generated by Edalize
def load_module_from_file(name, python_file):
    """Load and execute *python_file* as a module registered under *name*."""
    from importlib.util import spec_from_file_location, module_from_spec
    file_spec = spec_from_file_location(name, python_file)
    loaded_module = module_from_spec(file_spec)
    file_spec.loader.exec_module(loaded_module)
    return loaded_module
def load_runner_hooks(python_file = r''):
    """Return the runner-hooks module: the user-supplied file when a path is
    given, otherwise the default edalize.vunit_hooks module."""
    if python_file:
        return load_module_from_file('vunit_runner_hooks', python_file)
    return __import__('edalize.vunit_hooks', fromlist=['vunit_hooks'])
# Instantiate the (default or user-supplied) hook object that drives VUnit.
runner = load_runner_hooks().VUnitRunner()

# Override this hook to allow custom creation configuration of the VUnit instance:
vu = runner.create()

# Register the testbench sources in the runner library.
lib = vu.add_library("vunit_test_runner_lib")
lib.add_source_files("sv_file.sv")
lib.add_source_files("vlog_file.v")
lib.add_source_files("vlog05_file.v")
lib.add_source_files("vhdl_file.vhd")
lib.add_source_files("vhdl2008_file", vhdl_standard="2008")
# Override this hook to customize the library, e.g. compile-flags etc.
# This allows full access to vunit.ui.Library interface:
runner.handle_library("vunit_test_runner_lib", lib)

lib = vu.add_library("libx")
lib.add_source_files("vhdl_lfile")
# Override this hook to customize the library, e.g. compile-flags etc.
# This allows full access to vunit.ui.Library interface:
runner.handle_library("libx", lib)

# Override this hook to perform final customization and parametrization of
# VUnit, custom invocation, etc.
runner.main(vu)
|
Your family can experience the story of Jesus' unwavering love for mankind.
If there are no tickets available for the time you want, you may still attend this event. Just expect your wait time to be longer.
The Spokane Valley Adventist Church presents “Journey to the Cross” outdoor walk-through event on Easter weekend. You won’t just hear the story, you’ll be in it as you are surrounded by a cast of 250!
Tours start every 15 minutes and last approximately 30 minutes. This event is free of charge.
|
import six
from peachpy.c.types import Type, \
int8_t, int16_t, int32_t, int64_t, \
uint8_t, uint16_t, uint32_t, uint64_t, \
float_, double_
from peachpy.parse import parse_assigned_variable_name
class Constant:
    """An assembly-time constant: *repeats* elements of ``size // repeats``
    bytes each, stored as a tuple of integer bit patterns in ``data``.

    Instances are created through the typed factory methods
    (``uint64``, ``float32x4``, ...) rather than directly.
    """

    # Total constant sizes (in bytes) that can be emitted.
    _supported_sizes = [1, 2, 4, 8, 16, 32, 64]
    # Element C types that constants may be declared with.
    _supported_types = [uint8_t, uint16_t, uint32_t, uint64_t,
                        int8_t, int16_t, int32_t, int64_t,
                        float_, double_]

    def __init__(self, size, repeats, data, element_ctype, name=None, prename=None):
        """
        :param size: total size of the constant in bytes.
        :param repeats: number of elements; must evenly divide ``size``.
        :param data: tuple of integer bit patterns, one per element.
        :param element_ctype: peachpy.c.types.Type of each element.
        :param name: explicit label name, if any.
        :param prename: name inferred from the assignment statement, if any.
        """
        assert isinstance(size, six.integer_types), "Constant size must be an integer"
        assert size in Constant._supported_sizes, "Unsupported size %s: the only supported sizes are %s" \
            % (str(size), ", ".join(map(str, sorted(Constant._supported_sizes))))
        assert isinstance(repeats, six.integer_types), "The number of constant repeats must be an integer"
        assert size % repeats == 0, "The number of constant repeats must divide constant size without remainder"
        assert isinstance(element_ctype, Type), "Element type must be an instance of peachpy.c.Type"
        # Bug fix: joining Type objects directly raises TypeError when the
        # assertion fires; stringify them first.
        assert element_ctype in Constant._supported_types, "The only supported types are %s" \
            % ", ".join(map(str, Constant._supported_types))

        self.size = size
        self.repeats = repeats
        self.element_ctype = element_ctype
        self.data = data
        self.name = name
        self.prename = prename
        # Filled in when the constant is placed in the output image.
        self.address = None
        self.label = None
        self.prefix = None

    def __str__(self):
        """Render as angle-bracketed, zero-padded hex elements."""
        # Bug fix: use integer floor division; '/' yields a float on Python 3.
        format_spec = "%%0%dX" % (self.size // self.repeats * 2)
        return "<" + ", ".join(format_spec % data for data in self.data) + ">"

    def __hash__(self):
        return hash(self.data) ^ hash(self.size) ^ hash(self.repeats)

    def __eq__(self, other):
        # Equal data tuples imply equal repeats, and equal element types
        # imply equal element sizes, so this is consistent with __hash__.
        return isinstance(other, Constant) and self.data == other.data and self.element_ctype == other.element_ctype

    def encode(self, encoder):
        """Serialize the constant's elements with *encoder*, little-endian
        per element width."""
        from peachpy.encoder import Encoder
        assert isinstance(encoder, Encoder)
        # Bug fix: integer floor division keeps the lookup key an int.
        encode_function = {
            1: encoder.uint8,
            2: encoder.uint16,
            4: encoder.uint32,
            8: encoder.uint64
        }[self.size // self.repeats]
        return sum([encode_function(data) for data in self.data], bytearray())

    @property
    def alignment(self):
        """Required alignment in bytes (size 10 would round up to 16;
        note size 10 is not currently in _supported_sizes)."""
        if self.size == 10:
            return 16
        else:
            return self.size

    @property
    def as_hex(self):
        """The little-endian encoding of the constant as an uppercase hex string."""
        from peachpy.encoder import Encoder, Endianness
        bytestring = self.encode(Encoder(Endianness.Little))
        return "".join("%02X" % byte for byte in bytestring)

    def format(self, assembly_format):
        """Render a reference to the constant for the given assembly dialect."""
        if assembly_format == "go":
            return "const0x" + self.as_hex + "(SB)"
        else:
            return str(self)

    @staticmethod
    def _uint64xN(name, prename, n, *args):
        """Build an n-element vector of 64-bit unsigned integers; a single
        value is broadcast to all n lanes."""
        from peachpy.util import is_int, is_int64
        assert is_int(n)
        args = [arg for arg in args if arg is not None]
        if len(args) == 0:
            raise ValueError("At least one constant value must be specified")
        if len(args) != 1 and len(args) != n:
            raise ValueError("Either 1 or %d values must be specified" % n)
        for i, number in enumerate(args):
            if not is_int(number):
                raise TypeError("The value %s is not an integer" % str(number))
            if not is_int64(number):
                raise ValueError("The number %d is not a 64-bit integer" % number)
            if number < 0:
                # Store negative values as their two's-complement bit pattern.
                args[i] += 0x10000000000000000
        if len(args) == 1:
            args = [args[0]] * n
        return Constant(8 * n, n, tuple(args), uint64_t)

    @staticmethod
    def _uint32xN(name, prename, n, *args):
        """Build an n-element vector of 32-bit unsigned integers; a single
        value is broadcast to all n lanes."""
        from peachpy.util import is_int, is_int32
        assert is_int(n)
        args = [arg for arg in args if arg is not None]
        if len(args) == 0:
            raise ValueError("At least one constant value must be specified")
        if len(args) != 1 and len(args) != n:
            raise ValueError("Either 1 or %d values must be specified" % n)
        for i, number in enumerate(args):
            if not is_int(number):
                raise TypeError("The value %s is not an integer" % str(number))
            if not is_int32(number):
                raise ValueError("The number %d is not a 32-bit integer" % number)
            if number < 0:
                # Store negative values as their two's-complement bit pattern.
                args[i] += 0x100000000
        if len(args) == 1:
            args = [args[0]] * n
        return Constant(4 * n, n, tuple(args), uint32_t)

    @staticmethod
    def _float64xN(name, prename, n, *args):
        """Build an n-element vector of IEEE double-precision floats."""
        args = [arg for arg in args if arg is not None]
        if len(args) == 0:
            raise ValueError("At least one constant value must be specified")
        if len(args) != 1 and len(args) != n:
            raise ValueError("Either 1 or %d values must be specified" % n)
        args = [Constant._parse_float64(arg) for arg in args]
        if len(args) == 1:
            args = [args[0]] * n
        return Constant(8 * n, n, tuple(args), double_)

    @staticmethod
    def _float32xN(name, prename, n, *args):
        """Build an n-element vector of IEEE single-precision floats."""
        args = [arg for arg in args if arg is not None]
        if len(args) == 0:
            raise ValueError("At least one constant value must be specified")
        if len(args) != 1 and len(args) != n:
            raise ValueError("Either 1 or %d values must be specified" % n)
        args = [Constant._parse_float32(arg) for arg in args]
        if len(args) == 1:
            args = [args[0]] * n
        # Bug fix: elements are 4-byte single-precision floats, so the
        # element type is float_, not double_.
        return Constant(4 * n, n, tuple(args), float_)

    @staticmethod
    def uint64(number, name=None):
        """Scalar 64-bit unsigned integer constant."""
        prename = None
        if name is None:
            import inspect
            prename = parse_assigned_variable_name(inspect.stack(), "Constant.uint64")
        return Constant._uint64xN(name, prename, 1, number)

    @staticmethod
    def uint64x2(number1, number2=None, name=None):
        """Vector of two 64-bit unsigned integers (single value broadcasts)."""
        prename = None
        if name is None:
            import inspect
            prename = parse_assigned_variable_name(inspect.stack(), "Constant.uint64x2")
        return Constant._uint64xN(name, prename, 2, number1, number2)

    @staticmethod
    def uint64x4(number1, number2=None, number3=None, number4=None, name=None):
        """Vector of four 64-bit unsigned integers (single value broadcasts)."""
        prename = None
        if name is None:
            import inspect
            prename = parse_assigned_variable_name(inspect.stack(), "Constant.uint64x4")
        return Constant._uint64xN(name, prename, 4, number1, number2, number3, number4)

    @staticmethod
    def uint64x8(number1, number2=None, number3=None, number4=None,
                 number5=None, number6=None, number7=None, number8=None,
                 name=None):
        """Vector of eight 64-bit unsigned integers (single value broadcasts)."""
        prename = None
        if name is None:
            import inspect
            prename = parse_assigned_variable_name(inspect.stack(), "Constant.uint64x8")
        return Constant._uint64xN(name, prename, 8,
                                  number1, number2, number3, number4, number5, number6, number7, number8)

    @staticmethod
    def uint32(number, name=None):
        """Scalar 32-bit unsigned integer constant."""
        prename = None
        if name is None:
            import inspect
            prename = parse_assigned_variable_name(inspect.stack(), "Constant.uint32")
        return Constant._uint32xN(name, prename, 1, number)

    @staticmethod
    def uint32x2(number1, number2=None, name=None):
        """Vector of two 32-bit unsigned integers (single value broadcasts)."""
        prename = None
        if name is None:
            import inspect
            prename = parse_assigned_variable_name(inspect.stack(), "Constant.uint32x2")
        return Constant._uint32xN(name, prename, 2, number1, number2)

    @staticmethod
    def uint32x4(number1, number2=None, number3=None, number4=None, name=None):
        """Vector of four 32-bit unsigned integers (single value broadcasts)."""
        prename = None
        if name is None:
            import inspect
            prename = parse_assigned_variable_name(inspect.stack(), "Constant.uint32x4")
        return Constant._uint32xN(name, prename, 4, number1, number2, number3, number4)

    @staticmethod
    def uint32x8(number1, number2=None, number3=None, number4=None,
                 number5=None, number6=None, number7=None, number8=None,
                 name=None):
        """Vector of eight 32-bit unsigned integers (single value broadcasts)."""
        prename = None
        if name is None:
            import inspect
            prename = parse_assigned_variable_name(inspect.stack(), "Constant.uint32x8")
        return Constant._uint32xN(name, prename, 8,
                                  number1, number2, number3, number4, number5, number6, number7, number8)

    @staticmethod
    def uint32x16(number1, number2=None, number3=None, number4=None,
                  number5=None, number6=None, number7=None, number8=None,
                  number9=None, number10=None, number11=None, number12=None,
                  number13=None, number14=None, number15=None, number16=None,
                  name=None):
        """Vector of sixteen 32-bit unsigned integers (single value broadcasts)."""
        prename = None
        if name is None:
            import inspect
            prename = parse_assigned_variable_name(inspect.stack(), "Constant.uint32x16")
        return Constant._uint32xN(name, prename, 16,
                                  number1, number2, number3, number4, number5, number6, number7, number8,
                                  number9, number10, number11, number12, number13, number14, number15, number16)

    @staticmethod
    def float64(number, name=None):
        """Scalar double-precision float constant."""
        prename = None
        if name is None:
            import inspect
            prename = parse_assigned_variable_name(inspect.stack(), "Constant.float64")
        return Constant._float64xN(name, prename, 1, number)

    @staticmethod
    def float64x2(number1, number2=None, name=None):
        """Vector of two double-precision floats (single value broadcasts)."""
        prename = None
        if name is None:
            import inspect
            prename = parse_assigned_variable_name(inspect.stack(), "Constant.float64x2")
        return Constant._float64xN(name, prename, 2, number1, number2)

    @staticmethod
    def float64x4(number1, number2=None, number3=None, number4=None, name=None):
        """Vector of four double-precision floats (single value broadcasts)."""
        prename = None
        if name is None:
            import inspect
            prename = parse_assigned_variable_name(inspect.stack(), "Constant.float64x4")
        return Constant._float64xN(name, prename, 4, number1, number2, number3, number4)

    @staticmethod
    def float32(number, name=None):
        """Scalar single-precision float constant."""
        prename = None
        if name is None:
            import inspect
            prename = parse_assigned_variable_name(inspect.stack(), "Constant.float32")
        return Constant._float32xN(name, prename, 1, number)

    @staticmethod
    def float32x2(number1, number2=None, name=None):
        """Vector of two single-precision floats (single value broadcasts)."""
        prename = None
        if name is None:
            import inspect
            prename = parse_assigned_variable_name(inspect.stack(), "Constant.float32x2")
        return Constant._float32xN(name, prename, 2, number1, number2)

    @staticmethod
    def float32x4(number1, number2=None, number3=None, number4=None, name=None):
        """Vector of four single-precision floats (single value broadcasts)."""
        prename = None
        if name is None:
            import inspect
            prename = parse_assigned_variable_name(inspect.stack(), "Constant.float32x4")
        return Constant._float32xN(name, prename, 4, number1, number2, number3, number4)

    @staticmethod
    def float32x8(number1, number2=None, number3=None, number4=None,
                  number5=None, number6=None, number7=None, number8=None,
                  name=None):
        """Vector of eight single-precision floats (single value broadcasts)."""
        prename = None
        if name is None:
            import inspect
            prename = parse_assigned_variable_name(inspect.stack(), "Constant.float32x8")
        return Constant._float32xN(name, prename, 8,
                                   number1, number2, number3, number4, number5, number6, number7, number8)

    @staticmethod
    def _convert_to_float32(number):
        """Round a Python float to the nearest representable float32 value."""
        import array
        float_array = array.array('f', [number])
        return float_array[0]

    @staticmethod
    def _parse_float32(number):
        """Return the 32-bit IEEE-754 bit pattern of *number* (float or
        hexadecimal float string) as an int."""
        if isinstance(number, float):
            number = float.hex(Constant._convert_to_float32(number))
        elif isinstance(number, str):
            # Validity check
            try:
                number = float.hex(Constant._convert_to_float32(float.fromhex(number)))
            except ValueError:
                raise ValueError("The string %s is not a hexadecimal floating-point number" % number)
        else:
            raise TypeError("Unsupported type of constant number %s" % str(number))
        # float.hex renders the specials as bare strings.
        if number == "inf" or number == "+inf":
            return 0x7F800000
        elif number == "-inf":
            return 0xFF800000
        elif number == "nan":
            return 0x7FC00000
        is_negative = number.startswith("-")
        point_position = number.index('.')
        exp_position = number.rindex('p')
        number_prefix = number[int(is_negative):point_position]
        # float.hex normalizes all finite values to 0x0.* (zero) or 0x1.*.
        assert number_prefix == '0x0' or number_prefix == '0x1'
        mantissa = number[point_position + 1:exp_position]
        if number_prefix == '0x0' and int(mantissa) == 0:
            # Zero
            return int(is_negative) << 31
        else:
            exponent = number[exp_position + 1:]
            mantissa_bits = len(mantissa) * 4
            # Scale the hex mantissa to exactly 23 fraction bits.
            if mantissa_bits == 23:
                mantissa = int(mantissa, 16)
            elif mantissa_bits < 23:
                mantissa = int(mantissa, 16) << (23 - mantissa_bits)
            else:
                mantissa = int(mantissa, 16) >> (mantissa_bits - 23)
            exponent = int(exponent)
            if exponent <= -127:
                # Denormals: fold the implicit leading 1 into the mantissa.
                mantissa = (mantissa + (1 << 23)) >> -(exponent + 126)
                exponent = -127
            return mantissa + (int(exponent + 127) << 23) + (int(is_negative) << 31)

    @staticmethod
    def _parse_float64(number):
        """Return the 64-bit IEEE-754 bit pattern of *number* (float or
        hexadecimal float string) as an int."""
        if isinstance(number, float):
            number = float.hex(number)
        elif isinstance(number, str):
            # Validity check
            try:
                number = float.hex(float.fromhex(number))
            except ValueError:
                raise ValueError("The string %s is not a hexadecimal floating-point number" % number)
        else:
            raise TypeError("Unsupported type of constant number %s" % str(number))
        # float.hex renders the specials as bare strings.
        if number == "inf" or number == "+inf":
            return 0x7FF0000000000000
        if number == "-inf":
            return 0xFFF0000000000000
        if number == "nan":
            return 0x7FF8000000000000
        is_negative = number.startswith("-")
        point_position = number.index('.')
        exp_position = number.rindex('p')
        number_prefix = number[int(is_negative):point_position]
        # float.hex normalizes all finite values to 0x0.* (zero) or 0x1.*.
        assert number_prefix == '0x0' or number_prefix == '0x1'
        mantissa = number[point_position + 1:exp_position]
        if number_prefix == '0x0':
            # Zero
            assert int(mantissa) == 0
            return int(is_negative) << 63
        else:
            exponent = number[exp_position + 1:]
            mantissa_bits = len(mantissa) * 4
            # Scale the hex mantissa to exactly 52 fraction bits.
            if mantissa_bits == 52:
                mantissa = int(mantissa, 16)
            elif mantissa_bits < 52:
                mantissa = int(mantissa, 16) << (52 - mantissa_bits)
            else:
                mantissa = int(mantissa, 16) >> (mantissa_bits - 52)
            exponent = int(exponent)
            if exponent <= -1023:
                # Denormals: fold the implicit leading 1 into the mantissa.
                mantissa = (mantissa + (1 << 52)) >> -(exponent + 1022)
                exponent = -1023
            elif exponent > 1023:
                # Infinity
                mantissa = 0
                exponent = 1023
            return mantissa + (int(exponent + 1023) << 52) + (int(is_negative) << 63)
|
Why Comcast Business in Anacortes, Washington?
Get crystal-clear calling powered by our Gig-speed network. With advanced solutions that can grow with your Anacortes, Washington business, one-touch conference dialing, an easy-to-use mobile app, and reasonable monthly prices, you can finally go beyond the office — and we can go with you.
Stay connected to your business from anywhere. Like, Anacortes, Washington. Sharp hi-res images let you see what’s happening, day or night.
|
import socket, gourmet.threadManager, urllib.request, urllib.parse, urllib.error
from gettext import gettext as _
# Timeouts in seconds: a short one while opening a URL, a longer one for
# everything else.
DEFAULT_SOCKET_TIMEOUT=45.0
URLOPEN_SOCKET_TIMEOUT=15.0

# NOTE: process-wide side effect -- sets the default timeout for all
# sockets created after this module is imported.
socket.setdefaulttimeout(DEFAULT_SOCKET_TIMEOUT)
class URLReader (gourmet.threadManager.SuspendableThread):
    """Download a URL on a background SuspendableThread, emitting
    'progress' signals as data arrives; the body accumulates in self.data."""

    def __init__ (self, url):
        self.url = url
        gourmet.threadManager.SuspendableThread.__init__(
            self,
            name=_('Downloading %s'%url)
            )

    def do_run (self):
        # Thread entry point required by SuspendableThread.
        self.read()

    def read (self):
        """Fetch self.url into self.data, emitting 'progress' as we go."""
        message = _('Retrieving %s'%self.url)
        # Use the shorter timeout only while opening the connection,
        # then restore the process-wide default.
        socket.setdefaulttimeout(URLOPEN_SOCKET_TIMEOUT)
        sock = urllib.request.urlopen(self.url)
        socket.setdefaulttimeout(DEFAULT_SOCKET_TIMEOUT)
        bs = 1024 * 8 # bite size...
        # Get file size so we can update progress correctly...
        self.content_type = None;
        if hasattr(sock,'headers'):
            fs = int(sock.headers.get('content-length',-1)) # file size..
            self.content_type = sock.headers.get('content-type')
            print('CONTENT TYPE = ',self.content_type)
        else:
            fs = -1
        block = sock.read(bs)
        self.data = block
        sofar = bs
        while block:
            if fs>0:
                # Fractional progress when the server reported a length
                # (approximate: counts whole chunks, including the last
                # partial one).
                self.emit('progress',float(sofar)/fs, message)
            else:
                # -1 signals indeterminate progress to listeners.
                self.emit('progress',-1, message)
            sofar += bs
            block = sock.read(bs)
            self.data += block
        sock.close()
        self.emit('progress',1, message)
def read_socket_w_progress (sock, suspendableThread=None, message=None):
    """Read piecemeal reporting progress via our suspendableThread
    instance (most likely an importer) as we go."""
    if not suspendableThread:
        # No progress reporting requested -- slurp everything in one go.
        data = sock.read()
    else:
        bs = 1024 * 8 # bite size...
        if hasattr(sock,'headers'):
            fs = int(sock.headers.get('content-length',-1)) # file size..
        else: fs = -1
        block = sock.read(bs)
        data = block
        sofar = bs
        print("FETCHING:",data)
        while block:
            if fs>0:
                # Fractional progress when a content-length was reported.
                suspendableThread.emit('progress',float(sofar)/fs, message)
            else:
                # -1 signals indeterminate progress.
                suspendableThread.emit('progress',-1, message)
            sofar += bs
            block = sock.read(bs)
            data += block
            print("FETCHED:",block)
        sock.close()
        print("FETCHED ",data)
        print("DONE FETCHING")
        suspendableThread.emit('progress',1, message)
    return data
def get_url (url, suspendableThread):
    """Return data from URL, possibly displaying progress.

    Accepts either a URL string (which is opened with a short connect
    timeout) or an already-open file-like object.
    """
    if not isinstance(url, str):
        # Caller handed us an already-open socket / file object.
        return read_socket_w_progress(url, suspendableThread,
                                      _('Retrieving file'))
    socket.setdefaulttimeout(URLOPEN_SOCKET_TIMEOUT)
    sock = urllib.request.urlopen(url)
    socket.setdefaulttimeout(DEFAULT_SOCKET_TIMEOUT)
    return read_socket_w_progress(sock, suspendableThread,
                                  _('Retrieving %s'%url))
|
They say Christmas brings out the best – and the worst – in every family. Gainford Drama Club’s Autumn production is a timely reminder of the joys and sorrows of the festivities hovering on the horizon. The characters fight, bully, sulk, cry and get far too excited – and the children of the family never actually appear! Alan Ayckbourn’s popular portrayal of a classic family Christmas certainly reminded some members of the audience of Christmases Past, judging by the passing remarks and gales of laughter prompted by this highly enjoyable and well-executed production. The scene opened on the hallway of a contemporary home with tasteful colour-co-ordinated decorations and a huge pile of presents. Stage settings were created by John Lowery, Paul Richardson and Richard Stephenson, with excellent props and additional detail by Linda Lawrence and Joan White. The sometimes complex sound and lighting were ably managed by the above-named John and Paul, assisted by Will Barker, Kathy Kerr and Harry Robinson. It is the night before Christmas, and gadget-obsessed Neville Bunker, played by Paul Illingworth, has again forgotten to buy his wife a present. Sister-in-law Rachel (Louise Stephenson) is convincing in her fretting about her love life, and bombastic Uncle Harvey (Barrington Wearmouth) is more eccentric than ever, obsessed with giving the children guns for Christmas and sure that any outsider is a thief. His interjections castigating everyone’s efforts are timed to perfection. Neville’s friend and business associate Eddie (Lawrence Chandler) is avoiding his parental duties as usual, while in the kitchen, Auntie Phyllis (Jean McCann) is drunk in charge of their dinner. Phyllis’s inebriation is amiably apparent without being overdone. Rachel’s new love interest, writer Clive Morris (James Glendinning), arrives to stay and is immediately smitten by Belinda (Karen Hawley), Rachel’s sister and the neglected wife of Neville.
The result of this eclectic mix is midnight assignations, malfunctioning technical gadgets, drunken board games and a puppet show laid on by the hapless Uncle Bernard, superbly acted by John Robinson. His scene with the Three Little Pigs, ably assisted by pregnant Pattie, played histrionically by new member Kathrin Glendinning, is hilarious. Diana Peat and Pamela Westgarth shared the directing and succeeded in producing some great character acting and confident performances from all concerned. James Glendinning is also a welcome addition in his debut performance with the company, caught up unresistingly in the action. The whole production will provide the audience with an all-too-familiar preview of a family Christmas — at least, we hope, until the final scene…
|
# thesquirrel.org
#
# Copyright (C) 2015 Flying Squirrel Community Space
#
# thesquirrel.org is free software: you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published by the
# Free Software Foundation, either version 3 of the License, or (at your
# option) any later version.
# thesquirrel.org is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public
# License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with thesquirrel.org. If not, see <http://www.gnu.org/licenses/>.
import os
from django.conf import settings
from django.http import Http404, HttpResponse
from mediabuilder import bundles
def check_source_path(bundle, path):
    """Check JS Source paths

    Raise Http404 unless ``path`` exists and refers to one of the
    bundle's registered source files.
    """
    if not os.path.exists(path):
        raise Http404()
    # samefile() compares the underlying files, so symlinked or
    # differently-spelled paths to the same source are accepted.
    is_registered = any(os.path.samefile(path, source_path)
                        for source_path in bundle.source_paths())
    if not is_registered:
        raise Http404()
def js_source(request, bundle_name, path):
    """Serve a single JS source file of a bundle.

    Raises Http404 (via check_source_path) when ``path`` is not one of
    the bundle's registered sources.
    """
    bundle = bundles.JSBundle.get_bundle(bundle_name)
    check_source_path(bundle, path)
    path = os.path.join(settings.BASE_DIR, path)
    # Context manager closes the file promptly; the original leaked the
    # handle until garbage collection.
    with open(path) as source_file:
        return HttpResponse(source_file.read(),
                            content_type='application/javascript')
def sass_source(request, bundle_name):
    """Build a SASS bundle server-side and return the compiled CSS.

    Browsers cannot consume raw SASS, so the bundle is compiled here
    and the result sent as plain CSS.
    """
    bundle = bundles.SassBundle.get_bundle(bundle_name)
    compiled_css = bundle.build_content('nested')
    return HttpResponse(compiled_css, content_type='text/css')
|
Stitch can replicate data from all your sources (including Iterable) to a central warehouse. From there, it's easy to use Periscope Data to perform the in-depth analysis you need.
Integrate Iterable and Periscope Data to turn your data into actionable insights.
|
"""
PyRATBridge
===========
This Plugin imports the functionality of PyRAT into QGIS
:author: Felix Weinmann <felix.weinmann@dlr.de>
"""
from qgis.core import QgsTask, QgsTaskManager, Qgis, QgsProject
from qgis.PyQt.QtWidgets import QAction, QFileDialog, QInputDialog, QDockWidget
from qgis.PyQt.QtCore import Qt
from qgis.utils import iface
import copy
import numpy as np
from os import path
# PyRAT is an optional dependency: remember whether the import succeeded
# so the plugin can degrade gracefully (see PyRATBridge.initGui) when it
# is not installed.
try:
    import pyrat
    from pyrat.viewer.Dialogs import FlexInputDialog, LayerTreeWidget
    pyratImport = True
except ImportError:
    pyratImport = False
# Map QGIS raster data types (the Qgis.DataType enum values) to numpy
# dtype names; None marks types with no direct numpy equivalent.
qgis_types = {
    0: None,  # UnknownDataType
    1: "uint8",  # Byte: eight-bit *unsigned* integer (was wrongly "int8")
    2: "uint16",
    3: "int16",
    4: "uint32",
    5: "int32",
    6: "float32",
    7: "float64",
    8: None,  # CInt16
    9: None,  # CInt32
    10: "complex64",  # CFloat32: two float32 ("complex32" is not a numpy dtype)
    11: "complex128",  # CFloat64: two float64
    12: None,  # ARGB32. Color, alpha, red, green, blue
    13: None,  # ARGB32_Premultiplied alpha, red, green, blue
}
class PyRATBridge:
    """This is the main plugin class for GUI and the connection to PyRAT"""
    def __init__(self):
        # Task manager used to run PyRAT tools asynchronously (see PyRATTask).
        self.taskManager = QgsTaskManager()
        if pyratImport:
            # Replace PyRAT's own viewer helpers with the QGIS-safe
            # stand-ins defined below (GenPyramidInterface,
            # ProgressBarToQGIS).
            pyrat.viewer.GenPyramid = GenPyramidInterface
            pyrat.tools.ProgressBar = ProgressBarToQGIS
    def unload(self):
        """Cleanup when disabling the plugin"""
        if pyratImport:
            PyRATBridge.clearPyRAT()
            self.pyratMenu.clear()
            iface.removeDockWidget(self.pyratLayerTree)
            self.taskManager.cancelAll()
            ViewerToQGISInterface.display.clear()
    def addMenuEntry(self, pyratTool):
        """Adds a PyRAT Tool to the QGIS-Menu"""
        # A tool declares its menu location as a '|'-separated path
        # (e.g. "Filter|Edges"); walk the path, creating submenus on demand.
        menus = pyratTool.gui['menu'].split('|')
        submenu = self.pyratMenu
        for menu in menus:
            if menu not in [action.text() for action in submenu.actions()]:
                submenu = submenu.addMenu(menu)
            else:
                submenu = [action.menu() for action in submenu.actions() if
                           action.text() == menu][0]
        action = QAction(pyratTool.gui['entry'], iface.mainWindow())
        # Late-bound default not needed: pyratTool is a fresh local per call.
        action.triggered.connect(lambda:
                                 PyRATBridge.menuAction(self, pyratTool))
        submenu.addAction(action)
    def initGui(self):
        """Initalise the Plugin-UI"""
        if not pyratImport:
            iface.messageBar().pushMessage("PyRAT not found!",
                                           level=Qgis.Critical)
            return
        # Reuse an existing top-level 'PyRAT' menu if one is already present.
        if 'PyRAT' not in [action.text() for action in
                           iface.mainWindow().menuBar().actions()]:
            self.pyratMenu = iface.mainWindow().menuBar().addMenu('PyRAT')
        else:
            self.pyratMenu = [action.menu() for action in
                              iface.mainWindow().menuBar().actions() if
                              action.text() == 'PyRAT'][0]
        action = QAction("Layer2PyRAT", iface.mainWindow())
        action.triggered.connect(PyRATBridge.layerToPyrat)
        self.pyratMenu.addAction(action)
        action = QAction("PyRAT2Layer", iface.mainWindow())
        action.triggered.connect(PyRATBridge.pyratToLayer)
        self.pyratMenu.addAction(action)
        action = QAction("Cleanup PyRAT", iface.mainWindow())
        action.triggered.connect(PyRATBridge.clearPyRAT)
        self.pyratMenu.addAction(action)
        action = QAction("Show PyRAT GUI", iface.mainWindow())
        action.triggered.connect(self.showPyrat)
        self.pyratMenu.addAction(action)
        self.pyratMenu.addSeparator()
        # Init PyRAT-Tools, adapted from pyrat.viewer for qgis
        from inspect import getmembers, isclass
        modules = [pyrat.load, pyrat.save, pyrat.transform, pyrat.filter,
                   pyrat.polar, pyrat.insar, pyrat.plugins, pyrat.viewer]
        # NOTE(review): `modules` is rebound inside the loop; iteration still
        # covers the original list because the for-loop holds its iterator.
        for current_module in modules:
            modules = getmembers(current_module, isclass)
            for mod in modules:
                if issubclass(mod[1], pyrat.Worker):
                    plugin = mod[1]
                    if(hasattr(plugin, 'gui') and
                       plugin.gui['entry'] != "Python console"):
                        self.addMenuEntry(plugin)
        self.pyratLayerTree = QDockWidget("PyRAT Layers", iface.mainWindow())
        PyRATBridge.layerTreeWidget = LayerTreeWidget(
            parent=self.pyratLayerTree,
            viewer=ViewerToQGISInterface)
        self.pyratLayerTree.setObjectName("PyRAT Layers")
        self.pyratLayerTree.setWidget(PyRATBridge.layerTreeWidget)
        iface.addDockWidget(Qt.LeftDockWidgetArea, self.pyratLayerTree)
    def menuAction(self, pyratTool):
        """Start pyratTool after Menu-Click"""
        # Tools mutate their `para` in the dialog; keep a deep copy so the
        # original defaults can be restored after the run (see PyRATTask.run).
        para_backup = copy.deepcopy(pyratTool.para)
        if 'name' not in dir(pyratTool):
            pyratTool.name = pyratTool.__name__
        if len(pyratTool.para) > 0:
            # Some importers ship their own dedicated dialog; everything
            # else gets the generic FlexInputDialog.
            if pyratTool is pyrat.load.FSAR:
                dlg = pyrat.load.FsarImportWidget(parent=iface.mainWindow())
                dlg.update()
            elif pyratTool is pyrat.load.ESAR:
                dlg = pyrat.load.EsarImportWidget(parent=iface.mainWindow())
                dlg.update()
            elif pyratTool is pyrat.load.UAVSAR:
                dlg = pyrat.load.UAVSARImportWidget(parent=iface.mainWindow())
                dlg.update()
            else:
                dlg = FlexInputDialog(pyratTool.para,
                                      parent=iface.mainWindow(),
                                      title=pyratTool.name,
                                      doc=pyratTool.__doc__)
        # Run immediately when there are no parameters, otherwise only
        # after the dialog was accepted.
        if len(pyratTool.para) == 0 or dlg.exec_() == 1:
            task = PyRATTask(pyratTool, para_backup)
            self.taskManager.addTask(task)
    # NOTE(review): defined without `self` and always invoked through the
    # class (PyRATBridge.layerToPyrat) -- presumably intended as a
    # @staticmethod; confirm before changing.
    def layerToPyrat():
        """Imports a QGIS-Layer into PyRAT"""
        layers = list()
        for layer in QgsProject.instance().layerTreeRoot().layerOrder():
            # 1: QgsMapLayer.LayerType.RasterLayer
            if layer.type() == 1:
                layers.append(layer.name())
        layername, s = QInputDialog.getItem(
            iface.mainWindow(),
            "Select a layer",
            "Select a layer to export to PyRAT:",
            layers,
            editable=False)
        if not s:
            return
        layer = QgsProject.instance().mapLayersByName(layername)[0]
        dataProv = layer.dataProvider()
        extent = dataProv.extent()
        rows = layer.height()
        cols = layer.width()
        block = dataProv.block(1, extent, cols, rows)
        # Reinterpret the raw block bytes with the numpy dtype matching
        # the QGIS data type (see qgis_types above).
        arr = np.frombuffer(block.data(),
                            dtype=qgis_types[block.dataType()]
                            ).reshape((rows, cols))
        pyratlayer = pyrat.adddata(arr)
        # Add metadata to the PyRAT-Layer
        description = layer.crs().description()
        meta = {"info": layer.name(),
                "geo_min_east": extent.xMinimum(),
                # Subtract 1 due to QGIS inclusive minimum
                "geo_min_north": extent.yMinimum() - 1,
                "geo_ps_east": layer.rasterUnitsPerPixelX(),
                "geo_ps_north": layer.rasterUnitsPerPixelY()}
        # Parse a UTM zone out of CRS descriptions like
        # "WGS 84 / UTM zone 33N"; southern zones are stored negative.
        if description.startswith('WGS 84 / UTM zone '):
            zone = int(description[:-1].rsplit(" ", 1)[1])
            if description[-1] == "S":
                zone = -zone
            meta["geo_projection"] = 1
            meta["geo_zone"] = zone
        pyrat.setmeta(meta)
        ViewerToQGISInterface.display[pyratlayer] = {'scaling': 'min->max',
                                                     'bwlayer': pyratlayer,
                                                     'colour': False}
        PyRATBridge.layerTreeWidget.redraw()
    # NOTE(review): also missing `self`/@staticmethod -- called through the
    # class, see above.
    def pyratToLayer(layerid=None):
        """Exports a PyRAT-layer into QGIS"""
        if type(layerid) is str:
            pyrat.data.activateLayer(layerid)
        annotation = pyrat.data.getAnnotation()
        # Default the file name to the layer's 'info' annotation when present.
        if 'info' in annotation:
            filename = path.join(pyrat.data.tmpdir, annotation['info'] +
                                 ".rat")
        else:
            filename = path.join(pyrat.data.tmpdir, "PyRAT.rat")
        filename, s = QFileDialog.getSaveFileName(
            iface.mainWindow(),
            "Save the PyRAT-Layer",
            filename,
            "RAT-File (*.rat)")
        if not s or filename == "":
            return
        # Write the active PyRAT layer to disk, then load that file back
        # into QGIS as a raster layer.
        pyrat.save.rat((filename, "rat"), geo_envi_hdr=True)
        iface.addRasterLayer(filename, path.basename(filename).split(".")[0])
    def showPyrat(self):
        """Open the standalone PyRAT GUI."""
        pyrat.show()
    # NOTE(review): missing `self`/@staticmethod -- called through the class.
    def clearPyRAT():
        """Reset PyRAT's layer store and this plugin's display state."""
        pyrat.pyrat_reset()
        ViewerToQGISInterface.display.clear()
        PyRATBridge.layerTreeWidget.redraw()
class ViewerToQGISInterface:
    """This Class is a 'viewer' for pyrats LayerTree Widget shown in QGIS"""
    # Display configuration read by PyRAT's LayerTreeWidget.
    config = {'colour': False, 'bwlayer': "/Undefined",
              'rgblayer': (None, None, None)}
    # Per-layer display settings, keyed by PyRAT layer id (populated by
    # PyRATBridge and PyRATTask).
    display = {}
    # NOTE(review): no `self` parameter -- apparently invoked unbound by the
    # LayerTree widget machinery; confirm before adding @staticmethod.
    def updateViewer(layer=None):
        # Intentionally a no-op: QGIS handles its own canvas refreshes.
        pass
class GenPyramidInterface:
    """
    No-op stand-in for pyrat.viewer.GenPyramid.

    Installed by PyRATBridge.__init__ to disable the scaling method
    options in the LayerTree Widget when running inside QGIS.
    """
    def __init__(self, layer=None, force=None, mode=None):
        # Accept the real GenPyramid's signature, ignore all arguments.
        pass
    def run(self):
        # Pyramid generation is deliberately skipped inside QGIS.
        pass
class ProgressBarToQGIS:
    """
    No-op replacement for pyrat's ProgressBar.

    Prevents crashes when the QGIS Python Console is open by swallowing
    all progress-bar calls.
    """
    def __init__(self, message, max, width=None):
        # Same signature as the real progress bar; nothing is drawn.
        pass
    def __del__(self):
        pass
    def update(self, val):
        # Progress updates are deliberately discarded.
        pass
class PyRATTask(QgsTask):
    """This class handles the async execution of a PyRAT-Tool"""
    def __init__(self, pyratTool, para_backup):
        QgsTask.__init__(self)
        self.pyratTool = pyratTool
        # Deep copy of the tool's parameters taken *before* the input
        # dialog mutated them; restored after the run (see run()).
        self.para_backup = para_backup
        # State flags set by crashHandler and read by finished().
        self.failed = False
        self.guionly = False
        self.layer = None
        self.existinglayers = list()
    def run(self):
        """The async executed code"""
        self.plugin = self.pyratTool()
        # Route PyRAT crashes through our handler so QGIS is not terminated.
        self.plugin.crash_handler = self.crashHandler
        # Snapshot the existing layer ids so finished() can tell which
        # layers this run created.
        self.existinglayers = pyrat.data.getLayerIDs()
        self.layer = self.plugin.run()
        # Restore the tool's default parameters for the next invocation.
        setattr(self.pyratTool, 'para', self.para_backup)
        return self.layer is not False
    def crashHandler(self, ex):
        """
        Overrides the PyRAT crash handler to prevent
        the termination of QGIS
        """
        try:
            raise ex
        except AttributeError:
            # Gui-only Plugins
            self.guionly = True
        except Exception:
            # Remember the failure for finished(), then let QgsTask see it.
            self.failed = True
            raise ex
    def finished(self, result):
        """
        This function is threadsafe for GUI-Actions and
        called after run terminates.
        """
        if self.guionly:
            self.pyratTool.guirun(iface.mainWindow())
        if result and not self.failed:
            iface.messageBar().pushMessage(self.pyratTool.name + " finished.",
                                           level=Qgis.Success)
            for layer in [newlayer for newlayer in pyrat.data.getLayerIDs()
                          if newlayer not in self.existinglayers]:
                # Show the generated Layer(s) in QGIS
                anno = pyrat.data.getAnnotation(layer=layer)
                if 'info' not in anno:
                    anno['info'] = "Pyrat-Layer " + layer
                # Tag the new layer with the tool that produced it.
                pyrat.data.setAnnotation({'info': anno['info'] + "-" +
                                          self.pyratTool.name},
                                         layer=layer)
                ViewerToQGISInterface.display[layer] = {'scaling': 'min->max',
                                                        'bwlayer': layer,
                                                        'colour': False}
                PyRATBridge.pyratToLayer(self.layer)
            PyRATBridge.layerTreeWidget.redraw()
        else:
            iface.messageBar().pushMessage(self.pyratTool.name +
                                           " failed. Look in the (system)" +
                                           " console for more information.",
                                           level=Qgis.Critical)
        # Drop the tool instance so its resources are released promptly.
        del self.plugin
|
Antal Organizers – East Marine Asia, marine parts Thailand & chandlery in Phuket, Bangkok, Pattaya, Krabi, Langkawi. Boating & yacht equipment, supplies, accessories.
The 40 and 50 mm sheaves are manufactured in high-strength resin with a double side ball bearing. The 60 mm sheave is manufactured in aluminium, hard black anodized, with a main composite fibre bearing and 2 side ball bearings.
These new organizers do not disassemble after taking off the screws.
Double version also available, just add /D to the model number.
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import unittest
import nose
from voting_bot import VotingBot
class VotingBotTest(unittest.TestCase):
    """Unit tests for VotingBot._parse_public_message.

    The original suite repeated the same parse/compare boilerplate for
    every input; the cases are now table-driven to remove duplication.
    """
    def setUp(self):
        # The parser is exercised directly on the class (no instance state).
        self.vb = VotingBot
    def tearDown(self):
        del self.vb
    def _assert_parsed(self, content, expected):
        """Parse `content` and compare (action, title, arg) to `expected`."""
        action, title, arg = self.vb._parse_public_message(content)
        self.assertEqual((action, title, arg), expected,
                         (content, action, title, arg))
    def test_parse_public_message(self):
        # Each case is (message content, expected (action, title, arg)).
        cases = [
            # voting topic
            ("votingbot new poll\none\ntwo\nthree",
             ("topic", "new poll", ["one", "two", "three"])),
            ("votingbot new poll: one, two, three",
             ("topic", "new poll", ["one", "two", "three"])),
            ("votingbot new poll one, two, three",
             ("topic", "new poll", ["one", "two", "three"])),
            ("votingbot new poll one",
             ("topic", "new poll", ["one"])),
            # new option for existing topic
            ("votingbot new poll\nadd: four", ("option", "new poll", "Four")),
            ("votingbot new poll: add: four", ("option", "new poll", "Four")),
            ("votingbot new poll: add four", ("option", "new poll", "Four")),
            ("votingbot new poll add four", ("option", "new poll", "Four")),
            ("votingbot new poll ADD four", ("option", "new poll", "Four")),
            # vote
            ("votingbot new poll\n1", ("vote", "new poll", 1)),
            ("votingbot new poll: 1", ("vote", "new poll", 1)),
            ("votingbot new poll 1", ("vote", "new poll", 1)),
            # results
            ("votingbot new poll\nresults", ("results", "new poll", None)),
            ("votingbot new poll: results", ("results", "new poll", None)),
            ("votingbot new poll results", ("results", "new poll", None)),
            ("votingbot new poll RESULTS", ("results", "new poll", None)),
            # help
            ("votingbot\nhelp", ("help", None, None)),
            ("votingbot: help", ("help", None, None)),
            ("votingbot help", ("help", None, None)),
            ("votingbot HELP", ("help", None, None)),
        ]
        for content, expected in cases:
            self._assert_parsed(content, expected)
# @unittest.skip("need a debug_run.py module to run this test")
# class VotingBotIntegrationTest(unittest.TestCase):
# """Integration test for VotingBot.
# It runs a test instance of the bot configured by a debug_run module not
# included in GitHub because it must contain credentials. A template of
# a debug_run for VotingBot is provided instead.
# """
# @classmethod
# def setUpClass(cls):
# from debug_run import get_voting_bot
# cls.vb = get_voting_bot()
# cls.vb.main()
# @classmethod
# def tearDownClass(cls):
# pass
# @unittest.skip("need a debug_run.py module to run this test")
# def test_complete_voting_process(self):
# stream = "test-bot"
# subject = "votingbot tests"
if __name__ == '__main__':
    # nose is unmaintained and broken on modern Python; the stdlib
    # unittest runner discovers the same TestCase classes in this module.
    unittest.main()
|
Doggie Health Care » We Need To Knead Our Dogs!
We Need To Knead Our Dogs!
Most of us have had a nice, relaxing therapeutic massage at least one time in our lives, I would guess. If you have, then you will agree that it relaxes muscle tension, relieves stress, and just makes you feel good. For more than 120 years, research in massage therapy has been ongoing. However, no one needs to do a study to determine that animals benefit from massage the same way humans do.
Not only will a pampering, rejuvenating, therapeutic massage relieve our stress, it will help your anxiety filled pooch relax too. Dogs actually do have stress, believe it or not. Many owners have found out the hard way that their pooch has separation anxiety, probably after returning home to what looks like a “war zone”. Thunderstorms are another trigger, but a dog with temperament problems would surely gain from a rub down, especially if it comes from “their human”.
Dogs can get strained and sore muscles for a number of reasons, but often it is a result of inactivity due to injury, illness, age or obesity. Properly applied massage techniques will help to tone the muscles of an inactive pooch, as well as ease the pain and decrease the ache of both arthritis and hip-dysplasia. Massage helps decrease swelling and accelerate the healing process by stimulating blood circulation and lymph flow. A massage will actually boost your dog’s internal health, as well.
Massage therapy would not be recommended in some instances though, such as infection, open wounds, fever, shock, fractures or lumps. There are certified dog massage practitioners who are trained in dog anatomy and can apply the correct techniques without risking your pups health. To reap the maximum benefits special training is required, but there are simple techniques that you can do at home for your dog. Not only will you be able to spend quality time with your pup while you give him a rub down, but you will be able to spot situations on your dog’s body that you may not have caught, otherwise. So go ahead and show your dog how much you “knead him”, he will love you for it!
|
from attributes import *
from constants import *
# ------------------------------------------------------------------------------
#
class UnitManager (Attributes) :
    """
    UnitManager class -- manages a pool of (Compute or Data) Pilots and
    the (Compute or Data) Units scheduled onto them.

    This is an abstract interface: every operation raises
    ``NotImplementedError`` (a subclass of ``Exception``, so existing
    callers that catch ``Exception`` keep working) until a concrete
    backend overrides it.
    """

    # --------------------------------------------------------------------------
    #
    def __init__ (self, url=None, scheduler='default', session=None) :
        """
        :param url: endpoint of the unit management service (optional).
        :param scheduler: name of the scheduling policy to apply.
        :param session: security/session context for the connection.
        """
        Attributes.__init__ (self)

    # --------------------------------------------------------------------------
    #
    def add_pilot (self, pid) :
        """
        add (Compute or Data)-Pilot(s) to the pool
        """
        raise NotImplementedError ("%s.add_pilot() is not implemented" % self.__class__.__name__)

    # --------------------------------------------------------------------------
    #
    def list_pilots (self, ptype=ANY) :
        """
        List IDs of data and/or compute pilots
        """
        raise NotImplementedError ("%s.list_pilots() is not implemented" % self.__class__.__name__)

    # --------------------------------------------------------------------------
    #
    def remove_pilot (self, pid, drain=False) :
        """
        Remove pilot(s) (does not cancel the pilot(s), but removes all units
        from the pilot(s).

        `drain` determines what happens to the units which are managed by the
        removed pilot(s).  If `True`, the pilot removal is delayed until all
        units reach a final state.  If `False` (the default), then `RUNNING`
        units will be canceled, and `PENDING` units will be re-assigned to the
        unit managers for re-scheduling to other pilots.
        """
        raise NotImplementedError ("%s.remove_pilot() is not implemented" % self.__class__.__name__)

    # --------------------------------------------------------------------------
    #
    def submit_unit (self, description) :
        """
        Instantiate and return (Compute or Data)-Unit object(s)
        """
        raise NotImplementedError ("%s.submit_unit() is not implemented" % self.__class__.__name__)

    # --------------------------------------------------------------------------
    #
    def list_units (self, utype=ANY) :
        """
        List IDs of data and/or compute units
        """
        raise NotImplementedError ("%s.list_units() is not implemented" % self.__class__.__name__)

    # --------------------------------------------------------------------------
    #
    def get_unit (self, uids) :
        """
        Reconnect to and return (Compute or Data)-Unit object(s)
        """
        raise NotImplementedError ("%s.get_unit() is not implemented" % self.__class__.__name__)

    # --------------------------------------------------------------------------
    #
    def wait_unit (self, uids, state=[DONE, FAILED, CANCELED], timeout=-1.0) :
        """
        Wait for given unit(s) to enter given state

        The mutable default for `state` is kept for interface
        compatibility; it is never mutated here.
        """
        raise NotImplementedError ("%s.wait_unit() is not implemented" % self.__class__.__name__)

    # --------------------------------------------------------------------------
    #
    def cancel_units (self, uids) :
        """
        Cancel given unit(s)
        """
        # Message fixed to name this method (was 'cancel_unit').
        raise NotImplementedError ("%s.cancel_units() is not implemented" % self.__class__.__name__)
# ------------------------------------------------------------------------------
#
|
Shop at SubscriptionSave, and you will save more money.
Please Never miss out this SubscriptionSave, Get the code to get your money off at SubscriptionSave.
You can Apply this SubscriptionSave coupon during checkout at SubscriptionSave and save more money.
It is easy to claim an amazing discount on sales items at SubscriptionSave. Just copy the code and apply it during check out.
The best offers from SubscriptionSave are ready. Time waits for no one.
Take advantage of this amazing SubscriptionSave coupons to enjoy big savings.
If you want to shop at SubscriptionSave, then never miss out this offer with Up to 60% off sale items to save your money.
US Next Day Delivery. Shop the entire store and chose from SubscriptionSave.Go Shopping it before it expired.
You will Enjoy hot savings by using this SubscriptionSave at checkout to receive 15% off all Orders.
Get The Best Free SubscriptionSave Coupons!
The 34 hottest SubscriptionSave coupon codes and sales in April 2019 are here for you. Today's star coupon is 65% off. Want more discount choices? Have a look at Couponbind! By entering the SubscriptionSave promo code at checkout with just a few clicks, you can save much more money on your shopping without any difficulty. It is indeed that easy! Now, check SubscriptionSave coupons to get more surprises!
Get the Best Free SubscriptionSave Promo Codes!
|
import json,os, urllib2, uncurl, requests, pymongo, time
from clint.textui import colored
def get_season_results(team):
seasons = ['2012','2013','2014', '2015']
for year in seasons:
seasonsquery="https://erikberg.com/nba/results/"+team+".json?season="+year
print colored.yellow("[*] Getting "+seasonsquery)
r = requests.get(seasonsquery,
headers={
"Authorization": "",
'User-Agent': 'python test',
'From': ''
},
cookies={},
)
for item in r.json():
seasonresults.insert(item)
print colored.green("[+] "+team+year+" complete")
def get_team_stats(team):
teamquery = "https://erikberg.com/nba/team-stats.json?team_id="+team
print colored.yellow("[*] Getting "+teamquery)
r = requests.get(teamquery,
headers={
"Authorization": "",
'User-Agent': '',
'From': 'm'
},
cookies={},
)
teamstats.insert(r.json())
print colored.green("[+] " + teamquery+" complete")
def get_box_score(eventid):
print colored.yellow("[*] Fetching "+ eventid)
boxquery="https://erikberg.com/nba/boxscore/"+eventid+".json"
r = requests.get(boxquery,
headers={
"Authorization": "",
'User-Agent': '',
'From': ''
},
cookies={},
)
print r.headers
boxresult = r.json()
boxresult['eventkey']=eventid
boxscores.insert(boxresult)
print colored.green("[+] "+eventid+" complete.")
if __name__ == '__main__':
client = pymongo.MongoClient('localhost',27017)
db = client.nba
seasonresults = db.seasonresults
teamstats = db.teamstats
boxscores= db.boxscores
teamlist = ["atlanta-hawks", "boston-celtics", "brooklyn-nets", "charlotte-hornets", "chicago-bulls", "cleveland-cavaliers", "dallas-mavericks", "denver-nuggets", "detroit-pistons", "golden-state-warriors", "houston-rockets", "indiana-pacers", "los-angeles-clippers", "los-angeles-lakers", "memphis-grizzlies", "miami-heat", "milwaukee-bucks", "minnesota-twins", "new-orleans-pelicans", "new-york-knicks", "oklahoma-city-thunder", "orlando-magic", "philadelphia-76ers", "phoenix-suns", "portland-trail-blazers", "sacramento-kings", "san-antonio-spurs", "toronto-raptors", "utah-jazz", "washington-wizards"]
#for team in teamlist:
#get_season_results(team)
#get_team_stats(team)
# time.sleep(20)
#for eventid in seasonresults.distinct("event_id"):
for eventid in open('schedule-ids.txt').readlines():
get_box_score(eventid.rstrip())
time.sleep(12)
print colored.green("[+] Fetching complete.")
|
Can I disable or deactivate an active HitPay Checkout?
Can I import products from my existing e-commerce platform?
What does archiving a live product mean?
How can I fulfil an order using HitPay Checkouts?
Can I edit an active product?
Can a customer checkout multiple products from the same merchant or seller?
How can customers track their order and shipping status?
|
"""
This module defines customised widgets for use with forms in this application.
"""
from django.forms.widgets import CheckboxInput, MultiWidget, NumberInput, Select, TextInput
__all__ = []
__author__ = "Michael Winter (mail@michael-winter.me.uk)"
__license__ = """
Biological Dataset Repository: data archival and retrieval.
Copyright (C) 2015 Michael Winter
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
# noinspection PyAbstractClass
# The render method is not abstract.
class SelectableTextInput(MultiWidget):
    """
    A text box paired with a checkbox that makes the input optional.

    If the associated checkbox is deselected when the form is submitted,
    the textbox value is ignored.

    Intended for the Bootstrap CSS framework: the two controls render as
    an input group with the checkbox integrated in the `:before`
    position (typically the left-hand side of the text box).
    """
    selection_widget = CheckboxInput
    value_widget = TextInput

    def __init__(self, attrs=None):
        subwidgets = [self.selection_widget, self.value_widget]
        super(SelectableTextInput, self).__init__(subwidgets, attrs)

    def decompress(self, value):
        """
        Return a list of decompressed values for the given compressed value.

        The first element is the checkbox state; the second is the text
        box value, which is `None` when the checkbox was deselected.

        :param value: A compressed value to be represented by this widget.
        :type value: str | unicode
        :return: The decompressed interpretation of the value.
        :rtype: list of (bool, str | unicode)
        """
        return [True, value] if value else [False, None]

    def format_output(self, rendered_widgets):
        """
        Combine the rendered sub-widgets into one Bootstrap input group.

        :param rendered_widgets: A list of widgets rendered in HTML.
        :type rendered_widgets: list of unicode
        :return: A HTML string combining each widget.
        :rtype: unicode
        """
        return u"""
            <div class="input-group">
                <span class="input-group-addon">{0}</span>
                {1}
            </div>
        """.format(*rendered_widgets)
# noinspection PyAbstractClass
# Base class implements the render method
class ScaledNumberInput(MultiWidget):
    """
    This widget combines a text box with a select menu to enable the user to
    specify values at different scales.

    The widget normalises the value according to the factor associated with
    each scale.

    This widget is intended to be used with the Bootstrap CSS framework.
    """

    def __init__(self, choices, default, attrs=None):
        """
        :param choices: Iterable of (factor, label) pairs; each factor is the
            multiplier applied to the entered number for that scale.
        :param default: The factor to preselect when no better match exists.
        :param attrs: Optional HTML attributes for the rendered widgets.
        """
        self._choices = list(choices)
        self._default = default
        widgets = (NumberInput, Select(choices=choices))
        super(ScaledNumberInput, self).__init__(widgets, attrs)

    def decompress(self, value):
        """
        Return a list of decompressed values for the given compressed value.

        The first element is the numeric value. The second element is the scale
        factor.

        :param value: A compressed value to be represented by this widget.
        :type value: str | unicode
        :return: The decompressed interpretation of the value.
        :rtype: list of (int, int)
        """
        if value is None or value == 0:
            # Fixed: previously returned a bare tuple here while the other
            # branches return lists; MultiWidget expects a consistent sequence.
            return [0, self._default]
        # Prefer the largest factor that divides the value exactly.
        # (Loop variable renamed from `_`, which shadowed the gettext alias.)
        for factor, label in sorted(self._choices, key=lambda pair: pair[0], reverse=True):
            if value % factor == 0:
                # Integer division: value is known to be divisible by factor,
                # and true division would produce a float under Python 3.
                return [value // factor, factor]
        return [value, self._default]

    def value_from_datadict(self, data, files, name):
        """
        Return the normalised value of this widget derived from the submitted
        data dictionaries.

        :param data: A dictionary of strings submitted by the user via a form.
        :type data: dict of (str | unicode)
        :param files: A dictionary of files uploaded by the user.
        :type files: dict of str
        :param name: The key name of this widget.
        :type name: str
        :return: The value of this widget.
        :rtype: int
        """
        number, interval_type = super(ScaledNumberInput, self).value_from_datadict(data, files, name)
        # Normalise: the stored value is always number * factor.
        return int(float(number) * float(interval_type))

    def format_output(self, rendered_widgets):
        """
        Given a list of rendered widgets (as strings), returns a Unicode string
        representing the HTML for the whole lot.

        :param rendered_widgets: A list of widgets rendered in HTML.
        :type rendered_widgets: list of unicode
        :return: A HTML string combining each widget.
        :rtype: unicode
        """
        return u"""
        <div class="row">
            <div class="col-sm-6">
                {0}
            </div>
            <div class="col-sm-6">
                {1}
            </div>
        </div>
        """.format(*rendered_widgets)
# noinspection PyAbstractClass
# Base class implements the render method
class ComboTextInput(MultiWidget):
    """
    A "combo box": a select menu offering suggested values, plus a text box
    for entering a custom value instead.

    Designed for the Bootstrap CSS framework.
    """

    def __init__(self, choices, default="", attrs=None):
        if attrs is None:
            attrs = {}
        # Marker used by the accompanying JavaScript (combo.js) to wire up
        # the combobox behaviour.
        attrs["data-type"] = "combobox"
        self._choices = choices
        self._default = default
        super(ComboTextInput, self).__init__([Select(choices=self._choices), TextInput], attrs)

    def decompress(self, value):
        """
        Split a compressed value into per-subwidget values.

        Element 0 is the suggested (menu) value; element 1 is the custom
        (text box) value.

        :param value: A compressed value to be represented by this widget.
        :type value: str | unicode
        :return: The decompressed interpretation of the value.
        :rtype: list of (str | unicode)
        """
        if value is None:
            return [self._default, ""]
        if value == "":
            # An empty string is the explicit "None" menu entry.
            return ["None", ""]
        # A value matching one of the suggestions selects it in the menu;
        # anything else goes into the custom text box.
        if any(value == option for option, label in self._choices):
            return [value, ""]
        return ["", value]

    def value_from_datadict(self, data, files, name):
        """
        Return the value of this widget derived from the submitted data
        dictionaries.

        :param data: A dictionary of strings submitted by the user via a form.
        :type data: dict of (str | unicode)
        :param files: A dictionary of files uploaded by the user.
        :type files: dict of str
        :param name: The key name of this widget.
        :type name: str
        :return: The value of this widget.
        :rtype: str | unicode | None
        """
        suggested, custom = super(ComboTextInput, self).value_from_datadict(data, files, name)
        chosen = custom if suggested == "" else suggested
        if chosen == "None":
            return None
        return chosen

    class Media(object):
        """
        Declares resources that should be included when this form is displayed.
        """
        js = ("bdr/js/combo.js",)
|
"Well it's true, I push too hard I guess To use whatever fuel is left At it's best it's all the art of doubt"
JULY TALK was amazing. ZOE was great. METRIC was incredible. Can’t wait to see them again at some point. Venue was nice. I heard that they ran out of beer, but the music was just fantastic.
|
#!python
# coding=utf-8
from pocean.cf import CFDataset
from pocean import logger
class ContiguousRaggedTimeseries(CFDataset):
    """CF Discrete Sampling Geometry "timeseries" stored as contiguous ragged arrays."""

    @classmethod
    def is_mine(cls, dsg):
        """Return True if `dsg` structurally matches this DSG representation.

        Each requirement is expressed as an assert; any failure is caught and
        reported as "not mine" rather than raised.
        """
        try:
            rvars = dsg.filter_by_attrs(cf_role='timeseries_id')
            # Exactly one variable must carry the timeseries_id role.
            assert len(rvars) == 1
            assert dsg.featureType.lower() == 'timeseries'
            # At least one of each coordinate axis must be present.
            assert len(dsg.t_axes()) >= 1
            assert len(dsg.x_axes()) >= 1
            assert len(dsg.y_axes()) >= 1

            # Contiguous ragged layout requires exactly one count variable
            # carrying the sample_dimension attribute.
            o_index_vars = dsg.filter_by_attrs(
                sample_dimension=lambda x: x is not None
            )
            assert len(o_index_vars) == 1
            assert o_index_vars[0].sample_dimension in dsg.dimensions  # Sample dimension

            # Allow for string variables
            rvar = rvars[0]
            # 0 = single
            # 1 = array of strings/ints/bytes/etc
            # 2 = array of character arrays
            assert 0 <= len(rvar.dimensions) <= 2
        except AssertionError:
            return False

        return True

    def from_dataframe(self, df, variable_attributes=None, global_attributes=None):
        # Not implemented for this DSG type yet.
        variable_attributes = variable_attributes or {}
        global_attributes = global_attributes or {}
        raise NotImplementedError

    def calculated_metadata(self, df=None, geometries=True, clean_cols=True, clean_rows=True):
        # Not implemented for this DSG type yet.
        # if df is None:
        #     df = self.to_dataframe(clean_cols=clean_cols, clean_rows=clean_rows)
        raise NotImplementedError

    def to_dataframe(self):
        # Not implemented for this DSG type yet.
        raise NotImplementedError
|
Upland | Waterfowl | Turkey Hunting — Bespoke Sporting, Ltd.
Northern Michigan is blessed with an abundance of habitat for good bird hunting. Whether it is hunting quail or chukars on our 176-acre shooting grounds or chasing grouse/woodcock, waterfowl or turkey, we have you covered. We offer guided hunts for upland and waterfowl in the Fall and wild turkey in the Spring. For more information, please click here.
Fall in the north woods, is something to be experienced. On top of that, being able to chase grouse and woodcock for the day is the cherry on top! Join us for a day of hunting local coverts chasing the King of the Gamebirds! Our pointing and flushing dogs will have you in the thick and on the birds! You won't forget this day for a while. You even get a field lunch of yak burger from our family ranch!
While northern Michigan is not the mecca of waterfowl hunting, there are unique and enjoyable ways to chase ducks and geese here in our paradise. Join us for a half or full day hunt on the river, in the orchards/vineyards or in the field.
You probably travel to northern Michigan to sip wine or visit local breweries, but did you ever consider coming up in late April or May to chase toms in those same vineyards? For an enjoyable alternative while your significant other rests up before wine tasting, give us a call!
|
"""
QUESTION:
Given a binary tree, return the bottom-up level order traversal of its nodes' values. (ie, from left to right, level by level from leaf to root).
For example:
Given binary tree {3,9,20,#,#,15,7},
3
/ \
9 20
/ \
15 7
return its bottom-up level order traversal as:
[
[15,7],
[9,20],
[3]
]
ANSWER:
dfs, bfs
"""
# Definition for a binary tree node
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
    """Bottom-up level-order traversal of a binary tree, three ways."""

    def levelOrderBottom(self, root):
        """BFS that rebuilds the frontier list per level, prepending each level.

        :type root: TreeNode
        :rtype: List[List[int]]
        """
        res = []
        if not root:
            return res
        queue = [root]
        while queue:
            next_level = []
            cur = []
            for node in queue:
                if node.left:
                    next_level.append(node.left)
                if node.right:
                    next_level.append(node.right)
                cur.append(node.val)
            # Prepend so the deepest level ends up first.
            res = [cur] + res
            queue = next_level
        return res

    def levelOrder_2(self, root):
        """BFS over a single growing queue with a read cursor.

        Fixed: the original used Python 2's `xrange`, which raises NameError
        under Python 3; this version is equivalent on both.
        """
        if not root:
            return []
        queue = [root]
        res = []
        cursor = 0
        while cursor != len(queue):
            level_end = len(queue)
            cur = []
            while cursor < level_end:
                node = queue[cursor]
                cursor += 1
                cur.append(node.val)
                if node.left:
                    queue.append(node.left)
                if node.right:
                    queue.append(node.right)
            res = [cur] + res
        return res

    def levelOrder_3(self, root):
        """Recursive DFS that files each value into its level's bucket."""
        def dfs(node, level):
            if node is None:
                return
            if len(res) <= level:
                # First visit at this depth: open a new bucket at the front.
                res.insert(0, [node.val])
            else:
                # Buckets are stored deepest-first, so index from the end.
                res[len(res) - level - 1].append(node.val)
            dfs(node.left, level + 1)
            dfs(node.right, level + 1)

        res = []
        dfs(root, 0)
        return res
if __name__ == '__main__':
    # Smoke test; the bare `print` statement here was Python-2-only (under
    # Python 3 a bare `print` is a no-op expression, not a call).
    print(Solution().levelOrderBottom(None))
|
Welcome to Eerikkilä! We provide great surroundings and facilities for leisure time, company events and top-athlete training.
Sami Hyypiä Academy and Floorball Academy.
Floorball Academy (FBA) concentrates on the comprehensive development of floorball in accordance with the requirements of top-level sports, from childhood to adulthood. Learn more about progression follow up events, training camp facilities and floorball schools.
|
import datetime
import difflib
import re
import unicodedata

import pytz
import regex
import requests

import config
from pynab import log
from pynab.db import db_session, Release, Movie, MetaBlack, Category, DataLog, windowed_query
PROCESS_CHUNK_SIZE = 500
OMDB_SEARCH_URL = 'http://www.omdbapi.com/?s='
OMDB_DETAIL_URL = 'http://www.omdbapi.com/?i='
def process(limit=None, online=True):
    """Process movies without imdb data and append said data.

    :param limit: optional cap on the number of releases processed this run
    :param online: when True, fall back to querying OMDb for unknown movies
    """
    # Cutoff past which previous failed-lookup blacklist entries expire.
    expiry = datetime.datetime.now(pytz.utc) - datetime.timedelta(config.postprocess.get('fetch_blacklist_duration', 7))

    with db_session() as db:
        # clear expired metablacks
        db.query(MetaBlack).filter(MetaBlack.movie != None).filter(MetaBlack.time <= expiry).delete(
            synchronize_session='fetch')

        # Movie releases (category parent 2000) that have no movie data yet.
        query = db.query(Release).filter(Release.movie == None).join(Category).filter(Category.parent_id == 2000)

        if online:
            # Skip releases already attempted and blacklisted.
            query = query.filter(Release.movie_metablack_id == None)
        query = query.order_by(Release.posted.desc())

        if limit:
            releases = query.limit(limit)
        else:
            # Stream in chunks so a huge backlog doesn't blow up memory.
            releases = windowed_query(query, Release.id, PROCESS_CHUNK_SIZE)

        for release in releases:
            name, year = parse_movie(release.search_name)
            if name and year:
                method = 'local'
                # First try an already-stored movie matched by name/year.
                imdb = db.query(Movie).filter(
                    Movie.name.ilike('%'.join(clean_name(name).split(' ')))
                ).filter(Movie.year == year).first()

                if not imdb and online:
                    method = 'online'
                    movie = search(clean_name(name), year)
                    if movie and movie['Type'] == 'movie':
                        imdb = db.query(Movie).filter(Movie.id == movie['imdbID']).first()
                        if not imdb:
                            # Cache the OMDb hit locally for future runs.
                            imdb = Movie()
                            imdb.id = movie['imdbID']
                            imdb.name = movie['Title']
                            imdb.year = movie['Year']
                            db.add(imdb)

                if imdb:
                    log.debug('imdb: [{}] - [{}] - movie data added: {}'.format(
                        release.id,
                        release.search_name,
                        method
                    ))
                    release.movie = imdb
                    # Clear any blacklist marker now that data was found.
                    release.movie_metablack_id = None
                    db.add(release)
                elif not imdb and online:
                    log.debug('imdb: [{}] - movie data not found: online'.format(
                        release.search_name
                    ))
                    # Blacklist so we don't re-query OMDb until expiry.
                    mb = MetaBlack(status='ATTEMPTED', movie=release)
                    db.add(mb)
                else:
                    log.debug('imdb: [{}] - [{}] - movie data not found: local'.format(
                        release.id,
                        release.search_name
                    ))
            else:
                log.debug('imdb: [{}] - [{}] - movie data not found: no suitable regex for movie name'.format(
                    release.id,
                    release.search_name
                ))
                # Name couldn't even be parsed: permanently blacklist and log
                # the name so the parser can be improved later.
                db.add(MetaBlack(status='IMPOSSIBLE', movie=release))
                db.add(DataLog(description='imdb parse_movie regex', data=release.search_name))

        db.commit()
def search(name, year):
    """Search OMDb for a movie and return the matching result dict.

    :param name: cleaned movie name to search for
    :param year: release year as a string (may be empty)
    :return: the OMDb search-result dict for the best match, or None
    """
    # if we managed to parse the year from the name
    # include it, since it'll narrow results
    if year:
        year_query = '&y={}'.format(year.replace('(', '').replace(')', ''))
    else:
        year_query = ''

    try:
        r = requests.get(OMDB_SEARCH_URL + name + year_query)
        data = r.json()
    except Exception:
        # Fixed: was a bare `except:`, which also swallows SystemExit and
        # KeyboardInterrupt. Catch Exception so those still propagate.
        log.critical('There was a problem accessing the IMDB API page.')
        return None

    if 'Search' in data:
        for movie in data['Search']:
            # doublecheck, but the api should've searched properly
            ratio = difflib.SequenceMatcher(None, clean_name(name), clean_name(movie['Title'])).ratio()
            if ratio > 0.8 and year == movie['Year'] and movie['Type'] == 'movie':
                return movie
    return None
def get_details(id):
    """Fetch full movie detail from OMDb for an IMDB id.

    :param id: IMDB identifier, e.g. "tt0133093"
    :return: dict with _id/title/year/genre keys, or None on failure
    """
    r = requests.get(OMDB_DETAIL_URL + id)
    data = r.json()

    # Fixed: OMDb includes a 'Response' key even on errors (set to the string
    # 'False'), so `'Response' in data` was always true and error payloads
    # raised KeyError on 'imdbID'. Check the value instead.
    if data.get('Response') == 'True':
        imdb = {
            '_id': data['imdbID'],
            'title': data['Title'],
            'year': data['Year'],
            'genre': data['Genre'].split(',')
        }
        return imdb
    else:
        return None
def parse_movie(search_name):
    """Parses a movie name into name / year.

    :param search_name: raw release name, e.g. "Some.Movie.2014.1080p.BluRay"
    :return: (cleaned name, year string) — year is '' when absent;
        (None, None) when no movie name could be extracted
    """
    # Primary pattern: anything followed by a 19xx/20xx year.
    result = re.search(r'^(?P<name>.*)[\.\-_\( ](?P<year>19\d{2}|20\d{2})', search_name, re.I)
    if not result:
        # Fixed: this fallback was dead code — it previously only ran when the
        # first regex *matched*, whose groupdict always contains 'year'. It is
        # meant to run when no year is present: trim at a known quality tag.
        result = re.search(
            r'^(?P<name>.*)[\.\-_ ](?:dvdrip|bdrip|brrip|bluray|hdtv|divx|xvid|proper|repack|real\.proper|sub\.?fix|sub\.?pack|ac3d|unrated|1080i|1080p|720p|810p)',
            search_name, re.I)

    if result:
        parts = result.groupdict()
        if 'name' in parts:
            # Strip parenthesised chunks and separator characters.
            name = re.sub(r'\(.*?\)|\.|_', ' ', parts['name'])
            year = parts.get('year', '')
            return name, year

    return None, None
def clean_name(name):
    """Cleans a show name for searching (against omdb).

    Normalises Unicode, replaces separator characters with spaces and strips
    punctuation that OMDb titles never contain.

    :param name: raw name to clean
    :return: cleaned name string
    """
    name = unicodedata.normalize('NFKD', name)
    # These patterns are plain character classes, so the stdlib `re` module
    # suffices (consistency with parse_movie; no third-party `regex` needed).
    name = re.sub(r'[._\-]', ' ', name)
    name = re.sub(r'[\':!"#*’,()?$&]', '', name)
    return name
|
2. Businesses always want to engage more people through different channels. Quests are one of these.
Mobile platform for quests in your city.
- No resources to find proper team of engineers, marketers, sales etc.
With no investment in marketing and virality, the project will have a small ROI; however, the market is huge.
|
from panda3d import core
empty_format = core.GeomVertexFormat.get_empty()
def test_geom_decompose_in_place():
    vdata = core.GeomVertexData("", empty_format, core.GeomEnums.UH_static)

    # Build a single four-vertex triangle strip.
    strip = core.GeomTristrips(core.GeomEnums.UH_static)
    for vi in (0, 1, 2, 3):
        strip.add_vertex(vi)
    strip.close_primitive()

    geom = core.Geom(vdata)
    geom.add_primitive(strip)

    # Decomposing in place must turn the strip into two plain triangles.
    geom.decompose_in_place()
    tris = geom.get_primitive(0)
    assert tuple(tris.get_vertex_list()) == (0, 1, 2, 2, 1, 3)
def test_geom_decompose():
    vdata = core.GeomVertexData("", empty_format, core.GeomEnums.UH_static)

    # One triangle strip over vertices 0..3.
    strip = core.GeomTristrips(core.GeomEnums.UH_static)
    for vi in (0, 1, 2, 3):
        strip.add_vertex(vi)
    strip.close_primitive()

    geom = core.Geom(vdata)
    geom.add_primitive(strip)

    # decompose() must return a fresh Geom made of plain triangles.
    decomposed = geom.decompose()
    assert tuple(decomposed.get_primitive(0).get_vertex_list()) == (0, 1, 2, 2, 1, 3)

    # Old primitive should still be unchanged
    assert strip == geom.get_primitive(0)
def test_geom_calc_sphere_bounds():
    # A NaN vertex must not poison the computed bounding sphere.
    vdata = core.GeomVertexData("", core.GeomVertexFormat.get_v3(), core.Geom.UH_static)
    writer = core.GeomVertexWriter(vdata, "vertex")
    writer.add_data3((float("NaN"), 0, 0))
    writer.add_data3((1, 1, 1))
    writer.add_data3((1, 1, 2))

    points = core.GeomPoints(core.Geom.UH_static)
    points.add_next_vertices(3)

    geom = core.Geom(vdata)
    geom.add_primitive(points)
    geom.set_bounds_type(core.BoundingVolume.BT_sphere)

    sphere = geom.get_bounds()
    assert isinstance(sphere, core.BoundingSphere)
    assert sphere.get_center() == (1, 1, 1.5)
    assert sphere.get_radius() == 0.5
def test_geom_calc_box_bounds():
    # A NaN vertex must not poison the computed bounding box.
    vdata = core.GeomVertexData("", core.GeomVertexFormat.get_v3(), core.Geom.UH_static)
    writer = core.GeomVertexWriter(vdata, "vertex")
    writer.add_data3((float("NaN"), 0, 0))
    writer.add_data3((1, 1, 1))
    writer.add_data3((1, 1, 2))

    points = core.GeomPoints(core.Geom.UH_static)
    points.add_next_vertices(3)

    geom = core.Geom(vdata)
    geom.add_primitive(points)
    geom.set_bounds_type(core.BoundingVolume.BT_box)

    box = geom.get_bounds()
    assert isinstance(box, core.BoundingBox)
    assert box.get_min() == (1, 1, 1)
    assert box.get_max() == (1, 1, 2)
|
Spy Car is an enjoyable, fast-paced action destruction game. Get your spy car and race through the streets to wipe out all your enemies. Gather cash and other bonuses to buy upgrades. Have a good time!
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=unidiomatic-typecheck
"""Prototype decorator for defining legacy-graph-mode functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import weakref
from tensorflow.core.protobuf import meta_graph_pb2
from tensorflow.core.protobuf import struct_pb2
from tensorflow.python.eager import context
from tensorflow.python.eager import function
from tensorflow.python.eager import lift_to_graph
from tensorflow.python.framework import composite_tensor
from tensorflow.python.framework import func_graph
from tensorflow.python.framework import importer
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_spec
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.saved_model import nested_structure_coder
from tensorflow.python.training.tracking import data_structures
from tensorflow.python.util import nest
from tensorflow.python.util.tf_export import tf_export
class VariableHolder(object):
  """Holds variables for a python function."""

  def __init__(self, fn=None, share_variables=False):
    # fn: the wrapped python function (may be None when the holder is shared
    #   across several wrapped functions, as in WrappedGraph).
    # share_variables: when True, variables are reused by name across calls
    #   instead of creating a fresh one each time.
    self._fn = fn
    self._share_variables = share_variables
    self._variables_by_name = data_structures.Mapping()

  @property
  def variables(self):
    # Mapping of variable name -> variable created/captured so far.
    return self._variables_by_name

  def variable_creator_scope(self, next_creator, **kwargs):
    """Creates variables & adds them to collections to match legacy code."""
    collections = kwargs.pop("collections", None)
    v = None

    # Get expected variable name.
    with ops.name_scope(
        kwargs.get("name", None), "Variable", skip_on_eager=False) as name:
      variable_name = ops.name_from_scope_name(name)
      kwargs["name"] = name

    if self._share_variables:
      # Reuse a previously created variable with this name, if any.
      v = self._variables_by_name.get(variable_name, None)

    if v is None:
      v = next_creator(**kwargs)
      self._variables_by_name[variable_name] = v

    # Mirror TF1 behaviour: default to the GLOBAL_VARIABLES collection, and
    # also register trainable variables under TRAINABLE_VARIABLES.
    if collections is None:
      collections = [ops.GraphKeys.GLOBAL_VARIABLES]
    if v.trainable and ops.GraphKeys.TRAINABLE_VARIABLES not in collections:
      collections = list(collections) + [ops.GraphKeys.TRAINABLE_VARIABLES]

    ops.add_to_collections(collections, v)

    return v

  def __call__(self, *args, **kwargs):
    # Invoke the held function with variable creation routed through us.
    return self.call_with_variable_creator_scope(self._fn)(*args, **kwargs)

  def call_with_variable_creator_scope(self, fn):
    # Returns `fn` wrapped so that any variables it creates go through
    # variable_creator_scope above.
    def wrapped(*args, **kwargs):
      with variable_scope.variable_creator_scope(self.variable_creator_scope):
        return fn(*args, **kwargs)

    return wrapped
def _get_element_from_tensor_info(tensor_info, graph):
  """Simplified copy of the deprecated `get_tensor_from_tensor_info`.

  Args:
    tensor_info: A `TensorInfo` proto describing a tensor (or operation) by
      name, COO sparse components, or composite-tensor components.
    graph: The graph in which to resolve the referenced elements.

  Returns:
    The graph element the proto refers to: an op/tensor, a `SparseTensor`,
    or a reconstituted composite tensor.

  Raises:
    ValueError: If the proto's `encoding` field is unrecognised.
  """
  encoding = tensor_info.WhichOneof("encoding")
  if encoding == "name":
    # We may get operations here in some cases. TensorInfo is a bit of a
    # misnomer if so.
    return graph.as_graph_element(tensor_info.name)
  elif encoding == "coo_sparse":
    # Rebuild the SparseTensor from its three component tensors.
    return sparse_tensor.SparseTensor(
        graph.get_tensor_by_name(tensor_info.coo_sparse.indices_tensor_name),
        graph.get_tensor_by_name(tensor_info.coo_sparse.values_tensor_name),
        graph.get_tensor_by_name(
            tensor_info.coo_sparse.dense_shape_tensor_name))
  elif encoding == "composite_tensor":
    # Decode the TypeSpec, then reassemble the composite from its components.
    struct_coder = nested_structure_coder.StructureCoder()
    spec_proto = struct_pb2.StructuredValue(
        type_spec_value=tensor_info.composite_tensor.type_spec)
    spec = struct_coder.decode_proto(spec_proto)
    components = [graph.get_tensor_by_name(component.name) for component in
                  tensor_info.composite_tensor.components]
    return spec._from_components(components)  # pylint: disable=protected-access
  else:
    raise ValueError("Invalid TensorInfo.encoding: %s" % encoding)
def _lift_single_variable(old_variable, graph, variable_holder):
  """Lifts `old_variable` out of the `FuncGraph` `graph`.

  Args:
    old_variable: The graph-mode resource variable to lift.
    graph: The `FuncGraph` the variable currently lives in.
    variable_holder: `VariableHolder` that records the lifted variable.

  Returns:
    The new outer-context variable whose handle captures the old one.
  """
  # Uninitialized so no initial value is needed; the old initializer op and
  # handle data are carried over instead.
  new_variable = resource_variable_ops.UninitializedVariable(
      shape=old_variable.shape,
      dtype=old_variable.dtype,
      name=old_variable.op.name,
      trainable=old_variable.trainable,
      extra_handle_data=old_variable.handle)
  new_variable._initializer_op = old_variable._initializer_op  # pylint: disable=protected-access
  graph.add_capture(new_variable.handle, old_variable.handle)
  # Now that we've added the new variable to graph.captures,
  # graph.capture will use that cached value and do some post-processing
  # on the capture like recording it on the tape.
  graph.capture(new_variable.handle)

  # pylint: disable=protected-access
  variable_name = new_variable.name.split(":")[0]
  variable_holder._variables_by_name[variable_name] = new_variable
  graph._weak_variables.append(weakref.ref(new_variable))
  # pylint: enable=protected-access
  graph.watch_variable(new_variable)
  return new_variable
def _lift_unlifted_variables(graph, variable_holder):
  """Finds resource variables and lifts them into the outer context.

  When we import a GraphDef inside a wrap_function, no Python graph building
  code runs. This means we get VarHandleOps which create variable resources,
  but no corresponding Python objects. Leaving them like this works but gives
  the user no way to interact with or modify the variables outside the graph.

  This method searches for variables and lifts them out as regular variable
  objects when possible, indicating to the FuncGraph that they are captures.

  Args:
    graph: The FuncGraph to lift variables from.
    variable_holder: A VariableHolder to record the lifted variables in.
  """
  with graph.as_default():
    global_collection_variables = ops.get_collection(
        ops.GraphKeys.GLOBAL_VARIABLES)
    local_collection_variables = ops.get_collection(
        ops.GraphKeys.LOCAL_VARIABLES)
    existing_captures = {id(c) for c in graph.internal_captures}
    # Maps id(old variable) -> lifted replacement, used to rewrite the
    # collections at the end.
    lifted_variables = {}

    def _should_lift_variable(v):
      # Lift only graph-mode resource variables built inside a function that
      # are not already captured.
      return ((v._in_graph_mode  # pylint: disable=protected-access
               and v.graph.building_function)
              and isinstance(v, resource_variable_ops.BaseResourceVariable)
              and id(v.handle) not in existing_captures)

    for old_variable in global_collection_variables:
      if _should_lift_variable(old_variable):
        new_variable = _lift_single_variable(
            old_variable, graph, variable_holder)
        lifted_variables[id(old_variable)] = new_variable
        existing_captures.add(id(old_variable.handle))

    for old_variable in local_collection_variables:
      if _should_lift_variable(old_variable):
        new_variable = _lift_single_variable(
            old_variable, graph, variable_holder)
        lifted_variables[id(old_variable)] = new_variable
        existing_captures.add(id(old_variable.handle))
        if new_variable._in_graph_mode:  # pylint: disable=protected-access
          outer_graph = new_variable.graph
          # Variables are added to the global collection by default. In this
          # case we only want the variable in the local collection, so we'll pop
          # it out.
          global_collection = outer_graph.get_collection_ref(
              ops.GraphKeys.GLOBAL_VARIABLES)
          global_collection.remove(new_variable)
          outer_graph.add_to_collection(
              ops.GraphKeys.LOCAL_VARIABLES, new_variable)

    # Update the FuncGraph's collections, partly for the user and partly so this
    # function is idempotent when it runs again in prune() calls.
    for collection_name in [
        ops.GraphKeys.GLOBAL_VARIABLES, ops.GraphKeys.LOCAL_VARIABLES
    ]:
      mutable_collection = ops.get_collection_ref(collection_name)
      for index, current in enumerate(mutable_collection):
        mutable_collection[index] = lifted_variables.get(id(current), current)
        if not resource_variable_ops.is_resource_variable(
            mutable_collection[index]):
          logging.log_first_n(
              logging.WARN,
              "Unable to create a python object for variable {} because it is "
              "a reference variable. It may not be visible to training APIs. "
              "If this is a problem, consider rebuilding the SavedModel after "
              "running tf.compat.v1.enable_resource_variables().".format(
                  mutable_collection[index]),
              5)
# TODO(allenl): make this trackable
class WrappedFunction(function.ConcreteFunction):
  """Wraps a tf V1 piece of code in a function."""

  def __init__(self, fn_graph, variable_holder, attrs=None, signature=None):
    # fn_graph: the FuncGraph holding the traced/imported V1 code.
    # variable_holder: VariableHolder recording this function's variables.
    # signature: optional flat list of TensorSpecs used by _call_impl to
    #   convert positional args.
    self._variable_holder = variable_holder
    _lift_unlifted_variables(fn_graph, variable_holder)
    # We call __init__ after lifting variables so that the function's signature
    # properly reflects the new captured inputs.
    for f in fn_graph.as_graph_def().library.function:
      context.context().add_function_def(f)
    self._signature = signature
    super(WrappedFunction, self).__init__(fn_graph, attrs=attrs)

  def _call_impl(self, args, kwargs, cancellation_manager=None):
    if self._arg_keywords is None:
      # No keyword information available (e.g. a pruned function): only
      # positional calling is supported.
      if kwargs:
        raise NotImplementedError(
            "Keyword arguments not supported when calling a "
            "wrap_function-decorated function.")
      if self._signature is not None:
        # Convert plain Python values to tensors of the declared dtype.
        args = list(args)
        for i, arg in enumerate(args):
          if isinstance(self._signature[i], tensor_spec.DenseSpec):
            args[i] = ops.convert_to_tensor(arg, self._signature[i].dtype)
      return self._call_flat(args, self.captured_inputs)
    else:
      return super(WrappedFunction, self)._call_impl(
          args, kwargs, cancellation_manager)

  def prune(self, feeds, fetches, name=None, input_signature=None):
    """Extract a subgraph of this function's underlying graph.

    Wraps the subgraph in a new `WrappedFunction` object.

    Args:
      feeds: Input tensors to the subgraph to extract, as `Tensor` objects.
      fetches: Possibly-nested Python data structure containing information
        about outputs of the target subgraph. Each entry can either be a
        `Tensor` object (for data outputs), an `Operation` object (for control
        outputs), or a `TensorInfo` proto. Any additional shape/dtype
        information provided in a `TensorInfo` and not present in the original
        graph will be added to the returned subgraph.
      name: (optional) Name to give to the underlying `FuncGraph` of the
        returned object. If no name is provided, the graph's name will be
        `"pruned"`.
      input_signature: (optional) possibly-nested Python data structure
        containing `TensorSpec` objects, with which to populate the returned
        functions's `FuncGraph`'s `structured_input_signature` field.

    Returns:
      A new `WrappedFunction` object containing a copy of the portion of this
      object's graph that goes from `feeds` to `fetches`.
    """
    # TODO(b/129646028): Add support for CompositeTensors.
    name = name or "pruned"
    flat_feeds = nest.flatten(feeds, expand_composites=True)
    flat_feeds = [self.graph.as_graph_element(t) for t in flat_feeds]
    for f in flat_feeds:
      if not isinstance(f, ops.Tensor):
        raise ValueError("Feeds must be tensors.")

    # Ignoring all feeds that are captures allows prune to be called
    # using wrapped_func.inputs even when it uses variables
    internal_captures = {id(c) for c in self.graph.internal_captures}
    flat_feeds = [f for f in flat_feeds if id(f) not in internal_captures]

    # Accumulated by _fetch_preprocessing_callback below.
    operation_fetches = []
    tensor_fetches = []
    tensor_infos = []

    def _fetch_preprocessing_callback(fetch):
      """Extract out lists of ops, tensors, and tensor type info.

      Turns TensorInfos into Tensors in the original `fetches` structure.
      Also extracts ops from `fetches`.

      Args:
        fetch: The fetch to preprocess: Tensor, TensorInfo, or Operation, or
          string identifying a Tensor or Operation.

      Returns:
        `fetch` converted to a Tensor.
      """
      if isinstance(fetch, ops.Operation):
        operation_fetches.append(fetch)
        return fetch
      elif isinstance(fetch, meta_graph_pb2.TensorInfo):
        tensor_infos.append(fetch)
        decoded = _get_element_from_tensor_info(fetch, self._func_graph)
        if (tensor_util.is_tensor(decoded) or
            isinstance(decoded, composite_tensor.CompositeTensor)):
          tensor_fetches.append(decoded)
        else:
          operation_fetches.append(decoded)
        return decoded
      elif isinstance(fetch, (ops.Tensor, composite_tensor.CompositeTensor)):
        tensor_fetches.append(fetch)
        return fetch
      else:
        # Strings and other graph-element identifiers: resolve, then recurse.
        graph_element = self.graph.as_graph_element(fetch)
        return _fetch_preprocessing_callback(graph_element)

    fetches = nest.map_structure(_fetch_preprocessing_callback, fetches)

    # Expand composite tensors into their component dense Tensors.
    tensor_fetches = nest.flatten(tensor_fetches, expand_composites=True)

    for f in (flat_feeds + tensor_fetches + operation_fetches):
      if f.graph is not self._func_graph:
        raise ValueError("Can only prune function whose feeds and fetches "
                         "are from this graph (%s). Input %s is from graph %s" %
                         (self._func_graph, f, f.graph))
    with self._func_graph.as_default():
      pruned_graph = func_graph.FuncGraph(name)
    lift_map = lift_to_graph.lift_to_graph(
        operation_fetches + tensor_fetches,
        pruned_graph,
        sources=flat_feeds + self.graph.internal_captures,
        base_graph=self._func_graph)

    # Note that we add the component tensors of any composite tensors to the
    # returned function's outputs list; the list must contain these component
    # tensors, or the function's sparse outputs won't work properly.
    pruned_graph.outputs.extend(lift_map[x] for x in tensor_fetches)
    pruned_graph.control_outputs.extend(
        [lift_map[operation] for operation in operation_fetches])
    pruned_graph.inputs.extend(lift_map[x] for x in flat_feeds)
    for external_capture, internal_capture in self.graph.captures:
      pruned_graph.add_capture(external_capture, lift_map[internal_capture])
    # Apply any extra shape information carried by TensorInfo protos.
    for ti in tensor_infos:
      if ti.WhichOneof("encoding") == "name":  # Dense tensors only
        t = pruned_graph.as_graph_element(ti.name)
        if tensor_util.is_tensor(t):
          t.set_shape(tensor_shape.TensorShape(ti.tensor_shape))
    # pylint: disable=protected-access
    for f in self.graph._functions.values():
      pruned_graph._add_function(f)
    # pylint: enable=protected-access

    pruned_graph.variables = self.graph.variables

    def _structured_output_mapping(fetched):
      """callback for `nest.map_structure()`"""
      lifted = lift_map[fetched]
      if isinstance(lifted, ops.Operation):
        return None
      return lifted

    # expand_composites=True here causes composite tensors to be expanded
    # into their component dense Tensors, mapped to the new graph, and then
    # reconstituted into their original composite form.
    pruned_graph.structured_outputs = nest.map_structure(
        _structured_output_mapping, fetches, expand_composites=True)
    pruned_graph.structured_input_signature = input_signature
    pruned_fn = WrappedFunction(
        pruned_graph, variable_holder=self._variable_holder)
    pruned_fn._num_positional_args = len(flat_feeds)  # pylint: disable=protected-access
    # TODO(kathywu): Enable keyword arguments if an input signature is specified
    pruned_fn._arg_keywords = [tensor.op.name for tensor in flat_feeds]  # pylint: disable=protected-access
    return pruned_fn
def _filter_returned_ops(fn):
  """Wraps `fn` so that any `Operation`s in its result become `None`.

  Args:
    fn: a function

  Returns:
    A tuple of (
      Wrapped function that returns `None` in place of any ops,
      dict that maps the index in the flat output structure to the returned op
    )
  """
  returned_ops = {}

  def wrap_and_filter_returned_ops(*args, **kwargs):
    outputs = fn(*args, **kwargs)
    flat_outputs = nest.flatten(outputs)
    for index, value in enumerate(flat_outputs):
      if isinstance(value, ops.Operation):
        # Remember the op by its flat position, then blank it out.
        returned_ops[index] = value
        flat_outputs[index] = None
    return nest.pack_sequence_as(outputs, flat_outputs)

  return wrap_and_filter_returned_ops, returned_ops
class WrappedGraph(object):
  """Class for wrapping multiple TF 1.X functions in a single graph.

  Maintains a dictionary mapping names to wrapped functions. See
  `tf.compat.v1.wrap_function` to learn more about wrapping V1 functions.

  Functions wrapped using this class have access to variables and collections
  created in other wrapped functions, using the standard TF 1.X API (
  `tf.compat.v1.get_variable` or
  `tf.compat.v1.get_default_graph().get_collection(...)`)

  Outside a function, variables and collections may be accessed using the
  `variables` and `graph` properties.

  Example:

  ```
  def add_v1(x):
    with tf.compat.v1.variable_scope('vars', reuse=tf.compat.v1.AUTO_REUSE):
      v = tf.compat.v1.get_variable('v', shape=[], dtype=tf.int32)
    return v + x

  def increment_var_v1(x):
    with tf.compat.v1.variable_scope('vars', reuse=tf.compat.v1.AUTO_REUSE):
      v = tf.compat.v1.get_variable('v', shape=[], dtype=tf.int32)
    return v.assign_add(x)

  g = WrappedGraph()
  add = g.wrap_function(add_v1, [tf.TensorSpec([], tf.int32)])
  increment_var = g.wrap_function(increment_var_v1,
                                  [tf.TensorSpec([], tf.int32)])

  assert len(g.variables) == 1
  assert g.variables[0].numpy() == 0
  increment_var(tf.constant(5))
  assert g.variables[0].numpy() == 5
  ```
  """

  def __init__(self, variable_holder=None, **kwargs):
    # Share one VariableHolder so all functions wrapped in this graph see
    # the same set of variables.
    self._variable_holder = (
        variable_holder or VariableHolder(share_variables=True))
    name = kwargs.pop("name", "wrapped_function_graph")
    # Always start with empty collections, unless otherwise specified. Setting
    # `collections=None` will copy the collections from the outer graph.
    collections = kwargs.pop("collections", {})
    self.graph = func_graph.FuncGraph(name, collections=collections, **kwargs)
    # A single WrappedFunction over the shared graph; individual functions
    # are produced by pruning it to each traced input/output set.
    self._wrapped_function = WrappedFunction(self.graph, self._variable_holder)
    self._functions = {}

  @property
  def functions(self):
    # Mapping of function name -> pruned, eager-compatible function.
    return self._functions

  @property
  def variables(self):
    # All variables created by any function wrapped in this graph.
    return self._variable_holder.variables

  def wrap_function(self, fn, signature, name=None):
    """Wraps a TF 1.X function and returns an eager-compatible function.

    All functions wrapped in the same `WrappedGraph` will have access to the
    same graph (`tf.compat.v1.get_default_graph` to get the graph object
    within a function, or `WrappedGraph.graph` to get the graph outside a
    function). Variables created within the function will be added to the
    `variables` list.

    Function inputs: All inputs to the function must be tensors (nested ok),
    with their shapes and dtypes defined in the `signature` argument.

    Function outputs:

    * The 1.X function may return tensors, variables, and ops. The wrapped
      eager-compatible function will always return tensors in the same nested
      structure.
    * Variables are replaced with a tensor containing the latest read values.
    * Returned ops are executed, and replaced with None.
    * The order of op execution and variable reads in the return is
      nondeterministic. For example:

      ```
      def update_var(x):
        v = tf.Variable(0)
        op = tf.compat.v1.assign(v, x).op
        return v, op

      g = WrappedGraph()
      fn = g.wrap_function(update_var)
      read_value, _ = fn(tf.constant(3))
      print(read_value.numpy())  # could be 0 or 3
      print(g.variables[0].numpy()) # always 3
      ```

    To ensure that ops in the function are executed (e.g. ops added to the
    `tf.GraphKeys.UPDATE_OPS` collection), include them in the function returns.

    Args:
      fn: a 1.X tensorflow function.
      signature: a possibly nested sequence of `TensorSpecs` specifying the
        shapes and dtypes of the arguments.
      name: an optional string name for the function. The function will be saved
        with key `name` in the `functions` dictionary.

    Returns:
      An eager-compatible function.
    """
    return self._wrap_function(fn, signature=signature, name=name)

  def _wrap_function(self,
                     fn,
                     args=None,
                     kwargs=None,
                     signature=None,
                     name=None):
    """Internal wrap function method with extended func_graph arguments."""
    # Wrap `fn` so variables route through the shared holder and returned
    # ops are filtered out (recorded in `returned_ops` by flat index).
    fn_with_filter_and_scope, returned_ops = _filter_returned_ops(
        self._variable_holder.call_with_variable_creator_scope(fn))

    func_graph.func_graph_from_py_func(
        None,  # Name is unused.
        fn_with_filter_and_scope,
        args=args,
        kwargs=kwargs,
        signature=signature,
        add_control_dependencies=False,
        func_graph=self.graph)

    # This code relies on questionable behavior from `func_graph_from_py_func`.
    # If an existing FuncGraph is passed into the `func_graph` arg, the inputs
    # and structured outputs are overwritten. Pretty sure this is a bug,
    # because structured outputs doesn't match up with the outputs...
    # Captures are appended at the end of `inputs`; slice them off to get
    # only the placeholders created for this trace.
    fn_inputs = self.graph.inputs[:-len(self.graph.captures)]

    # Return filtered ops to the flattened outputs.
    flat_fn_outputs = nest.flatten(self.graph.structured_outputs)
    for index, op in returned_ops.items():
      flat_fn_outputs[index] = op
    fn_outputs = nest.pack_sequence_as(self.graph.structured_outputs,
                                       flat_fn_outputs)

    name = name or fn.__name__
    wrapped_function = self._wrapped_function.prune(
        fn_inputs, fn_outputs, name, self.graph.structured_input_signature)
    self._functions[name] = wrapped_function
    return wrapped_function
@tf_export(v1=["wrap_function"])
def wrap_function(fn, signature, name=None):
  """Wraps the TF 1.x function fn into a graph function.

  The python function `fn` will be called once with symbolic arguments specified
  in the `signature`, traced, and turned into a graph function. Any variables
  created by `fn` will be owned by the object returned by `wrap_function`. The
  resulting graph function can be called with tensors which match the
  signature.

  ```python
  def f(x, do_add):
    v = tf.Variable(5.0)
    if do_add:
      op = v.assign_add(x)
    else:
      op = v.assign_sub(x)
    with tf.control_dependencies([op]):
      return v.read_value()

  f_add = tf.compat.v1.wrap_function(f, [tf.TensorSpec((), tf.float32), True])

  assert float(f_add(1.0)) == 6.0
  assert float(f_add(1.0)) == 7.0

  # Can call tf.compat.v1.wrap_function again to get a new trace, a new set
  # of variables, and possibly different non-template arguments.
  f_sub = tf.compat.v1.wrap_function(f, [tf.TensorSpec((), tf.float32), False])

  assert float(f_sub(1.0)) == 4.0
  assert float(f_sub(1.0)) == 3.0
  ```

  Both `tf.compat.v1.wrap_function` and `tf.function` create a callable
  TensorFlow graph. But while `tf.function` runs all stateful operations
  (e.g. `tf.print`) and sequences operations to provide the same semantics as
  eager execution, `wrap_function` is closer to the behavior of `session.run` in
  TensorFlow 1.x. It will not run any operations unless they are required to
  compute the function's outputs, either through a data dependency or a control
  dependency. Nor will it sequence operations.

  Unlike `tf.function`, `wrap_function` will only trace the Python function
  once. As with placeholders in TF 1.x, shapes and dtypes must be provided to
  `wrap_function`'s `signature` argument.

  Since it is only traced once, variables and state may be created inside the
  function and owned by the function wrapper object.

  Args:
    fn: python function to be wrapped
    signature: the placeholder and python arguments to be passed to the wrapped
      function
    name: Optional. The name of the function.

  Returns:
    the wrapped graph function.
  """
  # The holder intercepts variable creation during the single trace so the
  # variables outlive the trace and are owned by the returned wrapper.
  variable_holder = VariableHolder(fn)
  if name is None:
    func_graph_name = "wrapped_function"
  else:
    func_graph_name = "wrapped_function_" + name
  traced_graph = func_graph.func_graph_from_py_func(
      func_graph_name,
      variable_holder,
      args=None,
      kwargs=None,
      signature=signature,
      add_control_dependencies=False,
      collections={})
  return WrappedFunction(
      traced_graph, variable_holder=variable_holder, signature=signature)
def function_from_graph_def(graph_def, inputs, outputs):
  """Creates a ConcreteFunction from a GraphDef.

  Args:
    graph_def: A GraphDef to make a function out of.
    inputs: A Tensor name or nested structure of names in `graph_def` which
      should be inputs to the function.
    outputs: A Tensor name or nested structure of names in `graph_def` which
      should be outputs of the function.

  Returns:
    A ConcreteFunction.
  """

  def _imports_graph_def():
    importer.import_graph_def(graph_def, name="")

  # Trace a no-arg function that simply imports the GraphDef, then prune the
  # imported graph down to the requested input/output tensors.
  wrapped_import = wrap_function(_imports_graph_def, [])
  import_graph = wrapped_import.graph
  pruned_inputs = nest.map_structure(import_graph.as_graph_element, inputs)
  pruned_outputs = nest.map_structure(import_graph.as_graph_element, outputs)
  return wrapped_import.prune(pruned_inputs, pruned_outputs)
|
I grant a two-week trial period to individuals to try a bow before committing to a purchase. A check in the amount of $250 US will be required as a deposit. This check is held as an assurance, and will only be cashed pending a sale. Please review my Shipping Policy for more information.
For schools and orchestras I provide a number of bows from which to choose; please contact me directly if you are a representative of a school or orchestra and would like to explore such an arrangement.
|
#! /usr/bin/env python
#
# Copyright 2016 University of Oxford
#
# Author
# Name: Gary Ballantine
# Email: gary.ballantine at it.ox.ac.uk
# GitHub: AltMeta
# Distributed under terms of the MIT license.
"""
Authorises AFS via shelling out and using a kerberos keytab
and performing aklog
"""
import os, subprocess
from autopkglib import Processor, ProcessorError
__all__ = ["AFSAuth"]
class AFSAuth(Processor):
    """Obtains an AFS token by shelling out to kinit (using a kerberos
    keytab named by the KEYTABNAME/PRINCIPAL environment variables) and
    then running aklog."""

    input_variables = {
        'auth_method': {
            'description': 'keytab is the only option atm',
            'required': False,
            'default': 'keytab',
        },
        'aklog_path': {
            'description': 'Path to aklog binary',
            'required': False,
            'default': '/usr/local/bin/aklog',
        },
    }
    output_variables = {
        'test': {
            'description': 'for testing',
            'required': False,
        },
    }

    def gettoken(self):
        """Acquire a kerberos ticket via kinit, then an AFS token via aklog.

        Raises:
            ProcessorError: if KEYTABNAME or PRINCIPAL is unset, if
                aklog_path is missing, or if either command fails.
        """
        keytabname = os.environ.get("KEYTABNAME", None)
        principal = os.environ.get("PRINCIPAL", None)
        if keytabname is None:
            raise ProcessorError('Missing keytab environment variable')
        # Without this check a None principal would reach subprocess and
        # raise a confusing TypeError instead of a clear error message.
        if principal is None:
            raise ProcessorError('Missing principal environment variable')
        self.output('Using Keytab %s with principal %s'
                    % (keytabname, principal), verbose_level=3)
        self.output('Calling kinit ...', verbose_level=5)
        try:
            # check_call (unlike call) raises on a non-zero exit status, so
            # a failed authentication cannot be silently ignored.
            subprocess.check_call(["kinit", "-t", keytabname, principal])
        except Exception as kiniterror:
            raise ProcessorError('Problem running kinit %s' % kiniterror)
        aklog = self.env['aklog_path']
        if aklog is None:
            raise ProcessorError('Missing aklog_path setting')
        self.output('Calling aklog %s ...' % aklog, verbose_level=5)
        try:
            subprocess.check_call([aklog])
        except Exception as aklogerror:
            raise ProcessorError('Problem running aklog %s' % aklogerror)

    def main(self):
        """Entry point: validates the auth method and obtains the token."""
        auth_method = self.env['auth_method']
        if auth_method != 'keytab':
            raise ProcessorError(
                'Unsupported authentication method: %s' % (auth_method))
        self.gettoken()
if __name__ == '__main__':
    # NOTE(review): autopkg processors conventionally end with
    # PROCESSOR.execute_shell(); here the processor is only instantiated, so
    # running this file directly does nothing -- confirm this is intentional.
    PROCESSOR = AFSAuth()
|
Carlsbad's Premiere Carpet Cleaning Service!
For exceptional carpet cleaning in Carlsbad, look no further than us. For a mid-range price, we deliver a high value service. We know there is more to providing professional work than just getting good cleaning results. We work hard to ensure smooth and pleasant service. Things like talking to a helpful person on the phone, having cleaners who arrive on time, and professional, precise job results are important to you–that’s why we make an effort where those details are concerned.
Our technicians boast years of experience and training on top of international certification. The most difficult spots and spills can be conquered by our team. Each job is unique and they will explain any unusual conditions that may exist.
Our on-time guarantee reflects the high value we place on your time. We go out of our way to be on time. In the case of an unforeseen delay, we will give you a $25 gift card for the inconvenience.
A large part of our family of customers has been with us for years. We hope to become the company you rely on for all your cleaning needs. Feel free to call us at any time with your questions about spots and spills or general flooring maintenance.
We are prepared to answer your questions or set up an appointment. We are eager to treat you to the best in service.
|
# Form for the domens table
class DomensForm(Form):
    """Admin edit form for the ``domens`` table, one text field per column.

    NOTE(review): every field is required yet defaults to the literal
    string ``u'None'`` -- presumably a placeholder for SQL NULL; confirm
    against how the form data is persisted.
    """
    id = fields.TextField(u'id' , default=u'None', validators=[validators.required()])
    cdate = fields.TextField(u'cdate' , default=u'None', validators=[validators.required()])
    udate = fields.TextField(u'udate' , default=u'None', validators=[validators.required()])
    name = fields.TextField(u'name' , default=u'None', validators=[validators.required()])
    title = fields.TextField(u'title' , default=u'None', validators=[validators.required()])
    counter = fields.TextAreaField (u'counter' , default=u'None', validators=[validators.required()])
    description = fields.TextAreaField (u'description' , default=u'None', validators=[validators.required()])
    keywords = fields.TextField(u'keywords' , default=u'None', validators=[validators.required()])
    phone = fields.TextField(u'phone' , default=u'None', validators=[validators.required()])
    template_path = fields.TextField(u'template_path' , default=u'None', validators=[validators.required()])
    address = fields.TextField(u'address' , default=u'None', validators=[validators.required()])
# Form for the users table
class UsersForm(Form):
    """Admin edit form for the ``users`` table, one text field per column.

    NOTE(review): the password is edited as a plain ``TextField``; nothing
    visible here hashes or masks it -- verify handling before exposure.
    The ``u'None'`` defaults look like placeholders for SQL NULL; confirm.
    """
    id = fields.TextField(u'id' , default=u'None', validators=[validators.required()])
    first_name = fields.TextField(u'first_name' , default=u'None', validators=[validators.required()])
    last_name = fields.TextField(u'last_name' , default=u'None', validators=[validators.required()])
    login = fields.TextField(u'login' , default=u'None', validators=[validators.required()])
    email = fields.TextField(u'email' , default=u'None', validators=[validators.required()])
    password = fields.TextField(u'password' , default=u'None', validators=[validators.required()])
    role = fields.TextField(u'role' , default=u'None', validators=[validators.required()])
    active = fields.TextField(u'active' , default=u'None', validators=[validators.required()])
    cdate = fields.TextField(u'cdate' , default=u'None', validators=[validators.required()])
Published 04/25/2019 03:58:05 am at 04/25/2019 03:58:05 am in Electric Wood Burner Effect Fires.
electric wood burner effect fires gazco stockton 5 gas stove balanced flue with log effect fire electric wood burner effect firestone.
electric wood burner effect fireside,electric wood burner effect firestick,electric wood burner effect firestore,electric wood burner effect firestorm,electric wood burner effect firestone,electric wood burner effect fires, gas log gas log fires uk pictures of gas log fires uk, electric stoves for the home from dimplex fortrose optimyst electric stove, electric fire tv screenset tgc inset electric log effect fire lcd video flame effect electric fire savoy electric log effect fire , dovre electric stove dovre electric stove dovre electric, dimplex chevalier chv white electric stove optiflame effect fire dimplex chevalier chv white electric stove optiflame effect fire.
|
# Chimera script that places one spherical marker per particle. The original
# was 56 copy-pasted five-line stanzas; the marker data is now a table driven
# by a single loop, producing exactly the same marker sets in the same order.
import _surface
import chimera
try:
    import chimera.runCommand
except:
    pass
from VolumePath import markerset as ms
try:
    from VolumePath import Marker_Set, Link
    new_marker_set = Marker_Set
except:
    # Older Chimera versions: create marker sets through the dialog.
    from VolumePath import volume_path_dialog
    d = volume_path_dialog(True)
    new_marker_set = d.new_marker_set

marker_sets = {}
surf_sets = {}

_GRAY = (0.7, 0.7, 0.7)
_GREEN = (0, 1, 0)
_ORANGE = (1, 0.7, 0)

# One entry per particle: ((x, y, z) center, (r, g, b) color, radius).
_PARTICLE_MARKERS = [
    ((13012, 8631.32, 11090.1), _GRAY, 507.685),
    ((13877, 9048.87, 10761.3), _GRAY, 479.978),
    ((12113.2, 8961.57, 10062.5), _GRAY, 681.834),
    ((10056, 8884.11, 8907.61), _GRAY, 522.532),
    ((9414.66, 8846.33, 8530.96), _GREEN, 751.925),
    ((9608.39, 10969.3, 8523.77), _GRAY, 437.001),
    ((8060.51, 10707.2, 7411.06), _GRAY, 710.767),
    ((7549.91, 12021.5, 6425.36), _GRAY, 762.077),
    ((6882.09, 11947.1, 4985.25), _GRAY, 726.799),
    ((5665.35, 11809.5, 3601.1), _GRAY, 885.508),
    ((6065.3, 10648.6, 2353.08), _GRAY, 778.489),
    ((6710.12, 11458.7, 492.015), _GRAY, 790.333),
    ((7347.21, 12348.2, -1256.37), _GRAY, 707.721),
    ((8371.96, 12356.3, -16.2648), _GRAY, 651.166),
    ((6720.45, 12319.7, -225.849), _GRAY, 708.61),
    ((5510.13, 11963.1, 738.339), _GRAY, 490.595),
    ((5387.18, 11772.2, 2205.22), _GRAY, 591.565),
    ((4958.32, 11468.4, 3712.17), _GRAY, 581.287),
    ((5501.74, 12814.9, 4799.53), _GRAY, 789.529),
    ((4487.21, 12764.1, 5971.04), _GRAY, 623.587),
    ((2945.66, 12866.7, 6966.53), _GRAY, 1083.56),
    ((1576.07, 13682.3, 7619.42), _GRAY, 504.258),
    ((2094.41, 12355.5, 7436.96), _GRAY, 805.519),
    ((2472.54, 10614, 6287.82), _GRAY, 631.708),
    ((2275.44, 9200.72, 4677.96), _GRAY, 805.942),
    ((2110.01, 8543.95, 3872.56), _ORANGE, 672.697),
    ((2688.2, 6479.45, 5549.21), _GRAY, 797.863),
    ((2346.99, 4770.64, 6172.19), _ORANGE, 735.682),
    ((2063.94, 4918.25, 7382.58), _GRAY, 602.14),
    ((898.218, 5181.26, 9371.25), _GRAY, 954.796),
    ((1966.32, 5287.84, 9067.58), _GRAY, 1021.88),
    ((1619.21, 3722.08, 9071.97), _GRAY, 909.323),
    ((1698.9, 1985.06, 10547.8), _GRAY, 621.049),
    ((3081.43, 1509.46, 10720.7), _GRAY, 525.154),
    ((4513.52, 1779.17, 11208.1), _GRAY, 890.246),
    ((5630.34, 1636.18, 12573), _GRAY, 671.216),
    ((6005.41, 2413.96, 14027.9), _GRAY, 662.672),
    ((4718.21, 3372.32, 13784.8), _GRAY, 646.682),
    ((3604.62, 2350.52, 13450.1), _GRAY, 769.945),
    ((3524.31, 2154.19, 11460.5), _GRAY, 606.92),
    ((3878.54, 954.22, 11318.3), _GRAY, 622.571),
    ((3896.11, 2181.21, 10715.5), _GRAY, 466.865),
    ((4388.44, 2667.28, 11088.9), _GRAY, 682.933),
    ((3989.01, 2093.55, 10873.5), _GRAY, 809.326),
    ((2514.18, 2435.99, 9842.08), _GRAY, 796.72),
    ((2953.14, 3358.87, 7142.93), _GRAY, 870.026),
    ((3828.42, 2784.49, 5578.65), _GRAY, 909.577),
    ((4816.17, 2393.81, 5122.71), _GREEN, 500.536),
    ((5623.48, 576.744, 5103.53), _GRAY, 725.276),
    ((5830.56, -2063.82, 4883.1), _GRAY, 570.331),
    ((5146.08, -2244.87, 6415.63), _GRAY, 492.203),
    ((2883.08, -442.984, 6060.7), _GREEN, 547.7),
    ((3395.36, -120.583, 6565.48), _GRAY, 581.921),
    ((3967.13, -590.962, 8337.25), _GRAY, 555.314),
    ((4901.43, -401.329, 9552.23), _GRAY, 404.219),
    ((5714.31, 1256.76, 9531.74), _GRAY, 764.234),
]

for index, (center, color, radius) in enumerate(_PARTICLE_MARKERS):
    set_name = 'particle_%d geometry' % index
    if set_name not in marker_sets:
        marker_sets[set_name] = new_marker_set(set_name)
    s = marker_sets[set_name]
    mark = s.place_marker(center, color, radius)

for k in surf_sets.keys():
    chimera.openModels.add([surf_sets[k]])
|
It seems to be a tradition for him.
Malaysia’s been through some rough times, with riots, rallies and unhappiness with the Pakatan Harapan government.
But one man is doing his part to reach out and spread some cheer this Chinese New Year.
Datuk Seri Dr Zulkifli Mohamad Al-Bakri, the Mufti (Muslim legal expert) of the Federal Territories, shared photos of himself on Facebook, giving out gifts to his Chinese neighbours.
Screen shot from Mufti Wilayah Persekutuan’s Facebook page.
He also encouraged Malaysians to go back to the “essential foundations”, that humanity is all part of one big family.
“Create the beauty of Islam, spread the spirit of unity,” he added.
As of Feb. 6, 1:30 pm, the post had garnered over 10,000 reactions and 2,500 shares.
Most of the comments on his post commended Zulkifli on his generosity.
Hopefully more people will heed the Mufti’s message.
Top image adapted from Mufti Wilayah Persekutuan’s Facebook page.
|
# Authors: Mainak Jas <mainak.jas@telecom-paristech.fr>
#
# License: BSD (3-clause)
from functools import partial
from ...utils import verbose
from ..utils import (has_dataset, _data_path, _get_version, _version_doc,
_data_path_doc_accept)
# Convenience check for presence of the brainstorm resting-state dataset:
# `has_dataset` with the dataset name pre-bound.
has_brainstorm_data = partial(has_dataset, name='brainstorm.bst_resting')

# Summary text printed line-by-line by :func:`description` below.
_description = u"""
URL: http://neuroimage.usc.edu/brainstorm/DatasetResting
- One subject
- Two runs of 10 min of resting state recordings
- Eyes open
"""
@verbose
def data_path(path=None, force_update=False, update_path=True, download=True,
              *, accept=False, verbose=None):  # noqa: D103
    # Delegate to the shared downloader with the brainstorm-specific
    # dataset name and archive fixed.
    fetch_kwargs = dict(path=path, force_update=force_update,
                        update_path=update_path, name='brainstorm',
                        download=download,
                        archive_name='bst_resting.tar.gz', accept=accept)
    return _data_path(**fetch_kwargs)
# Build `data_path`'s docstring from the shared template, specializing the
# dataset name to the bst_resting variant.
_data_path_doc = _data_path_doc_accept.format(
    name='brainstorm',
    conf='MNE_DATASETS_BRAINSTORM_DATA_PATH').replace(
        'brainstorm dataset', 'brainstorm (bst_resting) dataset')
data_path.__doc__ = _data_path_doc
def get_version():  # noqa: D103
    # Version string as recorded for the bst_resting dataset.
    dataset_name = 'brainstorm.bst_resting'
    return _get_version(dataset_name)


get_version.__doc__ = _version_doc.format(name='brainstorm')
def description():
    """Get description of brainstorm (bst_resting) dataset."""
    # Emit the whole description in one call; joining the split lines with
    # newlines produces exactly the same stdout as printing line by line.
    print('\n'.join(_description.splitlines()))
|
The best thing about writing your own blog is that you get to choose who you write about and are not just restricted to new releases, so taking advantage of that fact here is a review of one of last years (2014) finest instrumental releases "All Black Rainbow Moon" by Les Lekin an album that made my top 25 releases for 2014 and is an album you should own!
Let's start off with a very brief rundown of the band before getting into the nitty gritty of a review.
Les Lekin are a trio out of Salzburg, Austria who formed around 2011 and are made up of Peter G. (guitar), Stefan W. (bass) and Kerstin W. (drums). As you can probably guess from the use of a single letter for their surnames, this band like to play their cards very close to their chests; despite the fact that these guys are on all the important social sites and also have their own web site, finding anything more than rank and serial number about them is a virtual nightmare — then again, it does give them an air of mystique.
. I won't go into an in depth track by track dissection of "All Black Rainbow Moon" except to say that this is an album that takes its listener on rollercoaster ride of highs and lows, lights and shades, exploding in a myriad of colours one minute then diving in to the darkest depths the next. The level of musicianship throughout the album is exceptional with Peter G ripping out finger burning solos, shimmering arpeggios and thunderous riffs from his humble six strings. Stefan W's bass playing is a thing of beauty and power, from chest quaking bass riffs that thunder and boom to fluid subtle lines that soothe and warm with their deep caress. Kerstin W is not a tub thumper of any description she has a control and deftness of touch that not only supports the stringed musicians in front of her but drives the sound forward with her exemplary sticksmanship, going from heavy rock bombast to jazzy intricacy in the blink of an eye.
When it comes down to it, most of you stoners, doomers and psychonauts who are reading this blog will already have snapped this album up when it was first released last year but if you have not then I urge you to head over to Bandcamp NOW and buy the CD or download a digital copy you will not regret it!
Mountainwolf are a trio out of Annapolis, MD, Chris Gipple (bass), Thomas Coster (drums) and Tyler Vaillant (guitar, vocals) and have just released their new album " Silk Road"
"Silk Road" blends together 70's heavy rock with 80's alt/grunge, throws in a little doom, a smattering of psych and whole lot of Mountainwolf to make this one of the best "rock" albums you will hear this year.
The album kicks off with "The Jam" five minutes and four seconds of stoner excellence incorporating loud quiet loud grunge dynamics, gritty stoner riffage and moments of psychedelic subtlety..
Next track "Wine x Weed" utilises a dirty fuzzed up doomy riff around a low key, mournful vocal that then descends into spacey atmospherics with Vailllant using a ton of effects to create a lysergic, cosmic mood while Gipple and Coster hold it all together underneath until it all kicks off again at the end.
"LSD" for me is the stand out track on the album with it's phased guitar riff repeating over and around a delicious vocal. Vaillant pulls out his best solo on the album here, slightly Iommi-esque and with plenty of bluesy feel.
"Mountainwolf" sees the band nod their heads towards "Pearl Jam" with a track that would not have sounded out of place on that bands album "Ten" as would the next two "Freedom" and "Heroin 1991"
"Via Amorosa" is a nice little groover that goes from grungey doom into cosmic madness in the final third.
"Hyphy Blues" is an all out rocker with a totally infectious guitar lick that sits over the main riff.
Closer "Via Dante" almost topples "LSD" for best track status, full of moody atmospheric psych and acid blues guitar pyrotechnics.
I love it when a band are unafraid to fuse genres together and I especially love it when they manage to do so as seamlessly as Mountainwolf have done with their new album "Silk Road"
Book Of Wyrms - Richmond's space metal quartet release three song demo.
Being an avid follower of Bandcamp I’ve come to realise the importance of a good demo as a way of showcasing a bands talents and future potential. Many a time I’ve come across a band who although showing a ton of promise have just not put in the time on their demos and on occasions these demos have been almost unlistenable. This is not the case with Book of Wyrms.
Remember this is a demo and when/if these guys get to make a full album these songs may be totally different or may disappear entirely but as a snapshot of a band who are at the beginning of their journey I highly recommend giving this a listen.
Rosy Finch are two women Elena García (Vocals/Bass guitar), Mireia Porto (Lead Vocals/Guitars) and one guy Lluís Mas (Drums) that play gritty hard/stoner rock flecked with elements of psych, grunge and sludge that in places borders on extreme but always manages to pull back from the edge before falling into chaos.
This month the band released their debut "Witchboro" a concept album, of sorts, that depicts the sounds of a mythical village of the same name, whether this works as concepts go i'm not sure but this does not become an issue as the songs are strong enough to hold their own as individual pieces.
Arrakis ~ Greek Psychonauts Release Debut "Ammu Dia"
There is something going on in Greece, I don't know if there is something in the water over there but the country that gave us the foundations of modern civilised society sure seem to produce the goods when it comes to good "underground " rock.
Arrakis are from Thessaloniki, Greece and were formed in 2012 with the sole intention of playing a form of rock music that would not be restricted to the blues based blueprints of American and English rock bands but would have a more European vibe with plenty of room for improvisation and freedom of expression. Comprising of three members Panagiotis Haris on guitar, Iraklis Dimitriadis on bass and Evaggelos Anastasiou on drums Arrakis have released a number of rough demos via Bandcamp and have recently released an EP "Sanatoriun" and a full album "Ammu Dia" both of which can also be found at Bandcamp.
"Ammu Dia" is a stunning mix of instrumental stoner/desert grooviness and heavy psych/krautrock complexity that is a myriad of colour, texture and beauty .Heavy in places but never brutal " Ammu Dia" never sits still, never gets stuck in a rut and there is a feeling that this music could at any minute descend into chaos if it were not for the three musicians pulling on the reins and steering it back to safer ground. A perfect example to this is the closing 12+ minute "Diplomacy?" a slow burning psych wigout that has Panagiotis Haris' guitar teasing out wailing wah pedal solos, screeching feedback and glorious lysergic licks while Dimitriadis and Anastasiou keep it all grounded with their excellent rhythm work, never letting the music get away from them and pulling it all back to earth when it starts to get a little too cosmic, superb!
If you like your music experimental, exciting and with a certain amount of freedom and if Colour Haze, Sungrazer etc.rock your musical boat then check these guys out you will not be disappointed.
Those of you out there who downloaded last years freebie EP "51973" from Spanish stoners Electric Valley will know that this bands music is a little more than just stoner by numbers, these guys are a little bit special.
Mario Garcia-guitar and Miky Simon-bass and vocals and have been together for about a year, a pretty short time considering the fact they have already released one EP and just recently finished and released their debut album "Multiverse"
"Multiverse" is a stunning debut that takes the promise and raw creativity shown on "51973" and refines and polishes it in the form of eight new tunes of absolutely essential stoner excellence.
Opener "Lizard Queen" sets the tone for the rest of the album with it's rocket engine intro leading into a salvo of drums, a delicious bass line and a glorious choppy guitar riff. This all opens the way for Miky Simons' vocal which has a gruff, smoky edge to it, not especially strong but with a soulful weariness that is a perfect fit for the bands sound.
Electric Valley are a band who play intelligent, well crafted stoner rock with touch of Soundgarden grunge/alt intensity and in my humble opinion have just released one of the best "rock" albums so far this year.
There’s a lot of stoner/desert bands out there who are experimenting with amalgamating a plethora of styles into their sonic cannons, adding some psych here a little doom there a touch of jazz, a little reggae and generally mixing it up. This is all great and can only be good for the scene as a whole but now and then I still get a yearning to hear some of that old school balls to the wall, fat juicy fuzzed up stoner/desert the way it sounded back in the day. Fusskalt deliver this in spades.
Fusskalt are from Aarhus, Denmark and consist of five dudes who take their lead from bands like Kyuss, Red Fang and Monster Magnet in other words they play stoner/hard rock with big chords, big riffs and truckloads of passion and attitude. Guitars are handled by Nick Jenson and Thomas Brandt, Jenson laying down the grit and fuzz on rhythm and Brandt serving up delicious solos and licks over the top. Beneath and below are the powerful engine room of Janus Kinke Pederson(bass) and Jonas Emil Nielson (drums) keeping it tight and solid as they drive the music forward. Vocals are handled by Lars Frederiksen whose gritty, slightly throaty roar fits the bands sound like a hand in a velvet glove, forceful and strong yet gruffly mellow when a songs dynamic calls for it.
Fusskalt recently released their third recording “Overdrive” a thunderous collection of stoner anthems that showcases not only their musical chops but also shows they can write a good tune …or five. Tunes that are full of infectious hooks and melodies and that go to prove this is not just a band who are going to rely on a few riffs to see them through but a band who know how to construct a song from top to bottom and making it work on every level.
It is not often I get to introduce a band who come from my part of the world and even rarer that the band ply their trade in the doom genre.
Myopic Empire are from Southend-on-Sea in South Essex, UK, an area better known for its connections with blues and blues-rock, spawning bands like Dr. Feelgood, The Kursaal Flyers and both guitarist Robin Trower and Procol Harum vocalist Gary Brooker, among others.
Made up of two guys, Dean Derron and Tom (no last name given) , who play guitars and basses, and aided by various bits and bobs of technology Myopic Empire create a doom sound that is very bottom heavy and very infectious. Vocals are handled by Derron and are clean, rugged and in places call to mind the desert grittiness of John Garcia, albeit with a South Essex accent, and are a perfect foil to the six string mayhem circling around them.
To date Myopic Empire have released two EP’s Myopeia (2013) and Doom (2015) both of which blend rumbling doom with elements of stoner metal distortion and Kyuss type heavy desert fuzz and I would recommend both of them to anyone who likes their doom a little sandy.
Blood Cauldron is an EP you need to own, an EP that promises so much for the future and an EP that actually blew a speaker on my PC. To be honest I think the speaker was on its way out anyway but never mind this is still a super EP.
The band who created this superb EP go by the name of Sisters Ov The Blackmoon and play a blend of gritty bluesy stoner and occult doom-ish rock. Exploding out of Los Angeles the band consist of Josh Alves – drums, Andrew Vega – guitar, Dan Schlaich – guitar and Jared Anderson – bass but it is the powerful bluesy tones of Sasha Wheatcroft on vocals that sets this band apart from others ploughing a similar musical furrow.
Blood Cauldron sets the tone for the EP with “Haunt”a barnstormer of a track that opens with the sound of cicadas chirping and an owl hooting and segues into a rolling guitar riff with a horror movie soundbite over its top, then the guitars pick up the volume, drums crash and pound just as it gets gnarly everything drops down for the verse. Here is where you first get the “Wheatcroft” experience. With a voice that mixes Beth Hart’s bluesy holler with Miny Parsonz’ (Royal Thunder) smokey stoner howl, Ms. Wheatcroft is a revelation, her voice dripping with passionate power and control making every phrase and inflection count. One person does not make a band though and the other “sisters” more than prove their worth on this collection of songs. Vega and Sclaich complement each other perfectly on guitars coming on with earth shattering riffage and sublime solos and every now and then breaking into Lizzy-esqe twin melodies. The bottom end is held up just as well too with Alves and Anderson locking in as tight as Scotsman’s purse and creating a solid base for everything going on above them.
This EP shows a band on fire and I for one cannot wait for a full album.
One of the reasons why I joined Bandcamp and why also I started this blog was to find and write about bands like Druid and to let others know about them because music this good should not be allowed to fade away into obscurity.
Druid are from Athens .Ohio and got together in 2014 and for a band who have only been together for roughly a year they should not be this good….but they are. Consisting of Kaleb Shaffner: Guitar, Vocals, Max Schmitz: Bass, Vocals and recent addition Adam Mayhall on Drums ( original drummer Lous Eyerman plays on both "Druid" and "Paper Squares") Druid take elements of 70’s proto –metal and blend it with 00’s stoner and doom and throwing a little blues and psych in. just to make things interesting.
In March this year (2015) they released their self- titled debut album, an absolutely stunning set of tunes that I called “a fantastic album” in my review on Bandcamp. Full to the brim with great tunes and great playing Druid also paid tribute to their influences by tagging the “Luke’s Wall” riff from Sabbath’s “ War Pigs” on to their song “Mushroom Fields”. A bold move but it worked a treat. The biggest nod to their heroes though is on the opening track “Roots and Beings” whose intro welds Hendrix’s “Voodoo Chile” to Status Quo’s “Pictures of Matchstick Men” Having said all that do not be fooled into thinking these guys are just a bunch of plagiarists and pirates, when it comes to writing good tunes these guys are very good. Take for instance “Rid This From My Eyes” a stunning song that starts off with a filthy bass intro before blowing the roof off when it goes into the oh so proto riff. They can even lay it out as on “No Good Man” with its Lynyrd Skynyrd meets Grand Funk vibe. Vocals are shared between Kaleb and Max and both are fine singers and both bring something good to the table individually but when they harmonise it sends shivers down the spine. Special mention has to go to the sterling production work of Greg Omella who manages to capture that organic 70's sound that prevails throughout the album, he also plays rhythm guitar on the album and sings lead on both " No Good Man" and "Roots and Beings".
Today (03 June 2015) saw the band release a three song EP “Paper Squares” which highlights the heavier side to their sound with two original tunes and a cover of Sleep’s “Sonic Titan”. The two self- penned songs “Altarsong” and “Paper Squares” are powerhouse displays of proto-metal/acid rock madness that evoke memories of Blue Cheer and The JPT Scare Band, full of scorching guitar solo’s and heavy bluesy riffs. The cover of “Sonic Titan” shows these guys can do stoner doom and do it well.
I really hope this band manage to climb the hurdles that road weariness, ego’s, music business politics etc. will inevitably throw in front of them, and stick together as I would love to see how this band progress.
Hamburg's Murder of Reality return with their third and best EP to date.
Musically "Walking Dead" owes a large debt to Heaven & Hell's "The Devil You Know" album; it has that same more mature, darker riffage that Iommi employed at that time. Vocally, though, Captain Creek is his own man — powerful, clear and with just the right amount of epic doom phrasing mixed in with a little grit in the upper register.
The first track "Walking Dead" is a brooding mid tempo doomy observation on the futility of people marching to the beat of large corporations, following a path mapped out for us and excepting our lot in life. It is this track that highlights that Heaven & Hell comparison with it's deep rumbling bass line leading the way before the guitar explodes into the main riff then collapses back down for the verse. The vocal for this song is just sublime, Captain Creek just nails the balance of frustration, anger and menace that the lyrics call for. This has got to be my favourite Murder of Reality song to date.
Track two "Pineal Variations" lyrically reminds me a little of Sabbath's "Into The Void", but that is where the comparison ends. "Variations" employs a superb stuttering riff backed up by some tight solid drumming that I defy you not to nod along to. For the first time on this record Captain Creek sounds a little Ozzy-ish with his delivery, albeit with a little more grit. There is a great use of soundbites in the final third of this tune, a trend often overused in rock music but it works perfectly here.
Abyss, the third and final track, rounds things off nicely with a stoner paced groove and a slight nwobhm vibe. Captain Creek goes all out on the vocal here, never letting up, and Adam Lake rips off a stunning Iommi-esque solo while all the time Jörn Dackow on drums keeps everything grounded with exceptional timing and skill.
Limb To Limb have released their second EP “For Machines” a five track celebration of all things classic/hard rock that again highlights the truly wonderful golden pipes of vocalist Charlotta Ekebergh whose strong clean bluesy tones take the bands sound to whole new level. Don't think that this band are a one woman show though as the rest of the guys are no slouches either serving up delicious slabs of groove laden rock that is slick, classy but with enough grit to please any stoners or doomers who might be thinking of checking these guys out. The twin guitars of Vinnie Holmes and Nicos Christodoulou rip chords and solo’s that both burn and smoulder while Massimo Tortella (bass) and Ben Nicoll (drums) create a bedrock of rhythms that'll have you leaping around like a kangaroo with a wasp in it's pouch.
ElCam ~ French Instrumental Duo Release New Album!
Rock duo’s seem to be ten a penny now days, and I suppose it makes a lot of sense really, less equipment to haul around more room in the van while on tour and an even split when it comes to getting paid. In ElCam’s case, being an instrumental band, there is also no having to deal with a singer’s enormous ego!.... Win,win.
ElCam are from Sains-en-Gohelle in the Nord-Pas-de-Calais region of France and to anyone who is familiar with their 2013 album “Orchard” will know that these guys play hard fuzz pedal driven instrumental stoner rock that belies the fact that the noise you are hearing comes from just two guys, Aurel on drums and Jey on guitar.
The guys have just released their new album “Shaft”, via Bandcamp, a collection of six of the most fuzzilicious, spine tingling jams you’re likely to hear from two men this side of Christmas. Don’t make the mistake of lumping these guys in with the likes of the White Stripes or the Black Keys, oh no, aside from having no colour associated with their name these guys are more like a two man Karma to Burn than Jack or Dan’s outfits, just listen to the opening track “Life in the Ruts” to confirm that fact. Killer stuff!!!!!
|
'''
Created on 14/05/2013
@author: facuq
'''
from numpy import *
from scipy import ndimage
import os;
from Utils import is_hidden
from matplotlib.image import imread
from PIL import TiffImagePlugin
def imsave(fname, arr, vmin=None, vmax=None, cmap=None, format=None,
           origin=None, dpi=100):
    """
    Saves a 2D :class:`numpy.array` as an image with one pixel per element.
    The output formats available depend on the backend being used.

    Arguments:
      *fname*:
        A string containing a path to a filename, or a Python file-like object.
        If *format* is *None* and *fname* is a string, the output
        format is deduced from the extension of the filename.
      *arr*:
        A 2D array.

    Keyword arguments:
      *vmin*/*vmax*: [ None | scalar ]
        *vmin* and *vmax* set the color scaling for the image by fixing the
        values that map to the colormap color limits. If either *vmin* or *vmax*
        is None, that limit is determined from the *arr* min/max value.
      *cmap*:
        cmap is a colors.Colormap instance, eg cm.jet.
        If None, default to the rc image.cmap value.
      *format*:
        One of the file extensions supported by the active
        backend. Most backends support png, pdf, ps, eps and svg.
      *origin*
        [ 'upper' | 'lower' ] Indicates where the [0,0] index of
        the array is in the upper left or lower left corner of
        the axes. Defaults to the rc image.origin value.
      *dpi*
        The DPI to store in the metadata of the file. This does not affect the
        resolution of the output image.
    """
    # Local imports keep matplotlib (and the Agg backend) optional for
    # callers of this module that never save images.
    from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
    from matplotlib.figure import Figure
    # One pixel per array element: figure size in inches = pixels / dpi.
    figsize = [x / float(dpi) for x in (arr.shape[1], arr.shape[0])]
    fig = Figure(figsize=figsize, dpi=dpi, frameon=False)
    # Constructing the canvas attaches it to the figure (needed by savefig);
    # the instance itself does not need to be kept around.
    FigureCanvas(fig)
    fig.figimage(arr, cmap=cmap, vmin=vmin, vmax=vmax, origin=origin)
    fig.savefig(fname, dpi=dpi, format=format)
class ImageReader(object):
    '''
    Loads measurement images, plus an optional white-reference image, from a
    folder.  Files whose name contains "blanco" (Spanish for "white") are
    treated as the white reference; every other non-hidden .tif/.png file
    whose name contains neither "total" nor "~" is a measurement image.
    '''

    def __init__(self):
        '''
        Constructor -- the reader keeps no state; the class only groups the
        reading helpers.
        '''

    def read_all(self, folder_path):
        """Return a tuple ``(images, white)`` read from *folder_path*."""
        images = self.read_images(folder_path)
        white = self.read_white(folder_path)
        return (images, white)

    def is_image(self, filepath):
        """Return True if *filepath* looks like a measurement image.

        NOTE(review): the base name is extracted by splitting on a Windows
        path separator ('\\'); os.path.basename would be portable -- kept
        as-is to preserve behaviour on the paths this project uses.
        """
        path, ext = os.path.splitext(filepath)
        filename = path.split('\\')[-1]
        ext = ext.lower()
        # Membership test replaces the original list.count(ext) > 0 idiom.
        return (filename.find("total") == -1 and filename.find("~") == -1
                and not is_hidden(filepath) and ext in ('.tif', '.png'))

    def obtain_filenames(self, path):
        """Return full paths of all measurement images found in *path*."""
        def image_file(filename, filepath):
            # Exclude the white-reference ("blanco") file.
            return filename.find("blanco") == -1 and self.is_image(filepath)
        files = [os.path.join(path, f) for f in os.listdir(path)
                 if image_file(f, os.path.join(path, f))]
        return files

    def obtain_white_filename(self, path):
        """Return the path of the white-reference image, or None if absent."""
        def image_file(filename, filepath):
            return (filename.find("blanco") != -1 and filename.find("~") == -1
                    and not is_hidden(filepath))
        files = [os.path.join(path, f) for f in os.listdir(path)
                 if image_file(f, os.path.join(path, f))]
        if len(files) == 0:
            return None
        else:
            # If several candidates exist, the first directory entry wins.
            return files[0]

    def read_image(self, path):
        """Read one image, rotate it -90 degrees and trim one pixel column
        from each side."""
        print(path)  # progress feedback; parenthesised for py2/py3 compat
        return ndimage.rotate(imread(path), -90)[:, 1:-1, :]

    def read_images(self, folder_path):
        """Read every measurement image in *folder_path* into a list."""
        files = self.obtain_filenames(folder_path)
        # List comprehension behaves like py2 map() and stays a list on py3.
        return [self.read_image(f) for f in files]

    def read_white(self, folder_path):
        """Read and transpose the white-reference image, or None if missing."""
        file_name = self.obtain_white_filename(folder_path)
        if file_name is None:
            return None
        else:
            return transpose(imread(file_name))
# Module is import-only; there is no demo or CLI entry point.
if __name__ == '__main__':
    pass
|
විස්තරය Learning Goals: Students will be able to Compare and contrast “light photons” and “infrared photons”. Identify what happens to light photons when they get to Earth and why the temperature of the earth and its atmosphere changes. Design experiments to observe how clouds change the photons’ behavior. Design experiments to observe how greenhouse gases change the photons’ behavior. Compare and contrast cloud behavior and greenhouse gas behavior. Use the Photon Absorption tab to identify if molecules are Greenhouse Gases and give the microscopic evidence that supports your ideas. Explain why the inside of a building or car is sometimes a different temperature than the outside. Extension: Discover when the “Ice Age” was and what has changed about the composition of the greenhouse gases.
|
# -*- coding: utf-8 -*-
# Copyright (c) 2014, Mayo Clinic
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# Neither the name of the Mayo Clinic nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
# OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
# OF THE POSSIBILITY OF SUCH DAMAGE.
""" Function to build the SNOMED CT Transitive Children file -- a helper table that
returns the number of children given a source concept and depth
"""
from rf2db.db.RF2DBConnection import RF2DBConnection
from rf2db.db.RF2FileCommon import RF2FileWrapper
from rf2db.db.RF2TransitiveClosure import TransitiveClosureDB
class TransitiveChildrenDB(RF2FileWrapper):
    """Helper table giving, per (parent concept, depth), the number of children.

    Derived from the transitive closure table: ``count`` holds the number of
    descendants of ``parent`` reachable within ``depth`` levels.
    """

    # Name of the backing SQL table.
    table = 'transitive_children'
    # Wrapper class for the source (transitive closure) table.
    closuredb = TransitiveClosureDB

    # DDL: one row per (parent concept id, depth); count defaults to 0 and is
    # filled in by loadTable().
    createSTMT = ("CREATE TABLE IF NOT EXISTS %(table)s (\n"
                  " `parent` bigint(20) NOT NULL,\n"
                  " `depth` int NOT NULL,\n"
                  " `count` int NOT NULL DEFAULT 0,\n"
                  " PRIMARY KEY (parent, depth));"
                  )

    def __init__(self, *args, **kwargs):
        # Plain delegation to the generic RF2 file wrapper.
        RF2FileWrapper.__init__(self, *args, **kwargs)

    def loadTable(self, rf2file):
        """Populate the transitive-children table from the closure table.

        Requires the transitive closure table to be loaded first; prints an
        error and returns without changes otherwise.
        """
        db = RF2DBConnection()
        print("Populating transitive children table")
        tcdb = self.closuredb()
        if not tcdb.hascontent():
            print("Error: Transitive children load requires transitive closure table")
            return
        tname = self._fname
        tcdbname = TransitiveClosureDB.fname()
        # Seed one row (count=0) per distinct (parent, depth) in the closure.
        db.execute_query("""INSERT IGNORE INTO %(tname)s
        SELECT DISTINCT parent, depth, 0 FROM %(tcdbname)s""" % locals())
        db.commit()
        print("Computing number of children")
        # For each seeded (parent, depth), count the closure rows for that
        # parent whose depth does not exceed it, and store the total.
        db.execute_query("""UPDATE %(tname)s t,
        (SELECT c.parent, c.depth, count(t.parent) AS dc
        FROM %(tcdbname)s t, %(tname)s c
        WHERE t.parent=c.parent AND t.depth<=c.depth
        GROUP BY c.parent, c.depth) tc
        SET t.count = tc.dc
        WHERE t.parent=tc.parent AND t.depth=tc.depth""" % locals())
        db.commit()

    def numDescendants(self, sctid, maxDepth=0, **_):
        """Return the number of descendants of *sctid*, optionally capped at
        *maxDepth* levels (0 means unlimited)."""
        # The following assumes that count can't increase as depth increases
        # NOTE(review): sctid and maxDepth are interpolated directly into the
        # SQL string -- an injection risk if they can come from untrusted
        # input; confirm callers validate them.
        query = "SELECT max(count) FROM %s WHERE parent = %s " % (self._fname, sctid)
        if maxDepth:
            query += " AND depth <= %s " % maxDepth
        db = RF2DBConnection()
        # NOTE(review): execute() is used here while loadTable uses
        # execute_query(); presumably both exist on RF2DBConnection -- confirm.
        db.execute(query)
        # The connection object is iterable over result rows.
        return next(db)
|
To reflect the reduction in fuel prices, HMRC have issued new advisory fuel rates for employees driving employer provided cars. These take effect for all journeys undertaken from 1 January 2009, so employers using the advisory rates should advise affected employees and update any expense forms as soon as possible. The advisory fuel rates should be used for journeys undertaken on or after 1 January 2009.
|
import os
import json
from time import sleep
from subprocess import Popen, PIPE
from datetime import datetime
from sys import platform
# Pick the exiftool binary name for the host OS.  sys.platform is "win32"
# on Windows, "linux"/"linux2" on Linux and "darwin" on macOS.  The original
# test (`"win" in platform`) wrongly matched macOS too -- "darwin" contains
# "win" -- and so selected the Windows binary there.
if platform.startswith("win"):
    exiftool_location = "exiftool.exe"
elif platform.startswith("linux"):
    exiftool_location = "exiftool"
else:
    # macOS and other POSIX systems use the plain binary name.
    exiftool_location = "exiftool"
def getEXIFTags(file_name):
    """Return the EXIF tags of *file_name* as a dict (via ``exiftool -j``).

    On any failure (exiftool missing, file unreadable, bad JSON) the string
    "failed to load" is returned -- kept for backward compatibility with
    existing callers that compare against it.
    """
    try:
        # shell=False: passing an argument *list* together with shell=True
        # drops the arguments on POSIX (only "exiftool" itself would run).
        p = Popen([exiftool_location, '-j', file_name],
                  stdin=PIPE, stdout=PIPE, stderr=PIPE)
        output, err = p.communicate(b"")
        # exiftool -j emits a JSON array with one object per input file.
        tags = json.loads(output)[0]
    except Exception:
        tags = "failed to load"
    return tags
def setEXIFTag(file_name, tag='Comment', info='8888888-8888888-8888888-888888888888'):
    """Write a single EXIF *tag* with value *info* on *file_name* via exiftool.

    Returns a ``(stderr, stdout, command)`` tuple on success, or "" if the
    subprocess could not be run.  Removes the ``<file>_original`` backup that
    exiftool leaves behind.

    NOTE(review): the command line is built by string concatenation, so a
    crafted file name could inject shell commands; confirm inputs are
    trusted before using this on external data.
    """
    # Double quotes are stripped from the value to keep the quoting intact.
    cmd = exiftool_location + ' -' + tag + '="' + info.replace('"', '') + '" "' + file_name + '"'
    try:
        p = Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE, shell=True)
        output, err = p.communicate(b"")
        result = (err, output, cmd)
    except Exception:
        result = ""
    # exiftool keeps a backup copy named "<file>_original"; delete it.
    if os.path.isfile(file_name + '_original'):
        os.remove(file_name + '_original')
    return result
def setEXIFTags(file_name, tags={"XPComment": "Test complete1!", "Comment": "Test Complete2"}):
    """Write several EXIF tags on *file_name* in one exiftool invocation.

    *tags* maps tag names to values.  Returns a ``(stderr, stdout, command)``
    tuple on success, or "" if the subprocess could not be run.  Removes the
    ``<file>_original`` backup that exiftool leaves behind.

    NOTE(review): the default *tags* dict is a mutable default argument; it
    is only read here, but callers must not mutate it.  The command line is
    built by string concatenation -- shell-injection risk on untrusted input.
    """
    tag_string = ""
    for key, value in tags.items():
        # Double quotes are stripped from each value to keep quoting intact.
        tag_string = tag_string + ' -' + key + '="' + str(value).replace('"', '') + '"'
    cmd = exiftool_location + tag_string + ' "' + file_name + '"'
    try:
        p = Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE, shell=True)
        output, err = p.communicate(b"")
        result = (err, output, cmd)
    except Exception:
        result = ""
    # exiftool keeps a backup copy named "<file>_original"; delete it.
    if os.path.isfile(file_name + '_original'):
        os.remove(file_name + '_original')
    return result
def increaseRating(file_name):
    """Increase the EXIF Rating of *file_name* by one (capped at 5).

    If the file has no Rating tag yet, it is set straight to "5".  Returns
    the setEXIFTag() result, or None when the rating is already 5 or the
    tags could not be read.
    """
    t = getEXIFTags(file_name)
    if not isinstance(t, dict):
        # getEXIFTags returns the string "failed to load" on error; the
        # original code crashed here calling .keys() on that string.
        return None
    if "Rating" in t.keys():
        if not str(t['Rating']) == "5":
            r = str(int(t['Rating']) + 1)
            return setEXIFTag(file_name, "Rating", r)
    else:
        return setEXIFTag(file_name, "Rating", "5")
def decreaseRating(file_name):
    """Lower the EXIF Rating of *file_name* by one, floored at 0.

    Returns the setEXIFTag() result, or None when the rating is already
    0 or the tags could not be read.
    """
    t = getEXIFTags(file_name)
    if not isinstance(t, dict):
        # getEXIFTags() signals failure with the string "failed to
        # load"; the original code crashed here with AttributeError.
        return None
    if "Rating" in t:
        if str(t['Rating']) != "0":
            return setEXIFTag(file_name, "Rating", str(int(t['Rating']) - 1))
    else:
        # No rating present: write the minimum, matching the original.
        return setEXIFTag(file_name, "Rating", "0")
# NOTE(review): removed three lines of leftover module-level code
# (`sleep(1)` followed by a cleanup of `fn + '_original'`).  The name
# `fn` is never defined in this module, so merely importing it raised
# NameError.  The backup-file cleanup already happens inside
# setEXIFTag() and setEXIFTags().
|
Fifth place Heath were looking to put things right after the previous week’s uncharacteristically lacklustre display against Horsham, although Thanet had their eyes on a fifth straight league win in a row, form which had moved them up to seventh place in London South East 2 Division.
What transpired was in fact very much one-way traffic with a confident Heath side scoring tries from all over the park and at times showing a sixth sense to put their supporting players into space.
The try fest was kicked off by a typical sniping run from Man of the Match Brett Menefy. The scrum half, who was a constant threat to Thanet throughout the afternoon, shot down the narrow side to touchdown an individual try for 5-0. On ten minutes the Heath backs cut loose and full back Casey Calder cut back in on the angle for the score which he then converted for 12-0.
Thanet recovered from this setback and started to play some rugby of their own but had little change from the suffocating Heath defence. A penalty chance for Thanet slid wide, but they were eventually on the board with a more kickable penalty for 12-3 on 28 minutes.
At this stage it looked as though the game could be a tight contest but Heath upped their game even further and put the contest to bed in a 10 minute spell before half time. First Calder went over again for his second try (17 -3) followed by second row Hugo McPherson outsprinting the defence for a try in the corner for 22-3 and the bonus point.
From the restart Heath were back for more, this time quick hands by the backs from deep in their own 22 after the forwards had secured fast ruck ball allowed Jack Lucas to break and put centre Robbie Fotheringham in for the try. Calder converted for a 29-3 lead at the interval.
There was no easing of the tempo in the second half as Heath continued to play champagne rugby whatever the field position. Calder danced through tackles to dive over for his hat-trick and promptly handed over kicking duties to Lucas to knock over the conversion for 36-3.
An end to end move involving inter-passing from backs and forwards saw flanker Steve Doku bust a gut to get on the end of a scoring pass and outsprint the defence for 41-3.
Another attack from their own half saw a break from skipper Gareth Fergusson offload to McPherson, who again out-paced the defence but, not fancying a dive for the corner, flicked an inside pass to Jamie Diggle in the in-goal area to score under the posts. This time Fotheringham added the extras for 48-3.
There was no let up for Thanet who kept tackling but had no way of stopping the red and black tide and another incisive attack saw the ball moved to the left wing where James Flicker outstripped the cover to score his debut try for the senior team and a 53-3 scoreline.
Another darting break from the livewire Menefy opened up Thanet once again for McPherson to score his second of the day, converted by Fotheringham for 60-3.
The final try of the game came after more slick passing between backs and forwards opened up space for Max Drage to bounce his way through defenders and score a leisurely try in front of the cameras to record a highest score of the season for Heath and a 65-3 win.
Heath had been threatening to put this sort of score on a team all season and everything finally clicked, although to be hyper-critical they still managed to leave a couple of tries on the pitch and missed some kicks, without which the final score could have been close to 100 points. This was Heath’s biggest win this season, with the impressive thing about the performance being the interaction between the forwards and backs, with all players not only knowing their jobs but executing the basics and playing for each other. An outstanding show of rugby to put on for the large crowd of attentive sponsors, supporters and guests that had gathered at the Clubhouse.
Next week sees Heath’s final game of the campaign away at Charlton Park as they still try to edge their way up to fourth having made sure of fifth place with this latest bonus point win.
|
#!/usr/bin/env python
#
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from testlib import mox
import unittest
from mapreduce import context
from mapreduce import operation as op
class IncrementTest(unittest.TestCase):
  """Unit test for the counter Increment operation."""

  def testIncrement(self):
    """Applying Increment must forward name and delta to ctx.counters."""
    mocker = mox.Mox()
    fake_ctx = context.Context(None, None)
    fake_ctx.counters = mocker.CreateMock(context.Counters)
    # Record the expected interaction: one increment of "test" by 12.
    fake_ctx.counters.increment("test", 12)
    mocker.ReplayAll()
    increment_op = op.counters.Increment("test", 12)
    try:
      # Exercise the operation, then confirm the recorded call happened.
      increment_op(fake_ctx)
      mocker.VerifyAll()
    finally:
      mocker.UnsetStubs()
# Run the test suite when this module is executed directly.
if __name__ == '__main__':
  unittest.main()
|
This article elaborates on the term Logos in two fictitious letters of Candidus, which Marius Victorinus wrote to present Arian points of view concerning the Trinitarian debate in the middle of the 4th century. The article investigates these two short letters and their historical and theological sources to demonstrate Marius Victorinus’ knowledge and understanding of the Arian controversy and the mystery of the Triune God. Although he wrote these letters himself, this research seems to be particularly important for the interpretation of Marius Victorinus’ theological views and arguments presented in his writings against the Arians, in which he undertakes the most difficult questions concerning the unbegotten and simultaneously begetting God.
|
"""Client Interface for Pygame.
Pygame used to parse keyboard and mouse commands as well as display
images in a window
"""
from PygameClientInterface import PygameClientInterface
class JoystickClientInterface(PygameClientInterface):
    """Pygame client interface driven by a joystick.

    Extends PygameClientInterface with joystick input: stick axes are
    translated into velocity/rotation commands sent to the server via
    the inherited XML-RPC proxy (``self._proxy``).
    """
    def __init__(self, server_uri, update_image=True, joystick_id=0,
                 has_control=True):
        # Initialise the selected joystick before the base class sets up
        # the XML-RPC connection and the rest of pygame.
        import pygame
        pygame.joystick.init()
        self.__joystick = pygame.joystick.Joystick(joystick_id)
        self.__joystick.init()
        # Scale factor applied to the raw axis values.
        self.__velocity_factor = 1.
        self.__has_control = has_control
        # Use base class to setup XMLRPC server
        PygameClientInterface.__init__(self, server_uri, update_image)
    def hasControl(self):
        """Return True if user wants to send control commands.
        Tab key and shift-tab toggle this.
        """
        # Check if control flag should be toggled.
        import pygame
        if pygame.key.get_pressed()[pygame.K_TAB]:
            # Plain Tab grabs control; Shift+Tab (left shift) releases it.
            self.__has_control = not (
                pygame.key.get_mods() & pygame.KMOD_LSHIFT)
            if self.__has_control:
                print('Take control')
            else:
                print('Release control')
        return self.__has_control
    def drive(self):
        """Send velocity/rotation commands derived from the stick axes."""
        # NOTE(review): axis numbering is device-specific — this assumes
        # axis 1 is the pitch stick and axis 2 the yaw/twist axis; confirm
        # for the joystick model in use.  Values are negated so pushing
        # the stick forward yields a positive pitch command.
        pitch = -self.__joystick.get_axis(1) * self.__velocity_factor
        yaw = -self.__joystick.get_axis(2) * self.__velocity_factor
        self._proxy.setVel(0, pitch, 0)
        self._proxy.setRot(yaw)
    def processClients(self):
        """Run one client step: drive/set waypoints while in control,
        then delegate to the base-class client processing."""
        exclusive_control = self.hasControl()
        if exclusive_control:
            self.drive()
            self.setWaypoint()
        return PygameClientInterface.processClients(
            self, exclusive_control=exclusive_control)
    def setWaypoint(self):
        """Process waypoint setting functions."""
        import pygame
        # The 'y' key asks the server to record the current waypoint.
        if pygame.key.get_pressed()[pygame.K_y]:
            self._proxy.setWayPoint()
|
Do you have an amazing singing voice? If the answer is “yes,” Marvin Sapp is looking for you for his new talent contest.
The Grammy nominated and Stellar Award winning artist has decided to launch this contest “So You Wanna Record with Marvin Sapp,” where the winner will record a digital single with him. This contest will take place on March 27th-March 29th in Grand Rapids, Michigan.
Sapp is the senior pastor of Lighthouse Full Life Center and recently celebrated his 15th anniversary as a pastor there. He also is the Metropolitan Bishop within the Global United Fellowship and is overseeing more than 100 churches in 19 states.
Lastly, the registration for this competition is open now. For more information on this contest visit here and good luck!
|
from dragonfly import (Grammar, AppContext, MappingRule, Dictation,
Key, Text, FocusWindow, IntegerRef, Function)
#---------------------------------------------------------------------------
# Create this module's grammar and the context under which it'll be active.
# Active when the foreground app's executable is 'java' (PyCharm runs on
# the JVM) and the window title contains 'py'.
# NOTE(review): the title filter 'py' is loose and may match other Java
# windows — confirm it is specific enough in practice.
context = AppContext(executable='java', title='py')
grammar = Grammar('pycharm Python commands', context=context)
#---------------------------------------------------------------------------
# Create a mapping rule which maps things you can say to actions.
#
# Note the relationship between the *mapping* and *extras* keyword
# arguments. The extras is a list of Dragonfly elements which are
# available to be used in the specs of the mapping. In this example
# the Dictation("text")* extra makes it possible to use "<text>"
# within a mapping spec and "%(text)s" within the associated action.
# Spoken-command -> keystroke mapping for editing Python in PyCharm.
example_rule = MappingRule(
    name='pycharm Python commands',
    mapping={
        'Document comment': Text('"""') + Key('enter'),
        'dunder <text>': Text('__%(text)s__'),
        'defun': Text('def') + Key('tab'),
        'Set trace': Text('import pdb; pdb.set_trace()\n'),
        'for <text> in <text2>': Text('for %(text)s in %(text2)s:') + Key('enter'),
        # Fixed: the emitted snippet was missing the closing parenthesis
        # after the xrange() argument ('xrange(%(n)d:' -> 'xrange(%(n)d):').
        'for <text> in X range <n>': Text('for %(text)s in xrange(%(n)d):') + Key('enter')
    },
    extras=[  # Special elements in the specs of the mapping.
        Dictation("text"),
        Dictation("text2"),
        IntegerRef("n", 1, 10000),  # Allowed range for the spoken integer.
    ],
)
# Add the action rule to the grammar instance.
grammar.add_rule(example_rule)
#---------------------------------------------------------------------------
# Load the grammar instance and define how to unload it.
grammar.load()
# Unload function which will be called by natlink at unload time.
def unload():
    """Release the grammar; natlink calls this when the module unloads."""
    global grammar
    if grammar:
        grammar.unload()
    grammar = None
|
The Lawyers Health and Wellness Program is a Free and Confidential Assistance Program available to Members of the Law Society of Manitoba and Manitoba Articling Students through Manitoba Blue Cross.
The Law Society of Manitoba does not have specific rules, regulations or practice directions on computer technology. However, the Code of Professional Conduct applies to e-mails, text messages, data stored "in the cloud" and Facebook as much as it applies to paper files and faxes, or a traditional brick and mortar law office. The principles of client confidentiality and privacy do not change. The only things that change are the mechanisms by which the protected information is stored and transmitted. Various Canadian Law Societies and their insurers have developed materials that provide guidelines and advice on best practice. These materials provide suggestions on how to properly protect electronic data and information, and practice more efficiently through the use of technology. We have provided links to some of the best of that material from other Canadian law societies, as well as American and Commonwealth resources. We also direct you to past Law Society of Manitoba articles on technology issues.
|
# -*- coding: utf-8 -*-
u"""
Copyright 2016 Telefónica Investigación y Desarrollo, S.A.U.
This file is part of Toolium.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from selenium.webdriver.common.by import By
from toolium.pageelements import *
from toolium.pageobjects.page_object import PageObject
class Row(Group):
    """Page-element group for one table row (<tr>).

    All locators are relative to the row element itself (XPath starting
    at '.'), so each Row instance scopes its cells to its own <tr>.
    """
    def init_page_elements(self):
        # Cell order: last name, first name, email, amount due, website,
        # followed by the edit and delete action links.
        self.last_name = Text(By.XPATH, './td[1]')
        self.first_name = Text(By.XPATH, './td[2]')
        self.email = Text(By.XPATH, './td[3]')
        self.due = Text(By.XPATH, './td[4]')
        self.web = Text(By.XPATH, './td[5]')
        self.edit = Link(By.XPATH, './/a[1]')
        self.delete = Link(By.XPATH, './/a[2]')
class Table(Group):
    """Page-element group for a whole table; exposes its body rows."""
    def init_page_elements(self):
        # Each <tbody> row is wrapped in a Row group.
        self.rows = PageElements(By.XPATH, './tbody/tr', page_element_class=Row)
class TablesPageObject(PageObject):
    """Page object for the page containing the two example tables."""
    def init_page_elements(self):
        # Tables are located by their HTML id attributes.
        self.table1 = Table(By.ID, 'table1')
        self.table2 = Table(By.ID, 'table2')
|
Fuze and AppNeta partner on a real-time network monitoring service. Fuze unified communications customers should gain visibility into network performance, user experience and more.
Fuze and AppNeta are partnering on a real-time network monitoring service. The result: Fuze unified communications customers should gain visibility into network performance, and the ability to resolve performance issues before they affect the quality of communications.
The relationship reinforces the growing bond between cloud-based application providers and third-party management platform developers. While Fuze focuses on its core unified communications services, the company can delegate network and end-user performance monitoring to AppNeta’s know-how.
AppNeta’s software will allow Fuze customers to pinpoint the root cause of any service-impacting impairments, whether they originate from the customer’s network or a third-party network that connects the customer to Fuze’s services.
AppNeta’s real-time network monitoring service is available now on the Fuze platform, and can be obtained through Fuze sales and select channel partners.
Both Fuze and AppNeta have been in growth mode. Fuze subscription revenue grew 50 percent in 2017 vs. 2016, the company says. Key customers include Frederique Constant, PTC, and The Rockport Group. Meanwhile, AppNeta attracted private equity funding earlier this year.
|
import sys, os, re
def reformat(f):
    """Explode the numeric body of a potential file to one value per line.

    The first five lines (three comment lines, the element-count line
    and the 'nrho drho nr dr cutoff' line) are copied through
    unchanged.  After that, every line consisting solely of numbers is
    rewritten as one number per line, while any other line is copied
    with its fields re-joined by single spaces.  Output is written to
    'reformatted/<f>' relative to the current directory, which must
    already exist (main() creates it).
    """
    of = 'reformatted/' + f
    with open(f, 'r') as input, open(of, 'w') as output:
        # Three leading comment lines, copied verbatim.
        for _ in range(3):
            output.write(input.readline())
        # Number of elements and the element symbols.  int()/float()
        # raise ValueError on a malformed header, as before.  (The
        # original also stripped '' entries from the split, but split()
        # without arguments never produces them.)
        line = input.readline()
        output.write(line)
        nelements = int(line.strip().split()[0])
        # nrho drho nr dr cutoff — parsed for validation only; the
        # values are not used further in this function.
        line = input.readline()
        output.write(line)
        sline = line.strip().split()
        nrho = int(sline[0])
        drho = float(sline[1])
        nr = int(sline[2])
        dr = float(sline[3])
        cutoff = float(sline[4])
        # Body: a blank line (or EOF) terminates the loop, matching the
        # original behaviour.
        fields = ['holder']
        while fields != []:
            fields = input.readline().strip().split()
            try:
                # Raises ValueError as soon as any field is non-numeric;
                # the original used a bare except for this probe.
                [float(x) for x in fields]
                for num in fields:
                    output.write(num + '\n')
            except ValueError:
                # Non-numeric line: copy it, normalising whitespace.
                output.write(' '.join(fields) + '\n')
def main():
    """Entry point: reformat the file named by the first CLI argument."""
    f = sys.argv[1]
    # Create the 'reformatted' output directory next to the input file
    # if it does not exist yet (expression hoisted out of the original
    # duplicated call).
    # NOTE(review): reformat() writes to 'reformatted/<f>' relative to
    # the current working directory, so this only lines up when the
    # script is run from the input file's directory with a bare file
    # name — confirm before changing either side.
    out_dir = os.path.split(os.path.abspath(f))[0] + '/reformatted'
    if not os.path.exists(out_dir):
        os.makedirs(out_dir)
    reformat(f)
# Allow use both as a script and as an importable module.
if __name__ == '__main__':
    main()
|
#OneWeek100People2018: Day Four: Selfie Series!
Day Four – and that’s my 100 people for #OneWeek100People2018.
This first one isn’t a great likeness. But it’s a nice Direct Watercolor. Bold shapes, wet-inside, dry on the edges – the shape IS the drawing, right?
#OneWeek100People2018 is a great time to try out a selfie series. If you can take an evening and bang out five (or more) in a row, perhaps you’ll see the benefits of repetition. If you can sketch someone (or something) more than once, you’ll start to memorize the features. Each one gets a bit more on-target.
To be honest, I’m not the best portrait artist. It’s an artform where accuracy counts, and I’m too impatient for that :) I hedged my bets sketching Dave Allen the other day, so I committed to doing these straight into the paper without a sketch underneath. When you know you get more than one shot, it’s less stressful.
BTW, Note the use of the background tone to draw the lit right side on that third head. Negative painting!
The first one is flattering via simplification, but the last one is a little bit more accurate I think. It’s still El-Greco-stretched. That’s like a visual tic of mine.
Anyway – good exercise – and I think if I kept going – like, if a person did 100 of JUST selfies (maybe next year?) – I think you’d really make some painting breakthroughs. Maybe tomorrow I might keep going?
We’ll see. I’m bored with my own face. But that’s a good reason to keep at it. Maybe it could force out some new brushwork or more daring color choices.
Ok – that’s it for now – how are you doing with your #OneWeek100People? Post your progress in the comments! It’s getting down to the wire!
← Are you checking out other people’s #OneWeek100People2018?
These are absolutely wonderful! Thank you for all the comments on the last portrait above giving us a better understanding. Years of practice training your eye to see and hand to follow strengthened your ability. Practice does make perfect! You inspire me. Thank you.
I love to hear what other artists use for skin tones. Never thought to use buff with a red before. Must try. Nice work. I do portraits and you don’t give yourself enough credit bc you caught the likeness. They are never perfect unless projected from a photo!
I keep your pamphlet, “Tea, Milk, Honey,” by my computer and will be doing water color when in Wisconsin for a month. I love the way you used your technique for a selfie!
I know the answer already: practice but I wonder if within your blog, classes and other materials you could pin point a way to find the shadow shapes. I have a problem I only seem to find the very light or very dark, so I mess up my paintings because either use things really watery light or I go and dump a big blob of black or grey. It works super great using the pentel brush pen but not so much with watercolor. With watercolor everything is either a highlight or a dark dark.. :( my eyes refuse to pick up mid-tone shadow shapes.. Anyway, I’m enjoying your “people” sketching so much! Thank you for sharing.
You are lucky to have such an interesting and handsome model for your work! Keep him around.
I’m 22 away. May have to do away with touching up and adding color to a few of them and post as is. The last one I did was from a black and white photo with 17 aviation sailors including myself about 6 months after joining the US Navy in 1980 which has bought me to 22 left.
22 – that seems temptingly close! If you pull an all nighter to finish – it’ll be great to say later you made it under the wire!
These are terrific. I have done self portraits years ago for art school but not for many-years. Now I will try with watercolor. Funny–bored with your own face. Doesn’t bore me. Love these portraits!
Not ignoring you guys- here’s the thing- Liz and I, who do this together, really need it to be 5 days. It’s a big chunk of time so we need to keep it under control. You can do the work the weekend before – ITS OK! – it’s probably a good idea!! Just know the big push to post and discuss will be M-F. It’s ok to bend the time to suit you :) No sketching police coming around to check up ;) You do you grrl!
Love these and the comments on technique. Are you looking in a mirror as you paint these?
Would love to know how you managed to do this self-portrait. Did you look in the mirror or take a series of photos or….?
I got up to 105 today because I went to life drawing (it almost felt like cheating — all I had to do was draw, not go out looking for people ;-) )! It was great fun all week, and I REALLY enjoyed tiny brush pen sketches to capture gestures. As for 100 selfies. . . I did that about 5 years ago when I was just starting to sketch as an exercise in self-torture, I guess. Never again. ;-) Thanks for promoting #oneweek100people each year — it’s great practice, and I love seeing all those sketches online!
Haha – “exercise in self torture” haha!
You’re a great artist — and teacher — Marc. I’ve taken all your classes on Craftsy and highly recommend them. I’ve never considered a selfie for #oneweek100people2018. I’m so far behind this year (crazy time at work this week) I’ll have to extend it through the weekend just to come close, but I am going to hit 100.
Those bike sketches are just beautiful, Man!
You have a rare gift in being both a talented artist and teacher. I’ve learned a lot from you (in fact you got me into water color, which I was terrified of!) thanks for sharing your artistic insights and talents.
Day 25 : #30x30DirectWatercolor2018; FINISHED!!!
|
#!/usr/bin/python
# -*- encoding: utf-8; py-indent-offset: 4 -*-
# Check_MK WATO declaration of the parameters for the "isms.pl"
# notification script: defines the form fields an administrator fills in
# (server, credentials, message templates and delivery options).  Macros
# such as $HOSTNAME$ are expanded from the notification context when the
# message is rendered.
register_notification_parameters("isms.pl",
    Dictionary(
        # These parameters may be omitted in a rule.
        optional_keys = ["splitmax", "timeout", "debug"],
        elements = [
            ( "ismsserver",
                TextAscii(
                    title = _("isms-Server"),
                    help = _("IP or Hostname of the isms Server")
                ),
            ),
            ( "user",
                TextAscii(
                    title = _("Username"),
                    help = _("Username used to connect to the isms Server")
                ),
            ),
            ( "password",
                TextAscii(
                    title = _("Password"),
                    help = _("Password used to connect to the isms Server"),
                    default_value = ""
                ),
            ),
            # NOTE(review): the stray '>' after $HOSTNAME$ in the default
            # host message below looks like a typo — confirm before
            # changing, since this text is sent verbatim to recipients.
            ( "host_message",
                TextAreaUnicode(
                    title = _("Message for notifications regarding a host"),
                    help = _("Here you are allowed to use all macros that are defined in the "
                             "notification context."),
                    rows = 9,
                    cols = 58,
                    monospaced = True,
                    default_value = """
$NOTIFICATIONTYPE$ $HOSTNAME$> is $HOSTSTATE$ /$SHORTDATETIME$/ $HOSTOUTPUT$
""",
                ),
            ),
            ( "service_message",
                TextAreaUnicode(
                    title = _("Message for notifications regarding a service"),
                    help = _("Here you are allowed to use all macros that are defined in the "
                             "notification context."),
                    rows = 9,
                    cols = 58,
                    monospaced = True,
                    default_value = """
Nagios Alert Type: $NOTIFICATIONTYPE$
Host: $HOSTNAME$
Service: $SERVICEDESC$
Info: $SERVICEOUTPUT$
""",
                ),
            ),
            ( "timeout",
                Integer(
                    title = _("Timeout"),
                    help = _("Timeout in seconds"),
                    default_value = 10
                ),
            ),
            ( "splitmax",
                Integer(
                    title = _("Max. Messages to send for one Notification."),
                    help = _("Split message into 160 character pieces up to X msgs, 0 means no limitation."),
                    default_value = 1
                ),
            ),
            ( "debug",
                FixedValue(
                    True,
                    title = _("debug"),
                    totext = _("debug messages are printed to ~/var/log/notify.log"),
                )
            ),
        ])
    )
|
From the longer Wikipedia page , which also contains a family tree.
Jasper Tudor, 1st Duke of Bedford, 1st Earl of Pembroke, KG (Welsh: Siasbar Tudur) (c. 1431 – 21/26 December 1495) was the uncle of King Henry VII of England and the architect of his successful conquest of England and Wales in 1485. He was from the noble Tudor family of Penmynydd, North Wales.
Jasper Tudor bore the arms of the kingdom, with the addition of a bordure azure with martlets or (that is, a blue border featuring golden martlets).
His elder brother Edmond was born at Much Hadham Palace in Hertfordshire around 1430. Jasper the second son was born at the Bishop of Ely’s manor at Hatfield in Hertfordshire around 1431. There seemed to be a third son, Jasper's younger brother referred to as either Edward, Thomas or most likely Owen Tudor. Owen was born at Westminster Abbey in 1432, when the Dowager Queen was visiting her son Henry VI, her water broke prematurely and she was forced to seek the help of the monks at Westminster Abbey. Owen was taken from her and raised by the monks and according to his nephew Henry VII's personal historian Polydore Vergil the child was raised as a monk by the name Edward Bridgewater where he lived until his death in 1502. There is mention of a daughter who became a nun by Vergil but nothing is known of her. Jasper's mother's last child would be born in 1437 mere days before Catherine's own untimely death.
In 1436 when Jasper was about five years of age his mother Catherine of Valois once again was expecting another child, however she realised that she was dying from an illness, probably cancer, and sought the help of Bermondsey Abbey to be nursed by the sisters there. By 1 January she had written a will and had given birth to a short lived daughter, possibly named Margaret. On 3 January she died. After her death her husband Owen was arrested. It seems likely that while Catherine had been alive, the regency of Henry VI were reluctant to arrest Owen while the Queen could still protect him. The regents had made it illegal for anyone to marry the widowed queen without their permission, and since Owen was below her in rank, there had been no hope permission would be granted. Owen was sent to Newgate prison. Owen's children Edmund and Jasper, and possibly their unknown sister, were given to Catherine de la Pole who was a nun at Barking Abbey in Essex. She was the sister of William de la Pole, 1st Duke of Suffolk, a great favourite of Henry VI. Catherine de la Pole was to provide Owen Tudor's children with food, clothing, and lodging, and both boys were allowed servants to wait upon them as the King’s half-brothers.
Owen Tudor was released from prison, most likely thanks to his stepson Henry VI who, after providing for his stepfather, also provided for his two half-brothers who had become very dear to him. It is not clear whether Henry VI had known the existence of his half-brothers until his mother told him while she was dying in Bermondsey Abbey. After her death, Henry would take care of them and eventually raise them to the peerage. In turn they gave him unwavering loyalty and fought and promoted his and his Lancastrian family’s interests to the best of their ability. Sometime after March 1442, the young Jasper and his elder brother were brought from Barking Abbey to live at court. Henry arranged for the best priest to educate them not just in their academic studies, but on how to live a moral life. Most likely they also received military training, as when they grew up they were given military positions.
Although there was uncertainty as to whether Jasper and his two (or three) siblings were legitimate, their parents' probably secret marriage not being recognised by the authorities, he enjoyed all the privileges appropriate to his birth, including being invested as a Knight of the Garter. But on the accession of the Yorkist King Edward IV in 1461 he was subject to an attainder for supporting his Lancastrian half-brother, the deposed king Henry, to whom Jasper was a tower of strength. He strove to place his half-nephew Prince Edward of Lancaster on the throne and provided absolute loyalty to his royal half-brother and Margaret of Anjou, his half-brother's wife. Jasper would also help his other sister-in-law Lady Margaret Beaufort, Countess of Richmond and Derby to enable her son Henry Tudor win the throne in 1485 as King Henry VII, father of King Henry the VIII.
Jasper was an adventurer whose military expertise, some of it gained in the early stages of the Wars of the Roses, was considerable, notwithstanding that the only major battle he had taken part in was Mortimer's Cross in February 1461, where he lost the battle to the future Edward IV, and his father Owen to the executioner's axe. He remained in touch with Margaret of Anjou, Queen of Henry VI, as she struggled to regain her son's inheritance, and he held Denbigh Castle for the House of Lancaster.
Jasper also brought up his nephew, Henry Tudor, whose father had died before his birth, until 1461 when he lost Pembroke Castle to William Herbert. After being welcomed by Louis XI of France the following year, he returned to North Wales in 1468 only to be defeated by Herbert.
He briefly regained the earldom of Pembroke a couple of years later but following the return of the Yorkist king Edward IV from temporary exile in 1471, he fled again onto the continent. Escaping from Tenby with Henry, storms in the English Channel forced them to land at Le Conquet in Brittany where they sought refuge from Duke Francis II. Although Edward placed diplomatic pressure on the Duke of Brittany, the uncle and nephew remained safe from the clutches of the English king who died in April 1483.
It was thanks to Jasper that Henry acquired the tactical awareness that made it possible for him to defeat the far more experienced Richard III at the Battle of Bosworth Field in 1485. On Henry's subsequent accession to the throne as Henry VII, Jasper was restored to all his former titles, including Knight of the Garter, and made Duke of Bedford. In 1488, he took possession of Cardiff Castle.
Catherine was the daughter of Richard Woodville, 1st Earl Rivers and Jacquetta of Luxembourg, and thus was sister to (among others) Edward IV's queen Elizabeth Woodville, Anthony Woodville, 2nd Earl Rivers and Richard Woodville, 3rd Earl Rivers. She was also the widow of Henry Stafford, 2nd Duke of Buckingham.
They may have had one stillborn son c. 1490. Catherine survived Jasper and later married Sir Richard Wingfield of Kimbolton Castle.
Helen Tudor (by Mevanvy or Myvanwy ferch N (b. Wales, d. bef. 1485), born c. 1459), wife of a skinner William Gardiner, of London, sometimes spelled William Gardynyr (born c. 1450), having by him: Thomas Gardiner, Prior of Tynemouth and four daughters, Philippa, Margaret, Beatrice and Anne. After her husband's death she married William Sybson. 19th century genealogists mistakenly conflated Thomas Gardiner with Stephen Gardiner, bishop of Winchester.
Joan Tudor, wife of William ap Yevan (son of Yevan Williams and Margaret Kemoys), and reported mother of Morgan ap William (or Williams) (born Llanishen, Glamorganshire, Wales, 1479), later married at Putney Church, Norwell, Nottinghamshire, in 1499 to Catherine or Katherine Cromwell, born Putney, London, c. 1483, an older sister of Tudor statesman Thomas Cromwell, 1st Earl of Essex. They were fourth-generation ancestors to Oliver Cromwell, meaning Oliver Cromwell was a descendant of the Welsh Royal Family, via Joan Tudor's grandfather, Owen Tudor.
He died on 21 or 26 December 1495, and was buried at Keynsham Abbey in Somerset which Lady Agnes Cheyne, the incumbent of Chenies Manor House, bequeathed to him in 1494.
|
# coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.6.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class V1TCPSocketAction(object):
    """Swagger model for a Kubernetes TCP socket probe action.

    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """
    def __init__(self, port=None):
        """
        V1TCPSocketAction - a model defined in Swagger

        :param dict swaggerTypes: The key is attribute name
            and the value is attribute type.
        :param dict attributeMap: The key is attribute name
            and the value is json key in definition.
        """
        self.swagger_types = {
            'port': 'IntstrIntOrString'
        }
        self.attribute_map = {
            'port': 'port'
        }
        # Assigned directly (not via the property) to match the
        # generated behaviour: a None port is allowed at construction.
        self._port = port

    @property
    def port(self):
        """
        Gets the port of this V1TCPSocketAction.
        Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.

        :return: The port of this V1TCPSocketAction.
        :rtype: IntstrIntOrString
        """
        return self._port

    @port.setter
    def port(self, port):
        """
        Sets the port of this V1TCPSocketAction.
        Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.

        :param port: The port of this V1TCPSocketAction.
        :type: IntstrIntOrString
        """
        if port is None:
            raise ValueError("Invalid value for `port`, must not be `None`")
        self._port = port

    def to_dict(self):
        """
        Returns the model properties as a dict
        """
        result = {}
        # dict.items() replaces six.iteritems(): identical behaviour on
        # Python 2 and 3 for this read-only iteration.
        for attr, _ in self.swagger_types.items():
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        return result

    def to_str(self):
        """
        Returns the string representation of the model
        """
        return pformat(self.to_dict())

    def __repr__(self):
        """
        For `print` and `pprint`
        """
        return self.to_str()

    def __eq__(self, other):
        """
        Returns true if both objects are equal
        """
        # Guard against unrelated types: the generated original assumed
        # `other` always had a __dict__ and raised AttributeError when
        # compared against e.g. None or an int.
        if not isinstance(other, V1TCPSocketAction):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """
        Returns true if both objects are not equal
        """
        return not self == other
|
At Rupp Seeds, our top priority continues to be providing seed that’s ideal for your fields. And your bottom line.
We test, grow and select the highest yielding hybrids for the tristate area. By partnering with multiple sources, we’re focused solely on providing you with the exact traits you need – helping you pull your yields to new levels.
Because we know you value a partner who shares your goals of achieving the healthiest plants and highest yields your farm can possibly produce, we invite you to plant Rupp and See What Good Yields®.
|
# Copyright (c) 2008-2011 by Enthought, Inc.
# Copyright (c) 2013-2015 Continuum Analytics, Inc.
# All rights reserved.
from __future__ import absolute_import
import logging
import sys
import json
from os.path import abspath, basename, exists, join
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
if sys.platform.startswith('linux'):
from .linux import Menu, ShortCut
elif sys.platform == 'darwin':
from .darwin import Menu, ShortCut
elif sys.platform == 'win32':
from .win32 import Menu, ShortCut
from .win_elevate import isUserAdmin, runAsAdmin
def _install(path, remove=False, prefix=sys.prefix, mode=None):
if abspath(prefix) == abspath(sys.prefix):
env_name = None
else:
env_name = basename(prefix)
data = json.load(open(path))
try:
menu_name = data['menu_name']
except KeyError:
menu_name = 'Python-%d.%d' % sys.version_info[:2]
shortcuts = data['menu_items']
m = Menu(menu_name, prefix=prefix, env_name=env_name, mode=mode)
if remove:
for sc in shortcuts:
ShortCut(m, sc).remove()
m.remove()
else:
m.create()
for sc in shortcuts:
ShortCut(m, sc).create()
def install(path, remove=False, prefix=sys.prefix, recursing=False):
"""
install Menu and shortcuts
"""
# this sys.prefix is intentional. We want to reflect the state of the root installation.
if sys.platform == 'win32' and not exists(join(sys.prefix, '.nonadmin')):
if isUserAdmin():
_install(path, remove, prefix, mode='system')
else:
from pywintypes import error
try:
if not recursing:
retcode = runAsAdmin([join(sys.prefix, 'python'), '-c',
"import menuinst; menuinst.install(%r, %r, %r, %r)" % (
path, bool(remove), prefix, True)])
else:
retcode = 1
except error:
retcode = 1
if retcode != 0:
logging.warn("Insufficient permissions to write menu folder. "
"Falling back to user location")
_install(path, remove, prefix, mode='user')
else:
_install(path, remove, prefix, mode='user')
|
Title Loans Newport Beach Can Help!
Get Cash Today with Title Loans Newport Beach!
Title Loans Newport Beach Can Help You!
Are big banks and financial institutions turning you away and denying you small loans to meet your urgent emergency? Are you too embarrassed to ask your friends and family members to help you ride out your financial crisis? Don’t fear, there’s no need to put yourself in humiliating situations where you get turned down over and over again. If you’re in need of an immediate cash source, look to Title Loans Newport Beach! We’ll help you secure the loan you need to take care of your emergency situation!
What Makes Title Loans Newport Beach Outstanding?
No empty promises! We can approve your loan in just 15 minutes! Title Loans Newport Beach likes to keep our application, assessment and processing simple and quick so you can be on your way to getting your cash as quickly as possible.
We don’t harass our clients with aggressive marketing tactics! We believe in absolute transparency and in upholding ethical practices! Title Loans Newport Beach strictly abides by California state laws concerning car title loans both in letter and in spirit. Our loan contracts are crystal clear and are not loaded with injurious legal clauses and hidden charges! What you see is what you get!
Title Loans Newport Beach offers the best short term loans in California! No one can beat our low interest rates and excellent service!
Don’t let the banks drag you down with their complicated applications and wait time! Let Title Loans Newport Beach handle all the work! We’ll make sure you get your loan in the fastest way possible! Before you know it, you’ll be walking out the door with the money you need! Call us today at (949) 258-9068 or fill out your application online!
|
#!/usr/bin/env python
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This is a complete rewrite of a file licensed as follows:
#
# Copyright (c) 2010, Even Rouault <even dot rouault at mines dash paris dot org>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
"""Test async reader.
Rewrite of
http://trac.osgeo.org/gdal/browser/trunk/autotest/gcore/asyncreader.py
"""
import contextlib
import unittest
from osgeo import gdal
import unittest
from autotest2.gcore import gcore_util
from autotest2.gdrivers import gdrivers_util
@contextlib.contextmanager
def AsyncReader(src, xoff, yoff, xsize, ysize, buf=None, buf_xsize=None,
buf_ysize=None, buf_type=None, band_list=None, options=None):
options = options or []
asyncreader = src.BeginAsyncReader(xoff, yoff, xsize, ysize, buf, buf_xsize,
buf_ysize, buf_type, band_list, options)
yield asyncreader
src.EndAsyncReader(asyncreader)
class AsyncReaderTest(unittest.TestCase):
def testAsyncReader(self):
filepath = gcore_util.GetTestFilePath('rgbsmall.tif')
src = gdal.Open(filepath)
x_size = src.RasterXSize
y_size = src.RasterYSize
bands = src.RasterCount
asyncreader = src.BeginAsyncReader(0, 0, src.RasterXSize, src.RasterYSize)
buf = asyncreader.GetBuffer()
self.assertEqual(asyncreader.GetNextUpdatedRegion(0),
[gdal.GARIO_COMPLETE, 0, 0, x_size, y_size])
src.EndAsyncReader(asyncreader)
expected = [src.GetRasterBand(i).Checksum() for i in range(1, bands + 1)]
asyncreader = None
src = None
drv = gdal.GetDriverByName(gdrivers_util.GTIFF_DRIVER)
dst = drv.Create('/vsimem/asyncresult.tif', x_size, y_size, bands)
dst.WriteRaster(0, 0, x_size, y_size, buf)
checksum = [dst.GetRasterBand(i).Checksum() for i in range(1, bands + 1)]
dst = None
gdal.Unlink('/vsimem/asyncresult.tif')
self.assertEqual(checksum, expected)
def testAsyncReaderContextManager(self):
filepath = gcore_util.GetTestFilePath('rgbsmall.tif')
src = gdal.Open(filepath)
x_size = src.RasterXSize
y_size = src.RasterYSize
with AsyncReader(src, 0, 0, x_size, y_size) as asyncreader:
self.assertEqual(asyncreader.GetNextUpdatedRegion(0),
[gdal.GARIO_COMPLETE, 0, 0, x_size, y_size])
if __name__ == '__main__':
unittest.main()
|
I’m a city gal. I was born in one of the biggest cities in New Jersey, which is only a train stop away from my favorite Metropolis, New York City. I love the high buildings, the corner stores, and the fact that there is always something open and ready for you to shop. I’m also a fan of city fashion. But, I do have one item i’ve always wanted: Cowboy Boots.
They are just so intricate, and even though they are meant for hard labor, they look so chic. The detailing on a Cowboy boot can be a piece of artwork, and it can also tell so much about the wearer. If they are stoic, full of humor, or a big fan of color. I’ve always wanted a pair that reflected me.
When Country Outfitter allowed me to pick a pair of cowboy boots of my very own, I was giddy with delight. There were so many to choose from; they’re selection is vast and it literally took me a few hours to narrow down the pair just for me. But I found them. Her name is Sorrel.
Isn’t she lovely? The Sorrel Apache Boot just screams “Amiyrah!” The classic chocolate color with a cool square toe, and the wonderfully colorful stitching on the side fits my personality to a tee. It was me. In a boot. As you can see by the little hand, The Duchess loved them too. I have a feeling she might inherit these later in life.
I even made a video featuring the boots in 2 chic, thrifty, city girl styles.
Would you like to win a pair of your own? You would? Great! Country Outfitter is excited to offer one of my readers a $150 gift card to their site so you can find the boots of your dreams too.
Click Here to Enter. This is your only mandatory entry to win. Put in your email address and that’s it! Want an extra entry? Comment down below and tell me the name of outfit you liked best from the video.
Just a note: Country Outfitter will occasionally send you marketing messages. You are free to opt out at any time. You must be a US resident 18 years or older to enter. Giveaway ends on December 14th and the winner will be contacted via email from Country Outfitter.
Update: The contest is now closed. Thanks for entering!
Disclosure: Country Outfitter, a retailer of Justin cowboy boots, sent me these Justin Sorrel Apache Boots to review.
« Win A Wii U just in time for the Holidays!
|
#!/usr/bin/python3
""" Test script for lru_timestamp function.
usage: lru.py [-h] [-r REFRESH] [-s SLEEP]
optional arguments:
-h, --help show this help message and exit
-r REFRESH, --refresh REFRESH
refresh interval (default 60 min)
-s SLEEP, --sleep SLEEP
sleep interval (default 10 min)
"""
import argparse
import datetime
import functools
import random
import time
def lru_timestamp(refresh_interval=60):
""" Return a timestamp string for @lru_cache decorated functions.
The returned timestamp is used as the value of an extra parameter
to @lru_cache decorated functions, allowing for more control over
how often cache entries are refreshed. The lru_timestamp function
should be called with the same refresh_interval value for a given
@lru_cache decorated function. The returned timestamp is for the
benefit of the @lru_cache decorator and is normally not used by
the decorated function.
Positional arguments:
refresh_interval -- in minutes (default 60), values less than 1
are coerced to 1, values more than 1440 are
coerced to 1440
"""
if not isinstance(refresh_interval, int):
raise TypeError('refresh_interval must be an int from 1-1440')
dt = datetime.datetime.now()
if refresh_interval > 60:
refresh_interval = min(refresh_interval, 60*24)
fmt = '%Y%m%d'
minutes = dt.hour * 60
else:
refresh_interval = max(1, refresh_interval)
fmt = '%Y%m%d%H'
minutes = dt.minute
ts = dt.strftime(fmt)
age = minutes // refresh_interval
return '{0}:{1:d}'.format(ts, age)
@functools.lru_cache()
def calulate(x, y, timestamp):
""" Return random int for testing lru_timestamp function."""
print('performing calculation (not from cache), timestamp:', timestamp)
return random.randint(x, y)
def init():
""" Return parsed command line args."""
random.seed()
parser = argparse.ArgumentParser(fromfile_prefix_chars='@')
parser.add_argument('-r', '--refresh', type=int, dest='refresh',
default=60, help='refresh interval (default 60 min)')
parser.add_argument('-s', '--sleep', type=int, dest='sleep', default=10,
help='sleep interval (default 10 min)')
return parser.parse_args()
def main():
""" Script main."""
args = init()
print('refresh interval (min):', args.refresh)
print('sleep interval (min):', args.sleep)
print()
refresh = args.refresh
doze = args.sleep * 60
#num = calulate(1, 1000, lru_timestamp('junk'))
#num = calulate(1, 1000, lru_timestamp(1.22))
#num = calulate(1, 1000, lru_timestamp(-1))
#num = calulate(1, 1000, lru_timestamp(2000))
while True:
num = calulate(1, 1000, lru_timestamp(refresh))
print('calculation returned', num)
time.sleep(doze)
if __name__ == '__main__':
main()
|
"The pianist seems to have assimilated Keith Jarrett and Chick Corea, among others artists . His left hand does a wonderful work with ostinatos sometimes giving the impression of a double bass. Beautiful tone, pretty attacks, personal phrasing" Serge Baudot . Jazz Hot.France.
|
import yaml
import logging
logger = logging.getLogger(__name__)
class Configs:
server = None
recipes = {}
DB = None
plugins = None
@classmethod
def init_server_configs(cls, server_configs):
with open(server_configs) as s_c:
cls.server = yaml.load(s_c.read())
@classmethod
def init_layer_recipes(cls, recipe_configs):
recipe_name = recipe_configs.split('/')[-1]
if recipe_name[-4:] == '.yml':
recipe_name = recipe_name[:-4]
elif recipe_name[-5:] == '.yaml':
recipe_name = recipe_name[:-5]
else:
raise ValueError('File in layer recipes folder does not have a YAML extension: {0}'.format(recipe_configs))
with open(recipe_configs) as r_c:
load_recipe = yaml.load(r_c.read())
cls.recipes[recipe_name] = Recipe(load_recipe)
# add the recipe name based on the file name
# this is needed by the tilejson query
cls.recipes[recipe_name].name = recipe_name
logger.info('Adding layer: {0}'.format(recipe_name))
'''
Plugins.load()
Plugins.hook('before_load', config=Configs)
def load_recipe(data):
name = data.get('name', 'default')
if name in RECIPES:
raise ValueError('Recipe with name {} already exist'.format(name))
data['name'] = name
RECIPES[name] = Recipe(data)
if len(RECIPES) == 1 and name != 'default':
RECIPES['default'] = RECIPES[data['name']]
for recipe in Configs.layers:
with Path(recipe).open() as f:
load_recipe(yaml.load(f.read()))
Plugins.hook('load', config=config, recipes=RECIPES)
'''
# the following model structures for recipes / layers / queries allows searching up the chain
# for attributes. If not found in the root recipes level then it will check the server configs.
class Recipe(dict):
def __init__(self, data):
super().__init__(data)
self.load_layers(data['layers'])
def load_layers(self, layers):
self.layers = {}
for layer in layers:
self.layers[layer['name']] = Layer(self, layer)
def __getattr__(self, attr):
return self.get(attr, Configs.server.get(attr, None))
class Layer(dict):
def __init__(self, recipe, layer_data):
self.recipe = recipe
super().__init__(layer_data)
self.load_queries(layer_data['queries'])
def load_queries(self, queries):
self.queries = []
for query in queries:
self.queries.append(Query(self, query))
def __getattr__(self, attr):
return self.get(attr, getattr(self.recipe, attr))
@property
def id(self):
return '{0}:{1}'.format(self.recipe.name, self.name)
@property
def description(self):
return self.get('description', 'no description provided')
class Query(dict):
def __init__(self, layer, data):
self.layer = layer
super().__init__(data)
def __getattr__(self, attr):
return self.get(attr, getattr(self.layer, attr))
|
The book description states that this is book “entertaining and engaging style that opens the subject to both scholars and the casual reader of history looking to learn more about the Macedonian king.” Not so very much I’d say. While it is very well written, I would not say that this literate and erudite work is for the casual reader of history. It is pretty dense, not as bad as some I’ve read certainly, but also not easily accessible.
The research that went into this book is both exhausting and deep. The author examines the various theories, stories and legends that make up the “history” of Alexander the Great. While he died at only thirty-three years of age, his reputation and history generated hundreds of thousands of pages of “research.” (I put that in quotes because some of the research was not exactly scientific or accurate.) And the cover of the book is absolutely gorgeous.
I want to thank Netgalley and Troubador Publishing Limited/Matador for forwarding to me a copy of this fine work of scholarship to me to read.
Really interesting book. Scholarly, but not written exclusively for scholars.
I thoroughly enjoyed it. A good resource for both historians and the average layperson, with an interest for history.
|
#!/usr/bin/env python3
import log
import sys
import subprocess
from importlib import import_module
from setter import *
from os.path import dirname, splitext
if sys.platform == 'win32':
winreg = import_module('winreg')
Image = import_module('PIL.Image')
win32gui = import_module('win32.win32gui')
def convert_photo_to_bmp(inpath, outpath):
if splitext(inpath)[1] == '.bmp':
return
Image.open(inpath).save(outpath)
SPI_SETDESKWALLPAPER = 0x0014
class Win32WallpaperSetter(WallpaperSetter):
KEY = winreg.HKEY_CURRENT_USER
SUB_KEY = 'Control Panel\\Desktop'
VALUE_NAME = 'Wallpaper'
BACKUP = True
def _read_value(self, k, valuename = None):
if not valuename: valuename = self.VALUE_NAME
try:
value = winreg.QueryValueEx(k, valuename)
if value[1] != winreg.REG_SZ:
self._logger.fatal('cannot handle non-REG_SZ value %s', value)
return None
except:
self._logger.warn('error encountered during reading value %s', valuename, exc_info=1)
return None
self._logger.debug('read {} from {} get {}'.format(valuename, k, value))
return value
def _set_value(self, k, v, valuename = None):
if not valuename: valuename = self.VALUE_NAME
self._logger.debug('set %s\\%s\\%s to %s', self.KEY, self.SUB_KEY, valuename, v)
try:
winreg.SetValueEx(k, valuename, 0, winreg.REG_SZ, v)
except:
self._logger.error('error encountered during setting value %s', valuename, exc_info=1)
return False
self._logger.debug('set {} of {} to {} succeeds'.format(valuename, k, v))
return True
def set(self, path, args):
k = None
inpath = path.replace('/', '\\')
path = "{}\\wallpaper.bmp".format(dirname(inpath))
# windows only supports BMP, convert before setting
try:
convert_photo_to_bmp(inpath, path)
except Exception as ex:
self._logger.exception(ex)
return False
try:
k = winreg.OpenKey(self.KEY, self.SUB_KEY, 0, winreg.KEY_READ|winreg.KEY_SET_VALUE)
lastvalue = self._read_value(k)
if lastvalue and self.BACKUP:
ret = self._set_value(k, lastvalue[0], self.VALUE_NAME+'Backup')
self._set_value(k, '0', 'TileWallpaper')
self._set_value(k, '10', 'WallpaperStyle')
win32gui.SystemParametersInfo(SPI_SETDESKWALLPAPER, path, 1+2)
except Exception as ex:
ret = False
self._logger.exception(ex)
finally:
if k: k.Close()
return ret
register('win', Win32WallpaperSetter)
if __name__ == '__main__':
log.setDebugLevel(log.DEBUG)
setter = Win32WallpaperSetter()
setter.set(r'w.jpg', None)
|
In which year was IFOAM - Organics International founded?
The Entwicklungsbüro für Ökologischen Landbau Lindenberg's activities include consulting, teaching und training, research and agriculture.
Valoritalia societa per la certificazione delle qualita e delle produzioni vitivinicole Italiane S.r.l.
|
#!/usr/bin/python
import os
import tempfile
import unittest
import sys
from avocado.utils import process
# simple magic for using scripts within a source tree
basedir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
if os.path.isdir(os.path.join(basedir, 'virttest')):
sys.path.append(basedir)
from virttest.unittest_utils import mock
from virttest import utils_misc
from virttest import cartesian_config
from virttest import build_helper
class TestUtilsMisc(unittest.TestCase):
def test_cpu_vendor_intel(self):
cpu_info = """processor : 0
vendor_id : GenuineIntel
cpu family : 6
model : 58
model name : Intel(R) Core(TM) i7-3770 CPU @ 3.40GHz
"""
vendor = utils_misc.get_cpu_vendor(cpu_info, False)
self.assertEqual(vendor, 'GenuineIntel')
def test_cpu_vendor_amd(self):
cpu_info = """processor : 3
vendor_id : AuthenticAMD
cpu family : 21
model : 16
model name : AMD A10-5800K APU with Radeon(tm) HD Graphics
"""
vendor = utils_misc.get_cpu_vendor(cpu_info, False)
self.assertEqual(vendor, 'AuthenticAMD')
def test_vendor_unknown(self):
cpu_info = "this is an unknown cpu"
vendor = utils_misc.get_cpu_vendor(cpu_info, False)
self.assertEqual(vendor, 'unknown')
def test_get_archive_tarball_name(self):
tarball_name = utils_misc.get_archive_tarball_name('/tmp',
'tmp-archive',
'bz2')
self.assertEqual(tarball_name, 'tmp-archive.tar.bz2')
def test_get_archive_tarball_name_absolute(self):
tarball_name = utils_misc.get_archive_tarball_name('/tmp',
'/var/tmp/tmp',
'bz2')
self.assertEqual(tarball_name, '/var/tmp/tmp.tar.bz2')
def test_get_archive_tarball_name_from_dir(self):
tarball_name = utils_misc.get_archive_tarball_name('/tmp',
None,
'bz2')
self.assertEqual(tarball_name, 'tmp.tar.bz2')
def test_git_repo_param_helper(self):
config = """git_repo_foo_uri = git://git.foo.org/foo.git
git_repo_foo_branch = next
git_repo_foo_lbranch = local
git_repo_foo_commit = bc732ad8b2ed8be52160b893735417b43a1e91a8
"""
config_parser = cartesian_config.Parser()
config_parser.parse_string(config)
params = config_parser.get_dicts().next()
h = build_helper.GitRepoParamHelper(params, 'foo', '/tmp/foo')
self.assertEqual(h.name, 'foo')
self.assertEqual(h.branch, 'next')
self.assertEqual(h.lbranch, 'local')
self.assertEqual(h.commit, 'bc732ad8b2ed8be52160b893735417b43a1e91a8')
def test_normalize_data_size(self):
n1 = utils_misc.normalize_data_size("12M")
n2 = utils_misc.normalize_data_size("1024M", "G")
n3 = utils_misc.normalize_data_size("1024M", "T")
n4 = utils_misc.normalize_data_size("1000M", "G", 1000)
n5 = utils_misc.normalize_data_size("1T", "G", 1000)
n6 = utils_misc.normalize_data_size("1T", "M")
self.assertEqual(n1, "12.0")
self.assertEqual(n2, "1.0")
self.assertEqual(n3, "0.0009765625")
self.assertEqual(n4, "1.0")
self.assertEqual(n5, "1000.0")
self.assertEqual(n6, "1048576.0")
class FakeCmd(object):
def __init__(self, cmd):
self.fake_cmds = [
{"cmd": "numactl --hardware",
"stdout": """
available: 1 nodes (0)
node 0 cpus: 0 1 2 3 4 5 6 7
node 0 size: 18431 MB
node 0 free: 17186 MB
node distances:
node 0
0: 10
"""},
{"cmd": "ps -eLf | awk '{print $4}'",
"stdout": """
1230
1231
1232
1233
1234
1235
1236
1237
"""},
{"cmd": "taskset -cp 0 1230", "stdout": ""},
{"cmd": "taskset -cp 1 1231", "stdout": ""},
{"cmd": "taskset -cp 2 1232", "stdout": ""},
{"cmd": "taskset -cp 3 1233", "stdout": ""},
{"cmd": "taskset -cp 4 1234", "stdout": ""},
{"cmd": "taskset -cp 5 1235", "stdout": ""},
{"cmd": "taskset -cp 6 1236", "stdout": ""},
{"cmd": "taskset -cp 7 1237", "stdout": ""},
]
self.stdout = self.get_stdout(cmd)
def get_stdout(self, cmd):
for fake_cmd in self.fake_cmds:
if fake_cmd['cmd'] == cmd:
return fake_cmd['stdout']
raise ValueError("Could not locate locate '%s' on fake cmd db" % cmd)
def utils_run(cmd, shell=True):
return FakeCmd(cmd)
all_nodes_contents = "0\n"
online_nodes_contents = "0\n"
class TestNumaNode(unittest.TestCase):
def setUp(self):
self.god = mock.mock_god(ut=self)
self.god.stub_with(process, 'run', utils_run)
all_nodes = tempfile.NamedTemporaryFile(delete=False)
all_nodes.write(all_nodes_contents)
all_nodes.close()
online_nodes = tempfile.NamedTemporaryFile(delete=False)
online_nodes.write(online_nodes_contents)
online_nodes.close()
self.all_nodes_path = all_nodes.name
self.online_nodes_path = online_nodes.name
self.numa_node = utils_misc.NumaNode(-1,
self.all_nodes_path,
self.online_nodes_path)
def test_get_node_cpus(self):
self.assertEqual(self.numa_node.get_node_cpus(0), '0 1 2 3 4 5 6 7')
def test_pin_cpu(self):
self.assertEqual(self.numa_node.pin_cpu("1230"), "0")
self.assertEqual(self.numa_node.dict["0"], ["1230"])
self.assertEqual(self.numa_node.pin_cpu("1231"), "1")
self.assertEqual(self.numa_node.dict["1"], ["1231"])
self.assertEqual(self.numa_node.pin_cpu("1232"), "2")
self.assertEqual(self.numa_node.dict["2"], ["1232"])
self.assertEqual(self.numa_node.pin_cpu("1233"), "3")
self.assertEqual(self.numa_node.dict["3"], ["1233"])
self.assertEqual(self.numa_node.pin_cpu("1234"), "4")
self.assertEqual(self.numa_node.dict["4"], ["1234"])
self.assertEqual(self.numa_node.pin_cpu("1235"), "5")
self.assertEqual(self.numa_node.dict["5"], ["1235"])
self.assertEqual(self.numa_node.pin_cpu("1236"), "6")
self.assertEqual(self.numa_node.dict["6"], ["1236"])
self.assertEqual(self.numa_node.pin_cpu("1237"), "7")
self.assertEqual(self.numa_node.dict["7"], ["1237"])
self.assertTrue("free" not in self.numa_node.dict.values())
def test_free_cpu(self):
self.assertEqual(self.numa_node.pin_cpu("1230"), "0")
self.assertEqual(self.numa_node.dict["0"], ["1230"])
self.assertEqual(self.numa_node.pin_cpu("1231"), "1")
self.assertEqual(self.numa_node.dict["1"], ["1231"])
self.numa_node.free_cpu("0")
self.assertEqual(self.numa_node.dict["0"], [])
self.assertEqual(self.numa_node.dict["1"], ["1231"])
def test_bitlist_to_string(self):
string = 'foo'
bitlist = [0, 1, 1, 0, 0, 1, 1, 0, 0, 1,
1, 0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1]
self.assertEqual(utils_misc.string_to_bitlist(string), bitlist)
def test_string_to_bitlist(self):
bitlist = [0, 1, 1, 0, 0, 0, 1, 0, 0, 1,
1, 0, 0, 0, 0, 1, 0, 1, 1, 1, 0, 0, 1, 0]
string = 'bar'
self.assertEqual(utils_misc.bitlist_to_string(bitlist), string)
def tearDown(self):
self.god.unstub_all()
os.unlink(self.all_nodes_path)
os.unlink(self.online_nodes_path)
if __name__ == '__main__':
unittest.main()
|
Two years ago I was invited to Texas by Tom Yancey (Texas A&M) to look at some curious wiggly tubular fossils in the Lower Permian (about 280 million years old). They form small reefs a meter or so across and have traditionally been referred to as serpulid worm tubes. We suspected otherwise. After field and lab work, and collaboration with our Estonian colleague Olev Vinn, we determined that they are a new genus and species of microconchid. Our paper describing this taxon has just appeared: Wilson, Vinn and Yancey (2011).
A tangled collection of Helicoconchus elongatus Wilson, Vinn and Yancey 2011.
Helicoconchus elongatus is, as you may suspect from the name, an elongate coiled tube. The walls are impunctate (meaning they have no pores) and have diaphragms (horizontal partitions) with little dimples in their centers. They have two kinds of budding: fission (shown in the top image) and lateral budding (shown below). They grew into thick intertwined disks in shallow marine waters where they lived with snails, clams, echinoids and foraminiferans.
A small lateral bud on the side of a microconchid tube.
An acetate peel showing a longitudinal cross-section of a microconchid tube. The thin diaphragm running vertically in this image shows an inflection for the "dimple".
Microconchids (Ordovician – Jurassic) are an evolutionarily interesting group because they appear to be related to bryozoans and brachiopods (much to everyone’s surprise). This is based on their shell structure and their manner of budding (Zatoń and Vinn, 2011). Helicoconchus elongatus will tell us much about the relationships of microconchids to other groups because of the detail we can see in its budding styles and its marvelous preservation.
Helicoconchus elongatus in the field.
|
from __future__ import print_function
import subprocess as sp
import sys
import stat
import shutil
import re
import os
import sys
import glob
import time
import yaml
from util import wait_for_qsub, get_git_hash
class ExpTestHelper(object):
def __init__(self, exp_name, bin_path=None):
self.exp_name = exp_name
self.res = exp_name.split('deg')[0].split('_')[-1] + 'deg'
self.my_path = os.path.dirname(os.path.realpath(__file__))
self.lab_path = os.path.realpath(os.path.join(self.my_path, '../'))
if not bin_path:
self.bin_path = os.path.join(self.lab_path, 'bin')
else:
self.bin_path = bin_path
self.control_path = os.path.join(self.lab_path, 'control')
self.exp_path = os.path.join(self.control_path, exp_name)
self.payu_config = os.path.join(self.exp_path, 'config.yaml')
self.accessom2_config = os.path.join(self.exp_path, 'accessom2.nml')
self.ocean_config = os.path.join(self.exp_path, 'ocean', 'input.nml')
self.archive = os.path.join(self.lab_path, 'archive', exp_name)
self.output000 = os.path.join(self.archive, 'output000')
self.output001 = os.path.join(self.archive, 'output001')
self.accessom2_out_000 = os.path.join(self.output000, 'access-om2.out')
self.accessom2_out_001 = os.path.join(self.output001, 'access-om2.out')
self.src = os.path.join(self.lab_path, 'src')
self.libaccessom2_src = os.path.join(self.src, 'libaccessom2')
self.mom_src = os.path.join(self.src, 'mom')
self.cice_src = os.path.join(self.src, 'cice5')
self.yatm_exe = None
self.mom_exe = None
self.cice_exe = None
self.input_path = '/short/public/access-om2/input_rc'
self.mom_input = os.path.join(self.input_path, 'mom_' + self.res)
self.cice_input = os.path.join(self.input_path, 'cice_' + self.res)
if not os.path.exists(self.bin_path):
os.mkdir(self.bin_path)
def has_run(self):
"""
See wether this experiment has been run.
"""
return os.path.exists(os.path.join(self.output000, 'access-om2.out'))
def make_paths(self, exp_name, run_num=0):
paths = {}
run_num = str(run_num).zfill(3)
paths['archive_link'] = os.path.join(paths['exp'], 'archive')
paths['output'] = os.path.join(paths['archive'], 'output' + run_num)
paths['restart'] = os.path.join(paths['archive'], 'restart' + run_num)
paths['stdout'] = os.path.join(paths['output'], 'access.out')
paths['stderr'] = os.path.join(paths['output'], 'access.err')
paths['stdout_runtime'] = os.path.join(paths['exp'], 'access.out')
paths['stderr_runtime'] = os.path.join(paths['exp'], 'access.err')
return paths
def print_output(self, files):
for file in files:
if file is not None:
if os.path.exists(file):
with open(file, 'r') as f:
print(f.read())
def get_most_recent_run_num(self, archive_path):
"""
Look in the archive directory to find which build this is.
"""
dirs = glob.glob(archive_path + '/output*')
dirs.sort()
return int(dirs[-1][-3:])
def setup_for_programmatic_run(self, exes):
"""
Various config.yaml settings need to be modified in order to run in the
test environment.
"""
yatm_exe, cice_exe, mom_exe = exes
with open(self.payu_config) as f:
doc = yaml.load(f)
doc['submodels'][0]['exe'] = yatm_exe
doc['submodels'][1]['exe'] = mom_exe
doc['submodels'][2]['exe'] = cice_exe
doc['runlog'] = False
with open(self.payu_config, 'w') as f:
yaml.dump(doc, f)
def do_basic_access_run(self, exp, model='cm'):
    """Run experiment *exp* with payu and sanity-check its output.

    model: 'cm' (coupled) or 'om' (ocean-only); 'om' additionally checks
    for the MATM end-of-run banner.

    NOTE(review): run() (defined below) takes no arguments, yet it is
    called here with two -- one of the two definitions looks stale;
    confirm against version history.
    NOTE(review): make_paths() as written never sets paths['exp'] or
    paths['archive'], which this method reads -- verify make_paths.
    """
    paths = self.make_paths(exp)
    ret, qso, qse, qsub_files = self.run(paths['exp'], self.lab_path)
    if ret != 0:
        # On failure dump qsub and runtime logs to aid diagnosis before
        # the assert aborts the test.
        self.print_output([qso, qse,
                           paths['stdout_runtime'],
                           paths['stderr_runtime']])
        fstring = 'Run {} failed with code {}.'
        print(fstring.format(exp, ret), file=sys.stderr)
        assert(ret == 0)
    # Re-derive paths for the run number that was actually produced.
    run_num = self.get_most_recent_run_num(paths['archive'])
    paths = self.make_paths(exp, run_num)
    # Model output should exist.
    assert(os.path.exists(paths['output']))
    assert(os.path.exists(paths['restart']))
    assert(os.path.exists(paths['stdout']))
    assert(os.path.exists(paths['stderr']))
    with open(paths['stdout'], 'r') as f:
        s = f.read()
        # MOM prints this banner on successful completion.
        assert('MOM4: --- completed ---' in s)
        if model == 'om':
            assert('********** End of MATM **********' in s)
def copy_to_bin(self, src_dir, wildcard, libaccessom2_src=None):
    """Copy a freshly built executable into bin_path under a hash-stamped name.

    The destination name embeds the source repo's git hash (and, when
    given, the libaccessom2 hash) so different builds can coexist.

    Returns:
        (dest_path, 0) on success, (None, 1) when the wildcard does not
        match exactly one file.
    """
    matches = glob.glob(wildcard)
    if len(matches) != 1:
        print("Error: copy_to_bin can't find one {}".format(wildcard), file=sys.stderr)
        return None, 1
    exe_path = matches[0]
    src_hash = get_git_hash(src_dir)
    lib_hash = get_git_hash(libaccessom2_src) if libaccessom2_src else None
    base = os.path.basename(exe_path)
    stem, ext = base.split('.')[0], base.split('.')[1]
    if lib_hash:
        new_name = '{}_{}_libaccessom2_{}.{}'.format(stem, src_hash,
                                                     lib_hash, ext)
    else:
        new_name = '{}_{}.{}'.format(stem, src_hash, ext)
    dest = os.path.join(self.bin_path, new_name)
    if os.path.exists(dest):
        os.remove(dest)
    shutil.copy(exe_path, dest)
    # Group ownership and world-readable/executable perms let other
    # project members use the binaries.
    shutil.chown(dest, group='ik11')
    perms = (stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH
             | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)
    os.chmod(dest, perms)
    return dest, 0
def build_libaccessom2(self, clean=False):
    """Build libaccessom2/YATM and copy the executable into bin_path.

    Note: the 'clean' arg does nothing.

    Returns:
        (exe_path_or_None, combined_return_code)
    """
    build_status = sp.call([os.path.join(self.libaccessom2_src,
                                         'build_on_gadi.sh')])
    exe, copy_status = self.copy_to_bin(
        self.libaccessom2_src,
        self.libaccessom2_src + '/build/bin/yatm.exe')
    return exe, build_status + copy_status
def build_cice(self, clean=False):
    """Build the CICE5 executable for this experiment's resolution.

    Returns:
        (exe_path_or_None, combined_return_code)
    """
    os.environ['ACCESS_OM_DIR'] = os.path.join(self.lab_path)
    os.environ['LIBACCESSOM2_ROOT'] = os.path.join(self.libaccessom2_src)
    if clean:
        r1 = sp.call(['make', '-C', self.cice_src, 'clean'])
    r1 = sp.call(['make', '-C', self.cice_src, self.res])
    # Map the model resolution to the grid size embedded in the CICE
    # build directory name.
    res_to_grid = {'025deg': '1440x1080',
                   '01deg': '3600x2700',
                   '1deg': '360x300'}
    assert self.res in res_to_grid
    exe_res = res_to_grid[self.res]
    build_dir_wildcard = self.cice_src + '/build_*_' + exe_res + '_*p/*.exe'
    exename, r2 = self.copy_to_bin(self.cice_src, build_dir_wildcard,
                                   self.libaccessom2_src)
    return exename, r1 + r2
def build_mom(self, clean=False):
    """Build the MOM executable and copy it into bin_path.

    Note: the 'clean' arg does nothing.

    Returns:
        (exe_path_or_None, combined_return_code)
    """
    os.environ['ACCESS_OM_DIR'] = os.path.join(self.lab_path)
    os.environ['LIBACCESSOM2_ROOT'] = os.path.join(self.libaccessom2_src)
    mydir = os.getcwd()
    os.chdir(os.path.join(self.mom_src, 'exp'))
    try:
        r1 = sp.call(['./MOM_compile.csh', '--type', 'ACCESS-OM',
                      '--platform', 'nci', '--repro'])
    finally:
        # Bug fix: restore the working directory even if sp.call raises
        # (e.g. FileNotFoundError when the compile script is missing);
        # previously a raise here left the process in the exp directory.
        os.chdir(mydir)
    exename, r2 = self.copy_to_bin(self.mom_src,
                                   self.mom_src + '/exec/nci/ACCESS-OM/*.x',
                                   self.libaccessom2_src)
    return exename, r1 + r2
def build(self, clean=False):
    """Build the YATM, CICE and MOM executables for this experiment.

    Args:
        clean: forwarded to the individual build routines.

    Returns:
        (exes, status): list [yatm_exe, cice_exe, mom_exe] and the summed
        return codes (0 means full success). On an early YATM failure
        exes is None and status is the YATM return code.
    """
    self.yatm_exe, r1 = self.build_libaccessom2(clean)
    if r1 != 0:
        print('YATM build failed for exp {}'.format(self.exp_name),
              file=sys.stderr)
        # Bug fix: previously returned the bare int r1, which broke
        # callers that unpack 'exes, ret = helper.build()'.
        return None, r1
    # CICE and MOM failures are reported but do not abort, so the caller
    # sees every failing component's nonzero code in the sum.
    self.cice_exe, r2 = self.build_cice(clean)
    if r2 != 0:
        print('CICE build failed for exp {}'.format(self.exp_name),
              file=sys.stderr)
    self.mom_exe, r3 = self.build_mom(clean)
    if r3 != 0:
        print('MOM build failed for exp {}'.format(self.exp_name),
              file=sys.stderr)
    return [self.yatm_exe, self.cice_exe, self.mom_exe], r1 + r2 + r3
def run(self):
    """
    Run the experiment using payu and check output.
    Don't do any work if it has already run.
    """
    if not self.has_run():
        return self.force_run()
    return 0, None, None, None
def force_qsub_run(self):
    """
    Run using qsub

    Submits the run via 'payu run', waits for the PBS job and the
    follow-up collate job, and gathers their stdout/stderr files.

    Returns:
        (status, stdout, stderr, output_files) where status is
        0 success, 1 payu failed, 2 ambiguous stdout files,
        3 collate job id not found.
    """
    # Change to experiment directory and run.
    try:
        os.chdir(self.exp_path)
        sp.check_output(['payu', 'sweep', '--lab', self.lab_path])
        run_id = sp.check_output(['payu', 'run', '--lab', self.lab_path])
        # payu prints the qsub job id on its first output line.
        run_id = run_id.decode().splitlines()[0]
        os.chdir(self.my_path)
    except sp.CalledProcessError as err:
        os.chdir(self.my_path)
        print('Error: call to payu run failed.', file=sys.stderr)
        return 1, None, None, None
    wait_for_qsub(run_id)
    # Keep only the numeric part of the job id (strip the host suffix).
    run_id = run_id.split('.')[0]
    output_files = []
    # Read qsub stdout file
    stdout_filename = glob.glob(os.path.join(self.exp_path,
                                             '*.o{}'.format(run_id)))
    if len(stdout_filename) != 1:
        print('Error: there are too many stdout files.', file=sys.stderr)
        return 2, None, None, None
    stdout_filename = stdout_filename[0]
    output_files.append(stdout_filename)
    stdout = ''
    with open(stdout_filename, 'r') as f:
        stdout = f.read()
    # Read qsub stderr file
    stderr_filename = glob.glob(os.path.join(self.exp_path,
                                             '*.e{}'.format(run_id)))
    stderr = ''
    if len(stderr_filename) == 1:
        stderr_filename = stderr_filename[0]
        output_files.append(stderr_filename)
        with open(stderr_filename, 'r') as f:
            stderr = f.read()
    # Read the qsub id of the collate job from the stdout.
    # Payu puts this here.
    m = re.search(r'(\d+.gadi-pbs)\n', stdout)
    if m is None:
        print('Error: qsub id of collate job.', file=sys.stderr)
        return 3, stdout, stderr, output_files
    # Wait for the collate to complete.
    run_id = m.group(1)
    wait_for_qsub(run_id)
    # Return files created by qsub so caller can read or delete.
    collate_files = os.path.join(self.exp_path, '*.[oe]{}'.format(run_id))
    output_files += glob.glob(collate_files)
    return 0, stdout, stderr, output_files
def force_interactive_run(self):
    """
    Already in a PBS session, run interactively

    Returns:
        (status, None, None, None) with status 0 on success, 1 on a
        payu failure.
    """
    try:
        # Work from the experiment directory while payu runs.
        os.chdir(self.exp_path)
        sp.check_output(['payu', 'sweep', '--lab', self.lab_path])
        sp.check_output(['payu-run', '--lab', self.lab_path])
    except sp.CalledProcessError:
        os.chdir(self.my_path)
        print('Error: call to payu run failed.', file=sys.stderr)
        return 1, None, None, None
    return 0, None, None, None
def force_run(self):
    """
    Always try to run.

    Dispatches to the interactive runner inside a PBS session, otherwise
    submits via qsub.

    Returns:
        (status, stdout, stderr, output_files) from the chosen runner.
    """
    # An interactive PBS session exposes PBS_NODEFILE in the environment.
    # (Replaces a bare try/except around an unused lookup, which also
    # swallowed unrelated errors.)
    is_interactive = 'PBS_NODEFILE' in os.environ
    if is_interactive:
        return self.force_interactive_run()
    return self.force_qsub_run()
def build_and_run(self):
    """Build all executables, wire them into the payu config, then run."""
    exes, status = self.build()
    assert status == 0
    self.setup_for_programmatic_run(exes)
    self.force_run()
def setup_exp_from_base(base_exp_name, new_exp_name):
    """
    Create a new exp by copying the base config

    Copies the base experiment's control directory, then removes any
    stale archive directory and stale 'archive'/'work' symlinks so the
    new experiment starts clean.

    Returns:
        An ExpTestHelper for the new experiment.
    """
    base_exp = ExpTestHelper(base_exp_name)
    new_exp_path = os.path.join(base_exp.control_path, new_exp_name)
    if os.path.exists(new_exp_path):
        shutil.rmtree(new_exp_path)
    shutil.copytree(base_exp.exp_path, new_exp_path, symlinks=True)
    new_exp = ExpTestHelper(new_exp_name)
    if os.path.exists(new_exp.archive):
        shutil.rmtree(new_exp.archive)
    # Remove leftover payu links; missing ones are fine.
    for stale in ('archive', 'work'):
        try:
            os.remove(os.path.join(new_exp.control_path, stale))
        except OSError:
            pass
    return new_exp
def run_exp(exp_name, force=False):
    """Build and run the named experiment end-to-end.

    Args:
        exp_name: experiment to build and run.
        force: when True, run even if output already exists.

    Returns:
        The ExpTestHelper used for the run.
    """
    # (Removed an unused 'my_path' local that was computed but never read.)
    helper = ExpTestHelper(exp_name)
    exes, ret = helper.build()
    assert ret == 0
    helper.setup_for_programmatic_run(exes)
    if force:
        ret, qso, qse, qsub_files = helper.force_run()
    else:
        ret, qso, qse, qsub_files = helper.run()
    assert ret == 0
    return helper
|
A few emotive moments carry a clumsily plotted episode of Badlands' third season.
After a tedious fifth episode, Into the Badlands’ third season moves up the pace a little. Opening on one of the most stunning shots of the series so far, the audience is introduced to Black Wind, a morally-corrupt gambling den of a market town – where Bajie is naturally in his element.
Nick Frost’s sidekick continues to mildly entertain, while conveniently driving the plot forward with revelations at key moments, but his irksome characterisation is dulled in Black Wind, as his past literally catches up with him. It’s the first hint at a third dimension for the ex-Abbott, and while his ‘mysterious thief’ shtick has worn thin, Frost’s charm and the suitable setting allow Bajie to handle the spotlight better than expected.
Game of Thrones alum Dean-Charles Chapman has an equally tricky episode. The Widow, having captured the weakening Dark One, opts not to torture him for information but instead use him as a bargaining chip with Pilgrim (Babou Ceesay – the third season’s best addition) – which only serves to cement Castor’s status as someone whose only use is to be manipulated by real power. While this may be familiar territory for Chapman, he carries it well, and where his Baratheon-counterpart was young and foolish, Castor remains tormented yet defiant throughout.
Emily Beecham’s Widow – still with minimal screen time – nevertheless manages to bring out the most interesting moments in Amazon’s brutal, feudal drama. Gifting a brutalised Castor back to Pilgrim, and then momentarily distracted by the offer of her dark gift back, it’s unclear as to who is winning their war of political niceties. With her on-off alliance with Ally Ioannides’ daughter/regent ever-bubbling, one of the major hopes for the future is that their emotional baggage is dealt with.
Black Wind Howls often feels like a stepping stone between the awkwardly progress-driven fifth episode and whatever comes next. Pilgrim’s name appears no matter what desperate district Sunny finds himself in, building the suspense as to his origin and purpose fairly well. Along with this, Sunny’s flashbacks, however contrived, do provoke some interest as he realises he’s retracing his steps from decades ago, without any clue of a destination. These story arcs fit a little better into this episode than the last, and the chapter provides some of Badlands’ trademark complex fight scenes – one arms Sunny with an axe to keep things interesting – but it still struggles to hold onto any character depth while forcing unneeded mystery into the story at every turn.
|
from sqlalchemy.ext.declarative import declarative_base
# Wildcard import supplies Column, String, Integer and Float used below.
from sqlalchemy import *

# Shared declarative base class for the ORM models in this module.
TableBase = declarative_base()
class GoBase(TableBase):
    """ORM model for one stored Go game record in the 'training' table.

    Columns identify the raw game file (hash, format, source, path) and
    store basic game metadata (board size, rule set, komi, result,
    handicap).
    """
    __tablename__ = 'training'

    filehash = Column(String(44), primary_key=True)   # unique content hash
    fileformat = Column(String(10), default=None)     # e.g. game-record format
    filesource = Column(String(64), default=None)     # where the file came from
    rawfilepath = Column(String(256), nullable=False) # path to the raw file
    size = Column(Integer, nullable=False)            # board size
    rule = Column(String(32), nullable=False)         # rule set name
    komi = Column(Float, nullable=False)
    result = Column(Float, nullable=False)
    handicap = Column(Float, nullable=False)

    def __repr__(self):
        # Bug fix: referenced the non-existent attribute 'fileformatm',
        # which raised AttributeError whenever an instance was printed.
        return '<GoBase(filehash = %s, fileformat = %s, filesource = %s, rawfilepath = %s, ' \
               'size = %s, rule = %s, komi = %s, result = %s, handicap=%s)>' \
               % \
               (self.filehash, self.fileformat, self.filesource,
                self.rawfilepath, self.size, self.rule, self.komi, self.result, self.handicap)

    def __str__(self):
        return self.__repr__()
|
A mobile training game designed for patients with mild dementia.
We have partnered with Dr Dennis Seow, a consultant from the Department of Geriatric Medicine at Singapore General Hospital, to develop a series of interactive training games.
These games are meant for patients with dementia, memory and cognitive disorders. Intensive research and interviews have been conducted to understand the needs of dementia patients. Hence these games are designed to help patients improve their current condition (e.g. including daily life items that they can associate in the game).
This initiative is supported by Singapore Ministry of Health and co-funded by Economic Development Board.
Copyright © 2018 by Playtivate. All Rights Reserved.
|
#!/usr/bin/python
from homolog4 import Homolog
import os
import argparse
def parser_code():
    """Build and evaluate the command-line argument parser for this script."""
    cli = argparse.ArgumentParser()
    cli.add_argument("--input", "-i", default="./optimized_gene_block/",
                     help="optimized gene block ")
    cli.add_argument("--gene_name", "-g", default='gene_block_names_and_genes.txt',
                     help="the gene_block_names_and_genes that stores the name of the operon and its genes")
    cli.add_argument("--output", "-o", default="result/",
                     help="where the result be stored (result/)")
    cli.add_argument("-a", "--accession", default='tree/accession_to_common.csv',
                     help="Filter file, default as potential file, if NONE then not doing the parse filter")
    return cli.parse_args()
def get_accession(accession):
    """Read a CSV file mapping accession number -> common name.

    Args:
        accession: path to a two-column CSV file (accession,common_name).

    Returns:
        dict keyed by accession number.
    """
    # Fixes vs. the original: the file handle is now closed (context
    # manager), the builtin name 'dict' is no longer shadowed, and
    # blank/malformed lines are skipped instead of raising IndexError.
    mapping = {}
    with open(accession, 'r') as infile:
        for line in infile:
            parts = line.strip().split(',')
            if len(parts) < 2:
                continue
            mapping[parts[0]] = parts[1]
    return mapping
## parse the gene block names and genes txt file
def parse(operon_genes_dict):
result = {}
infile = open(operon_genes_dict,'r')
for line in infile.readlines():
line = line.strip().split()
result[line[0]] = line[1:]
return result
## Traverses the genome information directory
def traverseAll(path):
res=[]
for root,dirs,files in os.walk(path):
for f in files:
res.append(root+'/'+f)
return res
## given an operon file (astCADBE.txt), format the info into format easier to read
def formatOperon(operon,output,operon_genes_dict,accession_dict):
alphabet = 'abcdefghijklmnop'
operon_name = operon.split('/')[-1].split('.')[0]
genes = sorted(operon_genes_dict[operon_name])
outfile = open(output+operon_name,'w')
for i in range(len(genes)):
outfile.write(genes[i]+','+alphabet[i]+'\t')
outfile.write('\n')
result = {}
for line in [i.strip() for i in open(operon).readlines()]:
hlog = Homolog.from_blast(line)
accession = hlog.accession()[:-2]
start = str(hlog.start())
end = str(hlog.stop())
strand = str(hlog.strand())
gene_name = hlog.blast_annotation()
if accession in result:
result[accession].append([gene_name, start, end, strand])
else:
result[accession]=[[gene_name, start, end, strand]]
for species in accession_dict:
outfile.write(species+':')
if species in result:
for item in result[species]:
outfile.write(','.join(item)+'\t')
outfile.write('\n')
outfile.close()
if __name__ == "__main__":
    # Parse CLI options, load the lookup tables, then format every
    # optimized gene-block file into the result directory.
    args = parser_code()
    # Renamed locals: the originals shadowed the builtins 'input' and
    # (in the loop) 'file'.
    input_dir = args.input
    result_dir = args.output
    accession_file = args.accession
    operon_genes_dict = parse(args.gene_name)
    accession_dict = get_accession(accession_file)
    try:
        os.makedirs(result_dir)
    except OSError:
        # Bug fix: was a bare 'except:' which hid unrelated errors;
        # only a pre-existing directory is expected here.
        print ("Result dic already exists")
    # goes through all the file name in the optimized_gene_block dic
    for block_file in traverseAll(input_dir):
        formatOperon(block_file, result_dir, operon_genes_dict, accession_dict)
|
After a tremendous success in 2015, Vegfest – Ireland’s biggest vegan food and lifestyle festival – is coming back on September 11th 2016. Over a thousand visitors are expected at the Griffith Conference Centre, South Circular Road.
There will be diet and nutrition talks by experts, demonstrations, live music performances and lots more! Speakers include Bite Size Vegan Emily Moran Barwick and Fiona Oakes, elite marathon runner who broke the female elapsed time record for completing the Seven Continents and Polar Ice Cap Challenge.
Vegan or not vegan, everyone is welcome. The festival is sponsored by KoKo Dairy Free, Moodley Manor, Cornucopia Restaurant and Art Of Zen Foods.
Will Travel For Vegan Food – Kristin Lajeunesse: She’s Eaten at Every Single Vegan Restaurant in the United States!
Tickets (€8) are on sale here.
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.29 on 2021-04-18 20:57
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: adds the five 'regular_large*' variants
    (plain, anonymized, inverted, sharpened, sharpened_inverted) to the
    ThumbnailGroup model as optional CharFields.
    """

    dependencies = [
        ('astrobin_apps_images', '0012_rename_regular_crop_anonymized_to_story_crop'),
    ]

    operations = [
        migrations.AddField(
            model_name='thumbnailgroup',
            name='regular_large',
            field=models.CharField(blank=True, max_length=512, null=True),
        ),
        migrations.AddField(
            model_name='thumbnailgroup',
            name='regular_large_anonymized',
            field=models.CharField(blank=True, max_length=512, null=True),
        ),
        migrations.AddField(
            model_name='thumbnailgroup',
            name='regular_large_inverted',
            field=models.CharField(blank=True, max_length=512, null=True),
        ),
        migrations.AddField(
            model_name='thumbnailgroup',
            name='regular_large_sharpened',
            field=models.CharField(blank=True, max_length=512, null=True),
        ),
        migrations.AddField(
            model_name='thumbnailgroup',
            name='regular_large_sharpened_inverted',
            field=models.CharField(blank=True, max_length=512, null=True),
        ),
    ]
|
Aluminum Alloy 7178, comprised of copper and magnesium as its primary alloying elements, offers uncompromising strength and accommodates a number of different metalworking processes. These processes include machining, welding, and heat treating.
When annealed, Aluminum Alloy 7178 offers good machinability and also responds well to resistance welding. Heat treatment should be performed at 875 °F with a subsequent water quenching. After treatment, this alloy gains precipitation hardening which improves its mechanical properties. Heavier thicknesses are formable.
This alloy is preferred for many applications in the marine, construction and aerospace industries. Aluminum Alloy 7178 from Continental Steel and Tube Company meets or exceeds all relevant industry standards. These standards include all appropriate ASTM, QQ, SAE, and UNS specifications.
We offer Aluminum Alloy 7178 in coil form or as shapes. To learn more about this alloy or to determine which aluminum alloy will work best in your next project, please contact us directly.
|
#!/usr/bin/env python3
"""
Database utilities
Copyright (C) 2020 Anders Lowinger, anders@abundo.se
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
"""
import sys
import PyQt5.QtWidgets as QtWidgets
from logger import log
from settings import sett
import resource
import lib.db as db
def createQApplication():
    """Create and configure the application-wide QApplication instance.

    Sets organization/application identity (used by QSettings) and keeps
    the app alive when all windows are closed (tray-style behaviour).
    """
    application = QtWidgets.QApplication(sys.argv)
    application.setQuitOnLastWindowClosed(False)
    application.setOrganizationName("Abundo AB")
    application.setOrganizationDomain("abundo.se")
    application.setApplicationName("ErgoTime")
    return application
def openLocalDatabase2(dbname=None):
    """Open the local sqlite database, creating the schema if missing.

    Args:
        dbname: optional path to the sqlite database file. Bug fix: this
            argument was previously ignored; it now overrides the
            configured sett.localDatabaseName when given (the __main__
            smoke test below passes an explicit path).

    Returns:
        The connected db.Database instance.
    """
    dbconf = {"name": dbname if dbname is not None else sett.localDatabaseName}
    conn = db.Database(dbconf, driver="sqlite")
    conn.connect()
    log.info(f"Open local database {dbconf}")

    # One row per tracked time interval.
    sql = "CREATE TABLE IF NOT EXISTS report ("
    sql += "  _id INTEGER PRIMARY KEY, "
    sql += "  user_id INT NOT NULL default -1, "
    sql += "  activityid INT NOT NULL default -1, "
    sql += "  start TIMESTAMP NOT NULL, "
    sql += "  stop TIMESTAMP NOT NULL, "
    sql += "  comment TEXT NOT NULL default '', "
    sql += "  modified TIMESTAMP NOT NULL, "
    sql += "  seq INT NOT NULL default -1, "
    sql += "  deleted INT NOT NULL default 0, "
    sql += "  server_id INT NOT NULL default -1, "
    sql += "  updated INT NOT NULL default -1 "
    sql += ");"
    conn.execute(sql)

    # Activities that reports reference.
    sql = "CREATE TABLE IF NOT EXISTS activity ("
    sql += "  _id INTEGER PRIMARY KEY, "
    sql += "  name TEXT NOT NULL default '', "
    sql += "  description TEXT NOT NULL default '', "
    sql += "  project_id INT NOT NULL default -1, "
    sql += "  active INT NOT NULL default 0, "
    sql += "  server_id INT NOT NULL default -1 "
    sql += ");"
    conn.execute(sql)

    # Projects that activities belong to.
    sql = "CREATE TABLE IF NOT EXISTS project ("
    sql += "  _id INTEGER PRIMARY KEY, "
    sql += "  activity_id INT NOT NULL default -1, "
    sql += "  name TEXT NOT NULL default '', "
    sql += "  costcenter TEXT NOT NULL default '', "
    sql += "  active INT NOT NULL default 0 "
    sql += ");"
    conn.execute(sql)
    return conn
# Manual smoke test: open/create a database at a fixed Windows path.
if __name__ == "__main__":
    openLocalDatabase2("c:/temp/ergotime.db")
|
The only constant is the change. This statement stands right for websites as well. Since their advent in mid-nineties, sites have been undergoing drastic changes year on year.
Let us have a look at the top 10 website design trends in 2018.
A site is responsive when it realigns itself to match the screen size. The idea is to create an optimal experience for the user, irrespective of the viewing medium.
Responsive website designs started a few years ago and had seen continuous improvement since then.
2018 will see more websites moving towards responsive design, and the trend is here to stay for years to come.
Designs in solid colors are called flat designs. Websites with flat design do not have gradients and mostly use flat digital imagery.
Flat designs look fresh and simplistic. They are also clean and are used by almost all types of websites.
2018 will see an upward trend in the creation of flat designs for websites.
Asymmetric layouts started in 2017, and its popularity is growing in 2018 too. They look unique and thus become memorable.
Asymmetric layouts can be leveraged by new brands to stand out quickly from the big brands who still prefer generic designs.
Long scroll pages are suitable for both user interaction and SEO.
A user does not have to click too much (in cases never) to reach the next block of information.
Long scroll pages were immensely popular and would continue to be popular in 2018 as well.
Creative storytelling makes use of graphics, shapes, and words to narrate a story that is quickly absorbed by users.
Such stories are pleasing to eyes and remain with users for a more extended period.
There is no specific prescribed medium or style. You are free to create visual stories that effectively communicate the message.
Animated sliders feature the most important messaging of a website.
Such sliders could be anything from a set of pictures or video + pictures or a video over an image.
There is a lot of detailing possible, and these sliders are slowly becoming full-featured animation software in themselves.
The quality and detailing of sliders has seen an upward trend and will continue to be so in 2018.
Typography is another element that lends a specific character to a website.
Creative typography makes a mixed-use of multiple styles and sizes of fonts on the website or a specific section of the page.
As with everything else, it makes the page look attractive and distinct.
WordPress is already the champion of all content management systems, and it will remain so in 2018 as well.
With more and more new WordPress themes and plugins released in the market, you can create almost any type of website with WordPress.
Never before did we have such a strong contender to challenge custom web development.
While browsing the internet, every once in a while you come across a website that is clean and white and quite calming to your eyes.
Minimalist designs work on the theory of less is more.
Lesser the design and clutter more is the focus of the message.
Minimalist designs have become very popular in some niches such as graphic design and photography and are expected to be embraced by many other verticals.
With the mobile population exceeding the desktop, more people prefer to see websites on their mobiles.
Mobile usage has given rise to a trend where websites are created for mobiles and then adapted for the desktop.
The trend will only grow in 2018, and we will see more and more websites redesigned with a mobile-first approach.
Recommended reading: Cost of web design – how much and why?
|
from sklearn.datasets import fetch_20newsgroups
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.naive_bayes import MultinomialNB
from sklearn import metrics
from sklearn.pipeline import Pipeline
import numpy as np
import pandas as pd
import re
from os import listdir
from os.path import join,isdir
from sklearn.utils import resample
from sklearn.model_selection import cross_val_score
import pickle
from sklearn.utils import resample
class DataSet:
    """Top-level container for all annotators' data.

    Bug fix: the original also declared 'Annotators' as a mutable
    class-level attribute, which would be shared by every instance;
    only the per-instance list set in __init__ is kept.
    """

    def __init__(self):
        # One Annotator object per annotation directory.
        self.Annotators = []
class Annotator:
    """One human annotator: their source files and parsed documents.

    Bug fix: mutable class-level attribute defaults (shared across
    instances) were removed; __init__ always sets instance attributes.
    """

    def __init__(self):
        self.files = []      # paths of the .txt files this annotator produced
        self.documents = []  # parsed Document objects
        self.Name = ""       # annotator (directory) name
class Document:
    """One annotated document plus its document-level project marks.

    Bug fixes: mutable class-level attribute defaults (shared across
    instances) were removed, and a duplicated assignment of
    Project_Mark_Innovativeness_3A in __init__ was dropped.
    """

    def __init__(self):
        self.Text = ""          # full raw text of the document
        self.Lines = []         # Line objects with character spans
        self.DocumentName = ""  # file name of the .txt document
        self.DatabaseID = ""    # id parsed from the file name
        self.Annotations = []   # Annotation objects from the .ann file
        self.isSpam = False     # True when every project mark is zero
        # Document-level marks per evaluation criterion.
        self.Project_Mark_Objective_1A = 0
        self.Project_Mark_Objective_1B = 0
        self.Project_Mark_Objective_1C = 0
        self.Project_Mark_Actors_2A = 0
        self.Project_Mark_Actors_2B = 0
        self.Project_Mark_Actors_2C = 0
        self.Project_Mark_Outputs_3A = 0
        self.Project_Mark_Innovativeness_3A = 0
        # Derived booleans: a criterion is satisfied when its mark >= 1.
        self.isProjectObjectiveSatisfied = False
        self.isProjectActorSatisfied = False
        self.isProjectOutputSatisfied = False
        self.isProjectInnovativenessSatisfied = False
        # Predicted counterparts, filled in by the classifier.
        self.isProjectObjectiveSatisfied_predicted = False
        self.isProjectActorSatisfied_predicted = False
        self.isProjectOutputSatisfied_predicted = False
        self.isProjectInnovativenessSatisfied_predicted = False
class Line:
    """A single line of a document: its character span, text, and the
    annotations fully contained within it.

    Bug fix: mutable class-level attribute defaults (shared across
    instances) were removed; __init__ always sets instance attributes.
    """

    def __init__(self):
        self.StartSpan = 0     # character offset where the line starts
        self.EndSpan = 0       # character offset where the line ends
        self.Text = ""
        self.Sentences = []
        self.Tokens = []
        self.Annotations = []  # annotations fully contained in this line
class Sentence:
    """A sentence with its character span and attached annotations.

    Bug fix: mutable class-level attribute defaults (shared across
    instances) were removed; __init__ always sets instance attributes.
    """

    def __init__(self):
        self.SentenceText = ""
        self.StartSpan = -1   # character offset; -1 means unset
        self.EndSpan = -1     # character offset; -1 means unset
        self.Annotations = []
class Annotation:
    """A single text-span annotation loaded from a brat-style .ann file.

    The original defined these only as class-level attributes; they are
    now initialised per instance in __init__ (same names, same default
    values), so instances no longer fall back to shared class state.
    """

    def __init__(self):
        self.FromFile = ""        # .txt file the annotation belongs to
        self.FromAnnotator = ""   # name of the annotator who made it
        self.AnnotationText = ""  # annotated surface text
        self.StartSpan = -1       # character offset of span start; -1 unset
        self.EndSpan = -1         # character offset of span end; -1 unset
        self.HighLevelClass = ""  # coarse class (Objectives/Actors/...)
        self.LowLevelClass = ""   # raw label read from the .ann file
# Root folder containing one sub-directory per annotator.
data_folder = "../../../Helpers/FullDataset_Alina/"
ds = DataSet()
total_num_spam = 0   # documents whose project marks are all zero
sentences = []       # every text line gathered across all documents
total_num_files = 0
# job = aetros.backend.start_job('nikolamilosevic86/GloveModel')
# Each annotator has their own sub-directory of .txt/.ann files.
annotators = [f for f in listdir(data_folder) if isdir(join(data_folder, f))]
# Walk each annotator's directory, loading every document and its
# brat-style annotations into the DataSet object graph.
for ann in annotators:
    folder = data_folder + "/" + ann
    Annot = Annotator()
    Annot.Name = ann
    ds.Annotators.append(Annot)
    onlyfiles = [f for f in listdir(folder) if (f.endswith(".txt"))]
    for file in onlyfiles:
        Annot.files.append(data_folder + "/" + ann + '/' + file)
        doc = Document()
        total_num_files = total_num_files + 1
        doc.Lines = []
        # doc.Annotations = []
        doc.DocumentName = file
        Annot.documents.append(doc)
        # Files starting with 'a' or 't' are registered but not parsed.
        if (file.startswith('a') or file.startswith('t')):
            continue
        print file
        # File names look like '<prefix>_<databaseid>.txt'.
        doc.DatabaseID = file.split("_")[1].split(".")[0]
        fl = open(data_folder + "/" + ann + '/' + file, 'r')
        content = fl.read()
        doc.Text = content
        lines = content.split('\n')
        line_index = 0
        # Record every line's character span so annotations can later be
        # attached to the line that contains them.
        for line in lines:
            l = Line()
            l.StartSpan = line_index
            l.EndSpan = line_index + len(line)
            l.Text = line
            line_index = line_index + len(line) + 1
            sentences.append(line)
            doc.Lines.append(l)
        # Load the .ann file that accompanies the .txt file.
        an = open(data_folder + "/" + ann + '/' + file.replace(".txt", ".ann"), 'r')
        annotations = an.readlines()
        for a in annotations:
            # Strip discontinuous-span markers ('start;end').
            # NOTE(review): .replace(' ', ' ') appears to be a no-op;
            # it likely collapsed double spaces originally -- confirm.
            a = re.sub(r'\d+;\d+', '', a).replace(' ', ' ')
            split_ann = a.split('\t')
            if (split_ann[0].startswith("T")):
                # 'T' lines are text-span annotations:
                # id <TAB> class start end <TAB> text
                id = split_ann[0]
                sp_split_ann = split_ann[1].split(' ')
                low_level_ann = sp_split_ann[0]
                if low_level_ann == "ProjectMark":
                    continue
                span_start = sp_split_ann[1]
                span_end = sp_split_ann[2]
                ann_text = split_ann[2]
                Ann = Annotation()
                Ann.AnnotationText = ann_text
                Ann.StartSpan = int(span_start)
                Ann.EndSpan = int(span_end)
                Ann.FromAnnotator = Annot.Name
                Ann.FromFile = file
                Ann.LowLevelClass = low_level_ann
                # Map fine-grained sentence-level labels onto the four
                # coarse classes used for agreement and training.
                if (low_level_ann == "SL_Outputs_3a"):
                    Ann.HighLevelClass = "Outputs"
                if (
                        low_level_ann == "SL_Objective_1a" or low_level_ann == "SL_Objective_1b" or low_level_ann == "SL_Objective_1c"):
                    Ann.HighLevelClass = "Objectives"
                if (
                        low_level_ann == "SL_Actors_2a" or low_level_ann == "SL_Actors_2b" or low_level_ann == "SL_Actors_2c"):
                    Ann.HighLevelClass = "Actors"
                if (low_level_ann == "SL_Innovativeness_4a"):
                    Ann.HighLevelClass = "Innovativeness"
                doc.Annotations.append(Ann)
                # Attach the annotation to the line fully containing it.
                for line in doc.Lines:
                    if line.StartSpan <= Ann.StartSpan and line.EndSpan >= Ann.EndSpan:
                        line.Annotations.append(Ann)
            else:
                # Other lines carry document-level project marks
                # (integer scores per evaluation criterion).
                id = split_ann[0]
                sp_split_ann = split_ann[1].split(' ')
                mark_name = sp_split_ann[0]
                if (len(sp_split_ann) <= 2):
                    continue
                mark = sp_split_ann[2].replace('\n', '')
                # A criterion counts as satisfied when its mark >= 1.
                if (mark_name == "DL_Outputs_3a"):
                    doc.Project_Mark_Outputs_3A = int(mark)
                    if int(mark) >= 1:
                        doc.isProjectOutputSatisfied = True
                if (mark_name == "DL_Objective_1a"):
                    doc.Project_Mark_Objective_1A = int(mark)
                    if int(mark) >= 1:
                        doc.isProjectObjectiveSatisfied = True
                if (mark_name == "DL_Objective_1b" or mark_name == "DL_Objective"):
                    doc.Project_Mark_Objective_1B = int(mark)
                    if int(mark) >= 1:
                        doc.isProjectObjectiveSatisfied = True
                if (mark_name == "DL_Objective_1c"):
                    doc.Project_Mark_Objective_1C = int(mark)
                    if int(mark) >= 1:
                        doc.isProjectObjectiveSatisfied = True
                if (mark_name == "DL_Innovativeness_4a" or mark_name=="DL_Innovativeness"):
                    doc.Project_Mark_Innovativeness_3A = int(mark)
                    if int(mark) >= 1:
                        doc.isProjectInnovativenessSatisfied = True
                if (mark_name == "DL_Actors_2a" or mark_name=="DL_Actors"):
                    doc.Project_Mark_Actors_2A = int(mark)
                    if int(mark) >= 1:
                        doc.isProjectActorSatisfied = True
                if (mark_name == "DL_Actors_2b"):
                    doc.Project_Mark_Actors_2B = int(mark)
                    if int(mark) >= 1:
                        doc.isProjectActorSatisfied = True
                if (mark_name == "DL_Actors_2c"):
                    doc.Project_Mark_Actors_2C = int(mark)
                    if int(mark) >= 1:
                        doc.isProjectActorSatisfied = True
        # A document with every project mark at zero is treated as spam.
        # NOTE(review): Project_Mark_Actors_2B is tested twice -- one of
        # the two was presumably meant to be another field; confirm.
        if (
                doc.Project_Mark_Objective_1A == 0 and doc.Project_Mark_Objective_1B == 0 and doc.Project_Mark_Objective_1C == 0 and doc.Project_Mark_Actors_2A == 0
                and doc.Project_Mark_Actors_2B == 0 and doc.Project_Mark_Actors_2B == 0 and doc.Project_Mark_Actors_2C == 0 and doc.Project_Mark_Outputs_3A == 0
                and doc.Project_Mark_Innovativeness_3A == 0):
            doc.isSpam = True
            total_num_spam = total_num_spam + 1
# Bookkeeping for pairwise inter-annotator agreement. i/j index the
# annotator pair being compared.
i = 0
j = i + 1
kappa_files = 0        # number of shared documents compared
done_documents = []    # document names already compared (avoid recounting)
num_overlap_spam = 0   # documents where both annotators agree on spam
num_spam = 0           # total spam votes across both annotators
total_objectives = 0
total_outputs = 0
total_actors = 0
total_innovativeness = 0
# Per-line binary label vectors, concatenated across all shared
# documents; one pair of lists (annotator 1 vs annotator 2) per class.
ann1_annotations_objectives = []
ann2_annotations_objectives = []
ann1_annotations_actors = []
ann2_annotations_actors = []
ann1_annotations_outputs = []
ann2_annotations_outputs = []
ann1_annotations_innovativeness = []
ann2_annotations_innovativeness = []
# Counts of per-line label matches between the two annotators.
match_objectives = 0
match_outputs = 0
match_actors = 0
match_innovativeness = 0
# For every pair of annotators, compare the documents they both
# annotated: build per-line binary label vectors per class, count label
# matches, and tally spam-flag agreement.
while i < len(ds.Annotators) - 1:
    while j < len(ds.Annotators):
        annotator1 = ds.Annotators[i]
        annotator2 = ds.Annotators[j]
        for doc1 in annotator1.documents:
            for doc2 in annotator2.documents:
                if doc1.DocumentName == doc2.DocumentName and doc1.DocumentName not in done_documents:
                    done_documents.append(doc1.DocumentName)
                    line_num = 0
                    # One binary slot per line and per class, for each
                    # annotator in the pair.
                    ann1_objective = [0] * len(doc1.Lines)
                    ann2_objective = [0] * len(doc2.Lines)
                    ann1_output = [0] * len(doc1.Lines)
                    ann2_output = [0] * len(doc2.Lines)
                    ann1_actor = [0] * len(doc1.Lines)
                    ann2_actor = [0] * len(doc2.Lines)
                    ann1_innovativeness = [0] * len(doc1.Lines)
                    ann2_innovativeness = [0] * len(doc2.Lines)
                    while line_num < len(doc1.Lines):
                        if len(doc1.Lines[line_num].Annotations) > 0:
                            for a in doc1.Lines[line_num].Annotations:
                                if a.HighLevelClass == "Objectives":
                                    ann1_objective[line_num] = 1
                                    total_objectives = total_objectives + 1
                                if a.HighLevelClass == "Outputs":
                                    ann1_output[line_num] = 1
                                    total_outputs = total_outputs + 1
                                if a.HighLevelClass == "Actors":
                                    ann1_actor[line_num] = 1
                                    total_actors = total_actors + 1
                                if a.HighLevelClass == "Innovativeness":
                                    ann1_innovativeness[line_num] = 1
                                    total_innovativeness = total_innovativeness + 1
                                # Count a match when annotator 2 put the
                                # same class on the same line.
                                for a1 in doc2.Lines[line_num].Annotations:
                                    if a1.HighLevelClass == a.HighLevelClass:
                                        if a1.HighLevelClass == "Objectives":
                                            match_objectives = match_objectives + 1
                                        if a1.HighLevelClass == "Outputs":
                                            match_outputs = match_outputs + 1
                                        if a1.HighLevelClass == "Actors":
                                            match_actors = match_actors + 1
                                        if a1.HighLevelClass == "Innovativeness":
                                            match_innovativeness = match_innovativeness + 1
                        if len(doc2.Lines[line_num].Annotations) > 0:
                            for a in doc2.Lines[line_num].Annotations:
                                if a.HighLevelClass == "Objectives":
                                    ann2_objective[line_num] = 1
                                    total_objectives = total_objectives + 1
                                if a.HighLevelClass == "Outputs":
                                    ann2_output[line_num] = 1
                                    total_outputs = total_outputs + 1
                                if a.HighLevelClass == "Actors":
                                    ann2_actor[line_num] = 1
                                    total_actors = total_actors + 1
                                if a.HighLevelClass == "Innovativeness":
                                    ann2_innovativeness[line_num] = 1
                                    total_innovativeness = total_innovativeness + 1
                        line_num = line_num + 1
                    # Append this document's vectors to the global,
                    # concatenated per-class vectors.
                    ann1_annotations_outputs.extend(ann1_output)
                    ann2_annotations_outputs.extend(ann2_output)
                    ann1_annotations_objectives.extend(ann1_objective)
                    ann2_annotations_objectives.extend(ann2_objective)
                    ann1_annotations_actors.extend(ann1_actor)
                    ann2_annotations_actors.extend(ann2_actor)
                    ann1_annotations_innovativeness.extend(ann1_innovativeness)
                    ann2_annotations_innovativeness.extend(ann2_innovativeness)
                    print "Statistics for document:" + doc1.DocumentName
                    print "Annotators " + annotator1.Name + " and " + annotator2.Name
                    print "Spam by " + annotator1.Name + ":" + str(doc1.isSpam)
                    print "Spam by " + annotator2.Name + ":" + str(doc2.isSpam)
                    if (doc1.isSpam == doc2.isSpam):
                        num_overlap_spam = num_overlap_spam + 1
                    if doc1.isSpam:
                        num_spam = num_spam + 1
                    if doc2.isSpam:
                        num_spam = num_spam + 1
                    kappa_files = kappa_files + 1
        j = j + 1
    i = i + 1
    j = i + 1
# NOTE(review): `annotators` is presumably defined earlier in the script
# (not visible in this chunk); this just echoes it.
print annotators
# Flatten every annotator's documents into parallel arrays:
# doc_array rows are [text, objective?, actor?, output?, innovative?].
doc_array = []
text_array = []
objectives = []
actors = []
outputs = []
innovativeness = []
for ann in ds.Annotators:
    for doc in ann.documents:
        doc_array.append(
            [doc.Text, doc.isProjectObjectiveSatisfied, doc.isProjectActorSatisfied, doc.isProjectOutputSatisfied,
             doc.isProjectInnovativenessSatisfied])
        objectives.append(doc.isProjectObjectiveSatisfied)
        actors.append(doc.isProjectActorSatisfied)
        outputs.append(doc.isProjectOutputSatisfied)
        innovativeness.append(doc.isProjectInnovativenessSatisfied)
        text_array.append(doc.Text)
# Upsample the minority (innovative) class to balance the dataset for the
# (currently commented-out) ML experiments further below.
df = pd.DataFrame({'text':text_array,'classa':innovativeness})
df_majority = df[df.classa==0]
df_minority = df[df.classa==1]
df_minority_upsampled = resample(df_minority,
                                 replace=True,     # sample with replacement
                                 n_samples=160,    # to match majority class
                                 random_state=83293)   # reproducible results
df_upsampled = pd.concat([df_majority, df_minority_upsampled])
# Display new class counts
print df_upsampled.classa.value_counts()
# --- Rule classifier 1: direct innovation keywords ----------------------
# Predict "innovative" when the text mentions innovation/innovative/novelty,
# then score against the human label stored in sample[4].
TP = 0
FP = 0
FN = 0
classes = df_upsampled.classa
i = 0   # NOTE(review): counter is incremented but never used
innovative_1 = 0   # positive predictions of rule 1
innovative_2 = 0   # positive predictions of rule 2 (filled further below)
innovative_3 = 0   # positive predictions of rule 3 (filled further below)
for sample in doc_array:
    if "innovation" in sample[0] or "innovative" in sample[0] or "novelty" in sample[0]:
        innovative_1 = innovative_1 + 1
        if sample[4] == True:
            TP = TP+1
        if sample[4] == False:
            FP = FP+1
    else:
        if sample[4]==True:
            FN = FN + 1
    i = i + 1
# NOTE(review): these divisions raise ZeroDivisionError when the rule never
# fires (TP+FP == 0) or never misses (TP+FN == 0).
precision = float(TP)/float(TP+FP)
recall = float(TP)/float(TP+FN)
f_score = 2*precision*recall/(precision+recall)
print "Innovation rule classifier"
print "False positives:"+str(FP)
print "False negatives:"+str(FN)
print "True positive:"+str(TP)
print "Precision: "+str(precision)
print "Recall: "+str(recall)
print "F1-score: "+str(f_score)
# --- Rule classifier 2: novelty adjective + artefact noun ---------------
# Positive when the text contains BOTH a novelty word (new/novel/...) and
# an artefact word (method/product/...), anywhere in the document.
TP = 0
FP = 0
FN = 0
i = 0   # NOTE(review): counter is incremented but never used
for sample in doc_array:
    if ("new" in sample[0] or "novel" in sample[0] or "alternative" in sample[0] or "improved" in sample[0] or "cutting edge" in sample[0] or "better" in sample[0])\
        and ("method" in sample[0] or "product" in sample[0] or "service" in sample[0] or "application" in sample[0] or "technology" in sample[0] or "practice" in sample[0]):
        innovative_2 = innovative_2 +1
        if sample[4] == True:
            TP = TP+1
        if sample[4] == False:
            FP = FP+1
    else:
        if sample[4]==True:
            FN = FN + 1
    i = i + 1
precision = float(TP)/float(TP+FP)
recall = float(TP)/float(TP+FN)
f_score = 2*precision*recall/(precision+recall)
print "Other rule classifier"
print "False positives:"+str(FP)
print "False negatives:"+str(FN)
print "True positive:"+str(TP)
print "Precision: "+str(precision)
print "Recall: "+str(recall)
print "F1-score: "+str(f_score)
# --- Rule classifier 3: novelty word near an artefact word --------------
# Positive when a novelty word occurs within +/-500 characters of any
# occurrence of an artefact word.
TP = 0
FP = 0
FN = 0
i = 0   # NOTE(review): unused (and never incremented in this loop)
for sample in doc_array:
    isInnovative = False
    if ("method" in sample[0] or "product" in sample[0] or "service" in sample[0] or "application" in sample[0] or "technology" in sample[0] or "practice" in sample[0]):
        list_items = ["method","product","service","application","technology","practice"]
        index_list = []
        # Collect every occurrence position of every artefact word.
        for item in list_items:
            indexes = [m.start() for m in re.finditer(item, sample[0])]
            index_list.extend(indexes)
        # Scan a +/-500 character window around each occurrence.
        for index in index_list:
            end = len(sample[0])
            start = 0
            if index - 500>0:
                start = index - 500
            if index + 500<len(sample[0]):
                end = index + 500
            substr = sample[0][start:end]
            if ("new" in substr or "novel" in substr or "alternative" in substr or "improved" in substr or "cutting edge" in substr or "better" in substr):
                isInnovative = True
    if isInnovative:
        innovative_3 = innovative_3 + 1
        if sample[4] == True:
            TP = TP+1
        if sample[4] == False:
            FP = FP+1
    else:
        if sample[4]==True:
            FN = FN + 1
precision = float(TP)/float(TP+FP)
recall = float(TP)/float(TP+FN)
f_score = 2*precision*recall/(precision+recall)
print "Third rule classifier"
print "False positives:"+str(FP)
print "False negatives:"+str(FN)
print "True positive:"+str(TP)
print "Precision: "+str(precision)
print "Recall: "+str(recall)
print "F1-score: "+str(f_score)
# --- Rule classifier 4: union of rules 1 and 3 --------------------------
TP = 0
FP = 0
FN = 0
i = 0   # NOTE(review): unused
innovative_4 = 0
for sample in doc_array:
    isInnovative = False
    # Rule 1 part: direct innovation keywords.
    if "innovation" in sample[0] or "innovative" in sample[0] or "novelty" in sample[0]:
        isInnovative = True
    # Rule 3 part: novelty word within +/-500 chars of an artefact word.
    if ("method" in sample[0] or "product" in sample[0] or "service" in sample[0] or "application" in sample[0] or "technology" in sample[0] or "practice" in sample[0]):
        list_items = ["method","product","service","application","technology","practice"]
        index_list = []
        for item in list_items:
            indexes = [m.start() for m in re.finditer(item, sample[0])]
            index_list.extend(indexes)
        for index in index_list:
            end = len(sample[0])
            start = 0
            if index - 500>0:
                start = index - 500
            if index + 500<len(sample[0]):
                end = index + 500
            substr = sample[0][start:end]
            if ("new" in substr or "novel" in substr or "alternative" in substr or "improved" in substr or "cutting edge" in substr or "better" in substr):
                isInnovative = True
    if isInnovative:
        innovative_4 = innovative_4 + 1
        if sample[4] == True:
            TP = TP+1
        if sample[4] == False:
            FP = FP+1
    else:
        if sample[4]==True:
            FN = FN + 1
# NOTE(review): unlike rules 1-3, TP/FP/FN for rule 4 are accumulated but
# precision/recall/F1 are never computed or printed — confirm intent.
print ""
print "Innovative 1:"+str(innovative_1)
print "Innovative 2:"+str(innovative_2)
print "Innovative 3:"+str(innovative_3)
print "Innovative 4 (1+3):"+str(innovative_4)
#scores = cross_val_score(text_clf, df_upsampled.text, df_upsampled.classa, cv=10,scoring='f1')
# train = text_array[0:int(0.8*len(text_array))]
# train_Y = innovativeness[0:int(0.8*len(actors))]
#
# test = text_array[int(0.8*len(text_array)):]
# test_Y = innovativeness[int(0.8*len(actors)):]
#
# #categories = ['non actor', 'actor']
#
# text_clf = Pipeline([('vect', CountVectorizer()),
# ('tfidf', TfidfTransformer()),
# ('clf', MultinomialNB()),
# ])
#
# scores = cross_val_score(text_clf, df_upsampled.text, df_upsampled.classa, cv=10,scoring='f1')
# final = 0
# for score in scores:
# final = final + score
# print scores
# print "Final:" + str(final/10)
# text_clf.fit(train,train_Y)
#
# TP = 0
# FP = 0
# FN = 0
# i = 0
# outcome = text_clf.predict(test)
# for i in range(0,len(test)):
# if test_Y[i] == True and outcome[i] == True:
# TP = TP+1
# if test_Y[i] == False and outcome[i]==True:
# FP = FP+1
# if test_Y[i]==True and outputs[i]==False:
# FN = FN + 1
# i = i + 1
# precision = float(TP)/float(TP+FP)
# recall = float(TP)/float(TP+FN)
# f_score = 2*precision*recall/(precision+recall)
# print "ML based rule classifier"
# print "False positives:"+str(FP)
# print "False negatives:"+str(FN)
# print "True positive:"+str(TP)
# print "Precision: "+str(precision)
# print "Recall: "+str(recall)
# print "F1-score: "+str(f_score)
|
I headed downtown to class to find that it had been cancelled last minute so I wandered around for a bit. It’s gorgeous outside. I wish I had the time to take pictures.
The paper that’s been giving me hell for the past couple weeks suddenly got a lot easier. Suddenly resources have materialized out of nowhere and I actually have stuff to write about now. I don’t know how that happened. I’m not going to question it. I’ll just write.
Maybe this new development will help my insomnia. I’m not a good sleeper at the best of times but the past two nights have been awful.
I can’t wait for the semester to be over. I have a huge backlog of ideas for things I want to do that I don’t have time for.
This entry was posted on Monday, April 12th, 2010 at 11:27 pm and is filed under Uncategorized. You can follow any responses to this entry through the RSS 2.0 feed. You can leave a response, or trackback from your own site.
|
#!/usr/bin/env python
# example label.py
import pygtk
pygtk.require('2.0')
import gtk
class Labels:
    """PyGTK demo window showing the gtk.Label variants (normal,
    multi-line, left/right justified, wrapped, filled and underlined),
    each inside a gtk.Frame, arranged in two vertical columns.
    """

    def __init__(self):
        # Top-level window; destroying it ends the GTK main loop.
        self.window = gtk.Window(gtk.WINDOW_TOPLEVEL)
        self.window.connect("destroy", lambda w: gtk.main_quit())
        self.window.set_title("Label")
        # hbox holds two columns; each column is a vbox of framed labels.
        vbox = gtk.VBox(False, 5)
        hbox = gtk.HBox(False, 5)
        self.window.add(hbox)
        hbox.pack_start(vbox, False, False, 0)
        self.window.set_border_width(5)
        # Column 1: plain, multi-line and justified labels.
        frame = gtk.Frame("Normal Label")
        label = gtk.Label("This is a Normal label")
        frame.add(label)
        vbox.pack_start(frame, False, False, 0)
        frame = gtk.Frame("Multi-line Label")
        label = gtk.Label("This is a Multi-line label.\nSecond line\n"
                          "Third line")
        frame.add(label)
        vbox.pack_start(frame, False, False, 0)
        frame = gtk.Frame("Left Justified Label")
        label = gtk.Label("This is a Left-Justified\n"
                          "Multi-line label.\nThird line")
        label.set_justify(gtk.JUSTIFY_LEFT)
        frame.add(label)
        vbox.pack_start(frame, False, False, 0)
        frame = gtk.Frame("Right Justified Label")
        label = gtk.Label("This is a Right-Justified\nMulti-line label.\n"
                          "Fourth line, (j/k)")
        label.set_justify(gtk.JUSTIFY_RIGHT)
        frame.add(label)
        vbox.pack_start(frame, False, False, 0)
        # Column 2: wrapped, filled and underlined labels.
        vbox = gtk.VBox(False, 5)
        hbox.pack_start(vbox, False, False, 0)
        frame = gtk.Frame("Line wrapped label")
        label = gtk.Label("This is an example of a line-wrapped label. It "
                          "should not be taking up the entire "
                          "width allocated to it, but automatically "
                          "wraps the words to fit. "
                          "The time has come, for all good men, to come to "
                          "the aid of their party. "
                          "The sixth sheik's six sheep's sick.\n"
                          " It supports multiple paragraphs correctly, "
                          "and correctly adds "
                          "many extra spaces. ")
        label.set_line_wrap(True)
        frame.add(label)
        vbox.pack_start(frame, False, False, 0)
        frame = gtk.Frame("Filled, wrapped label")
        label = gtk.Label("This is an example of a line-wrapped, filled label. "
                          "It should be taking "
                          "up the entire width allocated to it. "
                          "Here is a sentence to prove "
                          "my point. Here is another sentence. "
                          "Here comes the sun, do de do de do.\n"
                          " This is a new paragraph.\n"
                          " This is another newer, longer, better "
                          "paragraph. It is coming to an end, "
                          "unfortunately.")
        label.set_justify(gtk.JUSTIFY_FILL)
        label.set_line_wrap(True)
        frame.add(label)
        vbox.pack_start(frame, False, False, 0)
        frame = gtk.Frame("Underlined label")
        label = gtk.Label("This label is underlined!\n"
                          "This one is underlined in quite a funky fashion")
        label.set_justify(gtk.JUSTIFY_LEFT)
        # The pattern string maps '_' to underlined characters, position by
        # position, over the label text above.
        label.set_pattern(
            "_________________________ _ _________ _ ______ __ _______ ___")
        frame.add(label)
        vbox.pack_start(frame, False, False, 0)
        self.window.show_all ()
def main():
    """Enter the GTK main loop; blocks until gtk.main_quit() is called."""
    gtk.main()
    return 0
# Build the demo window, then hand control to the GTK event loop.
if __name__ == "__main__":
    Labels()
    main()
|
As a Mom, cooking for my family is one of the many hats I wear. Generally, I like cooking; it’s the washing of utensils, pots and pans that I don’t like so much. Between me and my husband, whenever one has to cook, the other must wash the dishes — so guess who always ends up cooking.
Yesterday my little kitchen welcomed 3 scents of Bubble Man Dishwashing Liquid- Lemon, Antibac and Kalamansi.
We tested it immediately after I cooked our lunch. Our helper said it’s nice, smells good and can easily remove fat and oil. We tried using it as is (pure) and, of course, it’s stronger than when we added water to two drops of this soap. As per the product description, it is effective even at 3% dilution — that’s 30mL of the soap added to 1L of water. For normal household usage it could last you almost 5 days.
Even our cat, Ted, got curious and I think he approves of this product.
and degreaser, safe on virtually all surfaces (plastics, metal surfaces, ceramics, etc.).
– 97+% of the chemicals used are readily BIODEGRADABLE.
dryness. Sodium hydroxide is used usually for cheaper chemicals.
but is banned in many countries because of its harmful effects to our environment.
– Long-lasting bubbles help reduce sink water changeover.
I encourage your household to support Bubble Man, which means supporting Filipino entrepreneurs too!
Corp. for sending over sample products for review. Review written above is based on actual experience.
|
# -*-coding:utf-8-*-
# @auth ivan
# @time 2016-10-26 19:01:45
# @goal test for Business Delegate Pattern
class BusinessService:
    """Interface-like base class for concrete business services.

    Subclasses override :meth:`doProcessing`; the base implementation is a
    deliberate no-op.
    """

    def __init__(self):
        # No state at the base level.
        pass

    def doProcessing(self):
        # No-op; concrete services provide the real behaviour.
        pass
class EJBService(BusinessService):
    """Concrete service that simulates dispatching work to an EJB backend."""

    def __init__(self):
        # Nothing beyond the base-class initialisation.
        BusinessService.__init__(self)

    def doProcessing(self):
        """Announce that the task was handled by the EJB service."""
        print("Processing task by invoking EJB Service")
class JMSService(BusinessService):
    """Concrete service that simulates dispatching work to a JMS backend."""

    def __init__(self):
        # Nothing beyond the base-class initialisation.
        BusinessService.__init__(self)

    def doProcessing(self):
        """Announce that the task was handled by the JMS service."""
        print("Processing task by invoking JMS Service")
class BusinessLookUp:
    """Resolves a service-type string to a concrete BusinessService."""

    def getBusinessService(self, serviceType):
        """Return an EJBService for 'EJB' (case-insensitive), otherwise a
        JMSService."""
        if serviceType.upper() == 'EJB':
            service = EJBService()
        else:
            service = JMSService()
        return service
class BusinessDelegate:
    """Hides service lookup and invocation details from the client."""

    def __init__(self):
        self.lookupService = BusinessLookUp()
        self.businessService = None
        self.serviceType = ''

    def setServiceType(self, serviceType):
        """Record which backend the next doTask() call should use."""
        self.serviceType = serviceType

    def doTask(self):
        """Look up the currently configured service and run it."""
        service = self.lookupService.getBusinessService(self.serviceType)
        self.businessService = service
        service.doProcessing()
class Client:
    """Thin wrapper that forwards work requests to a delegate."""

    def __init__(self, businessService):
        # Any object exposing a doTask() method works here.
        self.businessService = businessService

    def doTask(self):
        """Forward the request to the wrapped delegate."""
        self.businessService.doTask()
class BusinessDelegatePatternDemo:
    """Drives the demo: run one task through EJB, then one through JMS."""

    def __init__(self):
        self.businessDelegate = None

    def run(self):
        """Create delegate + client and exercise both service types."""
        delegate = BusinessDelegate()
        self.businessDelegate = delegate
        client = Client(delegate)
        for service_type in ("EJB", "JMS"):
            delegate.setServiceType(service_type)
            client.doTask()
# Demo entry point: exercises the delegate with both service types.
B = BusinessDelegatePatternDemo()
B.run()
|
To Select Proper Size: Use a measurement of the largest circumference on the finger (usually at the PIP) to select the correct DIGI-SLEEVE™ size. If the circumference is near the small end of the range for a particular size, use the next smaller size to ensure sufficient pressure. For example: if the PIP circumference is 1¾", use the PETITE rather than the SMALL.
Each DIGI-SLEEVE is 18" long, enough for 6 to 9 fingers.
Packaging: One 18-inch long DIGI-SLEEVE per package. One 55-inch long DIGI-SLEEVE per box.
|
__all__ = ['ContactList']
import random
from contact import Contact
from print_colors import PrintColors
class ContactList(object):
    """Ordered container of Contact objects with O(1) lookup by id and by
    (remote_host, remote_port) address, plus positional access.
    """

    def __init__(self):
        self.items = []             # contacts in insertion order
        self.items_id_map = {}      # contact id -> Contact
        self.items_raddr_map = {}   # (remote_host, remote_port) -> Contact

    def __len__(self):
        return len(self.items)

    def __iter__(self):
        return iter(self.items)

    def add(self, c):
        """Register a contact and index it by id and by remote address.

        Raises ValueError if a non-bootstrap contact has no id, or if a
        second bootstrap contact with id=None is added.  Returns ``c``.
        """
        if c.id is None and not c.bootstrap:
            # Fixed previously garbled error message.
            raise ValueError('Contact id cannot be None if it is not a bootstrap node')
        if c.id is None and c.id in self.items_id_map:
            raise ValueError('Bootstrap contact with id=None is already known')
        self.items.append(c)
        self.items_id_map[c.id] = c
        self.items_raddr_map[c.remote_host, c.remote_port] = c
        return c

    def get(self, id_or_remote_address_or_idx):
        """Look up a contact by id (str/bytes), (host, port) pair/list,
        or list index (int).  Returns None when not found."""
        key = id_or_remote_address_or_idx
        if isinstance(key, (str, bytes)):
            # Missing ids simply yield None.
            return self.items_id_map.get(key)
        if isinstance(key, (tuple, list)):
            remote_host, remote_port = key
            return self.items_raddr_map.get((remote_host, remote_port))
        if isinstance(key, int):
            try:
                return self.items[key]
            except IndexError:
                return None
        # Unknown key type behaves like "not found", as before.
        return None

    def remove(self, c_or_id):
        """Remove a contact given either the Contact itself or its id.

        Returns the removed contact.  Raises ValueError/KeyError when the
        contact is unknown (unchanged behaviour).
        """
        if isinstance(c_or_id, Contact):
            c = c_or_id
            self.items.remove(c)
            del self.items_id_map[c.id]
        else:
            c = self.items_id_map.pop(c_or_id)
            self.items.remove(c)
        del self.items_raddr_map[c.remote_host, c.remote_port]
        return c

    def random(self, without_id=None):
        """Return one uniformly random contact, or None.

        None is returned when the list is empty OR when the single random
        pick happens to have id == without_id (one-shot exclusion — this
        does NOT retry to find a different contact).
        """
        if not self.items:
            return None
        c = random.choice(self.items)
        if c.id == without_id:
            return None
        return c

    def all(self, version=0, max_old=None):
        """Return every contact.

        ``version`` and ``max_old`` are accepted for API compatibility but
        not applied yet (FIXME); bootstrap contacts are always included.
        """
        # FIXME: filter non-bootstrap contacts by version and max_old.
        return list(self.items)
|
Trophy Performance Las Vegas. photos for trophy performance yelp. trophy performance 54 photos 41 reviews auto repair. independent porsche service center trophy performance. las vegas independent porsche service center trophy. the kardashians wiz khalifa and ty dolla ign at the. trophy performance 64 photos 37 reviews automotive. trophy performance 64 photos 38 reviews automotive. trophy performance over 40 combined years porsche. automobile brokerage leasing in las vegas automobile. mgm resorts denies rumor of bellagio fountains closure. [Dinarjat.com].
|
# -*- coding: utf-8 -*-
#
#
# Copyright (c) 2016 Sucros Clear Information Technologies PLC.
# All Rights Reserved.
#
# This program is free software: you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
from datetime import datetime
from openerp import api, fields, models
from openerp.addons.decimal_precision import decimal_precision as dp
from openerp.tools.translate import _
class IssueVoucherWizard(models.TransientModel):
    """Wizard that records petty cash expenditures as a payment voucher."""
    _name = 'account.pettycash.fund.voucher'
    # Bug fix: Odoo reads ``_description``; the previous ``_desc`` attribute
    # was silently ignored.
    _description = 'Petty Cash Fund Issue Voucher Wizard'

    @api.model
    def _get_fund(self):
        """Default the fund to the record the wizard was launched from."""
        fund_id = self.env.context.get('active_id', False)
        return fund_id

    # Fields
    #
    fund = fields.Many2one('account.pettycash.fund', required=True,
                           default=_get_fund)
    # Bug fix: ``default=datetime.today().date()`` was evaluated once at
    # import time, so long-running servers offered a stale date.
    # ``fields.Date.context_today`` is evaluated per record, in the user's
    # timezone.
    date = fields.Date(required=True, default=fields.Date.context_today)
    partner = fields.Many2one('res.partner')
    lines = fields.One2many('account.pettycash.fund.voucher.line', 'wizard')
    voucher = fields.Many2one('account.voucher')

    @api.multi
    def create_voucher(self):
        """Create an account.voucher debiting each expense line and
        crediting the fund journal's default credit account."""
        Vouchers = self.env['account.voucher']
        for wiz in self:
            lines = []
            total_lines = 0.0
            for line in wiz.lines:
                line_vals = {
                    'name': line.memo,
                    'type': 'dr',
                    'account_id': line.expense_account.id,
                    'amount': line.amount,
                }
                lines.append((0, 0, line_vals))
                total_lines += line.amount
            voucher_vals = {
                'name': _('Petty Cash Expenditure %s' % (wiz.date)),
                'journal_id': wiz.fund.journal.id,
                'account_id': wiz.fund.journal.default_credit_account_id.id,
                'amount': total_lines,
                'petty_cash_fund': wiz.fund.id,
                'partner_id': wiz.partner.id,
                'date': wiz.date,
                'type': 'payment',
                'audit': True,
            }
            # Reuse the voucher's onchange logic so derived values match
            # what the regular voucher form would compute.
            onchange_res = Vouchers.onchange_journal(
                wiz.fund.journal.id, [], False, wiz.partner.id, wiz.date,
                total_lines, 'payment', False)
            voucher_vals.update(onchange_res['value'])
            voucher_vals.update({'line_dr_ids': lines})
            wiz.voucher = Vouchers.create(voucher_vals)
        return
class IssueVoucherWizardLine(models.TransientModel):
    """One expense line of the petty cash voucher wizard."""
    _name = 'account.pettycash.fund.voucher.line'
    # Bug fix: Odoo reads ``_description``; the previous ``_desc`` attribute
    # was silently ignored.
    _description = 'Petty Cash Fund Issue Voucher Wizard Line'

    # Fields
    #
    wizard = fields.Many2one('account.pettycash.fund.voucher')
    expense_account = fields.Many2one(
        'account.account', required=True,
        domain=[('type', '=', 'other'), ('user_type.code', '=', 'expense')])
    amount = fields.Float(digits_compute=dp.get_precision('Product Price'),
                          required=True)
    memo = fields.Char()
|
A stunning Gray Mohini Georgette Palazzo Suit Online. Georgette top with Santoon inner and Georgette bottom, Chiffon Dupatta. 100% Original Company Product with the HIGH-QUALITY fabric material. Free Delivery in India.
|
# pylint: disable=W0611
'''
Kivy Base
=========
This module contains core Kivy functionality and is not intended for end users.
Feel free to look though it, but calling any of these methods directly may well
result in unpredictable behavior.
Event loop management
---------------------
'''
__all__ = (
'EventLoop',
'EventLoopBase',
'ExceptionHandler',
'ExceptionManagerBase',
'ExceptionManager',
'runTouchApp',
'stopTouchApp',
)
import sys
from kivy.config import Config
from kivy.logger import Logger
from kivy.utils import platform
from kivy.clock import Clock
from kivy.event import EventDispatcher
from kivy.lang import Builder
from kivy.context import register_context
# private vars
EventLoop = None
class ExceptionHandler(object):
    '''Base handler that catches exceptions in :func:`runTouchApp`.
    You can subclass and extend it as follows::
        class E(ExceptionHandler):
            def handle_exception(self, inst):
                Logger.exception('Exception caught by ExceptionHandler')
                return ExceptionManager.PASS
        ExceptionManager.add_handler(E())
    All exceptions will be set to PASS, and logged to the console!
    '''
    def __init__(self):
        pass
    def handle_exception(self, exception):
        '''Handle one exception. Defaults to returning
        ExceptionManager.RAISE, i.e. let the exception propagate.
        '''
        return ExceptionManager.RAISE
class ExceptionManagerBase:
    '''Keeps a stack of :class:`ExceptionHandler` instances and asks each
    of them, in registration order, how an exception should be treated.
    '''

    #: Policy: re-raise the exception (the default).
    RAISE = 0
    #: Policy: swallow the exception and keep running.
    PASS = 1

    def __init__(self):
        self.handlers = []
        self.policy = ExceptionManagerBase.RAISE

    def add_handler(self, cls):
        '''Add a new exception handler to the stack (duplicates ignored).'''
        if cls not in self.handlers:
            self.handlers.append(cls)

    def remove_handler(self, cls):
        '''Remove a previously registered exception handler (no-op when
        absent).'''
        if cls in self.handlers:
            self.handlers.remove(cls)

    def handle_exception(self, inst):
        '''Called when an exception occurs in the runTouchApp() main loop.
        Every handler is consulted; the result is PASS if any handler
        voted PASS, otherwise the manager's default policy.
        '''
        result = self.policy
        for handler in self.handlers:
            if handler.handle_exception(inst) == ExceptionManagerBase.PASS:
                result = ExceptionManagerBase.PASS
        return result
#: Instance of a :class:`ExceptionManagerBase` implementation, registered
#: per Kivy context so each context gets its own manager.
ExceptionManager = register_context('ExceptionManager', ExceptionManagerBase)
class EventLoopBase(EventDispatcher):
    '''Main event loop. This loop handles the updating of input and
    dispatching events.
    '''

    __events__ = ('on_start', 'on_pause', 'on_stop')

    def __init__(self):
        super(EventLoopBase, self).__init__()
        self.quit = False               # set True to leave run()
        self.input_events = []          # queued (etype, motion-event) tuples
        self.postproc_modules = []      # post-processing input filters
        self.status = 'idle'            # 'idle' -> 'started' -> 'stopped'/'closed'
        self.input_providers = []
        self.input_providers_autoremove = []
        self.event_listeners = []       # receivers of 'on_motion' dispatches
        self.window = None
        self.me_list = []               # motion events currently down/moving

    @property
    def touches(self):
        '''Return the list of all touches currently in down or move states.
        '''
        return self.me_list

    def ensure_window(self):
        '''Ensure that we have a window; exits the process if none can be
        created.
        '''
        import kivy.core.window  # NOQA
        if not self.window:
            Logger.critical('App: Unable to get a Window, abort.')
            sys.exit(1)

    def set_window(self, window):
        '''Set the window used for the event loop.
        '''
        self.window = window

    def add_input_provider(self, provider, auto_remove=False):
        '''Add a new input provider to listen for touch events.
        '''
        if provider not in self.input_providers:
            self.input_providers.append(provider)
            if auto_remove:
                self.input_providers_autoremove.append(provider)

    def remove_input_provider(self, provider):
        '''Remove an input provider.
        '''
        if provider in self.input_providers:
            self.input_providers.remove(provider)

    def add_event_listener(self, listener):
        '''Add a new event listener for getting touch events.
        '''
        if listener not in self.event_listeners:
            self.event_listeners.append(listener)

    def remove_event_listener(self, listener):
        '''Remove an event listener from the list.
        '''
        if listener in self.event_listeners:
            self.event_listeners.remove(listener)

    def start(self):
        '''Must be called only once before run().
        This starts all configured input providers.'''
        self.status = 'started'
        self.quit = False
        for provider in self.input_providers:
            provider.start()
        self.dispatch('on_start')

    def close(self):
        '''Exit from the main loop and stop all configured
        input providers.'''
        self.quit = True
        self.stop()
        self.status = 'closed'

    def stop(self):
        '''Stop all input providers and call callbacks registered using
        EventLoop.add_stop_callback().'''
        # XXX stop in reverse order that we started them!! (like push
        # pop), very important because e.g. wm_touch and WM_PEN both
        # store old window proc and the restore, if order is messed big
        # problem happens, crashing badly without error
        for provider in reversed(self.input_providers[:]):
            provider.stop()
            if provider in self.input_providers_autoremove:
                self.input_providers_autoremove.remove(provider)
                self.input_providers.remove(provider)
        # ensure any restart will not break anything later.
        self.input_events = []
        self.status = 'stopped'
        self.dispatch('on_stop')

    def add_postproc_module(self, mod):
        '''Add a postproc input module (DoubleTap, TripleTap, DeJitter
        RetainTouch are defaults).'''
        if mod not in self.postproc_modules:
            self.postproc_modules.append(mod)

    def remove_postproc_module(self, mod):
        '''Remove a postproc module.'''
        if mod in self.postproc_modules:
            self.postproc_modules.remove(mod)

    def remove_android_splash(self, *args):
        '''Remove android presplash in SDL2 bootstrap.'''
        try:
            from android import remove_presplash
            remove_presplash()
        except ImportError:
            Logger.error(
                'Base: Failed to import "android" module. '
                'Could not remove android presplash.')
            return

    def post_dispatch_input(self, etype, me):
        '''This function is called by dispatch_input() when we want to dispatch
        an input event. The event is dispatched to all listeners and if
        grabbed, it's dispatched to grabbed widgets.
        '''
        # update available list
        if etype == 'begin':
            self.me_list.append(me)
        elif etype == 'end':
            if me in self.me_list:
                self.me_list.remove(me)
        # dispatch to listeners
        if not me.grab_exclusive_class:
            for listener in self.event_listeners:
                listener.dispatch('on_motion', etype, me)
        # dispatch grabbed touch
        me.grab_state = True
        for _wid in me.grab_list[:]:
            # it's a weakref, call it!
            wid = _wid()
            if wid is None:
                # object is gone, stop.
                me.grab_list.remove(_wid)
                continue
            root_window = wid.get_root_window()
            if wid != root_window and root_window is not None:
                # Convert the event into the grabbed widget's coordinate
                # space; me.push()/pop() preserve the original coordinates.
                me.push()
                w, h = root_window.system_size
                if platform == 'ios' or root_window._density != 1:
                    w, h = root_window.size
                kheight = root_window.keyboard_height
                smode = root_window.softinput_mode
                me.scale_for_screen(w, h, rotation=root_window.rotation,
                                    smode=smode, kheight=kheight)
                parent = wid.parent
                # and do to_local until the widget
                try:
                    if parent:
                        me.apply_transform_2d(parent.to_widget)
                    else:
                        me.apply_transform_2d(wid.to_widget)
                        me.apply_transform_2d(wid.to_parent)
                except AttributeError:
                    # when using inner window, an app have grab the touch
                    # but app is removed. the touch can't access
                    # to one of the parent. (i.e, self.parent will be None)
                    # and BAM the bug happen.
                    me.pop()
                    continue
            me.grab_current = wid
            wid._context.push()
            if etype == 'begin':
                # don't dispatch again touch in on_touch_down
                # a down event are nearly uniq here.
                # wid.dispatch('on_touch_down', touch)
                pass
            elif etype == 'update':
                if wid._context.sandbox:
                    with wid._context.sandbox:
                        wid.dispatch('on_touch_move', me)
                else:
                    wid.dispatch('on_touch_move', me)
            elif etype == 'end':
                if wid._context.sandbox:
                    with wid._context.sandbox:
                        wid.dispatch('on_touch_up', me)
                else:
                    wid.dispatch('on_touch_up', me)
            wid._context.pop()
            me.grab_current = None
            if wid != root_window and root_window is not None:
                me.pop()
        me.grab_state = False

    def _dispatch_input(self, *ev):
        # remove the saved event for the touch if it exists, so the queue
        # keeps only the latest (etype, me) tuple per touch
        if ev in self.input_events:
            self.input_events.remove(ev)
        self.input_events.append(ev)

    def dispatch_input(self):
        '''Called by idle() to read events from input providers, pass events to
        postproc, and dispatch final events.
        '''
        # first, aquire input events
        for provider in self.input_providers:
            provider.update(dispatch_fn=self._dispatch_input)
        # execute post-processing modules
        for mod in self.postproc_modules:
            self.input_events = mod.process(events=self.input_events)
        # real dispatch input
        input_events = self.input_events
        pop = input_events.pop
        post_dispatch_input = self.post_dispatch_input
        while input_events:
            post_dispatch_input(*pop(0))

    def idle(self):
        '''This function is called after every frame. By default:
        * it "ticks" the clock to the next frame.
        * it reads all input and dispatches events.
        * it dispatches `on_update`, `on_draw` and `on_flip` events to the
          window.
        Returns the value of ``self.quit``.
        '''
        # update dt
        Clock.tick()
        # read and dispatch input from providers
        self.dispatch_input()
        # flush all the canvas operation
        Builder.sync()
        # tick before draw
        Clock.tick_draw()
        # flush all the canvas operation
        Builder.sync()
        window = self.window
        if window and window.canvas.needs_redraw:
            window.dispatch('on_draw')
            window.dispatch('on_flip')
        # don't loop if we don't have listeners !
        if len(self.event_listeners) == 0:
            Logger.error('Base: No event listeners have been created')
            Logger.error('Base: Application will leave')
            self.exit()
            return False
        return self.quit

    def run(self):
        '''Main loop'''
        while not self.quit:
            self.idle()
        self.exit()

    def exit(self):
        '''Close the main loop and close the window.'''
        self.close()
        if self.window:
            self.window.close()

    def on_stop(self):
        '''Event handler for `on_stop` events which will be fired right
        after all input providers have been stopped.'''
        pass

    def on_pause(self):
        '''Event handler for `on_pause` which will be fired when
        the event loop is paused.'''
        pass

    def on_start(self):
        '''Event handler for `on_start` which will be fired right
        after all input providers have been started.'''
        pass
#: EventLoop instance — the module-wide singleton used by runTouchApp()
#: and stopTouchApp().
EventLoop = EventLoopBase()
def _run_mainloop():
    '''If no window has been created, this will be the executed mainloop.'''
    keep_running = True
    while keep_running:
        try:
            EventLoop.run()
            stopTouchApp()
            keep_running = False
        except BaseException as inst:
            # Give registered exception handlers first say; only RAISE
            # terminates the loop and propagates the exception.
            decision = ExceptionManager.handle_exception(inst)
            if decision == ExceptionManager.RAISE:
                stopTouchApp()
                raise
            # PASS: swallow the exception and keep looping.
def runTouchApp(widget=None, slave=False):
    '''Static main function that starts the application loop.
    You can access some magic via the following arguments:
    :Parameters:
        `<empty>`
            To make dispatching work, you need at least one
            input listener. If not, application will leave.
            (MTWindow act as an input listener)
        `widget`
            If you pass only a widget, a MTWindow will be created
            and your widget will be added to the window as the root
            widget.
        `slave`
            No event dispatching is done. This will be your job.
        `widget + slave`
            No event dispatching is done. This will be your job but
            we try to get the window (must be created by you beforehand)
            and add the widget to it. Very useful for embedding Kivy
            in another toolkit. (like Qt, check kivy-designer)
    '''
    from kivy.input import MotionEventFactory, kivy_postproc_modules
    # Ok, we got one widget, and we are not in slave mode
    # so, user don't create the window, let's create it for him !
    if widget:
        EventLoop.ensure_window()
    # Instance all configured input providers from the [input] config
    # section; each value is "<provider_id>[,<provider args>]".
    for key, value in Config.items('input'):
        Logger.debug('Base: Create provider from %s' % (str(value)))
        # split value
        args = str(value).split(',', 1)
        if len(args) == 1:
            args.append('')
        provider_id, args = args
        provider = MotionEventFactory.get(provider_id)
        if provider is None:
            Logger.warning('Base: Unknown <%s> provider' % str(provider_id))
            continue
        # create provider
        p = provider(key, args)
        if p:
            EventLoop.add_input_provider(p, True)
    # add postproc modules
    for mod in list(kivy_postproc_modules.values()):
        EventLoop.add_postproc_module(mod)
    # add main widget
    if widget and EventLoop.window:
        if widget not in EventLoop.window.children:
            EventLoop.window.add_widget(widget)
    # start event loop
    Logger.info('Base: Start application main loop')
    EventLoop.start()
    # remove presplash on the next frame
    if platform == 'android':
        Clock.schedule_once(EventLoop.remove_android_splash)
    # we are in a slave mode, don't do dispatching.
    if slave:
        return
    # in non-slave mode, there are 2 cases
    #
    # 1. if user created a window, call the mainloop from window.
    #    This is due to glut, it need to be called with
    #    glutMainLoop(). Only FreeGLUT got a gluMainLoopEvent().
    #    So, we are executing the dispatching function inside
    #    a redisplay event.
    #
    # 2. if no window is created, we are dispatching event loop
    #    ourself (previous behavior.)
    #
    try:
        if EventLoop.window is None:
            _run_mainloop()
        else:
            EventLoop.window.mainloop()
    finally:
        stopTouchApp()
def stopTouchApp():
    '''Stop the current application by leaving the main loop.

    No-op when the event loop does not exist or was never started.
    '''
    if EventLoop is None or EventLoop.status != 'started':
        return
    Logger.info('Base: Leaving application in progress...')
    EventLoop.close()
|
Mini is one of those makes like Porsche that have a certain look. Like Porsche, the style is the brand itself, so Mini isn't going to be releasing anything radically different in design for the foreseeable future, and the design style they have has been in place since 2001.
There's only so much you can do with a standard design style, but a stretch limo with a Jacuzzi (doesn't the water slosh out?) seems to be within the parameters of possibility.
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from collections import defaultdict
import logging
from course_blocks.api import get_course_blocks
from course_widget.grades import get_progress
from courseware import grades
from courseware.courses import get_course_by_id
from courseware.views.views import is_course_passed
from django.contrib.auth.models import User
from django.core.validators import MaxValueValidator
from django.db import models
from django.db.models import Sum, Count
from django.http import Http404
from django.utils import timezone
from django_countries.fields import CountryField
import lms.lib.comment_client as cc
from model_utils.models import TimeStampedModel
from opaque_keys.edx.keys import CourseKey
from openedx.core.djangoapps.content.course_overviews.models import CourseOverview
from requests import ConnectionError
from student.models import CourseEnrollment
from track.backends.django import TrackingLog
from xmodule.modulestore.django import modulestore
from xmodule_django.models import CourseKeyField
# Seconds of inactivity after which a session is considered idle; it is also
# the cap on time counted between two tracking events and the time credited
# for the last event of a session (see get_day_times_spent below).
IDLE_TIME = 900
# Django auth group name — presumably gates access to the analytics
# dashboards; the check happens outside this module. TODO confirm.
ANALYTICS_ACCESS_GROUP = "Triboo Analytics Users"
log = logging.getLogger('lt_analytics')
# Instance attributes excluded from the UnicodeMixin debug representation.
DISPLAY_EXCLUDE = ['_state', 'modified']
class CourseStatus(object):
    """Enumeration of per-learner course completion states.

    The integer values are stored in the ``status`` columns of the daily
    report models below; ``verbose_names`` is indexed by those values
    (note that ``finished`` displays as 'Successful' and ``failed`` as
    'Unsuccessful').
    """
    not_started = 0
    in_progress = 1
    finished = 2
    failed = 3
    # Display labels, indexed by the status codes above.
    verbose_names = ['Not Started', 'In Progress', 'Successful', 'Unsuccessful']
def get_day_limits(day=None, offset=0):
    """Return (day_start, day_end) datetimes delimiting a calendar day.

    ``day`` defaults to the current time; ``offset`` shifts the reference
    by that many days. ``day_end`` is exclusive (midnight of the next day).
    """
    reference = (day or timezone.now()) + timezone.timedelta(days=offset)
    day_start = reference.replace(hour=0, minute=0, second=0, microsecond=0)
    return day_start, day_start + timezone.timedelta(days=1)
def format_time_spent(seconds):
    """Render a duration given in seconds as an "H:MM:SS" string."""
    hours, remainder = divmod(seconds, 3600)
    minutes, secs = divmod(remainder, 60)
    return "%d:%02d:%02d" % (hours, minutes, secs)
def format_badges(badges_earned, badges_possible):
    """Render an earned/possible badge pair as the string "E / P"."""
    return "%s / %s" % (badges_earned, badges_possible)
def get_badges(report_badges):
    """Parse an "E / P" badge string back into an (earned, possible) int pair.

    Returns (0, 0) when the string does not split into exactly two parts.
    """
    parts = report_badges.split(" / ")
    if len(parts) != 2:
        return 0, 0
    return int(parts[0]), int(parts[1])
class UnicodeMixin(object):
    """Mixin giving models a debug-friendly ``__unicode__`` built from ``__dict__``.

    Python 2 only: relies on ``dict.iteritems`` and the ``unicode`` builtin.
    """
    def __unicode__(self):
        # Collect the instance's fields, skipping Django internals listed in
        # DISPLAY_EXCLUDE and formatting datetime values as strings.
        result = {}
        for k, v in self.__dict__.iteritems():
            if k not in DISPLAY_EXCLUDE:
                # NOTE(review): '%H:%S' formats hour:second with no minutes —
                # looks like a typo for '%H:%M', preserved as-is (debug output
                # only).
                result[k] = v.strftime('%Y-%m-%d %H:%S %Z') if isinstance(v, timezone.datetime) else v
        return unicode(result)
class ReportMixin(object):
    """Query helpers shared by the daily report models below."""

    @classmethod
    def filter_by_day(cls, day=None, **kwargs):
        """Return the queryset of rows created on ``day`` (default: today)."""
        start, end = get_day_limits(day)
        filters = dict(kwargs, created__gte=start, created__lt=end)
        return cls.objects.filter(**filters)

    @classmethod
    def get_by_day(cls, day=None, **kwargs):
        """Return the single row created on ``day``, or None if there is none."""
        start, end = get_day_limits(day)
        try:
            return cls.objects.get(created__gte=start, created__lt=end, **kwargs)
        except cls.DoesNotExist:
            return None
def get_day_times_spent(day_start, day_end):
    """Compute per-user time spent from tracking logs within a day window.

    Returns {user_id: [(tracking_log, seconds), ...]} where ``seconds`` is
    the gap to the next event of the same user, capped at IDLE_TIME; the
    last event of a user is always credited IDLE_TIME.
    """
    day_logs = TrackingLog.objects.filter(
        time__gte=day_start, time__lt=day_end).exclude(
        user_id=None).only('event_type', 'time', 'user_id', 'agent')

    per_user = defaultdict(list)
    for entry in day_logs:
        per_user[entry.user_id].append(entry)

    times_spent = {}
    for uid, entries in per_user.iteritems():
        entries.sort(key=lambda e: e.time)
        pairs = []
        # Credit each event the (capped) gap to the following event...
        for current, following in zip(entries[:-1], entries[1:]):
            gap = (following.time - current.time).total_seconds()
            pairs.append((current, min(gap, IDLE_TIME)))
        # ...and the final event a full idle period.
        pairs.append((entries[-1], IDLE_TIME))
        times_spent[uid] = pairs
    return times_spent
def generate_today_reports():
    """Generate all of today's analytics reports, in dependency order.

    Visit and learner-course reports are built first; the remaining report
    types are all aggregations over the day's learner-course reports.
    """
    LearnerVisitsDailyReport.generate_today_reports()
    LearnerCourseDailyReport.generate_today_reports()
    course_reports = LearnerCourseDailyReport.filter_by_day().prefetch_related('user__profile')
    for report_cls in (LearnerDailyReport, CourseDailyReport,
                       MicrositeDailyReport, CountryDailyReport):
        report_cls.generate_today_reports(course_reports)
class ReportLog(UnicodeMixin, TimeStampedModel):
    """Bookkeeping row recording when each daily report type last ran.

    One row per day; each field holds the timestamp at which the
    corresponding report type finished generating, or None if it has not
    run that day.
    """

    class Meta(object):
        app_label = 'lt_analytics'
        get_latest_by = 'created'

    # Completion timestamps per report type (None until generated that day).
    learner_visit = models.DateTimeField(default=None, null=True)
    learner_course = models.DateTimeField(default=None, null=True)
    learner = models.DateTimeField(default=None, null=True)
    course = models.DateTimeField(default=None, null=True)
    microsite = models.DateTimeField(default=None, null=True)
    country = models.DateTimeField(default=None, null=True)

    @classmethod
    def get_latest_dt(cls):
        """Return the creation time of the latest fully-completed run, or None."""
        try:
            report = cls.objects.filter(learner_visit__isnull=False,
                                        learner_course__isnull=False,
                                        learner__isnull=False,
                                        course__isnull=False,
                                        microsite__isnull=False,
                                        country__isnull=False).latest()
            return report.created
        except cls.DoesNotExist:
            return None

    @classmethod
    def update_or_create(cls, **kwargs):
        """Set the given timestamp field(s) on today's row, creating it if needed."""
        today_start, today_end = get_day_limits()
        cls.objects.update_or_create(created__gte=today_start,
                                     created__lt=today_end,
                                     defaults=kwargs)
class LearnerVisitsDailyReport(UnicodeMixin, ReportMixin, TimeStampedModel):
    """Per-user, per-course, per-device daily time-spent report."""

    class Meta(object):
        app_label = 'lt_analytics'
        get_latest_by = "created"
        unique_together = ('created', 'user', 'course_id', 'device')
        index_together = ['created', 'user', 'course_id', 'device']

    user = models.ForeignKey(User, null=False)
    # Nullable: a visit may not be tied to any specific course.
    course_id = CourseKeyField(max_length=255, null=True)
    org = models.CharField(max_length=255, db_index=True, null=True, default=None)
    device = models.CharField(max_length=255, null=False)
    # Accumulated seconds spent during the reported day.
    time_spent = models.PositiveIntegerField(default=0)

    @classmethod
    def generate_day_reports(cls, day=None):
        """Build reports for the day *before* ``day`` from raw tracking logs."""
        previous_day_start, previous_day_end = get_day_limits(day=day, offset=-1)
        previous_day_times_spent = get_day_times_spent(previous_day_start, previous_day_end)
        for uid, one_user_times_spent in previous_day_times_spent.iteritems():
            cls.update_or_create(uid, one_user_times_spent, day)

    @classmethod
    def generate_today_reports(cls):
        """Generate reports for yesterday's visits and record the run time."""
        cls.generate_day_reports()
        ReportLog.update_or_create(learner_visit=timezone.now())

    @classmethod
    def update_or_create(cls, user_id, one_user_times_spent, day=None):
        """Aggregate one user's (tracking_log, seconds) pairs and persist them.

        ``one_user_times_spent`` is a list of (tracking_log, seconds) pairs
        as produced by get_day_times_spent(); pairs are grouped by
        (course_id, device) before writing.
        """
        # group visit by course_id, device, accumulate time_spent
        reports = defaultdict(lambda: 0)
        for visit, time_spent in one_user_times_spent:
            reports[(visit.course_id, visit.device)] += time_spent
        today_start, today_end = get_day_limits(day)
        for (course_id, device), time_spent in reports.iteritems():
            org = course_id.org if course_id != CourseKeyField.Empty else None
            if day:
                # NOTE(review): when backfilling a past day, ``created=day``
                # is both a lookup filter and the creation value — confirm it
                # interacts as intended with the created__gte/__lt filters
                # and TimeStampedModel's auto-set ``created``.
                cls.objects.update_or_create(user_id=user_id,
                                             course_id=course_id,
                                             org=org,
                                             device=device,
                                             created__gte=today_start,
                                             created__lt=today_end,
                                             created=day,
                                             defaults={'time_spent': int(time_spent)})
            else:
                cls.objects.update_or_create(user_id=user_id,
                                             course_id=course_id,
                                             org=org,
                                             device=device,
                                             created__gte=today_start,
                                             created__lt=today_end,
                                             defaults={'time_spent': int(time_spent)})
class LearnerCourseDailyReport(UnicodeMixin, ReportMixin, TimeStampedModel):
    """Daily per-learner, per-course progress snapshot."""

    class Meta(object):
        app_label = 'lt_analytics'
        get_latest_by = 'created'
        unique_together = ('user', 'course_id', 'created')
        index_together = ['user', 'course_id', 'created']

    user = models.ForeignKey(User, null=False)
    course_id = CourseKeyField(max_length=255, db_index=True, null=False)
    org = models.CharField(max_length=255, db_index=True, null=False)
    # One of the CourseStatus codes.
    status = models.PositiveSmallIntegerField(help_text="not started: 0; in progress: 1; finished: 2; failed: 3; ",
                                              default=0)
    current_score = models.PositiveSmallIntegerField(default=0, validators=[MaxValueValidator(100)])
    # "earned / possible" string, see format_badges()/get_badges().
    badges = models.CharField(max_length=20, default="0 / 0")
    posts = models.IntegerField(default=0)
    progress = models.PositiveSmallIntegerField(default=0, validators=[MaxValueValidator(100)])
    total_time_spent = models.PositiveIntegerField(default=0)
    enrollment_date = models.DateTimeField(default=None, null=True, blank=True)
    completion_date = models.DateTimeField(default=None, null=True, blank=True)

    @classmethod
    def generate_today_reports(cls):
        """Create/update today's report for every active enrollment in a started course."""
        enrollments = CourseEnrollment.objects.filter(is_active=True).prefetch_related('user')
        overviews = CourseOverview.objects.filter(start__lte=timezone.now()).only('id')
        course_ids = set([o.id for o in overviews])
        for enrollment in enrollments:
            if enrollment.course_id in course_ids:
                cls.update_or_create(enrollment)
        ReportLog.update_or_create(learner_course=timezone.now())

    @classmethod
    def update_or_create(cls, enrollment):
        """
        create today's summary. Call multiple times with the same parameters will only create one.
        Args:
            enrollment: CourseEnrollment object
        Returns: summary object.
        """
        course_id = enrollment.course_id
        user = enrollment.user
        today_start, today_end = get_day_limits()
        with modulestore().bulk_operations(course_id):
            if user.is_active:
                try:
                    course = get_course_by_id(course_id, None)
                except Http404:
                    return
                # All-time total over every recorded visit of this user+course.
                total_time_spent = (LearnerVisitsDailyReport.objects.filter(
                    user=user, course_id=course_id).aggregate(
                    Sum('time_spent')).get('time_spent__sum') or 0)
                course_structure = get_course_blocks(user, course.location)
                courseware_summary = grades.progress_summary(user, course, course_structure)
                grade_summary = grades.grade(user, course, course_structure=course_structure)
                if (not courseware_summary) or (not grade_summary):
                    # BUGFIX: the original message was
                    # 'course: {} does not have progress info, skip.' with the
                    # placeholder never filled in; log the course id lazily.
                    log.warning('course: %s does not have progress info, skip.', course_id)
                    return
                progress = get_progress(user, course, courseware_summary, grade_summary, False)
                passed = is_course_passed(course, grade_summary)
                if progress['progress'] == 100:
                    status = CourseStatus.finished if passed else CourseStatus.failed
                else:
                    try:
                        last_report = cls.objects.exclude(created__gte=today_start, created__lt=today_end).filter(
                            course_id=course_id, user=user).latest()
                    except cls.DoesNotExist:
                        last_report = None
                    if last_report and last_report.status == CourseStatus.in_progress:
                        # if last time, it's already in progress, so keep it
                        status = last_report.status
                    else:
                        # if not, based on if he visit the course page to say if it started
                        if total_time_spent > 0 or progress['progress'] > 0:
                            status = CourseStatus.in_progress
                        else:
                            status = CourseStatus.not_started
                try:
                    cc_user = cc.User(id=user.id, course_id=course.id).to_dict()
                    posts = cc_user.get('comments_count', 0) + cc_user.get('threads_count', 0)
                except (cc.CommentClient500Error, cc.CommentClientRequestError, ConnectionError):
                    # Forum service unavailable: fall back to zero posts.
                    posts = 0
                cls.objects.update_or_create(
                    user=user, course_id=course_id,
                    created__gte=today_start, created__lt=today_end,
                    defaults={'org': course_id.org,
                              'progress': progress['progress'],
                              'badges': format_badges(progress['nb_trophies_earned'], progress['nb_trophies_possible']),
                              'current_score': progress['current_score'],
                              'enrollment_date': enrollment.created,
                              'completion_date': enrollment.completed,
                              'status': status,
                              'total_time_spent': total_time_spent,
                              'posts': posts})
class LearnerDailyReport(UnicodeMixin, ReportMixin, TimeStampedModel):
    """Daily per-learner, per-organization aggregate of that learner's course reports."""

    class Meta(object):
        app_label = 'lt_analytics'
        get_latest_by = "created"
        unique_together = ('created', 'user', 'org')
        index_together = ['created', 'user', 'org']

    user = models.ForeignKey(User, null=False)
    org = models.CharField(max_length=255, db_index=True, null=False)
    # Number of course enrollments aggregated into this row.
    enrollments = models.PositiveSmallIntegerField(default=0)
    # Mean current_score over completed (finished or failed) courses.
    average_final_score = models.PositiveSmallIntegerField(default=0)
    badges = models.CharField(max_length=20, default="0 / 0")
    posts = models.IntegerField(default=0)
    # Per-status course counts.
    finished = models.SmallIntegerField(default=0, verbose_name=CourseStatus.verbose_names[CourseStatus.finished])
    failed = models.SmallIntegerField(default=0, verbose_name=CourseStatus.verbose_names[CourseStatus.failed])
    not_started = models.SmallIntegerField(default=0, verbose_name=CourseStatus.verbose_names[CourseStatus.not_started])
    in_progress = models.SmallIntegerField(default=0, verbose_name=CourseStatus.verbose_names[CourseStatus.in_progress])
    # NOTE(review): never written by update_or_create() below — presumably
    # populated elsewhere; confirm before relying on it.
    country = models.CharField(default='', max_length=255)
    total_time_spent = models.PositiveIntegerField(default=0)

    @classmethod
    def generate_today_reports(cls, learner_course_reports):
        """Group the day's learner-course reports by (user, org), one row each."""
        reports_by_user_org = defaultdict(list)
        for report in learner_course_reports:
            reports_by_user_org[(report.user_id, report.org)].append(report)
        for (user_id, org), reports in reports_by_user_org.iteritems():
            cls.update_or_create(user_id, org, reports)
        ReportLog.update_or_create(learner=timezone.now())

    @classmethod
    def update_or_create(cls, user_id, org, learner_course_reports):
        """Aggregate one learner's course reports for ``org`` into today's row."""
        enrollments = len(learner_course_reports)
        total_score = 0
        nb_completed_courses = 0
        posts = 0
        badges_earned = 0
        badges_possible = 0
        finished = 0
        failed = 0
        in_progress = 0
        not_started = 0
        for report in learner_course_reports:
            posts += report.posts
            earned, possible = get_badges(report.badges)
            badges_earned += earned
            badges_possible += possible
            if report.status == CourseStatus.finished:
                finished += 1
            elif report.status == CourseStatus.failed:
                failed += 1
            elif report.status == CourseStatus.in_progress:
                in_progress += 1
            elif report.status == CourseStatus.not_started:
                not_started += 1
            # Only completed (finished/failed) courses count towards the average.
            if report.status in [CourseStatus.finished, CourseStatus.failed]:
                total_score += report.current_score
                nb_completed_courses += 1
        average_final_score = 0
        if nb_completed_courses > 0:
            # Python 2 integer division: the average is truncated to an int.
            average_final_score = total_score / nb_completed_courses
        # All-time total over every visit of this user (not restricted to org).
        total_time_spent = (LearnerVisitsDailyReport.objects.filter(user_id=user_id).aggregate(
            Sum('time_spent')).get('time_spent__sum') or 0)
        today_start, today_end = get_day_limits()
        cls.objects.update_or_create(
            user_id=user_id,
            org=org,
            created__gte=today_start,
            created__lt=today_end,
            defaults={'enrollments': enrollments,
                      'average_final_score': average_final_score,
                      'total_time_spent': total_time_spent,
                      'posts': posts,
                      'badges': format_badges(badges_earned, badges_possible),
                      'finished': finished,
                      'failed': failed,
                      'in_progress': in_progress,
                      'not_started': not_started})
class CourseDailyReport(UnicodeMixin, ReportMixin, TimeStampedModel):
    """Daily per-course aggregate of the learners' course reports."""

    class Meta(object):
        app_label = 'lt_analytics'
        get_latest_by = "created"
        unique_together = ('course_id', 'created')
        index_together = ['course_id', 'created']

    course_id = CourseKeyField(max_length=255, db_index=True, null=False)
    enrollments = models.PositiveIntegerField(default=0)
    # Mean current_score over completed (finished or failed) enrollments.
    average_final_score = models.PositiveSmallIntegerField(default=0)
    posts = models.IntegerField(default=0)
    # Per-status enrollment counts.
    finished = models.SmallIntegerField(default=0, verbose_name=CourseStatus.verbose_names[CourseStatus.finished])
    failed = models.SmallIntegerField(default=0, verbose_name=CourseStatus.verbose_names[CourseStatus.failed])
    in_progress = models.SmallIntegerField(default=0, verbose_name=CourseStatus.verbose_names[CourseStatus.in_progress])
    not_started = models.SmallIntegerField(default=0, verbose_name=CourseStatus.verbose_names[CourseStatus.not_started])
    # Mean total_time_spent (seconds) over completed enrollments.
    average_complete_time = models.PositiveIntegerField(default=0)

    @classmethod
    def generate_today_reports(cls, learner_course_reports):
        """Group the day's learner-course reports by course, one row each."""
        reports_by_course = defaultdict(list)
        for report in learner_course_reports:
            reports_by_course[report.course_id].append(report)
        for course_id, reports in reports_by_course.iteritems():
            cls.update_or_create(course_id, reports)
        ReportLog.update_or_create(course=timezone.now())

    @classmethod
    def update_or_create(cls, course_id, learner_course_reports):
        """Aggregate one course's learner reports into today's row."""
        total_score = 0
        nb_completed_courses = 0
        posts = 0
        finished = 0
        failed = 0
        in_progress = 0
        not_started = 0
        total_time = 0
        for report in learner_course_reports:
            posts += report.posts
            status = report.status
            if status == CourseStatus.finished:
                finished += 1
            elif status == CourseStatus.in_progress:
                in_progress += 1
            elif status == CourseStatus.not_started:
                not_started += 1
            elif status == CourseStatus.failed:
                failed += 1
            # Score and completion time only accumulate for completed courses.
            if status in [CourseStatus.finished, CourseStatus.failed]:
                total_score += report.current_score
                total_time += report.total_time_spent
                nb_completed_courses += 1
        average_final_score = average_complete_time = 0
        if nb_completed_courses > 0:
            # Python 2 integer division: both averages are truncated ints.
            average_final_score = total_score / nb_completed_courses
            average_complete_time = total_time / nb_completed_courses
        today_start, today_end = get_day_limits()
        cls.objects.update_or_create(
            course_id=course_id,
            created__gte=today_start,
            created__lt=today_end,
            defaults={'enrollments': len(learner_course_reports),
                      'average_final_score': average_final_score,
                      'posts': posts,
                      'finished': finished,
                      'failed': failed,
                      'in_progress': in_progress,
                      'not_started': not_started,
                      'average_complete_time': average_complete_time})
class MicrositeDailyReport(UnicodeMixin, ReportMixin, TimeStampedModel):
    """Daily per-organization (microsite) aggregate report."""

    class Meta:
        app_label = 'lt_analytics'
        get_latest_by = "created"
        unique_together = ('created', 'org')
        index_together = ['created', 'org']

    org = models.CharField(max_length=255, null=False)
    # Distinct enrolled users / courses seen in the day's reports.
    users = models.PositiveIntegerField(default=0)
    courses = models.PositiveIntegerField(default=0)
    finished = models.PositiveIntegerField(default=0)
    unique_visitors = models.PositiveIntegerField(default=0)
    # Mean total_time_spent (seconds) over the day's learner-course reports.
    average_time_spent = models.PositiveIntegerField(default=0)
    total_time_spent_on_mobile = models.PositiveIntegerField(default=0)
    total_time_spent_on_desktop = models.PositiveIntegerField(default=0)

    @classmethod
    def generate_today_reports(cls, learner_course_reports):
        """Group the day's learner-course reports by org, one row each."""
        reports_by_org = defaultdict(list)
        for report in learner_course_reports:
            reports_by_org[report.org].append(report)
        for org, reports in reports_by_org.iteritems():
            cls.update_or_create(org, reports)
        ReportLog.update_or_create(microsite=timezone.now())

    @classmethod
    def update_or_create(cls, org, learner_course_reports):
        """Aggregate one org's learner-course reports into today's row."""
        users = set()
        courses = set()
        finished = 0
        total_time_spent = 0
        time_count = 0
        for report in learner_course_reports:
            users.add(report.user_id)
            courses.add(unicode(report.course_id))
            if report.status == CourseStatus.finished:
                finished += 1
            total_time_spent += report.total_time_spent
            time_count += 1
        average_time_spent = total_time_spent / time_count if time_count else 0
        # All-time per-device totals summed over every course of the org.
        total_time_spent_on_mobile = 0
        total_time_spent_on_desktop = 0
        for course_id in courses:
            course_key = CourseKey.from_string(course_id)
            total_time_spent_on_mobile += (LearnerVisitsDailyReport.objects.filter(
                course_id=course_key, device="mobile").aggregate(
                Sum('time_spent')).get('time_spent__sum') or 0)
            total_time_spent_on_desktop += (LearnerVisitsDailyReport.objects.filter(
                course_id=course_key, device="desktop").aggregate(
                Sum('time_spent')).get('time_spent__sum') or 0)
        unique_visitors = (LearnerVisitsDailyReport.filter_by_day(org=org).aggregate(
            Count('user_id', distinct=True)).get('user_id__count') or 0)
        today_start, today_end = get_day_limits()
        cls.objects.update_or_create(
            org=org,
            created__gte=today_start,
            created__lt=today_end,
            defaults={'users': len(users),
                      'courses': len(courses),
                      'finished': finished,
                      'unique_visitors': unique_visitors,
                      'average_time_spent': average_time_spent,
                      'total_time_spent_on_mobile': total_time_spent_on_mobile,
                      'total_time_spent_on_desktop': total_time_spent_on_desktop})

    @classmethod
    def update_or_create_unique_visitors(cls, day, org):
        """Backfill the unique_visitors figure for ``day`` / ``org``."""
        day_start, day_end = get_day_limits(day)
        unique_visitors = (LearnerVisitsDailyReport.filter_by_day(day=day, org=org).aggregate(
            Count('user_id', distinct=True)).get('user_id__count') or 0)
        # BUGFIX: replaced a leftover Python 2 debug ``print`` statement
        # (a SyntaxError under Python 3) with a logger call.
        log.debug("day=%s, org=%s", day, org)
        cls.objects.update_or_create(
            org=org,
            created__gte=day_start,
            created__lt=day_end,
            defaults={'created': day, 'unique_visitors': unique_visitors})

    @classmethod
    def get_unique_visitors_csv_data(cls, org):
        """Return "DD-MM-YYYY,count" records for this org, oldest first."""
        unique_visitors_csv_data = ""
        unique_visitors = cls.objects.filter(org=org).values('created', 'unique_visitors').order_by('created')
        for uv in unique_visitors:
            # NOTE(review): the separator is a literal backslash-n sequence
            # (two characters), presumably consumed by a client-side charting
            # library — confirm before "fixing" it to a real newline.
            unique_visitors_csv_data += "%s,%d\\n" % (uv['created'].strftime('%d-%m-%Y'), uv['unique_visitors'])
        return unique_visitors_csv_data
class CountryDailyReport(UnicodeMixin, ReportMixin, TimeStampedModel):
    """Daily per-organization count of distinct learners grouped by country."""

    class Meta:
        app_label = 'lt_analytics'
        get_latest_by = "created"
        unique_together = ('created', 'org', 'country')
        index_together = ['created', 'org']

    org = models.CharField(max_length=255, null=False)
    country = CountryField(null=True)
    nb_users = models.PositiveIntegerField(default=0)

    @classmethod
    def generate_today_reports(cls, learner_course_reports):
        """Group the day's learner-course reports by org and build per-country rows."""
        reports_by_org = defaultdict(list)
        for report in learner_course_reports:
            reports_by_org[report.org].append(report)
        for org, reports in reports_by_org.iteritems():
            cls.update_or_create(org, reports)
        ReportLog.update_or_create(country=timezone.now())

    @classmethod
    def update_or_create(cls, org, learner_course_reports):
        """Count distinct users per profile country for ``org`` and persist today's rows."""
        users_by_country = defaultdict(int)
        # PERF: the original tracked seen users in a list, making the
        # deduplication O(n^2); a set gives O(1) membership tests.
        seen_users = set()
        for report in learner_course_reports:
            if report.user.id not in seen_users:
                seen_users.add(report.user.id)
                users_by_country[report.user.profile.country] += 1
        # Hoisted out of the loop: the day limits are loop-invariant.
        today_start, today_end = get_day_limits()
        for country, nb_users in users_by_country.iteritems():
            cls.objects.update_or_create(
                org=org,
                country=country,
                created__gte=today_start,
                created__lt=today_end,
                defaults={'nb_users': nb_users})
|
marking the answer sheet “Yes” for Heads and “No” for Tails.
Old Man McQuillan walked into a bar and ordered martini after martini, each time removing the olives and placing them in a jar. When the jar was filled with olives and all the drinks consumed, the Irishman started to leave.
An Irishman and his wife entered the dentist’s office.
“Show him your bad tooth, honey,” said the man to his wife.
“Well, she spoke without interruption for about forty years,” said the Irishman.
|
import global_functions
from classes.shopping_list import ShoppingList
class User(object):
    """An application user who owns a collection of shopping lists."""

    def __init__(self, username, password, firstname, lastname):
        """
        Attributes:
            username (str): A unique name to identify user.
            password_hash (str): SHA1 hash of the user's password.
            firstname (str): The user's first name.
            lastname (str): The user's last name.
            shopping_lists (dict): Maps str(list id) -> ShoppingList.
            id: Random identifier for this user.
        :arg
            username (str): A unique name to identify user.
            password (str): A secret phrase to authenticate a user.
            firstname (str): The user's first name.
            lastname (str): The user's last name.
        """
        self.username = username
        # Store only the hash, never the clear-text password.
        self.password_hash = global_functions.sha1_hash(password)
        self.firstname = firstname
        self.lastname = lastname
        self.shopping_lists = dict()
        self.id = global_functions.get_random_id()

    def create_shopping_list(self, title=None):
        """ Creates a new ShoppingList object
        :arg
            title: The caption of the shoppinglist
        :returns
            str: id of the new shoppinglist that has been created,
            otherwise an error message
        """
        # BUGFIX: validate the type before calling len(); the original
        # called len(title) first, raising TypeError for non-string titles.
        if title is None:
            return "shopping list must have a title"
        if not isinstance(title, str):
            return "shopping list title must be a string"
        if len(title) < 1:
            return "shopping list must have a title"
        # Titles are unique per user, case-insensitively.
        for shoppinglist in self.shopping_lists.values():
            if title.lower() == shoppinglist.title.lower():
                return "Shopping list `" + title + "` already exists"
        new_list = ShoppingList(title)
        # add the new shopping list object to the list of shoppinglists
        # owned by current user
        self.shopping_lists[str(new_list.id)] = new_list
        # BUGFIX: return the id as documented (nothing was returned before).
        return str(new_list.id)

    def remove_shopping_list(self, shopping_list_id):
        """ Deletes the selected shoppinglist object from memory
        :arg
            shopping_list_id (int): The id of the shoppinglist
        :returns
            True if the shoppinglist has been deleted successfully,
            otherwise return
            error message
        """
        if not isinstance(shopping_list_id, int):
            return "Shopping list id should be an Integer"
        # BUGFIX: the original iterated over dict *keys* (strings), called
        # .id on them (AttributeError), and `del`eted only a local name —
        # so no entry was ever removed. Keys are stored as str(id), so
        # delete the dict entry directly.
        key = str(shopping_list_id)
        if key in self.shopping_lists:
            del self.shopping_lists[key]
            return True
        return "Shopping list does not exist"

    def list_shopping_lists(self):
        """
        :returns
            list: Returns a list of the titles of all shoppinglists
            owned by current user
        """
        return [shoppinglist.title for shoppinglist in self.shopping_lists.values()]
|
When disaster strikes, we help people build better lives for themselves, and for others. We take on issues like land rights, climate change and discrimination against women. And we won’t stop until every person on the planet can enjoy life free from poverty.
The Oxfam International Secretariat is currently headquartered in Nairobi, with offices in Oxford, Washington DC, New York, Brussels, Geneva, and a liaison office with the Africa Union in Addis Ababa.
Oxfam is committed to preventing any type of unwanted behavior at work including sexual harassment, exploitation and abuse, lack of integrity and financial misconduct; and committed to promoting the welfare of children, young people and adults.
Our next Land Rights Advocacy Lead will require a range of skills and experience.
You will have significant experience of successful policy, advocacy and research work in a campaigning context and demonstrable experience of impactful work on land rights issues/campaigns and experience of working on cases involving large companies and/or development finance institutes.
You will have experience in people management- supporting your team’s professional development, as well as informal leadership skills- being able to influence effectively in a matrixed environment.
Experience of developing and managing budgets is also a prerequisite.
You will also have demonstrable ability to ‘speak truth to power’, addressing and analysing power imbalances.
This is an exciting opportunity for an experienced policy advocate to lead a globally diverse team to achieve change at national, regional and global levels in both the public and private sector in support of land rights.
|
# -*- coding: utf-8 -*-
import re
from .._globals import IDENTITY
from .base import BaseAdapter
class SAPDBAdapter(BaseAdapter):
    """pyDAL adapter for SAP DB / MaxDB.

    Maps pyDAL field types to SAP DB column types and implements the
    dialect-specific pieces: sequence naming, LIMIT/OFFSET emulation via
    ROWNO subqueries, and sequence-backed autoincrement ids.
    """
    drivers = ('sapdb',)

    support_distributed_transaction = False
    # pyDAL abstract field type -> SAP DB column type template.
    types = {
        'boolean': 'CHAR(1)',
        'string': 'VARCHAR(%(length)s)',
        'text': 'LONG',
        'json': 'LONG',
        'password': 'VARCHAR(%(length)s)',
        'blob': 'LONG',
        'upload': 'VARCHAR(%(length)s)',
        'integer': 'INT',
        'bigint': 'BIGINT',
        'float': 'FLOAT',
        'double': 'DOUBLE PRECISION',
        'decimal': 'FIXED(%(precision)s,%(scale)s)',
        'date': 'DATE',
        'time': 'TIME',
        'datetime': 'TIMESTAMP',
        'id': 'INT PRIMARY KEY',
        'reference': 'INT, FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'list:integer': 'LONG',
        'list:string': 'LONG',
        'list:reference': 'LONG',
        'big-id': 'BIGINT PRIMARY KEY',
        'big-reference': 'BIGINT, FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        }

    def sequence_name(self,table):
        # Name of the sequence backing a table's id column: "<table>_id_Seq".
        return (self.QUOTE_TEMPLATE + '_id_Seq') % table

    def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
        # Emulate LIMIT/OFFSET with a ROWNO-numbered subquery.
        if limitby:
            (lmin, lmax) = limitby
            if len(sql_w) > 1:
                sql_w_row = sql_w + ' AND w_row > %i' % lmin
            else:
                sql_w_row = 'WHERE w_row > %i' % lmin
            # NOTE(review): the inner filter is 'WHERE ROWNO=%i' with lmax —
            # an equality, not a range (<=). Confirm against MaxDB ROWNO
            # semantics; preserved as-is from the original.
            return '%s %s FROM (SELECT w_tmp.*, ROWNO w_row FROM (SELECT %s FROM %s%s%s) w_tmp WHERE ROWNO=%i) %s %s %s;' % (sql_s, sql_f, sql_f, sql_t, sql_w, sql_o, lmax, sql_t, sql_w_row, sql_o)
        return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o)

    def create_sequence_and_triggers(self, query, table, **args):
        # Create the id sequence and make the id column default to its
        # NEXTVAL, then run the CREATE TABLE query itself.
        # following lines should only be executed if table._sequence_name does not exist
        self.execute('CREATE SEQUENCE %s;' % table._sequence_name)
        self.execute("ALTER TABLE %s ALTER COLUMN %s SET DEFAULT NEXTVAL('%s');" \
                     % (table._tablename, table._id.name, table._sequence_name))
        self.execute(query)

    # Connection URI format: user[:password]@host[:port]/db[?sslmode=...]
    REGEX_URI = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<host>\[[^/]+\]|[^\:@]+)(\:(?P<port>[0-9]+))?/(?P<db>[^\?]+)(\?sslmode=(?P<sslmode>.+))?$')

    def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
                 credential_decoder=IDENTITY, driver_args={},
                 adapter_args={}, do_connect=True, after_connection=None):
        """Parse the connection URI and set up (optionally connect) the adapter."""
        self.db = db
        self.dbengine = "sapdb"
        self.uri = uri
        if do_connect: self.find_driver(adapter_args,uri)
        self.pool_size = pool_size
        self.folder = folder
        self.db_codec = db_codec
        self._after_connection = after_connection
        self.find_or_make_work_folder()
        # Strip the "scheme://" prefix, then parse the remainder.
        ruri = uri.split('://',1)[1]
        m = self.REGEX_URI.match(ruri)
        if not m:
            raise SyntaxError("Invalid URI string in DAL")
        user = credential_decoder(m.group('user'))
        if not user:
            raise SyntaxError('User required')
        password = credential_decoder(m.group('password'))
        if not password:
            password = ''
        host = m.group('host')
        if not host:
            raise SyntaxError('Host name required')
        db = m.group('db')
        if not db:
            raise SyntaxError('Database name required')
        # Captured in a closure so the pool can reconnect on demand.
        def connector(user=user, password=password, database=db,
                      host=host, driver_args=driver_args):
            return self.driver.Connection(user, password, database,
                                          host, **driver_args)
        self.connector = connector
        if do_connect: self.reconnect()

    def lastrowid(self,table):
        # Fetch the next value of the table's id sequence.
        # NOTE(review): Python 2 only (`long` builtin); also note NEXTVAL
        # advances the sequence rather than reading the last issued value —
        # preserved as-is from the original.
        self.execute("select %s.NEXTVAL from dual" % table._sequence_name)
        return long(self.cursor.fetchone()[0])
|
I seek to pursue research projects that take on multidisciplinary lens. I am interested in the intersection between environmental and social justice issues and trying to better understand trade-offs between conservation/sustainability and empowering livelihoods. Global inequalities should not be ignored when making sustainable development goals and future climate change planning.
Currently, I am an undergrad research assistant helping with the lab’s new “Food and Landscape Diversity” project. It will examine the direct and indirect roles forests play in human nutrition and dietary diversity. In my previous experience, I have worked on Ecological Economics projects as well as researched the successes and failures of diverse cap-and-trade programs around the globe.
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.