text
stringlengths 29
850k
|
|---|
#!/usr/bin/env python3
from __future__ import annotations
import argparse
import json
import re
from csv import DictReader
from pathlib import Path
def _str_to_bool(value: str) -> bool:
    """Parse a textual CLI boolean: 'true'/'1'/'yes' (any case) => True.

    BUG FIX: the original used ``type=bool``, but ``bool('false')`` is True
    because any non-empty string is truthy, so ``--is-patch false`` was
    treated as a patch build.
    """
    return value.strip().lower() in ('true', '1', 'yes')

# CLI definition for the bloaty output post-processor.
parser = argparse.ArgumentParser(description='Checks how bloated realm has become')
parser.add_argument(
    '--short-symbols-input',
    type=Path,
    help='Path to CSV output of short symbols input file',
)
parser.add_argument(
    '--sections-input',
    type=Path,
    help='Path to CSV output of sections input file',
)
parser.add_argument(
    '--compileunits-input',
    type=Path,
    help='Path to CSV output of compileunits input file',
)
parser.add_argument(
    '--analyzed-file',
    type=str,
    help='Name of file being analyzed by bloaty',
)

# Metadata identifying the Evergreen task that produced this run.
evgOpts = parser.add_argument_group('Evergreen Metadata')
evgOpts.add_argument('--output', type=Path, help='The evergreen json output filename')
evgOpts.add_argument('--project', type=str, help='Evergreen project this script is running in')
evgOpts.add_argument('--execution', type=int, help='Execution # of this evergreen task')
evgOpts.add_argument(
    '--is-patch',
    type=_str_to_bool,
    dest='is_patch',
    help='Specify whether this is a patch build (true/false)',
)
evgOpts.add_argument(
    '--build-variant',
    type=str,
    dest='build_variant',
    help='Build variant of the evergreen task',
)
evgOpts.add_argument('--branch', type=str, help='Git branch that was being tested')
evgOpts.add_argument('--revision', type=str, help='Git sha being tested')
evgOpts.add_argument('--task-id', type=str, dest='task_id', help='Evergreen task ID of this task')
evgOpts.add_argument('--task-name', type=str, dest='task_name', help='Name of this evergreen task')
evgOpts.add_argument(
    '--revision-order-id',
    type=str,
    dest='revision_order_id',
    help='Evergreen revision order id',
)
evgOpts.add_argument('--version-id', type=str, dest='version_id', help='Name of this evergreen version')
args = parser.parse_args()

# Username extracted from a patch-build revision order id; filled in by
# parse_patch_order() and emitted in the final report.
patch_username: str = ''
def parse_patch_order(revision_order_id: str | None = None) -> int:
    """Extract the numeric patch order from an Evergreen revision order id.

    Patch-build ids look like ``<username>_<order>``, e.g.
    ``someone@example.com_42``. As a side effect, stores the username part
    in the module-level ``patch_username``.

    :param revision_order_id: id to parse; defaults to ``args.revision_order_id``.
    :return: the integer patch order.
    :raises ValueError: if the id does not match the expected pattern
        (previously this surfaced as an opaque AttributeError on None).
    """
    global patch_username
    if revision_order_id is None:
        revision_order_id = args.revision_order_id
    patch_order_re = re.compile(r"(?P<patch_username>[\w\@\.]+)_(?P<patch_order>\d+)")
    match_obj = patch_order_re.match(revision_order_id)
    if match_obj is None:
        raise ValueError(f"Unrecognized revision order id: {revision_order_id!r}")
    patch_username = match_obj.group('patch_username')
    return int(match_obj.group('patch_order'))
# Mainline builds use the numeric order id directly; patch builds embed it
# after the username and need parsing.
evg_order = int(args.revision_order_id) if not args.is_patch else parse_patch_order()

# Parses demangled C++ symbol names into namespace, name, function marker,
# and (optionally) a lambda ordinal.
cxx_method_re = re.compile(
    # namespaces/parent class name
    r"(?P<ns>(?:(?:[_a-zA-Z][\w]*)(?:<.*>)?(?:::)|(?:\(anonymous namespace\)::))+)" +
    r"(?P<name>[\~a-zA-Z_][\w]*)(?:<.*>)?" +  # function/class name
    r"(?P<is_function>\(\))?" +  # if this is function, this will capture "()"
    # will be a number if this is a lambda
    r"(?:::\{lambda\(\)\#(?P<lambda_number>\d+)\}::)?")

# Matches bloaty's pseudo-symbol rows for ELF sections, e.g. "[section .text]".
elf_section_re = re.compile(r"\[section \.(?P<section_name>[\w\.\-]+)\]")

# Accumulated report rows; one dict per symbol/section/compile unit.
items: list[dict] = []
# Section names already recorded, used to avoid double counting sections
# that appear in more than one input CSV.
sections_seen = set()
if args.short_symbols_input:
    # Classify each short-symbol row as a C++ symbol/function/lambda, an ELF
    # section, or an opaque symbol, and record its file/VM sizes.
    with open(args.short_symbols_input, 'r') as csv_file:
        input_csv_reader = DictReader(csv_file)
        for row in input_csv_reader:
            raw_name = row['shortsymbols']
            if match := cxx_method_re.search(raw_name):
                # NOTE(review): the original also computed a short node name
                # (with lambda ordinal) here but never used it; the raw
                # symbol string is what gets recorded as 'name'.
                ns = match.group('ns').rstrip(':')
                type_str: str = 'symbol'
                if match.group('lambda_number'):
                    type_str = 'lambda'
                elif match.group('is_function'):
                    type_str = 'function'
                items.append({
                    'type': type_str,
                    'name': raw_name,
                    'ns': ns,
                    'file_size': int(row['filesize']),
                    'vm_size': int(row['vmsize']),
                })
            elif match := elf_section_re.search(raw_name):
                section_name = match.group('section_name')
                type_str: str = 'section' if not section_name.startswith('.debug') else 'debug_section'
                if section_name not in sections_seen:
                    # BUG FIX: the original never added to sections_seen, so
                    # the membership check could never suppress duplicates.
                    sections_seen.add(section_name)
                    items.append({
                        'type': type_str,
                        'name': section_name,
                        'file_size': int(row['filesize']),
                        'vm_size': int(row['vmsize'])
                    })
            else:
                # Anything unparseable is still recorded as a plain symbol.
                items.append({
                    'type': 'symbol',
                    'name': raw_name,
                    'file_size': int(row['filesize']),
                    'vm_size': int(row['vmsize']),
                })
if args.sections_input:
    # Record every ELF section row, skipping sections already captured from
    # the short-symbols input.
    with open(args.sections_input, 'r') as csv_file:
        input_csv_reader = DictReader(csv_file)
        for row in input_csv_reader:
            section_name = row['sections']
            type_str: str = 'section' if not section_name.startswith('.debug') else 'debug_section'
            if section_name not in sections_seen:
                # BUG FIX: the original never added to sections_seen, so the
                # membership check could never suppress duplicates.
                sections_seen.add(section_name)
                items.append({
                    'name': section_name,
                    'type': type_str,
                    'file_size': int(row['filesize']),
                    'vm_size': int(row['vmsize'])
                })
if args.compileunits_input:
    # BUG FIX: the original gated this on args.sections_input (copy-paste
    # error), so compile-unit data was read — or open() crashed on None —
    # based on the wrong command-line argument.
    with open(args.compileunits_input, 'r') as csv_file:
        input_csv_reader = DictReader(csv_file)
        for row in input_csv_reader:
            compileunit_name = row['compileunits']
            # Skip pseudo compile units that are really ELF section entries.
            if not elf_section_re.search(compileunit_name):
                items.append({
                    'name': compileunit_name,
                    'type': 'compileunit',
                    'file_size': int(row['filesize']),
                    'vm_size': int(row['vmsize'])
                })
# Assemble the final report document expected by Evergreen.
output_obj = {
    'items': items,
    'execution': args.execution,
    # Mainline (non-patch) runs have is_patch unset or False.
    'is_mainline': (args.is_patch is not True),
    'analyzed_file': args.analyzed_file,
    'order': evg_order,
    'project': args.project,
    'branch': args.branch,
    'build_variant': args.build_variant,
    'revision': args.revision,
    'task_id': args.task_id,
    'task_name': args.task_name,
    'version_id': args.version_id,
    # Empty for mainline runs; set by parse_patch_order() for patches.
    'patch_username': patch_username
}

# Serialize the report as JSON to the requested output path.
with open(args.output, 'w') as out_fp:
    json.dump(output_obj, out_fp)
|
Our take on a nice leather belt. Created from 10 oz Hermann Oak leather and handmade here in our workshop. Oiled and burnished for a good lookin' finish, with the Ship John Old No. 4 solid brass, USA-made belt buckle.
• See our measurement diagram below. Generally you'll want to order two sizes bigger than the waist size on your jeans. You can also measure your existing belt based on the diagram.
|
"""
Tests for Narrative notebook manager
"""
__author__ = 'Bill Riehl <wjriehl@lbl.gov>'
import unittest
from getpass import getpass
from biokbase.narrative.kbasewsmanager import KBaseWSNotebookManager
from biokbase.workspace.client import Workspace
import biokbase.workspace
import biokbase.auth
import os
import re
from tornado import web
# matches valid names of Narratives = "workspace id"/"narrative name"
# e.g. complicated stuff like:
# wjriehl:my_complicated_workspace123/Here is a new narrative!
# (raw strings: the original non-raw literals relied on '\w'/'\d' surviving
# as-is, which is an invalid-escape DeprecationWarning in Python 3)
name_regex = re.compile(r'[\w:-]+/[\w:-]+')
# matches a valid Narrative reference name, eg:
# ws.768.obj.1234
obj_regex = re.compile(r'^ws\.\d+\.obj\.\d+')
# Deliberately malformed id used by the negative tests below.
bad_narrative_id = "Not a real Narrative id!"
# Account the interactive tests authenticate as.
test_user_id = "kbasetest"
class NarrBaseTestCase(unittest.TestCase):
    """Base test case for the KBase workspace notebook manager.

    setUpClass prompts interactively for the test user's password and builds
    an auth token; individual tests call login()/logout() to control whether
    that token is visible to the manager via the environment variable.
    """
    # Before test:
    # - Log in (for tests that require login)
    # also sets the token in the environment variable so the manager can get to it.
    @classmethod
    def setUpClass(self):
        # NOTE(review): classmethods conventionally name this parameter `cls`;
        # state assigned here lives on the class and is shared by all tests.
        self.user_id = test_user_id
        self.pwd = getpass("Password for {}: ".format(test_user_id))
        self.token = biokbase.auth.Token(user_id=self.user_id, password=self.pwd)
    # by default, user's left logged out
    @classmethod
    def setUp(self):
        # NOTE(review): @classmethod on setUp/tearDown is unconventional for
        # unittest; it works, but the manager is stored on the class rather
        # than the instance — confirm this sharing is intended.
        self.mgr = KBaseWSNotebookManager()
    @classmethod
    def tearDown(self):
        # Ensure every test ends logged out.
        self.logout()
        pass
    @classmethod
    def tearDownClass(self):
        pass
    @classmethod
    def login(self):
        # Expose the token via the environment so the manager can pick it up.
        biokbase.auth.set_environ_token(self.token.token)
    @classmethod
    def logout(self):
        # Clear the environment token, leaving the manager unauthenticated.
        biokbase.auth.set_environ_token(None)
    def test_manager_instantiated(self):
        """The fixture manager is the expected concrete type."""
        self.assertIsInstance(self.mgr, biokbase.narrative.kbasewsmanager.KBaseWSNotebookManager)
    # test get_userid()
    def test_user_id_loggedin(self):
        """get_userid() returns the logged-in user's id."""
        self.login()
        self.assertEquals(self.mgr.get_userid(), self.user_id)
    # test get_userid()
    def test_user_id_loggedout(self):
        """get_userid() is None when no token is present."""
        self.assertEquals(self.mgr.get_userid(), None)
    # test wsclient()
    def test_wsclient(self):
        self.assertIsInstance(self.mgr.wsclient(), Workspace)
    # test info_string (just make sure it's a string)
    def test_info_string(self):
        self.assertIsInstance(self.mgr.info_string(), basestring)
    # test list notebooks while logged in returns a list of strings
    def test_list_notebooks_loggedin(self):
        self.login()
        self.test_list_notebooks()
    def test_list_notebooks_loggedout(self):
        self.test_list_notebooks()
    def test_list_notebooks(self):
        """list_notebooks() returns a list of well-formed descriptor dicts."""
        nb_list = self.mgr.list_notebooks()
        # assert we actually get something
        self.assertIsInstance(nb_list, list)
        # assert it's a list of formatted dicts
        format_failure = self.check_nb_list_format(nb_list)
        self.assertIsNone(format_failure)
    def check_nb_list_format(self, nb_list):
        """Return an error message for the first malformed entry, else None."""
        for nb_info in nb_list:
            if not 'name' in nb_info:
                return 'Missing a "name" key!'
            if not 'notebook_id' in nb_info:
                return 'Missing a "notebook_id key!'
            if not name_regex.match(nb_info['name']):
                return 'Incorrect format for "name" key: {}'.format(nb_info['name'])
            if not obj_regex.match(nb_info['notebook_id']):
                return 'Incorrect format for "notebook_id" key: {}'.format(nb_info['notebook_id'])
        # If we make it this far, don't return anything! Hooray!
        return None
    def test_clean_id(self):
        """_clean_id() converts spaces in a name to underscores."""
        spacey_str = 'test test test test test'
        unspacey_str = 'test_test__test_test___test'
        self.assertEquals(self.mgr._clean_id(spacey_str), unspacey_str)
class NarrDocumentTestCase(NarrBaseTestCase):
    """Tests that create, read, write, and delete an actual Narrative.

    A single shared test Narrative is created once for the class and removed
    in tearDownClass; individual tests operate on that object.
    """
    @classmethod
    def setUpClass(self):
        # NOTE(review): this override replaces NarrBaseTestCase.setUpClass,
        # so self.token / self.mgr exist only if the base class's fixtures
        # already ran in this session — confirm intended execution order.
        try:
            self.login()
            # id for test notebook that'll get twiddled in this test case
            self.nb_id = self.mgr.new_notebook()
            self.logout()
        except:
            print "Unable to create a new Narrative for testing manipulation methods against. Exiting..."
            raise
    @classmethod
    def tearDownClass(self):
        # Remove the shared test Narrative created in setUpClass.
        try:
            self.login()
            self.mgr.delete_notebook(self.nb_id)
            self.logout()
        except:
            print "Unable to delete test Narrative with id {} after testing was completed!".format(self.nb_id)
            raise
    # test that we can create and destroy a new Narrative while logged in
    def test_create_delete_new_nb_loggedin(self):
        self.login()
        try:
            test_id = self.mgr.new_notebook()
            self.assertIsNotNone(test_id)
        except:
            raise
        try:
            self.mgr.delete_notebook(test_id)
        except:
            raise
    # test that trying to create a new Narrative while not logged in fails properly
    def test_create_new_nb_loggedout(self):
        with self.assertRaises(web.HTTPError) as err:
            self.mgr.new_notebook()
        self.assertEquals(err.exception.status_code, 401)
    def test_notebook_exists_valid(self):
        """notebook_exists() is True for the fixture Narrative."""
        self.login()
        self.assertTrue(self.mgr.notebook_exists(self.nb_id))
    def test_notebook_exists_invalid(self):
        """notebook_exists() is False for a malformed id."""
        self.login()
        self.assertFalse(self.mgr.notebook_exists(bad_narrative_id))
    def test_notebook_exists_loggedout(self):
        # Unauthenticated existence checks are expected to fail with 400.
        with self.assertRaises(web.HTTPError) as err:
            self.mgr.notebook_exists(self.nb_id)
        self.assertEquals(err.exception.status_code, 400)
    def test_get_name_valid(self):
        self.login()
        self.assertIsNotNone(self.mgr.get_name(self.nb_id))
    def test_get_name_invalid(self):
        with self.assertRaises(web.HTTPError) as err:
            self.mgr.get_name(bad_narrative_id)
        self.assertEquals(err.exception.status_code, 404)
    def test_get_name_loggedout(self):
        with self.assertRaises(web.HTTPError) as err:
            self.mgr.get_name(self.nb_id)
        self.assertEquals(err.exception.status_code, 404)
    # create_checkpoint is a no-op for now, but leave in blank tests
    def test_create_checkpoint_valid(self):
        pass
    def test_create_checkpoint_invalid(self):
        pass
    def test_create_checkpoint_loggedout(self):
        pass
    # list_checkpoints is a no-op for now, but leave in blank tests
    def test_list_checkpoints_valid(self):
        pass
    def test_list_checkpoints_invalid(self):
        pass
    def test_list_checkpoints_loggedout(self):
        pass
    # restore_checkpoint is a no-op for now, but leave in blank tests
    def test_restore_checkpoint_valid(self):
        pass
    def test_restore_checkpoint_invalid(self):
        pass
    def test_restore_checkpoint_loggedout(self):
        pass
    # delete_checkpoint is a no-op for now, but leave in blank tests
    def test_delete_checkpoint_valid(self):
        pass
    def test_delete_checkpoint_invalid(self):
        pass
    def test_delete_checkpoint_loggedout(self):
        pass
    def test_read_notebook_valid(self):
        """read_notebook_object() returns a well-formed (timestamp, nb) pair."""
        self.login()
        (last_modified, nb) = self.mgr.read_notebook_object(self.nb_id)
        self.assertIsNone(self.validate_nb(last_modified, nb))
    def test_read_notebook_invalid(self):
        self.login()
        with self.assertRaises(web.HTTPError) as err:
            self.mgr.read_notebook_object(bad_narrative_id)
        self.assertEquals(err.exception.status_code, 500)
    def test_read_notebook_loggedout(self):
        with self.assertRaises(web.HTTPError) as err:
            self.mgr.read_notebook_object(bad_narrative_id)
        self.assertEquals(err.exception.status_code, 400)
    def validate_nb(self, last_modified, nb):
        """Return an error string describing the first structural problem in
        the Narrative object, or None if it looks valid."""
        if last_modified is None:
            return "Missing 'last modified' field!"
        if nb is None:
            return "Missing nb field!"
        keylist = ['nbformat', 'nbformat_minor', 'worksheets', 'metadata']
        for key in keylist:
            if not key in nb:
                return 'Required key "{}" missing from Narrative object'.format(key)
        # Expected metadata; empty values mean "key must exist, any value ok".
        metadata_check = {
            'description': '',
            'format': 'ipynb',
            'creator': self.user_id,
            'data_dependencies': [],
            'ws_name': '',
            'type': 'KBaseNarrative.Narrative',
            'name': '',
            'job_ids': []
        }
        for key in metadata_check.keys():
            if key in nb['metadata']:
                test_val = metadata_check[key]
                if len(test_val) > 0:
                    if test_val != nb['metadata'][key]:
                        return 'Metadata key "{}" should have value "{}", but has value "{}"'.format(key, test_val, nb['metadata'][key])
            else:
                return 'Required metadata key "{}" missing from Narrative object'.format(key)
        return None
    def test_write_notebook_object_valid(self):
        """Writing back with an explicit id updates in place (same id)."""
        self.login()
        (last_modified, nb) = self.mgr.read_notebook_object(self.nb_id)
        ret_id = self.mgr.write_notebook_object(nb, notebook_id=self.nb_id)
        self.assertEquals(ret_id, self.nb_id)
    # Without an id, we would expect it to create a new narrative object in the
    # same workspace that Notebook knows about from its metadata
    def test_write_notebook_object_valid_without_id(self):
        self.login()
        (last_modified, nb) = self.mgr.read_notebook_object(self.nb_id)
        ret_id = self.mgr.write_notebook_object(nb)
        # we haven't changed the notebook's name, so it should be the same
        self.assertNotEquals(ret_id, self.nb_id)
        # Do a little specific cleanup here.
        if (ret_id is not self.nb_id):
            self.mgr.delete_notebook(ret_id)
    def test_write_notebook_object_invalid(self):
        self.login()
        with self.assertRaises(web.HTTPError) as err:
            self.mgr.write_notebook_object({})
        self.assertEquals(err.exception.status_code, 400) # should be 500?
    def test_write_notebook_object_loggedout(self):
        with self.assertRaises(web.HTTPError) as err:
            self.mgr.write_notebook_object({})
        self.assertEquals(err.exception.status_code, 400)
    # not sure the best way to test this, and it's not very relevant for KBase, since we
    # don't expose the mapping to users (this is for the typical IPython loading screen)
    def test_delete_notebook_id(self):
        pass
    # cases left to test!
    # new notebook name
    # new nb name with funky characters
    # reading a deleted Narrative
    # reading/writing with creds, but unauthorized (e.g. kbasetest trying to write to wjriehl:home)
if __name__ == '__main__':
    # Run the full suite when executed directly (prompts for a password).
    unittest.main()
|
For more than 85 years, the Cincinnati Numismatic Association (CNA) has been the gathering organization for coin collectors and other numismatic related specialists in the Greater Cincinnati area. Its purpose has remained the same: to foster knowledge of numismatics and a fraternal spirit among those who pursue the study of coins, currency, tokens, medals, and exonumia.
The organization meets once a month, usually on the second Friday at 7:30 pm. Our meetings consist of numismatic talks, discussions with well-known experts, digital presentations, exhibits, and a dinner held every April.
The Cincinnati Numismatist is the club’s monthly newsletter. In 2006, 2007, 2016 and 2017 the publication received First Place in the American Numismatic Association’s competition for Outstanding Local Numismatic Publication.
The CNA has its roots in its long history, but strives to serve the needs of current and future collectors.
The Cincinnati Numismatic Association was founded in August 1930 and consisted of thirty-five members including numismatic notables such as B. Max Mehl and Farran Zerbe. The first CNA President was Herbert A. Brand. The CNA was founded for the purpose of hosting the 1931 American Numismatic Association convention. Since then the CNA has hosted the ANA convention three additional times, in 1942, 1980, and 1988. The CNA also hosted the 1998 ANA Spring show.
The CNA employs the Tyler Davidson Fountain as its club symbol. The fountain appears on the original 1930 copper CNA medal that is pictured in the upper left-hand corner. The organization also identifies with an intaglio engraving of the fountain that was made for the CNA by the American Banknote Company.
|
#
# This is an extension to the Nautilus file manager to allow better
# integration with the Subversion source control system.
#
# Copyright (C) 2006-2008 by Jason Field <jason@jasonfield.com>
# Copyright (C) 2007-2008 by Bruce van der Kooij <brucevdkooij@gmail.com>
# Copyright (C) 2008-2010 by Adam Plumb <adamplumb@gmail.com>
#
# RabbitVCS is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# RabbitVCS is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with RabbitVCS; If not, see <http://www.gnu.org/licenses/>.
#
"""
Additional strings support.
"""
import sys
import codecs
import re
import six
import locale
__all__ = ["S", "IDENTITY_ENCODING", "UTF8_ENCODING", "SURROGATE_ESCAPE"]

# Empty unicode literal used to join decoded characters (py2/py3 safe via six).
unicode_null_string = six.u("")
# Collapses runs of non-alphanumerics; used to normalize codec names.
non_alpha_num_re = re.compile("[^A-Za-z0-9]+")
# Base code point for the surrogate range used to smuggle raw bytes
# 0x80-0xFF through text (same scheme as surrogateescape / PEP 383).
SURROGATE_BASE = 0xDC00
# Matches any single character in the surrogate range U+DC80..U+DCFF.
RE_SURROGATE = re.compile(six.u("[") + six.unichr(SURROGATE_BASE + 0x80) +
                          six.u("-") + six.unichr(SURROGATE_BASE + 0xFF) +
                          six.u("]"))
# Recognizes common spellings of "utf-8" (UTF8, utf_8, "utf 8", ...).
RE_UTF8 = re.compile("^[Uu][Tt][Ff][ _-]?8$")
# Codec that maps ord(byte) == ord(unicode_char).
IDENTITY_ENCODING = "latin-1"
# An UTF-8 codec that implements surrogates, even in Python 2.
UTF8_ENCODING = "rabbitvcs-utf8"
def utf8_decode(input, errors="strict"):
    """Decode UTF-8 bytes to text.

    The final=True flag tells the stock decoder not to expect a partial
    trailing multi-byte sequence; returns (text, bytes_consumed).
    """
    decoded, consumed = codecs.utf_8_decode(input, errors, True)
    return (decoded, consumed)
def utf8_encode(input, errors="strict"):
    """Encode text to UTF-8 bytes, routing surrogate characters
    (U+DC80..U+DCFF) through the registered error handler instead of
    failing outright.

    Returns (encoded_bytes, length_of_input) like codecs encode functions.
    """
    output = b''
    pos = 0
    end = len(input)
    eh = None  # error handler; looked up lazily on the first surrogate
    while pos < end:
        # Find the next surrogate (if any); text before it encodes cleanly.
        n = end
        m = RE_SURROGATE.search(input, pos)
        if m:
            n = m.start()
        if n > pos:
            # Encode the clean run [pos:n) with the stock UTF-8 encoder.
            p, m = codecs.utf_8_encode(input[pos:n], errors)
            output += p
            pos = n
        if pos < end:
            # Positioned on a surrogate: delegate to the error handler,
            # which returns (replacement, resume_position).
            e = UnicodeEncodeError(UTF8_ENCODING,
                                   input, pos, pos + 1,
                                   "surrogates not allowed")
            if not eh:
                eh = codecs.lookup_error(errors)
            p, n = eh(e)
            output += p
            if n <= pos:
                # Guarantee forward progress even if the handler misbehaves.
                n = pos + 1
            pos = n
    return (output, len(input))
class Utf8IncrementalEncoder(codecs.IncrementalEncoder):
    """Incremental encoder backed by the surrogate-aware utf8_encode()."""
    def encode(self, input, final=False):
        encoded, _consumed = utf8_encode(input, self.errors)
        return encoded
class Utf8IncrementalDecoder(codecs.BufferedIncrementalDecoder):
    # Decoding needs no surrogate special-casing; reuse the stock C decoder.
    _buffer_decode = codecs.utf_8_decode
class Utf8StreamWriter(codecs.StreamWriter):
    """Stream writer that routes encoding through the surrogate-aware
    utf8_encode()."""
    def encode(self, input, errors='strict'):
        result = utf8_encode(input, errors)
        return result
class Utf8StreamReader(codecs.StreamReader):
    # Reading needs no surrogate special-casing; reuse the stock C decoder.
    decode = codecs.utf_8_decode
def utf8_search(encoding):
    """Codec search hook: answer only for our UTF8_ENCODING name.

    The requested name is normalized (non-alphanumerics collapsed to '-',
    lowercased) before comparison, mirroring codec-name conventions.
    """
    normalized = non_alpha_num_re.sub("-", encoding).strip("-").lower()
    if normalized == UTF8_ENCODING:
        return codecs.CodecInfo(
            name=UTF8_ENCODING,
            encode=utf8_encode,
            decode=utf8_decode,
            incrementalencoder=Utf8IncrementalEncoder,
            incrementaldecoder=Utf8IncrementalDecoder,
            streamwriter=Utf8StreamWriter,
            streamreader=Utf8StreamReader,
        )
    return None

codecs.register(utf8_search)
# Emulate the surrogateescape codecs error handler because it is not
# available before Python 3.1.
SURROGATE_ESCAPE = "rabbitvcs-surrogateescape"
def rabbitvcs_surrogate_escape(e):
    """Error handler mapping undecodable bytes to/from surrogates.

    Decode errors: bytes >= 0x80 become surrogates (SURROGATE_BASE + byte).
    Encode errors: surrogates in the DC80-DCFF range map back to raw bytes;
    anything else re-raises the original error.
    """
    if not isinstance(e, UnicodeError):
        raise e
    bad = e.object[e.start:e.end]
    if isinstance(e, UnicodeDecodeError):
        chars = [six.unichr(b if b < 0x80 else SURROGATE_BASE + b)
                 for b in bytearray(bad)]
        return (unicode_null_string.join(chars), e.end)
    if isinstance(e, UnicodeEncodeError):
        escaped = b""
        for ch in bad:
            code = ord(ch) - SURROGATE_BASE
            if not 0x80 <= code <= 0xFF:
                raise e
            escaped += six.int2byte(code)
        return (escaped, e.end)
    raise e
codecs.register_error(SURROGATE_ESCAPE, rabbitvcs_surrogate_escape)
class S(str):
    """
    Stores a string in native form: unicode with surrogates in Python 3 and
    utf-8 in Python 2.

    Provides the following methods:
    encode: overloaded to use UTF8_ENCODING and SURROGATE_ESCAPE error handler.
    decode: overloaded to use UTF8_ENCODING and SURROGATE_ESCAPE error handler.
    bytes: get the string as bytes.
    unicode: get the string as unicode.
    display: get the string in native form, without surrogates.
    """
    if str == bytes:
        # Python 2: `str` is the byte-string type, so store utf-8 bytes.
        def __new__(cls, value, encoding=UTF8_ENCODING, errors=SURROGATE_ESCAPE):
            # Accept bytes/bytearray/unicode/anything; normalize to utf-8 bytes.
            if isinstance(value, bytearray):
                value = bytes(value)
            if isinstance(value, str):
                encoding, errors = S._codeargs(encoding, errors)
                if encoding.lower() != UTF8_ENCODING:
                    # Byte input in a different encoding: re-code via unicode.
                    value = value.decode(encoding, errors)
            if isinstance(value, six.text_type):
                value = value.encode(UTF8_ENCODING, SURROGATE_ESCAPE)
            elif not isinstance(value, str):
                # Fall back to the object's string representation.
                value = str(value)
            return str.__new__(cls, value)
        def encode(self, encoding=UTF8_ENCODING, errors=SURROGATE_ESCAPE):
            """Encode to bytes; utf-8 returns the stored bytes directly."""
            encoding, errors = self._codeargs(encoding, errors)
            if encoding.lower() == UTF8_ENCODING:
                return str(self)
            # Other targets round-trip through unicode first.
            value = str.decode(self, UTF8_ENCODING, SURROGATE_ESCAPE)
            return value.encode(encoding, errors)
        def decode(self, encoding=UTF8_ENCODING, errors=SURROGATE_ESCAPE):
            """Decode the stored bytes to unicode."""
            encoding, errors = self._codeargs(encoding, errors)
            return str.decode(self, encoding, errors)
        def display(self, encoding=None, errors='replace'):
            """Render for display in the (locale) encoding, without surrogates."""
            encoding, errors = self._codeargs(encoding, errors)
            value = str.decode(self, UTF8_ENCODING, errors)
            return value.encode(encoding, errors)
    else:
        # Python 3: `str` is unicode; surrogates carry undecodable raw bytes.
        def __new__(cls, value, encoding=UTF8_ENCODING, errors=SURROGATE_ESCAPE):
            if isinstance(value, bytearray):
                value = bytes(value)
            if isinstance(value, bytes):
                encoding, errors = S._codeargs(encoding, errors)
                value = value.decode(encoding, errors)
            elif not isinstance(value, str):
                value = str(value)
            return str.__new__(cls, value)
        def encode(self, encoding=UTF8_ENCODING, errors=SURROGATE_ESCAPE):
            """Encode to bytes using the surrogate-aware defaults."""
            encoding, errors = self._codeargs(encoding, errors)
            return str.encode(self, encoding, errors)
        def decode(self, encoding=UTF8_ENCODING, errors=SURROGATE_ESCAPE):
            # NOTE(review): the arguments are ignored here — on Python 3 the
            # value is already unicode, so this simply returns it. Confirm no
            # caller relies on re-decoding with a different encoding.
            return str(self);
        def display(self, encoding=None, errors='replace'):
            """Return the text with surrogates replaced by U+FFFD."""
            return RE_SURROGATE.sub(six.unichr(0xFFFD), self)
    def bytes(self, encoding=UTF8_ENCODING, errors=SURROGATE_ESCAPE):
        """Return the string as bytes (delegates to encode)."""
        return self.encode(encoding, errors)
    def unicode(self):
        """Return the string as unicode (delegates to decode)."""
        return self.decode()
    def valid(self, encoding=None, errors=SURROGATE_ESCAPE):
        """True when the display form equals the string itself, i.e. no
        escaped (undisplayable) bytes are present."""
        return self.display(encoding, errors) == self
    @staticmethod
    def _codeargs(encoding, errors):
        # Resolve defaults: locale encoding first, then interpreter default.
        if not encoding:
            encoding = locale.getlocale(locale.LC_MESSAGES)[1]
        if not encoding:
            encoding = sys.getdefaultencoding()
        # Any spelling of utf-8 is redirected to the surrogate-aware codec,
        # and 'strict' is softened to the surrogate-escape handler.
        if RE_UTF8.match(encoding):
            encoding = UTF8_ENCODING
        if errors.lower() == 'strict':
            errors = SURROGATE_ESCAPE
        return encoding, errors
|
Digital marketing agency Orange Line has made two senior appointments following a series of new business wins and existing client growth.
The independent Sydney agency has hired former Essence planning lead, Aishling Farrell (right in photo), as general manager of client service and strategy along with former senior ReachLocal executive, Stephanie Ford (left) as operations manager.
For the past 10 years Orange Line has created and executed the digital strategy for tier one and medium-enterprise businesses including Bupa, Expedia and Bayer. Over the past 12 months the agency has increased its scope of work with a number of clients, moving into international markets — including the US — as well as adding a string of new business.
Farrell joins Orange Line after seven years with GroupM’s digital-first agency, Essence. Starting her career as a media planner/buyer, she rose through the ranks in London. Charged with leading planning and account management for clients including Google and HP, she was the first person on the ground when Essence opened its Sydney office in 2016.
Ford joins Orange Line after seven years with global online marketing and advertising solutions provider ReachLocal. During that time she gained experience in a variety of roles beginning her career as an account manager and then progressing to UK operations manager. Transferring to Sydney in 2014 she focused on project and program management to bring global alignment and increase operational efficiencies, as well as leading the regional operations specialist team across ANZ.
|
#! /usr/bin/env python
# coding=utf8
from BotModule import BotModule
import urllib, json
class WeatherModule(BotModule):
def __init__(self):
return
def command(self, nick, cmd, args, type):
if cmd == "!wetter":
postalcode = "karlsruhe"
if len(args) > 0:
postalcode = ' '.join(args).lower()
if postalcode.startswith('honoluluu'):
answer = 'Computer sagt: NEIN!'
if type == 'public':
self.sendPublicMessage(answer)
else :
self.sendPrivateMessage(nick, answer)
return
elif postalcode == 'mêlée island':
answer = 'Dublonen, Dublonen!'
if type == 'public':
self.sendPublicMessage(answer)
else :
self.sendPrivateMessage(nick, answer)
return
try:
u = urllib.urlopen("http://api.openweathermap.org/data/2.1/find/name?q=%s&type=like&units=metric" % urllib.quote(postalcode))
except urllib2.HTTPError, e:
if self.DEBUG:
print 'Error fetching data, Error: %s' % e.code
return
except urllib2.URLError, e:
if self.DEBUG:
print 'Error fetching data, Error: %s' % e.args
return
if u.getcode() != 200:
if self.DEBUG:
print 'Error fetching data, Errorcode: %s' % u.getcode()
return
try:
jsondata = json.loads(u.read())
except ValueError, e:
if self.DEBUG:
print "ValueError %s" % e
return
if jsondata['cod'] != '200':
if jsondata['message'] != '':
answer = 'Leck? welches Leck?'
if type == 'public':
self.sendPublicMessage(answer)
else :
self.sendPrivateMessage(nick, answer)
return
if len(jsondata['list']) < 1:
answer = 'Leck? welches Leck?'
if type == 'public':
self.sendPublicMessage(answer)
else :
self.sendPrivateMessage(nick, answer)
return
elif len(jsondata['list']) > 1:
answer = 'Mr Cotton´s Papagei! Die selbe Frage!'
if type == 'public':
self.sendPublicMessage(answer)
else :
self.sendPrivateMessage(nick, answer)
return
weather = {}
try:
weather['city'] = jsondata['list'][0]['name']
weather['temp'] = jsondata['list'][0]['main']['temp']
weather['cond'] = jsondata['list'][0]['weather'][0]['description']
weather['windspeed'] = jsondata['list'][0]['wind']['speed']
weather['cloudiness'] = jsondata['list'][0]['clouds']['all']
weather['rain_last_1h'] = jsondata['list'][0]['rain']['1h']
weather['humidity'] = jsondata['list'][0]['main']['humidity']
except KeyError, e:
if self.DEBUG:
print "KeyError: %s" % e
answer = "Wetter für %s: %.2f°C, %s" % (weather['city'].encode('utf-8'), weather['temp'], weather['cond'].encode('utf-8'))
if 'windspeed' in weather:
answer += ", wind speed: %.1fkm/h" % weather['windspeed']
if 'humidity' in weather:
answer += ", humidity: %d%%" % weather['humidity']
if 'cloudiness' in weather:
answer += ", cloudiness: %d%%" % weather['cloudiness']
if 'rain_last_1h' in weather:
answer += ", rain last 1h: %.3fl/m²" % weather['rain_last_1h']
if type == 'public':
self.sendPublicMessage(answer)
if weather['temp'] > 30:
self.sendPublicMessage('Willkommen in der der Karibik, Schätzchen!')
else :
self.sendPrivateMessage(nick, answer)
def help(self, nick):
self.sendPrivateMessage(nick, "!wetter [Ort] - Gibt aktuelle Wetterdaten aus. Default Ort ist Karlsruhe.")
return
|
I’m trying really hard to keep on the sunny side of my Rocky Mount life (see my last entry). I’d intended to post cheerful book reviews today of what I’ve recently read.
But my thoughts about Where the Crawdads Sing and The Perfect Nanny will just have to wait. I feel compelled to talk about what’s going on with our city government–and what isn’t.
According to an article in Friday’s Rocky Mount Telegram (February 8, 2019), there’s trouble brewing about downtown sites for hotels. It seems Carlton House developers are waiting on a $55,000 grant to support renovations. In order for developers to receive the money, city officials have to complete paperwork.
Are you kidding me? This woman is being paid $120,000 a year. Of course, maybe it is hard to be in the loop when you live in Virginia Beach instead of the community where you are supposed to be in charge of COMMUNITY DEVELOPMENT.
Speculation is the hotel location will be the site of the St. John A.M.E. Zion Church, which sits right on the Event Center parking lot–believe me, I went down there and looked.
I took the picture below sitting in the parking lot of the Event Center.
To be honest, I wouldn’t mind staying in a quaint hotel that was once a historic church. I mean, look at those stained glass windows. Of course, I have no idea whether the church will be renovated or torn down and a Holiday Inn built there.
The problem is the perception of favoritism. Why is the City holding up one developer and lobbying for another?
The most bizarre aspect of the Event Center to me is what I call Andre’s Church, situated smack-dab in the middle of the parking lot of the Event Center, as it has been since Day One. Owned by the family of Councilman Andre Knight, the official name of this structure is the Holy Hope Heritage Center.
Who’s conducting that independent study of our City Manager, Ms. Rochelle Small-Toney? When will those results be made public? If there was no lawsuit (according to the Mayor), then why was John Jesso paid $40,000? Why do City Council members not recuse themselves when votes are taken on issues that could be seen as a conflict of interest?
I could go on and on–why in the name of common sense was Rochelle Small-Toney hired in the first place?–but I feel myself slipping into despair.
It’s really hard to stay on the sunny side with so many questions and a city council that won’t come up with answers.
You are so right on target. Thank you, Patsy. I just hope you get some honest answers. Seems no one else has been able to!
Thanks for taking the time to post a comment, Debbie. I’ll certainly let everyone know if I learn anything that explains our current situation.
A burning question many of us have is whether the former director of Parks, Kelvin Yarnell, has any kind of warnings or discipline write-ups that would warrant his demotion and replacement.
A very good question. He came a year or so ago to speak to my garden club and seemed very knowledgeable and enthusiastic about plans the city had (at that time) to capitalize on our river front. I’ve heard only good things about him.
My wife and I lived in Rocky Mount for almost four years while I was assigned there by the Army. Our plan is to move back after I retire in three years, but with all the negative things that are happening with the City Manager, we are actually looking at moving to another community. We are entertaining the idea of moving to Wilson, Zebulon, or Knightdale. It makes us sad to see what is happening to Rocky Mount and our friends that still remain there. The city manager needs to go! Along with most of the city council!
Wake Forest would love for you to call us home!!!! Thank you for your service.
Thank you Patsy for using your forum for these questions which we as concerned citizens would like to have answers to.
It was quite shocking to read Fridays article….with more shenanigans revealed! Thank you for keeping this in the news!
Thank you Patsy for all that you are doing!!! I have lived in Rocky Mount my entire life and never dreamed our City government could become so corrupt!! Why they let Andre Knight get away with what he does and do NOTHING is beyond me!!! Are they afraid of him??
I am so happy you are using your journalistic platform to publicly ask what we all want answers to and more. And why are non-profits misusing grant money and paying spouses for services? Come on, follow the money. I look forward to reading what you post!!
You address the same questions my husband and I had when we read the article concerning the money waiting for a signature to go to the renovators of the Carlton house. Thank you for keeping these issues in the public eye.
Thank you so much Patsy for using your journalism expertise to enlighten our community. Every day there is one more issue brought to light. Today, February 10, 2019, there is an article about a downtown establishment with direct connections to city councilmen. The problems are becoming more consistent. Thank you for helping the community keep up with this. Our city needs the prayers of all the people!!!!!
Thank you and the Telegram for digging out and exposing the problems. I am not a resident within the city limits, but support the businesses with our dollars. What does it take to get the state auditors along with the State Department of Human Resources to come to Rocky Mount? I know several present and past employees that could and should file suit with the Department of Human Resources because they have been discriminated against.
Keep up the good writing and holding the council accountable. One more thought…all council meetings should be video taped and immediately accessible to the public.
|
#! /usr/bin/env python
# -*- coding: UTF8 -*-
# Este arquivo é parte do programa Vittolino
# Copyright 2011-2017 Carlo Oliveira <carlo@nce.ufrj.br>,
# `Labase <http://labase.selfip.org/>`__, `GPL <http://is.gd/3Udt>`__.
#
# Vittolino é um software livre, você pode redistribuí-lo e/ou
# modificá-lo dentro dos termos da Licença Pública Geral GNU como
# publicada pela Fundação do Software Livre (FSF), na versão 2 da
# Licença.
#
# Este programa é distribuído na esperança de que possa ser útil,
# mas SEM NENHUMA GARANTIA, sem uma garantia implícita de ADEQUAÇÃO
# a qualquer MERCADO ou APLICAÇÃO EM PARTICULAR. Veja a
# Licença Pública Geral GNU para maiores detalhes.
#
# Você deve ter recebido uma cópia da Licença Pública Geral GNU
# junto com este programa, se não, veja em <http://www.gnu.org/licenses/>
"""
Gerador de labirintos e jogos tipo 'novel'.
"""
from _spy.vitollino.vitollino import STYLE, INVENTARIO, Cena, Elemento
from _spy.vitollino.vitollino import Texto
from _spy.vitollino.vitollino import JOGO as j
from browser import window, html
# Disable the framework's automatic div creation; this game builds its
# own DOM overlays (see the Note class below).
Cena._cria_divs = lambda *_: None
# Fixed 1024px-wide layout for scenes and the inventory bar.
STYLE['width'] = 1024
STYLE['min-height'] = "800px"
INVENTARIO.elt.style.width = 1024
# Wall artwork for every room: keys are "<room letter>_<direction>" and map
# to the image URL shown for that wall.  Rooms without an entry for some
# direction simply have no scene on that wall (see SCENES below).
IMG = dict(
    A_NORTE="https://i.imgur.com/aLEjWgB.png",
    A_LESTE="https://i.imgur.com/sivjAnO.png",
    A_SUL="https://i.imgur.com/otHJhF0.png",
    # B_NORTE="https://i.imgur.com/40K5493.png", B_LESTE="https://i.imgur.com/R3bpFXD.png",
    B_OESTE="https://i.imgur.com/dlxY8hi.png", B_SUL="https://i.imgur.com/eYM3Yp9.png",
    B_NORTE="https://activufrj.nce.ufrj.br/file/SuperPythonEM/rect3569.jpg",
    B_LESTE="https://activufrj.nce.ufrj.br/file/SuperPythonEM/rect3565.jpg",
    C_LESTE="https://i.imgur.com/94V79TA.png", C_NORTE="https://i.imgur.com/YJfnhy9.png",
    C_OESTE="https://i.imgur.com/Fzz2FNz.png", C_SUL="https://i.imgur.com/LFKXlB1.png",
    D_NORTE="http://i.imgur.com/1uWH7rU.png", D_LESTE="https://i.imgur.com/b0FcjLq.png",
    D_OESTE="https://i.imgur.com/406g75C.png", D_SUL="https://i.imgur.com/HQBtUoQ.png",
    E_NORTE="https://i.imgur.com/uNkTVGg.png", E_SUL="http://i.imgur.com/bculg4O.png",
    E_LESTE="https://i.imgur.com/lUi1E1v.png", E_OESTE="https://i.imgur.com/bPBT1d7.png",
    F_NORTE="https://i.imgur.com/iHsggAa.png", F_SUL="http://i.imgur.com/euNeDGs.png",
    F_LESTE="https://i.imgur.com/NqSCDQR.png", F_OESTE="https://i.imgur.com/hG4mgby.png",
    G_NORTE="https://i.imgur.com/XDIASJa.png", G_SUL="https://i.imgur.com/ARQZ8CX.png",
    G_LESTE="https://i.imgur.com/pJOegNT.png", G_OESTE="http://i.imgur.com/9IhOYjO.png",
    H_NORTE="https://i.imgur.com/WjTtZPn.png", H_LESTE="https://i.imgur.com/AzvB8hs.png",
    H_OESTE="https://i.imgur.com/SIhLGCP.png", H_SUL="https://i.imgur.com/UVnpzzE.png",
    I_NORTE="https://i.imgur.com/RSdQSH1.png", I_SUL="https://i.imgur.com/UGCRJ0d.png",
    I_LESTE="https://i.imgur.com/jSn4zsl.png", I_OESTE="https://i.imgur.com/eG43vn5.png",
    J_NORTE="https://i.imgur.com/MMO11Dv.png", J_SUL="https://i.imgur.com/RkWPb8Z.png",
    J_LESTE="https://i.imgur.com/btv0qfO.png", J_OESTE="https://i.imgur.com/lDezYKu.png",
    K_NORTE="https://i.imgur.com/Tx9Q6vW.png", K_SUL="https://i.imgur.com/rrI94Xh.png",
    K_LESTE="https://i.imgur.com/R6gON2E.png", K_OESTE="https://i.imgur.com/Mn69uua.png",
    L_NORTE="https://i.imgur.com/oAu9lkN.png", L_SUL="https://i.imgur.com/xTjd7UV.png",
    L_LESTE="https://i.imgur.com/JMQAGvc.png", L_OESTE="http://i.imgur.com/UJBMKY7.png",
    M_NORTE="https://i.imgur.com/qoHwGLW.png", M_SUL="https://i.imgur.com/5P3U1Ai.png",
    M_LESTE="http://i.imgur.com/1UXBodl.png", M_OESTE="https://i.imgur.com/AC2KgZg.png",
    N_NORTE="https://i.imgur.com/KVlUf94.png", N_LESTE="https://i.imgur.com/f6vR0tY.png",
    N_OESTE="https://i.imgur.com/GE8IsRM.png", N_SUL="https://i.imgur.com/RfUP0ez.png",
    O_NORTE="https://i.imgur.com/lOT96Hr.png", O_SUL="https://i.imgur.com/HtRKv7X.png",
    O_LESTE="https://i.imgur.com/uvPjc14.png", O_OESTE="https://i.imgur.com/I7Gn0Xx.png",
    P_NORTE="https://i.imgur.com/OutDPac.png", P_SUL="https://i.imgur.com/sAIhp4b.png",
    P_LESTE="https://i.imgur.com/dc2Ol59.png", P_OESTE="https://i.imgur.com/9IBwxjI.png",
    Q_NORTE="https://i.imgur.com/JRYlZeN.png", Q_SUL="http://i.imgur.com/4BCiuYZ.png",
    Q_LESTE="https://i.imgur.com/ek4cwBg.png", Q_OESTE="https://i.imgur.com/vmZHZmr.png",
    R_NORTE="https://i.imgur.com/qnjq624.png", R_SUL="https://i.imgur.com/nZvwdhP.png",
    R_LESTE="https://i.imgur.com/gS4rXYk.png", R_OESTE="http://i.imgur.com/2Z36mLI.png"
)
# Prop artwork: the notebook page (NOTE), the open book (LIVRO), the force
# field (FORCE) and a transparent rectangle used as an invisible click target.
PROP = dict(
    NOTE="https://i.imgur.com/SghupND.png", LIVRO="https://i.imgur.com/yWylotH.png?1",
    FORCE="https://i.imgur.com/aLTJY2B.png",
    FAKEOB = "https://upload.wikimedia.org/wikipedia/commons/3/3d/Simple_Rectangle_-_Semi-Transparent.svg"
)
def cria_lab():
    """Build the maze: create scenes, group them into rooms, lay out the
    floor plan, seal the two gated doors and enter at the hall's east wall."""
    def und(ch):
        # Map a one-letter room code to its "MANSÃO_<NAME>" attribute name,
        # or a placeholder for cells that are holes ('-') in MAP.
        return "MANSÃO_%s" % NOME[ch].replace(" ", "_") if ch in NOME else "_NOOO_"
    j.c.c(**SCENES)
    # Each room gets the list of its cardinal wall scenes (skipping walls
    # that have no artwork and therefore no scene).
    salas = {nome: [getattr(j.c, lado) for lado in lados if hasattr(j.c, lado)] for nome, lados in ROOMS.items()}
    j.s.c(**salas)
    # Translate the ASCII MAP grid into a matrix of room objects (None = gap).
    chambers = [[getattr(j.s, und(ch)) if hasattr(j.s, und(ch)) else None for ch in line] for line in MAP]
    j.l.m(chambers)
    blqa, blqb = j.s.MANSÃO_BLOQUEIO.sul.N, j.s.MANSÃO_ARMA_DO_CRIME.norte.N
    j.s.MANSÃO_HALL.oeste.portal(N=j.s.MANSÃO_FACHADA.oeste)
    print("cria_lab", blqa.img)
    # Seal the two doors that gate progress until the player unlocks them.
    blqa.fecha()
    blqb.fecha()
    j.s.MANSÃO_HALL.leste.vai()
    # j.s.MANSÃO_HALL.oeste.vai()
class Note:
    """The in-game notebook prop.

    A clickable paper is hidden in the hall's east wall; picking it up puts
    it in the inventory and rewires it to open a book scene containing an
    editable (CodeMirror) copy of the maze-building code.
    """
    def __init__(self):
        # The notebook starts in the east wall of the mansion hall.
        self.onde = self.cena = j.s.MANSÃO_HALL.leste
        print("implanta_livro_de_notas", self.cena.img)
        self.livro = Cena(PROP["LIVRO"])
        # Invisible click target laid over the washstand in the scene.
        self.papel = Elemento(
            img=PROP["FAKEOB"], tit="Um lavatorio", vai=self.pega_papel, style=dict(
                left=360, top=356, width=170, height="111px"))
        self.implanta_livro_de_notas()
        # DOM overlay holding the editable text of the book page.
        self.div = html.DIV(style=dict(
            position="absolute", left=45, top=70, width=450, background="transparent", border="none"))
        self.ta = html.TEXTAREA(CODE, cols="70", rows="20", style=dict(
            position="absolute", left=50, top=50, background="transparent", border="none"))
        # `<=` is Brython's DOM append operator.
        self.div <= self.ta
        self.livro.elt <= self.div
    def implanta_livro_de_notas(self):
        """Attach the clickable paper element to its scene with a pointer cursor."""
        print("implanta_livro_de_notas", self.papel.img)
        from _spy.vitollino.vitollino import Cursor
        Cursor(self.papel.elt)
        self.papel.entra(self.cena)
    def pega_papel(self, _=0):
        """First click: announce the find, move the paper to the inventory
        and rewire its action to open the book instead."""
        texto = Texto(self.cena, "Um Livro de Notas", "Você encontra um livro de notas")
        texto.vai()
        j.i.bota(self.papel, "papel", None)  # texto.vai)
        self.papel.vai = self.mostra_livro
    def mostra_livro(self):
        """Open the book scene, with both side exits returning to the
        scene the player was in."""
        self.onde = j.i.cena
        self.livro.portal(O=self.onde, L=self.onde)
        self.livro.vai()
        self.escreve_livro()
    def escreve_livro(self):
        """Turn the textarea into a CodeMirror editor.  Runs only once:
        the method replaces itself with a no-op afterwards."""
        cm = window.CodeMirror.fromTextArea(self.ta, dict(mode="python", theme="solarized"))
        self.escreve_livro = lambda *_: None
class Force:
    """A scene-wide force-field overlay that rebuffs the player on touch.

    Currently unused (see the commented-out call in main()).
    """
    def __init__(self):
        # Anchor the field in the east wall of the mansion hall.
        self.onde = self.cena = j.s.MANSÃO_HALL.leste
        self.force = Elemento(
            img=PROP["FORCE"], tit="campo de força", vai=self.toca_campo, style=dict(
                left=0, top=30, width=850, height="680px"))
        self.implanta_campo_de_forca()
    def implanta_campo_de_forca(self):
        """Place the force-field element into its scene."""
        self.force.entra(self.cena)
    def toca_campo(self, _=0):
        """Show feedback when the player touches the field."""
        texto = Texto(self.cena, "Campo de Força", "Você recebe um pulso de força e é jogado para trás")
        texto.vai()
def main(*_):
    """Entry point: build the maze and plant the notebook prop."""
    # criarsalab()
    # j.m("https://is.gd/Ldlg0V")
    cria_lab()
    Note()
    # Force()  # force-field prop currently disabled
    pass
NOMES = """SALA A - FACHADA
SALA B - HALL
SALA C - SALA DE ESTAR
SALA D - CENA DO CRIME
SALA H - A CHAVE
SALA I - FOLHA DE CADERNO
SALA J - BLOQUEIO
SALA E - DESPENSA
SALA K - PANO ENSANGUENTADO
SALA L - ESCURIDÃO
SALA F - ENTRADA DO QUARTO
SALA G - QUARTO
SALA N - SALA DE TV
SALA Q - SALA DE JANTAR
SALA R - COZINHA
SALA P - CORREDOR
SALA O - SALA DE VIGILÂNCIA
SALA M - ARMA DO CRIME""".split("\n")
CARDINAL = "NORTE LESTE SUL OESTE".split()
NOME = {line.split(" - ")[0].split()[-1]: line.split(" - ")[1].replace(" ", "_") for line in NOMES}
ROOMS = {"MANSÃO_%s" % NOME[room]: ["MANSÃO_%s_%s" % (NOME[room], k) for k in CARDINAL]
for room in NOME.keys()}
SCENES = {"MANSÃO_%s_%s" % (NOME[room], k): IMG["%s_%s" % (room, k)]
for k in CARDINAL for room in NOME.keys() if "%s_%s" % (room, k) in IMG}
MAP = """
ABC
--D-E-FG
--HIJKL
----M-N
----OPQR"""[1:].split("\n")
CODE = """
def cria_lab():
def und(ch):
return "MANSÃO_%s" % NOME[ch].replace(" ", "_") if ch in NOME else "_NOOO_"
j.c.c(**SCENES)
salas = {nome: [getattr(j.c, lado) for lado in lados if hasattr(j.c, lado)] for nome, lados in ROOMS.items()}
j.s.c(**salas)
chambers = [[getattr(j.s, und(ch)) if hasattr(j.s, und(ch)) else None for ch in line] for line in MAP]
j.l.m(chambers)
blqa, blqb = j.s.MANSÃO_BLOQUEIO.sul.N, j.s.MANSÃO_ARMA_DO_CRIME.norte.N
j.s.MANSÃO_HALL.oeste.portal(N=j.s.MANSÃO_FACHADA.oeste)
print("cria_lab", blqa.img)
blqa.fecha()
blqb.fecha()
j.s.MANSÃO_FACHADA.leste.vai()
"""
|
This limited edition fine art print is printed on archival heavyweight watercolor paper. The pigmented inks have a lifetime guarantee. The edition is limited to 50, and each print is individually numbered and hand-signed by the artist.
|
from fabric.api import *
from fabric.utils import abort
from fabric.contrib.project import rsync_project
from fabric.contrib.files import exists
# Default to deploying to both app servers unless hosts were supplied on the
# command line (fab -H ...).
if not env.hosts:
    env.hosts = [
        'www-data@app-0.igor.io',
        'www-data@app-1.igor.io',
    ]

project_name = 'ngenious.website'
# Live docroot plus the sibling directories used for atomic swap deploys.
target_dir = '/var/www/' + project_name
backup_dir = target_dir + '-backup'
staging_dir = target_dir + '-staging'
@task(default=True)
def deploy():
    """Deploy atomically: copy the live tree to a staging dir, rsync the
    local working copy into it, then swap staging into place (keeping the
    old tree as a backup for rollback())."""
    puts('> Cleaning up previous backup and staging dir')
    run('rm -rf %s %s' % (backup_dir, staging_dir))

    puts('> Preparing staging')
    run('cp -r %s %s' % (target_dir, staging_dir))

    puts('> Uploading changes')
    with cd(staging_dir), hide('stdout'):
        rsync_project(
            env.cwd,
            './',
            delete=True,
            exclude=['.git', '*.pyc'],
            extra_opts='--omit-dir-times',
        )

    puts('> Switching changes to live')
    run('mv %s %s' % (target_dir, backup_dir))
    run('mv %s %s' % (staging_dir, target_dir))
@task
def rollback():
    """Restore the previous deploy from the backup copy, if one exists."""
    if not exists(backup_dir):
        abort('Rollback failed, no backup exists')
    puts('> Rolling back to previous deploy')
    run('mv %s %s' % (target_dir, staging_dir))
    run('mv %s %s' % (backup_dir, target_dir))
@task
def reload():
    """Gracefully reload the web stack without dropping connections."""
    puts('> Reloading nginx and php5-fpm')
    for command in ('service nginx reload', 'service php5-fpm reload'):
        run(command)
@task
def restart():
    """Hard-restart the web stack (drops in-flight connections)."""
    puts('> Restarting nginx and php5-fpm')
    for command in ('service nginx restart', 'service php5-fpm restart'):
        run(command)
|
How to boost your body's calcium absorption?
What helps the body absorb calcium?
If you think calcium is only necessary for bone and teeth, it is wrong. It is also necessary for blood and nervous system. Calcium is essential for blood clotting, cardiovascular and muscle functions. Calcium deficiency is the result of less absorbance of calcium by our body. Thus the various parts of the body are pushed too many problems. There are many ways for calcium absorption. Drinking too much coffee, tea, and alcohol, smoking and sun exposure can reduce calcium absorption. In this article, you can see what kind of habits will help you to absorb more calcium in your body.
Intake more calcium Rich Foods: Increase calcium-rich foods. Calcium levels vary depending on their body. Adults typically require at least 1200 mg/day.
Calcium-rich foods: Cheese, bananas, broccoli, eggs, fish, soya, and chickpeas all contain significant calcium. Since milk has very high calcium, it should be taken daily at night. There is more calcium in cheese; it should be eaten at least 4 days a week.
Are you roaming in the sun?
Do you get enough energy from the sun? Walk at least 10 minutes in the morning and evening. Vitamin D deficiency may result in calcium deficiency. Vitamin D, you can get mostly from the sunlight. There are some foods that have it.
Fish oil, egg, mushroom, sunflower oil, salmon fish, and milk have the high content of vitamin D. Eat mainly seafood.
Reduce fat diet: Fat foods reduce calcium absorption. This also leads to a lack of calcium. So do not take foods that are deep fried in oil and has more fats.
Proteins are necessary to absorb calcium in the body. Take high protein foods. When these are metabolized, calcium can absorb into the body.
Add foods like chicken, fish, eggs, beans, and other vegetables, natural yogurt, nuts, cheese, bananas in your daily meals.
The vegetables you buy are rich in proteins and vitamins, but depending on the way you cook them, those nutrients can be destroyed. Do you know that nutrients in vegetables can be destroyed if they are cooked at high temperatures? If so, you will not get the full benefits of the vegetables, so you need to cook at moderate heat.
Avoid acid foods: Avoid too many acidic foods. Vitamin D and calcium levels decrease when the acidity increases in the body. This prevents them from absorbing the pulse size. Coffee, sugar, carbonated drinking will affect it.
Make sure that the foods rich in chlorophyll add calcium in the body and add them to the daily diet.
|
# -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-
#
# Copyright (C) 2016-2019 Canonical Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import json
import logging
import os
import urllib.parse
from pyramid import response
import tests
from tests.fake_servers import base
logger = logging.getLogger(__name__)
class FakeStoreSearchServer(base.BaseFakeServer):
    """Fake of the snap store's v2 info and download endpoints.

    Serves canned channel-map metadata for a handful of well-known test
    snap names, and streams ``tests/data/test-snap.snap`` for downloads.
    """

    # XXX This fake server as reused as download server, to avoid passing a
    # port as an argument. --elopio - 2016-05-01

    def configure(self, configurator):
        """Register the info and download routes on the pyramid configurator."""
        configurator.add_route("info", "/v2/snaps/info/{snap}", request_method="GET")
        configurator.add_view(self.info, route_name="info")
        configurator.add_route(
            "download", "/download-snap/{snap}", request_method="GET"
        )
        configurator.add_view(self.download, route_name="download")

    def info(self, request):
        """Handle GET /v2/snaps/info/{snap}.

        Returns 500 when no User-Agent header is sent (clients must
        identify themselves), 404 for unknown snaps, otherwise the
        hal+json info payload.
        """
        snap = request.matchdict["snap"]
        logger.debug(
            "Handling details request for package {}, with headers {}".format(
                snap, request.headers
            )
        )
        if "User-Agent" not in request.headers:
            return response.Response(None, 500)
        payload = self._get_info_payload(request)
        if payload is None:
            # Unknown snap: empty JSON body with a 404, like the real store.
            return response.Response(json.dumps({}).encode(), 404)
        return response.Response(
            payload, 200, [("Content-Type", "application/hal+json")]
        )

    def _get_info_payload(self, request):
        """Build the JSON info payload for the requested snap.

        Returns ``None`` for snaps this fake does not know about.
        """
        # core snap is used in integration tests with fake servers.
        snap = request.matchdict["snap"]
        # sha3-384 digest of tests/data/test-snap.snap
        test_sha3_384 = (
            "8c0118831680a22090503ee5db98c88dd90ef551d80fc816"
            "dec968f60527216199dacc040cddfe5cec6870db836cb908"
        )
        revision = "10000"
        confinement = "strict"
        if snap in ("test-snap", "core"):
            sha3_384 = test_sha3_384
        elif snap == "snapcraft":
            sha3_384 = test_sha3_384
            revision = "25"
            confinement = "classic"
        elif snap == "test-snap-with-wrong-sha":
            # Deliberately wrong digest so download verification fails.
            sha3_384 = "wrong sha"
        elif (
            snap == "test-snap-branded-store"
            and request.headers.get("Snap-Device-Store") == "Test-Branded"
        ):
            sha3_384 = test_sha3_384
        else:
            return None
        # One stable and one edge channel per architecture.
        channel_map = []
        for arch in ("amd64", "i386", "s390x", "arm64", "armhf", "ppc64el"):
            for risk in ("stable", "edge"):
                channel_map.append(
                    {
                        "channel": {
                            "architecture": arch,
                            "name": risk,
                            # BUG FIX: the year was previously truncated to
                            # "019-01-17", an invalid ISO-8601 timestamp.
                            "released-at": "2019-01-17T15:01:26.537392+00:00",
                            "risk": risk,
                            "track": "latest",
                        },
                        "download": {
                            "deltas": [],
                            "sha3-384": sha3_384,
                            "url": urllib.parse.urljoin(
                                "http://localhost:{}".format(self.server.server_port),
                                "download-snap/test-snap.snap",
                            ),
                        },
                        "created-at": "2019-01-16T14:59:16.711111+00:00",
                        "confinement": confinement,
                        "revision": revision,
                    }
                )
        return json.dumps(
            {
                "channel-map": channel_map,
                "snap": {
                    "name": snap,
                    "snap-id": "good",
                    "publisher": {
                        "id": snap + "-developer-id",
                        "validation": "unproven",
                    },
                },
                "snap-id": "good",
                "name": snap,
            }
        ).encode()

    def download(self, request):
        """Handle GET /download-snap/{snap} by streaming the bundled test snap."""
        snap = request.matchdict["snap"]
        logger.debug("Handling download request for snap {}".format(snap))
        if "User-Agent" not in request.headers:
            return response.Response(None, 500)
        # TODO create a test snap during the test instead of hardcoding it.
        # --elopio - 2016-05-01
        snap_path = os.path.join(
            os.path.dirname(tests.__file__), "data", "test-snap.snap"
        )
        with open(snap_path, "rb") as snap_file:
            return response.Response(
                snap_file.read(),
                200,
                [("Content-Type", "application/octet-stream")],
            )
|
Create a National Registry account?
From the homepage select Create An Account.
Add a user role (Training Officer, etc) to my National Registry account?
Select the role(s) you want to request.
Add the State EMS Office user role to my National Registry account?
AFFILIATE AS A TRAINING OFFICER WITH AN EXISTING AGENCY?
Login to your account. Note: You must have the 'Training Officer' role activated on your National Registry account. If you do not already have this role, or you do not have a National Registry account, see the How To Guides listed above.
On the left side of the screen, under "My Current Role", select "Training Officer".
Scroll down and click on “Affiliate with Agency”.
Choose your state from the drop-down menu and click “Next”.
Choose your agency from the drop down list.
The agency's current Training Officer on file must approve your Training Officer affiliation/role request. If the Training Officer currently listed for the agency is no longer with the agency, the agency's Director/Chief must send an email to ednet@nremt.org authorizing you to be added as the Training Officer. Please allow 7 business days for change request to be approved.
If you need additional assistance, you can contact the EdNet Specialist directly by calling 614-888-4484, extension 192.
Watch this video clip to see the steps demonstrated on our new website!
Here is a detailed instructional video demonstrating how to affiliate with an agency.
Add the Medical Director user role to my National Registry account?
Login to your account. Note: You must have the 'Medical Director' role activated on your National Registry account. If you do not already have this role, or you do not have a National Registry account, see the How To Guides listed above.
On the left side of the screen, under "My Current Role", select "Medical Director".
Enter your Medical License number.
Enter the state associated with the Medical License number provided.
The agency's current Training Officer on file must approve your Medical Director role request. If the Training Officer currently listed for the agency is no longer with the agency, send an email to ednet@nremt.org or call 614-888-4484, extension 192 for assistance.
This video describes how to add the Medical Director role to a user account.
REGISTER AN AGENCY FOR National Registry ONLINE RECERTIFICATION?
Use this process if you need to register your EMS agency with the National Registry. This will allow your EMS providers to affiliate with your EMS agency and electronically submit National Registry recertification applications. This will also allow your agency Training Officer and/or Medical Director(s) to approve electronic recertification applications.
Verify your agency is NOT already listed on the drop down list.
Verify your agency is not listed, then "Create an Agency Request"
Please allow seven business days for a new agency to be approved. Once the agency has been approved, your National Registry certified EMS Providers will be able to affiliate with your agency. As the Training Officer, you will have to approve their affiliation.
If you need additional assistance, you can contact the EdNet Specialist by email ednet@nremt.org, or call 614-888-4484, extension 192.
|
"""
Package: robotframework-AutoItLibrary
Module: AutoItLibrary Installation Module
Purpose: This is a Python "Distutils" setup program used to build installers for and to install the
robotframework-AutoItLibrary.
Copyright (c) 2008-2010 Texas Instruments, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
__author__ = "Martin Taylor <cmtaylor@ti.com>"
from distutils.core import setup
from distutils.sysconfig import get_python_lib
import sys
import os
import shutil
import subprocess
# Trove classifiers for the package index; the [1:-1] slice strips the
# leading and trailing newlines of the triple-quoted literal.
CLASSIFIERS = """
Development Status :: 5 - Production/Stable
License :: OSI Approved :: Apache Software License
Operating System :: Microsoft :: Windows
Programming Language :: Python
Topic :: Software Development :: Testing
"""[1:-1]
# Long description shown on the package index page.
DESCRIPTION = """
AutoItLibrary is a Robot Framework keyword library wrapper for for the
freeware AutoIt tool (http://www.autoitscript.com/autoit3/index.shtml)
using AutoIt's AutoItX.dll COM object. The AutoItLibrary class
provides a proxy for the AutoIt keywords callable on the AutoIt COM
object and provides additional high-level keywords implemented as
methods in this class.
"""[1:-1]
# Python 2 installer script: on "install" it first copies and COM-registers
# AutoItX3.dll (Windows only), then runs the normal distutils setup().
if __name__ == "__main__":
    #
    # Install the 3rd party packages
    #
    if sys.argv[1].lower() == "install" :
        if os.name == "nt" :
            #
            # Install and register AutoItX
            #
            # An already-present DLL is left registered; regsvr32 below
            # simply re-registers the fresh copy.
            if os.path.isfile(os.path.join(get_python_lib(), "AutoItLibrary/lib/AutoItX3.dll")) :
                print "Don't think we need to unregister the old one..."

            instDir = os.path.normpath(os.path.join(get_python_lib(), "AutoItLibrary/lib"))
            if not os.path.isdir(instDir) :
                os.makedirs(instDir)
            instFile = os.path.normpath(os.path.join(instDir, "AutoItX3.dll"))
            shutil.copyfile("3rdPartyTools/AutoIt/AutoItX3.dll", instFile)
            #
            # Register the AutoItX COM object
            # and make its methods known to Python
            #
            cmd = r"%SYSTEMROOT%\system32\regsvr32.exe /S " + instFile
            print cmd
            subprocess.check_call(cmd, shell=True)
            makepy = os.path.normpath(os.path.join(get_python_lib(), "win32com/client/makepy.py"))
            #
            # Make sure we have win32com installed
            #
            if not os.path.isfile(makepy) :
                print "AutoItLibrary requires win32com. See http://starship.python.net/crew/mhammond/win32/."
                sys.exit(2)
            # Generate the win32com type bindings for the registered DLL.
            cmd = "python %s %s" % (makepy, instFile)
            print cmd
            subprocess.check_call(cmd)
        else :
            print "AutoItLibrary cannot be installed on non-Windows platforms."
            sys.exit(2)
    #
    # Figure out the install path
    #
    destPath = os.path.normpath(os.path.join(os.getenv("HOMEDRIVE"), r"\RobotFramework\Extensions\AutoItLibrary"))
    #
    # Do the distutils installation
    #
    setup(name         = "AutoItLibrary",
          version      = "1.1",
          description  = "AutoItLibrary for Robot Framework",
          author       = "Martin Taylor",
          author_email = "cmtaylor@ti.com",
          url          = "http://code.google.com/p/robotframework-autoitlibrary/",
          license      = "Apache License 2.0",
          platforms    = "Microsoft Windows",
          classifiers  = CLASSIFIERS.splitlines(),
          long_description = DESCRIPTION,
          package_dir  = {'' : "src"},
          packages     = ["AutoItLibrary"],
          data_files   = [(destPath,
                          ["ReadMe.txt",
                           "COPYRIGHT.txt",
                           "LICENSE.txt",
                           "doc/AutoItLibrary.html",
                           "3rdPartyTools/AutoIt/Au3Info.exe",
                           "3rdPartyTools/AutoIt/AutoItX.chm",
                           "3rdPartyTools/AutoIt/AutoIt_License.html",
                          ]),
                          (os.path.join(destPath, "tests"),
                          ["tests/CalculatorGUIMap.py",
                           "tests/__init__.html",
                           "tests/Calculator_Test_Cases.html",
                           "tests/RobotIDE.bat",
                           "tests/RunTests.bat"
                          ]),
                         ]
         )
#
# -------------------------------- End of file --------------------------------
|
How do I get rid of excess furniture?
I have some extra furniture in my home that I will not be taking with me when I move soon. Can you suggest where I may be able to sell it or get rid of it? Any ideas please?
Thanks for your question. You didn't mention where you were moving to, so I will assume you are moving from the New York area. Many shelters throughout New York City provide service to victims of violence and abuse, the homeless and the mentally or physically ill rely on furniture donations. Contact shelters in your area through the yellow pages or by contacting the United Way.
Churches often have outreach programs in need of furniture to give to families in their community.
|
"""
.. module:: auth
:synopsis: All routes on the ``auth`` Blueprint.
.. moduleauthor:: Dan Schlosser <dan@schlosser.io>
"""
import base64
import httplib2
import os
from apiclient.discovery import build
from flask import (Blueprint, render_template, request, flash, session, g,
redirect, url_for, current_app)
from oauth2client.client import (FlowExchangeError,
flow_from_clientsecrets,
AccessTokenCredentials)
from eventum.lib.json_response import json_success, json_error_message
from eventum.models import User, Whitelist
from eventum.forms import CreateProfileForm
from eventum.routes.base import MESSAGE_FLASH
auth = Blueprint('auth', __name__)
# Google+ API client used to fetch the signed-in user's profile data.
gplus_service = build('plus', 'v1')
@auth.route('/login', methods=['GET'])
def login():
    """If the user is not logged in, display an option to log in. On click,
    make a request to Google to authenticate.

    If they are logged in, redirect.

    **Route:** ``/admin/login``

    **Methods:** ``GET``
    """
    already_signed_in = g.user is not None and 'gplus_id' in session
    if already_signed_in:
        # use code=303 to avoid POSTing to the next page.
        return redirect(url_for('admin.index'), code=303)

    load_csrf_token_into_session()
    # Fall back to the site root when no post-login destination was given.
    next = request.args.get('next') or request.url_root
    return render_template('eventum_auth/login.html',
                           client_id=current_app.config['EVENTUM_GOOGLE_CLIENT_ID'],
                           state=session['state'],
                           next=next)
@auth.route('/store-token', methods=['POST'])
def store_token():
    """Do the oauth flow for Google plus sign in, storing the access token
    in the session, and redirecting to create an account if appropriate.

    Because this method will be called from a ``$.ajax()`` request in
    JavaScript, we can't return ``redirect()``, so instead this method returns
    the URL that the user should be redirected to, and the redirect happens in
    html:

    .. code:: javascript

        success: function(response) {
            window.location.href = response.data.redirect_url;
        }

    **Route:** ``/admin/store-token``

    **Methods:** ``POST``
    """
    # CSRF protection: the state token must round-trip unchanged.
    if request.args.get('state', '') != session.get('state'):
        return json_error_message('Invalid state parameter.', 401)
    # One-time use: discard the state token after a successful check.
    del session['state']

    code = request.data

    try:
        # Upgrade the authorization code into a credentials object
        oauth_flow = flow_from_clientsecrets(
            current_app.config['EVENTUM_CLIENT_SECRETS_PATH'],
            scope='')
        oauth_flow.redirect_uri = 'postmessage'
        credentials = oauth_flow.step2_exchange(code)
    except FlowExchangeError:
        return json_error_message('Failed to upgrade the authorization code.',
                                  401)

    gplus_id = credentials.id_token['sub']

    # Store the access token in the session for later use.
    session['credentials'] = credentials.access_token
    session['gplus_id'] = gplus_id
    if User.objects(gplus_id=gplus_id).count() == 0:
        # A new user model must be made

        # Get the user's name and email to populate the form
        http = httplib2.Http()
        http = credentials.authorize(http)
        people_document = gplus_service.people().get(
            userId='me').execute(http=http)

        # The user must be whitelisted in order to create an account.
        email = people_document['emails'][0]['value']
        if Whitelist.objects(email=email).count() != 1:
            return json_error_message('User has not been whitelisted.',
                                      401,
                                      {'whitelisted': False, 'email': email})
        return json_success({
            'redirect_url': url_for('.create_profile',
                                    next=request.args.get('next'),
                                    name=people_document['displayName'],
                                    email=email,
                                    image_url=people_document['image']['url'])
        })

    user = User.objects().get(gplus_id=gplus_id)
    user.register_login()
    user.save()

    # The user already exists. Redirect to the next url or
    # the root of the application ('/')
    if request.args.get('next'):
        return json_success({'redirect_url': request.args.get('next')})
    return json_success({'redirect_url': request.url_root})
@auth.route('/create-profile', methods=['GET', 'POST'])
def create_profile():
    """Create a profile (filling in the form with Google+ sign-in data),
    and register it in the database.

    **Route:** ``/admin/create-profile``

    **Methods:** ``GET, POST``
    """
    if g.user is not None and 'gplus_id' in session:
        # use code=303 to avoid POSTing to the next page.
        return redirect(url_for('admin.index'), code=303)
    form = CreateProfileForm(request.form,
                             name=request.args['name'],
                             email=request.args['email'],
                             next=request.args['next'])
    if form.validate_on_submit():
        if User.objects(email=form.email.data).count() != 0:
            # A user with this email already exists. Override it.
            user = User.objects.get(email=form.email.data)
            # BUG FIX: this previously read session['openid'], a leftover
            # from an OpenID flow that no longer exists -- the session only
            # ever stores 'gplus_id' (set in store_token), so the override
            # path raised KeyError. Link the account to the Google+ id.
            user.gplus_id = session['gplus_id']
            user.name = form.name.data
            flash('Account with this email already exists. Overridden.',
                  MESSAGE_FLASH)
            user.register_login()
            user.save()
        else:
            # Retrieve their user type from the whitelist then mark it used.
            wl = Whitelist.objects().get(email=form.email.data)
            user_type = wl.user_type
            wl.redeemed = True
            wl.save()
            # Create a brand new user
            user = User(email=form.email.data,
                        name=form.name.data,
                        gplus_id=session['gplus_id'],
                        user_type=user_type,
                        image_url=request.args.get('image_url'))
            flash('Account created successfully.', MESSAGE_FLASH)
            user.register_login()
            user.save()

        # redirect to the next url or the root of the application ('/')
        if form.next.data:
            # use code=303 to avoid POSTing to the next page.
            return redirect(form.next.data, code=303)
        # use code=303 to avoid POSTing to the next page.
        return redirect('/', code=303)
    return render_template('eventum_auth/create_profile.html',
                           image_url=request.args.get('image_url'), form=form)
@auth.route('/logout', methods=['GET'])
def logout():
    """Logs out the current user.

    **Route:** ``/admin/logout``

    **Methods:** ``GET``
    """
    # Clear the request-local user and drop the Google+ id from the session.
    g.user = None
    session.pop('gplus_id', None)
    return redirect(url_for('client.index'))
def load_csrf_token_into_session():
    """Create a unique session cross-site request forgery (CSRF) token and
    load it into the session for later verification.
    """
    # 24 bytes in b64 == 32 characters.
    # BUG FIX: decode to str -- on Python 3, urlsafe_b64encode returns
    # bytes, which is not JSON-serializable for the session cookie and
    # would never compare equal to the str `state` query parameter checked
    # in store_token().
    session['state'] = base64.urlsafe_b64encode(os.urandom(24)).decode('ascii')
@auth.route('/disconnect', methods=['GET', 'POST'])
def disconnect():
    """Revoke current user's token and reset their session.

    **Route:** ``/admin/disconnect``

    **Methods:** ``GET, POST``
    """
    # Only disconnect a connected user.
    # BUG FIX: the original built the credentials object first and then
    # tested `credentials is None`, but the constructor never returns None,
    # so the guard was dead. Check the session value itself instead.
    access_token = session.get('credentials')
    if access_token is None:
        return json_error_message('Current user not connected.', 401)
    credentials = AccessTokenCredentials(
        access_token, request.headers.get('User-Agent'))

    # Execute HTTP GET request to revoke current token.
    url = ('https://accounts.google.com/o/oauth2/revoke?token={}'
           .format(str(credentials.access_token)))
    h = httplib2.Http()
    result = h.request(url, 'GET')[0]

    # Log the user out locally regardless of whether revocation succeeds.
    session.pop('gplus_id', None)
    g.user = None
    if result['status'] == '200':
        # Reset the user's session.
        del session['credentials']
    else:
        # For whatever reason, the given token was invalid.
        current_app.logger.error('Failed to revoke token for given user.')
    # use code=303 to avoid POSTing to the next page.
    return redirect(url_for('.login'), code=303)
|
A little over a year ago, when we spoke with business owners and executives, document management seemed to be more of a “nice to have” feature. Yes, it saved time, was more convenient, and saved space, but the traditional filing cabinet was still working. Now document management enters discussions as a necessity, and no more as a “nice to have” function.
What happened? Here are a few explanations that can help explain why document management has become important to have.
One explanation is that sales representatives who are traveling to customer sites need to see certain documents as well. With the help of laptops, smartphones, and tablets, salespeople have lots of information available to them. Documents filed in the filing cabinet back in the office are difficult to access though. If they are at a customer site and need to produce a document, such as a proof of delivery or an invoice, it requires a rigorous chain of events and people to get that document to them in the field. With a document management system, documents can be easily accessed from remote locations.
Certain industries have regulations requiring them to have documentation readily available. MSDS data, certifications, and HazMat forms are some examples of documents required in certain industries. With a document management system, these types of documents can be easily accessed as needed for email or as a document with the shipment.
The number of businesses that are trying to go paperless, or at least cutting down on the dependence for paper documents, is constantly increasing. Printing is an expense on many levels, postage keeps increasing, so businesses are getting documents to their customers by other means. Many businesses have moved to emailing documents. Other businesses have gone to making documents available online, through some sort of customer portal, giving their customers the ability to download documents.
Document management has gone well beyond a nice space saving and convenience feature. It has become a needed tool in today’s sophisticated marketplace.
|
from typing import Type, TypeVar, MutableMapping, Any, Iterable
from datapipelines import DataSource, DataSink, PipelineContext, Query, validate_query
from cassiopeia.data import Platform, Region
from cassiopeia.dto.spectator import FeaturedGamesDto, CurrentGameInfoDto
from cassiopeia.datastores.uniquekeys import convert_region_to_platform
from .common import SimpleKVDiskService
T = TypeVar("T")


class SpectatorDiskService(SimpleKVDiskService):
    """Disk-backed key/value cache for spectator (live-game) DTOs.

    ``get``/``get_many``/``put``/``put_many`` are generic dispatch entry
    points; the type-specific handlers below are attached to them via
    ``@get.register(...)`` / ``@put.register(...)``.
    """

    @DataSource.dispatch
    def get(self, type: Type[T], query: MutableMapping[str, Any], context: PipelineContext = None) -> T:
        pass

    @DataSource.dispatch
    def get_many(self, type: Type[T], query: MutableMapping[str, Any], context: PipelineContext = None) -> Iterable[T]:
        pass

    @DataSink.dispatch
    def put(self, type: Type[T], item: T, context: PipelineContext = None) -> None:
        pass

    @DataSink.dispatch
    def put_many(self, type: Type[T], items: Iterable[T], context: PipelineContext = None) -> None:
        pass

    ##################
    # Featured Games #
    ##################

    # A featured-games query must supply the platform.
    _validate_get_featured_games_query = Query. \
        has("platform").as_(Platform)

    @get.register(FeaturedGamesDto)
    @validate_query(_validate_get_featured_games_query, convert_region_to_platform)
    def get_featured_games(self, query: MutableMapping[str, Any], context: PipelineContext = None) -> FeaturedGamesDto:
        """Load the cached featured-games list for the queried platform."""
        # Cache key layout: "FeaturedGamesDto.<platform>".
        key = "{clsname}.{platform}".format(clsname=FeaturedGamesDto.__name__, platform=query["platform"].value)
        return FeaturedGamesDto(self._get(key))

    @put.register(FeaturedGamesDto)
    def put_featured_games(self, item: FeaturedGamesDto, context: PipelineContext = None) -> None:
        """Store a featured-games list under its region's platform."""
        platform = Region(item["region"]).platform.value
        key = "{clsname}.{platform}".format(clsname=FeaturedGamesDto.__name__, platform=platform)
        self._put(key, item)

    ################
    # Current Game #
    ################

    # A current-game query must supply the platform and a summoner id.
    _validate_get_current_game_query = Query. \
        has("platform").as_(Platform).also. \
        has("summoner.id").as_(str)

    @get.register(CurrentGameInfoDto)
    @validate_query(_validate_get_current_game_query, convert_region_to_platform)
    def get_current_game(self, query: MutableMapping[str, Any], context: PipelineContext = None) -> CurrentGameInfoDto:
        """Load the cached live game for the queried summoner."""
        # Cache key layout: "CurrentGameInfoDto.<platform>.<summoner id>".
        key = "{clsname}.{platform}.{id}".format(clsname=CurrentGameInfoDto.__name__,
                                                 platform=query["platform"].value,
                                                 id=query["summoner.id"])
        return CurrentGameInfoDto(self._get(key))

    @put.register(CurrentGameInfoDto)
    def put_current_game(self, item: CurrentGameInfoDto, context: PipelineContext = None) -> None:
        """Store a live game under its platform and summoner id."""
        platform = Region(item["region"]).platform.value
        key = "{clsname}.{platform}.{id}".format(clsname=CurrentGameInfoDto.__name__,
                                                 platform=platform,
                                                 id=item["summonerId"])
        self._put(key, item)
|
Ransom P Burden served his country in World War II with the 47th Bombardment Group.
Information on Ransom Burden is gathered and extracted from military records. We have many documents and copies of documents, including military award documents. It is from these documents that we have found this information on SSGT Burden. These serviceman's records are not complete and should not be construed as a complete record. We are always looking for more documented material on this and other servicemen. If you can help add to Ransom Burden's military record please contact us.
The information on this page about Ransom Burden has been obtained through a variety of possible sources including the serviceman themselves, family, copies of military records that are in possession of the Army Air Corps Library and Museum along with data obtained from other researchers and sources including AF Archives at Air Force Historical Research Agency and the U.S. National Archives.
If you have more information concerning the service of Ransom Burden, including pictures, documents and other artifacts that we can add to this record, please Contact Us.
|
#!/usr/bin/python
"""Render per-snapshot scatter plots of particle data.

For every snapshot directory <n>/particles.dat the particles are drawn at
(x, y), coloured by their pressure relative to the outermost particle, and
the frame is saved as snapNNN.  An optional command-line argument selects
the first snapshot index (default 0).
"""
import matplotlib.pyplot as plt
import numpy as np
import sys

# First snapshot to process: optional first CLI argument.
if len(sys.argv) == 1:
    start = 0
else:
    start = int(sys.argv[1])

plt.figure(figsize=(8, 8))

stride = 1
data_dir = './'
box = 1  # plotted window spans [-box/2, box/2] in both directions

for snap in range(start, 2000000 + stride, stride):
    plt.clf()
    data = np.loadtxt(data_dir + str(snap) + '/particles.dat')
    xs = data[:, 0]
    ys = data[:, 1]
    volumes = data[:, 3]
    pressures = data[:, 5]
    # Reference the pressure to the particle farthest from the origin.
    radii = np.sqrt(xs ** 2 + ys ** 2)
    outermost = np.argmax(radii)
    pressures = pressures - pressures[outermost]
    plt.scatter(xs, ys, s=80, c=pressures)
    plt.xlim([-box / 2.0, box / 2.0])
    plt.ylim([-box / 2.0, box / 2.0])
    frame_name = 'snap{:03d}'.format(int(snap / stride))
    print(frame_name)
    plt.savefig(frame_name)
|
Our Mother's Day gift guide is the perfect way to show her the appreciation she deserves.
Our Matriarch Robe, a full length 100% silk kimono robe, in our Kusuma print, inspired by lush tropical forest and flowers of Lombok. Complete with an extra wide silk sash, side seam pockets and French seams throughout the garment, this robe is most certainly fit for a queen. Ships in a stunning branded gift box.
A Woodlot 'Flora' soap bar made from a nourishing blend of olive oil, avocado oil, and coconut oil, with coconut and shea butters. Flora scent is blended with relaxing lavender, refreshing bergamot, grounding cedar-wood and citrusy sweet orange.
A Woodlot everyday mist in Rose & Palo Santo. Soothe and refresh with rejuvenating rose hydrosol and stress-relieving palo santo—this everyday mist is a must for it’s skin toning properties.
A great night’s sleep will ensure you stay calm, cool and collected no matter what your day has in store, made possible by a Halfmoon silk sleep mask.
All robes are shipped in stunning branded Birds of Paradise gift box, and gift collection includes a white gift bag with tissue that fits all listed items.
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-01-27 17:57
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated schema migration (Django 1.10.5): relaxes the date
    # fields on the `albums` and `photos` models to allow NULL.
    # NOTE(review): `date_published` uses auto_now=True, i.e. it is
    # rewritten on every save -- confirm this matches the model's intent.

    dependencies = [
        ('imager_images', '0002_auto_20170125_0218'),
    ]

    operations = [
        migrations.AlterField(
            model_name='albums',
            name='date_modified',
            field=models.DateField(auto_now=True, null=True),
        ),
        migrations.AlterField(
            model_name='albums',
            name='date_published',
            field=models.DateField(auto_now=True, null=True),
        ),
        migrations.AlterField(
            model_name='albums',
            name='date_uploaded',
            field=models.DateField(auto_now_add=True, null=True),
        ),
        migrations.AlterField(
            model_name='photos',
            name='date_modified',
            field=models.DateField(auto_now=True, null=True),
        ),
        migrations.AlterField(
            model_name='photos',
            name='date_published',
            field=models.DateField(auto_now=True, null=True),
        ),
        migrations.AlterField(
            model_name='photos',
            name='date_uploaded',
            field=models.DateField(auto_now_add=True, null=True),
        ),
    ]
|
The Liverpool Assassin by Cold Steel measures in at almost 35 inches - the Liverpool Assassin is one of the biggest and heaviest Cricket Bats being commercially made on the market today. Made from high-impact polypropylene, with a flat striking edge and raised ridge on the spine, it is incredibly tough, unbelievably resilient -- and it hits like a freight train!
Note: This is a large, oversize, heavy item. Additional shipping charges apply. Not eligible for FREE SHIPPING offer.
Out of all the knife places I've dealt with Oso Grande is by far the best when it comes to pricing, fast shipping and excellent customer service.
|
# !/usr/bin/env python
# !-*-coding:utf-8-*-
import time
from django.utils.deprecation import MiddlewareMixin
from django.core.cache import cache
from utils.tools import my_response, Logger
# class PermissionCheck(MiddlewareMixin):
# """
# 中间件,用于检查请求权限
# """
# cookie_time = 2 * 3600
# @staticmethod
# def process_request(request):
# """
# :param request:
# :return:
# """
# # print "start", time.time()
# if "login" in request.path:
# return
# # request.COOKIES["sid"] = "9342c00a6cb65a2d35e2bd48cc2ab163"
# sid = request.COOKIES.get("sid")
# content = cache.get(sid)
# if content:
# username = content.get("username")
# Logger.debug("{0}: request, url is: {1}".format(username, request.path.encode("utf-8")))
# request.COOKIES["username"] = username
# else:
# return my_response(code=-1, msg="登录超时!")
# @staticmethod
# def process_response(request, response):
# sid = request.COOKIES.get("sid")
# if sid and "logout" not in request.path:
# cache.expire(sid, timeout=PermissionCheck.cookie_time)
# response.set_cookie("sid", sid, max_age=PermissionCheck.cookie_time - 10)
# # print "end time", time.time()
# return response
class PrintCheck(MiddlewareMixin):
    """Middleware that checks request permissions via the session cookie.

    Requests whose path contains "login" pass through untouched.  All
    other requests must carry a "sid" cookie that maps to a cached
    session; otherwise a "login timed out" error response is returned.
    """

    # Session lifetime in seconds (2 hours).
    cookie_time = 2 * 3600

    @staticmethod
    def process_request(request):
        """Attach the cached user's chinese_name to the request cookies.

        :param request: incoming Django request
        :return: None to continue processing, or an error response when
                 the session is missing or expired.
        """
        # The login endpoint must stay reachable without a session.
        if "login" in request.path:
            return
        sid = request.COOKIES.get("sid")
        content = cache.get(sid)
        if content:
            chinese_name = content.get("chinese_name")
            # BUG FIX: the original logged the undefined name `username`,
            # raising NameError for every authenticated request.
            Logger.debug("{0}: request, url is: {1}".format(chinese_name, request.path.encode("utf-8")))
            request.COOKIES["chinese_name"] = chinese_name
        else:
            return my_response(code=-1, msg="登录超时!")
|
Despite Nvidia’s confusing and controversial GeForce Partner Program (GPP) coming to an end, ASUS confirmed that it was set to continue using its AREZ branding to distinguish its AMD Radeon range. AREZ GPUs have now been spotted in the wild, initially sporting a $160 premium price above its identical ROG branded counterparts.
US retailer Newegg is one of the first to get its hands on AREZ Strix Radeon RX Vega 64 (AREZ RXVEGA64-O8G-GAMING) graphics cards, initially setting the price at a staggering $749.99, significantly above the $589.99 MSRP of the ASUS ROG Strix Radeon RX Vega 64 (STRIX-RXVEGA64-O8G-GAMING) GPUs currently in circulation.
While the term “you get what you pay for” is often true, speaking for the quality of the item, companies often charge extra for the reputation of its branding. In the case of AREZ versus its older ROG counterpart, however, the $160 premium seems to only net customers a different name and different sticker on the centre of each fan. Newegg seems to have responded to the concern expressed in reports, dropping the price of the AREZ card down to $649.99 at the time of writing, however this is still an unnecessary $60 premium for the exact same card.
With few other retailers stocking AREZ graphics cards at this time, it’s difficult to discern whether this is the choice of Newegg or ASUS, however the latter has a lot of cost to make up from its extensive marketing overhaul, thanks to Nvidia.
Although ASUS wasn’t the only company that committed to diversifying its Radeon branding in order to comply with Nvidia’s aborted GPP, we have yet to see this same inconsistency with other manufacturers. The more fragmented these brands make the current selection, the easier it will be for consumers to be duped into paying more for the same card, so care is urged when shopping for a new GPU.
KitGuru Says: It’s sad that the costs to change up these labels will inevitably fall on the buyer, rather than the company that caused it all. Still, don’t underestimate the power of your wallet, as that is inevitably what manufacturers respond to.
|
from google.appengine.ext import ndb
class Calendar(ndb.Model):
    """Datastore model for a calendar subscription.

    Entities are keyed by the calendar id and parented under the owning
    user, so per-user lookups run as ancestor queries.
    """
    summary = ndb.StringProperty()
    time_zone = ndb.StringProperty()
    show_in_todolist = ndb.BooleanProperty(default=True)
    active = ndb.BooleanProperty(default=True)
    watch_id = ndb.StringProperty()
    watch_expires = ndb.DateTimeProperty()
    resource_id = ndb.StringProperty()
    created = ndb.DateTimeProperty(auto_now_add=True)
    updated = ndb.DateTimeProperty(auto_now=True)

    @classmethod
    def create_or_update(cls, user_key, _id, summary, time_zone):
        """Get-or-insert the calendar and keep summary/time_zone in sync."""
        if not _id:
            raise ValueError('Invalid id for Calendar object.')
        calendar = cls.get_or_insert(
            _id, parent=user_key, summary=summary, time_zone=time_zone)
        # An already-existing entity may carry stale metadata; persist
        # the new values only when something actually changed.
        stale = (calendar.summary, calendar.time_zone) != (summary, time_zone)
        if stale:
            calendar.summary = summary
            calendar.time_zone = time_zone
            calendar.put()
        return calendar

    @classmethod
    def get_by_id(cls, _id):
        """Resolve a urlsafe key string to its entity."""
        key = ndb.Key(urlsafe=_id)
        return key.get()

    @classmethod
    def get_by_watch_id(cls, watch_id):
        """Return the first calendar registered under *watch_id*, if any."""
        query = cls.query(cls.watch_id == watch_id)
        return query.get()

    @classmethod
    def get_all(cls, user_key):
        """Return a query over all calendars owned by *user_key*."""
        return cls.query(ancestor=user_key)
Dr. Muhammad Ahmad was national field chairman of the Revolutionary Action Movement (RAM) during the mid-60s and founder of the African People’s Party in the 1970s. He has worked closely with Malcolm X, Jesse Gray, Amiri Baraka, Stokely Carmichael, James and Grace Lee Boggs, James Forman, Robert and Mabel Williams, and Queen Mother Audley Moore, among others, in founding and carrying out various Black liberation projects and organizations. Who better, then, to pen a major assessment of some of the most important Black radical organizations of the 60s? Here is a study of the Student Non-Violent Coordinating Committee (SNCC), the Black Panther Party (BPP), the Revolutionary Action Movement (RAM), and the League of Revolutionary Black Workers (LRBW), that only he could have done.
|
# -*- coding: utf-8 -*-
# HDTV - A ROOT-based spectrum analysis software
# Copyright (C) 2006-2009 The HDTV development team (see file AUTHORS)
#
# This file is part of HDTV.
#
# HDTV is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 2 of the License, or (at your
# option) any later version.
#
# HDTV is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
#
# You should have received a copy of the GNU General Public License
# along with HDTV; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
import os
from scipy.interpolate import InterpolatedUnivariateSpline
import numpy as np
import ROOT
import hdtv.color
import hdtv.rootext.mfile
import hdtv.rootext.calibration
import hdtv.rootext.display
import hdtv.rootext.fit
from hdtv.drawable import Drawable
from hdtv.specreader import SpecReader, SpecReaderError
from hdtv.cal import CalibrationFitter
from hdtv.util import LockViewport
# Don't add created spectra to the ROOT directory
ROOT.TH1.AddDirectory(ROOT.kFALSE)
def HasPrimitiveBinning(hist):
    """Return whether *hist* uses "primitive" binning.

    Primitive means the number of bins equals the axis span (i.e. every
    bin is one unit wide) and each individual bin width is exactly 1.0.
    """
    axis = hist.GetXaxis()
    nbins = hist.GetNbinsX()
    if nbins != (axis.GetXmax() - axis.GetXmin()):
        return False
    return all(hist.GetBinWidth(b) == 1.0 for b in range(nbins))
class Histogram(Drawable):
    """
    Histogram object

    This class is hdtvs wrapper around a ROOT histogram. It adds a calibration,
    plus some internal management for drawing the histogram to the hdtv spectrum
    viewer.
    """

    def __init__(self, hist, color=hdtv.color.default, cal=None):
        Drawable.__init__(self, color, cal)
        self._norm = 1.0  # display normalization factor
        self._ID = None
        self.effCal = None  # efficiency calibration slot (not set here)
        self.typeStr = "spectrum"
        self.cal = cal
        if cal is None:
            # No calibration given: coerce the histogram to primitive
            # (unit-width) binning, deriving a calibration if needed.
            self.SetHistWithPrimitiveBinning(hist)
        else:
            self._hist = hist

    def __str__(self):
        return self.name

    def __copy__(self):
        # call C++ copy constructor
        hist = self._hist.__class__(self._hist)
        # create new spectrum object
        return Histogram(hist, color=self.color, cal=self.cal)

    # hist property: keep the display object in sync when replaced
    def _set_hist(self, hist):
        self._hist = hist
        if self.displayObj:
            self.displayObj.SetHist(self._hist)

    def _get_hist(self):
        return self._hist

    hist = property(_get_hist, _set_hist)

    # name property: delegates to the underlying ROOT histogram
    def _get_name(self):
        if self._hist:
            return self._hist.GetName()

    def _set_name(self, name):
        self._hist.SetName(name)

    name = property(_get_name, _set_name)

    # norm property: keep the display object in sync when changed
    def _set_norm(self, norm):
        self._norm = norm
        if self.displayObj:
            self.displayObj.SetNorm(norm)

    def _get_norm(self):
        return self._norm

    norm = property(_get_norm, _set_norm)

    @property
    def info(self):
        """
        Return a string describing this spectrum
        """
        s = "Spectrum type: %s\n" % self.typeStr
        if not self._hist:
            return s
        s += "Name: %s\n" % str(self)
        s += "Nbins: %d\n" % self._hist.GetNbinsX()
        xmin = self._hist.GetXaxis().GetXmin()
        xmax = self._hist.GetXaxis().GetXmax()
        if self.cal and not self.cal.IsTrivial():
            s += "Xmin: %.2f (cal) %.2f (uncal)\n" % (self.cal.Ch2E(xmin), xmin)
            s += "Xmax: %.2f (cal) %.2f (uncal)\n" % (self.cal.Ch2E(xmax), xmax)
        else:
            s += "Xmin: %.2f\n" % xmin
            s += "Xmax: %.2f\n" % xmax
        if not self.cal or self.cal.IsTrivial():
            s += "Calibration: none\n"
        elif isinstance(self.cal, ROOT.HDTV.Calibration):
            s += "Calibration: Polynomial, degree %d\n" % self.cal.GetDegree()
        else:
            s += "Calibration: unknown\n"
        return s

    # TODO: sumw2 function should be called at some point for correct error
    # handling
    def Plus(self, spec):
        """
        Add other spectrum to this one
        """
        # If the spectra have the same calibration (~= have the same binning),
        # the root build-in add can be used
        if self.cal == spec.cal or (self.cal.IsTrivial() and spec.cal.IsTrivial()):
            hdtv.ui.info("Adding binwise")
            self._hist.Add(spec._hist, 1.0)
        # If the binning is different, determine the amount to add to each bin
        # by integrating the other spectrum
        else:
            hdtv.ui.info("Adding calibrated")
            nbins = self._hist.GetNbinsX()
            for n in range(0, nbins):
                # Integrate the other spectrum over the calibrated range
                # covered by this spectrum's bin n.
                integral = ROOT.HDTV.TH1IntegrateWithPartialBins(
                    spec._hist,
                    spec.cal.E2Ch(self.cal.Ch2E(n - 0.5)),
                    spec.cal.E2Ch(self.cal.Ch2E(n + 0.5)),
                )
                # Note: Can't use Fill due to bin errors?
                self._hist.SetBinContent(
                    n + 1, self._hist.GetBinContent(n + 1) + integral
                )
        # update display
        if self.displayObj:
            self.displayObj.SetHist(self._hist)
        self.typeStr = "spectrum, modified (sum)"

    def Minus(self, spec):
        """
        Substract other spectrum from this one
        """
        # If the spectra have the same calibration (~= have the same binning),
        # the root build-in add can be used
        if self.cal == spec.cal or (self.cal.IsTrivial() and spec.cal.IsTrivial()):
            # NOTE(review): log messages below say "Adding" although this
            # method subtracts.
            hdtv.ui.info("Adding binwise")
            self._hist.Add(spec._hist, -1.0)
        # If the binning is different, determine the amount to add to each bin
        # by integrating the other spectrum
        else:
            hdtv.ui.info("Adding calibrated")
            nbins = self._hist.GetNbinsX()
            for n in range(0, nbins):
                integral = ROOT.HDTV.TH1IntegrateWithPartialBins(
                    spec._hist,
                    spec.cal.E2Ch(self.cal.Ch2E(n - 0.5)),
                    spec.cal.E2Ch(self.cal.Ch2E(n + 0.5)),
                )
                # Note: Can't use Fill due to bin errors?
                self._hist.SetBinContent(
                    n + 1, self._hist.GetBinContent(n + 1) - integral
                )
        # update display
        if self.displayObj:
            self.displayObj.SetHist(self._hist)
        self.typeStr = "spectrum, modified (difference)"

    def Multiply(self, factor):
        """
        Multiply spectrum with factor
        """
        self._hist.Scale(factor)
        # update display
        if self.displayObj:
            self.displayObj.SetHist(self._hist)
        self.typeStr = "spectrum, modified (multiplied)"

    def Rebin(self, ngroup, calibrate=True):
        """
        Rebin spectrum by adding ngroup bins into one
        """
        bins = self._hist.GetNbinsX()
        self._hist.RebinX(ngroup)
        # Relabel the axis so channels keep counting from 0 after rebinning.
        self._hist.GetXaxis().SetLimits(0, bins / ngroup)
        # update display
        if self.displayObj:
            self.displayObj.SetHist(self._hist)
        # update calibration
        if calibrate:
            # NOTE(review): if self.cal is None this SetCal call raises
            # AttributeError; presumably self.cal is expected to be a
            # falsy-but-valid trivial Calibration here -- confirm.
            if not self.cal:
                self.cal.SetCal(0.0, 1.0)
            self.cal.Rebin(ngroup)
            self.displayObj.SetCal(self.cal)
            hdtv.ui.info("Calibration updated for rebinned spectrum")
        self.typeStr = f"spectrum, modified (rebinned, ngroup={ngroup})"

    def Calbin(
        self, binsize: float = 1.0, spline_order: int = 3, use_tv_binning: bool = True
    ):
        """
        Rebin spectrum to match calibration unit

        Args:
            binsize: Size of calibrated bins
            spline_order: Order of the spline interpolation (default: 3)
            use_tv_binning: Center first bin on 0. (True) or
                lower edge of first bin on 0. (False).
        """
        nbins_old = self._hist.GetNbinsX()
        lower_old = self.cal.Ch2E(0)
        upper_old = self.cal.Ch2E(nbins_old - 1)
        nbins = int(np.ceil(upper_old / binsize)) + 1
        if use_tv_binning:
            lower = -0.5 * binsize
            # NOTE(review): `upper_old // nbins` floor-divides a float;
            # confirm this is intended rather than true division.
            upper = 0.5 * binsize + (upper_old // nbins) * (nbins - 1)
        else:
            lower = 0.0
            upper = binsize + (upper_old // nbins) * (nbins - 1)
        # Create new histogram with number of bins equal
        # to the calibrated range of the old histogram
        # Always -0.5 to create standard tv-type histogram
        newhist = ROOT.TH1D(
            self._hist.GetName(), self._hist.GetTitle(), nbins, -0.5, nbins - 0.5
        )
        # Convert the old histogram to (bin center, density) pairs in
        # calibrated units: density = content / calibrated bin width.
        input_bins_center, input_hist = np.transpose(
            [
                [
                    self.cal.Ch2E(n - 1),
                    self._hist.GetBinContent(n)
                    / (self.cal.Ch2E(n) - self.cal.Ch2E(n - 1)),
                ]
                for n in range(1, self._hist.GetNbinsX() + 1)
            ]
        )
        output_bins_low = np.arange(nbins) * binsize + lower
        output_bins_high = output_bins_low + binsize
        # Interpolate the density with a spline and integrate it over each
        # new bin; negative interpolation artifacts are clipped to zero.
        inter = InterpolatedUnivariateSpline(
            input_bins_center, input_hist, k=spline_order
        )
        inter_integral_v = np.vectorize(inter.integral)
        output_hist = np.maximum(
            inter_integral_v(output_bins_low, output_bins_high), 0.0
        )
        # Suppress bins outside of original histogram range
        min_bin = int((lower_old - lower) / binsize)
        output_hist[:min_bin] = np.zeros(min_bin)
        for i in range(0, nbins):
            newhist.SetBinContent(i + 1, output_hist[i])
        self._hist = newhist
        if use_tv_binning:
            # NOTE(review): if self.cal is None and binsize != 1.0 this
            # raises AttributeError -- confirm cal is always set here.
            if binsize != 1.0 or self.cal:
                self.cal.SetCal(0, binsize)
        else:
            # Lower edge at 0 -> first bin center sits at binsize/2.
            self.cal.SetCal(binsize / 2, binsize)
        # update display
        if self.displayObj:
            self.displayObj.SetHist(self._hist)
            # update calibration
            self.displayObj.SetCal(self.cal)
        hdtv.ui.info(f"Rebinned to calibration unit (binsize={binsize}).")

    def Poisson(self):
        """
        Randomize each bin content assuming a Poissonian distribution.
        """
        for i in range(0, self._hist.GetNbinsX() + 1):
            counts = self._hist.GetBinContent(i)
            # error = self._hist.GetBinError(i)
            varied = np.random.poisson(counts)
            self._hist.SetBinContent(i, varied)
        if self.displayObj:
            self.displayObj.SetHist(self._hist)

    def Draw(self, viewport):
        """
        Draw this spectrum to the viewport
        """
        if self.viewport is not None and not self.viewport == viewport:
            # Unlike the DisplaySpec object of the underlying implementation,
            # Spectrum() objects can only be drawn on a single viewport
            raise RuntimeError("Spectrum can only be drawn on a single viewport")
        self.viewport = viewport
        # Lock updates
        with LockViewport(self.viewport):
            # Show spectrum
            if self.displayObj is None and self._hist is not None:
                if self.active:
                    color = self._activeColor
                else:
                    color = self._passiveColor
                self.displayObj = ROOT.HDTV.Display.DisplaySpec(self._hist, color)
                self.displayObj.SetNorm(self.norm)
                self.displayObj.Draw(self.viewport)
                # add calibration
                if self.cal:
                    self.displayObj.SetCal(self.cal)
                # and ID
                if self.ID is not None:
                    ID = str(self.ID).strip(".")
                    self.displayObj.SetID(ID)

    def WriteSpectrum(self, fname, fmt):
        """
        Write the spectrum to file

        Returns True on success, False otherwise.
        """
        fname = os.path.expanduser(fname)
        try:
            SpecReader.WriteSpectrum(self._hist, fname, fmt)
        except SpecReaderError as msg:
            hdtv.ui.error("Failed to write spectrum: %s (file: %s)" % (msg, fname))
            return False
        return True

    def SetHistWithPrimitiveBinning(self, hist, caldegree=4, silent=False):
        # Adopt *hist*; if it does not already use unit-width bins starting
        # at 0, copy its contents into such a histogram and fit a
        # polynomial calibration of order *caldegree* that maps the new
        # bins back onto the original axis.
        log = hdtv.ui.debug if silent else hdtv.ui.info
        if HasPrimitiveBinning(hist):
            self._hist = hist
        else:
            log(
                hist.GetName()
                + " unconventional binning detected. Converting and trying to create calibration using a polynomial of order "
                + str(caldegree)
                + " ..."
            )
            self._hist = ROOT.TH1D(
                hist.GetName(), hist.GetTitle(), hist.GetNbinsX(), 0, hist.GetNbinsX()
            )
            if caldegree:
                cf = CalibrationFitter()
            # TODO: Slow
            for bin in range(0, hist.GetNbinsX()):
                if caldegree:
                    cf.AddPair(bin, hist.GetXaxis().GetBinUpEdge(bin))
                self._hist.SetBinContent(bin, hist.GetBinContent(bin))
                # Original comment by JM in commit #dd438b7c44265072bf8b0528170cecc95780e38c:
                # "TODO: Copy Errors?"
                #
                # Edit by UG: It makes sense to simply copy the uncertainties. There are two
                # possible cases:
                # 1. The ROOT histogram contains user-defined uncertainties per bin that can
                #    be retrieved by calling hist.GetBinError(). In this case, it can be
                #    assumed that the user knew what he was doing when the uncertainties
                #    were assigned.
                # 2. The ROOT histogram contains no user-defined uncertainties. In this case,
                #    a call of hist.GetBinError() will return the square root of the bin
                #    content, which is a sensible assumption.
                #
                # Since text spectra are loaded in a completely analogous way, implicitly
                # assuming that the uncertainties are Poissonian, there is no need to issue
                # an additional warning.
                self._hist.SetBinError(bin, hist.GetBinError(bin))
            if caldegree:
                cf.FitCal(caldegree)
                self.cal = cf.calib
class FileHistogram(Histogram):
    """
    File spectrum object

    A spectrum that comes from a file in any of the formats supported by hdtv.
    """

    def __init__(self, fname, fmt=None, color=hdtv.color.default, cal=None):
        """
        Read a spectrum from file

        Raises FileNotFoundError if *fname* does not exist, or
        SpecReaderError if the file cannot be parsed.
        """
        # check if file exists
        # BUG FIX: os.path.exists() returns a bool and never raises OSError,
        # so the original try/except around it was dead code and missing
        # files were only detected later by SpecReader.  Test the boolean
        # result instead.
        if not os.path.exists(fname):
            hdtv.ui.error("File %s not found" % fname)
            raise FileNotFoundError(fname)
        # call to SpecReader to get the hist
        try:
            hist = SpecReader.GetSpectrum(fname, fmt)
        except SpecReaderError as msg:
            hdtv.ui.error(str(msg))
            raise
        self.fmt = fmt
        self.filename = fname
        Histogram.__init__(self, hist, color, cal)
        self.typeStr = "spectrum, read from file"

    @property
    def info(self):
        """Extend the base description with filename and format."""
        # get the info property of the baseclass
        s = super(FileHistogram, self).info
        s += "Filename: %s\n" % self.filename
        if self.fmt:
            s += "File format: %s\n" % self.fmt
        else:
            s += "File format: autodetected\n"
        return s

    def Refresh(self):
        """
        Reload the spectrum from disk
        """
        # BUG FIX: same dead os.path.exists() try/except as in __init__;
        # check the boolean result so the previous data is really kept
        # when the file has vanished.
        if not os.path.exists(self.filename):
            hdtv.ui.warning("File %s not found, keeping previous data" % self.filename)
            return
        # call to SpecReader to get the hist
        try:
            hist = SpecReader.GetSpectrum(self.filename, self.fmt)
        except SpecReaderError as msg:
            hdtv.ui.warning(
                "Failed to load spectrum: %s (file: %s), keeping previous data"
                % (msg, self.filename)
            )
            return
        self.hist = hist
class CutHistogram(Histogram):
    """Histogram resulting from a gated projection (cut) of a matrix.

    hist:  projected ROOT histogram
    axis:  axis ("x"/"y") the gates refer to
    gates: list of region markers defining the cut
    """

    def __init__(self, hist, axis, gates, color=hdtv.color.default, cal=None):
        Histogram.__init__(self, hist, color, cal)
        self.gates = gates
        self.axis = axis

    @property
    def info(self):
        """Extend the base description with the cut axis and gate ranges."""
        s = super(CutHistogram, self).info
        s += "cut "
        s += "on %s axis gate: " % self.axis
        # BUG FIX: the original evaluated the bare string "and" (a no-op
        # expression) and compared against len(self.gates) (never equal to
        # a loop index), so the separator was never emitted.
        for i, g in enumerate(self.gates):
            s += "%d - %d " % (g.p1.pos_cal, g.p2.pos_cal)
            if i != len(self.gates) - 1:
                s += "and "
        return s
class THnSparseWrapper(object):
    """
    Wrapper around a 2d THnSparse object, providing ProjectionX and
    ProjectionY.
    """

    def __init__(self, hist):
        if not (isinstance(hist, ROOT.THnSparse) and hist.GetNdimensions() == 2):
            raise RuntimeError("Class needs a THnSparse histogram of dimension 2")
        self.__dict__["_hist"] = hist

    def __setattr__(self, name, value):
        self.__dict__["_hist"].__setattr__(name, value)

    def __getattr__(self, name):
        return getattr(self.__dict__["_hist"], name)

    def GetXaxis(self):
        return self._hist.GetAxis(0)

    def GetYaxis(self):
        return self._hist.GetAxis(1)

    def _project(self, proj_dim, name, b1, b2, opt):
        # Restrict the *other* axis to [b1, b2] (or its full range when
        # b1 > b2), project onto proj_dim, then restore the full range.
        other = self._hist.GetAxis(1 - proj_dim)
        if b1 > b2:
            other.SetRange(0, other.GetNbins())
        else:
            other.SetRange(b1, b2)
        proj = self._hist.Projection(proj_dim, opt)
        other.SetRange(0, other.GetNbins())
        proj.SetName(name)
        return proj

    def ProjectionX(self, name, b1, b2, opt):
        """Project onto the x axis, gating on y bins [b1, b2]."""
        return self._project(0, name, b1, b2, opt)

    def ProjectionY(self, name, b1, b2, opt):
        """Project onto the y axis, gating on x bins [b1, b2]."""
        return self._project(1, name, b1, b2, opt)
class Histo2D(object):
    """Base class for 2D-histogram (matrix) sources.

    Concrete subclasses provide projections onto the x and y axes and the
    ability to execute gated cuts; this base supplies neutral defaults.
    """

    def __init__(self):
        pass

    @property
    def name(self):
        """Human-readable identifier of this matrix."""
        return "generic 2D histogram"

    @property
    def xproj(self):
        """Projection onto the x axis (None if unavailable)."""
        return None

    @property
    def yproj(self):
        """Projection onto the y axis (None if unavailable)."""
        return None

    def ExecuteCut(self, regionMarkers, bgMarkers, axis):
        """Perform a gated cut; the base implementation yields nothing."""
        return None
class RHisto2D(Histo2D):
    """
    ROOT TH2-backed matrix for projection
    """

    def __init__(self, rhist):
        self.rhist = rhist
        # Lazy generation of projections
        self._prx = None
        self._pry = None

    @property
    def name(self):
        return self.rhist.GetName()

    @property
    def xproj(self):
        # Project onto x once and cache the ROOT histogram; a fresh
        # Histogram wrapper is created on every access.
        if self._prx is None:
            name = self.rhist.GetName() + "_prx"
            self._prx = self.rhist.ProjectionX(name, 0, -1, "e")
        # do not store the Histogram object here because of garbage
        # collection
        prx = Histogram(self._prx)
        prx.typeStr = "x projection"
        return prx

    @property
    def yproj(self):
        if self._pry is None:
            name = self.rhist.GetName() + "_pry"
            self._pry = self.rhist.ProjectionY(name, 0, -1, "e")
        # do not store the Histogram object here because of garbage
        # collection
        pry = Histogram(self._pry)
        pry.typeStr = "y projection"
        return pry

    def ExecuteCut(self, regionMarkers, bgMarkers, axis):
        """Project the matrix inside the gate regions and subtract a
        background estimated from the bg regions, scaled by the ratio of
        foreground to background bin counts."""
        # _axis_ is the axis the markers refer to, so we project on the *other*
        # axis. We call _axis_ the cut axis and the other axis the projection
        # axis. If the matrix is symmetric, this does not matter, so _axis_ is
        # "0" and the implementation can choose.
        if len(regionMarkers) < 1:
            raise RuntimeError("Need at least one gate for cut")
        if axis == "0":
            axis = "x"
        if axis not in ("x", "y"):
            raise ValueError("Bad value for axis parameter")
        if axis == "x":
            cutAxis = self.rhist.GetXaxis()
            projector = self.rhist.ProjectionY
        else:
            cutAxis = self.rhist.GetYaxis()
            projector = self.rhist.ProjectionX
        # The first gate region produces the initial projection ...
        b1 = cutAxis.FindBin(regionMarkers[0].p1.pos_uncal)
        b2 = cutAxis.FindBin(regionMarkers[0].p2.pos_uncal)
        name = self.rhist.GetName() + "_cut"
        rhist = projector(name, min(b1, b2), max(b1, b2), "e")
        # Ensure proper garbage collection for ROOT histogram objects
        ROOT.SetOwnership(rhist, True)
        numFgBins = abs(b2 - b1) + 1
        # ... and any further gate regions are added on top.
        for r in regionMarkers[1:]:
            b1 = cutAxis.FindBin(r.p1.pos_uncal)
            b2 = cutAxis.FindBin(r.p2.pos_uncal)
            numFgBins += abs(b2 - b1) + 1
            tmp = projector("proj_tmp", min(b1, b2), max(b1, b2), "e")
            ROOT.SetOwnership(tmp, True)
            rhist.Add(tmp, 1.0)
        # Collect the background regions, then subtract their projections
        # normalized by the foreground/background bin-count ratio.
        bgBins = []
        numBgBins = 0
        for b in bgMarkers:
            b1 = cutAxis.FindBin(b.p1.pos_uncal)
            b2 = cutAxis.FindBin(b.p2.pos_uncal)
            numBgBins += abs(b2 - b1) + 1
            bgBins.append((min(b1, b2), max(b1, b2)))
        if numBgBins > 0:
            bgFactor = -float(numFgBins) / float(numBgBins)
            for b in bgBins:
                tmp = projector("proj_tmp", b[0], b[1], "e")
                ROOT.SetOwnership(tmp, True)
                rhist.Add(tmp, bgFactor)
        hist = CutHistogram(rhist, axis, regionMarkers)
        hist.typeStr = "cut"
        return hist
class MHisto2D(Histo2D):
"""
MFile-backed matrix for projection
"""
def __init__(self, fname, sym):
# check if file exists
try:
os.stat(fname)
except OSError as error:
hdtv.ui.error(str(error))
raise
self.GenerateFiles(fname, sym)
basename = self.GetBasename(fname)
# call to SpecReader to get the hist
try:
self.vmatrix = SpecReader.GetVMatrix(fname)
except SpecReaderError as msg:
hdtv.ui.error(str(msg))
raise
self._xproj = FileHistogram(basename + ".prx")
self._xproj.typeStr = "Projection"
if sym:
self._yproj = None
self.tvmatrix = self.vmatrix # Fixme
else:
self._yproj = FileHistogram(basename + ".pry")
self._yproj.typeStr = "Projection"
try:
self.tvmatrix = SpecReader.GetVMatrix(basename + ".tmtx")
except SpecReaderError as msg:
hdtv.ui.error(str(msg))
raise
self.filename = fname
@property
def xproj(self):
return self._xproj
@property
def yproj(self):
return self._yproj
def ExecuteCut(self, regionMarkers, bgMarkers, axis):
# _axis_ is the axis the markers refer to, so we project on the *other*
# axis. We call _axis_ the cut axis and the other axis the projection
# axis. If the matrix is symmetric, this does not matter, so _axis_ is
# "0" and the implementation can choose.
if len(regionMarkers) < 1:
raise RuntimeError("Need at least one gate for cut")
if axis == "0":
axis = "x"
if axis not in ("x", "y"):
raise ValueError("Bad value for axis parameter")
if axis == "x":
# FIXME: Calibrations for gated spectra asym/sym
thiscal = self._xproj.cal
if self._yproj:
othercal = self._yproj.cal
else:
othercal = self._xproj.cal
matrix = self.tvmatrix
else:
thiscal = self._yproj.cal
othercal = self._xproj.cal
matrix = self.vmatrix
matrix.ResetRegions()
for r in regionMarkers:
# FIXME: The region markers are not used correctly in many parts
# of the code. Workaround by explicitly using the cal here
b1 = matrix.FindCutBin(thiscal.E2Ch(r.p1.pos_cal))
b2 = matrix.FindCutBin(thiscal.E2Ch(r.p2.pos_cal))
matrix.AddCutRegion(b1, b2)
for b in bgMarkers:
b1 = matrix.FindCutBin(thiscal.E2Ch(b.p1.pos_cal))
b2 = matrix.FindCutBin(thiscal.E2Ch(b.p2.pos_cal))
matrix.AddBgRegion(b1, b2)
name = self.filename + "_cut"
rhist = matrix.Cut(name, name)
# Ensure proper garbage collection for ROOT histogram objects
ROOT.SetOwnership(rhist, True)
hist = CutHistogram(rhist, axis, regionMarkers)
hist.typeStr = "cut"
hist._cal = othercal
return hist
def GetBasename(self, fname):
if fname.endswith(".mtx") or fname.endswith(".mtx"):
return fname[:-4]
else:
return fname
    def GenerateFiles(self, fname, sym):
        """
        Generate projection(s) and possibly transpose (for asymmetric matrices),
        if they do not exist yet.

        fname -- matrix file name
        sym   -- True for a symmetric matrix (no y projection, no transpose)
        """
        basename = self.GetBasename(fname)
        # Generate projection(s). An empty filename tells MatOp.Project to
        # skip that projection.
        prx_fname = basename + ".prx"
        pry_fname = ""
        if os.path.exists(prx_fname):
            hdtv.ui.info("Using %s for x projection" % prx_fname)
            prx_fname = ""
        if not sym:
            pry_fname = basename + ".pry"
            if os.path.exists(pry_fname):
                hdtv.ui.info("Using %s for y projection" % pry_fname)
                pry_fname = ""
        if prx_fname or pry_fname:
            errno = ROOT.MatOp.Project(fname, prx_fname, pry_fname)
            if errno != ROOT.MatOp.ERR_SUCCESS:
                raise RuntimeError("Project: " + ROOT.MatOp.GetErrorString(errno))
            if prx_fname:
                hdtv.ui.info("Generated x projection: %s" % prx_fname)
            if pry_fname:
                hdtv.ui.info("Generated y projection: %s" % pry_fname)
        # Generate transpose (only needed for asymmetric matrices)
        if not sym:
            trans_fname = basename + ".tmtx"
            if os.path.exists(trans_fname):
                hdtv.ui.info("Using %s for transpose" % trans_fname)
            else:
                errno = ROOT.MatOp.Transpose(fname, trans_fname)
                if errno != ROOT.MatOp.ERR_SUCCESS:
                    raise RuntimeError("Transpose: " + ROOT.MatOp.GetErrorString(errno))
                hdtv.ui.info("Generated transpose: %s" % trans_fname)
|
"Would you get me a coffee?"
If you don't ask for it, you don't get it.
If you don't ask for it, you're not going to get it.
A: Don’t make the same mistake twice.
B: Yes. I got it.
A: Did you understand the medical terms he used?
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from bomlib.columns import ColumnList
from bomlib.preferences import BomPref
import bomlib.units as units
from bomlib.sort import natural_sort
import re
import sys
DNF = [
"dnf",
"dnl",
"dnp",
"do not fit",
"do not place",
"do not load",
"nofit",
"nostuff",
"noplace",
"noload",
"not fitted",
"not loaded",
"not placed",
"no stuff",
]
class Component():
    """Class for a component, aka 'comp' in the xml netlist file.
    This component class is implemented by wrapping an xmlElement instance
    with accessors. The xmlElement is held in field 'element'.
    """
    def __init__(self, xml_element, prefs=None):
        """Wrap *xml_element*; fall back to default BomPref when none given."""
        self.element = xml_element
        self.libpart = None
        if not prefs:
            prefs = BomPref()
        self.prefs = prefs
        # Set to true when this component is included in a component group
        self.grouped = False
    # Compare the value of this part, to the value of another part (see if they match)
    def compareValue(self, other):
        """Return True if this part's value matches *other*'s value."""
        # Simple string comparison
        if self.getValue().lower() == other.getValue().lower():
            return True
        # Otherwise, perform a more complicated value comparison
        if units.compareValues(self.getValue(), other.getValue()):
            return True
        # Ignore value if both components are connectors
        if self.prefs.groupConnectors:
            if 'connector' in self.getLibName().lower() and 'connector' in other.getLibName().lower():
                return True
        # No match, return False
        return False
    # Determine if two parts have the same name
    def comparePartName(self, other):
        """Return True if both parts share a name or a configured alias set."""
        pn1 = self.getPartName().lower()
        pn2 = other.getPartName().lower()
        # Simple direct match
        if pn1 == pn2:
            return True
        # Compare part aliases e.g. "c" to "c_small"
        for alias in self.prefs.aliases:
            if pn1 in alias and pn2 in alias:
                return True
        return False
    def compareField(self, other, field):
        """Case-insensitive comparison of one named field on both parts.

        When either field is blank, the parts only match if
        prefs.mergeBlankFields is enabled (and then via the equality test).
        """
        this_field = self.getField(field).lower()
        other_field = other.getField(field).lower()
        # If blank comparisons are allowed
        if this_field == "" or other_field == "":
            if not self.prefs.mergeBlankFields:
                return False
        if this_field == other_field:
            return True
        return False
    def __eq__(self, other):
        """
        Equivalency operator is used to determine if two parts are 'equal'
        """
        # 'fitted' value must be the same for both parts
        if self.isFitted() != other.isFitted():
            return False
        # With no grouping columns configured, no two parts are considered equal
        if len(self.prefs.groups) == 0:
            return False
        for c in self.prefs.groups:
            # Perform special matches
            if c.lower() == ColumnList.COL_VALUE.lower():
                if not self.compareValue(other):
                    return False
            # Match part name
            elif c.lower() == ColumnList.COL_PART.lower():
                if not self.comparePartName(other):
                    return False
            # Generic match
            elif not self.compareField(other, c):
                return False
        return True
    def setLibPart(self, part):
        """Attach the library part used for field/footprint fallbacks."""
        self.libpart = part
    def getPrefix(self):
        """
        Get the reference prefix
        e.g. if this component has a reference U12, will return "U"
        """
        prefix = ""
        for c in self.getRef():
            if c.isalpha():
                prefix += c
            else:
                break
        return prefix
    def getSuffix(self):
        """
        Return the reference suffix #
        e.g. if this component has a reference U12, will return "12"
        """
        suffix = ""
        for c in self.getRef():
            if c.isalpha():
                suffix = ""
            else:
                suffix += c
        # NOTE(review): raises ValueError when the reference has no trailing
        # digits (e.g. "GND") — confirm callers guarantee a numeric suffix.
        return int(suffix)
    def getLibPart(self):
        """Return the attached library part (or None)."""
        return self.libpart
    def getPartName(self):
        """Return the part name from the netlist's libsource element."""
        return self.element.get("libsource", "part")
    def getLibName(self):
        """Return the library name from the netlist's libsource element."""
        return self.element.get("libsource", "lib")
    def getDescription(self):
        """Return the part description, with fallbacks for older netlists."""
        try:
            return self.element.get("libsource", "description")
        except:
            # Compatibility with old KiCad versions (4.x)
            ret = self.element.get("field", "name", "description")
            if ret == "":
                ret = self.libpart.getDescription()
            return ret
    def setValue(self, value):
        """Set the value of this component"""
        v = self.element.getChild("value")
        if v:
            v.setChars(value)
    def getValue(self):
        """Return the component's value string."""
        return self.element.get("value")
    def getField(self, name, ignoreCase=True, libraryToo=True):
        """Return the value of a field named name. The component is first
        checked for the field, and then the components library part is checked
        for the field. If the field doesn't exist in either, an empty string is
        returned
        Keywords:
        name -- The name of the field to return the value for
        libraryToo -- look in the libpart's fields for the same name if not found
                      in component itself
        """
        # Mandatory/derived columns are handled explicitly first
        fp = self.getFootprint().split(":")
        if name.lower() == ColumnList.COL_REFERENCE.lower():
            return self.getRef().strip()
        elif name.lower() == ColumnList.COL_DESCRIPTION.lower():
            return self.getDescription().strip()
        elif name.lower() == ColumnList.COL_DATASHEET.lower():
            return self.getDatasheet().strip()
        # Footprint library is first element
        elif name.lower() == ColumnList.COL_FP_LIB.lower():
            if len(fp) > 1:
                return fp[0].strip()
            else:
                # Explicit empty return
                return ""
        elif name.lower() == ColumnList.COL_FP.lower():
            if len(fp) > 1:
                return fp[1].strip()
            elif len(fp) == 1:
                return fp[0]
            else:
                return ""
        elif name.lower() == ColumnList.COL_VALUE.lower():
            return self.getValue().strip()
        elif name.lower() == ColumnList.COL_PART.lower():
            return self.getPartName().strip()
        elif name.lower() == ColumnList.COL_PART_LIB.lower():
            return self.getLibName().strip()
        # Other fields (case insensitive)
        for f in self.getFieldNames():
            if f.lower() == name.lower():
                field = self.element.get("field", "name", f)
                if field == "" and libraryToo:
                    field = self.libpart.getField(f)
                return field.strip()
        # Could not find a matching field
        return ""
    def getFieldNames(self):
        """Return a list of field names in play for this component. Mandatory
        fields are not included, and they are: Value, Footprint, Datasheet, Ref.
        The netlist format only includes fields with non-empty values. So if a field
        is empty, it will not be present in the returned list.
        """
        fieldNames = []
        fields = self.element.getChild('fields')
        if fields:
            for f in fields.getChildren():
                fieldNames.append(f.get('field', 'name'))
        return fieldNames
    def getRef(self):
        """Return the component reference, e.g. "U12"."""
        return self.element.get("comp", "ref")
    # Determine if a component is FITTED or not
    def isFitted(self):
        """Return True unless the value or config field marks this part DNF."""
        check = self.getField(self.prefs.configField).lower()
        # Check the value field first
        if self.getValue().lower() in DNF:
            return False
        # Empty value means part is fitted
        if check == "":
            return True
        opts = check.lower().split(",")
        exclude = False
        include = True
        for opt in opts:
            opt = opt.strip()
            # Any option containing a DNF is not fitted
            if opt in DNF:
                exclude = True
                break
            # Options that start with '-' are explicitly removed from certain configurations
            if opt.startswith("-") and str(opt[1:]) in [str(cfg) for cfg in self.prefs.pcbConfig]:
                exclude = True
                break
            # NOTE(review): `include` starts True and is only or-ed, so a '+'
            # option can never cause exclusion — confirm this is intended.
            if opt.startswith("+"):
                include = include or opt[1:] in [str(cfg) for cfg in self.prefs.pcbConfig]
        return include and not exclude
    # Test if this part should be included, based on any regex expressions provided in the preferences
    def testRegExclude(self):
        """Return True if any (field, regex) pair in prefs.regExcludes matches."""
        for reg in self.prefs.regExcludes:
            if type(reg) == list and len(reg) == 2:
                field_name, regex = reg
                field_value = self.getField(field_name)
                # Attempt unicode escaping...
                # Filthy hack
                try:
                    regex = regex.decode("unicode_escape")
                except:
                    pass
                if re.search(regex, field_value, flags=re.IGNORECASE) is not None:
                    if self.prefs.verbose:
                        print("Excluding '{ref}': Field '{field}' ({value}) matched '{reg}'".format(
                            ref=self.getRef(),
                            field=field_name,
                            value=field_value,
                            reg=regex).encode('utf-8'))
                    # Found a match
                    return True
        # Default, could not find any matches
        return False
    def testRegInclude(self):
        """Return True if no include-regexes are set or any of them matches."""
        if len(self.prefs.regIncludes) == 0:  # Nothing to match against
            return True
        for reg in self.prefs.regIncludes:
            if type(reg) == list and len(reg) == 2:
                field_name, regex = reg
                field_value = self.getField(field_name)
                # NOTE(review): this looks like a leftover debug print —
                # consider removing it.
                print(field_name, field_value, regex)
                if re.search(regex, field_value, flags=re.IGNORECASE) is not None:
                    if self.prefs.verbose:
                        print("")
                    # Found a match
                    return True
        # Default, could not find a match
        return False
    def getFootprint(self, libraryToo=True):
        """Return the footprint string, falling back to the library part."""
        ret = self.element.get("footprint")
        if ret == "" and libraryToo:
            if self.libpart:
                ret = self.libpart.getFootprint()
        return ret
    def getDatasheet(self, libraryToo=True):
        """Return the datasheet string, falling back to the library part."""
        ret = self.element.get("datasheet")
        if ret == "" and libraryToo:
            # NOTE(review): unlike getFootprint, no None-check on libpart here.
            ret = self.libpart.getDatasheet()
        return ret
    def getTimestamp(self):
        """Return the component's timestamp (tstamp) from the netlist."""
        return self.element.get("tstamp")
class joiner:
    """Collapse (prefix, number) reference pairs into compact range strings.

    Consecutive references that share a prefix (R1, R2, R3) are merged into
    a single run and rendered as a range ("R1-R3") by flush().
    """
    def __init__(self):
        # Each stack entry is a run: ((P, N) of first ref, (P, N) of last ref)
        self.stack = []
    def add(self, P, N):
        """Append reference (P, N), extending the current run when contiguous.

        Bug fix: the run is only extended when the prefix matches as well as
        the number being consecutive; previously "R1" followed by "C2" was
        collapsed into the nonsensical range "R1-C2".
        """
        if self.stack == []:
            self.stack.append(((P, N), (P, N)))
            return
        S, E = self.stack[-1]
        if P == E[0] and N == E[1] + 1:
            self.stack[-1] = (S, (P, N))
        else:
            self.stack.append(((P, N), (P, N)))
    def flush(self, sep, N=None, dash='-'):
        """Render the accumulated runs as one string.

        Runs are joined with *sep*; when *N* is given, a newline is emitted
        after every N reference items. Ranges are joined with *dash*.
        """
        refstr = u''
        c = 0
        for Q in self.stack:
            if bool(N) and c != 0 and c % N == 0:
                refstr += u'\n'
            elif c != 0:
                refstr += sep
            S, E = Q
            if S == E:
                # A lone reference, e.g. "R5"
                refstr += "%s%d" % S
                c += 1
            else:
                # A range counts as two items; wrap first if there is no space
                if bool(N) and (c + 1) % N == 0:
                    refstr += u'\n'
                    c += 1
                refstr += "%s%d%s%s%d" % (S[0], S[1], dash, E[0], E[1])
                c += 2
        return refstr
class ComponentGroup():
    """A group of equivalent components, aggregated into a single BOM row."""
    def __init__(self, prefs=None):
        """
        Initialize the group with no components, and default fields
        """
        self.components = []
        self.fields = dict.fromkeys(ColumnList._COLUMNS_DEFAULT)  # Columns loaded from KiCad
        if not prefs:
            prefs = BomPref()
        self.prefs = prefs
    def getField(self, field):
        """Return the group's value for *field*, or "" when absent/empty."""
        if field not in self.fields.keys():
            return ""
        if not self.fields[field]:
            return ""
        return u''.join((self.fields[field]))
    def getCount(self):
        """Return the number of components in this group."""
        return len(self.components)
    # Test if a given component fits in this group
    def matchComponent(self, c):
        """True when *c* is equivalent to this group (an empty group matches all)."""
        if len(self.components) == 0:
            return True
        if c == self.components[0]:
            return True
        return False
    def containsComponent(self, c):
        # Test if a given component is already contained in this group
        if not self.matchComponent(c):
            return False
        for comp in self.components:
            if comp.getRef() == c.getRef():
                return True
        return False
    def addComponent(self, c):
        # Add a component to the group (silently skipping duplicates)
        if self.containsComponent(c):
            return
        self.components.append(c)
    def isFitted(self):
        """True when at least one component in the group is fitted."""
        return any([c.isFitted() for c in self.components])
    def getRefs(self):
        # Return a space-separated string of the component references
        return " ".join([c.getRef() for c in self.components])
    def getAltRefs(self, wrapN=None):
        """Return references in compact range form, e.g. "R1-R3 R5"."""
        S = joiner()
        for n in self.components:
            P, N = (n.getPrefix(), n.getSuffix())
            S.add(P, N)
        return S.flush(' ', N=wrapN)
    # Sort the components in correct order
    def sortComponents(self):
        self.components = sorted(self.components, key=lambda c: natural_sort(c.getRef()))
    # Update a given field, based on some rules and such
    def updateField(self, field, fieldData):
        """Merge *fieldData* into self.fields[field], reporting conflicts."""
        # Protected fields cannot be overwritten
        if field in ColumnList._COLUMNS_PROTECTED:
            return
        if field is None or field == "":
            return
        elif fieldData == "" or fieldData is None:
            return
        if (field not in self.fields.keys()) or (self.fields[field] is None) or (self.fields[field] == ""):
            self.fields[field] = fieldData
        elif fieldData.lower() in self.fields[field].lower():
            return
        else:
            # NOTE(review): under Python 3 this prints the bytes repr; the
            # .encode('utf-8') looks like a Python 2 leftover — confirm.
            print("Field conflict: ({refs}) [{name}] : '{flds}' <- '{fld}'".format(
                refs=self.getRefs(),
                name=field,
                flds=self.fields[field],
                fld=fieldData).encode('utf-8'))
            self.fields[field] += " " + fieldData
    def updateFields(self, usealt=False, wrapN=None):
        """Recompute all aggregate fields from the member components."""
        for c in self.components:
            for f in c.getFieldNames():
                # These columns are handled explicitly below
                if f in ColumnList._COLUMNS_PROTECTED:
                    continue
                self.updateField(f, c.getField(f))
        # Update 'global' fields
        if usealt:
            self.fields[ColumnList.COL_REFERENCE] = self.getAltRefs(wrapN)
        else:
            self.fields[ColumnList.COL_REFERENCE] = self.getRefs()
        q = self.getCount()
        self.fields[ColumnList.COL_GRP_QUANTITY] = "{n}{dnf}".format(
            n=q,
            dnf=" (DNF)" if not self.isFitted() else "")
        self.fields[ColumnList.COL_GRP_BUILD_QUANTITY] = str(q * self.prefs.boards) if self.isFitted() else "0"
        if self.prefs.agregateValues:
            self.fields[ColumnList.COL_VALUE] = ','.join(sorted(set([c.getValue() for c in self.components])))
        else:
            self.fields[ColumnList.COL_VALUE] = self.components[0].getValue()
        self.fields[ColumnList.COL_PART] = self.components[0].getPartName()
        self.fields[ColumnList.COL_PART_LIB] = self.components[0].getLibName()
        self.fields[ColumnList.COL_DESCRIPTION] = self.components[0].getDescription()
        self.fields[ColumnList.COL_DATASHEET] = self.components[0].getDatasheet()
        # Footprint field requires special attention
        fp = self.components[0].getFootprint().split(":")
        if len(fp) >= 2:
            self.fields[ColumnList.COL_FP_LIB] = fp[0]
            self.fields[ColumnList.COL_FP] = fp[1]
        elif len(fp) == 1:
            self.fields[ColumnList.COL_FP_LIB] = ""
            self.fields[ColumnList.COL_FP] = fp[0]
        else:
            self.fields[ColumnList.COL_FP_LIB] = ""
            self.fields[ColumnList.COL_FP] = ""
    # Return a dict of the KiCad data based on the supplied columns
    # NOW WITH UNICODE SUPPORT!
    def getRow(self, columns):
        """Return the list of values for *columns* (utf-8 encoded on Py2)."""
        row = []
        for key in columns:
            val = self.getField(key)
            if val is None:
                val = ""
            else:
                val = u'' + val
                if sys.version_info[0] < 3:
                    val = val.encode('utf-8')
            row.append(val)
        return row
|
Stunning Digital Polycarbonate Overlays - Marking Systems, Inc.
Want help upgrading the look of your overlays or nameplates? Check out this digitally printed overlay. You are no longer stuck always using solid colors. Expand your horizons using MSI digital printing.
On this overlay, the customer used a very light gradient to take the background color from a light blue/lavender to 100% beige. Also notice the gradients in the customer logo.
Don’t know where to get started? At MSI it is easy to rapid prototype new designs.
Send your CSR an initial blueprint. Once we receive it, we will schedule a quick 10-15 minute conference call between you and our R&D Director Razvan Datcu. MSI prototype orders will always include multiple versions (if applicable) that are production quality parts at no additional cost. MSI never skimps. We just want to make great looking labels to help you make great products.
Need help with an unusual application or have a problem label?
Call MSI at 972-895-3433 and let our label experts help you solve your problem or complete your project.
|
import json
from django.conf import settings
from django.contrib.auth.decorators import user_passes_test, login_required
from django.core.cache import cache
from django.core.serializers.json import DjangoJSONEncoder
from django.db.models import Q
from django.forms import model_to_dict
from django.http import HttpResponseBadRequest, HttpResponse, \
HttpResponseForbidden
from django.shortcuts import render
from django.views.decorators.http import require_safe, require_POST
from membership.api import download_sheet_with_user, user_can_download_sheet
from membership.forms import SearchForm
from membership.models import Member, update_membership
# View decorator: only users who may download the membership sheet can access
# the wrapped view; others are redirected to the login page with a reason code.
# Idiom fix: pass the predicate directly instead of a redundant lambda wrapper.
user_can_view_members = user_passes_test(
    user_can_download_sheet,
    login_url=settings.LOGIN_URL + '?reason=no_member_view_permission'
)
def index(request):
    """Render the public landing page."""
    return render(request, 'index.html')
@login_required
@user_can_view_members
@require_safe
def dashboard(request):
    """Render the member-search dashboard for authorized users.

    'enable_member_update' is a cache-backed cooldown flag: update() clears
    it for 300 seconds after a successful sheet import; it defaults to True
    when absent from the cache.
    """
    return render(request, 'dashboard.html', {
        'form': SearchForm(),
        'enable_member_update': cache.get('enable_member_update', True)
    })
@login_required
@user_can_view_members
@require_POST
def search(request):
    """Validate a posted SearchForm and return up to 40 matching members as JSON."""
    form = SearchForm(request.POST)
    if not form.is_valid():
        return HttpResponseBadRequest(content=form.errors)
    query = form.cleaned_data['query']
    # Match the query against either the knights email or the member name.
    name_or_email = Q(knights_email__icontains=query) | Q(name__icontains=query)
    members = Member.objects.filter(name_or_email)[:40]
    payload = {
        'results': {
            'data': [model_to_dict(m) for m in members]
        }
    }
    return HttpResponse(json.dumps(payload, cls=DjangoJSONEncoder),
                        content_type='application/json')
@login_required
@user_can_view_members
@require_POST
def update(request):
    """Re-import the membership sheet on behalf of the requesting user.

    Only accepts AJAX POSTs; on success the 'enable_member_update' cache flag
    is cleared for 300 seconds as a cooldown (read by dashboard()).
    """
    # NOTE(review): request.is_ajax() is deprecated in Django 3.1 and removed
    # in 4.0 — confirm the project's Django version before upgrading.
    if not request.is_ajax():
        return HttpResponseBadRequest('Must be requested from page')
    filename = 'membership.csv'
    if not download_sheet_with_user(request.user, filename):
        return HttpResponseForbidden('User cannot see the sheet 👎')
    update_membership(filename)
    cache.set('enable_member_update', False, 300)
    # thumbs up unicode
    return HttpResponse('👍')
|
Curiously, I am more sore today than I was yesterday – the infamous 48-hour post-event peak of DOMS (delayed onset muscle soreness). I do a little bit of waddling down the stairs, and getting up from my yoga mat is less graceful than usual.
I realize today that one advantage to wearing very little to do an obstacle race (elite male obstacle racers, I’m looking at you) is less laundry. With a t-shirt and full length running pants, I’m absolutely exhausted from the amount of rinsing.
The Spartan family came together as one. Yet again. Jeff has a hydration pack in the car, which he is not planning to carry. Boom. I have water.
Next I pick up my racing kit, and go hunting for food. Shaun Provost generously shares some Gu with me. Finally I run into the Canadian Mudd Queens (represent!), and inquire if either of them have any extra fuel that they can spare. They look at me in amazement. “Solo, we all read your blog post on what to pack. You specifically said to pack extra fuel. So yes!”. Boom. I have fuel. My blog has officially paid off.
Johanna, Jen, Tanya and Genevieve produce a small pile of gels, bars, baby food and jelly beans. Me thinks I now have enough sugar to kill this mountain. Now if I could only locate a cup of coffee… But it’s almost time, and we head over the start to watch the elite men take off. Wow, this year everything is a big deal. The caliber of athletes is blinding. Olympic athletes, obstacle racers, triathletes, runners. I’m just soaking up the energy of the crowd – my face hurts from smiling at all the familiar peoplez. I hand out hugs like candy, and warn the guys I know that I better not see them on the course. Women are starting at 8.20am, twenty minutes behind, followed up by the rest of the open waves, released every fifteen minutes.
We mingle at the start line, while the race organizers are doing the PR thing – pictures, interview clips, announcements. I meet a few people in person for the first time (hello, Janice Ferguson!). Strange how, in the age of social media, we sometimes develop a relationship with a person before we meet them face to face, no?
The gun goes off, and we are running. The first obstacle is hay bales – these are the highest bales I’ve ever seen, and many women, including myself, are struggling to get over. The fact that the straw is still dry does not help – there is no grip at all. I am pleasantly surprised to see women help each other to get over to the other side. This is the elite wave, and the camaraderie is heart-warming.
Couple of walls, over-under-through and a short net crawl are next, and then we are climbing. And climbing. And climbing. A brutal hill for miles. At least it seems like that’s how long it is.
About half way through, as I’m huffing and puffing like a chain-smoking elephant, I hear a familiar voice behind me: “Solo! How is it going?”. I look around to see a huge smile – Juliana Sproles waves. “Oh, you know… Meditating.”, I reply. That’s really the only way I can describe my snail pace up the mountain. All of my road marathon flat pavement training is paying off. NOT.
I don’t see any resemblance. 🙂 Although I did suggest that next year Juliana raced with the Jaws soundtrack playing loudly, wherever she went. Can you imagine? Talk about laying psychological distance onto fellow racers.
When we finally make it to the top, a memory board awaits. We have to memorize a word-number combination, corresponding to the last two digits of our bib. A few people are writing the numbers down on their arms – intelligent, yes, and exactly what I did last year at the Ultra Beast. This year I’m lucky to have gels. I borrow a ballpoint pen from someone, and try scribbling the number on my skin. Nope.
The combo is not very long, but I know that we will have to recall it hours later.
I give up on trying to write the damn thing down, and decide to put the years I spent in the education system to use. Finally!
I pull up a mental file – Introductory Psychology, lecture on memory. What do we know about the way human memory works? Short-term memory only holds 7+- items. Rehearsal is the best way to transfer information from short-term memory to long-term memory. And the deeper you process the information, the better you will remember it. Thus, a best way to memorize random numbers and letters is to create a story, to make them less random.
I make up a fictional baseball player named Juan X-ray. Don’t ask. He is currently 37, and he was born in 86. Again, don’t ask. His jersey number is “Lucky Thirteen”.
X-ray, Juan. 37 years old, born in 86. Jersey – Lucky 13.
Over sixty pounds of sandy goodness for both men and women have to travel up quarter mile up the hill. And back. Just to put things in perspective: a quarter mile is a FULL lap around the running track. Only this time you have a friend. And a small issue of verticality.
In the next little while, many racers will find God. Or die trying.
|
# coding=utf-8
# pylint: disable-msg=E1101,W0612
import pytest
import numpy as np
import pandas as pd
from pandas import (Index, Series, _np_version_under1p9)
from pandas.core.indexes.datetimes import Timestamp
from pandas.core.dtypes.common import is_integer
import pandas.util.testing as tm
from .common import TestData
class TestSeriesQuantile(TestData):
    """Tests for Series.quantile: dtypes, interpolation, NaN/NaT and empty input."""
    def test_quantile(self):
        q = self.ts.quantile(0.1)
        assert q == np.percentile(self.ts.valid(), 10)
        q = self.ts.quantile(0.9)
        assert q == np.percentile(self.ts.valid(), 90)
        # object dtype
        q = Series(self.ts, dtype=object).quantile(0.9)
        assert q == np.percentile(self.ts.valid(), 90)
        # datetime64[ns] dtype
        dts = self.ts.index.to_series()
        q = dts.quantile(.2)
        assert q == Timestamp('2000-01-10 19:12:00')
        # timedelta64[ns] dtype
        tds = dts.diff()
        q = tds.quantile(.25)
        assert q == pd.to_timedelta('24:00:00')
        # GH7661
        # NOTE(review): this .sum() check is unrelated to quantile; kept as-is.
        result = Series([np.timedelta64('NaT')]).sum()
        assert result is pd.NaT
        # out-of-range percentiles must raise ValueError
        msg = 'percentiles should all be in the interval \\[0, 1\\]'
        for invalid in [-1, 2, [0.5, -1], [0.5, 2]]:
            with tm.assert_raises_regex(ValueError, msg):
                self.ts.quantile(invalid)
    def test_quantile_multi(self):
        # a list of percentiles yields a Series indexed by them
        qs = [.1, .9]
        result = self.ts.quantile(qs)
        expected = pd.Series([np.percentile(self.ts.valid(), 10),
                              np.percentile(self.ts.valid(), 90)],
                             index=qs, name=self.ts.name)
        tm.assert_series_equal(result, expected)
        dts = self.ts.index.to_series()
        dts.name = 'xxx'
        # duplicated percentiles are preserved in the result index
        result = dts.quantile((.2, .2))
        expected = Series([Timestamp('2000-01-10 19:12:00'),
                           Timestamp('2000-01-10 19:12:00')],
                          index=[.2, .2], name='xxx')
        tm.assert_series_equal(result, expected)
        # an empty percentile list yields an empty float-indexed Series
        result = self.ts.quantile([])
        expected = pd.Series([], name=self.ts.name, index=Index(
            [], dtype=float))
        tm.assert_series_equal(result, expected)
    @pytest.mark.skipif(_np_version_under1p9,
                        reason="Numpy version is under 1.9")
    def test_quantile_interpolation(self):
        # see gh-10174
        # interpolation = linear (default case)
        q = self.ts.quantile(0.1, interpolation='linear')
        assert q == np.percentile(self.ts.valid(), 10)
        q1 = self.ts.quantile(0.1)
        assert q1 == np.percentile(self.ts.valid(), 10)
        # test with and without interpolation keyword
        assert q == q1
    @pytest.mark.skipif(_np_version_under1p9,
                        reason="Numpy version is under 1.9")
    def test_quantile_interpolation_dtype(self):
        # GH #10174
        # interpolation = linear (default case)
        q = pd.Series([1, 3, 4]).quantile(0.5, interpolation='lower')
        assert q == np.percentile(np.array([1, 3, 4]), 50)
        assert is_integer(q)
        q = pd.Series([1, 3, 4]).quantile(0.5, interpolation='higher')
        assert q == np.percentile(np.array([1, 3, 4]), 50)
        assert is_integer(q)
    @pytest.mark.skipif(not _np_version_under1p9,
                        reason="Numpy version is greater 1.9")
    def test_quantile_interpolation_np_lt_1p9(self):
        # GH #10174
        # interpolation = linear (default case)
        q = self.ts.quantile(0.1, interpolation='linear')
        assert q == np.percentile(self.ts.valid(), 10)
        q1 = self.ts.quantile(0.1)
        assert q1 == np.percentile(self.ts.valid(), 10)
        # interpolation other than linear is unsupported on old numpy
        msg = "Interpolation methods other than "
        with tm.assert_raises_regex(ValueError, msg):
            self.ts.quantile(0.9, interpolation='nearest')
        # object dtype
        with tm.assert_raises_regex(ValueError, msg):
            Series(self.ts, dtype=object).quantile(0.7, interpolation='higher')
    def test_quantile_nan(self):
        # GH 13098
        s = pd.Series([1, 2, 3, 4, np.nan])
        result = s.quantile(0.5)
        expected = 2.5
        assert result == expected
        # all nan/empty
        cases = [Series([]), Series([np.nan, np.nan])]
        for s in cases:
            res = s.quantile(0.5)
            assert np.isnan(res)
            res = s.quantile([0.5])
            tm.assert_series_equal(res, pd.Series([np.nan], index=[0.5]))
            res = s.quantile([0.2, 0.3])
            tm.assert_series_equal(res, pd.Series([np.nan, np.nan],
                                                  index=[0.2, 0.3]))
    def test_quantile_box(self):
        # boxed scalar types (Timestamp/Timedelta, tz-aware, with NaT)
        cases = [[pd.Timestamp('2011-01-01'), pd.Timestamp('2011-01-02'),
                  pd.Timestamp('2011-01-03')],
                 [pd.Timestamp('2011-01-01', tz='US/Eastern'),
                  pd.Timestamp('2011-01-02', tz='US/Eastern'),
                  pd.Timestamp('2011-01-03', tz='US/Eastern')],
                 [pd.Timedelta('1 days'), pd.Timedelta('2 days'),
                  pd.Timedelta('3 days')],
                 # NaT
                 [pd.Timestamp('2011-01-01'), pd.Timestamp('2011-01-02'),
                  pd.Timestamp('2011-01-03'), pd.NaT],
                 [pd.Timestamp('2011-01-01', tz='US/Eastern'),
                  pd.Timestamp('2011-01-02', tz='US/Eastern'),
                  pd.Timestamp('2011-01-03', tz='US/Eastern'), pd.NaT],
                 [pd.Timedelta('1 days'), pd.Timedelta('2 days'),
                  pd.Timedelta('3 days'), pd.NaT]]
        for case in cases:
            s = pd.Series(case, name='XXX')
            res = s.quantile(0.5)
            assert res == case[1]
            res = s.quantile([0.5])
            exp = pd.Series([case[1]], index=[0.5], name='XXX')
            tm.assert_series_equal(res, exp)
    def test_datetime_timedelta_quantiles(self):
        # covers #9694
        assert pd.isnull(Series([], dtype='M8[ns]').quantile(.5))
        assert pd.isnull(Series([], dtype='m8[ns]').quantile(.5))
    def test_quantile_nat(self):
        res = Series([pd.NaT, pd.NaT]).quantile(0.5)
        assert res is pd.NaT
        res = Series([pd.NaT, pd.NaT]).quantile([0.5])
        tm.assert_series_equal(res, pd.Series([pd.NaT], index=[0.5]))
    def test_quantile_empty(self):
        # floats
        s = Series([], dtype='float64')
        res = s.quantile(0.5)
        assert np.isnan(res)
        res = s.quantile([0.5])
        exp = Series([np.nan], index=[0.5])
        tm.assert_series_equal(res, exp)
        # int
        s = Series([], dtype='int64')
        res = s.quantile(0.5)
        assert np.isnan(res)
        res = s.quantile([0.5])
        exp = Series([np.nan], index=[0.5])
        tm.assert_series_equal(res, exp)
        # datetime
        s = Series([], dtype='datetime64[ns]')
        res = s.quantile(0.5)
        assert res is pd.NaT
        res = s.quantile([0.5])
        exp = Series([pd.NaT], index=[0.5])
        tm.assert_series_equal(res, exp)
|
Spending time in the Sabbia Med® makes you feel like you’re on holiday and is ideal for people who suffer from the winter blues (seasonal affective disorder). The system is also perfect for specific treatments, e.g. massages.
Even in the cold, dark season when the human eyes and skin only get a few rays of sunshine, our whole being needs a good dose of sunlight to function and maintain its balance. In Sabbia Med® your guests can top up on light, heat and that holiday feeling at any time, even in the winter.
A wonderful summer’s day is simulated in Sabbia Med® – from sunrise to sunset. Clean, warm, light sand and gentle light with the pleasant intensity of a summer’s day accompanied by subdued relaxing music let you forget grey days and the dark side of winter for a brief time.
Sabbia Med® offers constant brightness similar to daylight, gentle UV radiation and stimulates the metabolism. Spending time in Sabbia Med® is particularly recommended for people who suffer from the winter blues (seasonal affective disorder) as a result of its light therapy effect.
By the way, the room is also ideal for specific spa treatments, for example massages with that special flair.
The Sabbia Med® system is of course also available without sand. The light source can simply be integrated as an additional source in your relaxation area.
No irritating light for the eyes thanks to ideally arranged light sources and ceiling mirror to reflect light.
By designing the walls with illusionistic paintings or covering them with a whole variety of materials, coloured lighting module as well as customised programming.
An integrated UVC sterilisation lamp ensures a hygienic sand surface.
|
#!/usr/bin/python3
"""
Diaphora, a diffing plugin for IDA
Copyright (c) 2015-2021, Joxean Koret
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as
published by the Free Software Foundation, either version 3 of the
License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
"""
import os
import re
import sys
import time
import json
import decimal
import sqlite3
import threading
from threading import Thread
from io import StringIO
from difflib import SequenceMatcher
from multiprocessing import cpu_count
from diaphora_heuristics import *
from jkutils.kfuzzy import CKoretFuzzyHashing
from jkutils.factor import (FACTORS_CACHE, difference, difference_ratio,
primesbelow as primes)
try:
import idaapi
is_ida = True
except ImportError:
is_ida = False
#-------------------------------------------------------------------------------
# Version / banner strings shown by the plugin.
VERSION_VALUE = "2.0.5"
COPYRIGHT_VALUE="Copyright(c) 2015-2021 Joxean Koret"
COMMENT_VALUE="Diaphora diffing plugin for IDA version %s" % VERSION_VALUE
# Used to clean-up the pseudo-code and assembly dumps in order to get
# better comparison ratios: auto-generated symbol-name prefixes...
CMP_REPS = ["loc_", "j_nullsub_", "nullsub_", "j_sub_", "sub_",
  "qword_", "dword_", "byte_", "word_", "off_", "def_", "unk_", "asc_",
  "stru_", "dbl_", "locret_", "flt_", "jpt_"]
# ...and operand size specifiers stripped from assembly lines.
CMP_REMS = ["dword ptr ", "byte ptr ", "word ptr ", "qword ptr ", "short ptr"]
#-------------------------------------------------------------------------------
def result_iter(cursor, arraysize=1000):
  """Yield rows from *cursor* in batches of *arraysize* to bound memory use."""
  while True:
    batch = cursor.fetchmany(arraysize)
    if not batch:
      return
    yield from batch
#-------------------------------------------------------------------------------
def quick_ratio(buf1, buf2):
  """Return difflib's quick_ratio of the two buffers, compared per line.

  Returns 0 when either buffer is None/empty or on any internal error.
  """
  try:
    # Bug fix: the second emptiness test checked buf1 twice; it must test buf2.
    if buf1 is None or buf2 is None or buf1 == "" or buf2 == "":
      return 0
    s = SequenceMatcher(None, buf1.split("\n"), buf2.split("\n"))
    return s.quick_ratio()
  except:
    print("quick_ratio:", str(sys.exc_info()[1]))
    return 0
#-------------------------------------------------------------------------------
def real_quick_ratio(buf1, buf2):
  """Return difflib's real_quick_ratio of the two buffers, compared per line.

  Returns 0 when either buffer is None/empty or on any internal error.
  """
  try:
    # Bug fix: the second emptiness test checked buf1 twice; it must test buf2.
    if buf1 is None or buf2 is None or buf1 == "" or buf2 == "":
      return 0
    s = SequenceMatcher(None, buf1.split("\n"), buf2.split("\n"))
    return s.real_quick_ratio()
  except:
    print("real_quick_ratio:", str(sys.exc_info()[1]))
    return 0
#-------------------------------------------------------------------------------
def ast_ratio(ast1, ast2):
    """Similarity ratio between two AST prime-product signatures.

    Equal signatures score 1.0; a missing signature scores 0; otherwise the
    ratio is derived from the numeric difference of both prime products.
    """
    if ast1 == ast2:
        return 1.0
    if ast1 is None or ast2 is None:
        return 0
    d1 = decimal.Decimal(ast1)
    d2 = decimal.Decimal(ast2)
    return difference_ratio(d1, d2)
#-------------------------------------------------------------------------------
def log(msg):
    """Print a timestamped message, but only when called from the main
    thread (worker threads stay silent to keep the output readable)."""
    in_main_thread = isinstance(threading.current_thread(), threading._MainThread)
    if in_main_thread:
        print(("[%s] %s" % (time.asctime(), msg)))
#-------------------------------------------------------------------------------
def log_refresh(msg, show=False, do_log=True):
    """Log *msg* to the console.

    `show` and `do_log` are ignored here; they appear to exist for signature
    compatibility with a GUI-refreshing override — confirm against the IDA
    plugin code before removing them.
    """
    log(msg)
#-------------------------------------------------------------------------------
def debug_refresh(msg, show=False):
    """Log *msg* only when the DIAPHORA_DEBUG environment variable is set.

    `show` is ignored (kept for signature compatibility with log_refresh).
    """
    if os.getenv("DIAPHORA_DEBUG"):
        log(msg)
#-------------------------------------------------------------------------------
class CChooser():
    """Result list ("chooser") that accumulates match rows for later display
    or export. The various cmd_* attributes are command identifiers filled
    in by the UI layer when running inside IDA."""

    class Item:
        """One match row: primary address/name, optional secondary
        address/name, a description, the similarity ratio and the basic
        block count of each side."""

        def __init__(self, ea, name, ea2 = None, name2 = None, desc="100% equal", ratio = 0, bb1 = 0, bb2 = 0):
            self.ea = ea
            self.vfname = name
            self.ea2 = ea2
            self.vfname2 = name2
            self.description = desc
            self.ratio = ratio
            self.bb1 = int(bb1)
            self.bb2 = int(bb2)
            self.cmd_import_selected = None
            self.cmd_import_all = None
            self.cmd_import_all_funcs = None

        def __str__(self):
            return '%08x' % int(self.ea)

    def __init__(self, title, bindiff, show_commands=True):
        # Only the "Unmatched in primary" list refers to the first database.
        self.primary = title != "Unmatched in primary"

        self.title = title
        self.n = 0
        self.items = []
        self.icon = 41
        self.bindiff = bindiff
        self.show_commands = show_commands

        # UI command identifiers, assigned later by the IDA front-end.
        self.cmd_diff_asm = None
        self.cmd_diff_graph = None
        self.cmd_diff_c = None
        self.cmd_import_selected = None
        self.cmd_import_all = None
        self.cmd_import_all_funcs = None
        self.cmd_show_asm = None
        self.cmd_show_pseudo = None
        self.cmd_highlight_functions = None
        self.cmd_unhighlight_functions = None

        self.selected_items = []

    def add_item(self, item):
        row = ["%05lu" % self.n, "%08x" % int(item.ea), item.vfname]
        if not self.title.startswith("Unmatched in"):
            # Matched lists also carry the secondary side, the ratio and the
            # basic block counts.
            row += ["%08x" % int(item.ea2), item.vfname2, "%.3f" % item.ratio,
                    "%d" % item.bb1, "%d" % item.bb2, item.description]
        self.items.append(row)
        self.n += 1

    def get_color(self):
        # Returns None for titles without a dedicated colour.
        for prefix, colour in (("Best", 0xffff99),
                               ("Partial", 0x99ff99),
                               ("Unreliable", 0x9999ff)):
            if self.title.startswith(prefix):
                return colour
#-------------------------------------------------------------------------------
# Safety cap on how many rows a single heuristic SQL query may process.
MAX_PROCESSED_ROWS = 1000000
# Maximum number of seconds one heuristic is allowed to run (3 minutes).
TIMEOUT_LIMIT = 60 * 3
#-------------------------------------------------------------------------------
class bytes_encoder(json.JSONEncoder):
    """JSON encoder that serialises bytes objects as UTF-8 decoded strings;
    everything else falls through to the default encoder."""

    def default(self, obj):
        if not isinstance(obj, bytes):
            return super().default(obj)
        return obj.decode("utf-8")
#-------------------------------------------------------------------------------
class CBinDiff:
def __init__(self, db_name, chooser=CChooser):
    """Core diffing engine working over SQLite databases.

    db_name: path of the primary (exported) SQLite database.
    chooser: factory used to create result lists (console or IDA UI).
    """
    self.names = dict()
    # Pre-computed primes used to build prime-product signatures.
    self.primes = primes(2048*2048)
    self.db_name = db_name
    # One SQLite connection per thread ident, see open_db()/get_db().
    self.dbs_dict = {}
    self.db = None # Used exclusively by the exporter!
    self.open_db()
    # Function names already matched in the primary/secondary database.
    self.matched1 = set()
    self.matched2 = set()
    self.matches_cache = {}
    self.total_functions1 = None
    self.total_functions2 = None
    self.equal_callgraph = False

    self.kfh = CKoretFuzzyHashing()
    # With this block size we're sure it will only apply to functions
    # somehow big
    self.kfh.bsize = 32

    self.pseudo = {}
    self.pseudo_hash = {}
    self.pseudo_comments = {}

    self.unreliable = self.get_value_for("unreliable", False)
    self.relaxed_ratio = self.get_value_for("relaxed_ratio", False)
    self.experimental = self.get_value_for("experimental", False)
    self.slow_heuristics = self.get_value_for("slow_heuristics", False)
    # NOTE(review): the four environment-derived flags above are
    # unconditionally overwritten right here, so the DIAPHORA_* variables
    # currently have no effect on them — looks unintentional; confirm
    # before relying on those environment variables.
    self.unreliable = False
    self.relaxed_ratio = False
    self.experimental = False
    self.slow_heuristics = False
    self.use_decompiler_always = True
    self.exclude_library_thunk = True

    # Optional user-supplied script providing hook callbacks.
    self.project_script = None
    self.hooks = None

    # Create the choosers
    self.chooser = chooser
    self.create_choosers()
    self.last_diff_db = None
    # Cache of compiled regular expressions used by re_sub().
    self.re_cache = {}

    ####################################################################
    # LIMITS
    #
    # Do not run heuristics for more than X seconds (by default, 3 minutes).
    self.timeout = self.get_value_for("TIMEOUT_LIMIT", TIMEOUT_LIMIT)
    # It's typical in SQL queries to get a cartesian product of the
    # results in the functions tables. Do not process more than this
    # value per each 20k functions.
    self.max_processed_rows = self.get_value_for("MAX_PROCESSED_ROWS", MAX_PROCESSED_ROWS)
    # Limits to filter the functions to export
    self.min_ea = 0
    self.max_ea = 0
    # Export only non IDA automatically generated function names? I.e.,
    # excluding these starting with sub_*
    self.ida_subs = True
    # Export only function summaries instead of also exporting both the
    # basic blocks and all instructions used by functions?
    self.function_summaries_only = False
    # Ignore IDA's automatically generated sub_* names for heuristics
    # like the 'Same name'?
    self.ignore_sub_names = True
    # Ignore any and all function names for the 'Same name' heuristic?
    self.ignore_all_names = self.get_value_for("ignore_all_names", True)
    # Ignore small functions?
    self.ignore_small_functions = self.get_value_for("ignore_small_functions", False)
    # Number of CPU threads/cores to use?
    cpus = cpu_count() - 1
    if cpus < 1:
        cpus = 1
    self.cpu_count = self.get_value_for("CPU_COUNT", cpus)
    ####################################################################
####################################################################
def __del__(self):
    """Best-effort destructor: detach the attached diff database (if any)
    and close this thread's connection(s).

    All errors are swallowed on purpose because destructor ordering during
    interpreter shutdown is unpredictable.
    """
    if self.db is not None:
        try:
            if self.last_diff_db is not None:
                tid = threading.current_thread().ident
                if tid in self.dbs_dict:
                    db = self.dbs_dict[tid]
                    # BUGFIX: sqlite3.Cursor is not a context manager, so the
                    # previous 'with db.cursor() as cur:' always raised (and
                    # was silently swallowed), meaning the detach never ran.
                    cur = db.cursor()
                    try:
                        cur.execute('detach "%s"' % self.last_diff_db)
                    finally:
                        cur.close()
        except:
            pass
        self.db_close()
def get_value_for(self, value_name, default):
    """Read DIAPHORA_<VALUE_NAME> from the environment, coercing it to the
    type of *default*; return *default* when the variable is unset.

    NOTE: bool coercion follows Python semantics, so any non-empty string
    (including "False") coerces to True.
    """
    env_value = os.getenv("DIAPHORA_%s" % value_name.upper())
    if env_value is None:
        return default
    if type(env_value) != type(default):
        env_value = type(default)(env_value)
    return env_value
def open_db(self):
    """Open a SQLite connection for the calling thread and register it in
    self.dbs_dict.

    The main thread's connection is additionally exposed as self.db and is
    the one that creates the schema.
    """
    db = sqlite3.connect(self.db_name, check_same_thread=True)
    # Plain str text and name-indexable rows for all queries.
    db.text_factory = str
    db.row_factory = sqlite3.Row

    tid = threading.current_thread().ident
    self.dbs_dict[tid] = db
    if isinstance(threading.current_thread(), threading._MainThread):
        self.db = db
        self.create_schema()
        db.execute("analyze")
def get_db(self):
    """Return the calling thread's connection, opening it on first use and
    re-attaching the last diff database so worker threads see both DBs."""
    tid = threading.current_thread().ident
    if not tid in self.dbs_dict:
        self.open_db()
        if self.last_diff_db is not None:
            self.attach_database(self.last_diff_db)
    return self.dbs_dict[tid]
def db_cursor(self):
    """Return a new cursor on the calling thread's connection."""
    db = self.get_db()
    return db.cursor()
def db_close(self):
    """Close and forget the calling thread's connection; the main thread
    also closes the exporter connection self.db."""
    tid = threading.current_thread().ident
    if tid in self.dbs_dict:
        self.dbs_dict[tid].close()
        del self.dbs_dict[tid]
    if isinstance(threading.current_thread(), threading._MainThread):
        # NOTE(review): in the main thread self.db is usually the very same
        # connection just closed above; closing a sqlite3 connection twice
        # is harmless, but confirm this double-close is intended.
        self.db.close()
def create_schema(self):
    """Create (idempotently) every table used by the exporter and seed the
    'version' table with the current database format version."""
    cur = self.db_cursor()
    # Required for the "on delete cascade" clauses below to take effect.
    cur.execute("PRAGMA foreign_keys = ON")

    # One row per exported function; most columns are hashes/signatures
    # consumed by the matching heuristics.
    sql = """ create table if not exists functions (
                id integer primary key,
                name varchar(255),
                address text unique,
                nodes integer,
                edges integer,
                indegree integer,
                outdegree integer,
                size integer,
                instructions integer,
                mnemonics text,
                names text,
                prototype text,
                cyclomatic_complexity integer,
                primes_value text,
                comment text,
                mangled_function text,
                bytes_hash text,
                pseudocode text,
                pseudocode_lines integer,
                pseudocode_hash1 text,
                pseudocode_primes text,
                function_flags integer,
                assembly text,
                prototype2 text,
                pseudocode_hash2 text,
                pseudocode_hash3 text,
                strongly_connected integer,
                loops integer,
                rva text unique,
                tarjan_topological_sort text,
                strongly_connected_spp text,
                clean_assembly text,
                clean_pseudo text,
                mnemonics_spp text,
                switches text,
                function_hash text,
                bytes_sum integer,
                md_index text,
                constants text,
                constants_count integer,
                segment_rva text,
                assembly_addrs text,
                kgh_hash text,
                userdata text) """
    cur.execute(sql)

    # Program-wide data: callgraph signatures, processor, binary MD5.
    sql = """ create table if not exists program (
                id integer primary key,
                callgraph_primes text,
                callgraph_all_primes text,
                processor text,
                md5sum text
              ) """
    cur.execute(sql)

    # Free-form (name, type, value) metadata rows.
    sql = """ create table if not exists program_data (
                id integer primary key,
                name varchar(255),
                type varchar(255),
                value text
              )"""
    cur.execute(sql)

    # Database format version (single row).
    sql = """ create table if not exists version (value text) """
    cur.execute(sql)

    # One row per exported instruction.
    sql = """ create table if not exists instructions (
                id integer primary key,
                address text unique,
                disasm text,
                mnemonic text,
                comment1 text,
                comment2 text,
                name text,
                type text,
                pseudocomment text,
                pseudoitp integer) """
    cur.execute(sql)

    # Basic blocks plus the relations tying them to instructions,
    # to each other (CFG edges) and to their owning function.
    sql = """ create table if not exists basic_blocks (
                id integer primary key,
                num integer,
                address text unique)"""
    cur.execute(sql)

    sql = """ create table if not exists bb_relations (
                id integer primary key,
                parent_id integer not null references basic_blocks(id) ON DELETE CASCADE,
                child_id integer not null references basic_blocks(id) ON DELETE CASCADE)"""
    cur.execute(sql)

    sql = """ create table if not exists bb_instructions (
                id integer primary key,
                basic_block_id integer references basic_blocks(id) on delete cascade,
                instruction_id integer references instructions(id) on delete cascade)"""
    cur.execute(sql)

    sql = """ create table if not exists function_bblocks (
                id integer primary key,
                function_id integer not null references functions(id) on delete cascade,
                basic_block_id integer not null references basic_blocks(id) on delete cascade)"""
    cur.execute(sql)

    # Caller/callee edges per function.
    sql = """create table if not exists callgraph (
                id integer primary key,
                func_id integer not null references functions(id) on delete cascade,
                address text not null,
                type text not null)"""
    cur.execute(sql)

    # Interesting constants referenced by each function.
    sql = """create table if not exists constants (
                id integer primary key,
                func_id integer not null references functions(id) on delete cascade,
                constant text not null)"""
    cur.execute(sql)

    # Seed the version table on first creation only.
    cur.execute("select 1 from version")
    row = cur.fetchone()
    if not row:
        cur.execute("insert into main.version values ('%s')" % VERSION_VALUE)

    cur.close()
def create_indexes(self):
    """Create (idempotently) every index used by the diffing SQL queries,
    then refresh the query-planner statistics with ANALYZE.

    Note: SQLite index names live in a single per-database namespace, not
    per table, and "if not exists" matches by name alone.
    """
    statements = (
        "create index if not exists idx_assembly on functions(assembly)",
        "create index if not exists idx_bytes_hash on functions(bytes_hash)",
        "create index if not exists idx_pseudocode on functions(pseudocode)",
        "create index if not exists idx_name on functions(name)",
        "create index if not exists idx_mangled_name on functions(mangled_function)",
        "create index if not exists idx_names on functions(names)",
        "create index if not exists idx_asm_pseudo on functions(assembly, pseudocode)",
        "create index if not exists idx_nodes_edges_instructions on functions(nodes, edges, instructions)",
        "create index if not exists idx_composite1 on functions(nodes, edges, mnemonics, names, cyclomatic_complexity, prototype2, indegree, outdegree)",
        "create index if not exists idx_composite2 on functions(instructions, mnemonics, names)",
        "create index if not exists idx_composite3 on functions(nodes, edges, cyclomatic_complexity)",
        "create index if not exists idx_composite4 on functions(pseudocode_lines, pseudocode)",
        "create index if not exists idx_composite5 on functions(pseudocode_lines, pseudocode_primes)",
        "create index if not exists idx_composite6 on functions(names, mnemonics)",
        "create index if not exists idx_pseudocode_hash1 on functions(pseudocode_hash1)",
        "create index if not exists idx_pseudocode_hash2 on functions(pseudocode_hash2)",
        "create index if not exists idx_pseudocode_hash3 on functions(pseudocode_hash3)",
        "create index if not exists idx_pseudocode_hash on functions(pseudocode_hash1, pseudocode_hash2, pseudocode_hash3)",
        "create index if not exists idx_strongly_connected on functions(strongly_connected)",
        "create index if not exists idx_strongly_connected_spp on functions(strongly_connected_spp)",
        "create index if not exists idx_loops on functions(loops)",
        "create index if not exists idx_rva on functions(rva)",
        "create index if not exists idx_tarjan_topological_sort on functions(tarjan_topological_sort)",
        "create index if not exists idx_mnemonics_spp on functions(mnemonics_spp)",
        "create index if not exists idx_clean_asm on functions(clean_assembly)",
        "create index if not exists idx_clean_pseudo on functions(clean_pseudo)",
        "create index if not exists idx_switches on functions(switches)",
        "create index if not exists idx_function_hash on functions(function_hash)",
        "create index if not exists idx_bytes_sum on functions(bytes_sum)",
        "create index if not exists idx_md_index on functions(md_index)",
        "create index if not exists idx_kgh_hash on functions(kgh_hash)",
        "create index if not exists idx_constants on functions(constants_count, constants)",
        "create index if not exists idx_mdindex_constants on functions(md_index, constants_count, constants)",
        "create index if not exists idx_instructions_address on instructions (address)",
        "create index if not exists idx_bb_relations on bb_relations(parent_id, child_id)",
        "create index if not exists idx_bb_instructions on bb_instructions (basic_block_id, instruction_id)",
        "create index if not exists id_function_blocks on function_bblocks (function_id, basic_block_id)",
        # BUGFIX: this index used to be named 'idx_constants' as well,
        # clashing with the functions(constants_count, constants) index
        # above. Because "if not exists" matches on the name, the
        # constants-table index was silently never created.
        "create index if not exists idx_constants_constant on constants (constant)",
        "analyze",
    )
    cur = self.db_cursor()
    for sql in statements:
        cur.execute(sql)
    cur.close()
def attach_database(self, diff_db):
    """ATTACH *diff_db* to this thread's connection under the schema name
    'diff' so both databases can be queried in one SQL statement."""
    cur = self.db_cursor()
    # NOTE(review): the path is interpolated, not bound; a path containing
    # a double quote would break the statement — confirm paths are trusted.
    cur.execute('attach "%s" as diff' % diff_db)
    cur.close()
def equal_db(self):
    """Return True when both attached databases describe the same binary:
    either the stored MD5 sums match, or the full functions tables are
    identical (set difference is empty)."""
    cur = self.db_cursor()
    sql = "select count(*) total from program p, diff.program dp where p.md5sum = dp.md5sum"
    cur.execute(sql)
    row = cur.fetchone()
    ret = row["total"] == 1
    if not ret:
        # Different MD5: fall back to comparing the functions tables row
        # by row via a set EXCEPT query.
        sql = "select count(*) total from (select * from functions except select * from diff.functions) x"
        cur.execute(sql)
        row = cur.fetchone()
        ret = row["total"] == 0
    else:
        log("Same MD5 in both databases")
    cur.close()
    return ret
def add_program_data(self, type_name, key, value):
    """Insert one (name, type, value) metadata row into program_data."""
    cur = self.db_cursor()
    sql = "insert into main.program_data (name, type, value) values (?, ?, ?)"
    values = (key, type_name, value)
    cur.execute(sql, values)
    cur.close()
def get_instruction_id(self, addr):
    """Return the instructions.id of the row stored for address *addr*, or
    None when that instruction has not been exported yet."""
    cur = self.db_cursor()
    cur.execute("select id from instructions where address = ?", (str(addr),))
    row = cur.fetchone()
    cur.close()
    return None if row is None else row["id"]
def get_bb_id(self, addr):
    """Return the basic_blocks.id of the row stored for address *addr*, or
    None when that basic block has not been exported yet."""
    cur = self.db_cursor()
    cur.execute("select id from basic_blocks where address = ?", (str(addr),))
    row = cur.fetchone()
    cur.close()
    return None if row is None else row["id"]
def save_function(self, props):
    """Persist one exported function: its row in 'functions', its callgraph
    edges, its constants and (unless function_summaries_only) its basic
    blocks, instructions and CFG relations.

    *props* is the flat sequence built by the exporter; the last 4 elements
    are callers, callees, basic_blocks_data and bb_relations, and everything
    before them maps 1:1 onto the columns of the 'functions' table.
    """
    if props == False:
        log("WARNING: Trying to save a non resolved function?")
        return

    # Phase 1: Fix data types and insert the function row.
    cur = self.db_cursor()
    new_props = []
    # The last 4 fields are callers, callees, basic_blocks_data & bb_relations
    for prop in props[:len(props)-4]:
        # XXX: Fixme! This is a hack for 64 bit architectures kernels
        if type(prop) is int and (prop > 0xFFFFFFFF or prop < -0xFFFFFFFF):
            prop = str(prop)
        elif type(prop) is bytes:
            # BUGFIX: bytes objects have no .encode() in Python 3; the
            # previous prop.encode("utf-8") raised AttributeError. Decode
            # to text so the value can be stored in a text column.
            prop = prop.decode("utf-8")

        if type(prop) is list or type(prop) is set:
            new_props.append(json.dumps(list(prop), ensure_ascii=False, cls=bytes_encoder))
        else:
            new_props.append(prop)

    sql = """insert into main.functions (name, nodes, edges, indegree, outdegree, size,
                    instructions, mnemonics, names, prototype,
                    cyclomatic_complexity, primes_value, address,
                    comment, mangled_function, bytes_hash, pseudocode,
                    pseudocode_lines, pseudocode_hash1, pseudocode_primes,
                    function_flags, assembly, prototype2, pseudocode_hash2,
                    pseudocode_hash3, strongly_connected, loops, rva,
                    tarjan_topological_sort, strongly_connected_spp,
                    clean_assembly, clean_pseudo, mnemonics_spp, switches,
                    function_hash, bytes_sum, md_index, constants,
                    constants_count, segment_rva, assembly_addrs, kgh_hash,
                    userdata)
                values (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?,
                        ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?,
                        ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)"""
    try:
        cur.execute(sql, new_props)
    except:
        # Dump the offending values before re-raising to ease debugging.
        print("Props???", new_props)
        raise

    func_id = cur.lastrowid

    # Phase 2: Save the callers and callees of the function
    callers, callees = props[len(props)-4:len(props)-2]
    sql = "insert into callgraph (func_id, address, type) values (?, ?, ?)"
    for caller in callers:
        cur.execute(sql, (func_id, str(caller), 'caller'))
    for callee in callees:
        cur.execute(sql, (func_id, str(callee), 'callee'))

    # Phase 3: Insert the constants of the function
    sql = "insert into constants (func_id, constant) values (?, ?)"
    props_dict = self.create_function_dictionary(props)
    for constant in props_dict["constants"]:
        # Only string constants longer than 4 characters are interesting
        # for the constants-based heuristics.
        if type(constant) in [str, bytes] and len(constant) > 4:
            cur.execute(sql, (func_id, constant))

    # Phase 4: Save the basic blocks relationships
    if not self.function_summaries_only:
        # The last 2 fields are basic_blocks_data & bb_relations
        bb_data, bb_relations = props[len(props)-2:]
        instructions_ids = {}
        sql = """insert into main.instructions (address, mnemonic, disasm,
                        comment1, comment2, name,
                        type, pseudocomment,
                        pseudoitp)
                    values (?, ?, ?, ?, ?, ?, ?, ?, ?)"""
        # Bind frequently-called methods to locals for the hot loops below.
        self_get_instruction_id = self.get_instruction_id
        cur_execute = cur.execute
        for key in bb_data:
            for insn in bb_data[key]:
                addr, mnem, disasm, cmt1, cmt2, name, mtype = insn
                db_id = self_get_instruction_id(str(addr))
                if db_id is None:
                    pseudocomment = None
                    pseudoitp = None
                    if addr in self.pseudo_comments:
                        pseudocomment, pseudoitp = self.pseudo_comments[addr]
                    cur_execute(sql, (str(addr), mnem, disasm, cmt1, cmt2, name, mtype, pseudocomment, pseudoitp))
                    db_id = cur.lastrowid
                instructions_ids[addr] = db_id

        num = 0
        bb_ids = {}
        sql1 = "insert into main.basic_blocks (num, address) values (?, ?)"
        sql2 = "insert into main.bb_instructions (basic_block_id, instruction_id) values (?, ?)"
        self_get_bb_id = self.get_bb_id
        for key in bb_data:
            # Insert each basic block
            num += 1
            ins_ea = str(key)
            last_bb_id = self_get_bb_id(ins_ea)
            if last_bb_id is None:
                cur_execute(sql1, (num, str(ins_ea)))
                last_bb_id = cur.lastrowid
            bb_ids[ins_ea] = last_bb_id

            # Insert relations between basic blocks and instructions
            for insn in bb_data[key]:
                ins_id = instructions_ids[insn[0]]
                cur_execute(sql2, (last_bb_id, ins_id))

        # Insert relations between basic blocks
        sql = "insert into main.bb_relations (parent_id, child_id) values (?, ?)"
        for key in bb_relations:
            for bb in bb_relations[key]:
                bb = str(bb)
                key = str(key)
                try:
                    cur_execute(sql, (bb_ids[key], bb_ids[bb]))
                except:
                    # key doesnt exist because it doesnt have forward references to any bb
                    log("Error: %s" % str(sys.exc_info()[1]))

        # And finally insert the functions to basic blocks relations
        sql = "insert into main.function_bblocks (function_id, basic_block_id) values (?, ?)"
        for key in bb_ids:
            bb_id = bb_ids[key]
            cur_execute(sql, (func_id, bb_id))

    cur.close()
def get_valid_definition(self, defs):
    """ Try to get a valid structure definition by removing (yes) the
    invalid characters typically found in IDA's generated structs."""
    sanitized = defs
    for bad_char in ("?", "@", "$"):
        sanitized = sanitized.replace(bad_char, "_")
    return sanitized
def prettify_asm(self, asm_source):
    """Re-indent an assembly dump: every line gets a leading tab except
    'loc_' labels, which stay at column 0."""
    pretty = [
        line if line.startswith("loc_") else "\t" + line
        for line in asm_source.split("\n")
    ]
    return "\n".join(pretty)
def re_sub(self, text, repl, string):
if text not in self.re_cache:
self.re_cache[text] = re.compile(text, flags=re.IGNORECASE)
re_obj = self.re_cache[text]
return re_obj.sub(repl, string)
def get_cmp_asm_lines(self, asm):
    """Normalise a whole assembly dump for comparison by passing each line
    through get_cmp_asm()."""
    normalized = [
        self.get_cmp_asm(raw_line.strip("\n"))
        for raw_line in StringIO(asm).readlines()
    ]
    return "\n".join(normalized)
def get_cmp_pseudo_lines(self, pseudo):
    """Normalise pseudo-code for comparison: strip '//' comments and replace
    auto-generated identifiers (sub_XXXX, v1, a1, arg_0, ...) with fixed
    placeholders so cosmetic renames don't lower the similarity ratio."""
    if pseudo is None:
        return pseudo

    # Remove all the comments
    tmp = self.re_sub(" // .*", "", pseudo)

    # Now, replace sub_, byte_, word_, dword_, loc_, etc...
    for rep in CMP_REPS:
        tmp = self.re_sub(rep + "[a-f0-9A-F]+", rep + "XXXX", tmp)
    # Decompiler-generated local/argument names.
    tmp = self.re_sub("v[0-9]+", "vXXX", tmp)
    tmp = self.re_sub("a[0-9]+", "aXXX", tmp)
    tmp = self.re_sub("arg_[0-9]+", "aXXX", tmp)
    return tmp
def get_cmp_asm(self, asm):
    """Normalise one assembly line for comparison: drop comments, replace
    auto-generated names and numeric offsets with placeholders and strip
    trailing whitespace."""
    if asm is None:
        return asm

    # Ignore the comments in the assembly dump
    tmp = asm.split(";")[0]
    tmp = tmp.split(" # ")[0]
    # Now, replace sub_, byte_, word_, dword_, loc_, etc...
    for rep in CMP_REPS:
        tmp = self.re_sub(rep + "[a-f0-9A-F]+", "XXXX", tmp)

    # Remove dword ptr, byte ptr, etc...
    for rep in CMP_REMS:
        tmp = self.re_sub(rep + "[a-f0-9A-F]+", "", tmp)

    # Normalise hexadecimal displacements like '+1234h+'.
    reps = ["\+[a-f0-9A-F]+h\+"]
    for rep in reps:
        tmp = self.re_sub(rep, "+XXXX+", tmp)
    tmp = self.re_sub("\.\.[a-f0-9A-F]{8}", "XXX", tmp)

    # Strip any possible remaining white-space character at the end of
    # the cleaned-up instruction
    tmp = self.re_sub("[ \t\n]+$", "", tmp)

    # Replace aName_XXX with aXXX, useful to ignore small changes in
    # offsets created to strings
    tmp = self.re_sub("a[A-Z]+[a-z0-9]+_[0-9]+", "aXXX", tmp)

    return tmp
def compare_graphs_pass(self, bblocks1, bblocks2, colours1, colours2, is_second = False):
    """One pass of flow-graph colouring: compare every basic block of the
    first function against every block of the second.

    Blocks with identical (cleaned) instructions are coloured white
    (0xffffff); blocks with the same mnemonic sequence but different
    operands are coloured light cyan (0xCCffff, first pass only).
    Returns the updated colour dictionaries.
    """
    dones1 = set()
    dones2 = set()

    # Now compare each basic block from the first function to all the
    # basic blocks in the 2nd function
    for key1 in bblocks1:
        if key1 in dones1:
            continue

        for key2 in bblocks2:
            if key2 in dones2:
                continue

            # Same number of instructions?
            if len(bblocks1[key1]) == len(bblocks2[key2]):
                mod = False
                partial = True
                i = 0
                for ins1 in bblocks1[key1]:
                    ins2 = bblocks2[key2][i]

                    # Same mnemonic? The change can be only partial
                    if ins1[1] != ins2[1]:
                        partial = False

                    # Try to compare the assembly after doing some cleaning
                    cmp_asm1 = self.get_cmp_asm(ins1[2])
                    cmp_asm2 = self.get_cmp_asm(ins2[2])
                    if cmp_asm1 != cmp_asm2:
                        mod = True
                        if not partial:
                            continue
                    i += 1

                if not mod:
                    # Perfect match, we discovered a basic block equal in both
                    # functions
                    colours1[key1] = 0xffffff
                    colours2[key2] = 0xffffff
                    dones1.add(key1)
                    dones2.add(key2)
                    break
                elif not is_second and partial:
                    # Partial match, we discovered a basic block with the same
                    # mnemonics but something changed
                    #
                    # NOTE:
                    # Do not add the partial matches to the dones lists, as we
                    # can have complete matches after a partial match!
                    colours1[key1] = 0xCCffff
                    colours2[key2] = 0xCCffff
                    break
    return colours1, colours2
def compare_graphs(self, g1, ea1, g2, ea2):
colours1 = {}
colours2 = {}
bblocks1 = g1[0]
bblocks2 = g2[0]
# Consider, by default, all blocks added, news
for key1 in bblocks1:
colours1[key1] = 0xCCCCFF
for key2 in bblocks2:
colours2[key2] = 0xCCCCFF
colours1, colours2 = self.compare_graphs_pass(bblocks1, bblocks2, colours1, colours2, False)
colours1, colours2 = self.compare_graphs_pass(bblocks1, bblocks2, colours1, colours2, True)
return colours1, colours2
def get_graph(self, ea1, primary=False):
    """Load the flow graph of the function at address *ea1*.

    primary selects the 'main' schema, otherwise the attached 'diff' one.
    Returns (bb_blocks, bb_relations) where bb_blocks maps a basic block
    address to its [address, mnemonic, disasm] instruction triplets and
    bb_relations maps a parent block address to the set of its children.
    """
    if primary:
        db = "main"
    else:
        db = "diff"
    cur = self.db_cursor()
    dones = set()
    # All instructions of every basic block of the function, ordered by
    # basic block address.
    sql = """ select bb.address bb_address, ins.address ins_address,
                     ins.mnemonic ins_mnem, ins.disasm ins_disasm
                from %s.function_bblocks fb,
                     %s.bb_instructions bbins,
                     %s.instructions ins,
                     %s.basic_blocks bb,
                     %s.functions f
               where ins.id = bbins.instruction_id
                 and bbins.basic_block_id = bb.id
                 and bb.id = fb.basic_block_id
                 and f.id = fb.function_id
                 and f.address = ?
               order by bb.address asc""" % (db, db, db, db, db)
    cur.execute(sql, (str(ea1),))
    bb_blocks = {}
    for row in result_iter(cur):
        bb_ea = str(int(row["bb_address"]))
        ins_ea = str(int(row["ins_address"]))
        mnem = row["ins_mnem"]
        dis = row["ins_disasm"]

        # Skip instructions shared by more than one basic block.
        if ins_ea in dones:
            continue
        dones.add(ins_ea)

        try:
            bb_blocks[bb_ea].append([ins_ea, mnem, dis])
        except KeyError:
            bb_blocks[bb_ea] = [ [ins_ea, mnem, dis] ]

    # Parent/child edges between the function's basic blocks.
    sql = """ select (select address
                        from %s.basic_blocks
                       where id = bbr.parent_id) ea1,
                     (select address
                        from %s.basic_blocks
                       where id = bbr.child_id) ea2
                from %s.bb_relations bbr,
                     %s.function_bblocks fbs,
                     %s.basic_blocks bbs,
                     %s.functions f
               where f.id = fbs.function_id
                 and bbs.id = fbs.basic_block_id
                 and fbs.basic_block_id = bbr.child_id
                 and f.address = ?
               order by 1 asc, 2 asc""" % (db, db, db, db, db, db)
    cur.execute(sql, (str(ea1), ))
    rows = result_iter(cur)

    bb_relations = {}
    for row in rows:
        bb_ea1 = str(row["ea1"])
        bb_ea2 = str(row["ea2"])
        try:
            bb_relations[bb_ea1].add(bb_ea2)
        except KeyError:
            bb_relations[bb_ea1] = set([bb_ea2])

    cur.close()
    return bb_blocks, bb_relations
def delete_function(self, ea):
    """Delete the functions row at address *ea*; rows in dependent tables
    are removed via their ON DELETE CASCADE foreign keys."""
    cur = self.db_cursor()
    cur.execute("delete from functions where address = ?", (str(ea), ))
    cur.close()
def is_auto_generated(self, name):
    """Return True when *name* starts with one of the IDA auto-generated
    prefixes listed in CMP_REPS (sub_, loc_, dword_, ...)."""
    return any(name.startswith(prefix) for prefix in CMP_REPS)
def check_callgraph(self):
    """Compare the whole-program callgraph signatures of both databases and
    log how structurally similar the two binaries are. Sets
    self.equal_callgraph when the prime products are identical."""
    cur = self.db_cursor()
    sql = """select callgraph_primes, callgraph_all_primes from program
             union all
             select callgraph_primes, callgraph_all_primes from diff.program"""
    cur.execute(sql)
    rows = cur.fetchall()
    if len(rows) == 2:
        cg1 = decimal.Decimal(rows[0]["callgraph_primes"])
        cg_factors1 = json.loads(rows[0]["callgraph_all_primes"])
        cg2 = decimal.Decimal(rows[1]["callgraph_primes"])
        cg_factors2 = json.loads(rows[1]["callgraph_all_primes"])

        if cg1 == cg2:
            self.equal_callgraph = True
            log("Callgraph signature for both databases is equal, the programs seem to be 100% equal structurally")
            Warning("Callgraph signature for both databases is equal, the programs seem to be 100% equal structurally")
        else:
            # Seed the factorisation cache so difference() can reuse the
            # already known prime factors.
            FACTORS_CACHE[cg1] = cg_factors1
            FACTORS_CACHE[cg2] = cg_factors2
            diff = difference(cg1, cg2)
            total = sum(cg_factors1.values())
            if total == 0 or diff == 0:
                log("Callgraphs are 100% equal")
            else:
                percent = diff * 100. / total
                if percent >= 100:
                    log("Callgraphs are absolutely different")
                else:
                    log("Callgraphs from both programs differ in %f%%" % percent)
    cur.close()
def find_equal_matches_parallel(self):
    """Find functions whose rows are byte-for-byte identical in both
    databases (a SQL INTERSECT), record them as 100% matches, then run the
    'Best' heuristic category. Also initialises the per-database function
    totals used to decide when everything has been matched."""
    cur = self.db_cursor()
    # Start by calculating the total number of functions in both databases
    sql = """select count(*) total from functions
             union all
             select count(*) total from diff.functions"""
    cur.execute(sql)
    rows = cur.fetchall()
    if len(rows) != 2:
        Warning("Malformed database, only %d rows!" % len(rows))
        raise Exception("Malformed database!")

    self.total_functions1 = rows[0]["total"]
    self.total_functions2 = rows[1]["total"]

    sql = "select address ea, mangled_function, nodes from (select * from functions intersect select * from diff.functions) x"
    cur.execute(sql)
    rows = cur.fetchall()
    if len(rows) > 0:
        for row in rows:
            name = row["mangled_function"]
            ea = row["ea"]
            nodes = int(row["nodes"])

            self.best_chooser.add_item(CChooser.Item(ea, name, ea, name, "100% equal", 1, nodes, nodes))
            # NOTE(review): matched1/matched2 hold *names* here, while the
            # totals above count rows — verify identical rows can't share a
            # mangled name.
            self.matched1.add(name)
            self.matched2.add(name)
    cur.close()

    if not self.ignore_all_names:
        self.find_same_name(self.partial_chooser)

    self.run_heuristics_for_category("Best")
def run_heuristics_for_category(self, arg_category):
    """Run every registered heuristic of *arg_category* ('Best', 'Partial',
    'Unreliable', ...), one worker thread per heuristic, bounded by
    self.cpu_count concurrent threads.

    Heuristic SQL is taken from HEURISTICS; hooks (if any) may alter the
    query postfix, the heuristic list and each SQL statement. Long-running
    queries are interrupted after TIMEOUT_LIMIT seconds.
    """
    total_cpus = self.cpu_count
    if total_cpus < 1:
        total_cpus = 1

    mode = "[Parallel]"
    if total_cpus == 1:
        mode = "[Single thread]"

    # Optional size filter appended to every heuristic's WHERE clause.
    postfix = ""
    if self.ignore_small_functions:
        postfix = " and f.instructions > 5 and df.instructions > 5 "

    if self.hooks is not None:
        if 'get_queries_postfix' in dir(self.hooks):
            postfix = self.hooks.get_queries_postfix(arg_category, postfix)

    threads_list = []
    heuristics = list(HEURISTICS)
    if self.hooks is not None:
        if 'get_heuristics' in dir(self.hooks):
            heuristics = self.hooks.get_heuristics(arg_category, heuristics)

    for heur in heuristics:
        # Stop early once one side is fully matched.
        if len(self.matched1) == self.total_functions1 or len(self.matched2) == self.total_functions2:
            log("All functions matched in at least one database, finishing.")
            break

        category = heur["category"]
        if category != arg_category:
            continue

        name = heur["name"]
        sql = heur["sql"]
        ratio = heur["ratio"]
        min_value = 0.0
        if ratio == HEUR_TYPE_RATIO_MAX:
            min_value = heur["min"]

        flags = heur["flags"]
        if flags & HEUR_FLAG_UNRELIABLE == HEUR_FLAG_UNRELIABLE and not self.unreliable:
            log_refresh("Skipping unreliable heuristic '%s'" % name)
            continue

        if flags & HEUR_FLAG_SLOW == HEUR_FLAG_SLOW and not self.slow_heuristics:
            log_refresh("Skipping slow heuristic '%s'" % name)
            continue

        # Unreliable-category results are demoted one chooser level.
        if arg_category == "Unreliable":
            best = self.partial_chooser
            partial = self.unreliable_chooser
        else:
            best = self.best_chooser
            partial = self.partial_chooser

        log_refresh("%s Finding with heuristic '%s'" % (mode, name))
        sql = sql.replace("%POSTFIX%", postfix)

        if self.hooks is not None:
            if 'on_launch_heuristic' in dir(self.hooks):
                sql = self.hooks.on_launch_heuristic(name, sql)

        # Select the worker according to the heuristic's ratio policy.
        if ratio == HEUR_TYPE_NO_FPS:
            t = Thread(target=self.add_matches_from_query, args=(sql, best))
        elif ratio == HEUR_TYPE_RATIO:
            t = Thread(target=self.add_matches_from_query_ratio, args=(sql, best, partial))
        elif ratio == HEUR_TYPE_RATIO_MAX:
            t = Thread(target=self.add_matches_from_query_ratio_max, args=(sql, min_value))
        else:
            raise Exception("Invalid heuristic ratio calculation value!")

        t.name = name
        t.time = time.time()
        t.start()
        threads_list.append(t)

        if total_cpus == 1:
            t.join()
            threads_list = []

        # Throttle: wait until a slot is free before launching the next one.
        while len(threads_list) >= total_cpus:
            for i, t in enumerate(threads_list):
                if not t.is_alive():
                    debug_refresh("[Parallel] Heuristic '%s' took %f..." % (t.name, time.time() - t.time))
                    del threads_list[i]
                    debug_refresh("[Parallel] Waiting for any of %d thread(s) running to finish..." % len(threads_list))
                    break
                else:
                    log_refresh("[Parallel] %d thread(s) running, waiting for at least one to finish..." % len(threads_list), do_log=False)
                    t.join(0.1)

            if is_ida:
                self.refresh()

    if len(threads_list) > 0:
        log_refresh("[Parallel] Waiting for remaining %d thread(s) to finish..." % len(threads_list), do_log=False)

        do_cancel = False
        times = 0
        while len(threads_list) > 0 and not do_cancel:
            times += 1
            for i, t in enumerate(threads_list):
                t.join(0.1)
                if not t.is_alive():
                    debug_refresh("[Parallel] Heuristic '%s' took %f..." % (t.name, time.time() - t.time))
                    del threads_list[i]
                    debug_refresh("[Parallel] Waiting for remaining %d thread(s) to finish..." % len(threads_list))
                    break

                t.join(0.1)
                # Past the deadline: interrupt the SQLite queries to force
                # the remaining workers to bail out.
                if time.time() - t.time > TIMEOUT_LIMIT:
                    do_cancel = True
                    try:
                        log_refresh("Timeout, cancelling queries...")
                        self.get_db().interrupt()
                    except:
                        print(("database.interrupt(): %s" % str(sys.exc_info()[1])))

            if times % 50 == 0:
                names = []
                for x in threads_list:
                    names.append(x.name)
                log_refresh("[Parallel] %d thread(s) still running:\n\n%s" % (len(threads_list), ", ".join(names)))
def ast_ratio(self, ast1, ast2):
if not self.relaxed_ratio:
return 0
return ast_ratio(ast1, ast2)
def check_ratio(self, ast1, ast2, pseudo1, pseudo2, asm1, asm2, md1, md2):
    """Compute the final similarity ratio (0.0..1.0) for a candidate match.

    Combines up to four partial ratios: v1 pseudo-code, v2 assembly, v3 AST
    primes (relaxed mode) and v4 an MD-Index based bonus. In relaxed mode
    the cheaper real_quick_ratio is used first and 1.0 results are
    re-checked with quick_ratio to weed out false positives.
    """
    fratio = quick_ratio
    decimal_values = "{0:.2f}"
    if self.relaxed_ratio:
        fratio = real_quick_ratio
        decimal_values = "{0:.1f}"

    v3 = 0
    ast_done = False
    # Only bother with the AST ratio for small signatures; bigger ones are
    # compared later with the generic fratio.
    if self.relaxed_ratio and ast1 is not None and ast2 is not None and max(len(ast1), len(ast2)) < 16:
        ast_done = True
        v3 = self.ast_ratio(ast1, ast2)
        if v3 == 1.0:
            return v3

    v1 = 0
    if pseudo1 is not None and pseudo2 is not None and pseudo1 != "" and pseudo2 != "":
        tmp1 = self.get_cmp_pseudo_lines(pseudo1)
        tmp2 = self.get_cmp_pseudo_lines(pseudo2)
        if tmp1 == "" or tmp2 == "":
            log("Error cleaning pseudo-code!")
        else:
            v1 = fratio(tmp1, tmp2)
            v1 = float(decimal_values.format(v1))
            if v1 == 1.0:
                # If real_quick_ratio returns 1 try again with quick_ratio
                # because it can result in false positives. If real_quick_ratio
                # says 'different', there is no point in continuing.
                if fratio == real_quick_ratio:
                    v1 = quick_ratio(tmp1, tmp2)
                    if v1 == 1.0:
                        return 1.0

    tmp_asm1 = self.get_cmp_asm_lines(asm1)
    tmp_asm2 = self.get_cmp_asm_lines(asm2)
    v2 = fratio(tmp_asm1, tmp_asm2)
    v2 = float(decimal_values.format(v2))
    if v2 == 1:
        # Actually, same as the quick_ratio/real_quick_ratio check done
        # with the pseudo-code
        if fratio == real_quick_ratio:
            v2 = quick_ratio(tmp_asm1, tmp_asm2)
            if v2 == 1.0:
                return 1.0

    if self.relaxed_ratio and not ast_done:
        v3 = fratio(ast1, ast2)
        v3 = float(decimal_values.format(v3))
        if v3 == 1:
            return 1.0

    v4 = 0.0
    if md1 == md2 and md1 > 0.0:
        # A MD-Index >= 10.0 is somehow rare
        if self.relaxed_ratio and md1 > 10.0:
            return 1.0
        # Equal MD-Index boosts the combined score towards 1.0.
        v4 = min((v1 + v2 + v3 + 3.0) / 5, 1.0)

    r = max(v1, v2, v3, v4)
    if r == 1.0 and md1 != md2:
        # We cannot assign a 1.0 ratio if both MD indices are different, that's an
        # error
        r = 0
        for v in [v1, v2, v3, v4]:
            if v != 1.0 and v > r:
                r = v

    return r
def all_functions_matched(self):
    """Return True when every function in either database has already
    been matched, i.e. there is nothing left to diff."""
    primary_done = len(self.matched1) == self.total_functions1
    secondary_done = len(self.matched2) == self.total_functions2
    return primary_done or secondary_done
def add_matches_from_query_ratio(self, sql, best, partial, unreliable=None, debug=False):
    """Execute @sql and add every returned row as a match.

    Matches with ratio 1.0 go to self.best_chooser, >= 0.5 to @partial
    and < 0.5 to @unreliable when given (to @partial otherwise).

    NOTE(review): the @best parameter is unused in the original as well;
    perfect matches always go to self.best_chooser. Kept for interface
    compatibility.
    """
    if self.all_functions_matched():
        return
    cur = self.db_cursor()
    try:
        cur.execute(sql)
    except Exception:
        # Was a bare 'except:'; narrowed, and the cursor is now closed on
        # this error path too (it used to leak).
        log("Error: %s" % str(sys.exc_info()[1]))
        cur.close()
        return
    i = 0
    t = time.time()
    try:
        # Respect both the configured row limit (0 = unlimited) and the
        # global per-heuristic timeout.
        while self.max_processed_rows == 0 or (self.max_processed_rows != 0 and i < self.max_processed_rows):
            if time.time() - t > self.timeout:
                log("Timeout")
                break
            i += 1
            if i % 50000 == 0:
                log("Processed %d rows..." % i)
            row = cur.fetchone()
            if row is None:
                break
            ea = str(row["ea"])
            name1 = row["name1"]
            ea2 = row["ea2"]
            name2 = row["name2"]
            desc = row["description"]
            pseudo1 = row["pseudo1"]
            pseudo2 = row["pseudo2"]
            asm1 = row["asm1"]
            asm2 = row["asm2"]
            ast1 = row["pseudo_primes1"]
            ast2 = row["pseudo_primes2"]
            bb1 = int(row["bb1"])
            bb2 = int(row["bb2"])
            md1 = row["md1"]
            md2 = row["md2"]
            if name1 in self.matched1 or name2 in self.matched2:
                continue
            r = self.check_ratio(ast1, ast2, pseudo1, pseudo2, asm1, asm2, md1, md2)
            if debug:
                # Was "%d", which truncated the float ratio to 0 or 1.
                print("0x%x 0x%x %f" % (int(ea), int(ea2), r))
            # Give project hooks a chance to veto or re-rate the match.
            should_add = True
            if self.hooks is not None:
                if 'on_match' in dir(self.hooks):
                    d1 = {"ea": ea, "bb": bb1, "name": name1, "ast": ast1, "pseudo": pseudo1, "asm": asm1, "md": md1}
                    # NOTE(review): d2 reuses 'ea' instead of 'ea2' — looks
                    # like a latent bug, but hooks may rely on it; confirm
                    # before changing.
                    d2 = {"ea": ea, "bb": bb2, "name": name2, "ast": ast2, "pseudo": pseudo2, "asm": asm2, "md": md2}
                    should_add, r = self.hooks.on_match(d1, d2, desc, r)
            if not should_add or name1 in self.matched1 or name2 in self.matched2:
                continue
            if r == 1.0:
                self.best_chooser.add_item(CChooser.Item(ea, name1, ea2, name2, desc, r, bb1, bb2))
                self.matched1.add(name1)
                self.matched2.add(name2)
            elif r >= 0.5:
                partial.add_item(CChooser.Item(ea, name1, ea2, name2, desc, r, bb1, bb2))
                self.matched1.add(name1)
                self.matched2.add(name2)
            elif r < 0.5 and unreliable is not None:
                unreliable.add_item(CChooser.Item(ea, name1, ea2, name2, desc, r, bb1, bb2))
                self.matched1.add(name1)
                self.matched2.add(name2)
            else:
                # r < 0.5 but no unreliable chooser given: record it as a
                # partial match rather than dropping it.
                partial.add_item(CChooser.Item(ea, name1, ea2, name2, desc, r, bb1, bb2))
                self.matched1.add(name1)
                self.matched2.add(name2)
    finally:
        # The cursor used to leak if anything inside the loop raised.
        cur.close()
def add_matches_from_query_ratio_max(self, sql, val):
    """Execute @sql and add each row as a match: ratio 1.0 goes to the
    best chooser, >= 0.5 to the partial one and (val, 0.5) to the
    unreliable one. Rows with ratio <= @val are discarded."""
    if self.all_functions_matched():
        return
    cur = self.db_cursor()
    try:
        cur.execute(sql)
    except Exception:
        # Was a bare 'except:'; narrowed, and the cursor is closed on this
        # error path too (it used to leak).
        log("Error: %s" % str(sys.exc_info()[1]))
        cur.close()
        return
    i = 0
    t = time.time()
    try:
        while self.max_processed_rows == 0 or (self.max_processed_rows != 0 and i < self.max_processed_rows):
            if time.time() - t > self.timeout:
                log("Timeout")
                break
            i += 1
            if i % 50000 == 0:
                log("Processed %d rows..." % i)
            row = cur.fetchone()
            if row is None:
                break
            ea = str(row["ea"])
            name1 = row["name1"]
            ea2 = row["ea2"]
            name2 = row["name2"]
            desc = row["description"]
            pseudo1 = row["pseudo1"]
            pseudo2 = row["pseudo2"]
            asm1 = row["asm1"]
            asm2 = row["asm2"]
            ast1 = row["pseudo_primes1"]
            ast2 = row["pseudo_primes2"]
            bb1 = int(row["bb1"])
            bb2 = int(row["bb2"])
            md1 = row["md1"]
            md2 = row["md2"]
            if name1 in self.matched1 or name2 in self.matched2:
                continue
            r = self.check_ratio(ast1, ast2, pseudo1, pseudo2, asm1, asm2, md1, md2)
            # Give project hooks a chance to veto or re-rate the match.
            should_add = True
            if self.hooks is not None:
                if 'on_match' in dir(self.hooks):
                    d1 = {"ea": ea, "bb": bb1, "name": name1, "ast": ast1, "pseudo": pseudo1, "asm": asm1, "md": md1}
                    # NOTE(review): d2 reuses 'ea' instead of 'ea2' — looks
                    # like a latent bug; confirm before changing.
                    d2 = {"ea": ea, "bb": bb2, "name": name2, "ast": ast2, "pseudo": pseudo2, "asm": asm2, "md": md2}
                    should_add, r = self.hooks.on_match(d1, d2, desc, r)
            if not should_add or name1 in self.matched1 or name2 in self.matched2:
                continue
            if r == 1.0:
                self.best_chooser.add_item(CChooser.Item(ea, name1, ea2, name2, desc, r, bb1, bb2))
                self.matched1.add(name1)
                self.matched2.add(name2)
            elif r >= 0.5:
                self.partial_chooser.add_item(CChooser.Item(ea, name1, ea2, name2, desc, r, bb1, bb2))
                self.matched1.add(name1)
                self.matched2.add(name2)
            elif r < 0.5 and r > val:
                self.unreliable_chooser.add_item(CChooser.Item(ea, name1, ea2, name2, desc, r, bb1, bb2))
                self.matched1.add(name1)
                self.matched2.add(name2)
            # else: ratio too low (<= val), the row is silently dropped.
    finally:
        # The cursor used to leak if anything inside the loop raised.
        cur.close()
def add_matches_from_cursor_ratio_max(self, cur, best, partial, val):
    """Consume the already-executed cursor @cur and add each row as a
    match: ratio 1.0 goes to self.best_chooser, > @val to @best and the
    rest to @partial (when @partial is not None).

    Returns the list of added matches as [0, ea_hex, name1, ea2, name2]
    entries, or None when everything was matched before starting. The
    cursor is owned by the caller and is intentionally not closed here.
    """
    if self.all_functions_matched():
        return
    matches = []
    i = 0
    t = time.time()
    # Respect both the configured row limit (0 = unlimited) and the global
    # per-heuristic timeout.
    while self.max_processed_rows == 0 or (self.max_processed_rows != 0 and i < self.max_processed_rows):
        if time.time() - t > self.timeout:
            log("Timeout")
            break
        i += 1
        if i % 50000 == 0:
            log("Processed %d rows..." % i)
        row = cur.fetchone()
        if row is None:
            break
        ea = str(row["ea"])
        name1 = row["name1"]
        ea2 = row["ea2"]
        name2 = row["name2"]
        desc = row["description"]
        pseudo1 = row["pseudo1"]
        pseudo2 = row["pseudo2"]
        asm1 = row["asm1"]
        asm2 = row["asm2"]
        ast1 = row["pseudo_primes1"]
        ast2 = row["pseudo_primes2"]
        bb1 = int(row["bb1"])
        bb2 = int(row["bb2"])
        md1 = row["md1"]
        md2 = row["md2"]
        if name1 in self.matched1 or name2 in self.matched2:
            continue
        r = self.check_ratio(ast1, ast2, pseudo1, pseudo2, asm1, asm2, md1, md2)
        # Give project hooks a chance to veto or re-rate the match.
        should_add = True
        if self.hooks is not None:
            if 'on_match' in dir(self.hooks):
                d1 = {"ea": ea, "bb": bb1, "name": name1, "ast": ast1, "pseudo": pseudo1, "asm": asm1, "md": md1}
                # NOTE(review): d2 reuses 'ea' instead of 'ea2' — looks like
                # a latent bug; confirm before changing.
                d2 = {"ea": ea, "bb": bb2, "name": name2, "ast": ast2, "pseudo": pseudo2, "asm": asm2, "md": md2}
                should_add, r = self.hooks.on_match(d1, d2, desc, r)
        if not should_add or name1 in self.matched1 or name2 in self.matched2:
            continue
        good_ratio = False
        if r == 1.0:
            item = CChooser.Item(ea, name1, ea2, name2, desc, r, bb1, bb2)
            good_ratio = True
            self.best_chooser.add_item(item)
            self.matched1.add(name1)
            self.matched2.add(name2)
        elif r > val:
            item = CChooser.Item(ea, name1, ea2, name2, desc, r, bb1, bb2)
            good_ratio = True
            best.add_item(item)
            self.matched1.add(name1)
            self.matched2.add(name2)
        elif partial is not None:
            item = CChooser.Item(ea, name1, ea2, name2, desc, r, bb1, bb2)
            good_ratio = True
            partial.add_item(item)
            self.matched1.add(name1)
            self.matched2.add(name2)
        # Matches recorded here can be used as seeds by the callgraph
        # heuristic (see find_callgraph_matches_from).
        if good_ratio:
            matches.append([0, "0x%x" % int(ea), name1, ea2, name2])
    return matches
def add_matches_from_query(self, sql, choose):
    """ Warning: use this *only* if the ratio is known to be 1.00

    Execute @sql and add every returned row to @choose with a fixed
    ratio of 1, without re-checking the similarity.
    """
    if self.all_functions_matched():
        return
    cur = self.db_cursor()
    try:
        cur.execute(sql)
    except Exception:
        # Was a bare 'except:'; narrowed, and the cursor is closed on this
        # error path too (it used to leak).
        log("Error: %s" % str(sys.exc_info()[1]))
        cur.close()
        return
    i = 0
    try:
        while 1:
            i += 1
            if i % 1000 == 0:
                log("Processed %d rows..." % i)
            row = cur.fetchone()
            if row is None:
                break
            ea = str(row["ea"])
            name1 = row["name1"]
            ea2 = str(row["ea2"])
            name2 = row["name2"]
            desc = row["description"]
            pseudo1 = row["pseudo1"]
            pseudo2 = row["pseudo2"]
            asm1 = row["asm1"]
            asm2 = row["asm2"]
            ast1 = row["pseudo_primes1"]
            ast2 = row["pseudo_primes2"]
            bb1 = int(row["bb1"])
            bb2 = int(row["bb2"])
            md1 = row["md1"]
            md2 = row["md2"]
            if name1 in self.matched1 or name2 in self.matched2:
                continue
            # Hooks may veto the match; the ratio they return is ignored
            # on purpose, as this helper always records ratio 1.
            should_add = True
            if self.hooks is not None:
                if 'on_match' in dir(self.hooks):
                    d1 = {"ea": ea, "bb": bb1, "name": name1, "ast": ast1, "pseudo": pseudo1, "asm": asm1, "md": md1}
                    # NOTE(review): d2 reuses 'ea' instead of 'ea2' — looks
                    # like a latent bug; confirm before changing.
                    d2 = {"ea": ea, "bb": bb2, "name": name2, "ast": ast2, "pseudo": pseudo2, "asm": asm2, "md": md2}
                    should_add, r = self.hooks.on_match(d1, d2, desc, 1.0)
            if not should_add or name1 in self.matched1 or name2 in self.matched2:
                continue
            choose.add_item(CChooser.Item(ea, name1, ea2, name2, desc, 1, bb1, bb2))
            self.matched1.add(name1)
            self.matched2.add(name2)
    finally:
        # The cursor used to leak if anything inside the loop raised.
        cur.close()
def search_small_differences(self, choose):
    """Find functions with identical structure (nodes, edges, mnemonics,
    cyclomatic complexity) whose referenced-names sets mostly overlap,
    and add them to @choose (or to the best chooser on a perfect ratio).
    """
    cur = self.db_cursor()
    try:
        # Same basic blocks, edges, mnemonics, etc... but different names
        sql = """ select distinct f.address ea, f.name name1, df.name name2,
                         f.names f_names, df.names df_names, df.address ea2,
                         f.nodes bb1, df.nodes bb2,
                         f.pseudocode pseudo1, df.pseudocode pseudo2,
                         f.assembly asm1, df.assembly asm2,
                         f.pseudocode_primes pseudo_primes1, df.pseudocode_primes pseudo_primes2,
                         cast(f.md_index as real) md1, cast(df.md_index as real) md2
                    from functions f,
                         diff.functions df
                   where f.nodes = df.nodes
                     and f.edges = df.edges
                     and f.mnemonics = df.mnemonics
                     and f.cyclomatic_complexity = df.cyclomatic_complexity
                     and f.names != '[]'"""
        cur.execute(sql)
        rows = result_iter(cur)
        for row in rows:
            ea = str(row["ea"])
            name1 = row["name1"]
            name2 = row["name2"]
            if name1 in self.matched1 or name2 in self.matched2:
                continue
            bb1 = int(row["bb1"])
            bb2 = int(row["bb2"])
            # Ratio of shared referenced names; the SQL filter guarantees
            # f.names is non-empty, so total >= 1.
            s1 = set(json.loads(row["f_names"]))
            s2 = set(json.loads(row["df_names"]))
            total = max(len(s1), len(s2))
            commons = len(s1.intersection(s2))
            ratio = (commons * 1.) / total
            if ratio >= 0.5:
                ea2 = row["ea2"]
                pseudo1 = row["pseudo1"]
                pseudo2 = row["pseudo2"]
                asm1 = row["asm1"]
                asm2 = row["asm2"]
                ast1 = row["pseudo_primes1"]
                ast2 = row["pseudo_primes2"]
                md1 = row["md1"]
                md2 = row["md2"]
                desc = "Nodes, edges, complexity and mnemonics with small differences"
                should_add = True
                if self.hooks is not None:
                    if 'on_match' in dir(self.hooks):
                        d1 = {"ea": ea, "bb": bb1, "name": name1, "ast": ast1, "pseudo": pseudo1, "asm": asm1, "md": md1}
                        # NOTE(review): d2 reuses 'ea' instead of 'ea2' —
                        # looks like a latent bug; confirm before changing.
                        d2 = {"ea": ea, "bb": bb2, "name": name2, "ast": ast2, "pseudo": pseudo2, "asm": asm2, "md": md2}
                        should_add, ratio = self.hooks.on_match(d1, d2, desc, ratio)
                if not should_add or name1 in self.matched1 or name2 in self.matched2:
                    continue
                item = CChooser.Item(ea, name1, ea2, name2, desc, ratio, bb1, bb2)
                if ratio == 1.0:
                    self.best_chooser.add_item(item)
                else:
                    choose.add_item(item)
                self.matched1.add(name1)
                self.matched2.add(name2)
    finally:
        # The cursor used to leak if the query or iteration raised.
        cur.close()
def find_same_name(self, choose):
    """Match functions sharing the same name or mangled name.

    Perfect ratios (or matching non-zero MD-Indexes in relaxed mode) go
    to the best chooser; everything else goes to @choose.
    """
    cur = self.db_cursor()
    sql = """select f.address ea1, f.mangled_function mangled1,
                    d.address ea2, f.name name, d.name name2,
                    d.mangled_function mangled2,
                    f.pseudocode pseudo1, d.pseudocode pseudo2,
                    f.assembly asm1, d.assembly asm2,
                    f.pseudocode_primes primes1,
                    d.pseudocode_primes primes2,
                    f.nodes bb1, d.nodes bb2,
                    cast(f.md_index as real) md1, cast(d.md_index as real) md2
               from functions f,
                    diff.functions d
              where (d.mangled_function = f.mangled_function
                  or d.name = f.name)
                and f.name not like 'nullsub_%'"""
    desc = "Perfect match, same name"
    log_refresh("Finding with heuristic '%s'" % desc)
    cur.execute(sql)
    rows = cur.fetchall()
    cur.close()
    if len(rows) > 0 and not self.all_functions_matched():
        for row in rows:
            ea = row["ea1"]
            name = row["mangled1"]
            ea2 = row["ea2"]
            name1 = row["name"]
            name2 = row["name2"]
            name2_1 = row["mangled2"]
            # Skip anything already matched under either its plain or its
            # mangled name.
            if name in self.matched1 or name1 in self.matched1 or \
               name2 in self.matched2 or name2_1 in self.matched2:
                continue
            if self.ignore_sub_names and name.startswith("sub_"):
                continue
            ast1 = row["primes1"]
            ast2 = row["primes2"]
            bb1 = int(row["bb1"])
            bb2 = int(row["bb2"])
            pseudo1 = row["pseudo1"]
            pseudo2 = row["pseudo2"]
            asm1 = row["asm1"]
            asm2 = row["asm2"]
            md1 = row["md1"]
            md2 = row["md2"]
            ratio = self.check_ratio(ast1, ast2, pseudo1, pseudo2, asm1, asm2, md1, md2)
            should_add = True
            if self.hooks is not None:
                if 'on_match' in dir(self.hooks):
                    d1 = {"ea": ea, "bb": bb1, "name": name1, "ast": ast1, "pseudo": pseudo1, "asm": asm1, "md": md1}
                    # NOTE(review): d2 reuses 'ea' instead of 'ea2' — looks
                    # like a latent bug; confirm before changing.
                    d2 = {"ea": ea, "bb": bb2, "name": name2, "ast": ast2, "pseudo": pseudo2, "asm": asm2, "md": md2}
                    should_add, ratio = self.hooks.on_match(d1, d2, desc, ratio)
            if not should_add or name1 in self.matched1 or name2 in self.matched2:
                continue
            # In relaxed mode, equal non-zero MD-Indexes are trusted as a
            # perfect match even when the text ratio is lower.
            if float(ratio) == 1.0 or (self.relaxed_ratio and md1 != 0 and md1 == md2):
                self.best_chooser.add_item(CChooser.Item(ea, name1, ea2, name2, desc, 1, bb1, bb2))
            else:
                choose.add_item(CChooser.Item(ea, name1, ea2, name2, desc, ratio, bb1, bb2))
            # Record all four names so neither side is matched twice.
            self.matched1.add(name)
            self.matched1.add(name1)
            self.matched2.add(name2)
            self.matched2.add(name2_1)
def get_function_id(self, name, primary=True):
    """Look up the row id of function @name in the primary ('main') or
    secondary ('diff') database. Returns None when not found."""
    db_name = "main" if primary else "diff"
    cur = self.db_cursor()
    try:
        cur.execute("select id from %s.functions where name = ?" % db_name, (name,))
        row = cur.fetchone()
        return row["id"] if row else None
    finally:
        cur.close()
def find_matches_in_hole(self, last, item, row):
    """Try to match functions inside a 'hole' of unmatched ids between
    two known matches, assuming both databases keep roughly the same
    function ordering (call address sequence heuristic)."""
    cur = self.db_cursor()
    try:
        postfix = ""
        if self.ignore_small_functions:
            postfix = " and instructions > 5"
        desc = "Call address sequence"
        id1 = row["id1"]
        id2 = row["id2"]
        sql = """ select * from functions where id = ? """ + postfix + """
                  union all
                  select * from diff.functions where id = ? """ + postfix
        # Minimum acceptable ratio: at most 0.6, or the anchor match's own
        # ratio when that is lower. (sic: 'thresold' kept as-is.)
        thresold = min(0.6, float(item[5]))
        # Explore up to 10x10 id combinations inside the hole.
        # NOTE(review): both ranges are bounded by (id1 - last); presumably
        # the second should relate to id2 — confirm before changing.
        for j in range(0, min(10, id1 - last)):
            for i in range(0, min(10, id1 - last)):
                cur.execute(sql, (id1+j, id2+i))
                rows = cur.fetchall()
                # Exactly one row from each database means both ids exist.
                if len(rows) == 2:
                    name1 = rows[0]["name"]
                    name2 = rows[1]["name"]
                    if name1 in self.matched1 or name2 in self.matched2:
                        continue
                    r = self.check_ratio(rows[0]["pseudocode_primes"], rows[1]["pseudocode_primes"], \
                                         rows[0]["pseudocode"], rows[1]["pseudocode"], \
                                         rows[0]["assembly"], rows[1]["assembly"], \
                                         float(rows[0]["md_index"]), float(rows[1]["md_index"]))
                    if r < 0.5:
                        # Identical, non-empty referenced-names sets bump the
                        # ratio just above the 0.5 acceptance line.
                        if rows[0]["names"] != "[]" and rows[0]["names"] == rows[1]["names"]:
                            r = 0.5001
                    if r > thresold:
                        ea = rows[0]["address"]
                        ea2 = rows[1]["address"]
                        bb1 = rows[0]["nodes"]
                        bb2 = rows[1]["nodes"]
                        ast1 = rows[0]["pseudocode_primes"]
                        ast2 = rows[1]["pseudocode_primes"]
                        pseudo1 = rows[0]["pseudocode"]
                        pseudo2 = rows[1]["pseudocode"]
                        asm1 = rows[0]["assembly"]
                        asm2 = rows[1]["assembly"]
                        md1 = rows[0]["md_index"]
                        md2 = rows[1]["md_index"]
                        # Pretty much every single heuristic fails with small functions,
                        # ignore them...
                        if bb1 <= 3 or bb2 <= 3:
                            continue
                        should_add = True
                        if self.hooks is not None:
                            if 'on_match' in dir(self.hooks):
                                d1 = {"ea": ea, "bb": bb1, "name": name1, "ast": ast1, "pseudo": pseudo1, "asm": asm1, "md": md1}
                                # NOTE(review): d2 reuses 'ea' instead of
                                # 'ea2' — confirm before changing.
                                d2 = {"ea": ea, "bb": bb2, "name": name2, "ast": ast2, "pseudo": pseudo2, "asm": asm2, "md": md2}
                                should_add, r = self.hooks.on_match(d1, d2, desc, r)
                        if not should_add or name1 in self.matched1 or name2 in self.matched2:
                            continue
                        if r == 1:
                            self.best_chooser.add_item(CChooser.Item(ea, name1, ea2, name2, desc, r, bb1, bb2))
                            self.matched1.add(name1)
                            self.matched2.add(name2)
                        elif r > 0.5:
                            self.partial_chooser.add_item(CChooser.Item(ea, name1, ea2, name2, desc, r, bb1, bb2))
                            self.matched1.add(name1)
                            self.matched2.add(name2)
                        else:
                            self.unreliable_chooser.add_item(CChooser.Item(ea, name1, ea2, name2, desc, r, bb1, bb2))
                            self.matched1.add(name1)
                            self.matched2.add(name2)
    finally:
        cur.close()
def find_from_matches(self, the_items):
    """Use existing matches (@the_items) as anchors: when consecutive
    function ids stop matching, search the resulting 'hole' for new
    matches via find_matches_in_hole()."""
    # XXX: FIXME: This is wrong in many ways, but still works... FIX IT!
    # Rule 1: if a function A in program P has id X, and function B in
    # the same program has id + 1, then, in program P2, function B maybe
    # the next function to A in P2.
    log_refresh("Finding with heuristic 'Call address sequence'")
    cur = self.db_cursor()
    try:
        # Create a copy of all the functions
        cur.execute("create temporary table best_matches (id, id1, ea1, name1, id2, ea2, name2)")
        # Insert each matched function into the temporary table
        i = 0
        for match in the_items:
            ea1 = match[1]
            name1 = match[2]
            ea2 = match[3]
            name2 = match[4]
            ratio = float(match[5])
            # Only trust matches with at least a 0.5 ratio as anchors.
            if ratio < 0.5:
                continue
            id1 = self.get_function_id(name1)
            id2 = self.get_function_id(name2, False)
            sql = """insert into best_matches (id, id1, ea1, name1, id2, ea2, name2)
                                       values (?, ?, ?, ?, ?, ?, ?)"""
            cur.execute(sql, (i, id1, str(ea1), name1, id2, str(ea2), name2))
            i += 1
        # Walk the anchors in id order; a gap in consecutive ids marks a
        # hole worth searching.
        last = None
        cur.execute("select * from best_matches order by id1 asc")
        for row in cur:
            row_id = row["id1"]
            if last is None or last+1 == row_id:
                last = row_id
                continue
            item = the_items[row["id"]]
            self.find_matches_in_hole(last, item, row)
            last = row_id
        cur.execute("drop table best_matches")
    finally:
        cur.close()
def find_callgraph_matches(self):
    """Run callgraph-based matching seeded first from the best matches
    (ratio floor 0.60) and then from the partial ones (floor 0.80)."""
    for chooser, min_ratio in ((self.best_chooser, 0.60),
                               (self.partial_chooser, 0.80)):
        self.find_callgraph_matches_from(list(chooser.items), min_ratio)
def find_callgraph_matches_from(self, the_items, min_value):
    """Use already-matched pairs in @the_items as seeds: compare the
    callers/callees of each pair and add structurally similar candidates
    (ratio above @min_value) as new matches. Newly found matches are
    queued as further seeds when self.unreliable is set."""
    sql = """select distinct f.address ea, f.name name1, df.address ea2, df.name name2,
                    'Callgraph match (%s)' description,
                    f.pseudocode pseudo1, df.pseudocode pseudo2,
                    f.assembly asm1, df.assembly asm2,
                    f.pseudocode_primes pseudo_primes1, df.pseudocode_primes pseudo_primes2,
                    f.nodes bb1, df.nodes bb2,
                    cast(f.md_index as real) md1, cast(df.md_index as real) md2,
                    df.tarjan_topological_sort, df.strongly_connected_spp
               from functions f,
                    diff.functions df
              where f.address in (%s)
                and df.address in (%s)
                and f.name not like 'nullsub_%%'
                and df.name not like 'nullsub_%%'
                and abs(f.md_index - df.md_index) < 1
                and ((f.nodes > 5 and df.nodes > 5)
                  or (f.instructions > 10 and df.instructions > 10))"""
    main_callers_sql = """select address from main.callgraph where func_id = ? and type = ?"""
    diff_callers_sql = """select address from diff.callgraph where func_id = ? and type = ?"""
    cur = self.db_cursor()
    dones = set()
    prev_best_matches = len(self.best_chooser.items)
    prev_part_matches = len(self.partial_chooser.items)
    total_dones = 0
    try:
        while len(the_items) > 0:
            total_dones += 1
            if total_dones % 1000 == 0:
                # Periodic progress report.
                log("Processed %d callgraph matches..." % total_dones)
                curr_best_matches = len(self.best_chooser.items)
                curr_part_matches = len(self.partial_chooser.items)
                fmt = "Queued item(s) %d, Best matches %d, Partial Matches %d (Previously %d and %d)"
                log(fmt % (len(the_items), curr_best_matches, curr_part_matches, prev_best_matches, prev_part_matches))
            match = the_items.pop()
            ea1 = match[1]
            name1 = match[2]
            name2 = match[4]
            # Process each primary address only once.
            if ea1 in dones:
                continue
            dones.add(ea1)
            id1 = self.get_function_id(name1)
            id2 = self.get_function_id(name2, False)
            for call_type in ['caller', 'callee']:
                # Collect the quoted addresses of related functions on both
                # sides to build the IN (...) clauses.
                cur.execute(main_callers_sql, (id1, call_type))
                main_address_set = set()
                for row in cur.fetchall():
                    main_address_set.add("'%s'" % row[0])
                cur.execute(diff_callers_sql, (id2, call_type))
                diff_address_set = set()
                for row in cur.fetchall():
                    diff_address_set.add("'%s'" % row[0])
                if len(main_address_set) > 0 and len(diff_address_set) > 0:
                    tname1 = name1.replace("'", "''")
                    tname2 = name2.replace("'", "''")
                    cur.execute(sql % (("%s of %s/%s" % (call_type, tname1, tname2)), ",".join(main_address_set), ",".join(diff_address_set)))
                    matches = self.add_matches_from_cursor_ratio_max(cur, self.partial_chooser, None, min_value)
                    if matches is not None and len(matches) > 0 and self.unreliable:
                        the_items.extend(matches)
    finally:
        # FIX: the cursor was never closed before.
        cur.close()
def find_matches_parallel(self):
    """Run all the heuristics in the 'Partial' category, then the
    small-names-difference search on top of them."""
    self.run_heuristics_for_category("Partial")
    # Search using some of the previous criterias but calculating the
    # edit distance
    log_refresh("Finding with heuristic 'Small names difference'")
    self.search_small_differences(self.partial_chooser)
def find_brute_force(self):
    """Brute-force heuristic: collect every still-unmatched function from
    both databases into a temp table, then pair them by equal (and large
    enough) MD-Index or KOKA hash, adding results as unreliable matches.
    """
    cur = self.db_cursor()
    try:
        sql = "create temp table unmatched(id integer null primary key, address, main)"
        cur.execute(sql)
        # Find functions not matched in the primary database
        sql = "select name, address from functions"
        cur.execute(sql)
        rows = cur.fetchall()
        if len(rows) > 0:
            for row in rows:
                name = row["name"]
                if name not in self.matched1:
                    ea = row[1]
                    sql = "insert into unmatched(address,main) values(?,?)"
                    cur.execute(sql, (ea, 1))
        # Find functions not matched in the secondary database
        sql = "select name, address from diff.functions"
        cur.execute(sql)
        rows = cur.fetchall()
        if len(rows) > 0:
            for row in rows:
                name = row["name"]
                if name not in self.matched2:
                    ea = row[1]
                    sql = "insert into unmatched(address,main) values(?,?)"
                    cur.execute(sql, (ea, 0))
    finally:
        # The cursor used to leak if anything above raised.
        cur.close()
    cur = self.db_cursor()
    try:
        sql = """select distinct f.address ea, f.name name1, df.address ea2, df.name name2,
                        'Brute forcing' description,
                        f.pseudocode pseudo1, df.pseudocode pseudo2,
                        f.assembly asm1, df.assembly asm2,
                        f.pseudocode_primes pseudo_primes1, df.pseudocode_primes pseudo_primes2,
                        f.nodes bb1, df.nodes bb2,
                        cast(f.md_index as real) md1, cast(df.md_index as real) md2,
                        df.tarjan_topological_sort, df.strongly_connected_spp
                   from functions f,
                        diff.functions df,
                        unmatched um
                  where ((f.address = um.address and um.main = 1)
                     or (df.address = um.address and um.main = 0))
                    and ((f.md_index = df.md_index
                    and f.md_index > 1 and df.md_index > 1)
                     or (f.kgh_hash = df.kgh_hash
                    and f.kgh_hash > 7 and df.kgh_hash > 7))"""
        cur.execute(sql)
        log_refresh("Finding via brute-forcing...")
        self.add_matches_from_cursor_ratio_max(cur, self.unreliable_chooser, None, 0.5)
    finally:
        # FIX: this second cursor was never closed before.
        cur.close()
def find_experimental_matches(self):
    """Run all the heuristics in the 'Experimental' category, then try
    brute-forcing the remaining unmatched functions."""
    self.run_heuristics_for_category("Experimental")
    # Find using brute-force
    log_refresh("Brute-forcing...")
    self.find_brute_force()
def find_unreliable_matches(self):
    """Run all the heuristics in the 'Unreliable' category."""
    self.run_heuristics_for_category("Unreliable")
def find_unmatched(self):
    """Populate the 'unmatched' choosers with the functions of each
    database that no heuristic managed to match."""
    cur = self.db_cursor()
    try:
        cur.execute("select name, address from functions")
        rows = cur.fetchall()
        if len(rows) > 0:
            choose = self.chooser("Unmatched in secondary", self, False)
            for row in rows:
                name = row["name"]
                if name not in self.matched1:
                    # FIX: use keyed access like the loop below (was row[1]).
                    ea = row["address"]
                    choose.add_item(CChooser.Item(ea, name))
            self.unmatched_second = choose
        cur.execute("select name, address from diff.functions")
        rows = cur.fetchall()
        if len(rows) > 0:
            choose = self.chooser("Unmatched in primary", self, False)
            for row in rows:
                name = row["name"]
                if name not in self.matched2:
                    ea = row["address"]
                    choose.add_item(CChooser.Item(ea, name))
            self.unmatched_primary = choose
    finally:
        # The cursor used to leak if anything above raised.
        cur.close()
def create_choosers(self):
    """Instantiate the result lists ("choosers") that the diffing
    process fills in: matched functions by confidence level, plus the
    unmatched functions of each database."""
    self.best_chooser = self.chooser("Best matches", self)
    self.partial_chooser = self.chooser("Partial matches", self)
    self.unreliable_chooser = self.chooser("Unreliable matches", self)
    self.unmatched_primary = self.chooser("Unmatched in primary", self, False)
    self.unmatched_second = self.chooser("Unmatched in secondary", self, False)
def save_results(self, filename):
    """Write the diffing results to the SQLite database @filename,
    removing any previous results file with the same name first."""
    if os.path.exists(filename):
        os.remove(filename)
        log("Previous diff results '%s' removed." % filename)
    results_db = sqlite3.connect(filename, check_same_thread=True)
    results_db.text_factory = str
    cur = results_db.cursor()
    try:
        # Schema: a config row describing this diff, the matches and the
        # unmatched functions.
        cur.execute("create table config (main_db text, diff_db text, version text, date text)")
        cur.execute("insert into config values (?, ?, ?, ?)",
                    (self.db_name, self.last_diff_db, VERSION_VALUE, time.asctime()))
        cur.execute("create table results (type, line, address, name, address2, name2, ratio, bb1, bb2, description)")
        cur.execute("create unique index uq_results on results(address, address2)")
        cur.execute("create table unmatched (type, line, address, name)")
        with results_db:
            results_sql = "insert or ignore into results values (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)"
            unmatched_sql = "insert into unmatched values (?, ?, ?, ?)"
            # Each chooser's items are stored tagged with their match type.
            match_sets = (('best', self.best_chooser),
                          ('partial', self.partial_chooser),
                          ('unreliable', self.unreliable_chooser))
            for match_type, chooser in match_sets:
                for item in chooser.items:
                    cur.execute(results_sql, [match_type] + list(item))
            unmatched_sets = (('primary', self.unmatched_primary),
                              ('secondary', self.unmatched_second))
            for match_type, chooser in unmatched_sets:
                for item in chooser.items:
                    cur.execute(unmatched_sql, [match_type] + list(item))
        log("Diffing results saved in file '%s'." % filename)
    finally:
        cur.close()
        results_db.close()
def try_attach(self, cur, db):
    """Best-effort attach of @db as schema 'diff' on cursor @cur.

    Failures (e.g. the database is already attached) are deliberately
    ignored; the exception was narrowed from a bare 'except:' so that
    exits like KeyboardInterrupt are no longer swallowed.
    """
    try:
        cur.execute('attach "%s" as diff' % db)
    except Exception:
        pass
def diff(self, db):
    """Diff the main database against the exported database @db.

    Returns False when @db is not a valid Diaphora export, True after a
    completed (or aborted-by-hooks) diffing session.
    """
    self.last_diff_db = db
    cur = self.db_cursor()
    self.try_attach(cur, db)
    try:
        cur.execute("select value from diff.version")
    except Exception:
        # Was a bare 'except:'; narrowed.
        log("Error: %s " % sys.exc_info()[1])
        log("The selected file does not look like a valid Diaphora exported database!")
        cur.close()
        return False
    row = cur.fetchone()
    if not row:
        log("Invalid database!")
        # FIX: the cursor used to leak on this early return.
        cur.close()
        return False
    if row["value"] != VERSION_VALUE:
        log("WARNING: The database is from a different version (current %s, database %s)!" % (VERSION_VALUE, row[0]))
    try:
        t0 = time.time()
        log_refresh("Diffing...", True)
        self.do_continue = True
        if self.equal_db():
            log("The databases seems to be 100% equal")
        # Hooks (or equal_db handling) may clear do_continue to abort.
        if self.do_continue:
            # Compare the call graphs
            self.check_callgraph()
            if self.project_script is not None:
                log("Loading project specific Python script...")
                if not self.load_hooks():
                    return False
            # Find the unmodified functions
            log_refresh("Finding best matches...")
            self.find_equal_matches_parallel()
            # Find the modified functions
            log_refresh("Finding partial matches")
            self.find_matches_parallel()
            # Call address sequence heuristic
            self.find_from_matches(self.best_chooser.items)
            if self.slow_heuristics:
                # Find the functions from the callgraph
                log_refresh("Finding with heuristic 'Callgraph matches'")
                self.find_callgraph_matches()
            if self.unreliable:
                # Find using likely unreliable methods modified functions
                log_refresh("Finding probably unreliable matches")
                self.find_unreliable_matches()
            if self.experimental:
                # Find using experimental methods modified functions
                log_refresh("Finding experimental matches")
                self.find_from_matches(self.partial_chooser.items)
                self.find_experimental_matches()
            # Show the list of unmatched functions in both databases
            log_refresh("Finding unmatched functions")
            self.find_unmatched()
            if self.hooks is not None:
                if 'on_finish' in dir(self.hooks):
                    self.hooks.on_finish()
            log("Done. Took {} seconds.".format(time.time() - t0))
    finally:
        cur.close()
    return True
if __name__ == "__main__":
    # Warn (but do not abort) when running under the unsupported Python 2.
    version_info = sys.version_info
    if version_info[0] == 2:
        log("WARNING: You are using Python 2 instead of Python 3. The main branch of Diaphora works exclusively with Python 3.")
        # FIX: typo in the user-facing message ("compatability").
        log("TIP: There are other branches that contain backward compatibility.")
    do_diff = True
    if os.getenv("DIAPHORA_AUTO_DIFF") is not None:
        # Fully automated mode: both databases and the output file come
        # from the environment.
        db1 = os.getenv("DIAPHORA_DB1")
        if db1 is None:
            raise Exception("No database file specified!")
        db2 = os.getenv("DIAPHORA_DB2")
        if db2 is None:
            raise Exception("No database file to diff against specified!")
        diff_out = os.getenv("DIAPHORA_DIFF_OUT")
        if diff_out is None:
            raise Exception("No output file for diff specified!")
    elif is_ida:
        # Running inside IDA: hand over to the IDA-specific driver script.
        diaphora_dir = os.path.dirname(__file__)
        script = os.path.join(diaphora_dir, "diaphora_ida.py")
        exec(compile(open(script, "rb").read(), script, 'exec'))
        do_diff = False
    else:
        # Plain command line usage.
        import argparse
        parser = argparse.ArgumentParser()
        parser.add_argument("db1")
        parser.add_argument("db2")
        parser.add_argument("-o", "--outfile", help="Write output to <outfile>")
        args = parser.parse_args()
        db1 = args.db1
        db2 = args.db2
        if args.outfile:
            diff_out = args.outfile
        else:
            # Default output name: <db1>_vs_<db2>.diaphora
            diff_out = "{}_vs_{}.diaphora".format(
                os.path.basename(os.path.splitext(db1)[0]),
                os.path.basename(os.path.splitext(db2)[0]))
    if do_diff:
        bd = CBinDiff(db1)
        if not is_ida:
            bd.ignore_all_names = False
        bd.db = sqlite3.connect(db1, check_same_thread=True)
        bd.db.text_factory = str
        # Row factory gives the name-keyed row access used everywhere.
        bd.db.row_factory = sqlite3.Row
        bd.diff(db2)
        bd.save_results(diff_out)
|
Transmission case is carefully dismantled to observe for signs of preexisting problems. The 4 mounting feet are placed on a surface plate to check for flatness. Machine if necessary.
Rear transmission mount is prone to cracking. We crack detect this area.
All bearings/bushings are removed — especially the countershaft bushings — even if they appear OK. These are usually a problem area from the factory and we take no risks.
Countershaft bores are checked for size.
Countershaft bores are checked for alignment to the main shaft bearing bore axis.
Main shaft, countershaft and shifter fork shaft are checked for straightness.
Cluster gear is either set up for bushings or ROLLER bearings. We do NOT use needle bearings, as these are prone to random failure. Roller bearings take 40% more loading than needle bearings and are ideal for heavy-use applications; in any case, this is definitely a worthwhile upgrade on any transmission.
Countershaft removal hole in right side of case is tapped for a 1/8 pipe thread so that a plug can be fitted, eliminating any chance of an oil leak.
Cluster gear end float set using a dial indicator.
Transmission case is carefully aligned to engine using jigs.
|
# -------------------------------------------------------------------------------
# Copyright IBM Corp. 2016
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -------------------------------------------------------------------------------
from pixiedust.display.display import Display
from pixiedust.display.chart.renderers.baseChartDisplay import BaseChartDisplay
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
from pixiedust.utils.shellAccess import ShellAccess
class HistogramDisplay(BaseChartDisplay):
    """Render a histogram of the deltaDeparture column of the 'training'
    Spark SQL table, restricted to values strictly between 12 and 50."""

    def doRender(self, handlerId):
        """Compute the histogram with Spark and draw it with matplotlib."""
        rdd = ShellAccess.sqlContext.sql("select deltaDeparture from training").map(lambda s: s.deltaDeparture)\
            .filter(lambda s: s < 50 and s > 12)
        # histo[0] holds the bin boundaries, histo[1] the per-bin counts.
        histo = rdd.histogram(50)
        bins = list(histo[0])
        fig, ax = plt.subplots(figsize=(12, 8))
        ax.set_ylabel('Number of records')
        ax.set_xlabel('Bin')
        plt.title('Histogram')
        intervals = [abs(j - i) for i, j in zip(bins[:-1], bins[1:])]
        # Running sum of the bin widths gives each bar's left edge.
        # FIX: the previous sum(intervals[:i]) comprehension was O(n^2).
        values = []
        offset = 0
        for width in intervals:
            values.append(offset)
            offset += width
        ax.bar(values, histo[1], intervals, color='b', label="Bins")
        # FIX: set_xticks() does not accept labels as a second positional
        # argument on older matplotlib; set ticks and labels separately.
        ax.set_xticks(bins[:-1])
        ax.set_xticklabels([int(i) for i in bins[:-1]])
        ax.legend()

    def doRenderChart(self):
        # Rendering is performed entirely in doRender().
        pass
|
Biodegradable materials are materials that are readily decomposed by the action of microorganisms. Biodegradable packaging differs from conventional non-degradable packaging in terms of raw materials, production technology, applications, and composting. Biodegradable materials such as bio-plastic and paper are widely used in packaging applications because of their sustainable nature, material properties, and appearance. Biodegradable packaging materials are used in all levels of packaging including primary, secondary and tertiary packaging.
TechNavio’s analysts forecast the Global Biodegradable Packaging Materials market to grow at a CAGR of 10.53 percent over the period 2013-2018.
This report covers the present scenario and the growth prospects of the Global Biodegradable Packaging Materials market for the period 2014-2018. The report provides data on the following segments of the Global Biodegradable Packaging Materials market based on product type, application, and geography.
TechNavio’s report, the Global Biodegradable Packaging Materials Market 2014-2018, has been prepared based on an in-depth market analysis with inputs from industry experts. The report covers the APAC region, Europe, North America, and the ROW; it also covers the Global Biodegradable Packaging Materials market landscape and its growth prospects in the coming years. The report also includes a discussion of the key vendors operating in this market.
|
#
# Copyright 2011 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# Refer to the README and COPYING files for full details of the license
#
#################################################################################
# Start off by implementing a general purpose event loop for anyones use
#################################################################################
import sys
import getopt
import os
import libvirt
import select
import time
import threading
#
# This general purpose event loop will support waiting for file handle
# I/O and errors events, as well as scheduling repeatable timers with
# a fixed interval.
#
# It is a pure python implementation based around the poll() API
#
class virEventLoopPure:
# This class contains the data we need to track for a
# single file handle
class virEventLoopPureHandle:
    """Bookkeeping for a single file-handle watch registered with the
    event loop: the watch ID, the OS file descriptor, the event mask of
    interest and the callback to dispatch."""
    def __init__(self, handle, fd, events, cb, opaque):
        # Watch ID handed back to the registering code.
        self.handle = handle
        # OS-level file descriptor being watched.
        self.fd = fd
        # Bitmask of events of interest (presumably libvirt
        # VIR_EVENT_HANDLE_* bits — confirm against callers).
        self.events = events
        # Callback plus its opaque data; dispatch() assumes opaque is a
        # 2-tuple and forwards both elements as separate arguments.
        self.cb = cb
        self.opaque = opaque
    def get_id(self):
        """Return the watch ID."""
        return self.handle
    def get_fd(self):
        """Return the watched file descriptor."""
        return self.fd
    def get_events(self):
        """Return the current event mask."""
        return self.events
    def set_events(self, events):
        """Replace the event mask to wait for."""
        self.events = events
    def dispatch(self, events):
        """Invoke the callback for the @events that actually fired."""
        self.cb(self.handle,
                self.fd,
                events,
                self.opaque[0],
                self.opaque[1])
# This class contains the data we need to track for a
# single periodic timer
class virEventLoopPureTimer:
def __init__(self, timer, interval, cb, opaque):
self.timer = timer
self.interval = interval
self.cb = cb
self.opaque = opaque
self.lastfired = 0
def get_id(self):
return self.timer
def get_interval(self):
return self.interval
def set_interval(self, interval):
self.interval = interval
def get_last_fired(self):
return self.lastfired
def set_last_fired(self, now):
self.lastfired = now
def dispatch(self):
self.cb(self.timer,
self.opaque[0],
self.opaque[1])
def __init__(self, debug=False):
self.debugOn = debug
self.poll = select.poll()
self.pipetrick = os.pipe()
self.pendingWakeup = False
self.runningPoll = False
self.nextHandleID = 1
self.nextTimerID = 1
self.handles = []
self.timers = []
self.quit = False
# The event loop can be used from multiple threads at once.
# Specifically while the main thread is sleeping in poll()
# waiting for events to occur, another thread may come along
# and add/update/remove a file handle, or timer. When this
# happens we need to interrupt the poll() sleep in the other
# thread, so that it'll see the file handle / timer changes.
#
# Using OS level signals for this is very unreliable and
# hard to implement correctly. Thus we use the real classic
# "self pipe" trick. A anonymous pipe, with one end registered
# with the event loop for input events. When we need to force
# the main thread out of a poll() sleep, we simple write a
# single byte of data to the other end of the pipe.
self.debug("Self pipe watch %d write %d" %(self.pipetrick[0], self.pipetrick[1]))
self.poll.register(self.pipetrick[0], select.POLLIN)
def debug(self, msg):
if self.debugOn:
print msg
# Calculate when the next timeout is due to occurr, returning
# the absolute timestamp for the next timeout, or 0 if there is
# no timeout due
def next_timeout(self):
next = 0
for t in self.timers:
last = t.get_last_fired()
interval = t.get_interval()
if interval < 0:
continue
if next == 0 or (last + interval) < next:
next = last + interval
return next
# Lookup a virEventLoopPureHandle object based on file descriptor
def get_handle_by_fd(self, fd):
for h in self.handles:
if h.get_fd() == fd:
return h
return None
# Lookup a virEventLoopPureHandle object based on its event loop ID
def get_handle_by_id(self, handleID):
for h in self.handles:
if h.get_id() == handleID:
return h
return None
# This is the heart of the event loop, performing one single
# iteration. It asks when the next timeout is due, and then
# calcuates the maximum amount of time it is able to sleep
# for in poll() pending file handle events.
#
# It then goes into the poll() sleep.
#
# When poll() returns, there will zero or more file handle
# events which need to be dispatched to registered callbacks
# It may also be time to fire some periodic timers.
#
# Due to the coarse granularity of schedular timeslices, if
# we ask for a sleep of 500ms in order to satisfy a timer, we
# may return upto 1 schedular timeslice early. So even though
# our sleep timeout was reached, the registered timer may not
# technically be at its expiry point. This leads to us going
# back around the loop with a crazy 5ms sleep. So when checking
# if timeouts are due, we allow a margin of 20ms, to avoid
# these pointless repeated tiny sleeps.
def run_once(self):
sleep = -1
self.runningPoll = True
next = self.next_timeout()
self.debug("Next timeout due at %d" % next)
if next > 0:
now = int(time.time() * 1000)
if now >= next:
sleep = 0
else:
sleep = (next - now) / 1000.0
self.debug("Poll with a sleep of %d" % sleep)
events = self.poll.poll(sleep)
# Dispatch any file handle events that occurred
for (fd, revents) in events:
# See if the events was from the self-pipe
# telling us to wake up. if so, then discard
# the data just continue
if fd == self.pipetrick[0]:
self.pendingWakeup = False
os.read(fd, 1)
continue
h = self.get_handle_by_fd(fd)
if h:
self.debug("Dispatch fd %d handle %d events %d" % (fd, h.get_id(), revents))
h.dispatch(self.events_from_poll(revents))
now = int(time.time() * 1000)
for t in self.timers:
interval = t.get_interval()
if interval < 0:
continue
want = t.get_last_fired() + interval
# Deduct 20ms, since schedular timeslice
# means we could be ever so slightly early
if now >= (want-20):
self.debug("Dispatch timer %d now %s want %s" % (t.get_id(), str(now), str(want)))
t.set_last_fired(now)
t.dispatch()
self.runningPoll = False
# Actually the event loop forever
def run_loop(self):
self.quit = False
while not self.quit:
self.run_once()
def interrupt(self):
if self.runningPoll and not self.pendingWakeup:
self.pendingWakeup = True
os.write(self.pipetrick[1], 'c')
# Registers a new file handle 'fd', monitoring for 'events' (libvirt
# event constants), firing the callback cb() when an event occurs.
# Returns a unique integer identier for this handle, that should be
# used to later update/remove it
def add_handle(self, fd, events, cb, opaque):
handleID = self.nextHandleID + 1
self.nextHandleID = self.nextHandleID + 1
h = self.virEventLoopPureHandle(handleID, fd, events, cb, opaque)
self.handles.append(h)
self.poll.register(fd, self.events_to_poll(events))
self.interrupt()
self.debug("Add handle %d fd %d events %d" % (handleID, fd, events))
return handleID
# Registers a new timer with periodic expiry at 'interval' ms,
# firing cb() each time the timer expires. If 'interval' is -1,
# then the timer is registered, but not enabled
# Returns a unique integer identier for this handle, that should be
# used to later update/remove it
def add_timer(self, interval, cb, opaque):
timerID = self.nextTimerID + 1
self.nextTimerID = self.nextTimerID + 1
h = self.virEventLoopPureTimer(timerID, interval, cb, opaque)
self.timers.append(h)
self.interrupt()
self.debug("Add timer %d interval %d" % (timerID, interval))
return timerID
# Change the set of events to be monitored on the file handle
def update_handle(self, handleID, events):
h = self.get_handle_by_id(handleID)
if h:
h.set_events(events)
self.poll.unregister(h.get_fd())
self.poll.register(h.get_fd(), self.events_to_poll(events))
self.interrupt()
self.debug("Update handle %d fd %d events %d" % (handleID, h.get_fd(), events))
# Change the periodic frequency of the timer
def update_timer(self, timerID, interval):
for h in self.timers:
if h.get_id() == timerID:
h.set_interval(interval);
self.interrupt()
self.debug("Update timer %d interval %d" % (timerID, interval))
break
# Stop monitoring for events on the file handle
def remove_handle(self, handleID):
handles = []
for h in self.handles:
if h.get_id() == handleID:
self.poll.unregister(h.get_fd())
self.debug("Remove handle %d fd %d" % (handleID, h.get_fd()))
else:
handles.append(h)
self.handles = handles
self.interrupt()
# Stop firing the periodic timer
def remove_timer(self, timerID):
timers = []
for h in self.timers:
if h.get_id() != timerID:
timers.append(h)
self.debug("Remove timer %d" % timerID)
self.timers = timers
self.interrupt()
# Convert from libvirt event constants, to poll() events constants
def events_to_poll(self, events):
ret = 0
if events & libvirt.VIR_EVENT_HANDLE_READABLE:
ret |= select.POLLIN
if events & libvirt.VIR_EVENT_HANDLE_WRITABLE:
ret |= select.POLLOUT
if events & libvirt.VIR_EVENT_HANDLE_ERROR:
ret |= select.POLLERR;
if events & libvirt.VIR_EVENT_HANDLE_HANGUP:
ret |= select.POLLHUP;
return ret
# Convert from poll() event constants, to libvirt events constants
def events_from_poll(self, events):
ret = 0;
if events & select.POLLIN:
ret |= libvirt.VIR_EVENT_HANDLE_READABLE;
if events & select.POLLOUT:
ret |= libvirt.VIR_EVENT_HANDLE_WRITABLE;
if events & select.POLLNVAL:
ret |= libvirt.VIR_EVENT_HANDLE_ERROR;
if events & select.POLLERR:
ret |= libvirt.VIR_EVENT_HANDLE_ERROR;
if events & select.POLLHUP:
ret |= libvirt.VIR_EVENT_HANDLE_HANGUP;
return ret;
###########################################################################
# Now glue an instance of the general event loop into libvirt's event loop
###########################################################################
# This single global instance of the event loop will be used for
# monitoring libvirt events
eventLoop = virEventLoopPure(debug=False)
# This keeps track of what thread is running the event loop,
# (if it is run in a background thread)
eventLoopThread = None
# These next set of 6 methods are the glue between the official
# libvirt events API, and our particular impl of the event loop
#
# There is no reason why the 'virEventLoopPure' has to be used.
# An application could easily may these 6 glue methods hook into
# another event loop such as GLib's, or something like the python
# Twisted event framework.
def virEventAddHandleImpl(fd, events, cb, opaque):
    """Glue: forward libvirt's add-handle request to the global event loop."""
    return eventLoop.add_handle(fd, events, cb, opaque)
def virEventUpdateHandleImpl(handleID, events):
    """Glue: forward libvirt's update-handle request to the global event loop."""
    return eventLoop.update_handle(handleID, events)
def virEventRemoveHandleImpl(handleID):
    """Glue: forward libvirt's remove-handle request to the global event loop."""
    return eventLoop.remove_handle(handleID)
def virEventAddTimerImpl(interval, cb, opaque):
    """Glue: forward libvirt's add-timer request to the global event loop."""
    return eventLoop.add_timer(interval, cb, opaque)
def virEventUpdateTimerImpl(timerID, interval):
    """Glue: forward libvirt's update-timer request to the global event loop."""
    return eventLoop.update_timer(timerID, interval)
def virEventRemoveTimerImpl(timerID):
    """Glue: forward libvirt's remove-timer request to the global event loop."""
    return eventLoop.remove_timer(timerID)
# This tells libvirt what event loop implementation it
# should use
def virEventLoopPureRegister():
    # Install the six glue callbacks that bridge libvirt's events API
    # onto our pure-python event loop
    libvirt.virEventRegisterImpl(virEventAddHandleImpl,
                                 virEventUpdateHandleImpl,
                                 virEventRemoveHandleImpl,
                                 virEventAddTimerImpl,
                                 virEventUpdateTimerImpl,
                                 virEventRemoveTimerImpl)
# Directly run the event loop in the current thread
def virEventLoopPureRun():
    """Block the calling thread running the global event loop until quit."""
    eventLoop.run_loop()
# Spawn a background thread to run the event loop
def virEventLoopPureStart():
    # Registers the implementation with libvirt, then runs the loop in a
    # daemon thread so it does not block interpreter exit
    global eventLoopThread
    virEventLoopPureRegister()
    eventLoopThread = threading.Thread(target=virEventLoopPureRun, name="libvirtEventLoop")
    eventLoopThread.setDaemon(True)
    eventLoopThread.start()
##########################################################################
# Everything that now follows is a simple demo of domain lifecycle events
##########################################################################
def eventToString(event):
    """Map a libvirt domain lifecycle event code to its human-readable name."""
    names = ("Added",
             "Removed",
             "Started",
             "Suspended",
             "Resumed",
             "Stopped",
             "Saved",
             "Restored")
    return names[event]
def myDomainEventCallback1 (conn, dom, event, detail, opaque):
    # Demo callback #1: log domain lifecycle events (legacy registration API)
    print "myDomainEventCallback1 EVENT: Domain %s(%s) %s %d" % (dom.name(), dom.ID(), eventToString(event), detail)
def myDomainEventCallback2 (conn, dom, event, detail, opaque):
    # Demo callback #2: log lifecycle events via the RegisterAny API
    print "myDomainEventCallback2 EVENT: Domain %s(%s) %s %d" % (dom.name(), dom.ID(), eventToString(event), detail)
def myDomainEventRebootCallback(conn, dom, opaque):
    # Demo callback: log domain reboot events
    print "myDomainEventRebootCallback: Domain %s(%s)" % (dom.name(), dom.ID())
def myDomainEventRTCChangeCallback(conn, dom, utcoffset, opaque):
    # Demo callback: log RTC change events with the new UTC offset
    print "myDomainEventRTCChangeCallback: Domain %s(%s) %d" % (dom.name(), dom.ID(), utcoffset)
def myDomainEventWatchdogCallback(conn, dom, action, opaque):
    # Demo callback: log watchdog events and the configured action code
    print "myDomainEventWatchdogCallback: Domain %s(%s) %d" % (dom.name(), dom.ID(), action)
def myDomainEventIOErrorCallback(conn, dom, srcpath, devalias, action, opaque):
    # Demo callback: log disk I/O error events with source path and device alias
    print "myDomainEventIOErrorCallback: Domain %s(%s) %s %s %d" % (dom.name(), dom.ID(), srcpath, devalias, action)
def myDomainEventGraphicsCallback(conn, dom, phase, localAddr, remoteAddr, authScheme, subject, opaque):
    # Demo callback: log graphics (e.g. VNC) connection phase changes
    print "myDomainEventGraphicsCallback: Domain %s(%s) %d %s" % (dom.name(), dom.ID(), phase, authScheme)
def usage():
    # Print command line help for this demo script
    print "usage: "+os.path.basename(sys.argv[0])+" [uri]"
    print " uri will default to qemu:///system"
def main():
    """Parse arguments, start the event loop thread, connect to libvirt,
    register the demo callbacks, then sleep forever while events print."""
    try:
        opts, args = getopt.getopt(sys.argv[1:], "h", ["help"] )
    except getopt.GetoptError, err:
        # print help information and exit:
        print str(err) # will print something like "option -a not recognized"
        usage()
        sys.exit(2)
    for o, a in opts:
        if o in ("-h", "--help"):
            usage()
            sys.exit()
    if len(sys.argv) > 1:
        uri = sys.argv[1]
    else:
        uri = "qemu:///system"
    print "Using uri:" + uri
    # Run a background thread with the event loop
    virEventLoopPureStart()
    vc = libvirt.open(uri)
    # Close connection on exit (to test cleanup paths)
    old_exitfunc = getattr(sys, 'exitfunc', None)
    def exit():
        # Chained exit hook: close our connection, then any prior hook
        print "Closing " + str(vc)
        vc.close()
        if (old_exitfunc): old_exitfunc()
    sys.exitfunc = exit
    # Add 2 callbacks to prove this works with more than just one
    vc.domainEventRegister(myDomainEventCallback1,None)
    vc.domainEventRegisterAny(None, libvirt.VIR_DOMAIN_EVENT_ID_LIFECYCLE, myDomainEventCallback2, None)
    vc.domainEventRegisterAny(None, libvirt.VIR_DOMAIN_EVENT_ID_REBOOT, myDomainEventRebootCallback, None)
    vc.domainEventRegisterAny(None, libvirt.VIR_DOMAIN_EVENT_ID_RTC_CHANGE, myDomainEventRTCChangeCallback, None)
    vc.domainEventRegisterAny(None, libvirt.VIR_DOMAIN_EVENT_ID_IO_ERROR, myDomainEventIOErrorCallback, None)
    vc.domainEventRegisterAny(None, libvirt.VIR_DOMAIN_EVENT_ID_WATCHDOG, myDomainEventWatchdogCallback, None)
    vc.domainEventRegisterAny(None, libvirt.VIR_DOMAIN_EVENT_ID_GRAPHICS, myDomainEventGraphicsCallback, None)
    # The rest of your app would go here normally, but for sake
    # of demo we'll just go to sleep. The other option is to
    # run the event loop in your main thread if your app is
    # totally event based.
    while 1:
        time.sleep(1)
|
Stunning pictures of famous views of New York on your wall: the New York skyline, the Flatiron Building, Grand Central Station, and the grand Central Park. New York images.
|
#!/usr/bin/env python
"""
Using navboxplus to perfectly control a motor sensed with only a cheap encoder.
Model-augmented state is: [position, velocity, drag/inertia, b/inertia, disturbance].
"""
from __future__ import division
import numpy as np; npl = np.linalg
import matplotlib.pyplot as plt
from navboxplus import NavBoxPlus
# Motor dynamics
def motor(x, u, wf, dt):
    """One Euler step of the motor model with process noise wf.

    State is [position, velocity, drag/inertia, b/inertia, disturbance];
    the parameter states are clamped at 0.5 to stay physical.
    """
    accel = x[4] + x[3]*u - x[2]*x[1]
    xdot = np.array([x[1], accel, 0, 0, 0])  # parameters "don't change" (we assume)
    xnext = x + xdot*dt + wf
    # prevent parameter drift into nonphysical values
    if xnext[2] < 0.5: xnext[2] = 0.5
    if xnext[3] < 0.5: xnext[3] = 0.5
    return xnext
# Encoder model (only noise in the form of discretization)
res = 512/360 # ticks/deg
z_per_t = 20 # samples/s
def encoder(x, u, wh):
    # Quantize the true angle x[0] down to whole encoder ticks
    return np.floor(res*x[0])
# True noise characteristics
wf0_true = np.array([0, 0, 0, 0, 0])
Cf_true = np.diag([0, 0, 1E-3, 1E-6, 0])
# Our guesses at the dynamics and sensor noise characteristics
# We cannot express any perfect confidence
wf0 = np.zeros(5)
Cf = np.diag([1E-7, 1E-4, 1E-3, 1E-6, 1E-2]) # disturbance is not really constant
wh0 = 0
Ch = 1 # because the encoder discretization acts like noise
# Simulation time domain (also chooses predict frequency)
T = 40 # s
dt = 0.05 # s
t = np.arange(0, T, dt) # s
i_per_z = int(1/(z_per_t*dt)) # iters/sample
assert 1/z_per_t >= dt # time between samples >= sim timestep ?
# Desired trajectory
# r = [180, 0] * np.ones((len(t), 2)) # setpoint, not much excitation information
rv = 0.5
r = 15*np.vstack((np.sin(rv*t), rv*np.cos(rv*t))).T # sinusoid, good excitation
# Unknown external disturbance (tracked as a state)
dist = 8*np.ones_like(t); dist[:len(t)//2] = 0 # sudden push
# dist = 3*np.cos(2*rv*(t+2)) + 3 # sinusoid
# Controller with feedback and feedforward based on estimated model
ulims = (-50, 50)
gains = 5*np.array([1, 1])
feedback = 0; feedforward = 0 # for externally recording these quantities
def controller(r, rnext, x, Cx, dt):
    """Feedback on [position, velocity] error plus model-based feedforward,
    saturated to ulims; also records both terms in module globals."""
    global feedback, feedforward
    feedback = gains.dot(r - x[:2])
    feedforward = (1/x[3]) * ((rnext[1] - r[1])/dt + x[2]*r[1] - x[4])
    return np.clip(feedback + feedforward, *ulims)
# State, estimate, covariance, measurement, and effort timeseries
x = np.zeros((len(t), 5))
xh = np.zeros((len(t), 5))
Cx = np.zeros((len(t), 5, 5))
z = np.zeros((len(t), 1))
u = np.zeros((len(t), 1))
uff = np.zeros((len(t), 1))
# Initial conditions (estimate xh deliberately far from truth x)
x[0] = [15, 0, 5, 2, dist[0]]
xh[0] = [-15, 10, 1, 1, 0]
Cx[0] = 10*np.eye(5)
u[0] = 0
uff[0] = 0
# Configure navboxplus
# (note that we will give a "smoothed" encoder model to capture its true behavior)
nav = NavBoxPlus(x0=np.copy(xh[0]),
                 Cx0=np.copy(Cx[0]),
                 g=controller,
                 f=motor,
                 hDict={'encoder': lambda x, u, wh: res*x[0] + wh},
                 n_r=2,
                 n_wf=5,
                 n_whDict={'encoder': 1})
# Simulation
for i, ti in enumerate(t[1:]):
    # Choose control and predict next state
    # NOTE(review): npl is np.linalg, so npl.linalg relies on the legacy
    # numpy.linalg.linalg submodule; np.linalg.LinAlgError is the portable
    # spelling — verify against the numpy version in use.
    try:
        u[i+1] = nav.predict(r[i], r[i+1], wf0, Cf, dt)
        uff[i+1] = feedforward
    except npl.linalg.LinAlgError:
        print("Cholesky failed in predict!")
        break
    # Advance true state using control
    wf = np.random.multivariate_normal(wf0_true, Cf_true)
    x[i+1] = motor(x[i], u[i+1], wf, dt)
    x[i+1, 4] = dist[i+1] # update disturbance
    # When new measurement comes in...
    if i % i_per_z == 0:
        # Get new measurement from real world
        z[i+1] = encoder(x[i+1], 0, 0)
        # Update state estimate
        try:
            nav.correct('encoder', z[i+1], wh0, Ch)
        except npl.linalg.LinAlgError:
            print("Cholesky failed in correct!")
            break
    # ...otherwise hold last measurement (for plotting only)
    else:
        z[i+1] = np.copy(z[i])
    # Record new estimate
    xh[i+1], Cx[i+1] = nav.get_state_and_cov()
# Just checkin...
if not nav.is_pdef(nav.Cx):
    print("WOAH your state estimate covariance is not posdef, how'd that happen?\n")
print("Final state estimate covariance:")
print(np.round(nav.Cx, 3))
#### Plots
# Figure 1: each state (truth vs estimate vs desired) plus control effort
fig1 = plt.figure()
fig1.suptitle("Estimation and Tracking via Online UKF-Learned Model", fontsize=22)
ax1 = fig1.add_subplot(6, 1, 1)
ax1.plot(t[:i], x[:i, 0], label="true", color='g', lw=3)
ax1.plot(t[:i], xh[:i, 0], label="estimate", color='k', ls=':', lw=3)
ax1.plot(t[:i], r[:i, 0], label="desired", color='r', ls='--')
ax1.set_xlim([0, ti])
ax1.set_ylabel("position\ndeg", fontsize=12)
ax1.legend(loc='upper right')
ax1.grid(True)
ax1 = fig1.add_subplot(6, 1, 2)
ax1.plot(t[:i], x[:i, 1], label="true", color='g', lw=3)
ax1.plot(t[:i], xh[:i, 1], label="estimate", color='k', ls=':', lw=3)
ax1.plot(t[:i], r[:i, 1], label="desired", color='r', ls='--')
ax1.set_xlim([0, ti])
ax1.set_ylabel("velocity\ndeg/s", fontsize=12)
ax1.grid(True)
ax1 = fig1.add_subplot(6, 1, 3)
ax1.plot(t[:i], x[:i, 2], label="true", color='g', lw=3)
ax1.plot(t[:i], xh[:i, 2], label="estimate", color='k', ls=':', lw=3)
ax1.set_xlim([0, ti])
ax1.set_ylabel("drag/inertia\n(deg/s^2)/(deg/s)", fontsize=12)
ax1.grid(True)
ax1 = fig1.add_subplot(6, 1, 4)
ax1.plot(t[:i], x[:i, 3], label="true", color='g', lw=3)
ax1.plot(t[:i], xh[:i, 3], label="estimate", color='k', ls=':', lw=3)
ax1.set_xlim([0, ti])
ax1.set_ylabel("b/inertia\n(deg/s^2)/V", fontsize=12)
ax1.grid(True)
ax1 = fig1.add_subplot(6, 1, 5)
ax1.plot(t[:i], x[:i, 4], label="true", color='g', lw=3)
ax1.plot(t[:i], xh[:i, 4], label="estimate", color='k', ls=':', lw=3)
ax1.set_xlim([0, ti])
ax1.set_ylabel("disturbance\ndeg/s^2", fontsize=12)
ax1.grid(True)
ax1 = fig1.add_subplot(6, 1, 6)
ax1.plot(t[:i], u[:i], label="total", color='r', lw=3)
ax1.plot(t[:i], uff[:i], label="feedforward", color='b', ls='--', lw=2)
ax1.set_xlim([0, ti])
ax1.set_ylabel("effort\nV", fontsize=12)
ax1.set_xlabel("time\ns", fontsize=12)
ax1.legend(loc='upper right')
ax1.grid(True)
# Figure 2: covariance diagonals over time
fig2 = plt.figure()
fig2.suptitle("Covariance Diagonals", fontsize=22)
ax2 = fig2.add_subplot(1, 1, 1)
# Fixed for py2/py3 compatibility: np.array(map(...)) produced a useless
# 0-d object array on python 3 (map is lazy there) and xrange does not
# exist on python 3; the comprehension and range behave identically on
# python 2, so this remains equivalent.
dvs = np.array([np.diag(c) for c in Cx[:i]])
for xi in range(len(x[0])):
    ax2.plot(t[:i], dvs[:, xi], label="State {}".format(xi))
ax2.set_xlim([0, ti])
ax2.set_ylabel("value", fontsize=16)
ax2.set_xlabel("time\ns", fontsize=16)
ax2.legend(loc='upper right')
ax2.grid(True)
# Figure 3: raw encoder tick measurements
fig3 = plt.figure()
fig3.suptitle("Absolute Encoder Measurements", fontsize=22)
ax3 = fig3.add_subplot(1, 1, 1)
ax3.plot(t[:i], z[:i], color='b', lw=2)
ax3.set_xlim([0, ti])
ax3.set_ylabel("ticks", fontsize=16)
ax3.set_xlabel("time\ns", fontsize=16)
ax3.grid(True)
plt.show()
|
Kim, S., Spielberg, F., Mauksch, L., Farber, S., Duong, C., Fitch, W., & Greer, T. (2009). Comparing narrative and multiple-choice formats in online communication skill assessment. Medical Education, 43(6), 533-541.
|
# -*- coding:utf-8 -*-
"""
导出Model信息
待优化的点:加个filter过滤功能
"""
import time
import xlwt
from django.apps import apps
from django.http.response import HttpResponse
SECRET_FIELDS = ["admin_pwd", "password"]
def field_can_export(field):
    """Return True when the named field may be exported (i.e. is not secret).

    :param field: field name string
    :return: bool
    """
    return field not in SECRET_FIELDS
def get_export_model(app_label, model_name):
    """Resolve the model class to export.

    :param app_label: Django app label
    :param model_name: model name (case-insensitive)
    :return: the model class, or None when the app/model cannot be found
    """
    try:
        return apps.get_model(app_label=app_label, model_name=model_name)
    except Exception:
        # apps.get_model raises for unknown app/model names; report "not found"
        return None
def get_fields_verbosename(model, fields):
    """
    Resolve the display (column header) name for each requested field.

    :param model: model class whose _meta.fields provide verbose_name fallbacks
    :param fields: list of dicts like {"name": ..., "verbose_name": ..., "many": ...}
    :return: list of header names, one per entry in `fields`
    :raises Exception: when a name matches no field on the model
    """
    # 1. the model's concrete fields supply verbose_name fallbacks
    model_fields = model._meta.fields
    # 2. resolve a header for every requested field
    fields_names = []
    for field in fields:
        find_field_flag = False
        if "verbose_name" in field:
            # an explicit verbose_name override always wins
            fields_names.append(field["verbose_name"])
            find_field_flag = True
        elif "many" in field and field["many"]:
            # many-to-many style fields have no column in _meta.fields, so
            # use the raw name.  Fixed: this previously tested the misspelled
            # key "manay" while reading field["many"], so the branch could
            # never be taken correctly (and could raise KeyError).
            fields_names.append(field["name"])
            find_field_flag = True
        else:
            for model_field in model_fields:
                if model_field.name == field["name"]:
                    verbose_name = model_field.verbose_name
                    if verbose_name:
                        fields_names.append(verbose_name)
                    else:
                        fields_names.append(field["name"])
                    # stop at the first matching model field
                    find_field_flag = True
                    break
        if not find_field_flag:
            raise Exception("没找到{}".format(field["name"]))
    return fields_names
def get_obj_fields_data(obj, fields):
    """
    Collect the value of each requested field from one model instance.

    :param obj: model instance to read from
    :param fields: list of dicts like {"name": ..., "many": ...}; a name may
        be dotted (e.g. "article.user.username") to walk through relations
    :return: list of stringified values, one per requested field
    """
    values = []
    # handle every requested field in turn
    for field in fields:
        # Step 1: fields that must not be exported are masked out
        if not field_can_export(field["name"]):
            values.append("保密字段")
            continue
        # move on to the next field
        # Step 2: extract this field's data
        # 2-1: "name" is the field name; many-to-many fields must set "many"
        name = field["name"]
        many = True if "many" in field and field["many"] else False
        # A dotted name walks related objects,
        # e.g. "article.user.username" is the article author's username
        name_split = name.split('.')
        length = len(name_split)
        # 2-2: resolve the first-level attribute
        value_levl_1 = getattr(obj, name_split[0])
        if length > 1:
            # more than one level: keep descending through the relations
            if many:
                # multi-valued: take the related QuerySet via .all()
                value_levl_1_all = value_levl_1.all()
            else:
                # single-valued: wrap in a list so iteration is uniform
                value_levl_1_all = [value_levl_1]
            values_list = []
            for obj_i in value_levl_1_all:
                v = ""
                obj_i_tmp = obj_i
                # v will hold the final resolved value
                for f in name_split[1:]:
                    # f is one dotted component, e.g. of "article.user.username"
                    try:
                        v = getattr(obj_i_tmp, f)
                        if v:
                            obj_i_tmp = v
                    except AttributeError:
                        # print(obj_i_tmp, f)
                        # fall back to dict-style access when not an attribute
                        try:
                            v = obj_i_tmp.get(f, None)
                            if v:
                                obj_i_tmp = v
                        except Exception:
                            v = "---"
                    # the loop leaves v holding the last level's value
                if v:
                    # NOTE(review): v is appended unconverted; ",".join below
                    # would raise for non-string values — verify inputs.
                    values_list.append(v)
            # join the collected values with commas
            # (could be made configurable later; comma is the default)
            if values_list:
                value = ",".join(values_list)
            else:
                value = "---"
        else:
            # no dot in the name: take the first-level value directly.
            # Undotted names are treated as single-valued; many-to-many
            # fields must use a dotted multi-level name.
            value = value_levl_1
        value = str(value)
        # record this field's value
        values.append(value)
    # Step 3: return this object's values for this set of fields
    return values
def exports_data_to_excel(data, filename=None):
    """
    Write rows of data into an xls workbook and return it as an HTTP response.

    :param data: iterable of rows, each an iterable of cell values
    :param filename: attachment file name; auto-generated from the current
        timestamp when omitted
    :return: HttpResponse carrying the xls attachment
    """
    # Build the workbook with a single sheet
    wbook = xlwt.Workbook(encoding="utf-8", style_compression=0)
    wsheet = wbook.add_sheet(sheetname="导出数据")
    # Every cell is written as text
    for row, line in enumerate(data):
        for colum, value in enumerate(line):
            wsheet.write(row, colum, str(value))
    if not filename:
        # no name supplied: derive one from the current timestamp
        filename = "{}.xls".format(time.strftime("%Y%m%d%H%M%S"))
    # Stream the workbook straight into the HTTP response
    # (saving to a local file would be: wbook.save(filename_or_stream=filename))
    response = HttpResponse(content_type='application/ms-excel')
    response['Content-Disposition'] = 'attachment; filename="{}"'.format(filename)
    wbook.save(filename_or_stream=response)
    return response
def get_export_data(app_label, model_name, fields, filters=None):
    """
    Build the export: resolve the model, gather rows, and return an xls response.

    :param app_label: Django app label
    :param model_name: model name (case-insensitive)
    :param fields: list of field description dicts
    :param filters: optional list like [{"name": "id", "flag": "__lt", "value": 15}]
    :return: HttpResponse with the xls attachment, or False if the model is unknown
    """
    # Step 1: resolve the model
    model = get_export_model(app_label, model_name)
    if not model:
        return False
    # Step 2: fetch the model's data
    # 2-1: start from all objects
    objs = model.objects.all()
    # 2-2: resolve the column headers (verbose names) for the fields
    fields_verbose_name_list = get_fields_verbosename(model=model, fields=fields)
    # print(fields_verbose_name_list)
    # 2-3: apply the filters
    # [{"name": "id", flag: "__lt", value: ""}]
    if isinstance(filters, list):
        kwargs = {}
        for _filter in filters:
            filter_name = _filter["name"]
            if _filter["flag"]:
                # append the lookup suffix, e.g. "id" + "__lt" -> "id__lt"
                filter_name += _filter["flag"]
            filter_value = _filter["value"]
            # accumulate this condition into the queryset kwargs
            kwargs[filter_name] = filter_value
        objs = objs.filter(**kwargs)
    data = [fields_verbose_name_list]
    # 2-4: collect each object's row of values
    for obj in objs:
        values = get_obj_fields_data(obj, fields)
        # print(values)
        data.append(values)
    # Step 3: write all rows into an excel response
    # print(data)
    response = exports_data_to_excel(data)
    return response
def test_export():
    """Manual smoke test: export UserProfile rows with id < 15 to xls."""
    fields = [
        {"name": "id"},
        {"name": "username", "verbose_name": "用户名"},
        {"name": "nick_name", "verbose_name": "昵称"},
        {"name": "last_login"},
        {"name": "groups.name", "many": True, "verbose_name": "组"},
    ]
    filters = [
        {"name": "id", "flag": "__lt", "value": 15}
    ]
    return get_export_data("account", "UserProfile", fields, filters)
|
In order to get her comfortable on her bike, I took her out to a place free of Masshole drivers and the dangers of Boston streets. This weekend provided the perfect opportunity to head out to the Minuteman trail for some casual riding as well as to give her a chance to get acclimated to her new ride. The trail is located right off Mass ave and is a pretty flat ride with minimal pedestrian traffic around this time of the year.
Once we made our way through Cambridge to the start of the trail, we stopped for a quick snack of homemade blueberry & chocolate coconut rice cakes before heading down the path.
Our bikes are also now best friends.
Towards the end of the trail, Selma really wanted to see Walden Pond so we took a detour and headed over to Concord. Along the way, we passed Thoreau’s birthplace so we had to stop for some pictures. Apparently this guy is a big deal.
Once we arrived in Concord, we stopped by the Main Streets Market & Cafe to grab a bite to eat. It’s a hopping restaurant located right in the middle of downtown Concord and serves amazing coffee and pastries. We always make it a point to stop here for snacks with the cycling team whenever we come out this way. Additionally, a lot of cyclists come through here so it’s one of the only places I feel comfortable leaving my bike unlocked for an extended period of time.
By this point we were starving so we ordered egg and cheese croissant sandwiches, a poppy seed muffin and a large iced coffee to share.
The muffin was so moist and zesty.
And the croissant sandwich was fluffy, cheesy, and buttery.
Best of all, everything came to a grand total of only $12. All of this would have been ~$20 easily back in the city. Plus, people out here are really friendly and inviting. Bottom line, Concord is awesome.
Nevertheless, we took in the scenery and enjoyed some down time before the ride home.
Altogether, we went a little over 45 miles!!! I was so proud of her for making it all the way out and back on her very first ride. Her legs were definitely feeling it over the next few days, but it was totally worth it.
|
from __future__ import absolute_import
from __future__ import print_function
import six
import rake
import operator
import io
import csv
import os
import MySQLdb
import collections
import gc
import time
from os import system
import formatter
import htmllib
import cStringIO
# Pull in chats from MySQL
db = MySQLdb.connect(host="127.0.0.1", port=3306, user="USERNAME", passwd="PASSWORD", db="DBNAME")
cursor = db.cursor()
# Drop known status/noise rows before extracting text
cleanup = "DELETE FROM tablename WHERE columnname LIKE '%Text to clean up%'"
cursor.execute(cleanup)
db.commit()
print('Database cleaned of status messages')
cursor.execute("SELECT DISTINCT columnname->\"$.text\" FROM tablename")
# rows = cursor.fetchall()
# Flatten the 1-tuples returned by fetchall() into plain strings
rows = [item[0] for item in cursor.fetchall()]
# Clean up MySQLdb's weirdness with tuples
rows = [row.replace('"','') for row in rows]
rows = [row.replace('\n',' ') for row in rows]
# Output to a plaintext file
sqloutput = open('sqloutput.txt', 'w')
for row in rows:
    sqloutput.write("%s\n" % row)
print('Printed chat messages to text file')
# Clean up HTML
print('Cleaning up HTML tags')
# NOTE(review): the write handle above is never closed/flushed before the
# file is reopened for reading — verify the data is fully on disk here.
sqloutput = open('sqloutput.txt', 'r')
dirtytext = sqloutput.read()
# Strip HTML by feeding the text through the (python 2) htmllib parser
outstream = cStringIO.StringIO()
parser = htmllib.HTMLParser(formatter.AbstractFormatter(formatter.DumbWriter(outstream)))
parser.feed(dirtytext)
cleantext = outstream.getvalue()
outstream.close()
print('Rewriting cleaned text back to file')
sqloutput = open('sqloutput.txt', 'w')
sqloutput.write(cleantext)
# Garbage collection so the database connections will close properly
db.close()
gc.collect()
# Chill for a bit to make sure the file is done writing
print('Thinking...')
time.sleep(5)
print('Calculationating...')
# Set the stopwords list
stoppath = "SmartStoplist.txt"
# 1. initialize RAKE by providing a path to a stopwords file
rake_object = rake.Rake(stoppath, 3, 3, 5)
# 2. run on RAKE on a given text
sample_file = io.open("sqloutput.txt", 'r',encoding="iso-8859-1")
text = sample_file.read().encode('utf-8')
keywords = rake_object.run(text)
# 3. Print results to screen
print("Keywords:", keywords)
print("----------")
# 4. Print results to CSV
print("Writing results to CSV.")
def WriteListToCSV(csv_file,csv_columns,data_list):
    """Write a header row plus data rows to csv_file, reporting I/O errors."""
    try:
        with open(csv_file, 'w') as csvfile:
            writer = csv.writer(csvfile, dialect='excel', quoting=csv.QUOTE_NONNUMERIC)
            writer.writerow(csv_columns)
            for data in data_list:
                writer.writerow(data)
    # NOTE: python-2-only tuple-unpacking except syntax (consistent with
    # this script's other py2-only modules: htmllib, cStringIO)
    except IOError as (errno, strerror):
        print("I/O error({0}): {1}".format(errno, strerror))
    return
csv_columns = ['Keyword','Score']
# Line 144 of rake.py rounds the score to 5 decimal places: word_score[item] = round(word_prescore, 5)
# NOTE(review): currentPath is computed but never used
currentPath = os.getcwd()
csv_file = os.path.join("output","keywords.csv")
WriteListToCSV(csv_file,csv_columns,keywords)
print("Done!")
# #### More examples ####
#
# # Split text into sentences
# sentenceList = rake.split_sentences(text)
#
# # Outputs detected sentences to screen
# # for sentence in sentenceList:
# # print("Sentence:", sentence)
#
# ## Outputs detected phrases, candidates, and top 1/3rd scoring keywords to screen.
#
# # generate candidate keywords
# print(" ")
# print("----------")
# print("Phrases")
# print("----------")
# stopwordpattern = rake.build_stop_word_regex(stoppath)
# phraseList = rake.generate_candidate_keywords(sentenceList, stopwordpattern)
# for phrase in phraseList:
# # print("Phrases:", phraseList)
# print("Phrases: ", phrase)
#
# # calculate individual word scores
# wordscores = rake.calculate_word_scores(phraseList)
#
# # generate candidate keyword scores
# print(" ")
# print("----------")
# print("Candidates")
# print("----------")
# keywordcandidates = rake.generate_candidate_keyword_scores(phraseList, wordscores)
# for candidate in keywordcandidates.keys():
# print("Candidate: ", candidate, ", score: ", keywordcandidates.get(candidate))
#
# # sort candidates by score to determine top-scoring keywords
# sortedKeywords = sorted(six.iteritems(keywordcandidates), key=operator.itemgetter(1), reverse=True)
# totalKeywords = len(sortedKeywords)
#
# # for example, you could just take the top third as the final keywords
# print(" ")
# print("----------")
# print("Top Third")
# print("----------")
# for keyword in sortedKeywords[0:int(totalKeywords / 10)]:
# print("Keyword: ", keyword[0], " Score: ", keyword[1])
|
This delightful detached house is located in Barnet. We were approached by the client to solve their need for more space, which resulted in expanding the internal space with a rear extension and creating a more suitable kitchen layout and dining space. New fittings were proposed and efficiently built in this project. Daylight was something the existing corner of the house lacked, so to improve this we proposed a roof light on the new extension and sliding doors at its corner, creating an interesting connection between the interior of the house and the outdoors. All the materials used were chosen to match the existing textures and characteristics of this dwelling.
|
# Copyright (C) 2012 Claudio "nex" Guarnieri (@botherder)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from lib.cuckoo.common.abstracts import Signature
class SystemInfo(Signature):
    """Detects execution of system-reconnaissance commands.

    Flags process-creation calls where cmd.exe is launched with a command
    line invoking systeminfo, ipconfig or netstat.
    """
    name = "recon_systeminfo"
    description = "Collects information on the system (ipconfig, netstat, systeminfo)"
    severity = 3
    categories = ["recon"]
    authors = ["nex"]
    minimum = "1.0"
    evented = True

    def on_call(self, call, process):
        # Bug fix: the original pattern used a character class
        # "[(systeminfo)|(ipconfig)|(netstat)]", which matches any SINGLE
        # character from that set (e.g. "s", "(", "|") rather than the
        # command names. Use a real alternation group instead, and a raw
        # string so "\." is not subject to Python escape processing.
        return self.check_argument_call(
            call,
            pattern=r"(^cmd\.exe).*((systeminfo)|(ipconfig)|(netstat))",
            name="CommandLine",
            category="process",
            regex=True
        )
|
Excuse me while I take a break from the Artfest love, so I can show you the random weirdness we encountered yesterday during our shopping trip to Costco.
They came to Costco in this demure little putt-putt.
Yes, it's an H2 Hummer. With flags on every corner. Because toxic orange cars are so hard to pick out in the parking lot.
Oh, but the madness doesn't stop there. It stops at this house and the accompanying car.
We've seen this geodesic dome-style house before, and I like that it's such a throwback to the 70s. But I almost choked on my own spit when I saw the DeLorean parked outside. All they need is the Back to the Future theme blaring from loudspeakers.
|
# encoding = utf-8
import os
import sys
import time
import datetime
import json
import jsonpath_rw
from datetime import datetime
def validate_input(helper, definition):
    """Implement your own validation logic to validate the input stanza configurations"""
    # NOTE(review): validate_input is defined twice in this module with an
    # identical body; this first definition is shadowed by the one below.
    # One of the two should be removed.
    # This example accesses the modular input variable
    # server = definition.parameters.get('server', None)
    # port = definition.parameters.get('port', None)
    pass
def validate_input(helper, definition):
    """Implement your own validation logic to validate the input stanza configurations"""
    # NOTE(review): duplicate of the validate_input defined immediately
    # above; this later definition is the one Python actually keeps.
    # This example accesses the modular input variable
    # server = definition.parameters.get('server', None)
    # port = definition.parameters.get('port', None)
    pass
def collect_events(helper, ew):
    """Poll the PuppetDB processing-time metric and emit one event per stanza.

    Fetches the puppetlabs.puppetdb mbean metric over HTTP using the token
    and server/port configured on the input, then writes the JSON payload
    as an event for every configured input stanza.

    :param helper: modular-input helper (settings, HTTP, event factory).
    :param ew: event writer used to emit the collected events.
    :raises: an HTTP error from ``response.raise_for_status`` on non-2xx
        responses, and re-raises any failure from ``ew.write_event``.
    """
    import json  # local import kept so the function is self-contained

    method = 'GET'
    api_request = 'application/json'
    pe_token = helper.get_arg('token_')
    pe_link = helper.get_arg('puppet_enterprise_server_')
    server = helper.get_arg('server_')
    port = helper.get_arg('port_')
    url = server + ":" + port + "/metrics/v1/mbeans/puppetlabs.puppetdb.mq%3Aname%3Dglobal.processing-time"

    # Bug fix: the original assigned pe_link in BOTH branches, so when no
    # Puppet Enterprise link was configured the event source was empty.
    # Fall back to the PuppetDB server address instead.
    if pe_link:
        input_source = pe_link
    else:
        input_source = server

    headers = {
        'X-Authentication': pe_token,
        'Content-type': api_request
    }
    response = helper.send_http_request(url,
                                        method,
                                        parameters=None,
                                        payload=None,
                                        headers=headers,
                                        cookies=None,
                                        # NOTE(review): TLS verification is
                                        # disabled — confirm this is intended.
                                        verify=False,
                                        cert=None,
                                        timeout=None,
                                        use_proxy=True)
    response.raise_for_status()
    # Original code logged the (non-error) response body three times via
    # log_error; a single call is kept for parity with existing log output.
    helper.log_error(response.text)
    payload = response.json()

    for stanza_name in helper.get_input_stanza_names():
        data = json.dumps(payload, sort_keys=False)
        event = helper.new_event(source=input_source,
                                 index=helper.get_output_index(stanza_name),
                                 sourcetype=helper.get_sourcetype(stanza_name),
                                 data=data)
        ew.write_event(event)
|
The 10th ranked Missouri Tigers won their inaugural men’s basketball game in the Southeastern Conference beating Alabama 84-68 at Mizzou Arena. Jabari Brown led the Tigers with 22 points as five players scored in double figures. Laurence Bowers left the game with five minutes to go after he landed funny on his right knee. Bowers missed all of last season with an injured left knee.
Brown shrugged off a tough shooting night on Saturday against Bucknell by hitting 7 of 11 shots from the field including 5 of 7 from three point range and 3 of 4 from the free throw line. Earnest Ross finished with 19, Bowers 16 and Phil Pressey and Alex Oriakhi finished with double-doubles. Pressey had 11 points and 13 assists. Oriakhi scored 16 with 10 rebounds.
|
# Global Toucan: a small interactive kinematics calculator (Python 2 —
# uses raw_input and numeric input()). All arithmetic is forced to float
# via float() instead of the original "* 1.000...0" trick, so integer
# division never truncates.
on = 1
a = -9.8  # acceleration due to gravity, m/s/s (negative = downward)
print("Welcome to Global Toucan, a physics calculator.\n")
valid = "Please choose a valid option\n"
# Main menu loop; runs until the user picks option 5 (Quit).
while on == 1:
    # Reset gravity each pass: the acceleration branches below overwrite ``a``.
    a = -9.8
    # Typo fix in the prompt: "would like" -> "would you like".
    choice = raw_input("What equation would you like to use?\n1. Final Velocity\n2. Distance\n3. Help\n4. List Equations\n5. Quit\n")
    if choice == "1":
        # vf = vi + a*t and its rearrangements.
        z = raw_input("What do you want to find?\n1. Final Velocity\n2. Initial Velocity\n3. Acceleration\n4. Time\n")
        if z == "1":
            vi = int(input("What is the initial velocity?\n"))
            time = int(input("What is the time?\n"))
            vf = float(vi) + (a * time)
            print("Your final velocity is %s m/s" % vf)
        elif z == "2":
            vf = int(input("What is the final velocity?\n"))
            time = int(input("For how long is the object travelling?\n"))
            vi = float(vf) - (a * time)
            print("The initial velocity is %s m/s" % vi)
        elif z == "3":
            vf = int(input("What is the final velocity?\n"))
            vi = int(input("What is your initial velocity?\n"))
            time = int(input("What is the time?\n"))
            a = (float(vf) - vi) / time
            print("The acceleration is %s m/s/s" % a)
        elif z == "4":
            vf = int(input("What is the final velocity?\n"))
            vi = int(input("What is the initial velocity?\n"))
            time = (vf - vi) / float(a)
            print("The time is %s seconds" % time)
        else:
            print(valid)
    elif choice == "2":
        # d = vi * t + (.5 * a * (t ** 2)) and its rearrangements.
        choice = raw_input("What do you want to find?\n1. Distance\n2. Initial Velocity\n3. Acceleration\n4. Time\n")
        if choice == "1":
            vi = int(input("What is your initial velocity?\n"))
            t = int(input("What is the time?\n"))
            d = float(vi) * t + (.5 * a * (t ** 2))
            print("This object has travelled %s meters" % d)
        elif choice == "2":
            d = int(input("What is the distance (please remember to include a - sign if movement is negative)?\n"))
            t = int(input("What is the time?\n"))
            vi = (float(d) - (.5 * a * (t ** 2))) / t
            # Bug fix: this branch computes the INITIAL velocity, but the
            # original message incorrectly said "final velocity".
            print("The initial velocity is %s m/s" % vi)
        elif choice == "3":
            d = int(input("What is the distance (please remember to include a - sign if movement is negative)?\n"))
            vi = int(input("What is the initial velocity?\n"))
            t = int(input("What is the time?\n"))
            a = (float(d) - (vi * t)) / (.5 * (t ** 2))
            print("The acceleration is %s m/s/s" % a)
        elif choice == "4":
            d = int(input("What is the distance (please remember to include a - sign if movement is negative)?\n"))
            vi = int(input("What is the initial velocity?\n"))
            vf = int(input("What is the final velocity?\n"))
            # currently using the distance formula python won't figure it out. I'll have to put in a function later. In the
            # meantime use vf and vi. vf = vi + at
            t = (float(vf) - vi) / a
            print("The time is %s seconds" % t)
        else:
            print(valid)
    elif choice == "3":
        print("Global Toucan is a simple program written in python made to solve physics problems concerning motion.\n The calculator will only take numbers, so please no units.\nWhen calculating for directions going down please remember to include '-'. For example if your calculation involves an apple falling 50 meters one would input -50 as the distance.\nEverything will be done in seconds for time, m/s for velocity, and m/s/s for acceleration.\nAs such, please do any conversions beforehand.\nGlobal Toucan is licensed under the GPLv3 a Free Software License.\n")
        raw_input("Press enter to return to the main menu.\n")
    elif choice == "4":
        print("The following are the equations used by this program:\nFinal Velocity: vf = vi + at, where vf is final velocity, a is acceleration and t is time.\nDistance: d = vi * t + .5 a * (t ^ 2), where d is distance, vi is initial velocity, t is time, a is acceleration, and t is time.\n")
        raw_input("Press enter when you are ready to return to the main menu.\n")
    elif choice == "5":
        raw_input("Global Toucan will now exit, please press enter.")
        on = 0
    else:
        print(valid)
|
Morocco is pushing ahead with an ambitious strategy to grow its automotive manufacturing industry, creating a lower-cost hub for supplying nearby wealthy European markets and other regional ones.
In fact, the car industry in Morocco represents the first industrial exporting sector with more than 7 billion U.S. dollars turnover in 2017. The country is on track to reach its goal of making 10 billion U.S. dollars auto industry export turnover by 2020. The sector created some 83,845 jobs, which represents 93 percent of the goal set for 2020. An ambitious goal that can be achieved only if the government elaborates a comprehensive vocational training program as instructed by King Mohammed VI. It is worth recalling the latest royal speech, made August 20th, wherein the Moroccan Sovereign called on the government to carry out a thorough review of vocational training programs to align them with the needs of businesses and the public sector, and to make sure they are adapted to changes in industry and trades in general, thereby increasing graduates’ chances to access professional life.
The government should absolutely develop a new growth model that will deliver more and better jobs. But for the economy to adapt to these new sources of growth, the labour force needs to have the right skills. If young Moroccans are offered high quality training, they will certainly have much better chance to succeed.
Morocco’s automotive industry is rapidly growing due to government incentives as well as other competitive factors. The Moroccan Society of Automotive Construction (SOMACA) by doubling its production capacity will reach 160,000 vehicles per year by 2022. The production target was revealed by Renault chairman and chief executive Carlos Ghosn at a meeting with King Mohammed VI in Marrakesh today.
The twofold increase will enable the Renault group to increase its production capacity in Morocco to 500,000 vehicles per year, including 340,000 produced at the Tangier plant under the Industrial Acceleration Program.
In 2007, SOMACA had exported its first “Made in Morocco” vehicle. Since then, it has become a real exportation platform with over 60% of its production destined for export. The SOMACA extension is part of the development of the automotive sector to achieve a production capacity of one million vehicles, all manufacturers combined, with a projected turnover of 100 billion dirhams yearly.
Morocco will continue to be very attractive to potential car manufacturers especially that Investment incentives include a five-year corporate tax exemption for automotive companies setting up in Morocco, and a 25-year exemption if most production is exported. Other benefits include VAT exemptions, land purchase subsidies and rebates of up to 30% on investment cost.
Now the ball is in the government's court, which should elaborate a strong strategy and lay the foundations for sustainable growth. More incentives and a skilled labor force will certainly be appealing to more car manufacturers.
|
"""
Compute the equilibrium wealth consumption ratio in the SSY model by first
computing the fixed point of A = phi K.
"""
from ssy_discretized_test import *
import numpy as np
# Default grid sizes for the discretization along the K, I and J dimensions.
default_K, default_I, default_J = 4, 4, 4
def wealth_cons_ratio(ssyd,
                      tol=1e-7,
                      init_val=1,
                      max_iter=1_000_000,
                      verbose=False):
    """
    Iterate to convergence on the Koopmans operator associated with the SSY
    model and then return the wealth consumption ratio.

    Parameters
    ----------
    ssyd : discretized SSY model (see ``discretize``); supplies the
        preference parameters and the grid dimensions K, I, J.
    tol : float — sup-norm convergence tolerance on successive iterates.
    init_val : scalar — initial guess for every element of w.
    max_iter : int — safety cap on the number of iterations.
    verbose : bool — print diagnostics when True.

    Returns
    -------
    numpy array of length K*I*J: the wealth consumption ratio w / ζ.
    """
    # Unpack the Epstein-Zin preference parameters
    ψ, γ, β = ssyd.ssy.ψ, ssyd.ssy.γ, ssyd.ssy.β
    θ = (1 - γ) / (1 - 1/ψ)
    ζ = 1 - β

    K_matrix = compute_K(ssyd)
    M = ssyd.K * ssyd.I * ssyd.J
    w = np.ones(M) * init_val

    num_iter = 0  # renamed from ``iter`` to avoid shadowing the builtin
    error = tol + 1
    r = compute_spec_rad(K_matrix)
    if verbose:
        print(f"Test value = {r**(1/θ)} and θ = {θ}")
        print("Beginning iteration\n\n")

    while error > tol and num_iter < max_iter:
        Tw = ζ + β * (K_matrix @ (w**θ))**(1/θ)
        error = np.max(np.abs(w - Tw))
        w = Tw
        num_iter += 1

    if verbose:
        # Bug fix: the original unconditionally reported convergence even
        # when the loop stopped because max_iter was exhausted.
        if error > tol:
            print(f"Warning: hit max_iter = {max_iter} before converging")
        else:
            print(f"Iteration converged after {num_iter} iterations")
    return w / ζ
def average_wealth_cons(ssy,
                        K=default_K,
                        I=default_I,
                        J=default_J,
                        verbose=False):
    """
    Mean wealth consumption ratio under the stationary distribution pi.

    The SSY model is discretized on a K x I x J grid, the wealth
    consumption ratio is computed state by state, and the result is
    averaged using the stationary weights of the discretized state chain.
    """
    discretized = discretize(ssy, K, I, J, add_x_data=True)
    ratios = wealth_cons_ratio(discretized, verbose=verbose)
    chain = MarkovChain(discretized.x_P)
    stationary = chain.stationary_distributions[0]
    return ratios @ stationary
|
Reserva do Ibitipoca is proud of its community initiatives. These projects encourage the inclusion of local residents in actions to enrich the local culture and rescue traditions that are disappearing. Since our founding, we have always prioritized everything local, from goods, to flavor, to architecture.
One of our first actions was the creation of a Management Plan that pinpointed social and environmental concerns in the region. We rely on this plan to direct our projects, it influences the areas where we implement projects, products that should be explored, and things that should be preserved. Since our founding, we have always prioritized everything local from goods, to flavor, to architecture.
Today the employed labor is nearly totally from the region. We are proud to provide good employment opportunities for women. We participate in a number of community councils and NGOs and advocate for the community. We provide capacity-building opportunities for our staff, such as English and computer courses, and have educated 200 people out of a village of 287 inhabitants. These initiatives were drivers for social change in the region, as they take place across three different municipalities and innumerable communities. With new skills, people could obtain new sources of income.
In addition to increasing local skills, we encourage entrepreneurship within the communities. We prioritize local businesses and will pay up to 10% more for a locally produced good or service. This philosophy is why, in 2015, Reserva do Ibitipoca transferred ownership of the property to the employees. As a social enterprise, Reserva do Ibitipoca and all profits from tourism belong 100% to the staff, while the social and environmental projects are subsidized by the founder of the project.
|
import pygame, numpy
from .SceneBase import SceneBase
from .DrawingUtils import *
from widgets import Button
from models.game import Board, Move
from services import ImageService, FontService, SceneManager, SettingsService as Settings
class GameCompleted(SceneBase):
    """
    This scene shows the result of a game by displaying the completed board and a message about which player won
    """
    def __init__(self, game):
        """Build all layout constants, pre-render the winner banner and
        pre-compute the bounding box of every cell of the 9x9 board.

        :param game: the finished game whose board and winner are shown.
        """
        SceneBase.__init__(self)
        # data needed to play the game
        self.game = game
        # calculate constants used for rendering
        # (these are all done in the fixed transform space, so we can safely use constants)
        self.MARGIN = 96
        self.CELL_SIZE = 83
        self.CELL_SPACING = 10
        self.LOCAL_BOARD_SPACING = 25
        # Board is right-aligned in a 1920-wide virtual canvas: 9 cells plus
        # two inter-board gaps are subtracted from the right margin.
        self.BOARD_AREA_X = 1920 - self.MARGIN - 9*(self.CELL_SIZE + self.CELL_SPACING) - 2*self.LOCAL_BOARD_SPACING
        self.BOARD_AREA_Y = self.MARGIN
        self.FONT_SIZE = 48
        # bounding box for the player who won
        winner_box_width = 1920 - 3*self.MARGIN - self.BOARD_AREA_X
        winner_box_height = self.FONT_SIZE * 3
        self.WINNER_BOX = pygame.Rect(self.MARGIN, 0.5*1080 - self.MARGIN - winner_box_height, winner_box_width, winner_box_height)
        # "Name" of winning player
        winner = self.game.get_winner()
        if winner == Board.X:
            winner_name = "%s (X) wins!" % self.game.player1.name
        elif winner == Board.O:
            winner_name = "%s (O) wins!" % self.game.player2.name
        else:
            winner_name = "The Players Tie! Lame!"
        # Pre-render the banner text once; render() only blits the surface.
        self.winner_text = FontService.get_regular_font(self.FONT_SIZE)
        self.winner_text_surface = self.winner_text.render(winner_name, False, Settings.theme['font'])
        self.winner_text_size = self.winner_text.size(winner_name)
        self.winner_text_location = (self.WINNER_BOX.centerx - 0.5 * self.winner_text_size[0],
                                     self.WINNER_BOX.top + 0.5 * self.winner_text_size[1] + 10)
        # Scale every sprite to the cell size up front (assigning back into
        # the same keys, so the dict size never changes during iteration).
        self.cell_sprites = ImageService.get_board_cell_sprites()
        for key in self.cell_sprites.keys():
            self.cell_sprites[key] = pygame.transform.scale(self.cell_sprites[key], (self.CELL_SIZE, self.CELL_SIZE))
        # compute cell bounding boxes - Each element is a 4-tuple (left, top, right, bottom)
        self.cell_locations = numpy.empty((3, 3, 3, 3), object)
        for i in list(range(0, 9)):
            metarow = i // 3
            row = i % 3
            for j in list(range(0, 9)):
                metacol = j // 3
                col = j % 3
                # compute the location of the cell in the grid and shift it into the board area
                location_x = (metacol * 3 + col)*(self.CELL_SIZE + self.CELL_SPACING) \
                             + self.LOCAL_BOARD_SPACING*metacol \
                             + self.BOARD_AREA_X
                location_y = (metarow * 3 + row) * (self.CELL_SIZE + self.CELL_SPACING) \
                             + self.LOCAL_BOARD_SPACING * metarow \
                             + self.BOARD_AREA_Y
                self.cell_locations[metarow][metacol][row][col] = (location_x, location_y, location_x + self.CELL_SIZE, location_y + self.CELL_SIZE)
        # Exit button mirrors the winner box geometry, placed below centre.
        exit_btn = Button(self.WINNER_BOX.left, 0.5*1080 + self.MARGIN,
                          self.WINNER_BOX.width, self.WINNER_BOX.height,
                          "Exit", lambda: SceneManager.go_to_main_menu(self))
        self.widgets.append(exit_btn)
    def process_input(self, events, pressed_keys):
        """Forward raw input events to every child widget."""
        for widget in self.widgets:
            widget.process_input(events, pressed_keys)
    def update(self):
        """No per-frame state to update; the scene is static."""
        pass
    def render(self, screen):
        """Draw background, winner banner, the completed board and widgets."""
        bg = ImageService.get_game_bg()
        screen.blit(bg, (0, 0))
        # render the box for the winner info
        if self.game.get_winner() == Board.X:
            border_color = Settings.theme['primary']
        elif self.game.get_winner() == Board.O:
            border_color = Settings.theme['secondary']
        else:
            border_color = Settings.theme['widget_highlight']
        # draw box
        aa_border_rounded_rect(screen, self.WINNER_BOX, Settings.theme['widget_background'], border_color)
        screen.blit(self.winner_text_surface, self.winner_text_location) # name of winner
        # render the board
        current_player_symbol = self.game.active_player.number
        for i in list(range(0, 9)):
            metarow = i // 3
            row = i % 3
            for j in list(range(0, 9)):
                metacol = j // 3
                col = j % 3
                board_winner = self.game.board.check_cell(metarow, metacol)
                cell_owner = self.game.board.check_small_cell(metarow, metacol, row, col)
                # NOTE(review): move_object is constructed every cell but
                # never used below — looks like leftover code; confirm and
                # remove (it allocates 81 Move objects per frame).
                move_object = Move(current_player_symbol, metarow, metacol, row, col)
                # compute the location of the cell in the grid and shift it into the board area
                location = self.cell_locations[metarow][metacol][row][col]
                location_x, location_y = location[0], location[1]
                # render the correct background for the cell:
                if board_winner == Board.X :
                    screen.blit(self.cell_sprites['p1_won'], (location_x, location_y))
                elif board_winner == Board.O:
                    screen.blit(self.cell_sprites['p2_won'], (location_x, location_y))
                else:
                    screen.blit(self.cell_sprites['blank'], (location_x, location_y))
                # render the cell's owner:
                if cell_owner == Board.X:
                    screen.blit(self.cell_sprites['p1_marker'], (location_x, location_y))
                elif cell_owner == Board.O:
                    screen.blit(self.cell_sprites['p2_marker'], (location_x, location_y))
        for widget in self.widgets:
            widget.render(screen)
|
This modern home in a popular Moorookyle position comprises four bedrooms, two bathrooms and two living areas. Conveniently close to transport, parkland and residents-only leisure centre - it is a smart family choice in this area. Please call us for further details and to arrange inspection.
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'ui_rpt_builder.ui'
#
# Created: Sun Jun 08 13:58:36 2014
# by: PyQt4 UI code generator 4.10.3
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
# PyQt4 compatibility shims (standard pyuic4 boilerplate).
try:
    _fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
    # PyQt4 API v2 / Python 3 has no QString: strings pass through unchanged.
    def _fromUtf8(s):
        return s
try:
    _encoding = QtGui.QApplication.UnicodeUTF8
    def _translate(context, text, disambig):
        return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
    # Newer PyQt4 removed UnicodeUTF8; translate() takes no encoding argument.
    def _translate(context, text, disambig):
        return QtGui.QApplication.translate(context, text, disambig)
class Ui_ReportBuilder(object):
    def setupUi(self, ReportBuilder):
        """Build the Report Builder dialog widget tree.

        Auto-generated by pyuic4 from ui_rpt_builder.ui — do not hand-edit
        the logic; regenerate from the .ui file instead. Creates five tabs
        (Fields, Filter, Grouping, Sorting, Settings) plus the bottom
        Load/Save/Generate/Cancel buttons.
        """
        ReportBuilder.setObjectName(_fromUtf8("ReportBuilder"))
        ReportBuilder.resize(656, 523)
        self.gridLayout = QtGui.QGridLayout(ReportBuilder)
        self.gridLayout.setObjectName(_fromUtf8("gridLayout"))
        self.tabWidget = QtGui.QTabWidget(ReportBuilder)
        self.tabWidget.setObjectName(_fromUtf8("tabWidget"))
        # ---- Tab 1: Fields (entity selector + available/report field lists)
        self.tab = QtGui.QWidget()
        self.tab.setObjectName(_fromUtf8("tab"))
        self.gridLayout_3 = QtGui.QGridLayout(self.tab)
        self.gridLayout_3.setObjectName(_fromUtf8("gridLayout_3"))
        self.groupBox = QtGui.QGroupBox(self.tab)
        self.groupBox.setTitle(_fromUtf8(""))
        self.groupBox.setObjectName(_fromUtf8("groupBox"))
        self.gridLayout_2 = QtGui.QGridLayout(self.groupBox)
        self.gridLayout_2.setObjectName(_fromUtf8("gridLayout_2"))
        self.comboBox = QtGui.QComboBox(self.groupBox)
        self.comboBox.setMinimumSize(QtCore.QSize(0, 30))
        self.comboBox.setObjectName(_fromUtf8("comboBox"))
        self.gridLayout_2.addWidget(self.comboBox, 0, 1, 1, 1)
        self.label = QtGui.QLabel(self.groupBox)
        self.label.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)
        self.label.setObjectName(_fromUtf8("label"))
        self.gridLayout_2.addWidget(self.label, 0, 0, 1, 1)
        self.gridLayout_3.addWidget(self.groupBox, 0, 0, 1, 1)
        self.groupBox_2 = QtGui.QGroupBox(self.tab)
        self.groupBox_2.setObjectName(_fromUtf8("groupBox_2"))
        self.gridLayout_4 = QtGui.QGridLayout(self.groupBox_2)
        self.gridLayout_4.setObjectName(_fromUtf8("gridLayout_4"))
        self.label_2 = QtGui.QLabel(self.groupBox_2)
        self.label_2.setObjectName(_fromUtf8("label_2"))
        self.gridLayout_4.addWidget(self.label_2, 0, 0, 1, 1)
        self.label_3 = QtGui.QLabel(self.groupBox_2)
        self.label_3.setObjectName(_fromUtf8("label_3"))
        self.gridLayout_4.addWidget(self.label_3, 0, 2, 1, 1)
        self.listWidget = QtGui.QListWidget(self.groupBox_2)
        self.listWidget.setSelectionMode(QtGui.QAbstractItemView.MultiSelection)
        self.listWidget.setObjectName(_fromUtf8("listWidget"))
        self.gridLayout_4.addWidget(self.listWidget, 1, 0, 1, 1)
        self.listWidget_2 = QtGui.QListWidget(self.groupBox_2)
        self.listWidget_2.setObjectName(_fromUtf8("listWidget_2"))
        self.gridLayout_4.addWidget(self.listWidget_2, 1, 2, 1, 1)
        # Buttons moving fields between the available and report lists.
        self.groupBox_3 = QtGui.QGroupBox(self.groupBox_2)
        self.groupBox_3.setTitle(_fromUtf8(""))
        self.groupBox_3.setFlat(True)
        self.groupBox_3.setObjectName(_fromUtf8("groupBox_3"))
        self.verticalLayout = QtGui.QVBoxLayout(self.groupBox_3)
        self.verticalLayout.setSpacing(0)
        self.verticalLayout.setObjectName(_fromUtf8("verticalLayout"))
        self.btnAddField = QtGui.QPushButton(self.groupBox_3)
        self.btnAddField.setText(_fromUtf8(""))
        icon = QtGui.QIcon()
        icon.addPixmap(QtGui.QPixmap(_fromUtf8(":/plugins/stdm/images/icons/next.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.btnAddField.setIcon(icon)
        self.btnAddField.setObjectName(_fromUtf8("btnAddField"))
        self.verticalLayout.addWidget(self.btnAddField)
        self.btnRemField = QtGui.QPushButton(self.groupBox_3)
        self.btnRemField.setText(_fromUtf8(""))
        icon1 = QtGui.QIcon()
        icon1.addPixmap(QtGui.QPixmap(_fromUtf8(":/plugins/stdm/images/icons/previous.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.btnRemField.setIcon(icon1)
        self.btnRemField.setObjectName(_fromUtf8("btnRemField"))
        self.verticalLayout.addWidget(self.btnRemField)
        self.btnAddAllFields = QtGui.QPushButton(self.groupBox_3)
        self.btnAddAllFields.setText(_fromUtf8(""))
        icon2 = QtGui.QIcon()
        icon2.addPixmap(QtGui.QPixmap(_fromUtf8(":/plugins/stdm/images/icons/last.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.btnAddAllFields.setIcon(icon2)
        self.btnAddAllFields.setObjectName(_fromUtf8("btnAddAllFields"))
        self.verticalLayout.addWidget(self.btnAddAllFields)
        self.btnRemAllFields = QtGui.QPushButton(self.groupBox_3)
        self.btnRemAllFields.setText(_fromUtf8(""))
        icon3 = QtGui.QIcon()
        icon3.addPixmap(QtGui.QPixmap(_fromUtf8(":/plugins/stdm/images/icons/first.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.btnRemAllFields.setIcon(icon3)
        self.btnRemAllFields.setObjectName(_fromUtf8("btnRemAllFields"))
        self.verticalLayout.addWidget(self.btnRemAllFields)
        self.gridLayout_4.addWidget(self.groupBox_3, 1, 1, 1, 1)
        # Buttons reordering entries within the report field list.
        self.groupBox_4 = QtGui.QGroupBox(self.groupBox_2)
        self.groupBox_4.setTitle(_fromUtf8(""))
        self.groupBox_4.setFlat(True)
        self.groupBox_4.setObjectName(_fromUtf8("groupBox_4"))
        self.verticalLayout_2 = QtGui.QVBoxLayout(self.groupBox_4)
        self.verticalLayout_2.setSpacing(0)
        self.verticalLayout_2.setObjectName(_fromUtf8("verticalLayout_2"))
        self.btnRptFieldUp = QtGui.QPushButton(self.groupBox_4)
        self.btnRptFieldUp.setText(_fromUtf8(""))
        icon4 = QtGui.QIcon()
        icon4.addPixmap(QtGui.QPixmap(_fromUtf8(":/plugins/stdm/images/icons/down.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.btnRptFieldUp.setIcon(icon4)
        self.btnRptFieldUp.setObjectName(_fromUtf8("btnRptFieldUp"))
        self.verticalLayout_2.addWidget(self.btnRptFieldUp)
        self.btnRptFieldDwn = QtGui.QPushButton(self.groupBox_4)
        self.btnRptFieldDwn.setText(_fromUtf8(""))
        icon5 = QtGui.QIcon()
        icon5.addPixmap(QtGui.QPixmap(_fromUtf8(":/plugins/stdm/images/icons/up.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.btnRptFieldDwn.setIcon(icon5)
        self.btnRptFieldDwn.setObjectName(_fromUtf8("btnRptFieldDwn"))
        self.verticalLayout_2.addWidget(self.btnRptFieldDwn)
        self.gridLayout_4.addWidget(self.groupBox_4, 1, 3, 1, 1)
        self.gridLayout_3.addWidget(self.groupBox_2, 1, 0, 1, 1)
        self.tabWidget.addTab(self.tab, _fromUtf8(""))
        # ---- Tab 2: Filter (WHERE-clause editor with operator buttons)
        self.tab_2 = QtGui.QWidget()
        self.tab_2.setObjectName(_fromUtf8("tab_2"))
        self.gridLayout_7 = QtGui.QGridLayout(self.tab_2)
        self.gridLayout_7.setObjectName(_fromUtf8("gridLayout_7"))
        self.label_4 = QtGui.QLabel(self.tab_2)
        self.label_4.setObjectName(_fromUtf8("label_4"))
        self.gridLayout_7.addWidget(self.label_4, 0, 0, 1, 2)
        self.groupBox_5 = QtGui.QGroupBox(self.tab_2)
        self.groupBox_5.setObjectName(_fromUtf8("groupBox_5"))
        self.gridLayout_6 = QtGui.QGridLayout(self.groupBox_5)
        self.gridLayout_6.setObjectName(_fromUtf8("gridLayout_6"))
        self.lstFields = QtGui.QListWidget(self.groupBox_5)
        self.lstFields.setObjectName(_fromUtf8("lstFields"))
        self.gridLayout_6.addWidget(self.lstFields, 0, 0, 1, 1)
        self.lstUniqVal = QtGui.QListWidget(self.groupBox_5)
        self.lstUniqVal.setObjectName(_fromUtf8("lstUniqVal"))
        self.gridLayout_6.addWidget(self.lstUniqVal, 0, 1, 1, 1)
        self.btnUniqVals = QtGui.QPushButton(self.groupBox_5)
        self.btnUniqVals.setObjectName(_fromUtf8("btnUniqVals"))
        self.gridLayout_6.addWidget(self.btnUniqVals, 1, 1, 1, 1)
        self.gridLayout_7.addWidget(self.groupBox_5, 1, 0, 1, 6)
        # SQL comparison/logical operator buttons (=, <>, LIKE, AND, OR...).
        self.groupBox_6 = QtGui.QGroupBox(self.tab_2)
        self.groupBox_6.setObjectName(_fromUtf8("groupBox_6"))
        self.gridLayout_5 = QtGui.QGridLayout(self.groupBox_6)
        self.gridLayout_5.setObjectName(_fromUtf8("gridLayout_5"))
        self.btnOpEqual = QtGui.QPushButton(self.groupBox_6)
        self.btnOpEqual.setMinimumSize(QtCore.QSize(0, 30))
        self.btnOpEqual.setObjectName(_fromUtf8("btnOpEqual"))
        self.gridLayout_5.addWidget(self.btnOpEqual, 0, 0, 1, 1)
        self.btnOpNotEqual = QtGui.QPushButton(self.groupBox_6)
        self.btnOpNotEqual.setMinimumSize(QtCore.QSize(0, 30))
        self.btnOpNotEqual.setObjectName(_fromUtf8("btnOpNotEqual"))
        self.gridLayout_5.addWidget(self.btnOpNotEqual, 0, 1, 1, 1)
        self.btnOpLike = QtGui.QPushButton(self.groupBox_6)
        self.btnOpLike.setMinimumSize(QtCore.QSize(0, 30))
        self.btnOpLike.setObjectName(_fromUtf8("btnOpLike"))
        self.gridLayout_5.addWidget(self.btnOpLike, 0, 2, 1, 1)
        self.btnOpGreater = QtGui.QPushButton(self.groupBox_6)
        self.btnOpGreater.setMinimumSize(QtCore.QSize(0, 30))
        self.btnOpGreater.setObjectName(_fromUtf8("btnOpGreater"))
        self.gridLayout_5.addWidget(self.btnOpGreater, 1, 0, 1, 1)
        self.btnOpGreaterEq = QtGui.QPushButton(self.groupBox_6)
        self.btnOpGreaterEq.setMinimumSize(QtCore.QSize(0, 30))
        self.btnOpGreaterEq.setObjectName(_fromUtf8("btnOpGreaterEq"))
        self.gridLayout_5.addWidget(self.btnOpGreaterEq, 1, 1, 1, 1)
        self.btnOpAnd = QtGui.QPushButton(self.groupBox_6)
        self.btnOpAnd.setMinimumSize(QtCore.QSize(0, 30))
        self.btnOpAnd.setObjectName(_fromUtf8("btnOpAnd"))
        self.gridLayout_5.addWidget(self.btnOpAnd, 1, 2, 1, 1)
        self.btnOpLess = QtGui.QPushButton(self.groupBox_6)
        self.btnOpLess.setMinimumSize(QtCore.QSize(0, 30))
        self.btnOpLess.setObjectName(_fromUtf8("btnOpLess"))
        self.gridLayout_5.addWidget(self.btnOpLess, 2, 0, 1, 1)
        self.btnOpLess_2 = QtGui.QPushButton(self.groupBox_6)
        self.btnOpLess_2.setMinimumSize(QtCore.QSize(0, 30))
        self.btnOpLess_2.setObjectName(_fromUtf8("btnOpLess_2"))
        self.gridLayout_5.addWidget(self.btnOpLess_2, 2, 1, 1, 1)
        self.btnOpOr = QtGui.QPushButton(self.groupBox_6)
        self.btnOpOr.setMinimumSize(QtCore.QSize(0, 30))
        self.btnOpOr.setObjectName(_fromUtf8("btnOpOr"))
        self.gridLayout_5.addWidget(self.btnOpOr, 2, 2, 1, 1)
        self.gridLayout_7.addWidget(self.groupBox_6, 2, 0, 2, 1)
        self.lblSqlEntity = QtGui.QLabel(self.tab_2)
        self.lblSqlEntity.setObjectName(_fromUtf8("lblSqlEntity"))
        self.gridLayout_7.addWidget(self.lblSqlEntity, 2, 1, 1, 5)
        self.txtSqlParser = QtGui.QTextEdit(self.tab_2)
        self.txtSqlParser.setObjectName(_fromUtf8("txtSqlParser"))
        self.gridLayout_7.addWidget(self.txtSqlParser, 3, 1, 1, 5)
        self.btnSQLClr = QtGui.QPushButton(self.tab_2)
        self.btnSQLClr.setMinimumSize(QtCore.QSize(0, 30))
        self.btnSQLClr.setObjectName(_fromUtf8("btnSQLClr"))
        self.gridLayout_7.addWidget(self.btnSQLClr, 4, 1, 1, 1)
        self.btnSQLVer = QtGui.QPushButton(self.tab_2)
        self.btnSQLVer.setMinimumSize(QtCore.QSize(0, 30))
        self.btnSQLVer.setObjectName(_fromUtf8("btnSQLVer"))
        self.gridLayout_7.addWidget(self.btnSQLVer, 4, 2, 1, 1)
        self.btnMap = QtGui.QPushButton(self.tab_2)
        self.btnMap.setMinimumSize(QtCore.QSize(0, 30))
        self.btnMap.setObjectName(_fromUtf8("btnMap"))
        self.gridLayout_7.addWidget(self.btnMap, 4, 4, 1, 1)
        self.btnSQLApply = QtGui.QPushButton(self.tab_2)
        self.btnSQLApply.setMinimumSize(QtCore.QSize(0, 30))
        self.btnSQLApply.setObjectName(_fromUtf8("btnSQLApply"))
        self.gridLayout_7.addWidget(self.btnSQLApply, 4, 3, 1, 1)
        self.tabWidget.addTab(self.tab_2, _fromUtf8(""))
        # ---- Tab 3: Grouping (pick fields to group the report by)
        self.tab_3 = QtGui.QWidget()
        self.tab_3.setObjectName(_fromUtf8("tab_3"))
        self.gridLayout_8 = QtGui.QGridLayout(self.tab_3)
        self.gridLayout_8.setObjectName(_fromUtf8("gridLayout_8"))
        self.label_5 = QtGui.QLabel(self.tab_3)
        self.label_5.setObjectName(_fromUtf8("label_5"))
        self.gridLayout_8.addWidget(self.label_5, 0, 0, 1, 1)
        self.lstRptFields = QtGui.QListWidget(self.tab_3)
        self.lstRptFields.setObjectName(_fromUtf8("lstRptFields"))
        self.gridLayout_8.addWidget(self.lstRptFields, 1, 0, 1, 1)
        self.groupBox_7 = QtGui.QGroupBox(self.tab_3)
        self.groupBox_7.setTitle(_fromUtf8(""))
        self.groupBox_7.setFlat(True)
        self.groupBox_7.setObjectName(_fromUtf8("groupBox_7"))
        self.verticalLayout_3 = QtGui.QVBoxLayout(self.groupBox_7)
        self.verticalLayout_3.setSpacing(0)
        self.verticalLayout_3.setObjectName(_fromUtf8("verticalLayout_3"))
        self.btnAddGpField = QtGui.QPushButton(self.groupBox_7)
        self.btnAddGpField.setText(_fromUtf8(""))
        self.btnAddGpField.setIcon(icon)
        self.btnAddGpField.setObjectName(_fromUtf8("btnAddGpField"))
        self.verticalLayout_3.addWidget(self.btnAddGpField)
        self.btnRemGpField = QtGui.QPushButton(self.groupBox_7)
        self.btnRemGpField.setText(_fromUtf8(""))
        self.btnRemGpField.setIcon(icon1)
        self.btnRemGpField.setObjectName(_fromUtf8("btnRemGpField"))
        self.verticalLayout_3.addWidget(self.btnRemGpField)
        self.gridLayout_8.addWidget(self.groupBox_7, 1, 1, 1, 1)
        self.tbGroupFields = QtGui.QTableWidget(self.tab_3)
        self.tbGroupFields.setObjectName(_fromUtf8("tbGroupFields"))
        self.tbGroupFields.setColumnCount(1)
        self.tbGroupFields.setRowCount(0)
        item = QtGui.QTableWidgetItem()
        self.tbGroupFields.setHorizontalHeaderItem(0, item)
        self.tbGroupFields.horizontalHeader().setDefaultSectionSize(95)
        self.tbGroupFields.horizontalHeader().setStretchLastSection(True)
        self.gridLayout_8.addWidget(self.tbGroupFields, 1, 2, 1, 1)
        self.chIncludeGpFields = QtGui.QCheckBox(self.tab_3)
        self.chIncludeGpFields.setObjectName(_fromUtf8("chIncludeGpFields"))
        self.gridLayout_8.addWidget(self.chIncludeGpFields, 2, 0, 1, 1)
        self.tabWidget.addTab(self.tab_3, _fromUtf8(""))
        # ---- Tab 4: Sorting (3-column sort-order table)
        self.tab_4 = QtGui.QWidget()
        self.tab_4.setObjectName(_fromUtf8("tab_4"))
        self.verticalLayout_4 = QtGui.QVBoxLayout(self.tab_4)
        self.verticalLayout_4.setObjectName(_fromUtf8("verticalLayout_4"))
        self.label_6 = QtGui.QLabel(self.tab_4)
        self.label_6.setObjectName(_fromUtf8("label_6"))
        self.verticalLayout_4.addWidget(self.label_6)
        self.tbSortFields = QtGui.QTableWidget(self.tab_4)
        self.tbSortFields.setObjectName(_fromUtf8("tbSortFields"))
        self.tbSortFields.setColumnCount(3)
        self.tbSortFields.setRowCount(0)
        item = QtGui.QTableWidgetItem()
        self.tbSortFields.setHorizontalHeaderItem(0, item)
        item = QtGui.QTableWidgetItem()
        self.tbSortFields.setHorizontalHeaderItem(1, item)
        item = QtGui.QTableWidgetItem()
        self.tbSortFields.setHorizontalHeaderItem(2, item)
        self.tbSortFields.horizontalHeader().setDefaultSectionSize(130)
        self.tbSortFields.horizontalHeader().setHighlightSections(False)
        self.tbSortFields.horizontalHeader().setStretchLastSection(True)
        self.verticalLayout_4.addWidget(self.tbSortFields)
        self.tabWidget.addTab(self.tab_4, _fromUtf8(""))
        # ---- Tab 5: Settings (tree of setting pages + stacked editor area)
        self.tab_5 = QtGui.QWidget()
        self.tab_5.setObjectName(_fromUtf8("tab_5"))
        self.gridLayout_9 = QtGui.QGridLayout(self.tab_5)
        self.gridLayout_9.setObjectName(_fromUtf8("gridLayout_9"))
        self.label_7 = QtGui.QLabel(self.tab_5)
        self.label_7.setObjectName(_fromUtf8("label_7"))
        self.gridLayout_9.addWidget(self.label_7, 0, 0, 1, 1)
        self.trRptSettings = QtGui.QTreeWidget(self.tab_5)
        self.trRptSettings.setObjectName(_fromUtf8("trRptSettings"))
        item_0 = QtGui.QTreeWidgetItem(self.trRptSettings)
        item_1 = QtGui.QTreeWidgetItem(item_0)
        item_1 = QtGui.QTreeWidgetItem(item_0)
        item_1 = QtGui.QTreeWidgetItem(item_0)
        item_1 = QtGui.QTreeWidgetItem(item_0)
        item_1 = QtGui.QTreeWidgetItem(item_0)
        item_0 = QtGui.QTreeWidgetItem(self.trRptSettings)
        item_0 = QtGui.QTreeWidgetItem(self.trRptSettings)
        self.trRptSettings.header().setVisible(False)
        self.gridLayout_9.addWidget(self.trRptSettings, 1, 0, 1, 1)
        self.stackedWidget = QtGui.QStackedWidget(self.tab_5)
        self.stackedWidget.setObjectName(_fromUtf8("stackedWidget"))
        self.gridLayout_9.addWidget(self.stackedWidget, 1, 1, 1, 1)
        self.tabWidget.addTab(self.tab_5, _fromUtf8(""))
        self.gridLayout.addWidget(self.tabWidget, 0, 0, 1, 4)
        # ---- Bottom button row: Load / Save / Generate Report / Cancel
        self.btnLoad = QtGui.QPushButton(ReportBuilder)
        self.btnLoad.setMinimumSize(QtCore.QSize(0, 30))
        self.btnLoad.setObjectName(_fromUtf8("btnLoad"))
        self.gridLayout.addWidget(self.btnLoad, 1, 0, 1, 1)
        self.btnRptCancel = QtGui.QPushButton(ReportBuilder)
        self.btnRptCancel.setMinimumSize(QtCore.QSize(0, 30))
        self.btnRptCancel.setObjectName(_fromUtf8("btnRptCancel"))
        self.gridLayout.addWidget(self.btnRptCancel, 1, 3, 1, 1)
        self.btnGenRpt = QtGui.QPushButton(ReportBuilder)
        self.btnGenRpt.setMinimumSize(QtCore.QSize(0, 30))
        self.btnGenRpt.setObjectName(_fromUtf8("btnGenRpt"))
        self.gridLayout.addWidget(self.btnGenRpt, 1, 2, 1, 1)
        self.btnSave = QtGui.QPushButton(ReportBuilder)
        self.btnSave.setMinimumSize(QtCore.QSize(0, 30))
        self.btnSave.setObjectName(_fromUtf8("btnSave"))
        self.gridLayout.addWidget(self.btnSave, 1, 1, 1, 1)
        # Apply translated captions and wire up auto-connected slots.
        self.retranslateUi(ReportBuilder)
        self.tabWidget.setCurrentIndex(0)
        self.stackedWidget.setCurrentIndex(-1)
        QtCore.QMetaObject.connectSlotsByName(ReportBuilder)
    def retranslateUi(self, ReportBuilder):
        """Apply all translatable display strings to the dialog's widgets.

        Auto-generated by pyuic from the Qt Designer .ui file; do not edit
        by hand.  Called from setupUi and again whenever the application
        language changes at runtime.
        """
        ReportBuilder.setWindowTitle(_translate("ReportBuilder", "STDM Report Builder", None))
        # Fields tab.
        self.label.setText(_translate("ReportBuilder", "Entity", None))
        self.groupBox_2.setTitle(_translate("ReportBuilder", "Report Contents Fields:", None))
        self.label_2.setText(_translate("ReportBuilder", "Available Fields:", None))
        self.label_3.setText(_translate("ReportBuilder", "Report Fields:", None))
        self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab), _translate("ReportBuilder", "Fields", None))
        # Filter tab: SQL-style WHERE clause builder.
        self.label_4.setText(_translate("ReportBuilder", "Enter a WHERE clause to select records that will be appended to the report.", None))
        self.groupBox_5.setTitle(_translate("ReportBuilder", "Report Fields", None))
        self.btnUniqVals.setText(_translate("ReportBuilder", "Get Unique Values", None))
        self.groupBox_6.setTitle(_translate("ReportBuilder", "Operators:", None))
        self.btnOpEqual.setText(_translate("ReportBuilder", "=", None))
        self.btnOpNotEqual.setText(_translate("ReportBuilder", "<>", None))
        self.btnOpLike.setText(_translate("ReportBuilder", "LIKE", None))
        self.btnOpGreater.setText(_translate("ReportBuilder", ">", None))
        self.btnOpGreaterEq.setText(_translate("ReportBuilder", ">=", None))
        self.btnOpAnd.setText(_translate("ReportBuilder", "AND", None))
        self.btnOpLess.setText(_translate("ReportBuilder", "<", None))
        self.btnOpLess_2.setText(_translate("ReportBuilder", "<=", None))
        self.btnOpOr.setText(_translate("ReportBuilder", "OR", None))
        self.lblSqlEntity.setText(_translate("ReportBuilder", "Select * FROM [ENTITY] WHERE:", None))
        self.btnSQLClr.setText(_translate("ReportBuilder", "Clear", None))
        self.btnSQLVer.setText(_translate("ReportBuilder", "Verify", None))
        self.btnMap.setText(_translate("ReportBuilder", "Show on Map", None))
        self.btnSQLApply.setText(_translate("ReportBuilder", "Apply", None))
        self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_2), _translate("ReportBuilder", "Filter", None))
        # Grouping tab.
        self.label_5.setText(_translate("ReportBuilder", "Report Fields:", None))
        item = self.tbGroupFields.horizontalHeaderItem(0)
        item.setText(_translate("ReportBuilder", "Fields", None))
        self.chIncludeGpFields.setText(_translate("ReportBuilder", "Include Group Fields", None))
        self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_3), _translate("ReportBuilder", "Grouping", None))
        # Sorting tab.
        self.label_6.setText(_translate("ReportBuilder", "Sort records by a maximum of three fields in either ascending or descending order.", None))
        item = self.tbSortFields.horizontalHeaderItem(0)
        item.setText(_translate("ReportBuilder", "Fields", None))
        item = self.tbSortFields.horizontalHeaderItem(1)
        item.setText(_translate("ReportBuilder", "Sort", None))
        item = self.tbSortFields.horizontalHeaderItem(2)
        item.setText(_translate("ReportBuilder", "Order", None))
        self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_4), _translate("ReportBuilder", "Sorting", None))
        # Display tab: settings tree.
        self.label_7.setText(_translate("ReportBuilder", "Settings:", None))
        self.trRptSettings.headerItem().setText(0, _translate("ReportBuilder", "1", None))
        # Sorting is disabled while the tree items are renamed, then restored.
        __sortingEnabled = self.trRptSettings.isSortingEnabled()
        self.trRptSettings.setSortingEnabled(False)
        self.trRptSettings.topLevelItem(0).setText(0, _translate("ReportBuilder", "Elements", None))
        self.trRptSettings.topLevelItem(0).child(0).setText(0, _translate("ReportBuilder", "Title", None))
        self.trRptSettings.topLevelItem(0).child(1).setText(0, _translate("ReportBuilder", "Subtitle", None))
        self.trRptSettings.topLevelItem(0).child(2).setText(0, _translate("ReportBuilder", "Field Names", None))
        self.trRptSettings.topLevelItem(0).child(3).setText(0, _translate("ReportBuilder", "Date", None))
        self.trRptSettings.topLevelItem(0).child(4).setText(0, _translate("ReportBuilder", "Page Numbering", None))
        self.trRptSettings.topLevelItem(1).setText(0, _translate("ReportBuilder", "Fields", None))
        self.trRptSettings.topLevelItem(2).setText(0, _translate("ReportBuilder", "Groups", None))
        self.trRptSettings.setSortingEnabled(__sortingEnabled)
        self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_5), _translate("ReportBuilder", "Display", None))
        # Bottom button row.
        self.btnLoad.setText(_translate("ReportBuilder", "Load...", None))
        self.btnRptCancel.setText(_translate("ReportBuilder", "Cancel", None))
        self.btnGenRpt.setText(_translate("ReportBuilder", "Generate Report", None))
        self.btnSave.setText(_translate("ReportBuilder", "Save", None))
|
(Noun) An official or authoritative book of record.
(Verb) To set down formally in writing; to record.
Aid in the form of money or necessities given to a person or persons in a state of poverty or want.
One who stands for or in the place of another or others. Holding the place of, and acting for, a larger body of persons. (Note the distinction from delegate).
Money kept back or placed on one side to meet future demands or contingencies.
1. The income of a government from all sources appropriated for the payment of public expenses.
2. Income of a private organisation from any source (especially when not directly earned).
Literally: a number of persons seated round a circular table or imagined as forming a gathering of this kind.
Often used for a gathering held for discussion only, without a chairman or a formal agenda.
|
from django.contrib.auth.models import User
from django.db import models
from django.utils.timezone import now
import os
class Client(models.Model):
    """An API client registered by a user.

    Each client is identified by a randomly generated ``api_key`` that is
    guaranteed to be unique among all saved clients.
    """
    # Generated automatically on first save; unique per client.
    api_key = models.TextField(unique=True, blank=True)
    app_name = models.TextField()
    user = models.ForeignKey(User)
    date_created = models.DateTimeField(auto_now_add=True)

    def __unicode__(self):
        return u"{0}:{1}".format(self.user.username, self.app_name)

    def _keygen(self, length):
        """Return a random alphanumeric key of ``length`` characters."""
        alphabet = ('0123456789'
                    'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
                    'abcdefghijklmnopqrstuvwxyz')
        # Bug fix: this previously read os.urandom(20) regardless of the
        # requested length, silently ignoring the ``length`` argument.
        junk = os.urandom(length)
        key = [alphabet[ord(j) % len(alphabet)] for j in junk]
        return ''.join(key)

    def save(self, *args, **kwargs):
        """Generate a unique api_key (when missing) before saving.

        Loops until a key is produced that no other Client already uses.
        """
        while True:
            if len(self.api_key) == 0:
                self.api_key = self._keygen(20)
            objs = Client.objects.filter(api_key=self.api_key)
            if len(objs) == 0:
                return super(Client, self).save(*args, **kwargs)
            else:
                # Collision with an existing key: discard and retry.
                self.api_key = ''
class Counter(models.Model):
    """A named daily counter that restarts on the first increment of a new day."""
    name = models.TextField(unique=True)
    count = models.PositiveIntegerField(default=0)
    last_modified = models.DateField(auto_now=True)

    class Meta:
        ordering = ['id']

    def __unicode__(self):
        return u"{0}:{1}".format(self.name, self.count)

    def increment(self):
        """Bump the counter, restarting at 1 when the last change predates today."""
        is_stale = self.last_modified < now().date()
        self.count = 1 if is_stale else self.count + 1
        self.save()

    def reset(self):
        """Zero the counter and persist immediately."""
        self.count = 0
        self.save()
|
You might be fooled into thinking that Chick Downtown only sell womenswear. It's true they do sell womenswear, but there is also a Just For Him section, which caters for the man in your life ladies!
"Our core philosophy is to provide our customers with the finest brands in the world at the ABSOLUTE best pricing in the world.
chickdowntown.com is the premier online retailer for women's fashion - including apparel, handbags, accessories, and shoes.
Featuring the top designers including Vince, Elizabeth and James, Alice + Olivia, Siwy, Joe's Jeans, L.A.M.B., Current Elliott, and many more!
With an ever changing image, chickdowntown.com offers new selections from the hottest designers on a weekly basis.
Check out our sale section for the latest and greatest deals!"
|
"""
Selector
Selector decides whether a picture should be download.
"""
import copy
class Selector:
    """
    Selector middleware implements a queue of selector functions. Candidates
    go through a series of functions and are filtered.
    """

    def __init__(self):
        # Normal selectors: a candidate must pass ALL of them to survive.
        self._selector_queue = []
        # Decisive selectors: a candidate passing ANY of them is selected
        # outright and bypasses the normal selectors.
        self._decisive_selector_queue = []

    def add_normal_selector(self, selector_function):
        """
        Add selector into queue. The selector that is added first will be the
        first to take effect.
        """
        self._selector_queue.append(selector_function)

    def select(self, candidates):
        """
        Select eligible pictures from candidates.

        Candidates accepted by any decisive selector are selected
        immediately; the remainder must pass every normal selector.
        """
        remaining = copy.deepcopy(candidates)
        eligible_pictures = []
        for decisive_selector in self._decisive_selector_queue:
            # Iterate over a snapshot so removal is safe, and only test
            # candidates that were not already decisively selected.  (The
            # original iterated the full input on every pass, which could
            # append duplicates and raise ValueError on the second remove
            # when a candidate matched more than one decisive selector.)
            for candidate in list(remaining):
                if decisive_selector(candidate):
                    eligible_pictures.append(candidate)
                    remaining.remove(candidate)
        for selector in self._selector_queue:
            remaining = [c for c in remaining if selector(c)]
        return remaining + eligible_pictures

    def add_decisive_selector(self, decisive_selector):
        """
        Add decisive selector into queue.
        Picture passing test of any decisive selector will be selected.
        """
        self._decisive_selector_queue.append(decisive_selector)
|
Just about everyone in Leeds has heard of Salvo’s. Opened in 1976, this busy, buzzing trattoria run by the Dammone family has been sending out heaps of Italian and Sicilian favourites ever since. From a lengthy menu you might go for king prawns with chilli or pot-roast beef in red wine, but it’s the pasta dishes and thin, crisp pizzas that people really come to Salvo’s for. In the old days space was at a premium and queuing was a necessity, but that was all swept away in 2011, with an extension next door and a dramatically revamped design. Now, happily, you can book – though they still leave some tables for walk-ins, and anyone waiting can make use of the upstairs bar which is suitably decorated with family photos, a real-live Vespa and a working jukebox. Salvo’s got cool.
Good old Salvo's - probably the best bet on a Sunday evening in Leeds, and full of happy punters creating a good atmosphere to go with the always satisfying, uncomplicated but tasty, well-balanced and well-presented Italian cooking. In the unfortunate absence of the signature dish, Gamberoni Abruzzese, as they had run out of king prawns, we resorted to Queen scallops in the shell with a sheep's cheese base, lemon and rocket, King scallops with ham and a thick pea velouté, and fresh sardines tastily served with grilled paprika. Our disappointment about the prawns was in fact more than made up for by the fact that one of our all-time favourites, swordfish, was on the specials offer, and we jumped at the chance to sample it as it appears on the menu so rarely these days, even in fish restaurants. It was beautifully cooked and came with new potatoes, an agro-dolce vegetable mix, sultanas and pine nuts. One of the party had very good cod, probably done in the water-bath but none the worse for that, with a Sicilian caponata and garlic aïoli. Our desserts were also up to scratch - nothing too unusual, but the Amalfi lemon mousse with a lovely forest fruits sorbet, blood orange syrup and segments, and a delightfully tart chocolate ganache with proper vanilla ice cream brought our meal to a satisfying conclusion. With its friendly, efficient service and consistently enjoyable food, Salvo's could teach some fine-dining places a lesson or two.
Upon initial inspection, Salvos appears to be a friendly, family-owned, slightly classy Italian restaurant at the side of a busy road. This is just a cover. The truth is, Salvos was born to the sounds of thunder in a cobwebbed laboratory by some balding genius who had eaten too much Parmesan cheese before bed. It is a creation, not a business, designed to crush, destroy and laugh-at all the other “italian” restaurants in Headingley and if it had ears, there would be two bloody big bolts behind them. It's fancy, it's friendly and it knows it. Salvos caters to two groups of Headingley diners; people who like cricket and the parents of middle-class-white-students (or is that just one group?). A warning I wish I'd received before my first visit: this is not an eatery reserved for “sugar-daddies”, those are their daughters. The service was experienced, efficient and convivial in all the right ratios, testament again to the genius of the Salvos “creator”. The food, however, lacked the ‘spark’ that was felt elsewhere. It was as though, on that stormy night, when Dr. Salvo-stein watched the bolt of life-giving electricity pulse through the building, it didn't quite reach the kitchen. My aubergine with Ricotta and rocket was nicely smokey, but a little cold and was SCREAMING for a balsamic reduction or a citrus-y dressing to kick it up the bum. My main of traditional pesto spaghetti with pan fried greens was not-so-traditional and, instead, came with pan fried cream. A fellow diner's whitefish chowder tasted good, but looked a little like one of those Sunday morning, Baileys coloured puddles outside “Tequila” or “Tiger Tiger”. I'm exaggerating, but honestly, if I were to go again, I'd just ask for a basket of the amazing complimentary foccacia (from the Salumeria?) and a bucket of oil and vinegar. Alas, it seems we have learned nothing from those gothic tales of artificial creation; we should know not to ask too much from a beast which has been built in a lab. 
Perfection is more easily stumbled upon than manufactured.
|
from django.shortcuts import redirect
from django.utils.cache import add_never_cache_headers
from django.core.signing import TimestampSigner, BadSignature
from django.contrib.auth.models import User
from . import app_settings
from .utils import login, strip_token, get_user_salt
class AutomaticLoginMiddleware(object):
    """Log a user in automatically when a signed token is present in the URL.

    The token (read from the GET parameter named by ``app_settings.KEY``)
    begins with ``<pk>:`` followed by signed timestamp data.  On success the
    user is logged in and redirected to the same URL with the token
    stripped; on any failure the browser is simply redirected to the
    token-stripped URL without a login.
    """
    def process_request(self, request):
        token = request.GET.get(app_settings.KEY)
        # No token in the query string: nothing to do, continue normally.
        if not token:
            return
        # Fallback response: same URL with the token parameter removed.
        r = redirect(strip_token(request.get_full_path()))
        try:
            pk = int(token.split(':', 1)[0])
            # Only change user if necessary. We strip the token in any case.
            # The AnonymousUser class has no 'pk' attribute (#18093)
            if getattr(request.user, 'pk', request.user.id) == pk:
                return r
            user = User.objects.get(pk=pk)
        except (ValueError, User.DoesNotExist):
            # Malformed token or unknown user: just strip the token.
            return r
        try:
            # Verify the token's signature and age before trusting it.
            TimestampSigner(salt=get_user_salt(user)).unsign(
                token, max_age=app_settings.MAX_AGE,
            )
        except BadSignature:
            return r
        response = self.render(
            request,
            user,
            token,
            strip_token(request.get_full_path()),
        )
        # The response depends on who just got logged in; never cache it.
        add_never_cache_headers(response)
        return response
    def render(self, request, user, token, path):
        """
        Log the user in and redirect to ``path`` (the token-stripped URL).

        Subclasses may override this behaviour.
        """
        login(request, user)
        return redirect(path)
|
Well, that's what my friends call me. I've been professionally in the advertising business for about 17 years. Of those, 12 as a Creative Director in advertising agencies in Latin and North America. My primary focus is the transformation of material culture through brand experiences. As a designer I understand the relevance of ergonomics, aesthetics and user experience, while as a marketing creative I recognize the importance of the unexpected and embrace the value of the cultural insights. I have a great passion for technology and innovation. Love pushing the limits of where creativity and technology meet by designing solutions that solve complex business challenges, and by connecting consumers to their products and services at a personal level.
My client experience includes LG, HP, SAP, Texaco, United Nations, NHL, Bacardi, Accenture, Danon, MetLife, Vanguard, NBC Universal, J&J, Nestlé, Time Warner Cable, Pfizer, Humana, Google, Electrolux, Clorox Group, LifeScan, Davidoff, Xerox, ADP and StartUpNY.
Digital Strategy, Design, Creative Direction, Art Direction, Advertising, Branding, Illustration, User Experience, Emerging Technologies, Team Leadership, Talent Recruiting, Team Structuring, Social Media, Information Architecture, Project Planning, Interactive Media Production, Large Scale Application Development.
|
from logging import getLogger
import numpy
from bsread.data.compression import NoCompression, BitshuffleLZ4
_logger = getLogger(__name__)
def deserialize_number(numpy_array):
    """
    Return single value arrays as a scalar.
    :param numpy_array: Numpy array containing a number to deserialize.
    :return: Array or scalar, based on array size.
    """
    # None and multi-element arrays pass through untouched.
    if numpy_array is None or len(numpy_array) != 1:
        return numpy_array
    # Unwrap a one-element array to a plain scalar.
    return numpy_array[0]
def deserialize_string(numpy_array):
    """
    Return string that is serialized as a numpy array.
    :param numpy_array: Array to deserialize (UTF-8 is assumed)
    :return: String.
    """
    raw_bytes = numpy_array.tobytes()
    return raw_bytes.decode()
def serialize_numpy(numpy_number, dtype=None):
    """
    Serialize the provided numpy number into a one-element array.
    :param numpy_number: Number to serialize.
    :param dtype: Ignored. Here just to have a consistent interface.
    :return: Numpy array.
    """
    # The number's own dtype is authoritative; the dtype argument exists
    # only so all serializers share the same signature.
    wrapped = numpy.empty(1, dtype=numpy_number.dtype)
    wrapped[0] = numpy_number
    return wrapped
def serialize_python_number(value, dtype):
    """
    Serialize a python number by wrapping it into a one-element numpy array.
    :param value: Value to serialize.
    :param dtype: Numpy value representation.
    :return: Numpy array.
    """
    wrapped = [value]
    return numpy.array(wrapped, dtype=dtype)
def serialize_python_string(value, dtype):
    """
    Serialize string into numpy array.
    :param value: Value to serialize.
    :param dtype: Dtype to use (UTF-8 is assumed, use u1)
    :return: Numpy array.
    """
    encoded = value.encode()
    return numpy.frombuffer(encoded, dtype=dtype)
def serialize_python_list(value, dtype):
    """
    Convert a python list into an ndarray.
    :param value: List to convert.
    :param dtype: Optional dtype; when None the type is inferred from the
                  list items.
    :return: Numpy array.
    """
    return numpy.asarray(value, dtype=dtype)
# Compression string to compression provider mapping.
# Both a missing header value (None) and the literal "none" mean uncompressed.
compression_provider_mapping = {
    None: NoCompression,
    "none": NoCompression,
    "bitshuffle_lz4": BitshuffleLZ4
}
# Channel type to numpy dtype and serializer mapping.
# channel_type: (dtype, deserializer)
channel_type_deserializer_mapping = {
    # Default value if no channel_type specified.
    None: ("f8", deserialize_number),
    'int8': ('i1', deserialize_number),
    'uint8': ('u1', deserialize_number),
    'int16': ('i2', deserialize_number),
    'uint16': ('u2', deserialize_number),
    'int32': ('i4', deserialize_number),
    'uint32': ('u4', deserialize_number),
    'int64': ('i8', deserialize_number),
    'uint64': ('u8', deserialize_number),
    'float32': ('f4', deserialize_number),
    'float64': ('f8', deserialize_number),
    # Strings travel as raw bytes (u1) and are decoded on deserialization.
    'string': ('u1', deserialize_string),
    # Booleans travel as unsigned bytes.
    'bool': ('u1', deserialize_number)
}
# Value to send to channel type and serializer mapping.
# type(value): (dtype, channel_type, serializer, shape)
channel_type_scalar_serializer_mapping = {
    # Default value if no channel_type specified.
    type(None): ("f8", "float64", serialize_python_number, [1]),
    float: ('f8', "float64", serialize_python_number, [1]),
    int: ('i8', "int64", serialize_python_number, [1]),
    str: ('u1', "string", serialize_python_string, [1]),
    numpy.int8: ('i1', 'int8', serialize_numpy, [1]),
    numpy.uint8: ('u1', 'uint8', serialize_numpy, [1]),
    numpy.int16: ('i2', 'int16', serialize_numpy, [1]),
    numpy.uint16: ('u2', 'uint16', serialize_numpy, [1]),
    numpy.int32: ('i4', 'int32', serialize_numpy, [1]),
    numpy.uint32: ('u4', 'uint32', serialize_numpy, [1]),
    numpy.int64: ('i8', 'int64', serialize_numpy, [1]),
    numpy.uint64: ('u8', 'uint64', serialize_numpy, [1]),
    numpy.float32: ('f4', 'float32', serialize_numpy, [1]),
    numpy.float64: ('f8', 'float64', serialize_numpy, [1]),
}
|
online voucher code 80 Wpengine August 2018 What are the Benefits of Using WP Engine Coupon????
With the intro of the Internet, the entire world has altered right into a Global City. Information is passed into every edge of the world within mins. This increasing appeal provided rise to several information and also material holding sites on the net.
Internet hosting is a service that allows companies and individuals to put data and content on the Internet.
WordPress is the most previously owned content administration system. According to some statistics, it’s utilized by 30.6% of the top 10 million websites for material uploading as well as blog writing.
online voucher code 80 Wpengine August 2018 Just what is the function of hosting??
When the suggestion of owning website and also websites was first introduced, the situation came to be complicated. The idea was truly advantageous but to have a site, it called for special computers which can get the job done. As a result, webhosting service was presented as well as it began to use the services, without the customer requiring the needed framework called for to do the work. In this method, this idea spread.
Nowadays there are many organizing platforms like webBuilder, iPage, Hostgator, WordPress engine etc. From all these, we will be focusing on WP engine vs hostgator online voucher code 80 Wpengine August 2018 .
When it comes to providing holding for WordPress, WP engine is at the top in this area. However, the major problem with WP engine is that it doesn’t sustain any kind of various other CMS aside from WordPress.
WP engine vouchers are offered to obtain price cuts. Despite of the discount rates, WP engine is still much expensive compared to the others.
If you are worried with making use of simply WordPress, WP engine could be a great option as it is maximized for WordPress and also is specifically constructed for the function. Nonetheless many individuals don’t make use of simply WordPress as well as it could be an issue for them to utilize WP engine.
WP engine gives the customer with appealing user interfaces, simple techniques, and one-click procedures. The user interface of WP engine is truly outstanding.
While composing the content or making sites, your current information can be really essential in order to make a record. WP engine, by default, offers a 30-day back-up and also keeps the track. This truly is an excellent feature to appreciate.
Web safety and security is the primary migraine nowadays for the on-line systems. We listen to the information of data violations really much nowadays. WP engine not only provides safety and security with the security code however on top of that, it sends out an additional code to the cell phone in order to permit access to the account.
Transferring the websites to the customers is likewise feasible with WP engine. It is very easy and actually useful when doing mass work.
Below, hostgator is plainly a far better choice as it’s much, much less costly compared to the various other one. WP engine is actually a pricey one. Despite of WP engine coupon discount rate, Hostgator is more affordable. So in this element of WP engine vs hostgator, hostgator is much better.
Here hostgator has an upper side with child hosting and many other unique attributes. WP engine additionally provides several features however taking the rate into consideration, they are not enough.
A search engine is one of the most essential sources to obtain on the internet traffic to your blog site or site. Today, WordPress is the most popular and credible system which supplies 99% SEO friendly functions.
So, it doesn’t matter if you wish to develop a blog or an innovative web site, WordPress will constantly be your front runner. Because WordPress has set its benchmark for being the very best platform to create fully functional websites.
Right here we are telling you the million buck suggestions concerning just how to introduce and also avail the truly incredible advantages of WordPress. Picking the ideal holding website will certainly break the code of making the best use WordPress.
When you look around for grabbing a trusted holding source for your WordPress platform it gets a mind-wobbling experience. There are so lots of organizing carriers out there, it becomes really confusing to arrange via this mess and select up one best organizing source for you.
Picking the most appropriate WordPress hosting plan is an additional crucial factor. Off program, everybody available desires to get hold of the most effective opportunity which is not much heavy on pocket also. Wp Engine provides you wp engine coupon codes, to avail massive discount rates and also save a good-looking amount of your tough made cash.
You simply have to browse the net properly to find some real and fantastic price cuts on your organizing aircraft by the merit of wp engine promotion codes and also wp engine coupon code.
Considering that time is cash and it’s everything about conserving time. If you are conserving your time you are conserving your valuable money. We recommend you to always choose up much less time consuming and also even more satisfying remedies when it comes to picking up a Wp engine coupon code for your very own business or for someone else.
Really, Wp engine will certainly need you to pay couple of additional dollars as compared with the other holding resources which are offering you their services for some economical dollars. Most definitely, this additional quantity off will pay you off in the lengthy run. As your benefit for investment in the Wp Engine coupon, you get an unbelievable conserving of hundreds as well as hundreds of dollars yearly.
With 24/7 effective customer assistance, Wp Engine deserves your investment as compared with various other holding providers like hostgator. Normally, you need to pay to an internet designer a quantity of approx. 100bucks per hour for their job. As well as still after that, if there occurs any type of unfavorable fault on your site, you will certainly remain in loss. Because they will never ever give you with 24/7 assistance facility.
Wp Engine is running their systems with an extraordinary group of effectively educated professionals. They will certainly return to you promptly, the min you report them regarding any type of mistake or issue in your web site. This is a big relief to obtain 24/7 professionals’ support for troubleshooting your troubles quickly.
Yes, this is additionally one of several advantages of spending in Wp Engine as well as Wp Engine Coupon code as compared to spending in other hosting carriers like hostgator. If your internet site obtains hacked also after paying several bucks, clearly it is no excellent for you.
But Wp Engine has actually scaled up its safety showcases to top quality level. They have special security monitoring professionals in their team who are constantly maintaining an eye on bad intruders disrupting your site or blog.
Their continuous scanning for cyberpunks and also malware conserve you from large stress and also loss of cash. Their commitment is guaranteed by their committed services for preventing your sites from concerning 2 countless malicious internet strikes on everyday basis.
When we are speaking regarding the on-line world, its all about the time. And also below “time equals money”. Your customer will never ever favor to squander their time on your page if your website or blog site takes couple of added secs to totally load. There you shed your online site visitors resulting in massive loss of money. You could get hold of a lot more and also more online traffic to your internet site if you obtain it maximized effectively. Proper optimization of your website ensures fast tons time as well as pleased customers and inevitably the pleased you. A lot more site visitors indicate even more income and sales.
Wp Engine as well as Wp Engine coupon code offer give you with this benefit of enhanced and optimized rate generating more loan for you.
You could effortlessly see numerous promotion codes and complimentary coupon websites when you are searching the net. All those appealing offers on those discount coupons are mostly invalid.
You have to browse the right as well as authentic internet sites to locate the legitimate Wp Engine Coupon code. Now if you have to identify which site is really genuine, we recommend you focus on their testimonials. The site with even more positive testimonials is extra reputable and also that’s specifically where you will end up locating the valid and also actual Wp Engine Coupon code.
In the end, we extremely recommend you to locate the best Wp Engine coupon codes for your website. Make your on-line existence safe and obvious with a reliable organizing company.
|
from agua.config import get_btr_columns
from agua.utils import get_check_function
from agua.validators import EMPTY_VALUES
def evaluate(data, config):
    """
    Run every configured comparison over the rows of ``data``.

    For each config entry, the test column of every row is compared against
    the base column (optionally split on a separator, where any matching
    piece counts as success).  Each row gets its result column set, and a
    per-column tally of attempted/successful comparisons is returned.
    """
    result = []
    for c in config:
        column, test_column, result_column = get_btr_columns(c)
        check_function = get_check_function(c['comparator'])
        kwargs = c.get('kwargs', {})
        separator = c.get('separator')
        tally = {'attempted': 0, 'success': 0}
        for row in data:
            outcome = None
            test_value = row[test_column]
            if test_value not in EMPTY_VALUES:
                tally['attempted'] += 1
                if separator:
                    base_values = row[column].split(separator)
                else:
                    base_values = [row[column]]
                for base_value in base_values:
                    outcome = check_function(base_value, test_value, **kwargs)
                    if outcome:
                        break
            if outcome:
                tally['success'] += 1
            row[result_column] = outcome
        result.append(tally)
    return {'data': data, 'result': result}
|
Just picked her up last night. 08 Club Car Precedent. Looks like it has decently been cared for. This is my first golf cart so I have a lot to learn. Seems like she has a couple issues but hopefully nothing major.
Sorry, posted too quick to attach files.
Nice looking cart. There's lots of knowledge here to help get you through most problems you'll run across.
|
import sqlite3
"""
This module is used to obtain the name of the starting malware tested in each log file.
Malware process names are the first 14 characters of the md5, the log file name is actually the uuid.
"""
# Schema of the panda sandbox SQLite database queried below.
db_name = 'panda.db'
table_name = 'samples'
column1 = 'uuid'  # log file name
column2 = 'filename'  # original sample file name (not used by the queries below)
column3 = 'md5'  # md5 hash of the sample
def acquire_malware_file_dict(dir_database_path):
    """
    Read the panda database file (SQLite) and return a dictionary mapping
    panda log file names (uuids) to malicious process names (the first 14
    characters of the md5 hashes).
    :param dir_database_path: directory that contains the panda.db file
    :return: dict mapping uuid -> first 14 characters of the md5
    """
    conn = sqlite3.connect(dir_database_path + '/' + db_name)
    try:
        c = conn.cursor()
        c.execute('SELECT {col1},{col2} FROM {tn}'.format(tn=table_name, col1=column1, col2=column3))
        # Truncate each md5 to 14 characters: that is how the malware
        # process is named inside the sandbox logs.
        return {row[0]: row[1][:14] for row in c.fetchall()}
    finally:
        # Always release the handle; the original leaked the connection
        # whenever the query raised.
        conn.close()
def acquire_malware_file_dict_full(dir_database_path):
    """
    Read the panda database file (SQLite) and return a dictionary mapping
    panda log file names (uuids) to malicious process names (full md5
    hashes).
    :param dir_database_path: directory that contains the panda.db file
    :return: dict mapping uuid -> full md5 hash
    """
    conn = sqlite3.connect(dir_database_path + '/' + db_name)
    try:
        c = conn.cursor()
        c.execute('SELECT {col1},{col2} FROM {tn}'.format(tn=table_name, col1=column1, col2=column3))
        return {row[0]: row[1] for row in c.fetchall()}
    finally:
        # Always release the handle; the original leaked the connection
        # whenever the query raised.
        conn.close()
|
GET YOUR ENTRY IN FOR FREE COFFEE & GOODYBAG!
Get in the mood for the 24Hour event in January and enjoy a nightride on Delvera & neighbouring trails before sunset to return to Dirtopia Café in the dark. So yes, you need lights, a good spirit and bring some friends!
Goodybag & free coffee for pre-entries!
The trail will get a different feel when the sun has set! We’ll ride some singletrack, jeeptrack and new section as well.
General info: Drinks at the finish & and riders will be timed with number boards issued at registration. Results issued, but this is a fun social event!
|
# -*- coding: utf-8 -*-
# Copyright 2014 Telefonica Investigación y Desarrollo, S.A.U
#
# This file is part of FI-WARE project.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
#
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and
# limitations under the License.
#
# For those usages not covered by the Apache version 2.0 License please
# contact with opensource@tid.es
__author__ = 'henar'
import httplib
from xml.dom.minidom import parse, parseString
from urlparse import urlparse
import sys
import json
import httplib
import mimetypes
def post_multipart(host, port, selector, fields, files):
    """POST `fields` and `files` to host:port/selector as multipart/form-data.

    :param host: target host name
    :param port: target port number
    :param selector: request path on the server
    :param fields: sequence of (name, value) tuples for regular form fields
    :param files: sequence of (filename, value) tuples for file uploads
    :return: the raw response body as a string
    """
    content_type, body = encode_multipart_formdata(fields, files)
    # httplib.HTTP is the legacy Python 2 API (this module is Python 2:
    # print statements, httplib/urlparse imports at the top of the file).
    h = httplib.HTTP(host, port)
    h.putrequest('POST', selector)
    h.putheader('content-type', content_type)
    h.putheader('content-length', str(len(body)))
    h.endheaders()
    h.send(body)
    # getreply() returns (status code, reason, headers); only the code is shown.
    errcode, errmsg, headers = h.getreply()
    print errcode
    return h.file.read()
def encode_multipart_formdata(fields, files):
    """Build a multipart/form-data request body.

    :param fields: sequence of (name, value) tuples for plain form fields
    :param files: sequence of (filename, value) tuples; note the filename is
        used both as the form-field *name* and as the reported file name
    :return: (content_type, body) tuple ready to send in an HTTP POST
    """
    # NOTE(review): '100' is used as the MIME boundary; such a short boundary
    # could collide with payload content -- confirm it is safe for the data
    # being uploaded.
    LIMIT = '100'
    dd = '\r\n'
    L = []
    for (key, value) in fields:
        L.append('--' + LIMIT)
        L.append('Content-Disposition: form-data; name="%s"' % key)
        L.append('')
        L.append(value)
    print files
    for (filename, value) in files:
        L.append('--' + LIMIT)
        L.append('Content-Disposition: form-data; name="%s"; filename="%s"' % (filename, filename))
        L.append('Content-Type: %s' % get_content_type(filename))
        L.append('')
        L.append(value)
    # Closing boundary plus trailing CRLF.
    L.append('--' + LIMIT + '--')
    L.append('')
    print L
    body = dd.join(L)
    content_type = 'multipart/form-data; boundary=%s' % LIMIT
    return content_type, body
def get_content_type(filename):
    """Guess the MIME type for *filename*, defaulting to a generic binary type."""
    guessed, _encoding = mimetypes.guess_type(filename)
    if guessed is None:
        return 'application/octet-stream'
    return guessed
def __do_http_req(method, url, headers, payload):
    """Open a connection to the URL's host and issue a single HTTP request.

    :return: the httplib response object (unread)
    """
    target = urlparse(url)
    connection = httplib.HTTPConnection(target.netloc)
    connection.request(method, target.path, payload, headers)
    return connection.getresponse()
##
## Helper performing an HTTP GET
##
def get(url, headers):
    """Send a GET request to *url* with *headers* and return the response."""
    response = __do_http_req("GET", url, headers, None)
    return response
def delete(url, headers):
    """Send a DELETE request to *url* with *headers* and return the response."""
    response = __do_http_req("DELETE", url, headers, None)
    return response
##
## Helper performing an HTTP PUT (module-private)
##
def __put(url, headers):
    """Send a body-less PUT request to *url* and return the response."""
    response = __do_http_req("PUT", url, headers, None)
    return response
##
## Helper performing an HTTP POST
##
def post(url, headers, payload):
    """Send a POST request carrying *payload* to *url* and return the response."""
    response = __do_http_req("POST", url, headers, payload)
    return response
def get_token(keystone_url, tenant, user, password):
    """Authenticate against Keystone and return the token id.

    Sends tenant/user/password credentials as JSON, requests an XML reply,
    and extracts the ``id`` attribute of the ``token`` element.
    Exits the process (sys.exit(1)) on any failure.
    """
    # url="%s/%s" %(keystone_url,"v2.0/tokens")
    print keystone_url
    headers = {'Content-Type': 'application/json',
               'Accept': "application/xml"}
    # NOTE(review): credentials are spliced into the JSON body by string
    # concatenation; quotes or backslashes in the values would corrupt the
    # payload -- confirm inputs are trusted.
    payload = '{"auth":{"tenantName":"' + tenant + '","passwordCredentials":{"username":"' + user + '","password":"' + password + '"}}}'
    print payload
    response = post(keystone_url, headers, payload)
    data = response.read()
    ## If the response is OK, parse the XML body; otherwise abort.
    if response.status != 200:
        print 'error to obtain the token ' + str(response.status)
        sys.exit(1)
    else:
        dom = parseString(data)
        try:
            # Take the first <token> element and return its "id" attribute.
            result = (dom.getElementsByTagName('token'))[0]
            var = result.attributes["id"].value
            return var
        # NOTE(review): bare except hides the real parsing error; at minimum
        # this should catch specific exceptions and log the cause.
        except:
            print ("Error in the processing enviroment")
            sys.exit(1)
def processTask(headers, taskdom):
    """Poll a task resource until it leaves the RUNNING state.

    :param headers: HTTP headers (including the auth token) used for polling
    :param taskdom: task dictionary containing '@href' and '@status' keys
    :return: the final task status string; exits the process on any exception
    """
    try:
        print taskdom
        href = taskdom["@href"]
        status = taskdom["@status"]
        # Busy-poll the task href until the status changes from RUNNING.
        while status == 'RUNNING':
            data1 = get_task(href, headers)
            data = json.loads(data1)
            status = data["@status"]
        if status == 'ERROR':
            # NOTE(review): reads the error details from the ORIGINAL taskdom,
            # not from the freshly polled `data` -- confirm this is intended.
            error = taskdom["error"]
            message = error["message"]
            majorErrorCode = error["majorErrorCode"]
            print "ERROR : " + message + " " + majorErrorCode
        return status
    # NOTE(review): bare except + exit discards the traceback; narrower
    # exception handling would make failures diagnosable.
    except:
        print "Unexpected error:", sys.exc_info()[0]
        sys.exit(1)
def get_task(url, headers):
    """Fetch the current task representation from *url*.

    :param url: task href to poll
    :param headers: HTTP headers (including the auth token)
    :return: raw response body (JSON text); exits the process on non-200
    """
    # url="%s/%s" %(keystone_url,"v2.0/tokens")
    response = get(url, headers)
    ## If the response is OK, return the body; otherwise abort the script.
    if response.status != 200:
        print 'error to obtain the token ' + str(response.status)
        sys.exit(1)
    else:
        data = response.read()
        return data
|
I don’t believe it! I have a day to play in the kitchen so enjoy this Pin I created! I took this photo at a western-theme wedding reception. OK, I gotta go now and have some foodie fun!
|
# -- coding: utf-8 --
# encoding=utf8
import requests, os, time, random, csv
from lxml import html
from lxml.html.clean import Cleaner
# Landing page listing the record links, and the base URL used to resolve
# the relative hrefs found there.
url = 'http://www.cittametropolitana.mi.it/cultura/progetti/integrando/cd-online/htm/tab_riassuntiva.htm'
base_url = 'http://www.cittametropolitana.mi.it/cultura/progetti/integrando/cd-online'
# Strip <style> blocks and rewrite links, but keep page structure and all
# attributes so the text extraction in main() still sees the full document.
cleaner = Cleaner(style=True, links=True, add_nofollow=True, page_structure=False, safe_attrs_only=False)
def main():
    """Scrape every record page linked from the summary table and dump the
    cleaned text, one record per line, to the 'cittametropolitana' file.

    Line format: text nodes joined by '$$$', with CR encoded as '###' and
    LF encoded as '%%%'. Errors are reported and swallowed.
    """
    try:
        headers = {'User-Agent': 'Mozilla/5.0'}
        index_page = requests.get(url, headers=headers)
        index_tree = html.fromstring(index_page.content)
        hrefs = index_tree.xpath("//a[contains(@href, 'javascript:openBrWindow')]/@href")
        with open('cittametropolitana', 'w') as out:
            for href in hrefs:
                # Extract the relative path embedded in the javascript href.
                start = href.find("/") + 1
                end = href.find(".htm") + 4
                record_link = base_url + "/" + href[start:end]
                record_page = requests.get(record_link, headers=headers)
                record_tree = html.fromstring(cleaner.clean_html(record_page.content))
                texts = record_tree.xpath("*//text()[normalize-space()]")
                record = "$$$".join(texts).replace("\r", "###").replace("\n", "%%%").strip()
                out.write(record + "\n")
    except Exception as e:
        print(e.__doc__)
        print(e.args)
if __name__ == '__main__':
    main()
|
I was caught by surprise by the question. Not because I couldn’t think of anything to be thankful for but, I was struck by another question that emerged in my mind. I must have appeared dazed to my kindly questioner because their smile turned into a puzzled look.
Thanksgiving is tomorrow. If you are on Facebook or Twitter, you’ve already noticed Thanksgiving themed posts streaming from the news feeds. Thanksgiving is taking over! In the midst of the turkey, stuffing, Black Friday and football, I think I found the real question behind Thanksgiving.
From the breath in my lungs to the love of my family and friends, where do I begin?
Is it that moment on a cool fall day when my face is warmed by the sun?
Is it for that moment when I get home from a long day and the love within washes over me?
Is it when I awake before dawn and realize I’m alive?
Is it when a friend passes through my life one last time before leaving this life behind?
Is it wrestling with my children on the living room floor?
What about the food on the table?
Is it the healing in a life of someone I love?
Is it for the calling on my life where I glimpse all of its parts masterfully woven together?
Is it for a loving letter from my mother?
The embrace of my wife?
The laughter of my children?
The chase that is my life?
The same place I begin is the same place I end.
The grace of my Savior, my brother and friend.
When I was in college, I was preoccupied with stuff. In fact, I attended college to learn how to make money to get my hands on the best kind and most expensive stuff. I wanted the nicest clothes, cars, food, vacations and toys that money could buy. I wanted to make a lot of money, pursue pleasure and comfort. Note: There is nothing wrong with nice things, money or stuff. It all comes down to how I relate to stuff, the significance I place on it and where I derive my self-worth.
Below is a video interview for Christ Chapel Bible Church Christmas 2012 sharing our stewardship journey.
Lance and Kathryn Cashion-QuickTime H.264 from Christ Chapel Bible Church on Vimeo.
In Tithing Part 2, I explained the mechanics and process of tithing from my perspective. Remember our focus should be on God through faithful stewardship. Tithing is not ‘Tipping God’; it is an act of obedience and worship that reminds us of our place in God’s economy. I can find no other instance in Scripture where God encourages us to test him except in regard to the Tithe.
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import cgi
import sqlite3
import time
import config
def valid(qs):
    """Return True when the query-string dict contains every required key.

    :param qs: mapping of form-field name to value (e.g. built from
        cgi.FieldStorage)
    :return: True if all required keys are present, False otherwise
    """
    required_keys = ['title', 'comment', 'posted_by', 'localite', 'latitude', 'longitude']
    # `k in qs` replaces dict.has_key(), which was removed in Python 3;
    # the `in` operator works identically on Python 2 mappings.
    return all(k in qs for k in required_keys)
def post(title, comment, posted_by, localite, latitude, longitude):
    """Insert a new row into the posts table.

    Timestamps are stored as epoch milliseconds; rate starts at 0 and
    updated_at mirrors created_at on creation.

    :param title: post title
    :param comment: post body
    :param posted_by: author name
    :param localite: locality name
    :param latitude: latitude value
    :param longitude: longitude value
    """
    rate = 0
    created_at = int(time.time()*1000)
    updated_at = created_at
    sql = u'insert into posts (id, title, comment, posted_by, localite, rate, latitude, longitude, created_at, updated_at) values (null,?,?,?,?,?,?,?,?,?);'
    # isolation_level=None puts sqlite3 in autocommit mode, so the INSERT is
    # durable without an explicit commit().
    con = sqlite3.connect(config.db_path, isolation_level=None)
    try:
        con.execute(sql, (title, comment, posted_by, localite, rate, latitude, longitude, created_at, updated_at))
    finally:
        # Close the connection even if the INSERT raises, so the handle
        # is never leaked by this CGI request.
        con.close()
if __name__ == '__main__':
    import utils
    # Parse the CGI form data into a plain dict (Python 2 CGI script:
    # print statement, str.decode below).
    qs = utils.fs2dict(cgi.FieldStorage())
    if valid(qs):
        # Decode the raw form values and pass them positionally to post();
        # the order here must match post()'s parameter order.
        keys = ['title', 'comment', 'posted_by', 'localite', 'latitude', 'longitude']
        query_string = [qs[k].decode('utf-8') for k in keys]
        post(*query_string)
        result = '{"message": "Successfully posted!"}'
    else:
        result = '{"message": "Invalid query string"}'
    # Emit the CGI headers followed by the JSON result body.
    utils.cgi_header()
    print result
|
TUKWILA — On a rainy Tuesday afternoon, Chad Peters is picking up oil from the Din Tai Fung restaurant at Southcenter mall.
The SeQuential Pacific Biodiesel vacuum truck driver wears his long, salt-and-pepper hair in a ponytail to keep it out of the way, as he wheels one of the restaurant’s two plastic oil collection vats out into the chilly, spitting rain.
Peters isn’t thinking much about what is going on an hour’s drive south at the Legislature in Olympia — but his bosses are.
Here’s why: The cooking oil he is collecting into his 1,500-gallon vacuum truck will get a second life as biodiesel, an alternative fuel made partially of vegetable oil or animal fats that have a distinctly lower carbon impact than traditional diesel made solely of petroleum. That means biodiesel also doesn’t pollute as much when it’s burned.
But here’s the catch: The biodiesel-to-be that is being sucked into Peters’ truck, like much of what’s collected in Washington, will help reduce climate-wrecking carbon emissions not in the Evergreen State, but across the border in Oregon. There, lawmakers have required progressive reductions in the amount of fossil fuel allowed in gasoline and diesel, hitting a 10 percent drop by 2025.
In other words, in Oregon — as in California and British Columbia — the used cooking oil commands a hefty premium over its price in Washington, where there is no such requirement. You might say the used oil flows toward the money.
This year that may change, though. And it could be a good thing for Washington’s economy.
An hour from Olympia, a maze of industrial pipes feeding storage tanks the size of small apartment houses looms over Grays Harbor on Washington’s central Pacific coast.
This industrial facility was the second-largest biodiesel producer in the nation last year.
The Renewable Energy Group plant employs 40 people in Hoquiam, a town hard-hit by the one-two punch of a withered timber industry and the opiate addiction crisis. Here, those 40 jobs really matter.
REG and Phillips 66 have announced plans to build a similar renewable fuel plant near Bellingham, in Whatcom County, and the companies are still calculating whether to move ahead later this year.
The answer may depend on what the Washington lawmakers do about House Bill 1110, which won committee approval from majority Democrats on the House Environment and Energy Committee during just the second week of the 105-day legislative session. But some Republicans question whether the state should intervene in energy markets, suggesting consumers could pay more.
REG spokesmen say a decision on the Whatcom County plant is due later this year. It would turn out so-called renewable diesel, which it now produces from used food oils, animal-fat wastes and canola at a similar Louisiana plant and sends to West Coast markets like California.
Though REG and Phillips are basing their decision on current markets, the prospect of increased demand in Washington could nudge their decision toward a “yes” if the Washington legislation passes.
If Washington joins Oregon, California and British Columbia with a fuel-blend mandate, Hartwig said "it definitely reinforces our decision."
The low-carbon fuels standard is also being considered in a Senate bill, SB 5412, sponsored by Sen. Rebecca Saldaña, D-Seattle. It got a hearing on Wednesday in Olympia, where Ian Hill, co-founder of SeQuential, told senators Washington is losing out.
Some 17 percent of California’s diesel is biodiesel and that figure is about 7 percent in Oregon, while in Washington it’s just one-half of 1 percent, Hill said in testimony Wednesday.
Environmentalists, other biofuel producers and electric car-maker groups are backing the legislation. Keeping the fuels in Washington state would promote jobs and investments in this state, advocates say.
Some 43 percent of Washington's greenhouse gas emissions come from the transportation sector, with a large share of that from road vehicles targeted by legislation, as Rep. Joe Fitzgibbon, D-Burien, lead sponsor of HB 1110, pointed out at a hearing on his bill earlier in the month.
The legislation would create a somewhat complicated system of tradeable credits for cleaner fuels that can offset the debits associated with the full life-cycle greenhouse-gas pollution of dirty fuels.
"It's a tough problem to solve so the solution is going to be a little bit complicated," Fitzgibbon said, adding that the system to be devised by the state Department of Ecology would be "technology neutral." In other words, the system would leave it up to fuel makers which kinds of cleaner fuel credits they buy or create.
Fitzgibbon said that fuel makers and distributors whose product does not meet fuel-blend standards could buy credits from fuel makers whose products do — with credits calculated to reflect the life-cycle carbon content or intensity of the fuels. Those earning credits could include makers of biofuels, meaning biodiesel and renewable diesel producers, as well as producers of renewable natural gas taken from landfills or farm digesters. Similarly, credits could be earned by utilities that generate electricity used for electric or plug-in hybrid cars, if they are able to show this use.
The credits and deficits system should help promote electrification of road car fleets, according to Fitzgibbon.
The oil industry has long opposed state efforts to require or mandate blend standards. But while it fended off action in Washington until this year, it lost political battles in California, Oregon and British Columbia.
Lobbyist Jessica Spiegel of the Sacramento-based Western States Petroleum Association testified against HB 1110 earlier this month, telling a committee in Olympia that consumers would pay more if a fuel standard or mandate is passed.
Spiegel did not make the same doomsday claims her industry made in California and Washington a few years ago that a fuel standard could boost fuel costs by more than a dollar per gallon.
But Spiegel cited a report late last year from industry consultant Stillwater Associates that claimed California's 2011 standard has jacked up the price of gasoline by more than 13 cents per gallon. The California Air Resources Board offered a similar estimate of 13.5 cents for gasoline and about 16 cents more for diesel.
However, neither the Stillwater Report nor the CARB calculations reflect that fuel producers may not be passing on all the extra costs. The Stillwater report suggests that gasoline prices could end up 36 cents a gallon higher in California by the time that state reaches its goal in 2030 of cutting the carbon intensity of road fuels by 20 percent.
The implication is that Washington, which has the second highest state gas tax at more than 49 cents per gallon, could see the same additional increase.
The industry also warns that fuel stocks needed to produce biofuel blends may not yet exist in sufficient quantities to let the state reach its biofuels goals on the gasoline side. Past efforts to convert cellulosic fiber from waste products for ethanol, for example, have not resulted in local production, Spiegel said in an interview.
Backers of biofuels say cost impacts at the fuel pump may be much lower for consumers, citing Oregon state statistics. And they say there are additional environmental and economic benefits if Washington-produced fuels are used closer to home.
Whatever state lawmakers decide, some parties in the oil industry may be getting reconciled to a lower-carbon future for its fuels.
BP — Phillips 66's refining rival in Whatcom County — already opened its own renewable diesel plant adjacent to its Cherry Point refinery last year. The BP plant produces biodiesel from waste or biomass products such as used cooking oil and animal fats, and its process lets the company mix the less-carbon-emitting biomix into regular diesel without a change in performance.
The moves by BP, Phillips and REG suggest that availability of renewable fuel stocks is less than a deal-killer for the industry. REG’s Hoquiam plant, for example, imports canola oil from the Midwest. And state Department of Commerce spokesmen say fuel stocks are ample. The Union of Concerned Scientists has also put out a report showing Washington’s targets could be achieved.
State legislators are not so concerned either — at least not majority Democrats backing the Fitzgibbon bill. His proposal requires carbon intensity in road fuels be cut by 10 percent by 2028 and by 20 percent by 2035. The measure also would encourage investments in electric car and charging stations as offsets for fuel intensity.
HB 1110 received its first OK in committee last week with support from six of the seven Democrats on the environmental panel and no ayes from the GOP. One member voted against the policy and several others were neutral.
Rep. Richard DeBolt, R-Chehalis, was among the neutral votes, expressing some support for the policy going forward. DeBolt, who is the GOP point person on carbon in the House, also said he didn't think the transportation sector is moving quickly enough toward electrification.
Republicans have largely been seeking incentives rather than mandates to reach climate policy goals.
And clean-fuel critics like Todd Myers of Washington Policy Center think tank argue that a fuels mandate is not the most efficient or cost-effective way to cut carbon emissions from the road sector.
But a long list of interest groups has weighed in with support for the rule, including electric car sellers, environmentalists and biofuels makers like REG and Portland-based SeQuential, which produces some 30 million gallons of biodiesel a year after its merger last year with a Bakersfield, Calif., firm.
Like REG, SeQuential is a big player in collecting restaurant oil wastes, serving an estimated 30 percent of restaurants in Washington.
Hill, the SeQuential co-founder, said in a phone interview that the company collects used cooking oil from up and down the West Coast, and ships most of its product to consumers in Oregon, as well as to California and British Columbia. Both California and British Columbia have policies similar to Oregon’s, which makes the respective markets lucrative. But Washington state doesn’t have this policy, which is why SeQuential only ships a fraction of its biodiesel to consumers in the state, Hill said. It’s simply not that profitable.
"We will continue to operate in Washington state as a feedstock company, and we’ll continue to work hard to grow in Washington state, and be a part of the economy there, but as far as being able to supply low-carbon biodiesel fuel options in a state without a carbon value, it just won’t be feasible," said Hill. "And that’s true for the entire industry."
The same factors are why south Seattle-based General Biodiesel doesn’t sell to any consumers in the state, the company’s CEO and Chairman Jeff Haas said.
Because of the policies in place in Oregon and California — General Biodiesel’s market — it’s still better financially for the company to pay for the shipping costs required to transport the fuel to those states than it is to try to sell to companies in Washington state. Without a similar policy here, there is almost no demand from within the state, he said.
“If Washington creates a competitive environment for biofuels, I would love to sell every drop we produce in Washington,” Haas said in a telephone interview.
The fuel-blend mandate is one of several major climate proposals backed by Democratic Gov. Jay Inslee, who has indicated that climate change policies would be a top campaign priority if he decides to seek his party's presidential nomination.
The House legislation is still a long way from becoming law. It heads now to the House Transportation Committee, after which it would have to go through the Rules Committee, be passed by the full House, and then approved by the Senate in a similar succession of hearings and votes. In the Senate, the legislation still awaits an initial committee approval.
InvestigateWest is a Seattle-based nonprofit newsroom producing journalism for the common good.
Carolyn Bick is a freelance journalist based in South Seattle.
|
# This file is part of Maker Keeper Framework.
#
# Copyright (C) 2017-2018 reverendus
# Copyright (C) 2018 bargst
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import math
from functools import total_ordering, reduce
from decimal import *
# Shared Decimal context for all fixed-point math below: very high precision
# so huge integers survive intermediate arithmetic, and ROUND_DOWN so
# quantize(1, ...) truncates instead of rounding to nearest.
_context = Context(prec=1000, rounding=ROUND_DOWN)
@total_ordering
class Wad:
    """Represents a number with 18 decimal places.
    `Wad` implements comparison, addition, subtraction, multiplication and division operators. Comparison, addition,
    subtraction and division only work with other instances of `Wad`. Multiplication works with instances
    of `Wad` and `Ray` and also with `int` numbers. The result of multiplication is always a `Wad`.
    `Wad`, along with `Ray`, are the two basic numeric types used by Maker contracts.
    Notes:
        The internal representation of `Wad` is an unbounded integer, the last 18 digits of it being treated
        as decimal places. It is similar to the representation used in Maker contracts (`uint128`).
    """
    def __init__(self, value):
        """Creates a new Wad number.
        Args:
            value: an instance of `Wad`, `Ray` or an integer. In case of an integer, the internal representation
                of Maker contracts is used which means that passing `1` will create an instance of `Wad`
                with a value of `0.000000000000000001'.
        """
        if isinstance(value, Wad):
            self.value = value.value
        elif isinstance(value, Ray):
            # Ray carries 27 decimals; drop 9 digits (truncated by _context).
            self.value = int((Decimal(value.value) // (Decimal(10)**Decimal(9))).quantize(1, context=_context))
        elif isinstance(value, Rad):
            # Rad carries 45 decimals; drop 27 digits (truncated by _context).
            self.value = int((Decimal(value.value) // (Decimal(10)**Decimal(27))).quantize(1, context=_context))
        elif isinstance(value, int):
            # assert(value >= 0)
            self.value = value
        else:
            raise ArithmeticError
    @classmethod
    def from_number(cls, number):
        """Alternate constructor from a regular number, e.g. ``Wad.from_number(1.5)``."""
        # assert(number >= 0)
        # Convert through str() so binary-float representation artifacts are
        # not amplified by the 10**18 scaling.
        pwr = Decimal(10) ** 18
        dec = Decimal(str(number)) * pwr
        return Wad(int(dec.quantize(1, context=_context)))
    def __repr__(self):
        return "Wad(" + str(self.value) + ")"
    def __str__(self):
        # Zero-pad so a decimal point can always be inserted 18 digits from
        # the right, then fix "-." produced for small negative values.
        tmp = str(self.value).zfill(19)
        return (tmp[0:len(tmp)-18] + "." + tmp[len(tmp)-18:len(tmp)]).replace("-.", "-0.")
    def __add__(self, other):
        """Wad + Wad only; anything else raises ArithmeticError."""
        if isinstance(other, Wad):
            return Wad(self.value + other.value)
        else:
            raise ArithmeticError
    def __sub__(self, other):
        """Wad - Wad only; anything else raises ArithmeticError."""
        if isinstance(other, Wad):
            return Wad(self.value - other.value)
        else:
            raise ArithmeticError
    def __mod__(self, other):
        """Wad % Wad only; anything else raises ArithmeticError."""
        if isinstance(other, Wad):
            return Wad(self.value % other.value)
        else:
            raise ArithmeticError
    # z = cast((uint256(x) * y + WAD / 2) / WAD);
    def __mul__(self, other):
        """Multiply by Wad, Ray, Rad or int; the result is always a Wad.
        The product is rescaled by the other operand's decimal count and
        truncated via _context (ROUND_DOWN).
        """
        if isinstance(other, Wad):
            result = Decimal(self.value) * Decimal(other.value) / (Decimal(10) ** Decimal(18))
            return Wad(int(result.quantize(1, context=_context)))
        elif isinstance(other, Ray):
            result = Decimal(self.value) * Decimal(other.value) / (Decimal(10) ** Decimal(27))
            return Wad(int(result.quantize(1, context=_context)))
        elif isinstance(other, Rad):
            result = Decimal(self.value) * Decimal(other.value) / (Decimal(10) ** Decimal(45))
            return Wad(int(result.quantize(1, context=_context)))
        elif isinstance(other, int):
            return Wad(int((Decimal(self.value) * Decimal(other)).quantize(1, context=_context)))
        else:
            raise ArithmeticError
    def __truediv__(self, other):
        """Wad / Wad only; scales the numerator up by 10**18 before dividing."""
        if isinstance(other, Wad):
            return Wad(int((Decimal(self.value) * (Decimal(10) ** Decimal(18)) / Decimal(other.value)).quantize(1, context=_context)))
        else:
            raise ArithmeticError
    def __abs__(self):
        return Wad(abs(self.value))
    def __eq__(self, other):
        if isinstance(other, Wad):
            return self.value == other.value
        else:
            raise ArithmeticError
    def __hash__(self):
        return hash(self.value)
    def __lt__(self, other):
        # total_ordering derives <=, >, >= from __lt__ and __eq__.
        if isinstance(other, Wad):
            return self.value < other.value
        else:
            raise ArithmeticError
    def __int__(self):
        # Truncates the fractional part.
        return int(self.value / 10**18)
    def __float__(self):
        return self.value / 10**18
    def __round__(self, ndigits: int = 0):
        return Wad(round(self.value, -18 + ndigits))
    def __sqrt__(self):
        # Not a Python protocol method -- math.sqrt() will not call this;
        # it must be invoked explicitly. Goes through float, so precision
        # is limited to float precision.
        return Wad.from_number(math.sqrt(self.__float__()))
    @staticmethod
    def min(*args):
        """Returns the lower of the Wad values"""
        return reduce(lambda x, y: x if x < y else y, args[1:], args[0])
    @staticmethod
    def max(*args):
        """Returns the higher of the Wad values"""
        return reduce(lambda x, y: x if x > y else y, args[1:], args[0])
@total_ordering
class Ray:
    """Represents a number with 27 decimal places.
    `Ray` implements comparison, addition, subtraction, multiplication and division operators. Comparison, addition,
    subtraction and division only work with other instances of `Ray`. Multiplication works with instances
    of `Ray` and `Wad` and also with `int` numbers. The result of multiplication is always a `Ray`.
    `Ray`, along with `Wad`, are the two basic numeric types used by Maker contracts.
    Notes:
        The internal representation of `Ray` is an unbounded integer, the last 27 digits of it being treated
        as decimal places. It is similar to the representation used in Maker contracts (`uint128`).
    """
    def __init__(self, value):
        """Creates a new Ray number.
        Args:
            value: an instance of `Ray`, `Wad` or an integer. In case of an integer, the internal representation
                of Maker contracts is used which means that passing `1` will create an instance of `Ray`
                with a value of `0.000000000000000000000000001'.
        """
        if isinstance(value, Ray):
            self.value = value.value
        elif isinstance(value, Wad):
            # Wad carries 18 decimals; scale up by 9 digits to reach 27.
            self.value = int((Decimal(value.value) * (Decimal(10)**Decimal(9))).quantize(1, context=_context))
        elif isinstance(value, Rad):
            # Rad carries 45 decimals; drop 18 digits (truncated by _context).
            self.value = int((Decimal(value.value) / (Decimal(10)**Decimal(18))).quantize(1, context=_context))
        elif isinstance(value, int):
            # assert(value >= 0)
            self.value = value
        else:
            raise ArithmeticError
    @classmethod
    def from_number(cls, number):
        """Alternate constructor from a regular number, e.g. ``Ray.from_number(1.5)``."""
        # assert(number >= 0)
        # Convert through str() so binary-float representation artifacts are
        # not amplified by the 10**27 scaling.
        pwr = Decimal(10) ** 27
        dec = Decimal(str(number)) * pwr
        return Ray(int(dec.quantize(1, context=_context)))
    def __repr__(self):
        return "Ray(" + str(self.value) + ")"
    def __str__(self):
        # Zero-pad so a decimal point can always be inserted 27 digits from
        # the right, then fix "-." produced for small negative values.
        tmp = str(self.value).zfill(28)
        return (tmp[0:len(tmp)-27] + "." + tmp[len(tmp)-27:len(tmp)]).replace("-.", "-0.")
    def __add__(self, other):
        """Ray + Ray only; anything else raises ArithmeticError."""
        if isinstance(other, Ray):
            return Ray(self.value + other.value)
        else:
            raise ArithmeticError
    def __sub__(self, other):
        """Ray - Ray only; anything else raises ArithmeticError."""
        if isinstance(other, Ray):
            return Ray(self.value - other.value)
        else:
            raise ArithmeticError
    def __mod__(self, other):
        """Ray % Ray only; anything else raises ArithmeticError."""
        if isinstance(other, Ray):
            return Ray(self.value % other.value)
        else:
            raise ArithmeticError
    def __mul__(self, other):
        """Multiply by Ray, Wad, Rad or int; the result is always a Ray.
        The product is rescaled by the other operand's decimal count and
        truncated via _context (ROUND_DOWN).
        """
        if isinstance(other, Ray):
            result = Decimal(self.value) * Decimal(other.value) / (Decimal(10) ** Decimal(27))
            return Ray(int(result.quantize(1, context=_context)))
        elif isinstance(other, Wad):
            result = Decimal(self.value) * Decimal(other.value) / (Decimal(10) ** Decimal(18))
            return Ray(int(result.quantize(1, context=_context)))
        elif isinstance(other, Rad):
            result = Decimal(self.value) * Decimal(other.value) / (Decimal(10) ** Decimal(45))
            return Ray(int(result.quantize(1, context=_context)))
        elif isinstance(other, int):
            return Ray(int((Decimal(self.value) * Decimal(other)).quantize(1, context=_context)))
        else:
            raise ArithmeticError
    def __truediv__(self, other):
        """Ray / Ray only; scales the numerator up by 10**27 before dividing."""
        if isinstance(other, Ray):
            return Ray(int((Decimal(self.value) * (Decimal(10) ** Decimal(27)) / Decimal(other.value)).quantize(1, context=_context)))
        else:
            raise ArithmeticError
    def __abs__(self):
        return Ray(abs(self.value))
    def __eq__(self, other):
        if isinstance(other, Ray):
            return self.value == other.value
        else:
            raise ArithmeticError
    def __hash__(self):
        return hash(self.value)
    def __lt__(self, other):
        # total_ordering derives <=, >, >= from __lt__ and __eq__.
        if isinstance(other, Ray):
            return self.value < other.value
        else:
            raise ArithmeticError
    def __int__(self):
        # Truncates the fractional part.
        return int(self.value / 10**27)
    def __float__(self):
        return self.value / 10**27
    def __round__(self, ndigits: int = 0):
        return Ray(round(self.value, -27 + ndigits))
    def __sqrt__(self):
        # Not a Python protocol method -- math.sqrt() will not call this;
        # it must be invoked explicitly. Goes through float, so precision
        # is limited to float precision.
        return Ray.from_number(math.sqrt(self.__float__()))
    @staticmethod
    def min(*args):
        """Returns the lower of the Ray values"""
        return reduce(lambda x, y: x if x < y else y, args[1:], args[0])
    @staticmethod
    def max(*args):
        """Returns the higher of the Ray values"""
        return reduce(lambda x, y: x if x > y else y, args[1:], args[0])
@total_ordering
class Rad:
"""Represents a number with 45 decimal places.
`Rad` implements comparison, addition, subtraction, multiplication and division operators. Comparison, addition,
subtraction and division only work with other instances of `Rad`. Multiplication works with instances
of `Rad`, `Ray and `Wad` and also with `int` numbers. The result of multiplication is always a `Rad`.
`Rad` is rad is a new unit that exists to prevent precision loss in the core CDP engine of MCD.
Notes:
The internal representation of `Rad` is an unbounded integer, the last 45 digits of it being treated
as decimal places.
"""
def __init__(self, value):
"""Creates a new Rad number.
Args:
value: an instance of `Rad`, `Ray`, `Wad` or an integer. In case of an integer, the internal representation
of Maker contracts is used which means that passing `1` will create an instance of `Rad`
with a value of `0.000000000000000000000000000000000000000000001'.
"""
if isinstance(value, Rad):
self.value = value.value
elif isinstance(value, Ray):
self.value = int((Decimal(value.value) * (Decimal(10)**Decimal(18))).quantize(1, context=_context))
elif isinstance(value, Wad):
self.value = int((Decimal(value.value) * (Decimal(10)**Decimal(27))).quantize(1, context=_context))
elif isinstance(value, int):
# assert(value >= 0)
self.value = value
else:
raise ArithmeticError
@classmethod
def from_number(cls, number):
# assert(number >= 0)
pwr = Decimal(10) ** 45
dec = Decimal(str(number)) * pwr
return Rad(int(dec.quantize(1, context=_context)))
def __repr__(self):
return "Rad(" + str(self.value) + ")"
def __str__(self):
tmp = str(self.value).zfill(46)
return (tmp[0:len(tmp)-45] + "." + tmp[len(tmp)-45:len(tmp)]).replace("-.", "-0.")
def __add__(self, other):
if isinstance(other, Rad):
return Rad(self.value + other.value)
else:
raise ArithmeticError
def __sub__(self, other):
if isinstance(other, Rad):
return Rad(self.value - other.value)
else:
raise ArithmeticError
def __mod__(self, other):
if isinstance(other, Rad):
return Rad(self.value % other.value)
else:
raise ArithmeticError
def __mul__(self, other):
if isinstance(other, Rad):
result = Decimal(self.value) * Decimal(other.value) / (Decimal(10) ** Decimal(45))
return Rad(int(result.quantize(1, context=_context)))
elif isinstance(other, Ray):
result = Decimal(self.value) * Decimal(other.value) / (Decimal(10) ** Decimal(27))
return Rad(int(result.quantize(1, context=_context)))
elif isinstance(other, Wad):
result = Decimal(self.value) * Decimal(other.value) / (Decimal(10) ** Decimal(18))
return Rad(int(result.quantize(1, context=_context)))
elif isinstance(other, int):
return Rad(int((Decimal(self.value) * Decimal(other)).quantize(1, context=_context)))
else:
raise ArithmeticError
def __truediv__(self, other):
if isinstance(other, Rad):
return Rad(int((Decimal(self.value) * (Decimal(10) ** Decimal(45)) / Decimal(other.value)).quantize(1, context=_context)))
else:
raise ArithmeticError
def __abs__(self):
return Rad(abs(self.value))
def __eq__(self, other):
if isinstance(other, Rad):
return self.value == other.value
else:
raise ArithmeticError
def __hash__(self):
return hash(self.value)
def __lt__(self, other):
    """Ordering is defined only against other Rad instances."""
    if not isinstance(other, Rad):
        raise ArithmeticError
    return self.value < other.value
def __int__(self):
    """Whole-number part of the Rad, truncated toward zero.

    Uses pure integer arithmetic: the previous ``int(self.value / 10**45)``
    went through a float and silently lost precision for values beyond
    float's 53-bit mantissa.
    """
    quotient = abs(self.value) // 10**45
    return -quotient if self.value < 0 else quotient
def __float__(self):
    # Divide out the 10**45 scale; float conversion is inherently lossy
    # for values beyond float's 53-bit mantissa.
    return self.value / 10**45
def __round__(self, ndigits: int = 0):
    # round() on the underlying int with a negative digit count trims the
    # low-order decimal digits (banker's rounding on exact halfway cases).
    return Rad(round(self.value, -45 + ndigits))
def __sqrt__(self):
    # NOTE(review): __sqrt__ is not a standard Python protocol; presumably
    # invoked explicitly elsewhere in this codebase -- confirm before relying
    # on it. Goes through float, so precision is limited to ~15 digits.
    return Rad.from_number(math.sqrt(self.__float__()))
@staticmethod
def min(*args):
    """Return the lowest of the given Rad values (first wins on ties)."""
    lowest = args[0]
    for candidate in args[1:]:
        if candidate < lowest:
            lowest = candidate
    return lowest
@staticmethod
def max(*args):
    """Return the highest of the given Rad values (first wins on ties)."""
    highest = args[0]
    for candidate in args[1:]:
        if candidate > highest:
            highest = candidate
    return highest
|
This is a proven natural remedy for gastric swellings and intestinal gas (excessive belching, bloating and flatulence). It is also recommended for people suffering from nervous tension.
Soak mint in white wine and let it rest for 12 hours, then pour it into a saucepan and heat it to the boiling point. Remove from heat, cover the saucepan and let it cool. Drink two cups a day: a cup of tea in the morning on an empty stomach, and a cup in the evening, before going to bed.
|
from django.utils.termcolors import colorize
class Reporter:
    """Collect categorized report messages and render them on demand."""

    ERROR = 1
    WARNING = 2
    NOTICE = 3

    # Human-readable section heading for each level.
    LEVEL_LABEL = {
        ERROR: 'errors',
        WARNING: 'warnings',
        NOTICE: 'notices',
    }

    def __init__(self, verbosity):
        # Per-level mapping of message -> list of associated data items.
        self.verbosity = verbosity
        self._reports = {level: {} for level in (self.ERROR, self.WARNING, self.NOTICE)}

    def compile(self):
        """Render all collected reports as a list of colorized lines."""
        rendered = []

        def emit(text, **style):
            rendered.append(colorize(text=text, **style))

        if self._reports:
            emit('{space}Reports{space}'.format(space=' '*32), bg='blue',
                 fg='white')
        for level, entries in self._reports.items():
            if not entries:
                continue
            emit(self.LEVEL_LABEL[level].title())
            for message, occurrences in entries.items():
                emit('- {} ({})'.format(message, len(occurrences)))
                # Individual items are shown only at sufficient verbosity.
                if self.verbosity >= level:
                    item_colour = 'red' if level == self.ERROR else 'white'
                    for occurrence in occurrences:
                        emit(' . {}'.format(occurrence), fg=item_colour)
        return rendered

    def __str__(self):
        return '\n'.join(self.compile())

    def _report(self, level, msg, data):
        """Record one occurrence of *msg* at *level*, attaching *data*."""
        bucket = self._reports[level].setdefault(msg, [])
        bucket.append(data)

    def error(self, msg, data):
        self._report(self.ERROR, msg, data)

    def warning(self, msg, data):
        self._report(self.WARNING, msg, data)

    def notice(self, msg, data):
        self._report(self.NOTICE, msg, data)

    def has_errors(self):
        """Return True iff at least one error has been reported."""
        return bool(self._reports[self.ERROR])
|
Light Shade is a photograph by Misentropy which was uploaded on July 4th, 2016.
There are no comments for Light Shade. Click here to post the first comment.
|
def Bfunc(number, k):
    """Return True iff bit *k* of *number* is set."""
    return bool(number & (1 << k))
def gcd2(num1, num2):
    """Greatest common divisor of two numbers via Euclid's algorithm."""
    a, b = max(num1, num2), min(num1, num2)
    while b != 0:
        a, b = b, a % b
    return a
def gcdn(num_lst):
    """GCD of every number in *num_lst*; 0 for an empty list."""
    if not num_lst:
        return 0
    result = num_lst[0]
    for value in num_lst[1:]:
        result = gcd2(value, result)
    return result
def revealPrimes(secrets):
    """Recover per-subset prime factors from shared secret products.

    Iterates over every non-empty subset of users (largest subsets first),
    takes the GCD of the secrets belonging to the subset, records it, and
    divides it back out of each member's secret.  *secrets* is modified in
    place.

    Ported to Python 3: ``range`` replaces ``xrange`` and floor division
    ``//`` replaces the Python 2 integer ``/`` (which would produce floats
    and wrong results under Python 3).

    Returns:
        list of the extracted GCDs, one per subset in processing order.
    """
    num_users = len(secrets)
    # (popcount, subset_id) pairs, sorted so the fullest subsets come first.
    subsets = sorted(
        ((bin(i).count("1"), i) for i in range(1, 1 << num_users)),
        reverse=True,
    )
    primes = []
    for _, set_id in subsets:
        # Users whose bit is set in this subset id (1-based user numbers).
        members = [i for i in range(1, num_users + 1)
                   if Bfunc(set_id, num_users - i)]
        gcd_nums = gcdn([secrets[i - 1] for i in members])
        primes.append(gcd_nums)
        for i in members:
            secrets[i - 1] //= gcd_nums
    return primes
|
Sale Pending. ARROWHEAD is a fully-loaded, virtually new Cutwater 302 Coupe with only 25 total engine hours. Complete with trailer, she comes with new boat warranties and every option available. The Cutwater 302 Coupe packs more into her 30’ hull than any other boat – bow and stern thrusters, air conditioning and heat, generator, cockpit grill and wet bar, ice maker, wine fridge, satellite TV, full galley, stall shower, aft steering station and comfortable sleeping for six. She is easily trailerable for last-minute weekend adventures. Due to a change in the owner’s plans she is now available at a significant discount from what he paid less than a year ago. She is the newest Cutwater 302 on the market in the USA and ready to be sold!
|
import re
from warnings import warn
from .errors import MalformedHeaderError, UnexpectedFoldingError
from .util import ascii_splitlines
def scan_string(s, **kwargs):
    """
    Split *s* into lines (on CR, LF, and CR LF boundaries) and scan it
    for RFC 822-style header fields.

    Yields a ``(name, value)`` pair per header field, plus a final
    ``(None, body)`` pair for any body after the header section.  See
    `scan()` for the precise scanning behavior.

    :param s: the string to scan
    :param kwargs: :ref:`scanner options <scan_opts>`
    :rtype: generator of pairs of strings
    :raises ScannerError: if the header section is malformed
    """
    return scan(ascii_splitlines(s), **kwargs)
def scan_file(fp, **kwargs):
    """
    Scan a file for RFC 822-style header fields, yielding a ``(name,
    value)`` pair per field plus a final ``(None, body)`` pair for any
    body after the header section.  See `scan()` for details.

    .. deprecated:: 0.4.0
        Use `scan()` instead.

    :param fp: a file-like object whose iteration yields lines for
        `scan()`; universal newlines mode is recommended
    :param kwargs: :ref:`scanner options <scan_opts>`
    :rtype: generator of pairs of strings
    :raises ScannerError: if the header section is malformed
    """
    warn("scan_file() is deprecated. Use scan() instead.", DeprecationWarning)
    return scan(fp, **kwargs)
def scan_lines(fp, **kwargs):
    """
    Scan an iterable of lines for RFC 822-style header fields, yielding
    a ``(name, value)`` pair per field plus a final ``(None, body)``
    pair for any body after the header section.  See `scan()` for
    details.

    .. deprecated:: 0.4.0
        Use `scan()` instead.

    :param fp: an iterable of strings representing lines of input
    :param kwargs: :ref:`scanner options <scan_opts>`
    :rtype: generator of pairs of strings
    :raises ScannerError: if the header section is malformed
    """
    warn("scan_lines() is deprecated. Use scan() instead.", DeprecationWarning)
    return scan(fp, **kwargs)
def scan(iterable, **kwargs):
    """
    .. versionadded:: 0.4.0

    Scan a text-file-like object or iterable of lines for RFC 822-style
    header fields.  Yields a ``(name, value)`` pair per field, followed
    by a ``(None, body)`` pair for any message body.

    Everything after the first blank line is concatenated verbatim into
    the body (body lines lacking a line terminator keep lacking one).
    With no blank line in the input, no body pair is produced; a blank
    line as the last line yields an empty body.  If the *first* line is
    blank and ``skip_leading_newlines`` is false (the default), the
    whole remainder is treated as body and not scanned for fields.

    :param iterable: a text-file-like object or iterable of strings
        representing lines of input
    :param kwargs: :ref:`scanner options <scan_opts>`
    :rtype: generator of pairs of strings
    :raises ScannerError: if the header section is malformed
    """
    lines = iter(iterable)
    for fieldname, fieldvalue in _scan_next_stanza(lines, **kwargs):
        if fieldname is None:
            # Trailing (None, flag) pair: a true flag means a blank line
            # ended the headers, so the rest of the input is the body.
            if fieldvalue:
                yield (None, "".join(lines))
        else:
            yield (fieldname, fieldvalue)
def scan_next_stanza(iterator, **kwargs):
    """
    .. versionadded:: 0.4.0

    Yield ``(name, value)`` pairs for the RFC 822-style header fields at
    the start of *iterator*, stopping at the first blank line (or, with
    ``skip_leading_newlines`` true, the first blank line after a
    non-blank line).  The rest of the iterator is left unconsumed.

    :param iterator: a text-file-like object or iterator of strings
        representing lines of input
    :param kwargs: :ref:`scanner options <scan_opts>`
    :rtype: generator of pairs of strings
    :raises ScannerError: if the header section is malformed
    """
    # Drop the trailing (None, flag) pair produced by the core scanner.
    yield from (
        (name, value)
        for name, value in _scan_next_stanza(iterator, **kwargs)
        if name is not None
    )
def _scan_next_stanza(
iterator,
separator_regex=re.compile(r"[ \t]*:[ \t]*"), # noqa: B008
skip_leading_newlines=False,
):
"""
.. versionadded:: 0.4.0
Like `scan_next_stanza()`, except it additionally yields as its last item a
``(None, flag)`` pair where ``flag`` is `True` iff the stanza was
terminated by a blank line (thereby suggesting there is more input left to
process), `False` iff the stanza was terminated by EOF.
This is the core function that all other scanners ultimately call.
"""
name = None
value = ""
begun = False
more_left = False
if not hasattr(separator_regex, "match"):
separator_regex = re.compile(separator_regex)
for line in iterator:
line = line.rstrip("\r\n")
if line.startswith((" ", "\t")):
begun = True
if name is not None:
value += "\n" + line
else:
raise UnexpectedFoldingError(line)
else:
m = separator_regex.search(line)
if m:
begun = True
if name is not None:
yield (name, value)
name = line[: m.start()]
value = line[m.end() :]
elif line == "":
if skip_leading_newlines and not begun:
continue
else:
more_left = True
break
else:
raise MalformedHeaderError(line)
if name is not None:
yield (name, value)
yield (None, more_left)
def scan_next_stanza_string(s, **kwargs):
    """
    .. versionadded:: 0.4.0

    Scan a string for RFC 822-style header fields up to the first blank
    line (or, with ``skip_leading_newlines`` true, the first blank line
    after a non-blank line).

    :param s: a string to scan
    :param kwargs: :ref:`scanner options <scan_opts>`
    :return: a pair ``(fields, extra)`` where ``fields`` is a list of
        ``(name, value)`` pairs and ``extra`` is everything after the
        dividing blank line (the empty string if there is none)
    :rtype: pair of a list of pairs of strings and a string
    :raises ScannerError: if the header section is malformed
    """
    remaining = iter(ascii_splitlines(s))
    header_fields = list(scan_next_stanza(remaining, **kwargs))
    return (header_fields, "".join(remaining))
def scan_stanzas(iterable, **kwargs):
    """
    .. versionadded:: 0.4.0

    Scan a text-file-like object or iterable of lines for zero or more
    stanzas of RFC 822-style header fields, yielding one list of
    ``(name, value)`` pairs per stanza.

    Stanzas are separated by blank lines; consecutive blank lines
    between stanzas collapse into one, and blank lines at the end of the
    input do not produce an empty final stanza.

    :param iterable: a text-file-like object or iterable of strings
        representing lines of input
    :param kwargs: :ref:`scanner options <scan_opts>`
    :rtype: generator of lists of pairs of strings
    :raises ScannerError: if the header section is malformed
    """
    lines = iter(iterable)
    while True:
        pairs = list(_scan_next_stanza(lines, **kwargs))
        # The core scanner's last item is the (None, flag) terminator.
        blank_terminated = pairs.pop()[1]
        if not pairs and not blank_terminated:
            break
        yield pairs
        # After the first stanza, tolerate runs of blank separator lines.
        kwargs["skip_leading_newlines"] = True
def scan_stanzas_string(s, **kwargs):
    """
    .. versionadded:: 0.4.0

    Split *s* into lines (on CR, LF, and CR LF boundaries) and scan it
    for stanzas of RFC 822-style header fields; see `scan_stanzas()`
    for how stanzas are separated.

    :param s: a string to scan
    :param kwargs: :ref:`scanner options <scan_opts>`
    :rtype: generator of lists of pairs of strings
    :raises ScannerError: if the header section is malformed
    """
    return scan_stanzas(ascii_splitlines(s), **kwargs)
|
Book your room in our brand new bed & breakfast in the heart of the quaint village of Cold Spring, NY.
Carved mahogany queen beds and stained glass windows with a spectacular view. The bathrooms offer a walk-in shower with a ceramic tile wall, with a personalized heating and cooling system.
|
import os
import json
import sqlite3
import functools
import itertools
import shutil
from os.path import abspath
import logging
from gpc import hexdigest
logger = logging.getLogger(__name__)
class DatabaseError(Exception): pass
class Database(object):
    """An in-memory sqlite database backed by plain-text files on disk.

    On-disk layout: a directory containing ``schema.sql`` plus a ``data/``
    subdirectory with one file per INSERT statement, each named by the hex
    digest of the statement text.
    """
    def __init__(self, path):
        """Load the database stored at *path* into a fresh in-memory connection.

        Args:
            path: Directory previously created with :meth:`create`.
        """
        super(Database, self).__init__()
        path = abspath(path)
        schema_script = Database._get_schema_script(path)
        self._data_dir = Database._get_data_dir(path)
        def data_statements():
            # Sorted for a deterministic load order; os.listdir() order is
            # platform-dependent.
            for file in sorted(os.listdir(self._data_dir)):
                stmt_path = os.path.join(self._data_dir, file)
                with open(stmt_path, 'r') as f:
                    sql = f.read()
                yield sql
        self._conn = sqlite3.connect(':memory:')
        with self._conn as conn:
            conn.executescript(schema_script)
            for stmt in data_statements():
                conn.execute(stmt)
    @staticmethod
    def _get_data_dir(path):
        """Return the directory holding the saved INSERT statements."""
        return abspath(os.path.join(path, 'data'))
    @staticmethod
    def _get_schema_script(path):
        """Read and return the schema SQL for the database at *path*."""
        with open(Database._get_schema_path(path), 'r') as f:
            return f.read()
    @staticmethod
    def _get_schema_path(path):
        """Return the path of the schema file for the database at *path*."""
        return abspath(os.path.join(path, 'schema.sql'))
    def write(self):
        """Persist every INSERT statement to the data directory.

        Each statement is stored under its digest, so statements already
        on disk are skipped.
        """
        statements = self._conn.iterdump()
        def should_be_saved(stmt):
            return stmt.startswith('INSERT')
        for stmt in filter(should_be_saved, statements):
            digest = hexdigest(stmt)
            path = os.path.join(self._data_dir, digest)
            if not os.path.exists(path):
                with open(path, 'w') as file:
                    file.write(stmt)
                    file.write('\n')
    def __enter__(self):
        return self._conn.__enter__()
    def __exit__(self, *args, **kwargs):
        self._conn.__exit__(*args, **kwargs)
    def execute(self, *args, **kwargs):
        """Proxy to sqlite3.Connection.execute()."""
        return self._conn.execute(*args, **kwargs)
    def executemany(self, *args, **kwargs):
        """Proxy to sqlite3.Connection.executemany()."""
        return self._conn.executemany(*args, **kwargs)
    def executescript(self, *args, **kwargs):
        """Proxy to sqlite3.Connection.executescript()."""
        return self._conn.executescript(*args, **kwargs)
    @classmethod
    def create(cls, path, schema):
        """
        Create a new database on disk and sanity-check it by loading it.

        Args:
            path: Directory to create; must not already exist.
            schema: SQL script defining the database schema.

        Raises:
            DatabaseError: If path exists.
        """
        path = abspath(path)
        if os.path.exists(path):
            raise DatabaseError('Path must not exist when creating database!')
        os.makedirs(Database._get_data_dir(path))
        with open(Database._get_schema_path(path), 'w') as f:
            f.write(schema)
        # Verify the new database loads cleanly; clean up if it does not.
        # (Bare ``raise`` preserves the original traceback; the previous
        # ``raise e`` re-raised from here and the instance was unused.)
        try:
            Database(path)
        except Exception:
            shutil.rmtree(path)
            raise
|
The Trinity College Dublin Study Abroad Excellence Scholarship is awarded to a semester or year study abroad student who demonstrates academic excellence in their area of study and conveys how study abroad at Trinity will aid them in achieving their academic and personal goals.
Applicants must apply to study abroad at Trinity College Dublin on a semester or academic year basis. Applicants must be fee-paying, as exchange students are not eligible for scholarships. They must meet the minimum requirements for admission, noting especially the minimum GPA of 3.3. Applications to Study Abroad in Trinity can be made here.
Students must submit a personal statement of 750 words and an unofficial transcript to study.abroad@tcd.ie by the deadline, with the email subject line of ‘TCD Global Study Abroad Excellence Scholarship’. Students must also include their name, home institution, area of study, study abroad provider (if applicable), and confirm that they are not coming to Trinity via exchange.
Students will be selected based on their academic performance to date at their home university. Performance in their major and minor subjects will be particularly relevant to their application, as will be the student’s personal statement. Applications which include personal statements of more than 750 words, as well as incomplete applications, will not be considered.
Awardees will need to be enrolled in at least 25 ECTS at Trinity. They will need to open an Irish bank account in order to receive the scholarship, and will be required to provide receipt-based evidence that the scholarship was used for tuition, room, board or airfare.
Awardees will participate in the Trinity Study Abroad blogger programme. Students in the programme are expected to write three blogs per semester for the World of Trinity blog. Further details on deadlines, length and blog criteria will be shared at the time of the award announcement.
|
#coding=utf-8
import urllib.request
import re
import os
import sys
import threading
import datetime
import pickle
import time
import MailService
begURL = 'http://tieba.baidu.com/f?'
PATH_DOWNLOAD_CACHE = sys.path[0]+'\\dlcache\\'
GV_DOWNLOAD_ALL = []
GV_THEAD_COUNT = 4
page = 0
x=0
max_page = 0
sum = 0
pocessList=[]
def setupfiles():
    """Ensure the result files exist, creating them empty if missing.

    The previous version left the newly opened file handles dangling;
    each file is now closed immediately after creation.
    """
    for name in ('result.txt', 'result_add'):
        if os.path.exists(name) == False:
            open(name, 'w').close()
def getHtml(url):
    """Fetch *url* and return the raw response body as bytes.

    The response is used as a context manager so the underlying socket
    is closed promptly instead of being leaked.
    """
    with urllib.request.urlopen(url) as page:
        return page.read()
def getTitle(html):
    """Extract thread titles from a Tieba forum page.

    Returns:
        (count, text): the number of titles found and a string holding
        one title per indented line.

    Fix: the counter previously started at 1 and was incremented once per
    title, over-reporting the count by one per page; it is now the true
    number of titles.
    """
    # <a href="/p/4745088342" title="DDD" target="_blank" class="j_th_tit ">DDDD</a>
    reg = r"<a href=\"/p/.*?class=\"j_th_tit \">.*?</a>"
    imgre = re.compile(reg)
    titlelist = re.findall(imgre, html)
    dstr = '\r\n\t\t'
    for dta in titlelist:
        # Strip the opening anchor tag and the closing tag, leaving the title.
        k = re.sub("<a href=\"/p/.*?class=\"j_th_tit \">", "", dta)
        k = re.sub("</a>", "", k)
        dstr = dstr + '\r\n\t\t' + k
    return len(titlelist), dstr
def savetofile(data, path):
    """Write *data* to *path*, encoded as gb18030."""
    with open(path, 'wb') as out:
        out.write(data.encode('gb18030'))
def downloadPage(psum, count, beg=0):
    """Worker: download pages [beg, psum) and append the raw HTML to pocessList.

    Registers itself in GV_DOWNLOAD_ALL and flips its slot to True when done.
    """
    GV_DOWNLOAD_ALL.append(False)
    index = beg
    offset = beg * 50  # Tieba paginates in steps of 50 posts.
    while index < psum:
        print('>>>>>thead '+str(count)+':now downloading page[',str(index + 1)+'/'+str(psum),']')
        pocessList.append(getHtml(begURL + str(offset)))
        index += 1
        offset += 50
    print('[thead'+str(count)+']<<<<<All pages downloaded!')
    GV_DOWNLOAD_ALL[count-1] = True
def pocessDataList(GV_COUNT):
    """Drain pocessList, extracting titles, until all workers have finished.

    Returns:
        (total, text): the total number of titles and their concatenation.

    Fix: the loop previously exited as soon as all GV_COUNT workers
    reported done, even when unprocessed pages were still queued in
    pocessList, silently dropping that data.  It now drains the queue
    before exiting.

    NOTE(review): this is still a busy-wait with no sleep while the queue
    is empty -- it burns CPU; kept as-is to avoid changing timing behavior.
    """
    titlesum = 0
    titlelist = ''
    count = 0
    while True:
        if len(pocessList) > 0:
            count += 1
            print('>>>>>now pocess page[',count,'],------[',titlesum,']pieces of data in all')
            m, dstr = getTitle(pocessList[0].decode('utf-8','ignore'))
            del pocessList[0]
            titlelist += dstr
            titlesum += m
            continue
        # Queue is empty: exit only once every worker reports done.
        finished = 0
        for item in GV_DOWNLOAD_ALL:
            if item == True:
                finished += 1
        if finished == GV_COUNT:
            break
    return titlesum, titlelist
# --- Script entry: interactive Tieba crawler driver ---
setupfiles()
os.system('clear')
print('>>>>> This script used to download data from Tieba\n>>>>>by Kanch kanchisme@gmail.com')
# If a previous run left results behind, show a summary of them.
isize = os.path.getsize('result.txt')
if isize > 10:
    f = open('result_add','rb')
    xs = pickle.load(f)
    f.close()
    print('>>>>>data dectecrd\n\t>>>size:'+str(isize)+' bytes,with '+str(xs['sum'])+' pieeces of data,created on:'+str(xs['time']) +'\n')
# Ask which forum ("Tieba") to crawl; default is the CUIT forum.
opt = input('\r\n>>>>>Would you like to set the Tieba with script going to collect?(if not,script will collect CUIT ba)(Y/N):____\b\b')
if opt == 'Y':
    tieba_name = input('>>>>>please enter the name you wish to collect:______________________\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b')
    print('>>>>>script will collect [SET NO SHOW ]!')
else:
    # Default forum: Chengdu University of Information Technology (CUIT).
    tieba_name = '成都信息工程大学'
    print('>>>>>no settleed Tieba,collect CUIT defaultly')
# Build the paginated base URL with the url-encoded forum name.
KWD = urllib.parse.urlencode({'kw':tieba_name})
begURL = begURL + KWD + '&ie=utf-8&pn='
max_page = input('>>>>>how many page you wish to collect?:______\b\b\b\b\b')
TC = input('how many theads you\'d like to run?____\b\b\b')
GV_THEAD_COUNT = int(TC)
mstr = "============================================================\r\nRESULT\r\n============================================================="
createdtime = datetime.datetime.now()
# NOTE(review): return value of strftime() is discarded -- this call has no effect.
createdtime.strftime('%Y-%m-%d %H:%M:%S')
time1 = time.time()
# Multi-threaded download scheme below
MAX_PAGE = int(max_page)
# Create the worker threads; each downloads a contiguous range of pages.
t = []
x = 0
deltaX = MAX_PAGE / GV_THEAD_COUNT
BEG = 0
END = deltaX
while x < GV_THEAD_COUNT:
    tn = threading.Thread(target=downloadPage,args=(int(END),x+1,int(BEG),))
    t.append(tn)
    x += 1
    BEG += deltaX
    END += deltaX
# Daemon threads so the process can exit even if a download hangs.
for item in t:
    item.setDaemon(True)
    item.start()
# Main thread consumes and parses the downloaded pages in a loop.
sum,mstr = pocessDataList(GV_THEAD_COUNT)
# ==== All pages processed; persist the results to file ====
now = datetime.datetime.now()
# NOTE(review): return value of strftime() is discarded -- this call has no effect.
now.strftime('%Y-%m-%d %H:%M:%S')
last_data_source = {'sum':sum,'time':now}
savetofile(mstr,'result.txt')
f = open('result_add','wb')
pickle.dump(last_data_source, f,2)
f.close()
time2 = time.time()
tc = time2 - time1
print('>>>>>Collect Success,total time cost:',str(tc),'sec\n>>>>>total data collect[',sum,']\n>>>>>result save to ','result.txt')
# Compose and send the completion notification e-mail.
Title = "Download Success! Finised on " + str(now) + '.'
line1 = "Tieba job created on " + str(createdtime) + " now has been finised!\r\n=========================\r\nSummary\r\n\r\n"
line2 = "\r\nJob Created on: \t"+str(createdtime)+'\r\nJob finished on: \t'+str(now) +"\r\nPieces of data retrived: " + str(sum) +"\r\nTotal time cost: \t" + str(tc) + " seconds"
line3 = "\r\n\r\n\r\n This mail is send by Kanch's PythonBot @ 216.45.55.153\r\n=========================\r\n"
Content = line1 + line2 + line3
#print(Title,'\r\n',Content)
MailService.SendMail('james0121@vip.qq.com',Title,Content)
|
Enjoy this brand new townhouse in a private rear section. Three bedrooms, open plan living area capturing plenty of sun. Attached double garage with internal access and good parking.
Your own paradise in town!
SUNNY WARM & EASY CARE!
Sunny three-bedroom townhouse recently remodeled, including a new kitchen, and re-carpeted throughout. Private easy-care grounds. Tandem garaging with workshop space.
|
import scipy.linalg as LA
import numpy as np
from pySDC.Sweeper import sweeper
class generic_LU(sweeper):
    """
    Custom sweeper class, implements Sweeper.py

    LU sweeper using LU decomposition of the Q matrix for the base integrator

    Attributes:
        Qd: U^T of Q^T = L*U
    """
    def __init__(self,params):
        """
        Initialization routine for the custom sweeper

        Args:
            params: parameter set passed through to the base sweeper class
        """
        # call parent's initialization routine
        super(generic_LU,self).__init__(params)
        # LU integration matrix
        self.Qd = self.__get_Qd()
        pass
    def __get_Qd(self):
        """
        Compute LU decomposition of Q^T

        Returns:
            Qd: U^T of Q^T = L*U
        """
        # strip Qmat by initial value u0
        QT = self.coll.Qmat[1:,1:].T
        # do LU decomposition of QT
        # NOTE(review): QT is a transposed view of coll.Qmat and overwrite_a=True
        # allows scipy to clobber it in place -- confirm Qmat is not needed
        # unmodified elsewhere (scipy may copy anyway for non-contiguous input).
        [P,L,U] = LA.lu(QT,overwrite_a=True)
        # enrich QT by initial value u0
        Qd = np.zeros(np.shape(self.coll.Qmat))
        Qd[1:,1:] = U.T
        return Qd
    def integrate(self):
        """
        Integrates the right-hand side

        Returns:
            list of dtype_u: containing the integral as values
        """
        # get current level and problem description
        L = self.level
        P = L.prob
        me = []
        # integrate RHS over all collocation nodes
        for m in range(1,self.coll.num_nodes+1):
            # new instance of dtype_u, initialize values with 0
            me.append(P.dtype_u(P.init,val=0))
            # weighted sum over the quadrature row m of Qmat
            for j in range(1,self.coll.num_nodes+1):
                me[-1] += L.dt*self.coll.Qmat[m,j]*L.f[j]
        return me
    def update_nodes(self):
        """
        Update the u- and f-values at the collocation nodes -> corresponds to a single sweep over all nodes

        Returns:
            None
        """
        # get current level and problem description
        L = self.level
        P = L.prob
        # only if the level has been touched before
        assert L.status.unlocked
        # get number of collocation nodes for easier access
        M = self.coll.num_nodes
        # gather all terms which are known already (e.g. from the previous iteration)
        # this corresponds to u0 + QF(u^k) - QdF(u^k) + tau
        # get QF(u^k)
        integral = self.integrate()
        for m in range(M):
            # get -QdF(u^k)_m
            for j in range(M+1):
                integral[m] -= L.dt*self.Qd[m+1,j]*L.f[j]
            # add initial value
            integral[m] += L.u[0]
            # add tau if associated
            if L.tau is not None:
                integral[m] += L.tau[m]
        # do the sweep
        for m in range(0,M):
            # build rhs, consisting of the known values from above and new values from previous nodes (at k+1)
            rhs = P.dtype_u(integral[m])
            for j in range(m+1):
                rhs += L.dt*self.Qd[m+1,j]*L.f[j]
            # implicit solve with prefactor stemming from the diagonal of Qd
            L.u[m+1] = P.solve_system(rhs,L.dt*self.Qd[m+1,m+1],L.u[m+1],L.time+L.dt*self.coll.nodes[m])
            # update function values
            L.f[m+1] = P.eval_f(L.u[m+1],L.time+L.dt*self.coll.nodes[m])
        # indicate presence of new values at this level
        L.status.updated = True
        return None
    def compute_end_point(self):
        """
        Compute u at the right point of the interval

        The value uend computed here might be a simple copy from u[M] (if right point is a collocation node) or
        a full evaluation of the Picard formulation (if right point is not a collocation node)
        """
        # get current level and problem description
        L = self.level
        P = L.prob
        # check if Mth node is equal to right point (flag is set in collocation class)
        if self.coll.right_is_node:
            # a copy is sufficient
            L.uend = P.dtype_u(L.u[-1])
        else:
            # start with u0 and add integral over the full interval (using coll.weights)
            L.uend = P.dtype_u(L.u[0])
            for m in range(self.coll.num_nodes):
                L.uend += L.dt*self.coll.weights[m]*L.f[m+1]
            # add up tau correction of the full interval (last entry)
            if L.tau is not None:
                L.uend += L.tau[-1]
        return None
|
Wendy joined Ellenbecker Investment Group in January of 2015 as the Director of Marketing. Her primary responsibilities include the overall marketing strategy for EIG and continuing the strong brand presence in our surrounding communities. Wendy has over fifteen years of experience in the financial industry. Prior to joining EIG, Wendy spent four years at BMO Global Asset Management and eleven years at Robert W. Baird.
Wendy was raised in Brookfield and attended the University of Wisconsin-Eau Claire. She currently serves on the board of directors for the Juvenile Diabetes Research Foundation (JDRF) and the Pewaukee Chamber of Commerce. Wendy is fortunate to have extended family in Southeast Wisconsin and enjoys an annual retreat to Door County each year. On the weekends she and her husband enjoy keeping up with their children at any number of sporting events including soccer, baseball, basketball and sailing.
|
import random
import sys
from src.AI import AI
from src.Board import Board
from src.InputParser import InputParser
WHITE = True
BLACK = False
def askForPlayerSide():
    """Prompt for a side; anything containing 'w' means white, else black."""
    choice = input(
        "What side would you like to play as [wB]? ").lower()
    if 'w' in choice:
        print("You will play as white")
        return WHITE
    print("You will play as black")
    return BLACK
def askForDepthOfAI():
    """Ask for the AI search depth, defaulting to 2 on invalid input.

    The previous bare ``except:`` swallowed every error; it now catches
    only bad numeric input (ValueError) and end-of-input (EOFError),
    which are the cases the fallback is meant for.
    """
    depthInput = 2
    try:
        depthInput = int(input("How deep should the AI look for moves?\n"
                               "Warning : values above 3 will be very slow."
                               " [2]? "))
    except KeyboardInterrupt:
        sys.exit()
    except (ValueError, EOFError):
        print("Invalid input, defaulting to 2")
    return depthInput
def printCommandOptions():
    """Print the list of commands accepted at the move prompt."""
    options = [
        'u : undo last move',
        'l : show all legal moves',
        'r : make a random move',
        'quit : resign',
        'a3, Nc3, Qxa2, etc : make the move',
        '',
    ]
    print('\n'.join(options))
def printAllLegalMoves(board, parser):
    """Print the short notation of every legal move for the side to move."""
    legal = parser.getLegalMovesWithNotation(board.currentSide, short=True)
    for legalMove in legal:
        print(legalMove.notation)
def getRandomMove(board, parser):
    """Pick a uniformly random legal move and attach its notation."""
    chosen = random.choice(board.getAllMovesLegal(board.currentSide))
    chosen.notation = parser.notationForMove(chosen)
    return chosen
def makeMove(move, board):
    """Announce *move* and apply it to *board*."""
    print("Making move : " + move.notation)
    board.makeMove(move)
def printPointAdvantage(board):
    """Print the material point difference from the current side's view."""
    advantage = board.getPointAdvantageOfSide(board.currentSide)
    print("Currently, the point difference is : " +
          str(advantage))
def undoLastTwoMoves(board):
    """Undo a full move pair (one ply per side) when at least two exist."""
    if len(board.history) >= 2:
        for _ in range(2):
            board.undoLastMove()
def startGame(board, playerSide, ai):
    """Run a human-vs-AI game loop until checkmate, stalemate, or resignation.

    Args:
        board: the shared game board.
        playerSide: WHITE or BLACK, the human's side.
        ai: the opponent AI providing getBestMove().

    Fixes a bug where a random move requested with 'r' was discarded:
    the command string was unconditionally re-parsed as a move, which
    raised ValueError and skipped the turn.
    """
    parser = InputParser(board, playerSide)
    while True:
        print()
        print(board)
        print()
        if board.isCheckmate():
            if board.currentSide == playerSide:
                print("Checkmate, you lost")
            else:
                print("Checkmate! You won!")
            return
        if board.isStalemate():
            if board.currentSide == playerSide:
                print("Stalemate")
            else:
                print("Stalemate")
            return
        if board.currentSide == playerSide:
            move = None
            command = input("It's your move."
                            " Type '?' for options. ? ")
            if command.lower() == 'u':
                undoLastTwoMoves(board)
                continue
            elif command.lower() == '?':
                printCommandOptions()
                continue
            elif command.lower() == 'l':
                printAllLegalMoves(board, parser)
                continue
            elif command.lower() == 'r':
                move = getRandomMove(board, parser)
            elif command.lower() == 'exit' or command.lower() == 'quit':
                return
            if move is None:
                # Only parse the input as a move when no command above
                # already produced one.
                try:
                    move = parser.parse(command)
                except ValueError as error:
                    print("%s" % error)
                    continue
            makeMove(move, board)
        else:
            print("AI thinking...")
            move = ai.getBestMove()
            move.notation = parser.notationForMove(move)
            makeMove(move, board)
def twoPlayerGame(board):
    """Run a local two-human game loop until checkmate or stalemate.

    Fixes the same bug as startGame(): a random move requested with 'r'
    was discarded by the unconditional re-parse of the command string.
    """
    parserWhite = InputParser(board, WHITE)
    parserBlack = InputParser(board, BLACK)
    while True:
        print()
        print(board)
        print()
        if board.isCheckmate():
            print("Checkmate")
            return
        if board.isStalemate():
            print("Stalemate")
            return
        # Pick the parser matching the side to move.
        if board.currentSide == WHITE:
            parser = parserWhite
        else:
            parser = parserBlack
        move = None
        command = input("It's your move, {}.".format(board.currentSideRep()) +
                        " Type '?' for options. ? ")
        if command.lower() == 'u':
            undoLastTwoMoves(board)
            continue
        elif command.lower() == '?':
            printCommandOptions()
            continue
        elif command.lower() == 'l':
            printAllLegalMoves(board, parser)
            continue
        elif command.lower() == 'r':
            move = getRandomMove(board, parser)
        elif command.lower() == 'exit' or command.lower() == 'quit':
            return
        if move is None:
            # Only parse the input as a move when no command above
            # already produced one.
            try:
                move = parser.parse(command)
            except ValueError as error:
                print("%s" % error)
                continue
        makeMove(move, board)
# Shared game board used by both the single-player and two-player modes.
board = Board()
def main():
    """Entry point: play two-player when invoked with '--two', otherwise ask
    for a side and an AI search depth and play against the AI."""
    try:
        if len(sys.argv) >= 2 and sys.argv[1] == "--two":
            twoPlayerGame(board)
        else:
            playerSide = askForPlayerSide()
            print()
            aiDepth = askForDepthOfAI()
            opponentAI = AI(board, not playerSide, aiDepth)
            startGame(board, playerSide, opponentAI)
    except KeyboardInterrupt:
        # Exit quietly on Ctrl-C instead of printing a traceback.
        sys.exit()
if __name__ == "__main__":
    main()
|
Tinisha Dolaira is the female lead of my series and love interest of Patrick Donovan. When I created her, I based aspects of her personality on my late grandmother, which include a compassionate heart, a sharp mind, and a strong moral code. In addition, I made Tinisha the same age as my grandmother when she passed away. In terms of species, I made Tinisha one of my favorite of all mythological beings, a high elf. However, to avoid cliches from other sword and sorcery stories, I placed Tinisha outside of her comfort zone when she is transported from her home world into twenty-first century Earth. She is very afraid and curious about how the modern world works and often turns to Patrick Donovan for comfort and explanations. While she is inexperienced about the modern world, she is morally wise and has strong ideals. Due to her advanced age, Tinisha serves as the big sister or motherly figure of the team. Because of the nature of their first encounter, Tinisha feels that watching over Patrick and his friends is her responsibility.
Written by Christopher Paolini, I did not know what to expect when I first read Eragon, but when I did, I was in for a surprise. This story is an elegant blend between Star Wars and The Lord of the Rings, two of my favorite story franchises. I also enjoyed how the author described the mythological creatures that are depicted in the story, specifically the dragons and elves, which are two of my favorite fantasy beings. Eragon allowed me to see the spiritual side of dragons, making them appear more human and less like animals. When the author described the elves, it was like he was trying to illustrate the appearance of angels without wings. I would recommend this story to anyone who enjoys sword and sorcery.
When I designed one of the main characters of my series, Patrick Donovan, I based him on the positive aspects of my personality. In addition, I gave him the powers that I would want to have if I was a superhero, which included fire-based powers. Also, he has insecurities and doubts about his potential as a superhero. He is haunted by the murder of his younger sister, which gives him a strong dislike of all that is evil. Patrick starts out as an average teenager who is a social outcast and trying to live a normal life. However, when he becomes superhuman, Patrick has a psychological struggle with his own power, which is linked with his anger and hatred. He does not feel comfortable with the idea that he is considered the team leader, but nevertheless, is eager to join his teammates in their crusade against evil.
One of my favorite superpowers has always been pyrokinesis or fire manipulation because of the powerful ferocity it can give the user. When I was designing one of the main characters of The Young Guardians Saga, Patrick Donovan, I wanted to give him a power that stood apart from ordinary fire manipulation. To that end, I turned my attention to my favorite of all mythological beasts: dragons. I wish to give Patrick features of a dragon while maintaining his human form, so I decided to give him the ability to generate and control the very flames that dragons breathe. Apart from wielding dragon fire, I wanted this power to have a psychological effect on Patrick whenever he uses it to the extreme, which would result in giving Patrick the ferocious mindset of a dragon.
Stan Lee has been widely acknowledged as the founding father of the modern superhero genre as well as the co-creator of many of the superheroes that we all know, such as Spider-Man, The Incredible Hulk, The Fantastic Four, and so forth. In addition, he has made a cameo appearance in every single film Marvel Studios ever created. For anyone who wishes to learn more about this comic book legend, click here.
Written by Brian K. Vaughan, this amazing graphic novel tells the story of a group of children who discover their parents are secretly an organization of super villains called The Pride. Complete with a unique origin story and a fast-paced plot, the Runaways keeps the reader wondering what will happen next as these children attempt to save the very world their parents are trying to destroy. In addition, the powers and equipment that the children acquire to combat their parents’ evil are very intriguing with a deep sense of variety. When the writer created The Pride, it was enjoyable to see that he based them on all the various archetypes of super villains: crime lords, dark sorcerers, mad scientists, time travelers, alien invaders, and mutants. This story is highly recommended to anyone who would enjoy the superhero genre.
Goals: Seek revenge against Patrick Donovan and kill anyone or anything that gets in his way.
Powers and Abilities: Cybernetic body equipped with superhuman strength, durability, reflexes, flight, and advanced weaponry.
Vulnerabilities: If his brain is damaged or destroyed, he will die. In addition, the experimental technology that keeps him alive also drives him insane.
|
#!/usr/bin/env python
# coding=utf-8
# (C) Copyright 2017 Hewlett Packard Enterprise Development LP
# (C) Copyright 2018 FUJITSU LIMITED
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Wait for specific Kafka topics.
For using this script you need to set two environment variables:
* `KAFKA_URI` for connection string to Kafka together with port.
Example: `kafka:9092`, `192.168.10.6:9092`.
* `KAFKA_WAIT_FOR_TOPICS` that contain topics that should exist in Kafka
to consider it's working. Many topics should be separated with comma.
Example: `retry-notifications,alarm-state-transitions`.
After making sure that these environment variables are set, you can simply
execute this script in the following way:
`python3 kafka_wait_for_topics.py && ./start_service.sh`
`python3 kafka_wait_for_topics.py || exit 1`
Additional environment variables available are:
* `LOG_LEVEL` - default to `INFO`
* `KAFKA_WAIT_RETRIES` - number of retries, default to `24`
* `KAFKA_WAIT_INTERVAL` - in seconds, default to `5`
"""
import logging
import os
import sys
import time
from pykafka import KafkaClient
from pykafka.exceptions import NoBrokersAvailableError
# Run this script only with Python 3
if sys.version_info.major != 3:
    sys.stdout.write("Sorry, requires Python 3.x\n")
    sys.exit(1)
# Logging verbosity, configurable through the LOG_LEVEL environment variable.
LOG_LEVEL = logging.getLevelName(os.environ.get('LOG_LEVEL', 'INFO'))
logging.basicConfig(level=LOG_LEVEL)
logger = logging.getLogger(__name__)
# Kafka bootstrap server(s), e.g. "kafka:9092" or "192.168.10.6:9092".
KAFKA_HOSTS = os.environ.get('KAFKA_URI', 'kafka:9092')
# Comma-separated topic names, compared as *bytes* against Kafka's topic list.
# NOTE(review): if KAFKA_WAIT_FOR_TOPICS is unset this yields [b''], which can
# never match a real topic -- confirm the variable is always provided.
REQUIRED_TOPICS = os.environ.get('KAFKA_WAIT_FOR_TOPICS', '') \
    .encode('utf-8').split(b',')
# Retry policy used by the @retry decorator below.
KAFKA_WAIT_RETRIES = int(os.environ.get('KAFKA_WAIT_RETRIES', '24'))
KAFKA_WAIT_INTERVAL = int(os.environ.get('KAFKA_WAIT_INTERVAL', '5'))
class TopicNoPartition(Exception):
    """Raised when a topic exists but has no partitions yet."""
class TopicNotFound(Exception):
    """Raised when a required topic is missing from the Kafka cluster."""
def retry(retries=KAFKA_WAIT_RETRIES, delay=KAFKA_WAIT_INTERVAL,
          check_exceptions=()):
    """Retry decorator.

    Calls the wrapped function up to ``retries`` times, sleeping ``delay``
    seconds between attempts. Exceptions listed in ``check_exceptions`` are
    logged as known/expected; any other exception is also retried but logged
    more loudly. The last attempt re-raises.

    :param retries: Maximum number of attempts.
    :param delay: Seconds to sleep between attempts.
    :param check_exceptions: Tuple of exception types considered "expected".
    """
    from functools import wraps

    def decorator(func):
        """Decorator."""
        @wraps(func)
        def f_retry(*args, **kwargs):
            """Retry running function on exception after delay."""
            for i in range(1, retries + 1):
                try:
                    return func(*args, **kwargs)
                # pylint: disable=W0703
                # We want to catch all exceptions here to retry.
                except check_exceptions + (Exception,) as exc:
                    if i < retries:
                        logger.info('Connection attempt %d of %d failed',
                                    i, retries)
                        if isinstance(exc, check_exceptions):
                            logger.debug('Caught known exception, retrying...',
                                         exc_info=True)
                        else:
                            # BUG FIX: logger.warn is deprecated (removed in
                            # Python 3.13); use logger.warning.
                            logger.warning(
                                'Caught unknown exception, retrying...',
                                exc_info=True)
                    else:
                        logger.exception('Failed after %d attempts', retries)
                        raise
                # Attempt failed (exception handled above): wait before the
                # next retry. On success we returned from inside the try.
                time.sleep(delay)
        return f_retry
    return decorator
@retry(check_exceptions=(TopicNoPartition, TopicNotFound))
def check_topics(client, req_topics):
    """Verify that every requested topic exists in Kafka and has partitions.

    :param client: Connected pykafka KafkaClient instance.
    :param req_topics: Iterable of topic names (as bytes).
    :raises TopicNotFound: if a topic is missing from the cluster.
    :raises TopicNoPartition: if a topic exists without any partition.
    """
    client.update_cluster()
    logger.debug('Found topics: %r', client.topics.keys())
    for topic_name in req_topics:
        if topic_name not in client.topics.keys():
            message = 'Topic not found: {}'.format(topic_name)
            logger.warning(message)
            raise TopicNotFound(message)
        if not client.topics[topic_name].partitions:
            message = 'Topic has no partitions: {}'.format(topic_name)
            logger.warning(message)
            raise TopicNoPartition(message)
        logger.info('Topic is ready: %s', topic_name)
@retry(check_exceptions=(NoBrokersAvailableError,))
def connect_kafka(hosts):
    """Connect to Kafka with retries.

    :param hosts: Kafka connection string, e.g. "kafka:9092".
    :return: A connected pykafka KafkaClient.
    """
    return KafkaClient(hosts=hosts)
def main():
    """Start main part of the wait script.

    Connects to Kafka (with retries) and blocks until every topic listed in
    REQUIRED_TOPICS exists and has at least one partition.
    """
    logger.info('Checking for available topics: %r', repr(REQUIRED_TOPICS))
    client = connect_kafka(hosts=KAFKA_HOSTS)
    check_topics(client, REQUIRED_TOPICS)
if __name__ == '__main__':
    main()
|
By completing a bone graft procedure, Dr. Kleiman, Dr. Huberman or Dr. Regenye is now able to restore bone function and growth, thereby halting the effects of poor denture care.
Some conditions or syndromes are characterized by missing portions of the teeth, facial bones, jaw or skull. Drs. Kleiman, Huberman or Regenye may be able to perform a bone graft procedure to restore bone function and growth where it may be absent.
This condition usually develops over several years and may result in insufficient bone for the placement of dental implants. Drs. Kleiman, Huberman or Regenye can perform a procedure called a “sinus lift” that can treat enlarged sinuses.
|
"""
RSS/Atom feeds for the blog app.
"""
from django.contrib.syndication.views import Feed
from django.core.urlresolvers import reverse, reverse_lazy
from django.utils.feedgenerator import Atom1Feed
from django.utils.translation import ugettext_lazy as _
from apps.licenses.models import License
from .models import (Article,
ArticleTag,
ArticleCategory)
from .settings import NB_ARTICLES_PER_FEED
class BaseBlogArticleFeed(Feed):
    """
    Common base class for all article feeds of the blog app.
    """

    def items(self):
        """
        Must be implemented by subclasses to return the feed entries.
        """
        raise NotImplementedError()

    def item_title(self, item):
        """
        Title of the given article.
        :param item: The current feed item.
        """
        return item.title

    def item_description(self, item):
        """
        Body of the given article, prefixed with its description when one is set.
        :param item: The current feed item.
        """
        body = item.content_html  # TODO handle membership restriction
        # FIXME Handle footnotes
        if item.description_html:
            return '<p><strong>%s</strong></p>\n%s' % (item.description_html, body)
        return body

    def item_author_name(self, item):
        """
        Username of the article's author, or "Anonymous" for inactive accounts.
        :param item: The current feed item.
        """
        if item.author.is_active:
            return item.author.username
        return _('Anonymous')

    def item_pubdate(self, item):
        """
        Publication date of the article.
        :param item: The current feed item.
        """
        return item.pub_date

    def item_updateddate(self, item):
        """
        Last content modification date, falling back to the publication date.
        :param item: The current feed item.
        """
        return item.last_content_modification_date or item.pub_date

    def item_categories(self, item):
        """
        Names of all categories followed by all tags attached to the article.
        :param item: The current feed item.
        """
        names = [category.name for category in item.categories.all()]
        names.extend(tag.name for tag in item.tags.all())
        return names
class LatestArticlesFeed(BaseBlogArticleFeed):
    """
    RSS feed of the most recently published articles, all categories together.
    """

    title = _('Latest articles')
    link = reverse_lazy('blog:index')
    feed_url = reverse_lazy('blog:latest_articles_rss')
    description = _('Latest articles, all categories together')

    def items(self):
        """
        Return the N most recently published articles.
        """
        queryset = Article.objects.published()
        queryset = queryset.select_related('author')
        queryset = queryset.prefetch_related('categories', 'tags')
        return queryset[:NB_ARTICLES_PER_FEED]
class LatestArticlesAtomFeed(LatestArticlesFeed):
    """
    Feed of latest articles (ATOM version).
    """
    # Serve the Atom 1.0 format instead of the default RSS 2.0.
    feed_type = Atom1Feed
    feed_url = reverse_lazy('blog:latest_articles_atom')
    # Atom uses "subtitle" where RSS uses "description".
    subtitle = LatestArticlesFeed.description
class LatestArticlesForCategoryFeed(BaseBlogArticleFeed):
    """
    Feed of the latest articles published in a given category.
    """

    def get_object(self, request, *args, **kwargs):
        """
        Fetch the ArticleCategory matching the slug hierarchy from the URL.
        :param request: The current request.
        :param args: Extra arguments.
        :param kwargs: Extra keywords arguments.
        :return: ArticleCategory
        """
        slug_hierarchy = kwargs.pop('hierarchy')
        assert slug_hierarchy is not None
        return ArticleCategory.objects.get(slug_hierarchy=slug_hierarchy)

    def title(self, obj):
        """
        Feed title built from the category name.
        :param obj: The feed object.
        """
        return _('Latest articles in category "%s"') % obj.name

    def link(self, obj):
        """
        Permalink of the category page.
        :param obj: The feed object.
        """
        return obj.get_absolute_url()

    def feed_url(self, obj):
        """
        Permalink of the RSS feed for this category.
        :param obj: The feed object.
        """
        return obj.get_latest_articles_rss_feed_url()

    def description(self, obj):
        """
        Category description, or a generic fallback text.
        :param obj: The feed object.
        """
        return obj.description_html or _('Latest articles in category "%s"') % obj.name

    def items(self, obj):
        """
        The N most recent published articles of this category.
        :param obj: The feed object.
        """
        queryset = obj.articles.published()
        queryset = queryset.select_related('author')
        queryset = queryset.prefetch_related('categories', 'tags')
        return queryset[:NB_ARTICLES_PER_FEED]
class LatestArticlesForCategoryAtomFeed(LatestArticlesForCategoryFeed):
    """
    Feed of latest articles for a specific category (ATOM version).
    """
    # Serve the Atom 1.0 format instead of the default RSS 2.0.
    feed_type = Atom1Feed
    # Atom uses "subtitle" where RSS uses "description".
    subtitle = LatestArticlesForCategoryFeed.description

    def feed_url(self, obj):
        """
        Return the permalink to the latest articles ATOM feed for this category.
        :param obj: The feed object.
        """
        return obj.get_latest_articles_atom_feed_url()
class LatestArticlesForLicenseFeed(BaseBlogArticleFeed):
    """
    Feed of the latest articles published under a given license.
    """

    def get_object(self, request, *args, **kwargs):
        """
        Fetch the License object matching the slug from the URL.
        :param request: The current request.
        :param args: Extra arguments.
        :param kwargs: Extra keywords arguments.
        :return: ArticleLicense
        """
        license_slug = kwargs.pop('slug')
        assert license_slug is not None
        return License.objects.get(slug=license_slug)

    def title(self, obj):
        """
        Feed title built from the license name.
        :param obj: The feed object.
        """
        return _('Latest articles with license "%s"') % obj.name

    def link(self, obj):
        """
        Permalink of the license detail page.
        :param obj: The feed object.
        """
        return reverse('bloglicense:license_articles_detail', kwargs={'slug': obj.slug})

    def description(self, obj):
        """
        License description, or a generic fallback text.
        :param obj: The feed object.
        """
        return obj.description_html or _('Latest articles with license "%s"') % obj.name

    def feed_url(self, obj):
        """
        Permalink of the RSS feed for this license.
        :param obj: The feed object.
        """
        return reverse('bloglicense:latest_license_articles_rss', kwargs={'slug': obj.slug})

    def items(self, obj):
        """
        The N most recent published articles under this license.
        :param obj: The feed object.
        """
        queryset = obj.articles.published()
        queryset = queryset.select_related('author')
        queryset = queryset.prefetch_related('categories', 'tags')
        return queryset[:NB_ARTICLES_PER_FEED]
class LatestArticlesForLicenseAtomFeed(LatestArticlesForLicenseFeed):
    """
    Feed of latest articles for a specific license (ATOM version).
    """
    # Serve the Atom 1.0 format instead of the default RSS 2.0.
    feed_type = Atom1Feed
    # Atom uses "subtitle" where RSS uses "description".
    subtitle = LatestArticlesForLicenseFeed.description

    def feed_url(self, obj):
        """
        Return the permalink to the latest articles Atom feed with this license.
        :param obj: The feed object.
        """
        return reverse('bloglicense:latest_license_articles_atom', kwargs={'slug': obj.slug})
class LatestArticlesForTagFeed(BaseBlogArticleFeed):
    """
    Feed of the latest articles published with a given tag.
    """

    def get_object(self, request, *args, **kwargs):
        """
        Fetch the ArticleTag matching the slug from the URL.
        :param request: The current request.
        :param args: Extra arguments.
        :param kwargs: Extra keywords arguments.
        :return: ArticleTag
        """
        tag_slug = kwargs.pop('slug')
        assert tag_slug is not None
        return ArticleTag.objects.get(slug=tag_slug)

    def title(self, obj):
        """
        Feed title built from the tag name.
        :param obj: The feed object.
        """
        return _('Latest articles with tag "%s"') % obj.name

    def link(self, obj):
        """
        Permalink of the tag page.
        :param obj: The feed object.
        """
        return obj.get_absolute_url()

    def description(self, obj):
        """
        Feed description built from the tag name.
        :param obj: The feed object.
        """
        return _('Latest articles with tag "%s"') % obj.name

    def feed_url(self, obj):
        """
        Permalink of the RSS feed for this tag.
        :param obj: The feed object.
        """
        return obj.get_latest_articles_rss_feed_url()

    def items(self, obj):
        """
        The N most recent published articles carrying this tag.
        :param obj: The feed object.
        """
        queryset = obj.articles.published()
        queryset = queryset.select_related('author')
        queryset = queryset.prefetch_related('categories', 'tags')
        return queryset[:NB_ARTICLES_PER_FEED]
class LatestArticlesForTagAtomFeed(LatestArticlesForTagFeed):
    """
    Feed of latest articles for a specific tag (ATOM version).
    """
    # Serve the Atom 1.0 format instead of the default RSS 2.0.
    feed_type = Atom1Feed
    # Atom uses "subtitle" where RSS uses "description".
    subtitle = LatestArticlesForTagFeed.description

    def feed_url(self, obj):
        """
        Return the permalink to the latest articles Atom feed for this tag.
        :param obj: The feed object.
        """
        return obj.get_latest_articles_atom_feed_url()
class ArticlesForYearFeed(BaseBlogArticleFeed):
    """
    Feed of articles published during a given year.
    """

    def get_object(self, request, *args, **kwargs):
        """
        Build the archive descriptor from the URL keyword arguments.
        :param request: The current request.
        :param args: Extra arguments.
        :param kwargs: Extra keywords arguments.
        :return: dict with year key.
        """
        archive_year = kwargs.pop('year')
        assert archive_year is not None
        return {'year': archive_year}

    def title(self, obj):
        """
        Feed title built from the archive year.
        :param obj: The feed object.
        """
        return _('Latest articles for year %(year)s') % obj

    def link(self, obj):
        """
        Permalink of the yearly archive page.
        :param obj: The feed object.
        """
        return reverse('blog:archive_year', kwargs=obj)

    def description(self, obj):
        """
        Feed description built from the archive year.
        :param obj: The feed object.
        """
        return _('Latest articles for year %(year)s') % obj

    def feed_url(self, obj):
        """
        Permalink of the RSS feed for this yearly archive.
        :param obj: The feed object.
        """
        return reverse('blog:articles_archive_year_rss', kwargs=obj)

    def items(self, obj):
        """
        All published articles of this year (no slicing, unlike other feeds).
        :param obj: The feed object.
        """
        queryset = Article.objects.published()
        queryset = queryset.filter(pub_date__year=int(obj['year']))
        return queryset.select_related('author').prefetch_related('categories', 'tags')
class ArticlesForYearAtomFeed(ArticlesForYearFeed):
    """
    Feed of articles for a specific year (ATOM version).
    """
    # Serve the Atom 1.0 format instead of the default RSS 2.0.
    feed_type = Atom1Feed
    # Atom uses "subtitle" where RSS uses "description".
    subtitle = ArticlesForYearFeed.description

    def feed_url(self, obj):
        """
        Return the permalink to the articles archive Atom feed for this year.
        :param obj: The feed object.
        """
        return reverse('blog:articles_archive_year_atom', kwargs=obj)
class ArticlesForYearAndMonthFeed(BaseBlogArticleFeed):
    """
    Feed of articles published during a given year and month.
    """

    def get_object(self, request, *args, **kwargs):
        """
        Build the archive descriptor from the URL keyword arguments.
        :param request: The current request.
        :param args: Extra arguments.
        :param kwargs: Extra keywords arguments.
        :return: dict with year and month keys.
        """
        archive = {'year': kwargs.pop('year'), 'month': kwargs.pop('month')}
        assert archive['year'] is not None
        assert archive['month'] is not None
        return archive

    def title(self, obj):
        """
        Feed title built from the archive year and month.
        :param obj: The feed object.
        """
        return _('Latest articles for month %(year)s/%(month)s') % obj

    def link(self, obj):
        """
        Permalink of the monthly archive page.
        :param obj: The feed object.
        """
        return reverse('blog:archive_month', kwargs=obj)

    def description(self, obj):
        """
        Feed description built from the archive year and month.
        :param obj: The feed object.
        """
        return _('Latest articles for month %(year)s/%(month)s') % obj

    def feed_url(self, obj):
        """
        Permalink of the RSS feed for this monthly archive.
        :param obj: The feed object.
        """
        return reverse('blog:articles_archive_month_rss', kwargs=obj)

    def items(self, obj):
        """
        All published articles of this year and month (no slicing).
        :param obj: The feed object.
        """
        queryset = Article.objects.published().filter(
            pub_date__year=int(obj['year']),
            pub_date__month=int(obj['month']))
        return queryset.select_related('author').prefetch_related('categories', 'tags')
class ArticlesForYearAndMonthAtomFeed(ArticlesForYearAndMonthFeed):
    """
    Feed of articles for a specific year and month (ATOM version).
    """
    # Serve the Atom 1.0 format instead of the default RSS 2.0.
    feed_type = Atom1Feed
    # Atom uses "subtitle" where RSS uses "description".
    subtitle = ArticlesForYearAndMonthFeed.description

    def feed_url(self, obj):
        """
        Return the permalink to the articles archive Atom feed for this year.
        :param obj: The feed object.
        """
        return reverse('blog:articles_archive_month_atom', kwargs=obj)
|
Beautiful CHANEL dark grey satin evening bag with Rhinestone CC Camellia flower accent and quilted CC on the bottom of the bag. This is the perfect Chanel bag for an evening out when you just need your phone, cash, & a lip gloss. Dustbag included. So Elegant!
|
"""Identity objects for constructing names for bundles and partitions, and
Object Numbers for datasets, columns, partitions and tables.
Copyright (c) 2013 Clarinova. This file is licensed under the terms of
the Revised BSD License, included in this distribution as LICENSE.txt
"""
from collections import OrderedDict
from copy import copy
import json
import os
import random
import time
from six import iteritems, itervalues, string_types
import requests
import semantic_version as sv
from .util import md5_for_file, Constant
class NotObjectNumberError(ValueError):
    """Raised when a string cannot be parsed as an Object Number."""
    pass
class Base62DecodeError(ValueError):
    """Raised when a base-62 encoded value cannot be decoded."""
    pass
class Name(object):
    """The Name part of an identity."""

    NAME_PART_SEP = '-'

    DEFAULT_FORMAT = 'db'

    # Name, Default Value, Is Optional
    _name_parts = [('source', None, False),
                   ('dataset', None, False),
                   ('subset', None, True),
                   ('type', None, True),
                   ('part', None, True),
                   ('bspace', None, True),
                   ('btime', None, True),
                   ('variation', None, True),
                   # Semantic Version, different from Object Number revision,
                   # which is an int. "Version" is the preferred name,
                   # but 'revision' is in the databases schema.
                   ('version', None, True)
                   ]

    # Names that are generated from the name parts.
    _generated_names = [
        ('name', None, True),
        ('vname', None, True),
        ('fqname', None, True)]

    source = None
    dataset = None
    subset = None
    type = None
    part = None
    variation = None
    btime = None
    bspace = None
    version = None

    def __init__(self, *args, **kwargs):
        """Build a Name from keyword arguments matching ``_name_parts``.

        :param args: Unused; accepted for subclass compatibility.
        :param kwargs: One value per name part; non-optional parts must be given.
        :raises ValueError: if a required part is missing, or the version is
            neither a valid semantic version nor a version specification.
        """
        for k, default, optional in self.name_parts:
            if optional:
                setattr(self, k, kwargs.get(k, default))
            else:
                setattr(self, k, kwargs.get(k))

        self.version = self._parse_version(self.version)

        self.clean()
        self.is_valid()

    def clean(self):
        """Normalize all string parts: lower-case and replace disallowed
        characters with underscores."""
        import re
        for k, default, optional in self.name_parts:
            # Skip the names in name query.
            v = getattr(self, k)
            if not v or not isinstance(v, string_types):
                # Can only clean strings.
                continue
            # The < and > chars are only there to for <any> and <none> and version specs.
            # . is needs for source, and + is needed for version specs
            nv = re.sub(r'[^a-zA-Z0-9\.\<\>=]', '_', v).lower()
            if v != nv:
                setattr(self, k, nv)

    def is_valid(self):
        """Ensure every non-optional name part has a value.

        :raise ValueError:
        """
        for k, _, optional in self.name_parts:
            if not optional and not bool(getattr(self, k)):
                raise ValueError(
                    "Name requires field '{}' to have a value. Got: {}" .format(
                        k,
                        self.name_parts))

    def _parse_version(self, version):
        """Validate and normalize a version string.

        Accepts a semantic version, a version spec, or the NameQuery
        ANY/NONE sentinels. An empty version defaults to '0.0.0'.
        """
        if version is not None and isinstance(version, string_types):
            if version == NameQuery.ANY:
                pass
            elif version == NameQuery.NONE:
                pass
            else:
                try:
                    version = str(sv.Version(version))
                except ValueError:
                    try:
                        version = str(sv.Spec(version))
                    except ValueError:
                        raise ValueError("Could not parse '{}' as a semantic version".format(version))

        if not version:
            version = str(sv.Version('0.0.0'))

        return version

    @property
    def name_parts(self):
        """The (name, default, optional) triples that define this Name."""
        return self._name_parts

    def clear_dict(self, d):
        """Return a copy of *d* with falsy values removed."""
        return {k: v for k, v in list(d.items()) if v}

    @property
    def dict(self):
        """Returns the identity as a dict.

        values that are empty are removed
        """
        return self._dict(with_name=True)

    def _dict(self, with_name=True):
        """Returns the identity as a dict.

        values that are empty are removed
        """
        d = dict([(k, getattr(self, k)) for k, _, _ in self.name_parts])

        if with_name:
            d['name'] = self.name
            try:
                d['vname'] = self.vname
            except ValueError:
                pass

        return self.clear_dict(d)

    @property
    def name(self):
        """String version of the name, excluding the version, and excluding the
        format, if the format is 'db'."""
        d = self._dict(with_name=False)

        return self.NAME_PART_SEP.join([str(d[k]) for (k, _, _) in self.name_parts if k and d.get(
            k, False) and k != 'version' and not (k == 'format' and d[k] == Name.DEFAULT_FORMAT)])

    @property
    def vname(self):
        """The name with the version appended; specs attach directly,
        plain versions are joined with the part separator."""
        if not self.version:
            raise ValueError('No version set')

        if isinstance(self.version, sv.Spec):
            return self.name + str(self.version)
        else:
            return self.name + self.NAME_PART_SEP + str(self.version)

    def _path_join(self, names=None, excludes=None, sep=os.sep):
        """Join selected name parts with *sep*, skipping *excludes*."""
        d = self._dict(with_name=False)

        if isinstance(excludes, string_types):
            excludes = {excludes}

        if not isinstance(excludes, set):
            excludes = set(excludes)

        if not names:
            if not excludes:
                excludes = set([])
            names = set(k for k, _, _ in self.name_parts) - set(excludes)
        else:
            names = set(names)

        final_parts = [str(d[k]) for (k, _, _) in self.name_parts
                       if k and d.get(k, False) and k in (names - excludes)]

        return sep.join(final_parts)

    @property
    def path(self):
        """The path of the bundle source.

        Includes the revision.
        """
        # Need to do this to ensure the function produces the
        # bundle path when called from subclasses
        names = [k for k, _, _ in Name._name_parts]

        return os.path.join(
            self.source, self._path_join(names=names, excludes='source', sep=self.NAME_PART_SEP))

    @property
    def source_path(self):
        """The name in a form suitable for use in a filesystem.

        Excludes the revision
        """
        # Need to do this to ensure the function produces the
        # bundle path when called from subclasses
        names = [k for k, _, _ in self._name_parts]

        parts = [self.source]

        if self.bspace:
            parts.append(self.bspace)

        parts.append(
            self._path_join(names=names, excludes=['source', 'version', 'bspace'], sep=self.NAME_PART_SEP))

        return os.path.join(*parts)

    @property
    def cache_key(self):
        """The name in a form suitable for use as a cache-key"""
        try:
            return self.path
        except TypeError:
            # BUG FIX: corrected "invalild" typo in the error message.
            raise TypeError("self.path is invalid: '{}', '{}'".format(str(self.path), type(self.path)))

    def clone(self):
        """Return a new instance of the same class built from this dict."""
        return self.__class__(**self.dict)

    def ver(self, revision):
        """Clone and change the version."""
        c = self.clone()
        # BUG FIX: previously this re-parsed self.version and ignored the
        # 'revision' argument entirely, so the version never changed.
        c.version = self._parse_version(revision)
        return c

    def type_is_compatible(self, o):
        """True when *o* is a DatasetNumber (the number type matching a Name)."""
        if not isinstance(o, DatasetNumber):
            return False
        else:
            return True

    # The name always stores the version number as a string, so these
    # convenience functions make it easier to update specific parts

    @property
    def version_minor(self):
        return sv.Version(self.version).minor

    @version_minor.setter
    def version_minor(self, value):
        v = sv.Version(self.version)
        v.minor = int(value)
        self.version = str(v)

    @property
    def version_major(self):
        # BUG FIX: this previously returned the *minor* field (copy-paste
        # error from version_minor).
        return sv.Version(self.version).major

    @version_major.setter
    def version_major(self, value):
        v = sv.Version(self.version)
        v.major = int(value)
        self.version = str(v)

    @property
    def version_patch(self):
        return sv.Version(self.version).patch

    @version_patch.setter
    def version_patch(self, value):
        v = sv.Version(self.version)
        v.patch = int(value)
        self.version = str(v)

    @property
    def version_build(self):
        return sv.Version(self.version).build

    @version_build.setter
    def version_build(self, value):
        v = sv.Version(self.version)
        v.build = value
        self.version = str(v)

    def as_partition(self, **kwargs):
        """Return a PartitionName based on this name."""
        return PartitionName(**dict(list(self.dict.items()) + list(kwargs.items())))

    def as_namequery(self):
        """Return a NameQuery built from this name's parts."""
        return NameQuery(**self._dict(with_name=False))

    def __str__(self):
        return self.name
class PartialPartitionName(Name):
    """For specifying a PartitionName within the context of a bundle."""

    FORMAT = 'default'

    time = None
    space = None
    table = None
    grain = None
    format = None
    variant = None
    segment = None

    _name_parts = [
        ('table', None, True),
        ('time', None, True),
        ('space', None, True),
        ('grain', None, True),
        ('format', None, True),
        ('variant', None, True),
        ('segment', None, True)]

    def promote(self, name):
        """Promote to a PartitionName by combining with a bundle Name."""
        return PartitionName(**dict(list(name.dict.items()) + list(self.dict.items())))

    def is_valid(self):
        # Partial names may legitimately have every part unset.
        pass

    def __eq__(self, o):
        return (self.time == o.time and self.space == o.space and self.table == o.table and
                self.grain == o.grain and self.format == o.format and self.segment == o.segment
                and self.variant == o.variant
                )

    def __lt__(self, o):
        # Needed for sorting on Python 3, which ignores __cmp__.
        return str(self) < str(o)

    def __cmp__(self, o):
        # BUG FIX: the 'cmp' builtin does not exist on Python 3; compute the
        # three-way result directly (same semantics on Python 2).
        a, b = str(self), str(o)
        return (a > b) - (a < b)

    def __hash__(self):
        return (hash(self.time) ^ hash(self.space) ^ hash(self.table) ^
                hash(self.grain) ^ hash(self.format) ^ hash(self.segment) ^ hash(self.variant))
class PartitionName(PartialPartitionName, Name):
    """A Partition Name."""

    # Bundle parts, then partition parts, with 'version' kept last.
    _name_parts = (Name._name_parts[0:-1] +
                   PartialPartitionName._name_parts +
                   Name._name_parts[-1:])

    def _local_parts(self):
        """Return the partition-specific path components: format (if not the
        default), table, time/space, then grain/variant/segment."""
        parts = []

        if self.format and self.format != Name.DEFAULT_FORMAT:
            parts.append(str(self.format))

        if self.table:
            parts.append(self.table)

        l = []
        if self.time:
            l.append(str(self.time))
        if self.space:
            l.append(str(self.space))

        if l:
            parts.append(self.NAME_PART_SEP.join(l))

        l = []
        if self.grain:
            l.append(str(self.grain))
        if self.variant:
            l.append(str(self.variant))
        if self.segment:
            l.append(str(self.segment))

        if l:
            parts.append(self.NAME_PART_SEP.join([str(x) for x in l]))

        # the format value is part of the file extension

        return parts

    @property
    def name(self):
        """The name string, excluding the version and the default format."""
        d = self._dict(with_name=False)

        return self.NAME_PART_SEP.join(
            [str(d[k]) for (k, _, _) in self.name_parts
             if k and d.get(k, False) and k != 'version' and (k != 'format' or str(d[k]) != Name.DEFAULT_FORMAT)]
        )

    @property
    def path(self):
        """The path of the bundle source.

        Includes the revision.
        """
        # Need to do this to ensure the function produces the
        # bundle path when called from subclasses
        names = [k for k, _, _ in Name._name_parts]

        return os.path.join(self.source,
                            self._path_join(names=names, excludes=['source', 'format'], sep=self.NAME_PART_SEP),
                            *self._local_parts()
                            )

    @property
    def source_path(self):
        # BUG FIX: NotImplemented is not an exception class -- calling it
        # raises a TypeError. Use NotImplementedError.
        raise NotImplementedError("PartitionNames don't have source paths")

    @property
    def sub_path(self):
        """The path of the partition source, excluding the bundle path parts.

        Includes the revision.
        """
        try:
            return os.path.join(*(self._local_parts()))
        except TypeError as e:
            # BUG FIX: Exception.message does not exist on Python 3; format
            # the exception itself instead.
            raise TypeError(
                "Path failed for partition {} : {}".format(
                    self.name,
                    e))

    def type_is_compatible(self, o):
        """True when *o* is a PartitionNumber (the number type matching a
        PartitionName)."""
        if not isinstance(o, PartitionNumber):
            return False
        else:
            return True

    @classmethod
    def format_name(cls):
        return cls.FORMAT

    @classmethod
    def extension(cls):
        # NOTE(review): PATH_EXTENSION is not defined in this class; it is
        # presumably provided by subclasses -- confirm before calling.
        return cls.PATH_EXTENSION

    def as_namequery(self):
        """Return a PartitionNameQuery built from this name's parts."""
        return PartitionNameQuery(**self._dict(with_name=False))

    def as_partialname(self):
        """Return a PartialPartitionName built from this name's parts."""
        return PartialPartitionName(** self.dict)

    @property
    def partital_dict(self):
        """Returns the name as a dict, but with only the items that are
        particular to a PartitionName.

        (Property name keeps its historical 'partital' spelling for backward
        compatibility. The former 'with_name' parameter was unusable on a
        property and has been removed.)
        """
        d = self._dict(with_name=False)

        d = {k: d.get(k) for k, _, _ in PartialPartitionName._name_parts if d.get(k, False)}

        if 'format' in d and d['format'] == Name.DEFAULT_FORMAT:
            del d['format']

        d['name'] = self.name

        return d
class PartialMixin(object):
    """Mixin for name classes used as queries, where parts may be left
    unspecified (ANY) or explicitly empty (NONE)."""

    NONE = '<none>'
    ANY = '<any>'

    use_clear_dict = True

    def clear_dict(self, d):
        """Replace None values with the NONE sentinel, unless disabled."""
        if not self.use_clear_dict:
            return d
        return {key: (self.NONE if value is None else value)
                for key, value in list(d.items())}

    def _dict(self, with_name=True):
        """Returns the identity as a dict.

        values that are empty are removed
        """
        raw = {key: getattr(self, key) for key, _, _ in self.name_parts}
        return self.clear_dict(raw)

    def with_none(self):
        """Convert the NameQuery.NONE to None. This is needed because on the
        kwargs list, a None value means the field is not specified, which
        equates to ANY. The _find_orm() routine, however, is easier to write if
        the NONE value is actually None.

        Returns a clone of the origin, with NONE converted to None
        """
        clone = self.clone()
        for key, _, _ in clone.name_parts:
            if getattr(clone, key) == clone.NONE:
                delattr(clone, key)
        clone.use_clear_dict = False
        return clone

    def is_valid(self):
        return True

    @property
    def path(self):
        raise NotImplementedError("Can't get a path from a partial name")

    @property
    def cache_key(self):
        raise NotImplementedError("Can't get a cache_key from a partial name")
class NameQuery(PartialMixin, Name):
    """A partition name used for finding and searching. does not have an
    expectation of having all parts completely defined, and can't be used to
    generate a string.

    When a partial name is returned as a dict, parts that were not
    specified in the constructor have a value of '<any.', and parts that
    were specified as None have a value of '<none>'
    """

    NONE = PartialMixin.NONE
    ANY = PartialMixin.ANY

    # These are valid values for a name query, so the Name properties of
    # the same names are shadowed out.
    name = None
    vname = None
    fqname = None

    def clean(self):
        """Null operation, since NameQueries should not be cleaned."""
        pass

    @property
    def name_parts(self):
        """Works with PartialNameMixin.clear_dict to set NONE and ANY
        values."""
        default = PartialMixin.ANY
        base = [(key, default, True)
                for key, _, _ in super(NameQuery, self).name_parts]
        generated = [(key, default, True)
                     for key, _, _ in Name._generated_names]
        return base + generated
class PartitionNameQuery(PartialMixin, PartitionName):
    """A partition name used for finding and searching.

    does not have an expectation of having all parts completely defined,
    and can't be used to generate a string
    """

    # These are valid values for a name query
    name = None
    vname = None
    fqname = None

    def clean(self):
        """Null operation, since NameQueries should not be cleaned."""
        pass

    @property
    def name_parts(self):
        """Works with PartialNameMixin.clear_dict to set NONE and ANY
        values."""
        default = PartialMixin.ANY
        fixed = [(key, default, True)
                 for key, _, _ in PartitionName._name_parts]
        generated = [(key, default, True)
                     for key, _, _ in Name._generated_names]
        return fixed + generated
class ObjectNumber(object):
    """Static class for holding constants and static methods related to object
    numbers."""

    # When a name is resolved to an ObjectNumber, orig can
    # be set to the input value, which can be important, for instance,
    # if the value's use depends on whether the user specified a version
    # number, since all values are resolved to versioned ONs
    orig = None
    assignment_class = 'self'

    # Single-character type codes that prefix each object number string.
    # OTHER1/OTHER2 are multi-character placeholders, only reachable via
    # parse(..., force_type=...).
    TYPE = Constant()
    TYPE.DATASET = 'd'
    TYPE.PARTITION = 'p'
    TYPE.TABLE = 't'
    TYPE.COLUMN = 'c'
    TYPE.CONFIG = 'F'
    TYPE.OTHER1 = 'other1'
    TYPE.OTHER2 = 'other2'

    VERSION_SEP = ''

    DLEN = Constant()

    # Number of digits in each assignment class
    # TODO: Add a 22 digit version for UUIDs ( 2^128 ~= 62^22 )
    DLEN.DATASET = (3, 5, 7, 9)
    DLEN.DATASET_CLASSES = dict(
        authoritative=DLEN.DATASET[0],  # Datasets registered by number authority .
        registered=DLEN.DATASET[1],  # For registered users of a numbering authority
        unregistered=DLEN.DATASET[2],  # For unregistered users of a numbering authority
        self=DLEN.DATASET[3])  # Self registered
    DLEN.PARTITION = 3
    DLEN.TABLE = 2
    DLEN.COLUMN = 3
    DLEN.REVISION = (0, 3)
    DLEN.OTHER1 = 4
    DLEN.OTHER2 = 4

    # Because the dataset number can be 3, 5, 7 or 9 characters,
    # And the revision is optional, the datasets ( and thus all
    # other objects ) , can have several different lengths. We
    # Use these different lengths to determine what kinds of
    # fields to parse
    # 's'-> short dataset, 'l'->long dataset, 'r' -> has revision
    #
    # generate with:
    # {
    #     ds_len + rl: (ds_len, (rl if rl != 0 else None), cls)
    #     for cls, ds_len in self.DLEN.DATASET_CLASSES.items()
    #     for rl in self.DLEN.REVISION
    # }
    #
    DATASET_LENGTHS = {
        3: (3, None, 'authoritative'),
        5: (5, None, 'registered'),
        6: (3, 3, 'authoritative'),
        7: (7, None, 'unregistered'),
        8: (5, 3, 'registered'),
        9: (9, None, 'self'),
        10: (7, 3, 'unregistered'),
        12: (9, 3, 'self')}

    # Length of the characters that aren't the dataset and revisions
    NDS_LENGTH = {'d': 0,
                  'p': DLEN.PARTITION,
                  't': DLEN.TABLE,
                  'c': DLEN.TABLE + DLEN.COLUMN,
                  'other1': DLEN.OTHER1,
                  'other2': DLEN.OTHER1 + DLEN.OTHER2,
                  'F': DLEN.OTHER1  # Configs
                  }

    TCMAXVAL = 62 ** DLEN.TABLE - 1  # maximum for table values.
    CCMAXVAL = 62 ** DLEN.COLUMN - 1  # maximum for column values.
    # maximum for partition values.
    PARTMAXVAL = 62 ** DLEN.PARTITION - 1

    EPOCH = 1389210331  # About Jan 8, 2014

    @classmethod
    def parse(cls, on_str, force_type=None):  # @ReservedAssignment
        """Parse a string into one of the object number classes.

        :param on_str: the object number string; the first character is the
            type code unless force_type is given.
        :param force_type: override the type code instead of reading it from
            the string.
        :raises NotObjectNumberError: for empty, non-string, or malformed input.
        """
        on_str_orig = on_str

        if on_str is None:
            return None

        if not on_str:
            raise NotObjectNumberError("Got null input")

        if not isinstance(on_str, string_types):
            raise NotObjectNumberError("Must be a string. Got a {} ".format(type(on_str)))

        if force_type:
            type_ = force_type
        else:
            type_ = on_str[0]
            on_str = on_str[1:]

        if type_ not in list(cls.NDS_LENGTH.keys()):
            raise NotObjectNumberError("Unknown type character '{}' for '{}'".format(type_, on_str_orig))

        # The dataset-digit count (and hence assignment class and presence of
        # a revision) is inferred from the remaining string length.
        ds_length = len(on_str) - cls.NDS_LENGTH[type_]

        if ds_length not in cls.DATASET_LENGTHS:
            raise NotObjectNumberError(
                "Dataset string '{}' has an unfamiliar length: {}".format(on_str_orig, ds_length))

        ds_lengths = cls.DATASET_LENGTHS[ds_length]
        assignment_class = ds_lengths[2]

        try:
            dataset = int(ObjectNumber.base62_decode(on_str[0:ds_lengths[0]]))

            if ds_lengths[1]:
                i = len(on_str) - ds_lengths[1]
                revision = int(ObjectNumber.base62_decode(on_str[i:]))
                on_str = on_str[0:i]  # remove the revision
            else:
                revision = None

            on_str = on_str[ds_lengths[0]:]

            if type_ == cls.TYPE.DATASET:
                return DatasetNumber(dataset, revision=revision, assignment_class=assignment_class)

            elif type_ == cls.TYPE.TABLE:
                table = int(ObjectNumber.base62_decode(on_str))
                return TableNumber(
                    DatasetNumber(dataset, assignment_class=assignment_class), table, revision=revision)

            elif type_ == cls.TYPE.PARTITION:
                partition = int(ObjectNumber.base62_decode(on_str))
                return PartitionNumber(
                    DatasetNumber(dataset, assignment_class=assignment_class), partition, revision=revision)

            elif type_ == cls.TYPE.COLUMN:
                table = int(ObjectNumber.base62_decode(on_str[0:cls.DLEN.TABLE]))
                column = int(ObjectNumber.base62_decode(on_str[cls.DLEN.TABLE:]))

                return ColumnNumber(
                    TableNumber(DatasetNumber(dataset, assignment_class=assignment_class), table),
                    column, revision=revision)

            elif type_ == cls.TYPE.OTHER1 or type_ == cls.TYPE.CONFIG:
                return GeneralNumber1(on_str_orig[0],
                                      DatasetNumber(dataset, assignment_class=assignment_class),
                                      int(ObjectNumber.base62_decode(on_str[0:cls.DLEN.OTHER1])),
                                      revision=revision)

            elif type_ == cls.TYPE.OTHER2:
                return GeneralNumber2(on_str_orig[0],
                                      DatasetNumber(dataset, assignment_class=assignment_class),
                                      int(ObjectNumber.base62_decode(on_str[0:cls.DLEN.OTHER1])),
                                      int(ObjectNumber.base62_decode(
                                          on_str[cls.DLEN.OTHER1:cls.DLEN.OTHER1 + cls.DLEN.OTHER2])),
                                      revision=revision)

            else:
                raise NotObjectNumberError('Unknown type character: ' + type_ + ' in ' + str(on_str_orig))

        except Base62DecodeError as e:
            raise NotObjectNumberError('Unknown character: ' + str(e))

    @classmethod
    def base62_encode(cls, num):
        """Encode a number in Base X.

        `num`: The number to encode
        `alphabet`: The alphabet to use for encoding

        Stolen from: http://stackoverflow.com/a/1119769/1144479
        """
        alphabet = "0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"

        if num == 0:
            return alphabet[0]
        arr = []
        base = len(alphabet)
        while num:
            rem = num % base
            num = num // base
            arr.append(alphabet[rem])
        arr.reverse()
        return ''.join(arr)

    @classmethod
    def base62_decode(cls, string):
        """Decode a Base X encoded string into the number.

        Arguments:
        - `string`: The encoded string
        - `alphabet`: The alphabet to use for encoding

        Stolen from: http://stackoverflow.com/a/1119769/1144479

        :raises Base62DecodeError: when a character is not in the alphabet.
        """
        alphabet = "0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"

        base = len(alphabet)
        strlen = len(string)
        num = 0

        idx = 0
        for char in string:
            power = (strlen - (idx + 1))
            try:
                num += alphabet.index(char) * (base ** power)
            except ValueError:
                raise Base62DecodeError(
                    "Failed to decode char: '{}'".format(char))
            idx += 1

        return num

    @classmethod
    def increment(cls, v):
        """Increment the version number of an object number or object number string."""
        if not isinstance(v, ObjectNumber):
            v = ObjectNumber.parse(v)

        # NOTE(review): assumes v.revision is an int; an unversioned number
        # (revision None) will raise a TypeError here -- confirm callers
        # always pass versioned numbers.
        return v.rev(v.revision + 1)

    def rev(self, i):
        """Return a clone with a different revision."""
        on = copy(self)
        on.revision = i
        return on

    def __eq__(self, other):
        return str(self) == str(other)

    def __hash__(self):
        # Bug fix: __eq__ was defined without __hash__, which makes instances
        # unhashable on Python 3 and gives equal numbers distinct id-based
        # hashes on Python 2. Hash the same string form __eq__ compares.
        return hash(str(self))

    @classmethod
    def _rev_str(cls, revision):
        # Render the revision as a fixed-width base62 suffix, or '' when
        # there is no revision.
        if not revision:
            return ''

        revision = int(revision)
        return (
            ObjectNumber.base62_encode(revision).rjust(
                cls.DLEN.REVISION[1],
                '0') if bool(revision) else '')
class TopNumber(ObjectNumber):
    """A general top level number, with a given number space.

    Just like a DatasetNumber, but without the 'd'
    """

    def __init__(self, space, dataset=None, revision=None, assignment_class='self'):
        """Constructor.

        :param space: single-character number space code
        :param dataset: numeric value; chosen at random when None
        :param revision: optional revision number
        :param assignment_class: key into DLEN.DATASET_CLASSES
        :raises ValueError: when space is longer than one character
        """

        if len(space) > 1:
            raise ValueError("Number space must be a single letter")

        self.space = space
        self.assignment_class = assignment_class

        if dataset is None:
            digit_length = self.DLEN.DATASET_CLASSES[self.assignment_class]
            # On 64 bit machine, max is about 10^17, 2^53
            # That should be random enough to prevent
            # collisions for a small number of self assigned numbers.
            # Bug fix: randint() is inclusive at both ends, so the upper bound
            # must be 62**n - 1; 62**n itself would encode to one more digit
            # than the assignment class allows.
            upper = 62 ** digit_length - 1
            dataset = random.randint(0, upper)

        self.dataset = dataset
        self.revision = revision

    @classmethod
    def from_hex(cls, h, space, assignment_class='self'):
        """Produce a TopNumber, with a length to match the given assignment
        class, based on an input hex string.

        This can be used to create TopNumbers from a hash of a string.
        """
        from math import log

        # Use the ln(N)/ln(base) trick to find the right number of hex digits
        # to use
        hex_digits = int(
            round(log(62 ** TopNumber.DLEN.DATASET_CLASSES[assignment_class]) / log(16), 0))

        i = int(h[:hex_digits], 16)

        return TopNumber(space, i, assignment_class=assignment_class)

    @classmethod
    def from_string(cls, s, space):
        """Produce a TopNumber by hashing a string."""
        import hashlib

        # NOTE(review): hashlib.sha1 requires bytes on Python 3; this assumes
        # callers pass an already-encoded string -- confirm.
        hs = hashlib.sha1(s).hexdigest()

        return cls.from_hex(hs, space)

    def _ds_str(self):
        # Fixed-width base62 rendering of the numeric value.
        ds_len = self.DLEN.DATASET_CLASSES[self.assignment_class]
        return ObjectNumber.base62_encode(self.dataset).rjust(ds_len, '0')

    def __str__(self):
        return (self.space + self._ds_str() + ObjectNumber._rev_str(self.revision))
class DatasetNumber(ObjectNumber):
    """An identifier for a dataset."""

    def __init__(self, dataset=None, revision=None, assignment_class='self'):
        """Constructor.

        :param dataset: numeric value; chosen at random when None
        :param revision: optional revision number
        :param assignment_class: key into DLEN.DATASET_CLASSES
        """
        self.assignment_class = assignment_class

        if dataset is None:
            digit_length = self.DLEN.DATASET_CLASSES[self.assignment_class]
            # On 64 bit machine, max is about 10^17, 2^53
            # That should be random enough to prevent
            # collisions for a small number of self assigned numbers.
            # Bug fix: randint() is inclusive at both ends, so cap at
            # 62**n - 1; 62**n itself would encode to one digit too many
            # for the assignment class.
            upper = 62 ** digit_length - 1
            dataset = random.randint(0, upper)

        self.dataset = dataset
        self.revision = revision

    def _ds_str(self):
        # Fixed-width base62 rendering of the dataset value.
        ds_len = self.DLEN.DATASET_CLASSES[self.assignment_class]
        return ObjectNumber.base62_encode(self.dataset).rjust(ds_len, '0')

    @property
    def as_dataset(self):
        # A copy of this number; parallels the as_dataset of other numbers.
        return copy(self)

    def as_partition(self, partition_number=0):
        """Return a new PartitionNumber based on this DatasetNumber."""
        return PartitionNumber(self, partition_number)

    def __str__(self):
        return (ObjectNumber.TYPE.DATASET + self._ds_str() + ObjectNumber._rev_str(self.revision))
class TableNumber(ObjectNumber):
    """An identifier for a table."""

    def __init__(self, dataset, table, revision=None):
        if not isinstance(dataset, DatasetNumber):
            raise ValueError("Constructor requires a DatasetNumber")

        if table > ObjectNumber.TCMAXVAL:
            raise ValueError("Table value '{}' is too large".format(table))

        self.dataset = dataset
        self.table = table

        # Inherit the dataset's revision when no explicit revision was given.
        self.revision = revision
        if not self.revision and dataset.revision:
            self.revision = dataset.revision

    @property
    def as_table(self):
        """Returns self, so TableNumber and Column number can be used
        interchangably."""
        return self

    @property
    def as_dataset(self):
        """Unlike the .dataset property, this will include the revision."""
        return self.dataset.rev(self.revision)

    def __str__(self):
        table_part = ObjectNumber.base62_encode(self.table).rjust(self.DLEN.TABLE, '0')
        return ''.join([
            ObjectNumber.TYPE.TABLE,
            self.dataset._ds_str(),
            table_part,
            ObjectNumber._rev_str(self.revision),
        ])
class ColumnNumber(ObjectNumber):
    """An identifier for a column."""

    def __init__(self, table, column, revision=None):
        """Constructor.

        :param table: must be a TableNumber
        :param column: integer column index, at most CCMAXVAL
        :param revision: optional; defaults to the table's revision
        """
        if not isinstance(table, TableNumber):
            raise ValueError("Constructor requires a TableNumber. got: " + str(type(table)))

        column = int(column)

        if column > ObjectNumber.CCMAXVAL:
            # Bug fix: the limit being checked is CCMAXVAL, so report
            # CCMAXVAL (the message previously printed TCMAXVAL).
            raise ValueError(
                "Value {} is too large ( max is {} ) ".format(
                    column,
                    ObjectNumber.CCMAXVAL))

        self.table = table
        self.column = column

        # A column inherits the table's revision when it has none of its own.
        self.revision = revision
        if not self.revision and table.revision:
            self.revision = table.revision

    @property
    def dataset(self):
        """Return the dataset number for this column."""
        return self.table.dataset

    @property
    def as_dataset(self):
        """Unlike the .dataset property, this will include the revision."""
        return self.table.dataset.rev(self.revision)

    @property
    def as_table(self):
        """The owning table number, carrying this column's revision."""
        return self.table.rev(self.revision)

    def __str__(self):
        return (
            ObjectNumber.TYPE.COLUMN +
            self.dataset._ds_str() +
            ObjectNumber.base62_encode(self.table.table).rjust(self.DLEN.TABLE, '0') +
            ObjectNumber.base62_encode(self.column).rjust(self.DLEN.COLUMN, '0') +
            ObjectNumber._rev_str(self.revision))
class PartitionNumber(ObjectNumber):
    """An identifier for a partition."""

    def __init__(self, dataset, partition, revision=None):
        """
        Arguments:
        dataset -- Must be a DatasetNumber
        partition -- an integer, from 0 to 62^3
        """
        partition = int(partition)

        if not isinstance(dataset, DatasetNumber):
            raise ValueError("Constructor requires a DatasetNumber. Got '{}' ".format(dataset))

        if partition > ObjectNumber.PARTMAXVAL:
            raise ValueError("Value is too large. Max is: {}".format(ObjectNumber.PARTMAXVAL))

        self.dataset = dataset
        self.partition = partition

        # Fall back to the dataset's revision when none was supplied.
        self.revision = revision
        if not self.revision and dataset.revision:
            self.revision = dataset.revision

    @property
    def as_dataset(self):
        """Unlike the .dataset property, this will include the revision."""
        return self.dataset.rev(self.revision)

    def __str__(self):
        pieces = [
            ObjectNumber.TYPE.PARTITION,
            self.dataset._ds_str(),
            ObjectNumber.base62_encode(self.partition).rjust(self.DLEN.PARTITION, '0'),
            ObjectNumber._rev_str(self.revision),
        ]
        return ''.join(pieces)
class GeneralNumber1(ObjectNumber):
    """Other types of number. Can have any type code, and 4 digits of number,
    directly descended from the dataset."""

    def __init__(self, type_code, dataset, num, revision=None):

        if isinstance(dataset, string_types):
            dataset = ObjectNumber.parse(dataset).as_dataset

        try:
            dataset = dataset.as_dataset
        except AttributeError:
            raise ValueError(
                'Constructor requires a DatasetNumber or ObjectNumber that converts to a DatasetNumber')

        self.type_code = type_code
        self.dataset = dataset
        self.number = num

        # Inherit the dataset's revision when none is given.
        self.revision = revision
        if not self.revision and dataset.revision:
            self.revision = dataset.revision

    @property
    def as_dataset(self):
        """Unlike the .dataset property, this will include the revision."""
        return self.dataset.rev(self.revision)

    def __str__(self):
        return ''.join([
            self.type_code,
            self.dataset._ds_str(),
            ObjectNumber.base62_encode(self.number).rjust(self.DLEN.OTHER1, '0'),
            ObjectNumber._rev_str(self.revision),
        ])
class GeneralNumber2(ObjectNumber):
    """Like GeneralNumber1, but with a second numeric level."""

    def __init__(self, type_code, dataset, num1, num2, revision=None):

        if isinstance(dataset, string_types):
            dataset = ObjectNumber.parse(dataset).as_dataset

        try:
            dataset = dataset.as_dataset
        except AttributeError:
            raise ValueError(
                'Constructor requires a DatasetNumber or ObjectNumber that converts to a DatasetNumber')

        self.type_code = type_code
        self.dataset = dataset
        self.num1 = num1
        self.num2 = num2

        # Inherit the dataset's revision when none is given.
        self.revision = revision
        if not self.revision and dataset.revision:
            self.revision = dataset.revision

    @property
    def as_dataset(self):
        """Unlike the .dataset property, this will include the revision."""
        return self.dataset.rev(self.revision)

    def __str__(self):
        return (
            self.type_code +
            self.dataset._ds_str() +
            ObjectNumber.base62_encode(self.num1).rjust(self.DLEN.OTHER1, '0') +
            ObjectNumber.base62_encode(self.num2).rjust(self.DLEN.OTHER2, '0') +
            ObjectNumber._rev_str(self.revision))
class Identity(object):
    """Identities represent the defining set of information about a bundle or a
    partition.

    Only the vid is actually required to uniquely identify a bundle or
    partition, but the identity is also used for generating unique names
    and for finding bundles and partitions.
    """

    is_bundle = True
    is_partition = False

    OBJECT_NUMBER_SEP = '~'

    _name_class = Name

    _on = None
    _name = None

    # Extra data for the library and remotes
    locations = None
    partitions = None
    files = None

    urls = None  # Url dict, from a remote library.
    url = None  # Url of remote where object should be retrieved

    # A bundle if it is created during the identity listing process.
    bundle = None

    # Path to bundle in file system. Set in SourceTreeLibrary.list()
    bundle_path = None

    # Build state of the bundle. Set in SourceTreeLibrary.list()
    bundle_state = None

    # State of the git repository. Set in SourceTreeLibrary.list()
    git_state = None

    md5 = None

    data = None  # Catch-all for other information

    def __init__(self, name, object_number):
        """Create an identity from a Name and a compatible ObjectNumber.

        :raises TypeError: when the name and object number types disagree.
        """
        assert isinstance(name, self._name_class), 'Wrong type: {}. Expected {}'\
            .format(type(name), self._name_class)

        self._on = object_number
        self._name = name

        if not self._name.type_is_compatible(self._on):
            raise TypeError('The name and the object number must be ' +
                            'of compatible types: got {} and {}'
                            .format(type(name), type(object_number)))

        # Update the patch number to always be the revision
        nv = sv.Version(self._name.version)
        nv.patch = int(self._on.revision)
        self._name.version = str(nv)

        self.data = {}

        self.is_valid()

    @classmethod
    def from_dict(cls, d):
        """Construct an Identity (or PartitionIdentity) from a dict with
        either 'id' + 'revision', or 'vid'."""
        assert isinstance(d, dict)

        if 'id' in d and d['id'] and 'revision' in d:
            # The vid should be constructed from the id and the revision
            if not d['id']:
                raise ValueError(" 'id' key doesn't have a value in {} ".format(d))

            ono = ObjectNumber.parse(d['id'])

            if not ono:
                raise ValueError("Failed to parse '{}' as an ObjectNumber ".format(d['id']))

            on = ono.rev(d['revision'])

        elif 'vid' in d and d['vid']:
            on = ObjectNumber.parse(d['vid'])

            if not on:
                raise ValueError("Failed to parse '{}' as an ObjectNumber ".format(d['vid']))
        else:
            raise ValueError("Must have id and revision, or vid. Got neither from {}".format(d))

        if isinstance(on, DatasetNumber):
            try:
                name = cls._name_class(**d)
                ident = cls(name, on)
            except TypeError as e:
                # Bug fix: e.message does not exist on Python 3 exceptions;
                # format the exception itself.
                raise TypeError("Failed to make identity from \n{}\n: {}".format(d, e))

        elif isinstance(on, PartitionNumber):
            ident = PartitionIdentity.from_dict(d)

        else:
            raise TypeError(
                "Can't make identity from {}; object number is wrong type: {}".format(d, type(on)))

        if 'md5' in d:
            ident.md5 = d['md5']

        return ident

    @classmethod
    def classify(cls, o):
        """Break an Identity name into parts, or describe the type of other
        forms.

        Break a name or object number into parts and classify them. Returns a named tuple
        that indicates which parts of input string are name components, object number and
        version number. Does not completely parse the name components.

        Also can handle Name, Identity and ObjectNumbers

        :param o: Input object to split
        """
        # Bug fix: check for None before stringifying; str(None) is 'None',
        # which would otherwise flow into the classification branches.
        if o is None:
            raise ValueError("Input cannot be None")

        s = str(o)

        class IdentityParts(object):
            on = None
            name = None
            isa = None
            vname = None
            sname = None
            name_parts = None
            version = None
            cache_key = None

        ip = IdentityParts()

        if isinstance(o, (DatasetNumber, PartitionNumber)):
            ip.on = o
            ip.name = None
            ip.isa = type(ip.on)
            ip.name_parts = None

        elif isinstance(o, Name):
            ip.on = None
            ip.isa = type(o)
            ip.name = str(o)
            ip.name_parts = ip.name.split(Name.NAME_PART_SEP)

        elif '/' in s:
            # A cache key
            ip.cache_key = s.strip()
            ip.isa = str

        elif cls.OBJECT_NUMBER_SEP in s:
            # Must be a fqname
            ip.name, on_s = s.strip().split(cls.OBJECT_NUMBER_SEP)
            ip.on = ObjectNumber.parse(on_s)
            ip.name_parts = ip.name.split(Name.NAME_PART_SEP)
            ip.isa = type(ip.on)

        elif Name.NAME_PART_SEP in s:
            # Must be an sname or vname
            ip.name = s
            ip.on = None
            ip.name_parts = ip.name.split(Name.NAME_PART_SEP)
            ip.isa = Name

        else:
            # Probably an Object Number in string form
            ip.name = None
            ip.name_parts = None
            ip.on = ObjectNumber.parse(s.strip())
            ip.isa = type(ip.on)

        if ip.name_parts:
            last = ip.name_parts[-1]
            try:
                # A trailing exact version makes this a vname.
                ip.version = sv.Version(last)
                ip.vname = ip.name
            except ValueError:
                try:
                    ip.version = sv.Spec(last)
                    ip.vname = None  # Specs aren't vnames you can query
                except ValueError:
                    pass

            if ip.version:
                ip.name_parts.pop()
                ip.sname = Name.NAME_PART_SEP.join(ip.name_parts)
            else:
                ip.sname = ip.name

        return ip

    def to_meta(self, md5=None, file=None):
        """Return a dictionary of metadata, for use in the Remote api."""
        if not md5:
            if not file:
                raise ValueError('Must specify either file or md5')
            md5 = md5_for_file(file)
            size = os.stat(file).st_size
        else:
            size = None

        return {
            'id': self.id_,
            'identity': json.dumps(self.dict),
            'name': self.sname,
            'fqname': self.fqname,
            'md5': md5,
            # This causes errors with calculating the AWS signature
            'size': size
        }

    def add_md5(self, md5=None, file=None):
        """Set the md5, computing it from *file* when not given directly."""
        if not md5:
            if not file:
                raise ValueError("Must specify either file or md5")
            md5 = md5_for_file(file)

        self.md5 = md5

        return self

    #
    # Naming, paths and cache_keys
    #

    def is_valid(self):
        self._name.is_valid()

    @property
    def on(self):
        """Return the object number object."""
        return self._on

    @property
    def id_(self):
        """String version of the object number, without a revision."""
        return str(self._on.rev(None))

    @property
    def vid(self):
        """String version of the object number."""
        return str(self._on)

    @property
    def name(self):
        """The name object."""
        return self._name

    @property
    def sname(self):
        """The name of the bundle, as a string, excluding the revision."""
        return str(self._name)

    @property
    def vname(self):
        """The versioned name of the bundle."""
        return self._name.vname  # Obsoleted by __getattr__??

    @property
    def fqname(self):
        """The fully qualified name, the versioned name and the vid.

        This is the same as str(self)
        """
        return str(self)

    @property
    def path(self):
        """The path of the bundle source.

        Includes the revision.
        """
        self.is_valid()
        return self._name.path

    @property
    def source_path(self):
        """The path of the bundle source.

        Includes the revision.
        """
        self.is_valid()
        return self._name.source_path

    # Call other values on the name
    def __getattr__(self, name):
        if hasattr(self._name, name):
            return getattr(self._name, name)
        else:
            raise AttributeError('Identity does not have attribute {} '.format(name))

    @property
    def cache_key(self):
        """The name in a form suitable for use as a cache-key"""
        self.is_valid()
        return self._name.cache_key

    @property
    def dict(self):
        d = self._name.dict

        d['vid'] = str(self._on)
        d['id'] = str(self._on.rev(None))
        d['revision'] = int(self._on.revision)
        d['cache_key'] = self.cache_key

        if self.md5:
            d['md5'] = self.md5

        return d

    @property
    def names_dict(self):
        """A dictionary with only the generated names, name, vname and fqname."""
        # NOTE(review): INCLUDE_KEYS also exposes 'vid', which the docstring
        # does not mention -- confirm whether that is intentional.
        INCLUDE_KEYS = ['name', 'vname', 'vid']

        d = {k: v for k, v in iteritems(self.dict) if k in INCLUDE_KEYS}

        d['fqname'] = self.fqname

        return d

    @property
    def ident_dict(self):
        """A dictionary with only the items required to specify the identy,
        excluding the generated names, name, vname and fqname."""
        SKIP_KEYS = ['name', 'vname', 'fqname', 'vid', 'cache_key']

        return {k: v for k, v in iteritems(self.dict) if k not in SKIP_KEYS}

    @staticmethod
    def _compose_fqname(vname, vid):
        assert vid is not None
        assert vname is not None
        return vname + Identity.OBJECT_NUMBER_SEP + vid

    def as_partition(self, partition=0, **kwargs):
        """Return a new PartitionIdentity based on this Identity.

        :param partition: Integer partition number for PartitionObjectNumber
        :param kwargs:
        """
        assert isinstance(self._name, Name), "Wrong type: {}".format(type(self._name))
        assert isinstance(self._on, DatasetNumber), "Wrong type: {}".format(type(self._on))

        name = self._name.as_partition(**kwargs)
        on = self._on.as_partition(partition)

        return PartitionIdentity(name, on)

    def add_partition(self, p):
        """Add a partition identity as a child of a dataset identity."""
        if not self.partitions:
            self.partitions = {}

        self.partitions[p.vid] = p

    def add_file(self, f):
        """Add a file to this identity's file set and record its location
        type."""
        if not self.files:
            self.files = set()

        self.files.add(f)

        self.locations.set(f.type_)

    @property
    def partition(self):
        """Convenience function for accessing the first partition in the
        partitions list, when there is only one."""
        if not self.partitions:
            return None

        if len(self.partitions) > 1:
            raise ValueError(
                "Can't use this method when there is more than one partition")

        return list(self.partitions.values())[0]

    def rev(self, rev):
        """Return a new identity with the given revision"""
        d = self.dict
        d['revision'] = rev
        return self.from_dict(d)

    def __str__(self):
        return self._compose_fqname(self._name.vname, self.vid)

    def _info(self):
        """Returns an OrderedDict of information, for human display."""
        d = OrderedDict()
        d['vid'] = self.vid
        d['sname'] = self.sname
        d['vname'] = self.vname
        return d

    def __hash__(self):
        return hash(str(self))
class PartitionIdentity(Identity):
    """Subclass of Identity for partitions."""

    is_bundle = False
    is_partition = True

    _name_class = PartitionName

    def is_valid(self):
        self._name.is_valid()

        if self._name.format:
            # Bug fix: call format_name() in the message; the original
            # formatted the bound method object instead of its value.
            assert self.format_name() == self._name.format_name(), \
                "Got format '{}', expected '{}'".format(
                    self._name.format_name(), self.format_name())

    @classmethod
    def from_dict(cls, d):
        """Like Identity.from_dict, but will cast the class type based on the
        format. i.e. if the format is hdf, return an HdfPartitionIdentity.

        :param d:
        :return:
        """
        name = PartitionIdentity._name_class(**d)

        if 'id' in d and 'revision' in d:
            # The vid should be constructed from the id and the revision
            on = (ObjectNumber.parse(d['id']).rev(d['revision']))
        elif 'vid' in d:
            on = ObjectNumber.parse(d['vid'])
        else:
            raise ValueError("Must have id and revision, or vid")

        try:
            return PartitionIdentity(name, on)
        except TypeError as e:
            # Bug fix: e.message does not exist on Python 3 exceptions.
            raise TypeError(
                "Failed to make identity from \n{}\n: {}".format(d, e))

    @property
    def table(self):
        return self._name.table

    def as_dataset(self):
        """Convert this identity to the identity of the corresponding
        dataset."""
        # NOTE(review): this mutates the revision of the shared dataset
        # number in place rather than copying it -- confirm no caller relies
        # on the original revision afterwards.
        on = self.on.dataset
        on.revision = self.on.revision

        name = Name(**self.name.dict)

        return Identity(name, on)

    def as_partition(self, partition=0, **kwargs):
        raise NotImplementedError(
            "Can't generated a PartitionIdentity from a PartitionIdentity")

    @property
    def sub_path(self):
        """The portion of the path excluding the bundle path."""
        self.is_valid()
        return self._name.sub_path

    @classmethod
    def format_name(cls):
        # Format identifier declared by the name class.
        return cls._name_class.FORMAT

    @classmethod
    def extension(cls):
        # File extension declared by the name class.
        return cls._name_class.PATH_EXTENSION
class NumberServer(object):
    """Client for a remote object-number assignment service."""

    def __init__(self, host='numbers.ambry.io', port='80', key=None, **kwargs):
        """
        :param host:
        :param port:
        :param key: Key to set the assignment class. The number servers redis server mush have the
        key value set to the assignment class, such as:

            set assignment_class:<key> authoritative

        Two values are supported, "authoritative" and "registered". If neither value is set, the
        assignment class is "unregistered"
        :param kwargs: No used; sucks up other parameters that may be in the configuration when the
        object is constructed with the config, as in NumberServer(**get_runconfig().group('numbers'))
        """
        self.host = host
        self.port = port
        self.key = key

        self.port_str = ':' + str(port) if port else ''

        self.last_response = None
        self.next_time = None

    def _request_params(self):
        # The access key selects the assignment class on the server side.
        return dict(access_key=self.key) if self.key else dict()

    def __next__(self):
        url = 'http://{}{}/next'.format(self.host, self.port_str)
        response = requests.get(url, params=self._request_params())
        response.raise_for_status()

        body = response.json()
        self.last_response = body
        self.next_time = time.time() + body.get('wait', 0)

        return ObjectNumber.parse(body['number'])

    def find(self, name):
        url = 'http://{}{}/find/{}'.format(self.host, self.port_str, name)
        response = requests.get(url, params=self._request_params())
        response.raise_for_status()

        body = response.json()
        self.last_response = body

        try:
            self.next_time = time.time() + self.last_response['wait']
        except TypeError:
            pass  # wait time is None, can be added to time.

        return ObjectNumber.parse(body['number'])

    def sleep(self):
        """Wait for the sleep time of the last response, to avoid being rate
        limited."""
        if self.next_time and time.time() < self.next_time:
            time.sleep(self.next_time - time.time())
# port to python2
# Python 2's iterator protocol calls .next(); alias it to the py3 __next__.
NumberServer.next = NumberServer.__next__
|
Match report: A game inevitably curtailed by rain ended in a draw, with Brentwood claiming 9 points and Gidea Park claiming 9.
In a stop-start innings Brentwood were 36-2, with Matt Sutton claiming both opening batsmen, caught in the slips. A stand of 64 between Ayres and Damien Brandy helped Brentwood start to take control. Andy Berry then came into the attack and immediately bowled Ayres for 30. Brandy showed his class and power with a masterful 65 before also falling to Berry, well caught in the deep by Toogood. Chris Sains was run out for 18 after some sharp work by Chris Swainland. Berry claimed his 3rd wicket, having Muwas caught by Filmalter for 27. A dropped catch from Sutherland proved expensive as he scored a rapid 57 to accelerate Brentwood to 241-7 from their allotted 51 overs.
Park could not have had a worse start, with Shah being run out for 0, unfortunately without facing a ball. Swainland tried to make amends for the early loss with a fine partnership of 74 with Churchill in quick time, with some fine shots by both. In stepped the Brentwood spin twins: Belchamber removed Swainland for 36. Churchill and Hyam continued Park's aggressive attempt to reduce the run deficit. Hyam was caught for 16 off the impressive Sutherland, and with Churchill falling shortly after for a fine 47, again off Sutherland, Park were reduced to 102-3. With the rain closing in, Park attempted to claim what points were available in what was obviously going to be an abandoned match. Filmalter came and went for 17, and with Collard on 14 not out and Jason Toogood 4 not out at the close of the innings, the rain arrived with Park on 143-5 from 32 overs.
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.