text
stringlengths 29
850k
|
|---|
import requests
class Riot:
    """Thin client for the legacy Riot Games summoner REST API (v1.4).

    A valid region must be configured (via the constructor or set_region)
    before any request is issued; check_region() raises otherwise.
    """

    api_key = None
    region = None
    valid_regions = None
    summoner_version = 'v1.4'
    base_api_url = 'https://{0}.api.pvp.net'
    base_summoner_suffix = '/api/lol/{0}/{1}/summoner'
    base_summoner_url = None
    # Riot caps bulk summoner lookups at 40 ids per request.
    MAX_ID_LIST = 40

    def check_region(self):
        """Raise when no valid region has been configured."""
        if not self.region or self.region not in self.valid_regions:
            raise Exception('You need to provide a valid region for this call.')

    def init_valid_regions(self):
        """Populate the list of region shards accepted by the API."""
        self.valid_regions = ['br', 'eune', 'euw', 'kr', 'lan',
                              'las', 'na', 'oce', 'ru', 'tr']

    def init_base_url(self):
        """Bind the API host template to the configured region (per instance)."""
        self.check_region()
        self.base_api_url = self.base_api_url.format(self.region)

    def init_summoner_url(self):
        """Build the summoner endpoint base URL once (idempotent)."""
        if self.base_summoner_url:
            return True
        self.check_region()
        base_summoner_suffix = self.base_summoner_suffix.format(self.region,
                                                                self.summoner_version)
        self.base_summoner_url = self.base_api_url + base_summoner_suffix

    def get_api_key_query_string(self):
        """Return the '?api_key=...' query string for authenticated calls."""
        return '?api_key={0}'.format(self.api_key)

    def set_region(self, region):
        """Store the region normalized to lowercase without spaces."""
        self.region = self.standardize_name(region)

    def __init__(self, api_key, region=None):
        self.api_key = api_key
        self.set_region(region)
        self.init_valid_regions()
        self.init_base_url()

    def standardize_name(self, name):
        """Lowercase *name* and strip spaces; returns False for non-string/empty input."""
        if not name or not isinstance(name, str):
            return False
        return name.replace(' ', '').lower()

    def parse_name_list(self, names):
        """Join a list of names (or pass a string through) as normalized CSV."""
        if not names:
            return False
        if isinstance(names, list):
            names = ','.join(names)
        return self.standardize_name(names)

    def parse_id_list(self, ids):
        """Normalize *ids* (list or CSV string) to a CSV string.

        Raises when more than MAX_ID_LIST ids are supplied; returns False
        for empty input.
        """
        if not ids:
            return False
        # Bug fix: the original message read "...more than 40names."
        # (missing space, and these are ids, not names).
        exceeded_exception = Exception('You are querying the server for more than '
                                       + str(self.MAX_ID_LIST) + ' ids.')
        if isinstance(ids, list):
            if len(ids) > self.MAX_ID_LIST:
                raise exceeded_exception
            ids = [str(_id) for _id in ids]
            return ','.join(ids)
        elif isinstance(ids, str):
            # A CSV string with N ids contains N-1 commas.
            if ids.count(',') > (self.MAX_ID_LIST - 1):
                raise exceeded_exception
            return ids.replace(' ', '')

    def get_summoner_by_name(self, names):
        """GET summoner data by name(s); returns the raw response body."""
        self.init_summoner_url()
        names = self.parse_name_list(names)
        if not names:
            raise Exception('Riot: No name provided.')
        url = self.base_summoner_url + '/by-name'
        url += '/' + names + self.get_api_key_query_string()
        return requests.get(url).text

    def user_exists_by_name(self, name):
        """True when the summoner lookup does not report HTTP 404."""
        result = self.get_summoner_by_name(name)
        return result.find('HTTP ERROR 404') == -1

    def get_summoner_by_id(self, ids):
        """GET summoner data by numeric id(s); returns the raw response body."""
        self.init_summoner_url()
        ids = self.parse_id_list(ids)
        if not ids:
            raise Exception('Id list provided not valid.')
        url = self.base_summoner_url
        url += '/' + ids + self.get_api_key_query_string()
        return requests.get(url).text
|
Box Size: 14.5" x 11" x 35"
Dragon’s announcement of a new Space Collection item at the Tokyo Hobby Show created a huge buzz! The cause was a 1/72 scale replica of a Saturn V rocket. We can’t say it’s a miniature model, for it’s absolutely enormous even in 1/72 scale! Indeed, the fully built-up model stands 1.5m tall.
As the longest, heaviest and most powerful rocket ever produced, the original Saturn V stood 110.6m high and had a diameter of 10.1m. The Saturn V was at the heart of NASA’s Apollo and Skylab programs between 1967 and 1973. It was able to launch a 45-tonne payload into space beyond Low Earth Orbit. A total of 13 Saturn V launches took place, every one of them a success, and they safely delivered 24 astronauts to the Moon.
As impressive as the real Saturn rocket system is, this Space Collection item from Dragon will cause viewers' jaws to drop and draw gasps of astonishment! While the model makes use of the Command/Service Module (CSM) and Launch Escape System produced earlier, the rest of this monstrous 1/72 scale rocket comes from brand new toolings. All the relevant detail is carefully reproduced on the three rocket stages, and the completed model comes with accurate painting and markings. The Saturn V is most suitable for display at home as a centerpiece of any space fan’s collection. It comes with a stable circular base to allow it to be freestanding on the floor. Impress and be impressed with this fantastic model!
|
#!/usr/bin/env python
import datetime
import gzip
import json
import re
import time
import urllib2
from StringIO import StringIO
from functools import wraps
from itertools import islice
from multiprocessing.pool import ThreadPool

import psycopg2
# See LICENSE.md in /scripts/python folder
def retry(ExceptionToCheck, tries=4, delay=3, backoff=2, logger=None):
    """Retry calling the decorated function using an exponential backoff.
    http://www.saltycrane.com/blog/2009/11/trying-out-retry-decorator-python/
    original from: http://wiki.python.org/moin/PythonDecoratorLibrary#Retry
    :param ExceptionToCheck: the exception to check. may be a tuple of
        exceptions to check
    :type ExceptionToCheck: Exception or tuple
    :param tries: number of times to try (not retry) before giving up
    :type tries: int
    :param delay: initial delay between retries in seconds
    :type delay: int
    :param backoff: backoff multiplier e.g. value of 2 will double the delay
        each retry
    :type backoff: int
    :param logger: logger to use. If None, print
    :type logger: logging.Logger instance
    """
    def deco_retry(f):
        @wraps(f)
        def f_retry(*args, **kwargs):
            mtries, mdelay = tries, delay
            # tries-1 guarded attempts with sleeps in between; the final
            # attempt happens after the loop so its exception propagates.
            while mtries > 1:
                try:
                    return f(*args, **kwargs)
                except ExceptionToCheck, e:  # Python 2 except syntax
                    msg = "%s, Retrying in %d seconds..." % (str(e), mdelay)
                    if logger:
                        logger.warning(msg)
                    else:
                        print msg
                    time.sleep(mdelay)
                    mtries -= 1
                    mdelay *= backoff  # exponential backoff
            # Last attempt: unguarded, so failures reach the caller.
            return f(*args, **kwargs)
        return f_retry  # true decorator
    return deco_retry
@retry(urllib2.URLError, tries=4, delay=3, backoff=2)
def urlopen_with_retry(request):
    """urllib2.urlopen wrapped with exponential-backoff retries on URLError."""
    return urllib2.urlopen(request)
def partition(data, SIZE=100):
    """Yield successive sub-dicts of *data*, each holding at most SIZE keys."""
    keys = iter(data)
    for _ in xrange(0, len(data), SIZE):
        chunk = {}
        for key in islice(keys, SIZE):
            chunk[key] = data[key]
        yield chunk
def getSolarSystemIdNameDict():
    """Load mapSolarSystems into {solarSystemID: solarSystemName}.

    Uses the module-level psycopg2 cursor ``cur``. Columns 2 and 3 are
    assumed to hold the id and the name — TODO confirm against the
    static-dump schema.
    """
    result = {}
    cur.execute('SELECT * FROM "mapSolarSystems"')
    solarSystems = cur.fetchall()
    for solarSystem in solarSystems:
        result[solarSystem[2]] = solarSystem[3]
    return result
def getCharacterIdNameDict():
    """Seed the character-name cache from previously aggregated rows.

    Returns {characterid: character name} built from zwbAggregateChar,
    using the module-level cursor ``cur``.
    """
    result = {}
    cur.execute('SELECT DISTINCT characterid, character FROM "zwbAggregateChar"')
    characters = cur.fetchall()
    for character in characters:
        result[character[0]] = character[1]
    return result
def getCorporationIdNameDict():
    """Seed the corporation-name cache from previously aggregated rows.

    Returns {corporationid: corporation name} built from zwbAggregateCorp,
    using the module-level cursor ``cur``.
    """
    result = {}
    cur.execute('SELECT DISTINCT corporationid, corporation FROM "zwbAggregateCorp"')
    corporations = cur.fetchall()
    for corporation in corporations:
        result[corporation[0]] = corporation[1]
    return result
def getCharacterNameESI(characterId):
    """Resolve a character name via the ESI /universe/names/ endpoint.

    Supplying ``data`` makes urllib2 issue a POST; the body is a JSON array
    holding the single id. The (possibly gzip-compressed) response is a
    list with one entry whose "name" field is returned.
    """
    request = urllib2.Request("https://esi.evetech.net/latest/universe/names/?datasource=tranquility")
    request.add_header("Accept-Encoding", "gzip")
    request.add_header("Cache-Control", "1")
    query = "[" + str(characterId) + "]"
    response = urllib2.urlopen(url=request, data=query)
    # ESI may answer gzip-compressed; decompress transparently.
    if response.info().get("Content-Encoding") == "gzip":
        buf = StringIO(response.read())
        f = gzip.GzipFile(fileobj=buf)
        data = f.read()
    else:
        data = response.read()
    return json.loads(data)[0]["name"]
def getCorporationNameESI(corporationId):
    """Resolve a corporation name via the ESI /universe/names/ endpoint.

    Same flow as getCharacterNameESI: POST a one-element JSON id array,
    decompress a gzip response if needed, return the single "name".
    """
    request = urllib2.Request("https://esi.evetech.net/latest/universe/names/?datasource=tranquility")
    request.add_header("Accept-Encoding", "gzip")
    request.add_header("Cache-Control", "1")
    query = "[" + str(corporationId) + "]"
    response = urllib2.urlopen(url=request, data=query)
    # ESI may answer gzip-compressed; decompress transparently.
    if response.info().get("Content-Encoding") == "gzip":
        buf = StringIO(response.read())
        f = gzip.GzipFile(fileobj=buf)
        data = f.read()
    else:
        data = response.read()
    return json.loads(data)[0]["name"]
def getCharacterNameForId(charId):
    """Resolve a character name, memoizing results in dictCharacterIdName."""
    key = int(charId)
    try:
        return dictCharacterIdName[key]
    except KeyError:
        name = getCharacterNameESI(key)
        dictCharacterIdName[key] = name
        return name
def getCorporationNameForId(corpId):
    """Resolve a corporation name, memoizing results in dictCorporationIdName."""
    key = int(corpId)
    try:
        return dictCorporationIdName[key]
    except KeyError:
        name = getCorporationNameESI(key)
        dictCorporationIdName[key] = name
        return name
def getKillmailHashes(date):
    """Fetch {killmail_id: hash} for all kills on *date* from zKillboard.

    *date* is a "YYYYMMDD" string. Returns [] on HTTP errors.
    """
    request = urllib2.Request("https://zkillboard.com/api/history/" + date + "/")
    request.add_header("Accept-Encoding", "gzip")
    request.add_header("Cache-Control", "1")
    request.add_header("User-Agent", "http://fetox-developments.com/wormboard/ Maintainer: fetox74 EMail: odittrich@gmx.de")
    try:
        response = urllib2.urlopen(request)
    except urllib2.HTTPError as err:
        print err
        return []
    # zKillboard may answer gzip-compressed; decompress transparently.
    if response.info().get("Content-Encoding") == "gzip":
        buf = StringIO(response.read())
        f = gzip.GzipFile(fileobj=buf)
        data = f.read()
    else:
        data = response.read()
    return json.loads(data)
def getESI(tupleIdHash):
    """Fetch full killmail detail from ESI for a (killmail_id, hash) tuple.

    Returns the parsed JSON dict, or [] on HTTP error — the main loop uses
    [] as an error sentinel. NOTE(review): this still targets the legacy
    esi.tech.ccp.is host rather than esi.evetech.net.
    """
    request = urllib2.Request("https://esi.tech.ccp.is/latest/killmails/" + tupleIdHash[0] + "/" + tupleIdHash[1] + "/?datasource=tranquility")
    request.add_header("Accept-Encoding", "gzip")
    request.add_header("Cache-Control", "1")
    try:
        response = urlopen_with_retry(request)
    except urllib2.HTTPError as err:
        print err
        return []
    # ESI may answer gzip-compressed; decompress transparently.
    if response.info().get("Content-Encoding") == "gzip":
        buf = StringIO(response.read())
        f = gzip.GzipFile(fileobj=buf)
        data = f.read()
    else:
        data = response.read()
    return json.loads(data)
def getZKB(id, solarSystemId):
    """Return the "zkb" metadata block (value/points) for killmail *id*.

    Bulk-fetches all killmails of *solarSystemId* for the module-level
    ``date`` (up to 10 zKillboard pages) into the ``mapIdKillmail`` cache
    first, then falls back to a single-killmail lookup. Returns None on
    HTTP error.
    """
    if id in mapIdKillmail:
        return mapIdKillmail[id]
    # todo: this should actually only be done if the solar system has not been read for the current date already (add set of solarsystemid's, make sure to reset for next date)
    for page in range(1, 11):
        request = urllib2.Request(
            "https://zkillboard.com/api/no-items/no-attackers/solarSystemID/" + str(solarSystemId) + "/startTime/" + str(date) + "0000/endTime/" + str(date) +
            "2400/page/" + str(page) + "/")
        request.add_header("Accept-Encoding", "gzip")
        request.add_header("Cache-Control", "1")
        request.add_header("User-Agent", "http://fetox-developments.com/wormboard/ Maintainer: fetox74 EMail: odittrich@gmx.de")
        try:
            response = urllib2.urlopen(request)
        except urllib2.HTTPError as err:
            print err
            return None
        # zKillboard may answer gzip-compressed; decompress transparently.
        if response.info().get("Content-Encoding") == "gzip":
            buf = StringIO(response.read())
            f = gzip.GzipFile(fileobj=buf)
            data = f.read()
        else:
            data = response.read()
        killmails = json.loads(data)
        if len(killmails) > 0:
            for killmail in killmails:
                mapIdKillmail[killmail["killmail_id"]] = killmail["zkb"]
        else:
            # Empty page: no further pages for this system/date.
            break
    if id in mapIdKillmail:
        return mapIdKillmail[id]
    else:
        return getSingleKillmailZKB(id)
def getSingleKillmailZKB(id):
    """Fetch the "zkb" block for one killmail id; None on error or miss."""
    request = urllib2.Request("https://zkillboard.com/api/no-items/no-attackers/killID/" + str(id) + "/")
    request.add_header("Accept-Encoding", "gzip")
    request.add_header("Cache-Control", "1")
    request.add_header("User-Agent", "http://fetox-developments.com/wormboard/ Maintainer: fetox74 EMail: odittrich@gmx.de")
    try:
        response = urllib2.urlopen(request)
    except urllib2.HTTPError as err:
        print err.headers
        return None
    # zKillboard may answer gzip-compressed; decompress transparently.
    if response.info().get("Content-Encoding") == "gzip":
        buf = StringIO(response.read())
        f = gzip.GzipFile(fileobj=buf)
        data = f.read()
    else:
        data = response.read()
    result = json.loads(data)
    if len(result) > 0:
        return result[0]["zkb"]
    else:
        return None
def getFinalHitCorpAndUpdateAttackers(attackers, value):
    """Credit every attacking character with one kill worth *value* ISK and
    return (corporationid, corporation name) of the final-blow attacker.

    Returns (-1, "") when the final-blow attacker has no corporation — and,
    unlike the original (which fell off the end and returned None, crashing
    the caller's 2-tuple unpack), also when no attacker is flagged
    final_blow at all.
    """
    for attacker in attackers:
        if "character_id" in attacker:
            characterid = attacker["character_id"]
            character = getCharacterNameForId(characterid)
            updateCharacter(characterid, character, 1, 0, value, 0.0)
    for attacker in attackers:
        if attacker["final_blow"]:
            if "corporation_id" in attacker:
                corporationid = attacker["corporation_id"]
                return corporationid, getCorporationNameForId(corporationid)
            return -1, ""
    # Defensive fallback: no final-blow attacker present in the record.
    return -1, ""
def getIskLossForCorp(corporationid):
    """Total ISK lost by the corporation on this date (0.0 when unknown)."""
    entry = lossDict.get(corporationid)
    return entry["isklost"] if entry is not None else 0.0
def getLossesForCorp(corporationid):
    """Number of losses of the corporation on this date (0 when unknown)."""
    entry = lossDict.get(corporationid)
    return entry["losses"] if entry is not None else 0
def getAttackersOfCorp(attackers, corporationid):
    """Set of character ids of all attackers fighting for *corporationid*."""
    return {attacker["character_id"]
            for attacker in attackers
            if "corporation_id" in attacker
            and attacker["corporation_id"] == corporationid
            and "character_id" in attacker}
def addNumberToHourDict(datetimestring, hour_dict, number):
    """Add *number* to the bucket of *hour_dict* keyed by the hour of day.

    The hour ("00".."23") is taken from characters 11-12 of an ISO
    timestamp like "2018-01-01T05:30:00Z". The parameter was renamed from
    ``dict`` (it shadowed the builtin); all call sites are positional.
    """
    hour = datetimestring[11:13]
    hour_dict[hour] += number
def createHourDict():
    """Return {"00": 0, ..., "23": 0} — one zeroed bucket per hour of day."""
    return {"%02d" % hour: 0 for hour in range(24)}
def updateCharacter(characterid, character, kills, losses, iskwon, isklost):
    """Accumulate per-character kill/loss stats into the global characterDict."""
    entry = characterDict.get(characterid)
    if entry is None:
        characterDict[characterid] = {"character": character, "kills": kills,
                                      "losses": losses, "iskwon": iskwon,
                                      "isklost": isklost}
    else:
        entry["kills"] += kills
        entry["losses"] += losses
        entry["iskwon"] += iskwon
        entry["isklost"] += isklost
def updateDictionaries(killmailESI, killmailZKB):
    """Merge one killmail (ESI detail + zKB value data) into the per-date
    aggregation globals: masterDict (per corp), characterDict (per
    character) and lossDict (per victim corp).

    Skips the killmail (with a log line) when zKillboard had no record.
    """
    if killmailZKB:
        # Credit all attackers and identify the final-blow corporation.
        finalHitCorpId, finalHitCorp = getFinalHitCorpAndUpdateAttackers(killmailESI["attackers"], killmailZKB["totalValue"])
        victimCorpId = killmailESI["victim"]["corporation_id"]
        victimCorp = getCorporationNameForId(victimCorpId)
        if "character_id" in killmailESI["victim"]:
            characterid = killmailESI["victim"]["character_id"]
            character = getCharacterNameForId(characterid)
            updateCharacter(characterid, character, 0, 1, 0.0, killmailZKB["totalValue"])
        if finalHitCorpId != -1:
            attackersOfFinalHitCorp = getAttackersOfCorp(killmailESI["attackers"], finalHitCorpId)
            if finalHitCorpId in masterDict:
                masterDict[finalHitCorpId]["kills"] = masterDict[finalHitCorpId]["kills"] + 1
                masterDict[finalHitCorpId]["iskwon"] = masterDict[finalHitCorpId]["iskwon"] + killmailZKB["totalValue"]
                # Union of all characters seen attacking for this corp today.
                masterDict[finalHitCorpId]["active"] = masterDict[finalHitCorpId]["active"] | attackersOfFinalHitCorp
                masterDict[finalHitCorpId]["sumonkills"] = masterDict[finalHitCorpId]["sumonkills"] + len(attackersOfFinalHitCorp)
            else:
                masterDict[finalHitCorpId] = {"corporation": finalHitCorp, "kills": 1, "iskwon": killmailZKB["totalValue"], "active": attackersOfFinalHitCorp,
                                              "sumonkills": len(attackersOfFinalHitCorp), "killsinhour": createHourDict(), "sumonkillsinhour": createHourDict()}
            addNumberToHourDict(killmailESI["killmail_time"], masterDict[finalHitCorpId]["killsinhour"], 1)
            addNumberToHourDict(killmailESI["killmail_time"], masterDict[finalHitCorpId]["sumonkillsinhour"], len(attackersOfFinalHitCorp))
        # Ensure the victim corp exists in masterDict even with zero kills.
        if victimCorpId not in masterDict:
            masterDict[victimCorpId] = {"corporation": victimCorp, "kills": 0, "iskwon": 0.0, "active": set(),
                                        "sumonkills": 0, "killsinhour": createHourDict(), "sumonkillsinhour": createHourDict()}
        if victimCorpId in lossDict:
            lossDict[victimCorpId]["losses"] = lossDict[victimCorpId]["losses"] + 1
            lossDict[victimCorpId]["isklost"] = lossDict[victimCorpId]["isklost"] + killmailZKB["totalValue"]
        else:
            lossDict[victimCorpId] = {"losses": 1, "isklost": killmailZKB["totalValue"]}
    else:
        print "kill id " + str(killmailESI["killmail_id"]) + " seems not to exist on zKillboard.."
def queryAggregateAlreadyInDB(cur, date, corp):
    """Return True when zwbAggregateCorp already holds a row for (date, corp).

    Uses a parameterized query: the original interpolated *corp* straight
    into the SQL text, which broke on names containing quotes and was open
    to SQL injection.
    """
    cur.execute('SELECT * FROM "zwbAggregateCorp" WHERE "date" = %s AND "corporation" = %s',
                (int(date), corp))
    return len(cur.fetchall()) > 0
def updateDB(cur, date):
    """Replace the aggregate rows for *date* with the contents of the
    module-level masterDict / characterDict, then commit on the global
    ``conn``.

    NOTE(review): values are spliced in with the %-operator instead of
    psycopg2 parameters; names are escaped only by doubling single quotes.
    """
    # Make the run idempotent: drop any previous aggregates for this date.
    cur.execute('DELETE FROM "zwbAggregateCorp" WHERE "date" = %i' % int(date))
    cur.execute('DELETE FROM "zwbAggregateChar" WHERE "date" = %i' % int(date))
    # One row per corporation, including the 24 kills-per-hour and
    # summoners-on-kills-per-hour buckets.
    for key, value in masterDict.items():
        cur.execute(
            '''INSERT INTO "zwbAggregateCorp" ("date", "corporationid", "corporation", "kills", "losses", "iskwon", "isklost", "active", "numactive", "sumonkills",
            "killsinhour00", "killsinhour01", "killsinhour02", "killsinhour03", "killsinhour04", "killsinhour05", "killsinhour06", "killsinhour07",
            "killsinhour08", "killsinhour09", "killsinhour10", "killsinhour11", "killsinhour12", "killsinhour13", "killsinhour14", "killsinhour15",
            "killsinhour16", "killsinhour17", "killsinhour18", "killsinhour19", "killsinhour20", "killsinhour21", "killsinhour22", "killsinhour23",
            "sumonkillsinhour00", "sumonkillsinhour01", "sumonkillsinhour02", "sumonkillsinhour03", "sumonkillsinhour04", "sumonkillsinhour05",
            "sumonkillsinhour06", "sumonkillsinhour07", "sumonkillsinhour08", "sumonkillsinhour09", "sumonkillsinhour10", "sumonkillsinhour11",
            "sumonkillsinhour12", "sumonkillsinhour13", "sumonkillsinhour14", "sumonkillsinhour15", "sumonkillsinhour16", "sumonkillsinhour17",
            "sumonkillsinhour18", "sumonkillsinhour19", "sumonkillsinhour20", "sumonkillsinhour21", "sumonkillsinhour22", "sumonkillsinhour23")
            VALUES (%i, %i, %s, %i, %i, %f, %f, %s, %i, %i,
            %i, %i, %i, %i, %i, %i, %i, %i, %i, %i, %i, %i, %i, %i, %i, %i, %i, %i, %i, %i, %i, %i, %i, %i,
            %i, %i, %i, %i, %i, %i, %i, %i, %i, %i, %i, %i, %i, %i, %i, %i, %i, %i, %i, %i, %i, %i, %i, %i)''' % (
                int(date),
                key,
                "'" + value["corporation"].replace("'", "''") + "'",
                value["kills"],
                getLossesForCorp(key),
                value["iskwon"],
                getIskLossForCorp(key),
                "'" + ",".join(map(str, value["active"])) + "'",
                len(value["active"]),
                value["sumonkills"],
                value["killsinhour"]["00"],
                value["killsinhour"]["01"],
                value["killsinhour"]["02"],
                value["killsinhour"]["03"],
                value["killsinhour"]["04"],
                value["killsinhour"]["05"],
                value["killsinhour"]["06"],
                value["killsinhour"]["07"],
                value["killsinhour"]["08"],
                value["killsinhour"]["09"],
                value["killsinhour"]["10"],
                value["killsinhour"]["11"],
                value["killsinhour"]["12"],
                value["killsinhour"]["13"],
                value["killsinhour"]["14"],
                value["killsinhour"]["15"],
                value["killsinhour"]["16"],
                value["killsinhour"]["17"],
                value["killsinhour"]["18"],
                value["killsinhour"]["19"],
                value["killsinhour"]["20"],
                value["killsinhour"]["21"],
                value["killsinhour"]["22"],
                value["killsinhour"]["23"],
                value["sumonkillsinhour"]["00"],
                value["sumonkillsinhour"]["01"],
                value["sumonkillsinhour"]["02"],
                value["sumonkillsinhour"]["03"],
                value["sumonkillsinhour"]["04"],
                value["sumonkillsinhour"]["05"],
                value["sumonkillsinhour"]["06"],
                value["sumonkillsinhour"]["07"],
                value["sumonkillsinhour"]["08"],
                value["sumonkillsinhour"]["09"],
                value["sumonkillsinhour"]["10"],
                value["sumonkillsinhour"]["11"],
                value["sumonkillsinhour"]["12"],
                value["sumonkillsinhour"]["13"],
                value["sumonkillsinhour"]["14"],
                value["sumonkillsinhour"]["15"],
                value["sumonkillsinhour"]["16"],
                value["sumonkillsinhour"]["17"],
                value["sumonkillsinhour"]["18"],
                value["sumonkillsinhour"]["19"],
                value["sumonkillsinhour"]["20"],
                value["sumonkillsinhour"]["21"],
                value["sumonkillsinhour"]["22"],
                value["sumonkillsinhour"]["23"]))
    # One row per character.
    for key, value in characterDict.items():
        cur.execute(
            'INSERT INTO "zwbAggregateChar" ("date", "characterid", "character", "kills", "losses", "iskwon", "isklost") VALUES (%i, %i, %s, %i, %i, %f, %f)' %
            (int(date), key, "'" + value["character"].replace("'", "''") + "'", value["kills"], value["losses"], value["iskwon"], value["isklost"]))
    conn.commit()
# Every day of 2018 as a "YYYYMMDD" string. Replaces the hand-enumerated
# 365-element literal with an equivalent computed list (same values, same
# order, far less error-prone to maintain).
DATES = [(datetime.date(2018, 1, 1) + datetime.timedelta(days=offset)).strftime("%Y%m%d")
         for offset in range(365)]
reJMail = re.compile("J[0-9]{6}")
try:
conn = psycopg2.connect("dbname='staticdump' user='postgres' host='localhost' password='bollox'")
except:
print "Unable to connect to the database"
exit(-1)
cur = conn.cursor()
dictSolarSystemIdName = getSolarSystemIdNameDict()
dictCharacterIdName = getCharacterIdNameDict()
dictCorporationIdName = getCorporationIdNameDict()
for date in DATES:
counter = 0
jMailCounter = 0
dictKillmailIdHash = getKillmailHashes(date)
masterDict = {}
characterDict = {}
lossDict = {}
mapIdKillmail = {}
print "processing " + date
chunks = partition(dictKillmailIdHash)
for chunk in chunks:
pool = ThreadPool(100)
results = pool.map(getESI, chunk.items())
pool.close()
pool.join()
for killmailESI in results:
if killmailESI != [] and killmailESI["solar_system_id"] in dictSolarSystemIdName and (reJMail.match(dictSolarSystemIdName[killmailESI["solar_system_id"]] or dictSolarSystemIdName[killmailESI["solar_system_id"]] == "J1226-0")):
updateDictionaries(killmailESI, getZKB(killmailESI["killmail_id"], killmailESI["solar_system_id"]))
jMailCounter += 1
elif not killmailESI: # 20160824 has the problematic first Keepstar kill that does not appear on CREST (ESI unchecked), this (and the above killmailESI != []) is a temporary fix..
print("[] error...")
counter += 1
print "total kills: %i" % counter
print "total WH kills: %i" % jMailCounter
updateDB(cur, date)
conn.close()
|
From release 7.2 of Global Trade Services (GTS) it is possible to configure reasons for releasing blocked documents and Business partners in the area of compliance management. These reasons are then available from a dropdown menu when a document or BP is released.
However, the dropdown menu is not displayed when a user releases a document from the "Manually release blocked documents" transaction /SAPSLL/SPL_BLRL.
|
from utils import *
from geoutils import *
from curvebuilder import *
from circle import *
class GeoQuadNode:
    """Quadtree node pairing a geometric GeoQuad with its parametric bbox."""

    def __init__(self, geoquad, bbox):
        self.mgeoquad = geoquad   # geometric quad covered by this node
        self.mbbox = bbox         # corresponding bbox in parameter space
        self.msubnodes = []       # four children once split, else empty

    def contain(self, point):
        """True when *point* lies inside this node's geometry."""
        return self.mgeoquad.containpoint(point)

    def split(self):
        """Create the four child nodes (one per geometric/parametric quadrant)."""
        subgeoquads = self.mgeoquad.xysplit()
        subbboxes = self.mbbox.split4()
        for (subgeoquad, subbox) in zip(subgeoquads, subbboxes):
            self.msubnodes.append(GeoQuadNode(subgeoquad, subbox))

    def leaf(self, point, minsize):
        """Return the parametric bbox of the smallest descendant (perimeter
        <= minsize) containing *point*, or None when the point is outside.
        """
        if not self.contain(point):
            return None
        if self.mgeoquad.length() > minsize:
            # Bug fix: the original split unconditionally, appending four
            # duplicate children on every repeated query of the same node.
            if not self.msubnodes:
                self.split()
            for subnode in self.msubnodes:
                containsubresult = subnode.leaf(point, minsize)
                if containsubresult is not None:
                    return containsubresult
            # Defensive fallback: the point is inside this node but on a
            # boundary no child claims — return this node's bbox instead
            # of the original implicit None.
            return self.mbbox
        else:
            return self.mbbox
class GeoQuadTree:
    """Spatial index over a GeoQuad, rooted at the unit parameter square."""

    def __init__(self, geoquad):
        # The root covers the whole parametric domain [0,1] x [0,1].
        self.mroot = GeoQuadNode(geoquad, BBox(0.0, 0.0, 1.0, 1.0))

    def leaf(self, point, minsize):
        """Delegate the leaf search for *point* to the root node."""
        return self.mroot.leaf(point, minsize)
#
# Geometric contour defined by 2 x 2 curves
#
class GeoQuad:
    """Curved quadrilateral bounded by four curves (left, up, right, down).

    Provides a mapping between the unit parameter square [0,1]x[0,1] and
    the region enclosed by the four boundary curves, plus splitting,
    framing and (quadtree-based) inverse-mapping helpers.
    """

    def __init__(self, left, up, right, down):
        self.mleft = left
        self.mup = up
        self.mright = right
        self.mdown = down
        # Interpolators across opposite sides (CR comes from curvebuilder;
        # presumably a blend between the two curves — TODO confirm).
        self.interx = CR(self.mleft, self.mright)
        self.intery = CR(self.mdown, self.mup)
        # Closed outline: left + up + reversed right + reversed down.
        self.mpolygon = self.mleft.concat(self.mup).concat(self.mright.reverse()).concat(self.mdown.reverse())
        # Quadtree for the inverse mapping, built lazily by unmappoint().
        self.mgeoquadtree = None

    def polygon(self):
        """Closed boundary polygon of the quad."""
        return self.mpolygon

    def polygons(self):
        """The four boundary curves, oriented to chain into the outline."""
        return [self.mleft, self.mup, self.mright.reverse(), self.mdown.reverse()]

    def length(self):
        """Perimeter length of the quad."""
        return self.polygon().length()

    def xcurve(self, x):
        """Iso-parameter curve at abscissa *x* (clamped to [0,1])."""
        x = R(0.0, 1.0).trim(x)
        return self.interx.sample(x).maponpoints(self.mdown.sample(x), self.mup.sample(x))

    def ycurve(self, y):
        """Iso-parameter curve at ordinate *y* (clamped to [0,1])."""
        y = R(0.0, 1.0).trim(y)
        return self.intery.sample(y).maponpoints(self.mleft.sample(y), self.mright.sample(y))

    def xpoint(self, p):
        """Map parameter point *p* into the quad via an x iso-curve."""
        x = R(0.0, 1.0).trim(p.x())
        y = R(0.0, 1.0).trim(p.y())
        return self.xcurve(x).sample(y)

    def ypoint(self, p):
        """Map parameter point *p* into the quad via a y iso-curve."""
        x = R(0.0, 1.0).trim(p.x())
        y = R(0.0, 1.0).trim(p.y())
        return self.ycurve(y).sample(x)

    def containpoint(self, point):
        """True when *point* lies inside the quad's outline."""
        return self.polygon().containpoint(point)

    @staticmethod
    def square(center=Point(0.5, 0.5), size=1.0, npointsperface=10):
        """Axis-aligned square quad, npointsperface samples per side."""
        faces = [Polygon(Polygon([p1, p2]).points(npointsperface)) for (p1, p2) in pairs(Polygon.square(center, size).points())]
        return GeoQuad(faces[0], faces[1], faces[2].reverse(), faces[3].reverse())

    @staticmethod
    def rectangle(x1, y1, x2, y2, npointsperface):
        """Axis-aligned rectangle quad, npointsperface samples per side."""
        faces = [Polygon(Polygon([p1, p2]).points(npointsperface)) for (p1, p2) in pairs(Polygon.rectangle(x1, y1, x2, y2).points())]
        return GeoQuad(faces[0], faces[1], faces[2].reverse(), faces[3].reverse())

    @staticmethod
    def circle(center=Point(0.5, 0.5), size=1.0, npointsperface=10):
        """Circular quad: the circle outline split into four quarter arcs."""
        polygon = Circle(center, size / 2.0).polygon(npointsperface * 4).close()
        return GeoQuad(polygon.subline(0.0, 0.25), polygon.subline(0.25, 0.5), polygon.subline(0.5, 0.75).reverse(), polygon.subline(0.75, 1.0).reverse())

    #
    # split the quad in 2 horizontally, returning 2 subquads
    #
    def ysplit(self, abscissa=0.5):
        (newleft1, newleft2) = self.mleft.split(abscissa=abscissa)
        (newright1, newright2) = self.mright.split(abscissa=abscissa)
        newup1 = self.ycurve(abscissa)
        return (GeoQuad(newleft1, newup1, newright1, self.mdown), GeoQuad(newleft2, self.mup, newright2, newup1))

    def xsplit(self, abscissa=0.5, bylength=None):
        """Split the quad in 2 vertically, either at parametric *abscissa*
        (pass abscissa=None to disable) or at curve length *bylength*
        measured along the up/down sides.
        """
        if abscissa is not None:
            (newup1, newup2) = self.mup.split(abscissa=abscissa)
            (newdown1, newdown2) = self.mdown.split(abscissa=abscissa)
            newleft1 = self.xcurve(abscissa)
            return (GeoQuad(self.mleft, newup1, newleft1, newdown1), GeoQuad(newleft1, newup2, self.mright, newdown2))
        if bylength is not None:
            abs1 = bylength / self.mup.length()
            abs2 = bylength / self.mdown.length()
            (newup1, newup2) = self.mup.split(abscissa=abs1)
            (newdown1, newdown2) = self.mdown.split(abscissa=abs2)
            newleft1 = self.mappolygon(Polygon([Point(abs2, 0.0), Point(abs1, 1.0)]))
            return (GeoQuad(self.mleft, newup1, newleft1, newdown1), GeoQuad(newleft1, newup2, self.mright, newdown2))

    def xysplit(self, x=0.5, y=0.5):
        """Split into 4 subquads at (x, y).

        Bug fix: the original called self.xsplit(x) twice (once into the
        unused newx1/newx2 pair and once in the loop), doing the split
        work twice for the same result.
        """
        result = []
        for half in self.xsplit(x):
            lower, upper = half.ysplit(y)
            result.extend([lower, upper])
        return result

    def reduce(self, amount):
        """Return an inner quad covering *amount* of this quad, centered."""
        margin = (1.0 - amount) * 0.5
        frame = [self.xcurve(margin),
                 self.ycurve(1.0 - margin),
                 self.xcurve(1.0 - margin),
                 self.ycurve(margin)]
        frame = [curve.subline(margin, 1.0 - margin) for curve in frame]
        return GeoQuad(frame[0], frame[1], frame[2], frame[3])

    #
    # mapping
    #
    def mappolygon(self, polygon):
        """Map a parameter-space polygon into the quad."""
        return Polygon([self.ypoint(p) for p in polygon.points()])

    def unmappolygon(self, polygon):
        """Inverse-map a quad-space polygon back to parameter space."""
        newpoints = [self.unmappoint(point) for point in polygon.points()]
        return Polygon(newpoints)

    def unmappoint(self, point):
        """Inverse-map *point* to parameter space via quadtree search.

        Returns None when the point lies outside the quad; accuracy is
        bounded by perimeter/1000.
        """
        if self.mgeoquadtree is None:
            puts("build mgeoquadtree")
            self.mgeoquadtree = GeoQuadTree(self)
            puts("mgeoquadtree built")
        leaf = self.mgeoquadtree.leaf(point, self.mpolygon.length() / 1000.0)
        if leaf is not None:
            puts("point ", point.coords(), "found")
            return leaf.center()
        return None

    #
    #
    #
    def transverses2polygon(self, abs1, curve1, abs2, curve2):
        """Closed polygon bounded by two transverse curves (at abscissas
        abs1/abs2) and the up/down boundary segments between them."""
        up = self.mup.subline(abs1, abs2)
        down = self.mdown.subline(abs1, abs2)
        curve1new = curve1.maponpoints(Point(abs1, 0.0), Point(abs1, 1.0))
        curve2new = curve2.maponpoints(Point(abs2, 0.0), Point(abs2, 1.0))
        return Polygon.allconcat([self.mappolygon(curve1new), up, self.mappolygon(curve2new).reverse(), down.reverse()])
|
Simperium is a [Y Combinator](/company/y-combinator)-funded company that builds a platform for syncing content among people and their devices. They released [Simplenote](/product/simplenote) as a showcase of this platform.
EquityZen does not have an affiliation with, formal relationship with, or endorsement from Simperium or any companies featured above.
|
"""Setuptools file for a MultiMarkdown Python wrapper."""
import os
import re
from distutils.core import setup
from setuptools import find_packages
import pypandoc
# Extract __version__ = "X.Y.Z" from the package's _version module without
# importing the package.
with open(os.path.join('scriptorium', '_version.py'), 'r') as vfp:
    vtext = vfp.read()
v_re = r"__version__ = \"(?P<ver>.*)\""
mo = re.search(v_re, vtext)
if mo is None:
    # Explicit failure instead of the original's AttributeError on None.
    raise RuntimeError('Unable to find __version__ in scriptorium/_version.py')
VER = mo.group("ver")

# Convert the Markdown README to reST for PyPI; fall back to the raw
# Markdown text when pandoc is unavailable so a source build still works.
try:
    LONG_DESC = pypandoc.convert_file('README.md', 'rst')
except (OSError, RuntimeError):
    with open('README.md', 'r') as rfp:
        LONG_DESC = rfp.read()
# Package metadata; VER and LONG_DESC are computed above.
setup(
    name='scriptorium',
    version=VER,  # extracted from scriptorium/_version.py
    description='Multimarkdown and LaTeX framework for academic papers.',
    long_description=LONG_DESC,  # README.md converted to reST via pypandoc
    license='MIT',
    author='Jason Ziglar',
    author_email='jasedit@gmail.com',
    url="https://github.com/jasedit/scriptorium",
    classifiers=[
        'Development Status :: 4 - Beta',
        'Intended Audience :: Developers',
        'Topic :: Text Processing :: Markup',
        'Topic :: Text Processing :: Filters',
        'Topic :: Software Development :: Libraries :: Python Modules',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 3'
    ],
    packages=find_packages(),
    # Command-line entry point: `scriptorium` runs scriptorium:main.
    entry_points={
        'console_scripts': ['scriptorium = scriptorium:main'],
    },
    package_data={'scriptorium': ['data/gitignore']},
    install_requires=[
        'pyyaml',
        'argcomplete',
        'pymmd>=0.3'
    ]
)
|
This item was available on CD but is now sold out on all formats, sorry. Take a look at what else we have in by Efdemin, check out some related artists, head over to our new releases or knock yourself out reading our latest music news & album reviews.
USED CD on Dial, EX+/EX.
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 17-7-22 上午12:41
# @Author : tom.lee
# @docs : http://old.sebug.net/paper/books/scipydoc/numpy_intro.html
# @File : study_numpy.py
# @Software: PyCharm
"""
numpy
Numpy是Python的一个科学计算的库,提供了矩阵运算的功能,其一般与Scipy,matplotlib一起使用.
NumPy提供了两种基本的对象:
ndarray(N-dimensional array object)ndarray(数组)是存储单一数据类型的多维数组;
ufunc(universal function object)而 ufunc则是能够对数组进行处理的函数。
"""
import numpy as np
def split_line():
    """Print a visual separator line of 36 ('*' * 6 ** 2) asterisks."""
    print '*' * 6 ** 2
def np_version():
    """
    Print the installed NumPy version string.
    :return: None
    """
    print np.version.version
def np_list():
    """
    NumPy arrays:
    store elements of a single data type only;
    created with "numpy.array()";
    the element type can be specified explicitly via "dtype=numpy.<type>".
    :return: None
    """
    # Creation
    l = np.array([1, 2, 3], dtype=np.int8)
    a = np.array([1, 2, 3, 4])
    b = np.array((5, 6, 7, 8))
    c = np.array([[1, 2, 3, 4], [4, 5, 6, 7], [7, 8, 9, 10]])
    print 'l:', l
    print 'a:', a
    print 'b:', b
    print 'c:', c
    split_line()
    # Element type
    print l.dtype, c.dtype
    split_line()
    # Shape: a's shape has one entry, so it is a 1-D array.
    # c's shape has two entries, so it is 2-D: axis 0 has length 3, axis 1 has length 4.
    print l.shape, c.shape
    split_line()
    # Changing an axis length in place: only the axis sizes change,
    # the elements' positions in memory do not move.
    c.shape = 4, 3
    print c
    split_line()
    # When one axis is -1 its length is computed automatically from the total
    # element count, so this changes c's shape to (2, 6).
    c.shape = 2, -1
    print c
    split_line()
    # reshape returns a new array with a different shape; the original keeps its shape.
    # NOTE: a and d share the same underlying data buffer.
    d = a.reshape((2, 2))
    print 'a:', a
    print 'd:', d
    split_line()
def np_list_create():
    """Demonstrate the arange, linspace and logspace array constructors."""
    # arange builds a 1-D array over [start, end) with the given step
    # (start inclusive, end exclusive); element count: (end - start) / step.
    np_lst = np.arange(0, 10, 1)
    print np_lst
    print '大小:%d' % np_lst.shape
    split_line()
    # Arithmetic progression:
    # linspace(start, end, size) spans [start, end] inclusive with `size` points;
    # the `endpoint` keyword controls whether the end value is included.
    print np.linspace(0, 1, 12)
    split_line()
    # Geometric progression:
    # logspace(start exponent, end exponent, count, base defaults to 10)
    print np.logspace(0, 2, 20)
    split_line()
def np_list_by_byte():
    """
    frombuffer, fromstring, fromfile create arrays from byte sequences.
    Always pass the dtype argument explicitly.
    A Python 2 str is a byte sequence (one byte per character), so building an
    8-bit integer array from string s yields the ASCII code of each character.
    :return: None
    """
    s = 'abcdefg'
    print np.frombuffer(s, dtype=np.int8)
    split_line()
    print np.fromstring(s, dtype=np.int8)
    split_line()
    # Building a 16-bit array from a string pairs adjacent bytes into one
    # integer: bytes 98 and 97 become 98*256 + 97 = 25185, showing the data is
    # stored little endian (low byte first).
    # The string length must therefore be even.
    print np.fromstring('abcdefgh', dtype=np.int16)
    split_line()
def np_list_by_func():
    """
    Create arrays from a function.
    :return: None
    """
    # fromfunction takes a function and an iterable (tuple/list) of axis sizes:
    # (10,) means a 1-D array of 10 elements, the function receives one argument;
    # (5, 6) means a 5x6 2-D array, the function receives two arguments.
    print np.fromfunction(lambda x: x + 1, (10,))
    print np.fromfunction(lambda x, y: (x + 1) * (y + 1), (5, 6))
    split_line()
def np_list_opt():
    """
    Basic NumPy array operations; mostly identical to Python lists.
    :return: None
    """
    l = np.arange(10, 1, -1)
    print l
    print '做小值:', l.min()
    print '最大值:', l.max()
    print '下标0的元素:', l[0]
    split_line()
    # Fancy indexing: returns a copy (does NOT share memory), unlike the basic
    # indexing above which shares the underlying buffer.
    print l[np.array([1, 5, 3])]  # fetch elements via an index array
    print l[[1, 5, 3]]  # fetch elements via an index list
    split_line()
    # Boolean filtering
    print l[l > 3]  # directly select the values greater than 3
    print l > 3  # element-wise comparison returning a boolean array
    split_line()
# Demo entry point: uncomment the calls below to run the other examples.
if __name__ == '__main__':
    # np_version()
    # np_list()
    np_list_create()
    # np_list_by_byte()
    # np_list_by_func()
    # np_list_opt()
    print np.fromfunction(lambda x: x, (10,))
|
‘Solidarity Line’ uses the bodily formation of the defence line, which, instead of defending territory, becomes a transient line – a moving border, a living architectural element. The participants, without speaking, draw on an attentiveness to each other and the environment, navigating through the circulatory spaces of Tate Modern by collective intelligence. Individuality is constantly re-negotiated according to the needs and demands of the group, and in response to built form. Staff of Tate from across the different teams including cleaning and security staff, invigilators, administrators and curators were invited to take part. The action proposes new orders of movement that aim to foster new relationships between the building and those who use it, and an interconnectedness between building members and their individual spheres of operation.
‘Days of Action’ event series at Tate Modern uses collective devices from civic resistance and direct democratic assembly to explore particular relations of ’them’ and ‘us’, ‘I’ and ‘we’. Architecture is exchanged for the ‘other’ (the counter protest or authority), to be negotiated not as an antagonist but as an affective force that can generate new social and spatial possibilities.
|
#!/usr/bin/python3
# @begin:license
#
# Copyright (c) 2015-2019, Benjamin Niemann <pink@odahoda.de>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @end:license
from noisicaa import node_db
# Static node description for the builtin metronome processor.  Consumed by
# the noisicaa node registry; the 'builtin://metronome' URI ties the node
# type, its UI and its processor implementation together.
MetronomeDescription = node_db.NodeDescription(
    uri='builtin://metronome',
    display_name='Metronome',
    type=node_db.NodeDescription.PROCESSOR,
    node_ui=node_db.NodeUIDescription(
        type='builtin://metronome',
    ),
    builtin_icon='node-type-builtin',
    processor=node_db.ProcessorDescription(
        type='builtin://metronome',
    ),
    # Stereo audio output only; the node has no input ports.
    ports=[
        node_db.PortDescription(
            name='out:left',
            direction=node_db.PortDescription.OUTPUT,
            types=[node_db.PortDescription.AUDIO],
        ),
        node_db.PortDescription(
            name='out:right',
            direction=node_db.PortDescription.OUTPUT,
            types=[node_db.PortDescription.AUDIO],
        ),
    ]
)
|
Welcome to the Mirada restaurant dining guide! Here you'll find the best Mirada restaurants, as well as the newest and most recently reviewed Mirada-area restaurants.
Dine out using the Mirada restaurant reservations list or order in from Mirada food delivery restaurants.
Your key-making service is superb. It's so fast and very cheap. You are the best on-the-spot service provider.
Do you blog about dining in Mirada? Share your experiences with Menuism users!
What restaurants do you recommend in Mirada?
People found this by searching for: Mirada Restaraunt Menu.
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.4 on 2016-03-30 23:26
from __future__ import unicode_literals
from django.db import migrations, models
import uuid
class Migration(migrations.Migration):
    # Auto-generated initial migration (Django 1.9.4): creates the
    # Subscription, SubscriptionConfiguration and SubscriptionKey tables.
    # NOTE(review): generated code — do not hand-edit the operations below;
    # add a new migration instead.  verbose_name values are French, with
    # non-ASCII characters written as \xNN escapes by the generator.

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Subscription',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('last_change_at', models.DateTimeField(blank=True, editable=False, verbose_name='Modifi\xe9 pour la derni\xe8re fois \xe0')),
                # NOTE(review): verbose_name is a bytes literal here (b'old id'),
                # unlike every other field — presumably a py2/py3 porting
                # leftover; confirm before normalizing.
                ('old_id', models.IntegerField(blank=True, null=True, unique=True, verbose_name=b'old id')),
                ('dues_amount', models.FloatField(verbose_name='Montant de cotisation')),
                ('newspaper_amount', models.FloatField(blank=True, null=True, verbose_name='Montant Canal N7')),
                ('tender_type', models.IntegerField(choices=[(1, 'Esp\xe8ce'), (2, 'Ch\xe8que'), (4, 'Carte bancaire'), (5, 'Virement'), (6, 'Autre')], verbose_name='Mode de paiement')),
                ('validated', models.BooleanField(default=False, verbose_name='Valid\xe9')),
                ('date', models.DateTimeField(blank=True, null=True, verbose_name="date d'adh\xe9sion")),
                ('start_year', models.IntegerField(verbose_name='d\xe9but (ann\xe9e)')),
                ('end_year', models.IntegerField(verbose_name='fin (ann\xe9e)')),
                ('start_date', models.DateField(blank=True, null=True)),
                ('end_date', models.DateField(blank=True, null=True)),
                ('newspaper_subscription', models.BooleanField(default=False, verbose_name='Adh\xe9rer \xe0 Canal N7 - 15 euros/an')),
                ('user_authenticated', models.BooleanField(default=True)),
            ],
            options={
                'ordering': ['id'],
                'verbose_name': 'Adh\xe9sions',
            },
        ),
        migrations.CreateModel(
            name='SubscriptionConfiguration',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('type', models.IntegerField(choices=[(0, 'Promotions ant\xe9rieures \xe0 2011'), (1, 'Promotions 2011 \xe0 2015'), (2, 'Retrait\xe9'), (3, 'Bienfaiteur'), (4, 'Sans emploi (sur justificatif)'), (5, '\xc9tudiant(e), trois ans'), (6, '\xc9tudiant(e), deux ans'), (7, '\xc9tudiant(e), un an'), (8, 'Couple'), (9, 'Soutien'), (10, '\xc9tudiant(e)')], verbose_name='Type')),
                ('dues_amount', models.IntegerField(verbose_name='Montant de cotisation')),
                ('newspaper_amount', models.IntegerField(blank=True, null=True, verbose_name='Montant Canal N7')),
                ('duration', models.IntegerField(default=1, verbose_name='Dur\xe9e')),
                ('year', models.IntegerField(verbose_name='Ann\xe9e')),
            ],
            options={
                'verbose_name': 'Configuration',
            },
        ),
        migrations.CreateModel(
            name='SubscriptionKey',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('key', models.UUIDField(default=uuid.uuid4, editable=False)),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('expire_at', models.DateTimeField(editable=False)),
            ],
        ),
    ]
|
Recycle your Canon EF 35mm f/2 and get the best cash price guaranteed. Comparing prices from trusted mobile phone recyclers ensures you’ll always get more money for your old mobile phone.
Simply choose your preferred phone buyer from the list below to sell your Canon EF 35mm f/2 today. Click Recycle Now to be transferred to the buyer’s website to complete your sale.
Compare the best prices to recycle your Canon EF 35mm f/2 online.
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
import csv
import sys
import json
import graph
from math import sin, cos, sqrt
reader = csv.reader(sys.stdin)
class Star:
    """Hold data about a star.

    Attributes:
        name: display name of the star.
        spherical: dict with 'ascension', 'declination' (angles) and
            'distance' keys as parsed from the input table.
        rectilinear: dict with 'x', 'y', 'z' cartesian coordinates derived
            from `spherical` by findPosition().
    """
    def __init__(self):
        self.name = "[unknown star]"
        self.spherical = {
            'ascension': 0,
            'declination': 0,
            'distance': 0
        }
        self.rectilinear = {
            'x': 0,
            'y': 0,
            'z': 0
        }
    def __str__(self):
        return "Star " + self.name + " at " + str(self.rectilinear) + "."
    def findPosition(self):
        """Convert spherical coordinates to cartesian and store the result."""
        # NOTE(review): sin/cos expect radians, but the parsers below return
        # decimal degrees/hours — confirm the intended units.
        a = self.spherical['ascension']
        d = self.spherical['declination']
        D = self.spherical['distance']
        self.rectilinear = {
            'x': D * cos(a) * cos(d),
            'y': D * sin(a) * cos(d),
            'z': D * sin(d)
        }
    def forJSON(self):
        # NOTE(review): self.spectral is only assigned in fromWiki(); a Star
        # built via plain Star() would raise AttributeError here.
        return {"position": self.rectilinear, "spectral": self.spectral}
    def distance(self, star):
        # Returns the SQUARED euclidean distance (no sqrt) — sufficient for
        # relative comparisons and cheaper to compute.
        x, y, z = [
            (self.rectilinear['x'] - star.rectilinear['x']),
            (self.rectilinear['y'] - star.rectilinear['y']),
            (self.rectilinear['z'] - star.rectilinear['z'])
        ]
        return x*x + y*y + z*z
    @staticmethod
    def fromWiki(line):
        """Build a Star from one row of the Wikipedia-derived CSV table."""
        star = Star()
        star.name = line[2]
        star.spectral = line[4]
        # NOTE(review): right ascension is conventionally HMS and declination
        # DMS; the parser assignment here looks swapped — verify against the
        # input table's column format.
        star.spherical = {
            'ascension': parseDMS(line[-4]),
            'declination': parseHMS(line[-5]),
            'distance': float(line[1])
        }
        star.findPosition()
        return star
def parseDMS(dms):
    """Convert a 'DdMmSs'-style angle string to decimal degrees.

    Seconds are truncated to their first two characters, so any fractional
    part of the seconds field is discarded.
    """
    degrees, remainder = dms.split('d')
    minutes, seconds = remainder.split('m')
    seconds = seconds[:2]
    return float(degrees) + float(minutes) / 60 + float(seconds) / 3600
def parseHMS(hms):
    """Convert an 'HhMmSs'-style hour-angle string to decimal hours.

    Seconds are truncated to their first two characters (fractions dropped),
    matching parseDMS.
    """
    h, ms = hms.split('h')
    m, s = ms.split('m')
    # Bug fix: previously sliced `ms` (the whole 'MmSs' remainder) instead of
    # the seconds field, so the minutes digits were reused as the seconds.
    s = s[0:2]
    return float(h) + (float(m)/60) + (float(s) / 3600)
def main():
    """Read stars from stdin CSV, link them via graph.walk, print JS-style data."""
    stars = {}
    for entry in reader:
        star = Star.fromWiki(entry)
        stars[star.name] = star
    # graph.walk (project module) selects the wormhole links between stars.
    wormholes = graph.walk(stars)
    for n, s in stars.iteritems():
        stars[n] = s.forJSON()
    # Emit JavaScript-style assignments, presumably consumed by a
    # visualisation page — confirm against the downstream consumer.
    print "Stars = " + json.dumps(stars)
    print "Wormholes = " + json.dumps(wormholes)
if __name__ == "__main__":
    main()
|
Publish by in Category interior design at September 23rd, 2018. Tagged with black grey and white table runner. gray and white table runner. grey and white chevron table runner. grey and white table runner. white and grey table runner.
Grey And White Table Runner has 50 pictures of interior design, including Grey And White Table Runner Amazing Acheter Trellis Mat Topper Aothpher Classic Interior Design 1. Grey And White Table Runner Unconvincing Oversized Gray Melange Khadi World Market Interior Design 2. Grey And White Table Runner Awesome Buy From Bed Bath Beyond Interior Design 3. Grey And White Table Runner Amaze Amazon Com Uphome 1pc Classical Chevron Zig Zag Pattern Interior Design 4. Grey And White Table Runner Prodigious Z Gallerie Bukhara Charcoal Interior Design 5.
Tags : black grey and white table runner. gray and white table runner. grey and white chevron table runner. grey and white table runner. white and grey table runner.
|
#!/usr/bin/env python3
# Requires Python 3
"""
March Madness prediction script
Copyright (C) 2013-2019 Kyle Barlow
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
# Python standard library import statements
import argparse
import time
import os
import sys
import random
import copy
import multiprocessing
import queue
import pickle
import threading
import urllib.request
import itertools
import collections
# NumPy
import numpy as np
import pandas as pd
# Constants
# When True, simulations fan out across a multiprocessing.Pool.
use_multiprocessing = True
program_description = 'Python script to generate march madness brackets from ELO input (as in the format of, but not necessarily, the 538 data)'
default_output_file = 'output.txt'
# FiveThirtyEight forecast CSV; downloaded once and cached locally.
source_url = 'https://projects.fivethirtyeight.com/march-madness-api/2021/fivethirtyeight_ncaa_forecasts.csv'
default_data_file = 'fivethirtyeight_ncaa_forecasts.csv' # Caches url results
# Region pairs that meet in the two Final Four semifinals.
region_pairings = ( ('east', 'west'), ('midwest', 'south') )
# How fast ELO changes
elo_k_factor = 2.5 # Based on not-so precise guessing in order to get statistics after many simulations to match 538 closely enough
# Mapping for strings describing each round to an integer (for indexing)
round_dictionary = {
    0 : 'FIRST FOUR',
    1 : 'ROUND OF 64',
    2 : 'ROUND OF 32',
    3 : 'ROUND OF 16',
    4 : 'ELITE 8',
    5 : 'FINAL 4',
    6 : 'FINALS',
}
# For each in-region round (1-4), maps a seed to the seed it plays that
# round, assuming no upsets in the canonical bracket layout.
seed_pairs_by_round = {
    1 : {
        1:16, 16:1,
        8:9, 9:8,
        5:12, 12:5,
        4:13, 13:4,
        6:11, 11:6,
        3:14, 14:3,
        7:10, 10:7,
        2:15, 15:2,
    },
    2 : {
        1:8, 8:1,
        4:5, 5:4,
        3:6, 6:3,
        2:7, 7:2,
    },
    3 : {
        1:4, 4:1,
        2:3, 3:2,
    },
    4 : {
        1:2, 2:1,
    },
}
class MonteCarloBracketSimulator(object):
    """State for simulated-annealing bracket optimization.

    Tracks the last accepted bracket (the Metropolis chain state) and the
    best-scoring bracket seen so far.
    """
    def __init__(self, starting_bt):
        self.highest_bt = starting_bt.copy()
        self.last_bt = starting_bt.copy()
        self.highest_score = starting_bt.expected_score()
        self.last_score = self.highest_score
        # Annealing temperature; overwritten by the driver loop at each step.
        self.temperature = 100.0
    def set_last_bt(self, bt):
        # Reset the chain to an arbitrary bracket (used for fresh restarts).
        self.last_bt = bt.copy()
        self.last_score = bt.expected_score()
    def boltzmann(self, bt):
        """Metropolis acceptance test for candidate bracket `bt`.

        Accepts improvements always, and worse brackets with probability
        exp(-delta/T).  Returns True when the candidate is accepted.
        """
        bt_score = bt.expected_score()
        score_delta = self.last_score - bt_score
        boltz_factor = ( -1 * score_delta / self.temperature )
        # Clamp the exponent to avoid overflow in exp().
        probability = np.exp( min(40.0, max(-40.0, boltz_factor) ) )
        if probability < 1:
            if random.random() > probability:
                # print ( 'reject', probability, self.last_score, bt_score )
                return False # reject
            # else:
            #     print ( 'MC accept', probability, self.last_score, bt_score )
        # else:
        #     print ( 'accept', probability, self.last_score, bt_score )
        # Accept
        self.last_bt = bt.copy()
        self.last_score = bt_score
        if self.highest_score == None or self.last_score > self.highest_score:
            self.highest_score = self.last_score
            self.highest_bt = bt.copy()
        return True
    def copy(self):
        # Return fast copy by pickling
        return pickle.loads( pickle.dumps(self) )
class Team(object):
    """A tournament team with a 538-style power rating and per-round win odds."""
    def __init__(self, name, region, seed, elo, win_prob_by_round):
        self.region = region.lower()
        self.seed = seed
        self.name = name
        # starting_elo never changes; elo drifts as simulated games update it.
        self.starting_elo = elo
        self.elo = elo
        # Maps round number -> 538's probability of winning that round.
        self.win_prob_by_round = win_prob_by_round
        # Keeps track of past ELO changes so we can undo them
        self.elo_history = {}
    @classmethod
    def init_from_row(cls, row, separator_character = ','):
        """Alternate constructor from one row of the 538 forecast CSV."""
        name = row['team_name']
        region = row['team_region']
        seed = row['team_seed']
        win_prob_by_round = {}
        for round_key in range(0, 7):
            win_prob_by_round[round_key] = float( row[ 'rd%d_win' % (round_key + 1) ] )
        # First Four teams carry an 'a'/'b' suffix on their seed; strip it.
        if seed.endswith('a') or seed.endswith('b'):
            seed = seed[:-1]
        try:
            seed = int(seed)
            elo = float(row['team_rating'])
        except ValueError:
            print ('Error parsing this line:')
            print (row)
            raise
        return cls(name, region, seed, elo, win_prob_by_round)
    def __repr__(self):
        return self.name
    def __eq__(self, other):
        # Only check equality based on names
        # NOTE(review): defining __eq__ without __hash__ makes Team unhashable
        # in Python 3; fine as long as Teams are never used as dict/set keys.
        return self.name == other.name
    def __lt__(self, other):
        return self.elo < other.elo
    def update_elo(self, number_wins, win_prob, round_number):
        """Apply the ELO adjustment for one game and record it for undo."""
        elo_change = elo_k_factor * (number_wins - win_prob)
        self.elo += elo_change
        assert( round_number not in self.elo_history ) # We can only have played one match per round
        self.elo_history[round_number] = elo_change
    def undo_elo_update(self, starting_round_number):
        '''
        Undo changes to ELO in self for specific round, and all rounds greater than that round
        '''
        for round_number in range(starting_round_number, max( round_dictionary.keys() ) + 1 ):
            if round_number in self.elo_history:
                # Later round numbers may not be in history if team lost earlier, so we use this if to check
                self.elo -= self.elo_history[round_number]
                del self.elo_history[round_number]
    def probability_of_victory(self, other, use_starting=False):
        """Logistic win probability from the rating gap.

        The 30.464 factor rescales 538 power ratings onto the classic
        ELO-over-400 curve — presumably tuned to match 538's published win
        probabilities (TODO confirm).
        """
        if use_starting:
            prob = 1.0 / (1.0 + 10.0 ** ( (other.starting_elo - self.starting_elo) * 30.464 / 400.0) )
        else:
            prob = 1.0 / (1.0 + 10.0 ** ( (other.elo - self.elo) * 30.464 / 400.0) )
        # print( 'prob_v', self, other, other.elo, self.elo, '%.2f' % prob )
        return prob
    def play_match(self, other, round_number, rigged = False, threshold_win_prob = None):
        '''
        Returns true if we beat other team, otherwise false
        Will randomly pick winner based on ELO, unless is rigged (in which case self wins)
        Updates ELOs
        If threshold_win_prob is not None, then team must have at least that chance of winning to win
        '''
        win_prob = self.probability_of_victory(other)
        number_wins = 0
        if rigged:
            number_wins += 1
        elif threshold_win_prob != None and 1.0 - win_prob < threshold_win_prob:
            number_wins += 1
        elif random.random() < win_prob:
            number_wins += 1
        self.update_elo( number_wins, win_prob, round_number )
        other.update_elo( 1 - number_wins, 1.0 - win_prob, round_number )
        if number_wins == 1:
            return True
        else:
            return False
class BracketTree(object):
    """Binary tree of tournament games.

    Each node is one game: leaves are Round-of-64 (or First Four) games and
    the root is the final.  `_teams` holds the two participants once known,
    `_winning_team_index` (0 or 1) the simulated winner of this game.
    """
    def __init__(self, round_number, region_name = None, seeds = None):
        self._children = []
        self._parent = None
        self._round_name = round_dictionary[round_number]
        self._round_number = round_number
        self._region_name = region_name
        self._seeds = seeds
        self._teams = []
        self._winning_team_index = None
    def copy(self):
        # Return fast copy by pickling
        return pickle.loads( pickle.dumps(self) )
    def visualize(self, spacer_len = 0, print_score = True, view_by_round = False, top_level_call = True):
        """Render the bracket as a list of text lines (optionally grouped by round)."""
        vis_lines = []
        if print_score:
            vis_lines.append( 'Expected score: %.2f' % self.expected_score() )
        vis_lines.append( '{}{}'.format(spacer_len * '-', self._round_name) )
        if self._winning_team_index == None:
            for team in self._teams:
                vis_lines.append( '{}{}'.format(spacer_len * ' ', team.name) )
        else:
            vis_lines.append( '{}{} ({}) def. {} ({})'.format(spacer_len * ' ', self._teams[self._winning_team_index].name, int(self._teams[self._winning_team_index].seed), self._teams[1-self._winning_team_index].name, int(self._teams[1-self._winning_team_index].seed)) )
        for child in self._children:
            if view_by_round:
                vis_lines.extend( child.visualize( spacer_len = 0, print_score = False, view_by_round = True, top_level_call = False ) )
            else:
                vis_lines.extend( child.visualize( spacer_len = spacer_len + 2, print_score = False, view_by_round = False, top_level_call = False ) )
        if top_level_call and view_by_round:
            # Regroup the flat line list so that all games of a round print together.
            score_line = ''
            if print_score:
                score_line = vis_lines[0]
                vis_lines = vis_lines[1:]
            last_round_line = None
            lines_by_round = collections.OrderedDict()
            for i, vis_line in enumerate(vis_lines):
                if i % 2 == 0:
                    last_round_line = vis_line
                    if last_round_line not in lines_by_round:
                        lines_by_round[last_round_line] = []
                else:
                    lines_by_round[last_round_line].append( vis_line )
            return_round_lines = []
            if print_score:
                return_round_lines.append(score_line)
            for round_line in lines_by_round:
                return_round_lines.append(round_line)
                for team_line in lines_by_round[round_line]:
                    return_round_lines.append(team_line)
                return_round_lines.append('')
            return return_round_lines
        return vis_lines
    def add_team(self, team):
        self._teams.append( team )
    def add_child(self, child):
        # Children must be games of the immediately preceding round, in the
        # same region (when a region applies at this depth).
        assert( child._round_number + 1 == self._round_number )
        if self._region_name != None:
            assert( child._region_name == self._region_name )
        child.set_parent( self )
        self._children.append(child)
    def set_parent(self, parent):
        self._parent = parent
    def _init_add_children(self, regional_teams, seeds, cls):
        # Helper function used by init_starting_bracket
        assert( len(seeds) == len(regional_teams) )
        assert( len(seeds) >= 2 and len(seeds) % 2 == 0 )
        if len(seeds) > 2:
            # Recursively split the seed list per the canonical pairings until
            # we reach individual games.
            for winning_seed in seeds[:2]:
                child = cls( self._round_number - 1, region_name = self._region_name )
                child_seeds = [winning_seed]
                current_round = self._round_number - 1
                while current_round > 0:
                    new_child_seeds = [ seed_pairs_by_round[current_round][s] for s in child_seeds]
                    child_seeds.extend( new_child_seeds )
                    current_round -= 1
                child_seeds.sort()
                child._init_add_children(
                    { k : regional_teams[k] for k in regional_teams if k in child_seeds },
                    child_seeds, cls,
                )
                self.add_child( child )
        else:
            for seed in seeds:
                if len(regional_teams[seed]) > 1:
                    # First four seed, add one more child
                    child = cls( self._round_number - 1, region_name = self._region_name )
                    for team in regional_teams[seed]:
                        child.add_team(team)
                    self.add_child( child )
                else:
                    # Not a first four seed
                    for team in regional_teams[seed]:
                        self.add_team( team )
    @classmethod
    def init_starting_bracket(cls):
        '''
        Uses round_dictionary to initialize a full bracket. Bracket is filled in according to results so far.
        '''
        teams = {}
        min_seed = None
        max_seed = None
        # Download and cache the 538 forecast data on first use.
        if not os.path.isfile(default_data_file):
            urllib.request.urlretrieve(source_url, default_data_file)
        df = pd.read_csv(default_data_file)
        # Keep only the latest men's forecast rows for teams still alive.
        df = df.loc[ df['gender'] == 'mens' ].copy().sort_values('forecast_date', ascending = False )
        df = df.loc[ df['forecast_date'] == df.iloc[0]['forecast_date'] ].copy()
        df = df.loc[ df['team_alive'] == 1 ].copy()
        df = df.drop_duplicates( ['team_name'] )
        # Read in team data
        for index, row in df.iterrows():
            team = Team.init_from_row(row)
            if min_seed == None or team.seed < min_seed:
                min_seed = team.seed
            if max_seed == None or team.seed > max_seed:
                max_seed = team.seed
            if team.region not in teams:
                teams[team.region] = {}
            if team.seed not in teams[team.region]:
                teams[team.region][team.seed] = [team]
            else:
                teams[team.region][team.seed].append( team )
        # Initialize root node (finals) and semifinals
        max_round = max(round_dictionary.keys())
        finals = cls(max_round)
        for region_names in region_pairings:
            final_four = cls(max_round-1)
            for region_name in region_names:
                elite_eight = cls(max_round-2, region_name = region_name)
                seeds = list( range(min_seed, max_seed + 1) )
                elite_eight._init_add_children( teams[region_name], seeds, cls )
                final_four.add_child( elite_eight )
            finals.add_child( final_four )
        return finals
    def random_perturb(self, pop_size):
        """Flip the winner at `pop_size` randomly chosen games."""
        nodes = random.sample( self.all_nodes(), pop_size )
        for node in nodes:
            node.swap_winner()
        # Run final verification after all swaps are complete
        self.verify_bracket()
    def single_random_perturb(self):
        node = random.choice( self.all_nodes() )
        node.swap_winner()
    def all_nodes(self):
        # All games in this subtree, including self (pre-order).
        nodes = [ self ]
        for child in self._children:
            nodes.extend( child.all_nodes() )
        return nodes
    def all_teams(self):
        all_teams = []
        for node in self.all_nodes():
            all_teams.extend( node._teams )
        return all_teams
    def swap_winner(self, threshold_win_prob = None):
        """Flip this game's winner and propagate the change up the bracket."""
        assert( len(self._teams) == 2 )
        current_winner = self._teams[ self._winning_team_index ]
        current_loser = self._teams[ 1 - self._winning_team_index ]
        loser_win_prob = current_loser.probability_of_victory(current_winner)
        if threshold_win_prob != None and loser_win_prob < threshold_win_prob:
            return
        # Roll back this round's ELO adjustments before re-deciding the game.
        for team in self._teams:
            team.undo_elo_update(self._round_number)
        if self._parent != None:
            self._parent.remove_team_upwards( self._teams[self._winning_team_index], self._teams[ 1 - self._winning_team_index] )
        self._winning_team_index = 1 - self._winning_team_index
        # Update ELOs according to swapped result
        self._teams[self._winning_team_index].play_match( self._teams[ 1 - self._winning_team_index], self._round_number, rigged = True )
    def remove_team_upwards(self, old_winner, new_winner):
        '''
        Removes a team that previously won in a child game
        Resimulates new winner in new random match
        '''
        our_old_winner = self._teams[self._winning_team_index]
        self._teams.remove( old_winner )
        self._teams.append( new_winner )
        assert( len(self._teams) == 2 )
        # Undo ELO updates before new match
        for team in self._teams:
            team.undo_elo_update(self._round_number)
        # Play match
        if self._teams[0].play_match( self._teams[1], self._round_number ):
            self._winning_team_index = 0
        else:
            self._winning_team_index = 1
        # Recursive call upwards
        if self._parent != None:
            self._parent.remove_team_upwards( our_old_winner, self._teams[self._winning_team_index] )
    def verify_bracket(self):
        '''
        Ensures that a bracket is valid and filled
        Checks that if a team won a lower round, is present in the upper round
        '''
        assert( self._winning_team_index != None )
        assert( len(self._teams) == 2 )
        prev_round_winners = sorted( self._teams )
        children_winners = sorted( [ child._teams[child._winning_team_index] for child in self._children ] )
        if len( self._children ) == 2:
            assert( prev_round_winners == children_winners )
        elif len( self._children ) == 1:
            assert( children_winners[0] in prev_round_winners )
        for child in self._children:
            child.verify_bracket()
    def simulate_fill(self):
        # Randomly fills in bracket based on ELO simulation
        # Fills in blanks
        assert( self._winning_team_index == None )
        for child in self._children:
            child.simulate_fill()
            self._teams.append( child._teams[child._winning_team_index] )
        assert( len( self._teams ) == 2 )
        if self._teams[0].play_match( self._teams[1], self._round_number ):
            self._winning_team_index = 0
        else:
            self._winning_team_index = 1
    def all_team_names(self):
        teams = set()
        for child in self._children:
            teams.update( child.all_team_names() )
        teams.update( [team.name for team in self._teams] )
        return teams
    def winners_vector(self):
        '''
        Returns vector representing how far teams advanced
        '''
        # Row i corresponds to the i-th team name in sorted order; column j is
        # 1 if that team reached round j in this simulated bracket.
        winners_dict = self.winners_dict()
        v = np.zeros( (len(winners_dict), len(round_dictionary)) )
        team_names = sorted( winners_dict.keys() )
        for i, team_name in enumerate(team_names):
            if winners_dict[team_name] >= 0:
                for j in range(0, winners_dict[team_name]+1):
                    v[i][j] += 1
        return v
    def team_names(self):
        return sorted( self.winners_dict().keys() )
    def winners_dict(self, furthest_round = None):
        """Map team name -> furthest round number reached (recursive accumulator)."""
        if furthest_round == None:
            min_round = min(round_dictionary.keys())
            furthest_round = {name : min_round - 1 for name in self.all_team_names()}
        for team in self._teams:
            if self._round_number - 1 > furthest_round[team.name]:
                furthest_round[team.name] = self._round_number - 1
        winning_team_name = self._teams[self._winning_team_index].name
        if self._round_number > furthest_round[winning_team_name]:
            furthest_round[winning_team_name] = self._round_number
        for child in self._children:
            child.winners_dict( furthest_round )
        return furthest_round
    def total_probability(self):
        """Joint probability (per current ELOs) of all simulated results in this subtree."""
        assert( len(self._teams) == 2 )
        winning_team = self._teams[self._winning_team_index]
        losing_team = self._teams[1-self._winning_team_index]
        return_prob = winning_team.probability_of_victory(losing_team)
        if len(self._children) == 2: # Skip first 4
            for child in self._children:
                return_prob = return_prob * child.total_probability()
        if return_prob > 1.0 or return_prob < 0.0:
            # NOTE(review): `child_with_winner` in the debug print below is not
            # defined in this scope and would raise NameError if this branch
            # were ever reached.
            print( winning_team, losing_team, self._round_number, winning_team.probability_of_victory(losing_team), child_with_winner.total_probability(), self._children[0].total_probability(), self._children[1].total_probability(), winning_team.elo, losing_team.elo )
            print( return_prob )
            raise Exception()
        return return_prob
        # return 0
        # for child in self._children:
        #     probability_of_victory *= child.total_probability()
        # assert( self._winning_team_index != None )
        # assert( len(self._teams) == 2 )
        # winning_team = self._teams[self._winning_team_index]
        # losing_team = self._teams[1-self._winning_team_index]
        # probability_of_victory *= winning_team.probability_of_victory(losing_team)
        # return probability_of_victory
    def round_cbs_score(self):
        # This dictionary is used to calculate the expected score of a bracket in leagues where
        # additional points are awarded for correct picks in later rounds. Each key corresponds
        # to the number of a round (see round_dictionary) above, and each value corresponds to
        # the weight for each correct pick in that round. For example, a key/value pair of
        # 3:2 would mean that a correct pick in the third round is worth twice as much as the baseline
        # The seed of winner is also added to score (to give more points for picking low seeds)
        default_cbs_scores = {
            0:0,
            1:1,
            2:2,
            3:3,
            4:4,
            5:6,
            6:8
        }
        assert( self._winning_team_index != None )
        assert( len(self._teams) == 2 )
        winning_team = self._teams[self._winning_team_index]
        return default_cbs_scores[self._round_number] + winning_team.seed
    def round_yahoo_score(self):
        # Yahoo scoring: round weight doubles each round, plus an upset bonus
        # equal to the positive seed differential.
        default_yahoo_scores = {
            0:0,
            1:1,
            2:2,
            3:4,
            4:8,
            5:16,
            6:32
        }
        assert( self._winning_team_index != None )
        assert( len(self._teams) == 2 )
        winning_team = self._teams[self._winning_team_index]
        losing_team = self._teams[1-self._winning_team_index]
        return max( [0, winning_team.seed - losing_team.seed] ) + default_yahoo_scores[self._round_number]
    def expected_score(self):
        # Expected value of our winner beating all possible opponents, recursive
        # NOTE(review): the two-children branch sums plain probabilities while
        # the else branch weights by round_score() — confirm the intended
        # scoring units are consistent across branches.
        score = 0.0
        winning_team = self._teams[self._winning_team_index]
        losing_team = self._teams[1-self._winning_team_index]
        if len(self._children) == 2:
            # Only recurse if two children (to avoid first four games)
            child_with_loser = None
            if self._children[0]._teams[0].name == losing_team.name or self._children[0]._teams[1].name == losing_team.name:
                child_with_loser = self._children[0]
            if self._children[1]._teams[0].name == losing_team.name or self._children[1]._teams[1].name == losing_team.name:
                assert( child_with_loser == None )
                child_with_loser = self._children[1]
            assert( child_with_loser != None )
            for possible_opponent in child_with_loser.all_teams():
                prob_opponent = possible_opponent.win_prob_by_round[self._round_number-1]
                score += winning_team.probability_of_victory(possible_opponent, use_starting=True) * prob_opponent
            for child in self._children:
                score += child.expected_score()
        else:
            score += self.round_score() * winning_team.probability_of_victory(losing_team, use_starting=True)
        return score
    def round_score(self):
        # Have to change score function manually below for now
        return self.round_cbs_score()
    def score(self):
        # NOTE(review): this sums only the direct children's round scores,
        # not the whole subtree (child.score()) — confirm this is intended.
        score = self.round_score()
        for child in self._children:
            score += child.round_score()
        return score
def simulate_winners_vector(bt_pickle):
    """Unpickle a bracket, simulate it to completion, and return its winners vector.

    Takes a pickled (blank) BracketTree so the function can be shipped to a
    multiprocessing worker; each call simulates an independent bracket.
    """
    bracket = pickle.loads(bt_pickle)
    bracket.simulate_fill()
    return bracket.winners_vector()
class CallbackVectorQueue(object):
    """Thread-safe accumulator for result vectors from async simulations.

    A background daemon thread drains a queue and sums incoming vectors into
    self.v, so multiprocessing callbacks never block on the addition itself.
    """
    def __init__(self, initial_v):
        self.q = queue.Queue()
        self.v = initial_v
        self.trials = 0
        self.thread = threading.Thread(target=self.thread_run)
        self.thread.daemon = True # Daemonize thread
        self.thread.start()
    def thread_run(self):
        # Runs forever; being a daemon thread, it dies with the process.
        while True:
            self.v += self.q.get()
            self.trials += 1
    def callback(self, v):
        # Invoked from the multiprocessing pool's result-callback thread.
        self.q.put(v)
    def close(self):
        # Best-effort flush: busy-wait until the queue looks empty.  The
        # worker may still be adding the final vector when this returns.
        while not self.q.empty():
            time.sleep(0.001)
def run_stats( number_simulations = 10000 ):
    """Estimate per-team advancement probabilities by repeated simulation.

    Simulates the bracket `number_simulations` times (optionally across a
    process pool) and prints, per team, the fraction of simulations in which
    the team reached each round (latest round first).
    """
    bt = BracketTree.init_starting_bracket()
    # Initial simulation to initialize vector
    bt_pickle = pickle.dumps( bt )
    initial_v = simulate_winners_vector(bt_pickle)
    v_callback = CallbackVectorQueue(initial_v)
    if use_multiprocessing:
        pool = multiprocessing.Pool()
    for sim_num in range(0, number_simulations):
        if use_multiprocessing:
            pool.apply_async( simulate_winners_vector, args = (bt_pickle,), callback = v_callback.callback )
        else:
            v_callback.callback( simulate_winners_vector(bt_pickle) )
    if use_multiprocessing:
        pool.close()
        pool.join()
    v_callback.close()
    v = v_callback.v
    # NOTE(review): v also contains the one extra initializing simulation, so
    # frequencies are summed over number_simulations + 1 trials but divided by
    # number_simulations — a small bias that vanishes for large counts.
    v /= float( number_simulations )
    print_list = []
    # Run simulation to fill in team names
    bt.simulate_fill()
    for i, team_name in enumerate( bt.team_names() ):
        # NOTE(review): champion_percentage is computed but never used.
        champion_percentage = v[i][ len(round_dictionary) - 1 ]
        l = list( reversed( v[i] ) )
        l.append( team_name )
        print_list.append( l )
    print_list.sort( reverse = True )
    for row in print_list:
        line = ''
        for x in row:
            if isinstance(x, str):
                line += x
            else:
                line += '%.2f ' % x
        print ( line )
    print ( 'Total trials: %d' % v_callback.trials )
def run_monte_carlo_helper(temp_steps, max_perturbations, mc, blank_bt):
    '''Run one annealing sweep over 'temp_steps' and return the simulator.

    Occasionally (5% of calls) restarts from a freshly simulated blank
    bracket; otherwise perturbs the simulator's last bracket at each
    temperature and lets the Boltzmann criterion accept or reject it.
    '''
    if random.random() >= 0.95:
        fresh = blank_bt.copy()
        fresh.simulate_fill()
        mc.set_last_bt( fresh )
    for temperature in temp_steps:
        candidate = mc.last_bt.copy()
        # Apply between 1 and max_perturbations random changes.
        candidate.random_perturb( random.randint(1, max_perturbations) )
        mc.temperature = temperature
        mc.boltzmann( candidate )
    return mc
def run_monte_carlo( num_trials = 10000, view_by_round = False ):
    '''Anneal bracket picks with a Monte Carlo search.

    Each outer trial runs one annealing sweep per CPU (via a
    multiprocessing pool when 'use_multiprocessing' is set), keeps the
    best-scoring bracket seen so far, and caches it to disk so later
    runs resume from it.

    Args:
        num_trials: Number of outer annealing rounds to run.
        view_by_round: If True, also print the best bracket by round.
    '''
    # Parameters for MC simulation
    max_perturbations = 10
    starting_temp = 20.0
    ending_temp = 1.0
    low_temp_final_steps = 500
    # Output parameters
    highest_mc_bt_cache = os.path.join('cache', 'highest_mc_bt.pickle') # Saves best bracket for reloading as starting point in later simulations
    highest_vis_output = os.path.join('cache', 'highest_bracket.txt')
    blank_bt = BracketTree.init_starting_bracket()
    if os.path.isfile( highest_mc_bt_cache ):
        # Resume from the best bracket found by a previous run.
        with open(highest_mc_bt_cache, 'rb') as f:
            bt = pickle.load(f)
    else:
        if not os.path.isdir( os.path.dirname( highest_mc_bt_cache ) ):
            os.makedirs( os.path.dirname( highest_mc_bt_cache ) )
        # Initial simulation
        bt = blank_bt.copy()
        bt.simulate_fill()
    mc = MonteCarloBracketSimulator( bt )
    # Linear cool-down followed by a hold at the final temperature.
    temp_steps = list( np.arange(starting_temp, ending_temp, -0.005) )
    temp_steps.extend( [ending_temp for x in range(low_temp_final_steps) ] )
    def callback(thread_mc):
        # Adopt a worker's simulator if it beat the current best score.
        nonlocal mc
        if thread_mc.highest_score > mc.highest_score:
            mc = thread_mc
    for trial in range(num_trials):
        if use_multiprocessing:
            # NOTE(review): a new Pool is created every trial -- confirm this
            # is intended rather than reusing a single pool.
            pool = multiprocessing.Pool()
            cpu_count = multiprocessing.cpu_count()
        else:
            cpu_count = 1
        # NOTE(review): the loop variable shadows 'cpu_count' here.
        for cpu_count in range(cpu_count):
            if use_multiprocessing:
                pool.apply_async( run_monte_carlo_helper, args = (temp_steps, max_perturbations, mc.copy(), blank_bt), callback = callback )
            else:
                callback( run_monte_carlo_helper( temp_steps, max_perturbations, mc.copy(), blank_bt ) )
        if use_multiprocessing:
            pool.close()
            pool.join()
        print ( 'MC simulation complete (round {})'.format(trial) )
        print ( 'Highest score: %.2f' % mc.highest_score )
        print ( 'Last score: %.2f\n' % mc.last_score )
        # Persist the best bracket (pickle) and a readable rendering.
        with open(highest_mc_bt_cache, 'wb') as f:
            pickle.dump(mc.highest_bt, f)
        with open(highest_vis_output, 'w') as f:
            for line in mc.highest_bt.visualize():
                f.write( line + '\n' )
    if view_by_round:
        print( '\n'.join( mc.highest_bt.visualize( view_by_round = True ) ) )
def run_quick_pick( score_thresh, view_by_round = False ):
    '''Generate random filled brackets until one meets the threshold.

    Args:
        score_thresh: Minimum expected score to accept, or None to
            accept the first randomly filled bracket.
        view_by_round: If True, print the bracket round by round.
    '''
    while True:
        bt = BracketTree.init_starting_bracket()
        bt.simulate_fill()
        # 'is None' is the correct identity test ('== None' is unidiomatic
        # and can misbehave with custom __eq__ implementations).
        if score_thresh is None or bt.expected_score() >= score_thresh:
            break
    print ( '\n'.join( bt.visualize( view_by_round = view_by_round ) ) )
def predictor():
    '''Command-line entry point: parse arguments and dispatch to the
    requested simulation mode(s). Modes are not mutually exclusive.'''
    parser = argparse.ArgumentParser(description=program_description)
    parser.add_argument('-s', '--stats', type=int, default=0,
                        help="Run many times to get statistics")
    parser.add_argument('-m', '--monte_carlo', type=int, default=0,
                        help="How many outer loops of ramping monte carlo simulation")
    parser.add_argument('-q', '--quick_pick', action='store_true', default=False,
                        help='Generate a "quick pick" style bracket')
    parser.add_argument('--quick_thresh', type=float, default=None,
                        help='If running a quick pick, you can specify a minimum expected score threshold here')
    parser.add_argument('--view_by_round', action='store_true', default=False,
                        help='Print output by round')
    args = parser.parse_args()
    if args.quick_pick:
        run_quick_pick(args.quick_thresh, view_by_round=args.view_by_round)
    if args.stats > 0:
        run_stats(args.stats)
    if args.monte_carlo > 0:
        run_monte_carlo(args.monte_carlo, view_by_round=args.view_by_round)
# Allow the module to be run directly as a command-line tool.
if __name__ == "__main__":
    predictor()
|
The Osprey are back fishing at Kootenay Lake and checking out the nest sites that they left last year. The buds are coming out on the trees, the grass is greening up and I can hear the creek roaring with spring melt. It may be rainy and chilly this morning but I do like April.
I watched an Osprey hover and then plummet into the water and then emerge with a fish yesterday. Osprey do not actually dive into the water but rely on their formidable claws to pierce and grab the fish. Their powerful wings can pull them from the water’s surface and back into the air with the fish. Their claws “lock” into the fish and it can be difficult for an Osprey to release a fish that is too large or powerful to lift from the water. I have seen an Osprey with a fish bullied out of its prize by a Bald Eagle. Osprey and Bald Eagles often seem to confront each other over resources such as fish or nesting sites and nesting materials.
The female Osprey is larger than the male and spends more time incubating eggs and brooding young. You can often hear the female calling from the nest until the male comes in with a fish. The male does most of the fishing until the young are much larger and more demanding and then both adults are kept busy fishing. The male also brings fish to the female on the nest during “courting” – fish is important to maintaining the pair bond. Osprey pairs stay together for several seasons.
Osprey eggs hatch asynchronously – the first hatched will be larger than the second hatched who is larger than the third hatched. In a good year, all the young may survive, but if resources are limited, then the largest young demand and get the most food. It may seem harsh, but the resources go to the most likely to survive.
I hope that the pair I watch most often will have young this year. There are crows nesting nearby and people often camp overnight beneath the nest. If the birds are frightened from the nest, the crows can steal in and eat the eggs. But the Osprey have raised young two out of the last three seasons and perhaps this year will be another good year.
I have posted some of my favourite Osprey pictures here.
all writing and photos copyright Joanne Siderius 2013.
Beautiful photos! Just last week, I watched an Osprey take a dive into the lake in our backyard. He munched on the fish for over an hour.
You’re right, he did tackle the head first. One of these days, I’d like to invest in a better camera. Thanks for the terrific pictures.
I was at Crescent Valley a short while ago, saw an Osprey across the river, stopped to take a shot, and it dove right in front of me.
very, very, very, cool photos Derek! I have yet to capture an osprey diving – at least with non-blurry photos! It is wonderful watching an osprey in action – magnificent birds. Thanks for sharing the photos!
« The Bears are Out!
|
# Patchwork - automated patch tracking system
# Copyright (C) 2008 Jeremy Kerr <jk@ozlabs.org>
#
# SPDX-License-Identifier: GPL-2.0-or-later
import codecs
from datetime import datetime as dt
from datetime import timedelta
from email.utils import make_msgid
import os
from django.contrib.auth.models import User
from patchwork.models import Bundle
from patchwork.models import Check
from patchwork.models import Comment
from patchwork.models import CoverLetter
from patchwork.models import Patch
from patchwork.models import Person
from patchwork.models import Project
from patchwork.models import Series
from patchwork.models import SeriesReference
from patchwork.models import State
from patchwork.tests import TEST_PATCH_DIR
SAMPLE_DIFF = """--- /dev/null 2011-01-01 00:00:00.000000000 +0800
+++ a 2011-01-01 00:00:00.000000000 +0800
@@ -0,0 +1 @@
+a
"""
# Default body text for generated comments/submissions in tests.
SAMPLE_CONTENT = 'Hello, world.'
def read_patch(filename, encoding=None):
    """Read a diff from a file.

    Args:
        filename: Name of the file under TEST_PATCH_DIR to read.
        encoding: Optional text encoding; if given, the file is opened
            through 'codecs' with that encoding.

    Returns:
        The file contents as a single string.
    """
    file_path = os.path.join(TEST_PATCH_DIR, filename)
    # Context managers guarantee the handle is closed even if read() raises.
    if encoding is not None:
        with codecs.open(file_path, encoding=encoding) as f:
            return f.read()
    with open(file_path) as f:
        return f.read()
# Canonical Django form-validation error messages used in test assertions.
error_strings = {
    'email': 'Enter a valid email address.',
}
def create_project(**kwargs):
    """Create and return a test 'Project' with unique default fields."""
    index = Project.objects.count()
    defaults = {
        'linkname': 'test-project-%d' % index,
        'name': 'Test Project %d' % index,
        'listid': 'test%d.example.com' % index,
        'listemail': 'test%d@example.com' % index,
        'subject_match': '',
    }
    defaults.update(kwargs)
    return Project.objects.create(**defaults)
def create_person(**kwargs):
    """Create and return a test 'Person' with unique default fields."""
    index = Person.objects.count()
    defaults = {
        'email': 'test_person_%d@example.com' % index,
        'name': 'test_person_%d' % index,
        'user': None,
    }
    defaults.update(kwargs)
    return Person.objects.create(**defaults)
def create_user(link_person=True, **kwargs):
    """Create a 'User' object.

    Args:
        link_person (bool): If true, also create a linked Person object.
    """
    index = User.objects.count()
    defaults = {
        'username': 'test_user_%d' % index,
        'email': 'test_user_%d@example.com' % index,
        'first_name': 'Tester',
        'last_name': 'Num%d' % index,
    }
    defaults.update(kwargs)
    # Creation must go through the manager so the password is hashed;
    # the password is simply the username.
    user = User.objects.create_user(defaults['username'],
                                    defaults['email'],
                                    defaults['username'],
                                    first_name=defaults['first_name'],
                                    last_name=defaults['last_name'])
    if link_person:
        # Person has a single 'name' field rather than split names.
        defaults['name'] = ' '.join([defaults.pop('first_name'),
                                     defaults.pop('last_name')])
        defaults.pop('username')
        create_person(user=user, **defaults)
    return user
def create_maintainer(project=None, **kwargs):
    """Create a 'User' and register them as maintainer of 'project'."""
    project = project or create_project()
    user = create_user(**kwargs)
    profile = user.profile
    profile.maintainer_projects.add(project)
    profile.save()
    return user
def create_state(**kwargs):
    """Create and return a 'State' with a unique name and ordering."""
    index = State.objects.count()
    defaults = {
        'name': 'state_%d' % index,
        'ordering': index,
        'action_required': True,
    }
    defaults.update(kwargs)
    return State.objects.create(**defaults)
def create_bundle(**kwargs):
    """Create and return a 'Bundle', generating owner/project if absent."""
    index = Bundle.objects.count()
    defaults = {
        # Only generate related rows when the caller didn't supply them.
        'owner': None if 'owner' in kwargs else create_user(),
        'project': None if 'project' in kwargs else create_project(),
        'name': 'test_bundle_%d' % index,
    }
    defaults.update(kwargs)
    return Bundle.objects.create(**defaults)
def create_patch(**kwargs):
    """Create 'Patch' object.

    Generates a submitter, project, state and series unless supplied via
    kwargs; the patch is appended to its series with the next free number.
    """
    num = Patch.objects.count()
    # NOTE(stephenfin): Even though we could simply pass 'series' into the
    # constructor, we don't as that's not what we do in the parser and not what
    # our signal handlers (for events) expect
    if 'series' in kwargs:
        series = kwargs.pop('series')
    else:
        series = create_series(project=kwargs.pop('project', create_project()))
    # 'number' is only consumed when a series is present; otherwise it is
    # popped and silently dropped.
    if 'number' in kwargs:
        number = kwargs.pop('number', None)
    elif series:
        number = series.patches.count() + 1
    # NOTE(stephenfin): We overwrite the provided project, if there is one, to
    # maintain some degree of sanity
    if series:
        kwargs['project'] = series.project
    values = {
        'submitter': create_person() if 'submitter' not in kwargs else None,
        'delegate': None,
        'project': create_project() if 'project' not in kwargs else None,
        'msgid': make_msgid(),
        'state': create_state() if 'state' not in kwargs else None,
        'name': 'testpatch%d' % num,
        'headers': '',
        'content': 'Patch testpatch%d' % num,
        'diff': SAMPLE_DIFF,
    }
    values.update(kwargs)
    if 'patch_project' not in values:
        values['patch_project'] = values['project']
    patch = Patch.objects.create(**values)
    if series:
        # Fall back to the next free slot if 'number' was passed as falsy.
        number = number or series.patches.count() + 1
        series.add_patch(patch, number)
    return patch
def create_cover(**kwargs):
    """Create 'CoverLetter' object.

    Generates a submitter, project and series unless supplied via
    kwargs; the cover letter is attached to its series.
    """
    num = CoverLetter.objects.count()
    # NOTE(stephenfin): Despite first appearances, passing 'series' to the
    # 'create' function doesn't actually cause the relationship to be created.
    # This is probably a bug in Django. However, it's convenient to do so we
    # emulate that here. For more info, see [1].
    #
    # [1] https://stackoverflow.com/q/43119575/
    if 'series' in kwargs:
        series = kwargs.pop('series')
    else:
        series = create_series(project=kwargs.pop('project', create_project()))
    # NOTE(stephenfin): We overwrite the provided project, if there is one, to
    # maintain some degree of sanity
    if series:
        kwargs['project'] = series.project
    values = {
        # Check for 'submitter' (previously this checked the non-existent
        # 'person' key, so a caller-supplied submitter still triggered the
        # creation of an unused Person row).
        'submitter': create_person() if 'submitter' not in kwargs else None,
        'project': create_project() if 'project' not in kwargs else None,
        'msgid': make_msgid(),
        'name': 'testpatch%d' % num,
        'headers': '',
        'content': '',
    }
    values.update(kwargs)
    cover = CoverLetter.objects.create(**values)
    if series:
        series.add_cover_letter(cover)
    return cover
def create_comment(**kwargs):
    """Create and return a 'Comment', generating related rows if absent."""
    defaults = {
        'submitter': None if 'submitter' in kwargs else create_person(),
        'submission': None if 'submission' in kwargs else create_patch(),
        'msgid': make_msgid(),
        'content': SAMPLE_CONTENT,
    }
    defaults.update(kwargs)
    return Comment.objects.create(**defaults)
def create_check(**kwargs):
    """Create and return a 'Check', generating patch/user rows if absent."""
    defaults = {
        'patch': None if 'patch' in kwargs else create_patch(),
        'user': None if 'user' in kwargs else create_user(),
        'date': dt.utcnow(),
        'state': Check.STATE_SUCCESS,
        'target_url': 'http://example.com/',
        'description': '',
        'context': 'jenkins-ci',
    }
    defaults.update(kwargs)
    return Check.objects.create(**defaults)
def create_series(**kwargs):
    """Create and return a 'Series', generating related rows if absent."""
    defaults = {
        'project': None if 'project' in kwargs else create_project(),
        'date': dt.utcnow(),
        'submitter': None if 'submitter' in kwargs else create_person(),
        'total': 1,
    }
    defaults.update(kwargs)
    return Series.objects.create(**defaults)
def create_series_reference(**kwargs):
    """Create and return a 'SeriesReference', generating a series if absent."""
    defaults = {
        'series': None if 'series' in kwargs else create_series(),
        'msgid': make_msgid(),
    }
    defaults.update(kwargs)
    return SeriesReference.objects.create(**defaults)
def _create_submissions(create_func, count=1, **kwargs):
    """Create 'count' Submission-based objects.

    All created objects share a project and submitter (generated unless
    overridden) and receive strictly increasing dates.

    Args:
        create_func: Factory such as 'create_patch' or 'create_cover'.
        count (int): Number of objects to create.
        kwargs (dict): Overrides for various fields.
    """
    shared = {
        'project': None if 'project' in kwargs else create_project(),
        'submitter': None if 'submitter' in kwargs else create_person(),
    }
    shared.update(kwargs)
    base_date = dt.utcnow()
    return [create_func(date=base_date + timedelta(minutes=offset), **shared)
            for offset in range(count)]
def create_patches(count=1, **kwargs):
    """Create 'count' unique patches.

    Unlike 'create_patch', all created patches share a project,
    submitter and state; further shared fields can be supplied via
    kwargs.

    Args:
        count (int): Number of patches to create
        kwargs (dict): Overrides for various patch fields
    """
    shared = {
        'state': None if 'state' in kwargs else create_state(),
    }
    shared.update(kwargs)
    return _create_submissions(create_patch, count, **shared)
def create_covers(count=1, **kwargs):
    """Create 'count' unique cover letters.

    Unlike 'create_cover', all created cover letters share a project
    and submitter; further shared fields can be supplied via kwargs.

    Args:
        count (int): Number of cover letters to create
        kwargs (dict): Overrides for various cover letter fields
    """
    return _create_submissions(create_cover, count, **kwargs)
|
We provide the software and hardware solutions you need to help make your current or future business more efficient and productive.
What do you want your Website and/or Mobile App to do?
Here is how we can accelerate your ideas.
Power your customer impact through quality mobile applications. We develop both for iOS & Android all-inclusively.
We create custom websites aimed and optimized to seamlessly work with your business workflow.
© Kirusoft Dev, LLC - All rights reserved.
|
from PIL import Image
from colorPixelListPIL import *
def negative_color(pic, n):
    """Return a posterized color negative of 'pic' using n levels per channel.

    Each channel value is bucketed into one of n bands, the band index is
    inverted, and the result is rescaled onto the 0-255 range.

    Args:
        pic: Source PIL RGB image (not modified).
        n: Number of quantization levels per channel.

    Returns:
        A new PIL 'RGB' image of the same size.
    """
    span = 256/n
    width, height = pic.size
    ni = Image.new("RGB", (width, height), (255, 255, 255))
    for w in range(width):
        for h in range(height):
            (r, g, b) = pic.getpixel((w, h))
            # Invert the band index and rescale, clamping to 255: without
            # the clamp, a channel value of 0 maps to n*256//n == 256,
            # one past the valid 8-bit maximum.
            newR = min(255, int(n - r/span)*256//n)
            newG = min(255, int(n - g/span)*256//n)
            newB = min(255, int(n - b/span)*256//n)
            ni.putpixel((w, h), (newR, newG, newB))
    return ni
def grayscale(pic):
    """Return a copy of 'pic' converted to gray by averaging the channels."""
    width, height = pic.size
    result = pic.copy()
    for x in range(width):
        for y in range(height):
            (r, g, b) = pic.getpixel((x, y))
            level = int((r + g + b)/3)
            result.putpixel((x, y), (level, level, level))
    return result
def negative_range(pic):
    """Map each pixel of 'pic' to one of six preset colors.

    The red channel is split into six equal bands; each band selects a
    color from the palette. Pixels with r >= 256*5/6 past the last band
    keep their original value, matching the original chain of elifs.
    """
    width, height = pic.size
    result = pic.copy()
    # Palette entries for the six bands, in ascending band order.
    palette = [bisque4, SteelBlue2, CadetBlue,
               dark_sea_green, PaleTurquoise4, OliveDrab1]
    for x in range(width):
        for y in range(height):
            pix = pic.getpixel((x, y))
            (r, g, b) = pix
            for band, color in enumerate(palette, start=1):
                if r < 256/6*band:
                    pix = color
                    break
            result.putpixel((x, y), pix)
    return result
# Demo driver: apply each transform to 'original.jpg' and save results.
a = Image.open("original.jpg")
b = negative_color(a,7)
b.save("negated.jpg")
c = grayscale(a)
d = negative_range(c)
d.save("negated_1.jpg")
|
Over 20 years ago, Heidi and Rolland started a program at the dump in Maputo, where many young people have been brought to the Lord and also taken away from the dump to the Center and integrated into the community. This program is still going on today, but a tragedy hit the dump during the heavy rains in Maputo: the rains dragged a great deal of rubbish down onto the nearby houses and destroyed them while people were sleeping. Sixteen people died on the spot, one died at the hospital, and many others were injured. More than 200 houses were affected, most of them destroyed by the force of the debris. The Iris Church was also destroyed in the slide.
We have had teams deployed and responding to families that have been impacted by this tragedy! We are continuing to pray for the healing and recovery process. Thank you for standing and praying with us!!
|
# Copyright (c) 2011,2013 Turbulenz Limited
from warnings import warn
from decorator import decorator
from simplejson import JSONEncoder, JSONDecoder
from pylons import request, response
from urlparse import urlparse
from turbulenz_local.lib.exceptions import PostOnlyException, GetOnlyException
# pylint: disable=C0103
# Shared JSON codec instances; compact separators keep responses small.
_json_encoder = JSONEncoder(encoding='utf-8', separators=(',',':'))
_json_decoder = JSONDecoder(encoding='utf-8')
# pylint: enable=C0103
@decorator
def postonly(func, *args, **kwargs):
    # Reject non-POST requests with a 405 before invoking the handler.
    try:
        _postonly()
        return func(*args, **kwargs)
    except PostOnlyException as e:
        # NOTE(review): this returns the exception object itself, whereas
        # secure_post returns e.value -- confirm which is intended.
        return e
def _postonly():
    '''Raise PostOnlyException (setting a 405 JSON response) unless the
    current request uses the POST method.'''
    if request.method == 'POST':
        return
    headers = response.headers
    headers['Content-Type'] = 'application/json; charset=utf-8'
    headers['Cache-Control'] = 'no-store, no-cache, max-age=0'
    headers['Allow'] = 'POST'
    response.status_int = 405
    raise PostOnlyException('{"ok":false,"msg":"Post Only!"}')
def _getonly():
    '''Raise GetOnlyException (setting a 405 JSON response) unless the
    current request uses the GET method.'''
    if request.method == 'GET':
        return
    headers = response.headers
    headers['Content-Type'] = 'application/json; charset=utf-8'
    headers['Cache-Control'] = 'no-store, no-cache, max-age=0'
    headers['Allow'] = 'GET'
    response.status_int = 405
    raise GetOnlyException('{"ok":false,"msg":"Get Only!"}')
@decorator
def jsonify(func, *args, **kwargs):
    '''Decorator that JSON-encodes the wrapped function's return value.'''
    result = func(*args, **kwargs)
    return _jsonify(result)
def _jsonify(data):
    '''Encode 'data' as a JSON (or JSONP) response body and set the
    matching Content-Type header on the pylons response.'''
    # Sometimes we get back a string and we don't want to double-encode
    # Checking for basestring instance catches both unicode and str.
    if not isinstance(data, basestring):
        if isinstance(data, (list, tuple)):
            msg = "JSON responses with Array envelopes are susceptible to " \
                  "cross-site data leak attacks, see " \
                  "http://pylonshq.com/warnings/JSONArray"
            warn(msg, Warning, 2)
        data = _json_encoder.encode(data)
    # JSONP: wrap the payload in the caller-supplied callback function.
    if 'callback' in request.params:
        response.headers['Content-Type'] = 'text/javascript; charset=utf-8'
        cbname = str(request.params['callback'])
        data = '%s(%s);' % (cbname, data)
    else:
        response.headers['Content-Type'] = 'application/json; charset=utf-8'
    return data
@decorator
def secure_get(func, *args, **kwargs):
    '''Decorator: require GET and pass decoded request data to the handler.'''
    try:
        _getonly()
        return _secure(request.GET, func, *args, **kwargs)
    except GetOnlyException as error:
        return error.value
@decorator
def secure_post(func, *args, **kwargs):
    '''Decorator: require POST and pass decoded request data to the handler.'''
    try:
        _postonly()
        return _secure(request.POST, func, *args, **kwargs)
    except PostOnlyException as error:
        return error.value
def _secure(requestparams, func, *args, **kwargs):
    '''Decode the JSON 'data' request parameter, merge it with the other
    request parameters, pass the result to 'func' in place of its last
    positional argument, and return the JSON-encoded result.'''
    if 'data' in requestparams:
        data = _json_decoder.decode(requestparams['data'])
        if data is None:
            data = dict()
    else:
        data = dict()
    data.update(requestparams)
    # Replace the final positional argument (the controller's params
    # slot) with the merged data dict.
    args = args[:-1] + (data,)
    func_result = func(*args, **kwargs)
    # pylint: disable=E1101
    # Attach the request path so clients can correlate responses.
    func_result['requestUrl'] = urlparse(request.url).path
    # pylint: enable=E1101
    return _jsonify(func_result)
|
According to Ellen: Odette from the Spin Master team has been a friend for years. When we first came up with the idea of a giving Gala, Odette jumped on board. She puts everything into her generosity. With Odette comes passion, compassion and a contagious energy. Her whole team is there to donate skids and skids of toys, set up a Spin Master Kids Zone and play with all of our young attendees.
1. How did you come to be involved with Jacob’s ladder? To be honest I don’t even remember it was so long ago. We were approached by Ellen and her family and I knew from the moment I met them I wanted to be involved and contribute where we could. We knew we could help develop the vision of Ellen and Jacob.
2. What does your affiliation with Jacob’s Ladder and (Jake’s Gigantic Give) bring to you? To Spin Master employees it means a day of giving. Getting outside of the office and seeing the toys we are in the business of creating bring smiles to so many children touches everyone’s hearts. The staff looks forward to the event each year.
5. Which living person do you most admire? Any person with a disability or a challenge that they manage to overcome each and every day.
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
from __future__ import division, print_function
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib.patches import Circle, Polygon
from matplotlib import rc
from matplotlib.ticker import MultipleLocator, FormatStrFormatter
from matplotlib.ticker import LogLocator
# Constant 2*pi * 4*sqrt(2); currently unused below, kept for reference.
P = 8.0*np.arctan(1.0)*4.0*np.sqrt(2.0)

# --- Global plot configuration ------------------------------------------
rc('text', usetex=True)   # typeset all labels through LaTeX
rc('font', family='serif')
mpl.rcParams['ps.usedistiller'] = 'xpdf'
mpl.rcParams['font.size'] = 11

# --- Data ----------------------------------------------------------------
# Interquartile ranges, 7 rows of (step size, IQR) per integrator.
IQRs = np.loadtxt('IQRs_N400.dat')
LF_50_IQRs = IQRs[0:7,0:2]
TJ_50_IQRs = IQRs[7:14,0:2]
Cha_50_IQRs = IQRs[14:21,0:2]
RKNa14_50_IQRs = IQRs[21:28,0:2]
RKNb11_50_IQRs = IQRs[28:35,0:2]
RKNac1_50_IQRs = IQRs[35:42,0:2]
RKNbc1_50_IQRs = IQRs[42:49,0:2]
RKNbr1_50_IQRs = IQRs[49:56,0:2]
RKNar1_50_IQRs = IQRs[56:63,0:2]
RKNb6_50_IQRs = IQRs[77:84,0:2]

# Cost factors: work per step for each method, normalized to leapfrog.
metcf_LF = 1
metcf_Cha = 12.01
metcf_TJ = 3.004
metcf_RKNb5 = 5.003
metcf_RKNb6 = 6.024
metcf_RKNb11 = 11.03
metcf_RKNa14 = 14.04
metcf_RKNar1b = 21.73
metcf_RKNar1 = 5.005
metcf_RKNbr1 = 4.997
metcf_RKNac1 = 28.62 if False else 30.36  # see note below
metcf_RKNbc1 = 28.62

# --- Plot: IQR vs normalized CPU time, log-log ---------------------------
fig = plt.figure(figsize=(9,6))
ax = fig.add_subplot(111)
ax.loglog(0.01/LF_50_IQRs[:,0] *metcf_LF , LF_50_IQRs[:,1] , 'v-', label='Leapfrog')
ax.loglog(0.01/TJ_50_IQRs[:,0] *metcf_TJ , TJ_50_IQRs[:,1] , '+-', label='Triple Jump')
ax.loglog(0.01/Cha_50_IQRs[:,0] *metcf_Cha , Cha_50_IQRs[:,1] , '^-', label='Chambers')
ax.loglog(0.01/RKNb6_50_IQRs[:,0] *metcf_RKNb6 , RKNb6_50_IQRs[:,1 ], '*-', label='RKNb6' )
ax.loglog(0.01/RKNa14_50_IQRs[:,0]*metcf_RKNa14, RKNa14_50_IQRs[:,1], 'o-', label='RKNa14')
ax.loglog(0.01/RKNbr1_50_IQRs[:,0]*metcf_RKNbr1, RKNbr1_50_IQRs[:,1], 'p-', label='RKNbr1')
ax.loglog(0.01/RKNac1_50_IQRs[:,0]*metcf_RKNac1, RKNac1_50_IQRs[:,1], 's-', label='RKNac1')
ax.set_xlim(2e-2, 1e2)
for tick in ax.xaxis.get_major_ticks():
    tick.label1.set_fontsize(16)
for tick in ax.yaxis.get_major_ticks():
    tick.label1.set_fontsize(16)
plt.legend(loc='lower left')
# Raw strings so LaTeX escapes like \delta and \mathrm survive intact
# (non-raw '\d'/'\m' trigger invalid-escape warnings on Python 3).
plt.xlabel(r'CPU time (normalized to LF, $\delta t = 0.01$)', fontsize=18)
plt.ylabel(r'Inter quartile range for $r-r_\mathrm{GBS}$', fontsize=18)
leg = plt.gca().get_legend()
ltext = leg.get_texts()
plt.setp(ltext, fontsize=14)
# Major y ticks every two decades, minor every decade.
majorLocator = LogLocator(100)
minorLocator = LogLocator(10)
ax.yaxis.set_major_locator(majorLocator)
ax.yaxis.set_minor_locator(minorLocator)
plt.savefig('400body_CPUtime_plot.eps',
            orientation='landscape', bbox_inches='tight')
plt.show()
|
PROFILE: MICHAEL D HIGGINS: Michael D Higgins has always railed against the predictable and lazy groove that people get into, with no brow-furrowing or thinking required, when describing events, people or phenomena.
But then there are the things in an eventful past that rarely, if ever, get mentioned these days, and don’t just weave in so handily to the common perception of him: Clare man, farm boy, chairman of the Kevin Barry Cumann of Fianna Fáil; leading member of the Legion of Mary in Galway; diehard supporter of Galway United football club. And so on.
What is agreed between all who speak of Higgins is this: now, at the age of 70, he has reached a status where his party colleagues see him as an icon and treasure (evidenced by his victory in last Sunday’s selection convention). There is huge affection for him within the party.
He is an instantly familiar figure with first-name recognition (Michael D is as recognisable as Enda, as Bertie, as Charlie). And the slightly high-pitched voice and the accent that mixes posh with rural Clare has made him an easy target for mimics. It all adds up to a populist reach that extends beyond the confines of his party. Between now and November, he will learn if that reach is wide enough to take him to Áras an Uachtaráin.
His other great trait is passion. When Higgins became involved, there were no half-measures. He is perhaps the last of the great orators left in the Dáil, and a passionate Michael D speech is still something to behold.
But he has flaws. He can be long-winded and over the top. He doesn’t take direction or advice, preferring to rely on his own instincts. And sometimes they are not in tune with a changed political environment. And, say colleagues, he can be thin-skinned, short-tempered and too quick to rage and insult.
He has cut a mellower figure in recent times. But last year a right-wing American commentator with whom he was debating on the Right Hook show on Newstalk so enraged him that he called him a w***er live on air. It was hardly the most appropriate sound bite for an apprentice Uachtaráin na hÉireann.
Higgins, who retired as TD for Galway West earlier this year, has had ambitions for the presidency for well over a decade. When Mary Robinson’s term was coming towards an end, in 1997, his name was widely mentioned. In the event, the party reached outside for the anti-nuclear campaigner Adi Roche.
He pressed to become the party’s nominee in 2004, only to be denied. The party had just come out of a mediocre election and wasn’t flush with funds. It saw its priorities elsewhere.
It led to a peripatetic journey, with his own father opening a business in Limerick. But his father’s health deteriorated (alcohol was a factor), and his mother, finding it difficult to cope, had to make a heart-breaking decision. The oldest girls, who were twins, stayed in Limerick but Michael, then aged five, and his four-year-old brother, John, were sent to a farmhouse near Newmarket-on-Fergus, where they were reared by an unmarried aunt and uncle, who were loving and protective. Their mother visited every chance she got.
“It was a broken-hearted experience for my mother, who saw half her children disappear. It was loss. When I write [poetry] now there are parts of myself that were damaged by the experience,” he recalled recently.
The young Clare man moved to Galway to begin his first job, as a clerk with the ESB. He was writing articles and poetry in his spare time, and through the Legion of Mary he met a man named Redmond Corbett, who was so impressed by Higgins that he gave him a loan of £200 to allow him go to university. And so, at the age of 20, he enrolled as a mature student.
He threw himself into the role. He won scholarships; was auditor of the literary and debating society; became president of the student council; and even became chairman of the Fianna Fáil cumann at UCG.
In the late 1960s he met Noel Browne (an idol for Higgins), who asked him to join the Labour Party. “It was entirely consistent with my experience, what had happened in the State, bad housing, poverty,” he later said.
He met his wife, the actor Sabina Coyne, at a house party in Dublin. Coyne, with whom he has three grown-up children, was the most formative influence on his nascent career as a politician and academic, encouraging him, orchestrating his campaigns and strategy, and corralling overly enthusiastic flights of fancy.
During the 1970s and 1980s, Higgins would have been seen as a radical but not as hard left. He wasn’t on Labour’s militant left but defended that group passionately when the party was expelling its members in the late 1980s.
A few former members of the party have claimed Higgins’s ideology was tempered by opportunism, and some point to his change of tack on entering coalition. Higgins virulently opposed coalition during the 1980s, but in a very quick transformation he accepted a ministry when it was offered to him in 1992.
Be that as it may, Higgins, as Ireland’s first full minister for arts and culture, threw himself into the role. Des O’Malley said that Higgins would “go mad in government” but it did not happen. He introduced tax incentives for film, set up TG4, and hugely improved budgets for literature, theatre, dance and visual arts.
And then there is his poetry. It’s often parodied by the mimics, but he has had three collections published. The academic Declan Kiberd, writing about the new State that was formed in the 1920s, said Ireland was not a place for poets. Higgins will discover in November if that also holds true for the Áras.
|
##
## This file is part of the sigrok-meter project.
##
## Copyright (C) 2013 Uwe Hermann <uwe@hermann-uwe.de>
## Copyright (C) 2014 Jens Steinhauser <jens.steinhauser@gmail.com>
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
##
import datamodel
import os.path
import qtcompat
import samplingthread
import textwrap
QtCore = qtcompat.QtCore
QtGui = qtcompat.QtGui
class EmptyMessageListView(QtGui.QListView):
    '''List view that shows a message if the model is empty.'''
    def __init__(self, message, parent=None):
        # Name the class explicitly: 'super(self.__class__, ...)' recurses
        # infinitely if this class is ever subclassed.
        super(EmptyMessageListView, self).__init__(parent)
        self._message = message
    def paintEvent(self, event):
        '''Paint normally when data exists, otherwise draw the placeholder.'''
        m = self.model()
        if m and m.rowCount():
            super(EmptyMessageListView, self).paintEvent(event)
            return
        painter = QtGui.QPainter(self.viewport())
        painter.drawText(self.rect(), QtCore.Qt.AlignCenter, self._message)
class MainWindow(QtGui.QMainWindow):
'''The main window of the application.'''
    def __init__(self, context, drivers):
        '''Build the UI and start the background sampling thread.

        :param context: libsigrok context passed on to the sampling thread.
        :param drivers: driver specifications to sample from.
        '''
        super(self.__class__, self).__init__()
        self.context = context
        # Model/delegate pair backing the central measurement list.
        self.delegate = datamodel.MultimeterDelegate(self, self.font())
        self.model = datamodel.MeasurementDataModel(self)
        self.model.rowsInserted.connect(self.modelRowsInserted)
        self.setup_ui()
        # Measurements arrive asynchronously and are pushed into the model.
        self.thread = samplingthread.SamplingThread(self.context, drivers)
        self.thread.measured.connect(self.model.update)
        self.thread.error.connect(self.error)
        self.thread.start()
def setup_ui(self):
self.setWindowTitle('sigrok-meter')
# Resizing the listView below will increase this again.
self.resize(350, 10)
p = os.path.abspath(os.path.dirname(__file__))
p = os.path.join(p, 'sigrok-logo-notext.png')
self.setWindowIcon(QtGui.QIcon(p))
actionQuit = QtGui.QAction(self)
actionQuit.setText('&Quit')
actionQuit.setIcon(QtGui.QIcon.fromTheme('application-exit'))
actionQuit.setShortcut('Ctrl+Q')
actionQuit.triggered.connect(self.close)
actionAbout = QtGui.QAction(self)
actionAbout.setText('&About')
actionAbout.setIcon(QtGui.QIcon.fromTheme('help-about'))
actionAbout.triggered.connect(self.show_about)
menubar = self.menuBar()
menuFile = menubar.addMenu('&File')
menuFile.addAction(actionQuit)
menuHelp = menubar.addMenu('&Help')
menuHelp.addAction(actionAbout)
self.listView = EmptyMessageListView('waiting for data...')
self.listView.setFrameShape(QtGui.QFrame.NoFrame)
self.listView.viewport().setBackgroundRole(QtGui.QPalette.Window)
self.listView.viewport().setAutoFillBackground(True)
self.listView.setMinimumWidth(260)
self.listView.setSelectionMode(QtGui.QAbstractItemView.NoSelection)
self.listView.setEditTriggers(QtGui.QAbstractItemView.NoEditTriggers)
self.listView.setVerticalScrollMode(QtGui.QAbstractItemView.ScrollPerPixel)
self.listView.setItemDelegate(self.delegate)
self.listView.setModel(self.model)
self.listView.setUniformItemSizes(True)
self.listView.setMinimumSize(self.delegate.sizeHint())
self.setCentralWidget(self.listView)
self.centralWidget().setContentsMargins(0, 0, 0, 0)
def closeEvent(self, event):
self.thread.stop()
event.accept()
@QtCore.Slot()
def show_about(self):
text = textwrap.dedent('''\
<div align="center">
<b>sigrok-meter 0.1.0</b><br/><br/>
Using libsigrok {} (lib version {}).<br/><br/>
<a href='http://www.sigrok.org'>
http://www.sigrok.org</a><br/>
<br/>
License: GNU GPL, version 3 or later<br/>
<br/>
This program comes with ABSOLUTELY NO WARRANTY;<br/>
for details visit
<a href='http://www.gnu.org/licenses/gpl.html'>
http://www.gnu.org/licenses/gpl.html</a>
</div>
'''.format(self.context.package_version, self.context.lib_version))
QtGui.QMessageBox.about(self, 'About sigrok-meter', text)
@QtCore.Slot(str)
def error(self, msg):
'''Error handler for the sampling thread.'''
QtGui.QMessageBox.critical(self, 'Error', msg)
self.close()
@QtCore.Slot(object, int, int)
def modelRowsInserted(self, parent, start, end):
'''Resize the list view to the size of the content.'''
rows = self.model.rowCount()
dh = self.delegate.sizeHint().height()
self.listView.setMinimumHeight(dh * rows)
|
For everybody who essay help chat isn’t wholesome, you will likely not just enjoy the dollars, you could have won, as there will not ever be the occasion to shell out it. So that the student should choose a topic with superior care. In actual fact, school students typically are unable to undertake their jobs for a number of facets.
For instance, a preliminary essay service inspection will likely be exceedingly crucial. An individual may order customized essays and papers with filling in the essential form on its website. Sometimes using different sources of information demonstrates that the scholar is well rounded and the info is credible.
The major issue is that they might not be reliable, so you should search for http://holyword.hk/a-guide-to-my-narrative-essay/ the websites which work with human experts who truly understand and generate the essays which make sense. A superior site is your keystone for most tactics. Our special strategy and application of learning objectives will change how you produce and develop eLearning courses now and later on.
If you wish to purchase our essay services online, you should know our writers have the next capabilities. Writers may begin searching for tasks centered on date or pay range in a lot of classes.
There are lots of free scientific journals on the internet. All the important creative writing websites supply the thesis generator software as the most popular study programs. Through using college research paper examples, students will be in a position to understand the suitable format of how the in text citation and references ought to be carried out.
Also, you need to always buy only research papers online with no plagiarism since uniqueness of content will make sure your work isn’t copied from different references. The resources offered for internet essay serve the objective of unique heights of the academic curricula. No matter the topic might be, all you have to do is to enter the topic and you’ll immediately find an essay on your computer screen.
When you stick with your intended language from the beginning, you http://home.shannonparker.org/narrative-essay-reviews-amp-guide/ will produce the essay more consistent to the conventions of that language. Creating an essay can pose a number of difficulties, especially if it truly is true you never have a lot of expertise. If you don’t care what the essay says then utilize software, but should you will need something which is readable and makes sense then you must use an individual to do your rewriting.
An essay title generator is an easy and handy tool that supplies the multiple suggestions for your essay titles utilizing the advanced algorithms that gather the most creative titles structures which can be applied to your topic. Automated bibliography and reference creation can help save you an enormous quantity of time with your academic writing and assist you to avert formatting errors.
Your even compact experience of writing will be also very helpful. There are lots of writers that you could come across online. While you ought not go so far as hiring someone to compose the essay for you, it is fine to receive a small support and the web delivers plenty of resources to help you compose a stellar essay.
Experienced cheap essay writer on the internet is at your services. Company blogging is also a considerable devotion. If you’re the kind of writer who’s more creative than systemic, you’ll find it tough to be successful in a technical writing job.
What’s more, your teacher has shared her or his private opinion about your topic. The very first resource is media sources like CNN sites. Our teams are efficiently seasoned law experts that are acquainted with all details needed for perfect writing.
Its easy to purchase the favored meals essay on the sitehereand you can be assured, we would essay writing websites free include http://home-sol.com/?p=2812 your whole commentary and supply you with the top rated high quality essay on your granted subject issue. Despite how sad it might sound, there aren’t any absolutely free essay writers, as nobody is ready to do homework for someone at no cost. Some easy ways to make your essay stick out from the rest is to create sure it isn’t dull.
The job is to offer an argumentative essay that’s fair, honest, and not bias. As soon as you have the subject, you’ve got to work out the direction for your essay and compose a thesis statement. If you’re not so acquainted with the subject of the assigned essay it’s going to be even harder to think of valuable thoughts and thought provoking concepts.
If you’re readily available to pay extra for essay writing, you can have a look at our website and satisfy we to find the best essayists. The totally free article template stipulates the crucial ways that an essay could be constructed. When you start writing your essay you’ll have all info you have to make accurate direct quotations.
Much like in a written essay, you will need to choose what you want to explain or argue. An essay should also be in possession of a very good body. It should be descriptive.
|
import pygame
from pygame.locals import *
from Colors import *
class Ball:
    '''A bouncing ball drawn on a pygame surface.

    `position` packs (initial x, initial y, radius, outline width).
    '''

    def __init__(self, surface, color, position):
        self.layer = surface
        self.color = color
        self.init_X, self.init_Y = position[0], position[1]
        # Current center starts at the initial position.
        self.var_X = self.init_X
        self.var_Y = self.init_Y
        self.radius = position[2]
        self.width = position[3]
        # Per-step velocity along each axis.
        self.change_X = 5
        self.change_Y = 5

    # --- drawing ---
    def create_Ball(self):
        '''Draw the ball at its current center onto the target surface.'''
        self.center = [self.var_X, self.var_Y]
        pygame.draw.circle(self.layer, self.color, self.center,
                           self.radius, self.width)

    # --- ball movement ---
    def play_ball(self, direction=1):
        '''Advance the ball one step along its current velocity.'''
        self.var_X = self.var_X + self.change_X
        self.var_Y = self.var_Y + self.change_Y

    def bounce_backX(self):
        '''Reverse the horizontal direction of travel.'''
        self.change_X = -self.change_X

    def bounce_backY(self):
        '''Reverse the vertical direction of travel.'''
        self.change_Y = -self.change_Y

    def score(self):
        '''Reset the ball to its starting position.'''
        self.var_X, self.var_Y = self.init_X, self.init_Y

    # --- ball position (center) ---
    def get_Pos(self):
        '''Return the current center as a {"x": ..., "y": ...} mapping.'''
        return dict(x=self.var_X, y=self.var_Y)

    # --- manual control functions (debugging) ---
    # Each move only happens when the destination stays inside the
    # hard-coded playfield bounds used by the original revert-on-overrun
    # logic (top/left wall at 16, bottom at 486, right at 566).
    def moveUp(self):
        if self.var_Y - self.change_Y > 16:
            self.var_Y -= self.change_Y

    def moveDown(self):
        if self.var_Y + self.change_Y + self.radius < 486:
            self.var_Y += self.change_Y

    def moveLeft(self):
        if self.var_X - self.change_X > 16:
            self.var_X -= self.change_X

    def moveRight(self):
        if self.var_X + self.change_X + self.radius < 566:
            self.var_X += self.change_X
|
I confess, the idea of creating a mission statement for myself or my family seemed like a "touchy-feely" getting to know you kind of concept. I know what I believe, I know what is important to me in my life, why do I need to formalize it in writing? However, my husband has encouraged me to do just that, and with his prompting and my reading 7 Habits last year (I am still slogging through the family version - so much to digest), I became convinced of the advantages. I will say, it was not an easy thing to do. How do you boil down who you are and what your "mission" is in a focused way? After much thought and jotting down of ideas over several weeks, I came to the conclusion that the structure of my mission statement would be based on the 5 P's from Mother's Rule of Life. Those five areas encompass everything I wanted to define in my life, so I took each one and wrote a paragraph stating who I am and what I am striving for within each of those realms. While I realize that I will fall short of this daily, it is a good reminder of that which I am striving to attain, and a good check for myself in evaluating what activities to pursue. So, I want to encourage you if you are at a point where you feel lost, directionless or overwhelmed (hee hee, that probably describes us all right!), then take time to create a mission statement. It can be very revealing, and it certainly can help to focus your priorities. If you are not sure how to go about it, pick up one of the 7 Habits books, there are ideas there as well as examples of others' mission statements.
My little princess turns 8 today. We are having a joint birthday party after Pascha for her and the crocodile hunter (he gets to celebrate turning 10 on Holy Pascha this year!). So since I don't have any party photos for this year, I decided to post some from her birthday last year that I never got around to posting :) Now, I am known for going overboard when it comes to party planning, but since we don't really buy them a gift for their birthday (with 5 sets of aunts and uncles and 2 sets of grandparents, it really is not necessary), I consider the party to be their gift. Not to mention it's just so much fun! Last year she wanted an Alice in Wonderland party, which is a great theme to work with. For invites I found a great picture of the White Rabbit and printed that off my computer, then backed them with a layer of red paper and a layer of gorgeous black and white diamond paper. Party favors were easy, since her birthday the month after Valentines day, I got lots of great heart related things on clearance - lollipops etc, and found really cute flamingo gummies and miniature playing cards from Oriental Trading Company.
The cake was the most fun one I have made in years. I saw something like this years ago at an ICES convention and always hoped someday to recreate it. The entire cake is edible, even the tea cup, mouse and roses are made of gumpaste and fondant.
We don't invite lots of friends, usually it is just family and one friend invited, but since there are 10 cousins it is quite a group. The guests came in their maddest hats, and the birthday girl had her very own Alice dress (completed by a very tired mommy at 2:00 am the morning of the party).
The food was of course traditional tea fare - devonshire cream and scones, raspberries, cucumber and watercress sandwiches, lemonade served in tea cups and cookies that said "Eat Me". And to finish it all off, a round of flamingo croquet (put pink socks over croquet mallets, and set out a few flamingo lawn ornaments among the wickets) on the very wet and dreary lawn (weather is not very cooperative for March birthdays!) So that was a peek at last year. This year - Charlotte's Web!
Some of you may have known Mary from a previous blog - I used to love reading all of her wonderful ideas. She is the reason I started blogging - when she closed her blog, I was so sad to see such a great resource gone, and that inspired me to share my own ideas with others. Her blog introduced me to Waldorf style learning and wonderful ways of incorporating that into our school day, and she created wonderful projects for commemorating the many feasts of the church. Well, she is back and I just stumbled upon her again - so go check it out. This is one blog you will want to bookmark!
I still had a few butternut squash left over from last fall (who knew butternut squash would keep that long?) In an attempt to use them before they finally went bad, I found two great recipes I wanted to share, both of which are perfect for Lent.
The first is a wonderful soup - made even better with the addition of some heavy whipping cream garnish during those non-fasting periods.
The second, a pasta dish (just leave off the parmesan and use margarine not butter) even the kids enjoyed. These were not hard to make - and I made them both for company a few weeks ago - the soup can be made up early in the day then rewarmed, and the pasta takes about 20 minutes to cook.
Dear Lord, this bread that we have baked represents each one of us in this family and in our parish. We are offering ourselves to Thee, our very life, in humble obedience and total commitment. We place ourselves on Thy holy altar through this bread to be used by Thee in anyway that will enlarge Thy kingdom. Accept our gift and make us worthy to receive the greater gift that Thou will give to us when Thou consecrate this bread and give it back to us as Thy most precious body. Amen.
hilarious article! WARNING - "facetards" may find offense at the linked article. Those of you who still resist the "the Borg" will find it side-splittingly hysterical!
I am so very blessed! I have spent all of clean week at an icon workshop, and wanted to share my experience. This is the second workshop I have had the opportunity to take (the first was taught by Susan), and the timing for this one could not have been more appropriate. Learning to paint/write an icon is such an amazing and humbling experience. There is a closeness to God that I cannot describe, and an overwhelming sense of unworthiness as I attempt to allow Him to work through me. The workshop was from 8:30-4:30 each day, then I attended clean week services each night, so I feel as if I have been immersed in the life of the church in such an amazing way this week. We finished today (though my icon still lacks a few finishing details before blessing), and how appropriate to leave the workshop this afternoon and go to the Akathist to the Theotokos whose image I have gazed upon all week. Even more exciting is the thought of having my newly finished icon in time for Sunday of Orthodoxy, the time when the Orthodox church celebrates the defeat of the iconoclasts and the reaffirmation of the place of icons in the church. I cannot tell you the love I feel right now for the holy Theotokos, and the amazing beauty of this icon - one of the few that depicts any emotion. The prototype is The Virgin of the Passion, and it is the image of Mary, pointing Christ towards His path - the angel on the right holds the cross and nails, the angel on the left holds the spear that will pierce His side, and the sponge and vinegar offered to Him at the cross. The Christ child grasps His mother's thumb in a moment of anxiousness, and the face of the Theotokos shows her sadness in knowing she will some day have to let go of Him and witness her Son sacrificed for the world. 
As a mother, my heart aches for her, and as a sinner, my heart thanks and praises her for her strength and willingness to answer God's call, to bear, raise, and witness the death of her precious Son, the Son of God, that we may live through his glorious resurrection!
Here is a look at the bag I pack to help get us through the long Great Canon Services this week. These items are chosen so the kids can be kept quietly busy while still singing along with the responses to the Canon. The wonderful green book is a homemade coloring book created by the little princess' godfather (Erin's wonderful husband). It contains line drawings of every imaginable saint and feast day. The kids enjoy coloring these, and they also love to take blank sheets of paper and trace the images - great practice for future iconographers :) I try to rotate out a good selection of books, preferably ones that pertain to Lent or Pascha, but not always. (My favorite new book right now is The Man and the Vine - a beautifully illustrated rhyme about a man growing the grapes for the Eucharist wine - it is a very moving children's book). The crocodile hunter likes to have a prayer book, and the Queen has crayons and a few small wooden figures to play with. All of the books you see are available through St. Nectarios Press or Conciliar Press.
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.6 on 2016-05-26 06:15
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated schema migration (Django 1.9.6): introduces the Build
    # and Repo models and drops the old Result model.

    dependencies = [
        # Repo.user references the project's (possibly swapped) user model.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('interface', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='Build',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('ref', models.TextField()),
                ('sha', models.TextField()),
                # Build lifecycle states; bytes literals are a Python 2 artifact.
                ('status', models.TextField(choices=[(b'success', b'success'), (b'error', b'error'), (b'pending', b'pending'), (b'cancelled', b'cancelled')], default=b'pending')),
                ('result', models.TextField()),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                # Null until the build finishes.
                ('finished_at', models.DateTimeField(blank=True, null=True)),
            ],
        ),
        migrations.CreateModel(
            name='Repo',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('owner', models.TextField()),
                ('name', models.TextField()),
                ('webhook_url', models.URLField(blank=True, null=True)),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='repos', to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.DeleteModel(
            name='Result',
        ),
        # Added after CreateModel so 'interface.Repo' exists when the FK is built.
        migrations.AddField(
            model_name='build',
            name='repo',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='interface.Repo'),
        ),
    ]
|
Telangana TS DSC Recruitment 2017 DEO Ranga Reddy Teacher Posts Vacancies List : DEO Ranga Reddy District Official website , Teachers seniority, Vacancy Position, Download Rationalization List, Mandal wise vacancy , DEO Ranga Reddy Teachers Transfers seniority 2017 . Vacancy Position, Download Rationalization List, Mandal wise vacancy. District Educational Officer(DEO), Ranga Reddy has been released the Teachers vacancies category wise i.e., School Assistant Bio.Science ,English ,Social , Phy.Science, Secondary Grade Teacher(SGT), LP SA Telugu,LP SA Hindi, LFL HM and School Rationalization List of UP, High School, Primary Schools. Rationalization cadre wise list are available in the links provided below. This only tentative list of Particulars of Cadre wise list under rationalization and Primary, UP, High Schools. Stay tuned this web page for latest updates of Ranga Reddy Teachers Vacancies, Recruitment, Promotions, Transfers.
TSPSC TRT 2017 notification released for 8792 Teachers' Appointments as per 31 districts of Telangana state.
|
# _*_ coding=utf-8 _*_
"""
Django settings for simple_django_blog project.

Generated by 'django-admin startproject' using Django 1.8.2.

For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/

For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""

# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to the repo; move it to an
# environment variable before any production deployment.
SECRET_KEY = '35*mn&ihlm(0&&)v_8xfddc%c7&271yyiks_cgqf)45vi!#_1g'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

ALLOWED_HOSTS = []

# Application definition

INSTALLED_APPS = (
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    # Project apps.
    'blog',
    'accounts',
)

MIDDLEWARE_CLASSES = (
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
    'django.middleware.security.SecurityMiddleware',
)

ROOT_URLCONF = 'simple_django_blog.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        # Project-level templates plus the blog app's template directory.
        'DIRS': [os.path.join(BASE_DIR, 'templates'),
                 os.path.join(BASE_DIR, 'blog', 'templates'),
                 ],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'simple_django_blog.wsgi.application'

# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases

# SQLite file in the project root; fine for development only.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}

# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/

LANGUAGE_CODE = 'zh-hans'

TIME_ZONE = 'Asia/Shanghai'

USE_I18N = True

USE_L10N = True

USE_TZ = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/

STATIC_URL = '/static/'
STATICFILES_DIRS = (
    os.path.join(BASE_DIR, "static"),
    os.path.join(BASE_DIR, 'blog', 'static'),
)
|
A Placerville Woman was arrested on Tuesday, August 11, 2015 at approximately 1:36 PM by the California Highway Patrol after allegedly brandishing a firearm at another motorist traveling, eastbound on Highway 20 near Walnut Drive, in Williams.
According to the California Highway Patrol, officers were dispatched after receiving a report of a female passenger in a Ford Expedition brandishing a firearm at another motorist.
California Highway Patrol Officers and a Williams Police Officer observed the Expedition on Highway 20 and made a high-risk stop near the Southbound Interstate 5 on-ramp.
A 17-year-old driver and her 49-year-old passenger were both detained without incident.
Officers located a loaded revolver inside a fanny pack that was on the front passenger floorboard.
The juvenile was questioned at the scene and released.
49-year-old Colleen Brusseau was questioned and taken into custody. Brusseau was arrested for exhibiting a firearm in a motor vehicle, carrying a concealed weapon on her person in a vehicle and carrying a loaded firearm in a vehicle without being the registered owner. She was booked into the Colusa County Jail, and bail was set at $15,000.
|
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
from telemetry.core import browser_options
class BrowserOptionsTest(unittest.TestCase):
  """Checks how BrowserOptions interacts with the optparse parser it creates.

  Uses assertEqual throughout; the assertEquals spelling is a deprecated
  alias in unittest.
  """

  def testDefaults(self):
    """An option's default value shows up on the options object."""
    options = browser_options.BrowserOptions()
    parser = options.CreateParser()
    parser.add_option('-x', action='store', default=3)
    parser.parse_args(['--browser', 'any'])
    self.assertEqual(options.x, 3)  # pylint: disable=E1101

  def testDefaultsPlusOverride(self):
    """A value given on the command line overrides the option default."""
    options = browser_options.BrowserOptions()
    parser = options.CreateParser()
    parser.add_option('-x', action='store', default=3)
    parser.parse_args(['--browser', 'any', '-x', 10])
    self.assertEqual(options.x, 10)  # pylint: disable=E1101

  def testDefaultsDontClobberPresetValue(self):
    """A value preset before CreateParser() survives parsing the defaults."""
    options = browser_options.BrowserOptions()
    setattr(options, 'x', 7)
    parser = options.CreateParser()
    parser.add_option('-x', action='store', default=3)
    parser.parse_args(['--browser', 'any'])
    self.assertEqual(options.x, 7)  # pylint: disable=E1101

  def testCount0(self):
    """A 'count' option that never appears on the command line stays None."""
    options = browser_options.BrowserOptions()
    parser = options.CreateParser()
    parser.add_option('-x', action='count', dest='v')
    parser.parse_args(['--browser', 'any'])
    self.assertEqual(options.v, None)  # pylint: disable=E1101

  def testCount2(self):
    """Repeated 'count' flags accumulate on the options object."""
    options = browser_options.BrowserOptions()
    parser = options.CreateParser()
    parser.add_option('-x', action='count', dest='v')
    parser.parse_args(['--browser', 'any', '-xx'])
    self.assertEqual(options.v, 2)  # pylint: disable=E1101

  def testOptparseMutabilityWhenSpecified(self):
    """parse_args returns the same (mutated) options object it was fed."""
    options = browser_options.BrowserOptions()
    parser = options.CreateParser()
    parser.add_option('-x', dest='verbosity', action='store_true')
    options_ret, _ = parser.parse_args(['--browser', 'any', '-x'])
    self.assertEqual(options_ret, options)
    self.assertTrue(options.verbosity)

  def testOptparseMutabilityWhenNotSpecified(self):
    """Unspecified store_true flags come back falsy on the options object."""
    options = browser_options.BrowserOptions()
    parser = options.CreateParser()
    parser.add_option('-x', dest='verbosity', action='store_true')
    options_ret, _ = parser.parse_args(['--browser', 'any'])
    self.assertEqual(options_ret, options)
    self.assertFalse(options.verbosity)
|
Women and motorcycles don't mix!
Subject: Women and motorcycles don't mix!
While we wait for Unify to work.....here's a funny!!!!
|
"""Utilities for manipulating sequence sets.
"""
import logging
import os.path
from collections import Counter
from Bio import SeqIO
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
def clean_accession(rec_id):
    """Extract an accession key from one SeqRecord.

    The id of the given record is processed to remove domain location info
    added by HMMer. Most other records won't have a '/' character in the
    FASTA header key.
    """
    head, slash, _location = rec_id.rpartition('/')
    # rpartition yields an empty head when no '/' exists; in that case the
    # whole id is already the accession.
    return head if slash else rec_id
# Obsolete, but left here for backward compatibility for now
def clean_accessions(records):
    """Extract accession keys from an iterable of SeqRecords."""
    for rec in records:
        yield clean_accession(rec.id)
def intersect_keys(keys, reffile, cache=False, clean_accs=False):
    """Extract SeqRecords from the index by matching keys.

    keys - an iterable of sequence identifiers/accessions to select
    reffile - name of a FASTA file to extract the specified sequences from
    cache - save an index of the reference FASTA sequence offsets to disk?
    clean_accs - strip HMMer extensions from sequence accessions?

    Yields the matching SeqRecords; keys with no match are logged at INFO
    level and skipped.
    """
    # Build/load the index of reference sequences
    index = None
    if cache:
        refcache = reffile + '.sqlite'
        if os.path.exists(refcache):
            if os.stat(refcache).st_mtime < os.stat(reffile).st_mtime:
                # Reference FASTA is newer than the cache, so the cached
                # offsets may be wrong; fall through to rebuild below.
                logging.warn("Outdated cache; rebuilding index")
            else:
                try:
                    index = (SeqIO.index_db(refcache,
                                            key_function=clean_accession)
                             if clean_accs
                             else SeqIO.index_db(refcache))
                except Exception:
                    # Any load failure (corrupt/partial sqlite file) is
                    # treated as a cache miss.
                    logging.warn("Skipping corrupted cache; rebuilding index")
                    index = None
    else:
        # ':memory:' keeps the SQLite index off disk for one-shot runs.
        refcache = ':memory:'
    if index is None:
        # Rebuild the index, for whatever reason
        index = (SeqIO.index_db(refcache, [reffile], 'fasta',
                                key_function=clean_accession)
                 if clean_accs
                 else SeqIO.index_db(refcache, [reffile], 'fasta'))
    # Extract records by key
    if clean_accs:
        # Normalize the query keys the same way the index keys were built.
        keys = (clean_accession(k) for k in keys)
    for key in keys:
        try:
            record = index[key]
        except LookupError:
            # Missing keys are rare, so it's faster not to check every time
            logging.info("No match: %s", repr(key))
            continue
        yield record
def aa_frequencies(seq, gap_chars='-.'):
    """Calculate the amino acid frequencies in a sequence set.

    seq - an iterable of residue characters (e.g. a string or Seq)
    gap_chars - characters to exclude from the counts as alignment gaps

    Returns a dict mapping residue -> relative frequency. An empty or
    all-gap input returns an empty dict (previously this raised
    ZeroDivisionError).
    """
    aa_counts = Counter(seq)
    # Don't count gaps
    for gap_char in gap_chars:
        if gap_char in aa_counts:
            del aa_counts[gap_char]
    total = sum(aa_counts.values())
    if not total:
        return {}
    # Reduce to frequencies. Use items() rather than the Python-2-only
    # iteritems() so the module also runs on Python 3.
    scale = 1.0 / total
    return dict((aa, cnt * scale) for aa, cnt in aa_counts.items())
|
My Japanese is not perfect... What should I do?
Would it be the best remedy to the rainy season blues?
Volunteers shortage for the 2020 Olympics?
2019 university graduates will have the chance to be picky!
Working visa in Japan: What's new?
Japanese language requirements to be eased in the future!
|
"""
Experiment for NN4(RI)
Aim: To find the best max_epochs for NN4(*, 1024, 1024, 1024) + RI(k = 3, m = 200)
max_epochs: [22, 24, ... ,98, 140]
Averaging 20 models
Summary
epochs 88 , loss 0.421860471364
Time:3:40:30 on i7-4790k 32G MEM GTX660
I got a different result, epochs 112 loss 0.422868, before I reinstalled ubuntu 14.04 LTS.
So I chose max_epochs = 112.
"""
import numpy as np
import scipy as sp
import pandas as pd
from pylearn2.models import mlp
from pylearn2.models.mlp import RectifiedLinear, Softmax, MLP
from pylearn2.costs.mlp.dropout import Dropout
from pylearn2.training_algorithms import sgd, learning_rule
from pylearn2.termination_criteria import EpochCounter
from pylearn2.datasets import DenseDesignMatrix
from pylearn2.train import Train
from theano.compat.python2x import OrderedDict
import theano.tensor as T
from theano import function
import pickle
import sklearn.preprocessing as pp
from sklearn import cross_validation
from sklearn.cross_validation import StratifiedKFold
from sklearn.preprocessing import scale
from sklearn.metrics import log_loss
from sklearn.grid_search import ParameterGrid
from datetime import datetime
import os
from utility import *
from predict import predict
import pylab
# --- Data loading and train/validation split -------------------------------
path = os.getcwd() + '/'
path_log = path + 'logs/'  # results (csv / png) are written here
file_train = path + 'train.csv'
# train.csv: indexed rows with 93 numeric feature columns plus a categorical
# 'target' column (9 classes, per the Softmax layer below).
training = pd.read_csv(file_train, index_col = 0)
num_train = training.shape[0]
y = training['target'].values
yMat = pd.get_dummies(training['target']).values  # one-hot encoded targets
X = training.iloc[:,:93].values
scaler = pp.StandardScaler()
# X ** .6 compresses the feature values before standardising --
# presumably to tame heavy-tailed counts; TODO confirm against the data.
X2 = scaler.fit_transform(X ** .6)
kf = cross_validation.StratifiedKFold(y, n_folds=5, shuffle = True, random_state = 345)
# Only the FIRST of the 5 stratified folds is used as the train/valid split.
for train_idx, valid_idx in kf:
    break
y_train = yMat[train_idx]
y_valid = yMat[valid_idx]
# Wrap the split arrays in pylearn2 dataset objects.
training = DenseDesignMatrix(X = X2[train_idx], y = y_train)
valid = DenseDesignMatrix(X = X2[valid_idx], y = y_valid)
# --- Hyper-parameters ------------------------------------------------------
# Network layout: [l1, l2, l3, l4, output]
nIter = 20       # number of models averaged (different seeds)
# Params for RI (random indexing projection)
m = 200
k = 3
# Params for NN
epochs = 20      # epochs for the initial training run
epochs_add = 2   # extra epochs per continuation step
n_add = 60       # number of continuation steps (max 20 + 2*60 = 140 epochs)
bs = 64          # batch size
mm = .97         # momentum
lr = .01         # learning rate
dim2 = 1024      # width of hidden layers l2..l4
ir1 = .01        # init range for l1
ir2 = .05        # init range for l2..l4
ip = .8          # dropout include probability for hidden layers
ir_out = .05     # init range for the softmax layer
mcn_out = 2.5    # max column norm for the softmax layer
scores = []      # per-checkpoint log-loss records (rows of the result csv)
t0 = datetime.now()
# Running sum of validation predictions per checkpoint, for ensemble averaging.
predAll = [np.zeros(y_valid.shape) for s in range(n_add)]
# --- Train nIter models, checkpointing the validation loss as epochs grow ---
for i in range(nIter):
    seed = i + 3819
    # RImatrix (from utility) presumably builds a sparse random-indexing
    # projection of the 93 input features -- TODO confirm in utility.py.
    R = RImatrix(X.shape[1], m, k, rm_dup_cols = True, seed = seed)
    R = np.abs(R.todense().astype(np.float32))
    dim1 = R.shape[1]
    # l1 is masked by R so each unit only sees its projected feature subset.
    l1 = RectifiedLinear(layer_name='l1', irange = ir1, dim = dim1, mask_weights = R)
    l2 = RectifiedLinear(layer_name='l2', irange = ir2, dim = dim2, max_col_norm = 1.)
    l3 = RectifiedLinear(layer_name='l3', irange = ir2, dim = dim2, max_col_norm = 1.)
    l4 = RectifiedLinear(layer_name='l4', irange = ir2, dim = dim2, max_col_norm = 1.)
    output = Softmax(layer_name='y', n_classes = 9, irange = ir_out,
                     max_col_norm = mcn_out)
    mdl = MLP([l1, l2, l3, l4, output], nvis = X2.shape[1])
    # Initial run: `epochs` epochs at full learning rate with linear decay.
    trainer = sgd.SGD(learning_rate=lr,
                      batch_size=bs,
                      learning_rule=learning_rule.Momentum(mm),
                      cost=Dropout(input_include_probs = {'l1':1.},
                                   input_scales = {'l1':1.},
                                   default_input_include_prob=ip,
                                   default_input_scale=1/ip),
                      termination_criterion=EpochCounter(epochs),seed = seed)
    decay = sgd.LinearDecayOverEpoch(start=2, saturate=20, decay_factor= .1)
    experiment = Train(dataset = training, model=mdl, algorithm=trainer, extensions=[decay])
    experiment.main_loop()
    epochs_current = epochs
    # Continuation runs: keep training the SAME model in epochs_add-sized
    # steps at lr/10, scoring the model after each step.
    for s in range(n_add):
        del mdl.monitor  # pylearn2 requires dropping the monitor before re-training
        trainer = sgd.SGD(learning_rate=lr * .1,
                          batch_size=bs,
                          learning_rule=learning_rule.Momentum(mm),
                          cost=Dropout(input_include_probs = {'l1':1.},
                                       input_scales = {'l1':1.},
                                       default_input_include_prob=ip,
                                       default_input_scale=1/ip),
                          termination_criterion=EpochCounter(epochs_add),seed = seed)
        experiment = Train(dataset = training, model=mdl, algorithm=trainer)
        experiment.main_loop()
        epochs_current += epochs_add
        pred_train = predict(mdl, X2[train_idx].astype(np.float32))
        pred_valid = predict(mdl, X2[valid_idx].astype(np.float32))
        # Accumulate this checkpoint's validation predictions for averaging
        # over models; valid_avg is the ensemble-of-(i+1)-models loss.
        predAll[s] += pred_valid
        scores.append({'epochs':epochs_current, 'nModels':i + 1, 'seed':seed,
          'train':log_loss(y_train, pred_train),
          'valid':log_loss(y_valid, pred_valid),
          'valid_avg':log_loss(y_valid, predAll[s] / (i + 1))})
        print scores[-1], datetime.now() - t0
df = pd.DataFrame(scores)
if os.path.exists(path_log) is False:
print 'mkdir', path_log
os.mkdir(path_log)
df.to_csv(path_log + 'exp_NN4_RI_max_epochs.csv')
keys = ['epochs']
grouped = df.groupby(keys)
print 'epochs',grouped['valid_avg'].last().idxmin(),', loss',grouped['valid_avg'].last().min()
# epochs 88 , loss 0.421860471364
g = grouped[['train', 'valid']].mean()
g['valid_avg'] = grouped['valid_avg'].last()
print g.iloc[[0,1,32,33,34,58,59],:]
# train valid valid_avg
# epochs
# 22 0.319737 0.468458 0.436766
# 24 0.313538 0.468300 0.435694
# 86 0.193640 0.486078 0.422321
# 88 0.190694 0.487625 0.421860
# 90 0.187374 0.487897 0.421998
# 138 0.134388 0.512527 0.423662
# 140 0.132642 0.514666 0.425003
ax = g.plot()
ax.set_title('NN4(RI) m=200, k=3')
ax.set_ylabel('Logloss')
fig = ax.get_figure()
fig.savefig(path_log + 'exp_NN4_RI_max_epochs.png')
|
What is a Pi Widget? Well, to be truthful, we made it up to describe the small, widget-like segments which form our June General Meeting. We start with a Question & Answer Panel, Club news, a few words about our Special Interest Group activities, some self-help demonstrations, and One More Thing. Check out the agenda to see what is happening. And note the new start time.
10:00 - Questions & Answers - A Pi panel of experts will take your questions, which you can email to them beforehand! Send your questions to feedback@wap.org today!
10:50 - Club Stuff - the new Pi President has a speaking part!
Question & Answer Session: Advance questions are being solicited. This is a new wrinkle in our regular Q & A format, and it has a two-fold purpose. First, for members who cannot attend the meeting, we are offering them a chance to get their questions addressed, and second, with questions submitted beforehand, the experts will have more time to research the inquiry and provide a more thorough answer.
Send your questions to feedback@wap.org today!
Members Helping Members Session: During the main portion of the meeting, we will demonstrate how easy it is to upgrade the memory, hard drive, and, in most tower model Macs, the optical drive. We will be taking this demonstration a step further by providing direct help in upgrading your Mac if you bring it and the parts needed to the meeting. If you aren't quite ready, we can set up an appointment for next month's meeting or an upcoming Clubhouse Saturday.
|
# Copyright 2012-2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""Service action implementations"""
import logging
from novaclient import api_versions
from osc_lib.command import command
from osc_lib import exceptions
from osc_lib import utils
from openstackclient.i18n import _
LOG = logging.getLogger(__name__)
class DeleteService(command.Command):
    _description = _("Delete compute service(s)")

    def get_parser(self, prog_name):
        """Build the parser: one positional taking one or more service IDs."""
        parser = super(DeleteService, self).get_parser(prog_name)
        parser.add_argument(
            "service",
            metavar="<service>",
            nargs='+',
            help=_("Compute service(s) to delete (ID only). If using "
                   "``--os-compute-api-version`` 2.53 or greater, the ID is "
                   "a UUID which can be retrieved by listing compute services "
                   "using the same 2.53+ microversion.")
        )
        return parser

    def take_action(self, parsed_args):
        """Delete each requested service, then fail if any deletion failed."""
        compute_client = self.app.client_manager.compute
        failures = 0
        for service_id in parsed_args.service:
            try:
                compute_client.services.delete(service_id)
            except Exception as exc:
                # Keep going so one bad ID does not block the rest.
                failures += 1
                LOG.error(_("Failed to delete compute service with "
                            "ID '%(service)s': %(e)s"),
                          {'service': service_id, 'e': exc})
        if failures:
            msg = (_("%(result)s of %(total)s compute services failed "
                     "to delete.") % {'result': failures,
                                      'total': len(parsed_args.service)})
            raise exceptions.CommandError(msg)
class ListService(command.Lister):
    _description = _("List compute services. Using "
                     "``--os-compute-api-version`` 2.53 or greater will "
                     "return the ID as a UUID value which can be used to "
                     "uniquely identify the service in a multi-cell "
                     "deployment.")

    def get_parser(self, prog_name):
        """Build the parser: optional host/binary filters plus --long."""
        parser = super(ListService, self).get_parser(prog_name)
        parser.add_argument(
            "--host",
            metavar="<host>",
            help=_("List services on specified host (name only)")
        )
        parser.add_argument(
            "--service",
            metavar="<service>",
            help=_("List only specified service binaries (name only). For "
                   "example, ``nova-compute``, ``nova-conductor``, etc.")
        )
        parser.add_argument(
            "--long",
            action="store_true",
            default=False,
            help=_("List additional fields in output")
        )
        return parser

    def take_action(self, parsed_args):
        """List compute services, optionally filtered by host and binary."""
        compute_client = self.app.client_manager.compute
        # Base columns; --long appends the disabled reason.
        columns = (
            "ID",
            "Binary",
            "Host",
            "Zone",
            "Status",
            "State",
            "Updated At",
        )
        if parsed_args.long:
            columns = columns + ("Disabled Reason",)
        services = compute_client.services.list(parsed_args.host,
                                                parsed_args.service)
        rows = (utils.get_item_properties(service, columns)
                for service in services)
        return (columns, rows)
class SetService(command.Command):
    _description = _("Set compute service properties")

    def get_parser(self, prog_name):
        """Build the parser: host + binary positionals, enable/disable and
        up/down toggles (each pair mutually exclusive), and a disable reason.
        """
        parser = super(SetService, self).get_parser(prog_name)
        parser.add_argument(
            "host",
            metavar="<host>",
            help=_("Name of host")
        )
        parser.add_argument(
            "service",
            metavar="<service>",
            help=_("Name of service (Binary name), for example "
                   "``nova-compute``")
        )
        enabled_group = parser.add_mutually_exclusive_group()
        enabled_group.add_argument(
            "--enable",
            action="store_true",
            help=_("Enable service")
        )
        enabled_group.add_argument(
            "--disable",
            action="store_true",
            help=_("Disable service")
        )
        parser.add_argument(
            "--disable-reason",
            default=None,
            metavar="<reason>",
            help=_("Reason for disabling the service (in quotes). "
                   "Should be used with --disable option.")
        )
        up_down_group = parser.add_mutually_exclusive_group()
        up_down_group.add_argument(
            '--up',
            action='store_true',
            help=_('Force up service. Requires ``--os-compute-api-version`` '
                   '2.11 or greater.'),
        )
        up_down_group.add_argument(
            '--down',
            action='store_true',
            help=_('Force down service. Requires ``--os-compute-api-version`` '
                   '2.11 or greater.'),
        )
        return parser

    @staticmethod
    def _find_service_by_host_and_binary(cs, host, binary):
        """Utility method to find a compute service by host and binary

        :param host: the name of the compute service host
        :param binary: the compute service binary, e.g. nova-compute
        :returns: novaclient.v2.services.Service dict-like object
        :raises: CommandError if no or multiple results were found
        """
        services = cs.list(host=host, binary=binary)
        # Did we find anything?
        if not len(services):
            msg = _('Compute service for host "%(host)s" and binary '
                    '"%(binary)s" not found.') % {
                'host': host, 'binary': binary}
            raise exceptions.CommandError(msg)
        # Did we find more than one result? This should not happen but let's
        # be safe.
        if len(services) > 1:
            # TODO(mriedem): If we have an --id option for 2.53+ then we can
            # say to use that option to uniquely identify the service.
            msg = _('Multiple compute services found for host "%(host)s" and '
                    'binary "%(binary)s". Unable to proceed.') % {
                'host': host, 'binary': binary}
            raise exceptions.CommandError(msg)
        return services[0]

    def take_action(self, parsed_args):
        """Apply the requested enable/disable and/or up/down state changes.

        Failures of individual API calls are logged and counted, and a single
        CommandError is raised at the end if anything failed.
        """
        compute_client = self.app.client_manager.compute
        cs = compute_client.services
        # --disable-reason only makes sense together with --disable.
        if (parsed_args.enable or not parsed_args.disable) and \
                parsed_args.disable_reason:
            msg = _("Cannot specify option --disable-reason without "
                    "--disable specified.")
            raise exceptions.CommandError(msg)
        # Starting with microversion 2.53, there is a single
        # PUT /os-services/{service_id} API for updating nova-compute
        # services. If 2.53+ is used we need to find the nova-compute
        # service using the --host and --service (binary) values.
        requires_service_id = (
            compute_client.api_version >= api_versions.APIVersion('2.53'))
        service_id = None
        if requires_service_id:
            # TODO(mriedem): Add an --id option so users can pass the service
            # id (as a uuid) directly rather than make us look it up using
            # host/binary.
            service_id = SetService._find_service_by_host_and_binary(
                cs, parsed_args.host, parsed_args.service).id
        result = 0
        enabled = None
        try:
            if parsed_args.enable:
                enabled = True
            if parsed_args.disable:
                enabled = False
            if enabled is not None:
                if enabled:
                    # Pre-2.53 the API is addressed by (host, binary);
                    # 2.53+ takes the service UUID instead.
                    args = (service_id,) if requires_service_id else (
                        parsed_args.host, parsed_args.service)
                    cs.enable(*args)
                else:
                    if parsed_args.disable_reason:
                        args = (service_id, parsed_args.disable_reason) if \
                            requires_service_id else (
                                parsed_args.host,
                                parsed_args.service,
                                parsed_args.disable_reason)
                        cs.disable_log_reason(*args)
                    else:
                        args = (service_id,) if requires_service_id else (
                            parsed_args.host, parsed_args.service)
                        cs.disable(*args)
        except Exception:
            status = "enabled" if enabled else "disabled"
            LOG.error("Failed to set service status to %s", status)
            result += 1
        force_down = None
        if parsed_args.down:
            force_down = True
        if parsed_args.up:
            force_down = False
        if force_down is not None:
            # Forcing state requires the 2.11 microversion.
            if compute_client.api_version < api_versions.APIVersion(
                    '2.11'):
                msg = _('--os-compute-api-version 2.11 or later is '
                        'required')
                raise exceptions.CommandError(msg)
            try:
                args = (service_id, force_down) if requires_service_id else (
                    parsed_args.host, parsed_args.service, force_down)
                cs.force_down(*args)
            except Exception:
                state = "down" if force_down else "up"
                LOG.error("Failed to set service state to %s", state)
                result += 1
        if result > 0:
            msg = _("Compute service %(service)s of host %(host)s failed to "
                    "set.") % {"service": parsed_args.service,
                               "host": parsed_args.host}
            raise exceptions.CommandError(msg)
|
Located in Napa, Vino Bello Resort is minutes from Trinitas Cellars and close to Napa Symphony. This 4-star resort is within close proximity of Napa Golf Course at Kennedy Park and Napa Valley College.
Pamper yourself with a visit to the spa, which offers massages, body treatments, and facials. You’re sure to appreciate the recreational amenities, which include 2 outdoor swimming pools, a steam room, and a fitness center. Additional features at this resort include complimentary wireless Internet access, concierge services, and a picnic area. Getting to nearby attractions is a breeze with the complimentary area shuttle.
Enjoy a meal at a restaurant or in a coffee shop/café. Or stay in and take advantage of the resort’s 24-hour room service. Relax with a refreshing drink from a poolside bar or one of the 2 bars/lounges.
We booked the hotel online and it looked nice. Until we got there. They send you to check in at the Meritage, which is much nicer, but you have to figure it out by yourself. No one helps.
The front desk staff will insist you talk to the concierge and we gladly did because we wanted dinner recommendations, however... it's a trap! They are super pushy, super annoying time share salespeople. The only recommendations they give you are the hotel restaurants and the "Village" in front. They don't help their guests at all; they are just interested in selling, so why they call it concierge is beyond me.
After the first day we found out nobody fixed up our room, when we called they told us they don't do it unless you ask because the hotel is "Eco-friendly" (what does making a bed and fixing the room a bit have to do with being eco-friendly?), we complained that nobody told us that and the staff just answered "they should've told you" (yeah, thanks for that).
So basically they never forget to sell you timeshares or recommend the hotel restaurants, but do forget other important information you need. They also charge you a ridiculous 27 USD per day for a resort fee that doesn't make any sense.
AVOID AVOID AVOID. You can do much better than this.
We have been here for only one day and have really enjoyed our stay. The resort is really nice, grounds are pretty, peaceful and large. Our one bedroom is really nice, large, with a very large jetted tub. The pool area is very nice with a pool for each area. Large wine tasting right across the street. The staff are real nice, attentive, and really make sure you have a nice stay.
My friends and I stayed here recently at the recommendation of my sister in law. This place is a beautiful resort area and the spa is amazing! Rooms were very clean, staff extremely friendly. We ate at the Siena Restaurant for breakfast. You have the option of off the menu or the buffet, which looked beautiful. Their eggs Benedict is probably the best I’ve ever had. We also had an evening meal there as well. Which also had very good food. Each room comes with 2 bottles of water and complementary bottle of wine. I would definitely stay here again.
My wife and I have always had good experiences at Vino Bello Resort. We think the decor is nice, our rooms have always been well maintained, and the location works for us. Noting some other reviews, though, I'd like to clarify a few things, some of which I really do not understand. Check-in is shared with the Meritage, which makes for confusion about which parts of the property are "Vino Bello" and which are "Meritage." To add to the confusion, "Vista Collina" has been built across the street, and the three properties clearly share some kind of affiliation. The Vino Bello is undergoing renovations, so some reviewers are commenting on rooms that are due for renovation while others are commenting on renovated rooms. The property is located "in a vineyard." From my perspective, the property developers planted some grape vines on the property. That's not quite the same thing. Also, although there are vineyards in the area, the character of the property is influenced by the adjacent office park (definitely not an industrial park) as much as it is influenced by the grape vines. All of those comments notwithstanding, we think it is a very nice resort and will be returning at least twice this year.
Now as for the "concierge" game, don't be misled. Shell Vacations uses the ruse at many (all?) of their properties of requiring visitors to see the concierge to get a parking pass. The concierge has the primary function of getting guests to sign up for a timeshare sales pitch, even to those of us who already own significant "points." The sign-up request normally follows the offering of some incentives, such as two-for one wine tasting coupons. Only upon accepting the offer of incentives does it become apparent that they are available only if you sit through a sales pitch.
After sitting through several of the sales pitches -- and after having bought several "points," I have come to realize at least two things: The benefits we actually purchase are never what are promised, and to get what we think we bought always requires a purchase of more points. If the parent company focused as much on service as it does on sales, it would be a much better company.
This property has become a money maker for Shell Vacations Club owners, who are now renting out their units to Airbnb guests, making it impossible for real owners to book units for their own use. The units are made up so guests must place their towels on the racks and put kitchen items in their respective drawers if they are to cook in the unit. Everything is now placed on a shelf, and guests must put out everything they use on racks and in drawers. Some drawers are crammed together with kitchen items. In the past, everything was placed so that it was accessible; now it's not the outstanding property it once was. The Tuscan decor is gone and the units look like any other hotel's. I used to recommend this property, but now I recommend staying in the Desert Rose property, where it's more like what the Vino Bello used to be. There is a real business and recreation room there that is nowhere to be found at the Vino Bello.
|
from django.contrib.gis.db import models
from django.contrib.gis.db.backends.base.models import SpatialRefSysMixin
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class HanaGeometryColumns(models.Model):
    """
    Maps to the HANA ST_GEOMETRY_COLUMNS view.
    """
    schema_name = models.CharField(max_length=256, null=False)
    table_name = models.CharField(max_length=256, null=False)
    column_name = models.CharField(max_length=256, null=False)
    srs_id = models.IntegerField(null=False)
    srs_name = models.CharField(max_length=256)
    data_type_name = models.CharField(max_length=16)

    class Meta:
        app_label = 'gis'
        db_table = 'ST_GEOMETRY_COLUMNS'
        managed = False  # read-only system view; Django must not migrate it

    @classmethod
    def table_name_col(cls):
        """
        Returns the name of the metadata column used to store the feature table
        name.
        """
        return 'table_name'

    @classmethod
    def geom_col_name(cls):
        """
        Returns the name of the metadata column used to store the feature
        geometry column.
        """
        return 'column_name'

    def __str__(self):
        # Bug fix: this model defines ``srs_id`` (see field above); the
        # previous ``self.srid`` raised AttributeError when printed.
        return '%s - %s (SRID: %s)' % (self.table_name, self.column_name, self.srs_id)
class HanaSpatialRefSys(models.Model, SpatialRefSysMixin):
    """
    Maps to the SAP HANA SYS.ST_SPATIAL_REFERENCE_SYSTEMS view.

    Read-only (``managed = False``); ``SpatialRefSysMixin`` consumes the
    WKT exposed via the ``wkt`` property below.
    """
    owner_name = models.CharField(max_length=256)
    srs_id = models.IntegerField(null=False)
    srs_name = models.CharField(max_length=256, null=False)
    # The remaining columns mirror the HANA view one-to-one; their exact
    # semantics/units are defined by SAP HANA -- see the view documentation.
    round_earth = models.CharField(max_length=7, null=False)
    axis_order = models.CharField(max_length=12, null=False)
    snap_to_grid = models.FloatField()
    tolerance = models.FloatField()
    semi_major_axis = models.FloatField()
    semi_minor_axis = models.FloatField()
    inv_flattening = models.FloatField()
    min_x = models.FloatField()
    max_x = models.FloatField()
    min_y = models.FloatField()
    max_y = models.FloatField()
    min_z = models.FloatField()
    max_z = models.FloatField()
    organization = models.CharField(max_length=256)
    organization_coordsys_id = models.IntegerField(null=False)
    srs_type = models.CharField(max_length=11, null=False)
    linear_unit_of_measure = models.CharField(max_length=256, null=False)
    angular_unit_of_measure = models.CharField(max_length=256)
    polygon_format = models.CharField(max_length=16, null=False)
    storage_format = models.CharField(max_length=8, null=False)
    definition = models.CharField(max_length=5000)
    transform_definition = models.CharField(max_length=5000)
    objects = models.GeoManager()

    class Meta:
        app_label = 'gis'
        db_table = 'ST_SPATIAL_REFERENCE_SYSTEMS'
        managed = False

    @property
    def wkt(self):
        # SpatialRefSysMixin expects the WKT here; HANA stores it in
        # the ``definition`` column.
        return self.definition

    @classmethod
    def wkt_col(cls):
        """Returns the name of the column holding the WKT definition."""
        return 'definition'
|
Washington(CNN) Nine high-profile Russians, including several high-profile diplomats, have died over the nine months since the US presidential election on November 8.
Among the recent deaths were six Russian diplomats. Some of the deaths appeared natural and governments have ruled out foul play.
In some cases, though, questions remain. That's either because the facts have changed over time, details are hard to come by, or the deaths are still under investigation.
Self-proclaimed online sleuths and conspiracy theorists have filled the information void with speculation that the deaths were somehow related to Russia's interference in the 2016 presidential election. No evidence has surfaced to make such a connection.
In this photo taken on Monday, Feb. 27, 2017, Denis Voronenkov visits a movie theater in Kiev, Ukraine.
Russia's ambassador to Sudan, Migayas Shirinskiy, died in Khartoum on Wednesday, August 23, according to Russia's Foreign Ministry.
He died while swimming in the pool of his residence, according to state broadcaster Russia 24, after manifesting symptoms of an acute heart attack.
Embassy spokesman Sergei Konyashin said the staff called an ambulance but doctors were not able to save the ambassador. He was appointed to the post in 2013.
The Sudanese police have ruled out the possibility of an assassination attempt, Russia 24 reported, quoting local law enforcement agencies.
Denis Voronenkov, 45, was gunned down Thursday outside a hotel in Kiev. Voronenkov and his wife both spoke out against Russian President Vladimir Putin after they left Russia for Ukraine in October.
Voronenkov also helped Ukraine in its ongoing fight against Russian influence, testifying in a treason trial against ex-Ukrainian President Viktor Yanukovych, who was perceived as a puppet politician for Putin.
Ukraine's president called the shooting a "Russian state terrorist act." Russian authorities denied the accusation.
Vitaly Churkin, 64, Russia's ambassador to the United Nations, died on February 20 of an apparent heart attack. He was "in his office fulfilling his duties" when he died, according to a statement from the Russian mission at the UN.
Alexander Kadakin, 67, the Russian ambassador to India, died on January 26.
A spokeswoman for the Russian embassy in New Delhi said that Kadakin died after a short illness and that there was nothing "special or extraordinary" about the circumstances that led to his death.
Kadakin had worked in India since 2009. Indian Prime Minister Narendra Modi described him as "a great friend of India" who worked hard to strengthen relations between the two countries.
Malanin, 54, was the head of the Russian embassy's consular section in Athens. Police sources told CNN that worried colleagues called authorities after Malanin didn't show up to work for a few days. Police entered his apartment on January 9 and found him dead on his bedroom floor.
Initial reports from Greek police suggested Malanin died suddenly from natural causes. Two Greek police officials said foul play was not suspected. An investigation remains underway.
Oleg Erovinkin, who had close ties to Russian intelligence, was found dead on December 26 sitting in his car on the streets of Moscow. Russian news outlets reported that he was 61 years old. Russian government agencies have not released an official cause of death.
He was a former general in the Russian law enforcement and intelligence agency known as the FSB. He also served as chief of staff to Igor Sechin, the president of state-owned oil giant Rosneft. Sechin enjoys a close relationship with Putin that dates back to the 1990s.
Because of Erovinkin's background, conspiracy theorists and Russia watchers have speculated that he might have been a source of information in the 35-page dossier that detailed alleged links between the Trump campaign and Russia. No evidence has emerged to firmly substantiate those claims.
Graphic content / This picture taken on December 19, 2016 shows Andrey Karlov, the Russian ambassador to Ankara, lying on the floor after being shot by a gunman during an attack during a public event in Ankara.
Russia's ambassador to Turkey, Andrey Karlov, 62, was assassinated in Ankara on December 20. He was shot at point-blank range by a gunman while speaking at an art exhibition. The shooter, who was a Turkish police officer, shouted "do not forget Syria" during the assassination.
The same day as Karlov's killing, Petr Polshikov, 56, a senior Russian diplomat, was shot to death in his Moscow home, according to Moscow newspaper Moskovskij Komsomolets. The paper said Polshikov's wife found him in their bedroom with a pillow over his head. Underneath the pillow, police found Polshikov with a head wound.
A spokesman from the Russian Foreign Ministry said Polshikov's death was likely an accident and had nothing to do with his official government duties, according to Russian news outlet REN-TV.
On the morning of the US election, November 8, about an hour after the first polls opened in New York City, police received a 911 call about an unconscious man inside the Russian consulate. When they arrived, they found Sergei Krivov, 63, unresponsive. Emergency responders declared him dead at the scene.
The initial police report filed on the day of the incident said Krivov was found "with an unknown trauma to the head," according to a New York Police Department spokesman.
However, after conducting an autopsy and finishing its investigation, the New York City Medical Examiner ruled that Krivov died from bleeding in the chest area, likely due to a tumor. Police sources said foul play wasn't suspected and that Krivov had been in poor health.
In March, a private Russian lawyer on an anti-corruption crusade reportedly fell from the fourth floor of his Moscow apartment.
Nikolai Gorokhov, 53, was near death with "severe head injuries" and remains in a hospital's intensive care unit, according to his friend, investor Bill Browder.
Gorokhov represented Sergei Magnitsky, a fellow Russian lawyer who exposed Russia's largest ever tax fraud -- and was later jailed and beaten to death in a Moscow detention center. Gorokhov continued his client's fight.
CNN's Medhavi Arora, Elinda Labropoulou and Simon Ostrovsky contributed to this report.
|
"""
Simple Linear Regression: y = w0 + w1*x
Objective: Estimate w0 and w1, given x and y
where x = input feature
y = output
w0 = intercept
w1 = slope
"""
# Imports
import numpy as np
# Functions
def simple_linear_regression(input_feature, output):
    """
    Purpose: Compute intercept and slope
    Input  : input_feature (x), output (y)
    Output : Estimate of intercept (w0) and slope (w1)
    """
    x_mean = np.mean(input_feature)
    y_mean = np.mean(output)
    # Closed-form least squares: slope = sum(x*(y - y_mean)) / sum(x*(x - x_mean))
    numerator = np.sum(input_feature * output - input_feature * y_mean)
    denominator = np.sum(input_feature * input_feature - input_feature * x_mean)
    slope = numerator / denominator
    # The fitted line passes through the mean point (x_mean, y_mean).
    intercept = y_mean - slope * x_mean
    return (intercept, slope)
def get_regression_predictions(input_feature, intercept, slope):
    """
    Purpose: Compute predictions
    Input  : input_feature (x), intercept (w0), slope (w1)
    Output : Predicted output based on estimated intercept, slope and input feature
    """
    # y_hat = w0 + w1 * x; works element-wise for numpy arrays and for scalars.
    return intercept + slope * input_feature
def get_residual_sum_of_squares(input_feature, output, intercept, slope):
    """
    Purpose: Compute Residual Sum of Squares (RSS)
    Input  : input_feature (x), output (y),
             intercept (w0), slope (w1)
    Output : Residual sum of squares = sum((actual output (y) - predicted output)^2)
    """
    predictions = intercept + slope * input_feature
    residuals = output - predictions
    return np.sum(residuals ** 2)
def inverse_regression_predictions(output, intercept, slope):
    """
    Purpose: Estimate the input feature that would produce a given output
             (the docstring previously mis-described this as computing RSS)
    Input  : output (y), intercept (w0), slope (w1)
    Output : Estimated input (x) such that w0 + w1*x = y
    """
    # Invert y = w0 + w1*x  =>  x = (y - w0) / w1
    estimated_input = (output - intercept)/slope
    return(estimated_input)
|
It’s a complex issue with a lot of things to consider, but we hope to explain many of the questions we’ve been hearing to keep you informed.
Why do we need a new high school? Where will we build it? We’ll do a quick study on school finances, looking at how much each option will cost and how that decision will impact taxpayers. Finally, how will this decision benefit students? Because after all, schools are for kids.
Initially built in 1960, our current building as it stands today has 15 additions and 13 elevation changes. The first addition was put on in 1962, offices were added in 1963, the pool in 1965, and then a gymnasium was added on in 1966.
When Haila Architecture did their Phase 1 study, they found that with the exception of the pool and a few other areas, the overall exterior structure is actually okay. It’s the interior that’s becoming more of a functional challenge. Some of the major concerns cited in the Phase 1 study include the circulation of students within the building, accessibility, along with security features.
|
#!/usr/bin/python
# NOTE(review): `sys` appears unused in this script.
import sys
from nwb import nwb_file
from nwb import nwb_utils as ut
"""
Test extension defining a link
"""
OUTPUT_DIR = "../created_nwb_files/"
# output file is named after this script, with ".py" replaced by ".nwb"
file_name = __file__[0:-3] + ".nwb"
########################################################################
# create a new NWB file
settings = {}
settings["file_name"] = OUTPUT_DIR + file_name
settings["identifier"] = ut.create_identifier("test link extension.")
# mode "w" overwrites any previous output file
settings["mode"] = "w"
settings["start_time"] = "Sat Jul 04 2015 3:14:16"
settings["description"] = ("Test making a link in the /analysis group "
    "that is defined by an extension.")
# specify the extension (Could be more than one. Only one used now).
settings['extensions'] = ["extensions/e-link_test.py"]
# create the NWB file object. this manages the file
print("Creating " + settings["file_name"])
f = nwb_file.open(**settings)
########################################################################
# This example, stores spike times for multiple sweeps
# create the group for the spike times
# The group ("aibs_spike_times") is defined in the extension
ast = f.make_group("aibs_spike_times")
# some sample data
times = [1.1, 1.2, 1.3, 1.4, 1.5]
# store the sample data in a dataset defined by the extension
pto = ast.set_dataset("pixel_time_offsets", times)
# now make the link (a dataset that points at "pixel_time_offsets")
ptl = ast.set_dataset("pto_link", pto, attrs={"hello": "Natasha"})
# attach an additional attribute to the link dataset
ptl.set_attr("Mary", "bendrich")
# all done; close the file
f.close()
|
Disseminated at a hyperbolic rate through social media’s ‘telescreens’, the ‘Ministry of Truth’ distributes ‘Newspeak’. It’s not George Orwell’s 1984. It’s now.
Today, this type of truth has no intrinsic value, but it is shaping our world. Audiences no longer trust (or bother to check) the provenance of ‘facts’.
The illusion of truth is so much easier on the mind, according to Daniel Kahneman, author of Thinking, Fast and Slow, explaining why people voted for Trump or Brexit even if the ‘lies’ were obvious.
Instead, ‘alternative facts’ has entered popular vernacular and post-truth became the Oxford Dictionaries’ international word of 2016. Ordinarily, the US and UK editorial teams have different contenders for word of the year.
But from the moment that Brexit became reality and Trump was elected President of the United States both sides of the Pond agreed on the populist power of post truth.
The term used to mean ‘after the truth was known’. This is now redundant. Post truth is an adjective defined as ‘relating to or denoting circumstances in which objective facts are less influential in shaping public opinion than appeals to emotion and personal belief’.
The quality of being felt to be true is now winning hearts and minds. Even if not necessarily true, ‘truthiness’, a term coined by American comedian Stephen Colbert in 2005, is how elections are won.
So, if post-truth is now truthiness, have we entered the realm of lies? PolitiFact, a fact checking website, seems to think so. Fake news was the winner of its Lie of the Year in 2016. Yet some publishers, such as Time Inc., are unwilling to use the word lie unless they can prove the speaker’s intent.
Fake news, however, is not a new thing. After William Jennings Bryan lost the US presidential election to William McKinley in 1896, he started a newspaper to counter an ‘epidemic of fake news’. Today, Bryan would have just tweeted instead.
In 2016, fake news became a currency. It was the protagonist of ‘click bait’ fiction on fake news sites¹ built by people to make money and manipulate electoral outcomes (even if Google and Facebook eventually tried to stem this).
Lying has mutated. By relying on the widespread use of social media networks for news consumption—62% of US adults get news on social media—not only does the ‘lie’ go viral but it reaches hyper-connected super-hubs² of like-minded believers.
When Yochai Benkler wrote The Wealth of Networks in 2006, he could not have foreseen the power of algorithm-driven homophilous-sorting. In a similar fashion to Microsoft’s Tay, the power of like has taken a sinister turn.
In The Square and the Tower: The Rise and Fall and Rise of Networks, due out later this year, Niall Ferguson suggests that the global network is perilously unstable.
Politicians on both sides of the Pond have been caught spinning obvious yarns—cost of EU membership and the NHS and the size of Trump’s inauguration crowd³—but they did not seem to care.
So, why should we? As investors, our role is to discern the key truths that will determine the future prices of assets. As far back as 18 months ago, our view was that a macro-economic regime change was coming with respect to government bond yields after years of continuous deflation.
The view expressed in advance of Brexit was that populism and populist leaders had moved from a late-night TV show joke to political reality, convincing us that rising rates were about to become the new way of life.
But, maybe there’s a deeper and more obvious truth. Writing in the May 2008 Harper’s Magazine, Kevin Phillips reported that changes to the Consumer Price Index beginning in the 1960s have increasingly distorted official statistics.
If this ‘fact’ still holds true, then the real ‘fake news’ in today’s financial markets is that the CPI calculation methods employed by Western governments has systematically under-stated inflation.
In the mid ‘90s, the consumer household baskets used to calculate inflation were adjusted to take account of the effect of the continuing decline of equivalent technology (screens of all types – pc, tablet, TV).
The economic corollary of Moore’s Law. Now, even Wired thinks that Moore’s Law is on the wane and MIT’s Technology Review has pronounced it dead. Whose household expenditure on gadgets has declined anyway? The real cost of everything from food staples to education and healthcare is rising.
Inflation. It’s not only coming, it’s already here. Right in front of us. To quote Mr. Trump (misquoting Thomas Jefferson) “don’t believe the newspapers”.
|
##########################################################################################
# Program Name : Discord Bot
# Author : DMCTruong
# Last Updated : August 31, 2017
# License : MIT
# Description : A general purpose bot written for Discord
##########################################################################################
# To do List:
# - Add a calculator
# - Add a translator: https://pypi.python.org/pypi/googletrans
# - Add a better tutorial on how to install and get the keys for the configurations
# - Add better documentation of the code
# - Add better documentation of installations
# - Redo the restart command
# - Return the user's avatar?
# - Update pyrebase commands
import discord
from discord.ext import commands
import logging
import asyncio
import configurations
from modules import database
from modules import help
from modules import miscellaneous
from modules import music
from modules import owner_only
from modules import timer
# ------------- Logging --------------
# When DISCORD_LOG is enabled in the configuration, write DEBUG-level
# discord.py logs to discord.log (mode='w' truncates it on each start).
if configurations.DISCORD_LOG in ["y", "Y", "yes", "Yes", "YES"]:
    logger = logging.getLogger('discord')
    logger.setLevel(logging.DEBUG)
    handler = logging.FileHandler(filename='discord.log', encoding='utf-8', mode='w')
    handler.setFormatter(logging.Formatter('%(asctime)s:%(levelname)s:%(name)s: %(message)s'))
    logger.addHandler(handler)
# if configurations.COMMAND_LOG in ["y", "Y", "yes", "Yes", "YES"]:
# if configurations.ERROR_LOG in ["y", "Y", "yes", "Yes", "YES"]:
# -------- Initialize the Bot --------
# Create the bot with the configured command prefix and register every
# feature module as a cog.
bot = commands.Bot(configurations.PREFIX)
bot.add_cog(database.Database(bot))
bot.add_cog(help.Help(bot))
bot.add_cog(miscellaneous.Miscellaneous(bot))
bot.add_cog(music.Music(bot))
bot.add_cog(owner_only.Owner_Only(bot))
bot.add_cog(timer.Time(bot))
print("\nPlease wait while the Bot logs in ...")
@bot.event
async def on_ready():
    # Called by discord.py once the connection is established.
    print("\nLogged in as:\n")
    print("Username : " + bot.user.name)
    # NOTE(review): this concatenation assumes bot.user.id is a str
    # (discord.py 0.16.x API, matching change_presence(game=...) below);
    # on discord.py >= 1.0 the id is an int and this would raise.
    print("ID Number: " + bot.user.id)
    print('\nType ">>help" in the Discord chat for the list of commands.')
    print("=============================================")
    await bot.change_presence(game=discord.Game(name='>>help for commands!'))
bot.run(configurations.BOT_TOKEN)
|
Need to promote your science tech or education brand? Pens, coffee mugs & T-shirts are too yesterday? We’ve custom imprinted lab glassware, magnifiers, biology tool cases, test tube racks, even Petri dishes! Choose the imprint method that works for you.
Silk screen is the most popular method we offer for custom imprinting. Single colors, typically white or black are the most common but we can do multicolor logos on a variety of magnifiers. Two colors and occasionally more can be done on beakers and flasks but there are limitations. Send us your logo so we can advise you.
Silk screen printed logos on magnifiers, beakers and tool cases. Not shown are Erlenmeyer and boiling flasks and glass prisms.
Decals, or more correctly, stickers, are a cost effective way to do full color logos. They are also best for recessed areas as seen in the test tube rack and minimagnifier shown below. Contrary to popular belief, they are as difficult to remove as print is since the high tack glue grips more the longer it stays on.
Decals (stickers) are an inexpensive way to add full color logos to a wide variety of products including test tube racks, plastic linen testers, mini-magnifiers and petri dishes. Not shown are ruler magnifiers, plastic geology loupes and card magnifiers.
Certain items such as science glassware require special handling for multiple colors and/or fine detail. The two large flasks shown below were done with high-definition pad printing, which can be expensive and is not recommended for sizes smaller than 1L. The credit card magnifiers were done with a digital press, which is best suited to large volumes.
Full color printing is the most expensive method but offers incomparable results. Not shown are boiling flasks and a variety of other magnifiers.
Laser etching is by its nature, monochromatic but has the advantage of being able to print your brand in nooks & crannies that regular print methods cannot reach such as the hand magnifier bottom upper right.
The 25mm (1″) metal linen tester shows the result of a fiber optic laser which is capable of delivering very high resolution etching.
Laser etching allows curved surfaces like magnifying lenses to be imprinted. The very fine details in the space shuttle are high definition quality.
Science-based promotional products have cachet. Our customers include breweries, distilleries, government agencies and boutiques, to name just a few. Science products with custom branding make a bold statement that stands out from the competition. Contact us and we can help you stand out too.
Not sure if we can do it? Send us your logos and what you want to print on. We can advise you within 24 hours M-F. JPEGS and other low resolution files are fine for evaluation but we do need vector art for the work itself.
Your school or high tech business might want something a bit different from the usual pens, coffee mugs and T-shirts to promote your brand but what? Consider lab glassware, magnifiers, test tubes racks; something that says science and is useful too.
|
#!/usr/bin/env python
# encoding: utf-8
import os, time, re
from metaphone import dm as double_metaphone
from relo.core.config import conf
from relo.core.log import logger
from relo.local import util
from relo.core.interfaces import Backend
from relo.yapsy.PluginManager import PluginManager
import hashlib
from progressbar import ProgressBar, RotatingMarker, Bar, Percentage, ETA, FormatLabel
from relo.core.backend import *
##### Inverted Index Variables #####
# The REDIS_KEY_* templates below are filled in with %-style dict
# formatting, e.g. REDIS_KEY_METAPHONE % {"project_id": ..., "metaphone": ...}.
# Words which should not be indexed
STOP_WORDS = ("the", "of", "to", "and", "a", "in", "is", "it", "you", "that")
# Do not index any words shorter than this
MIN_WORD_LENGTH = 3
# Consider these characters to be punctuation (they will be replaced with spaces prior to word extraction)
PUNCTUATION_CHARS = ".,;:!?@£$%^&*()-–<>[]{}\\|/`~'\""
# A redis key to store a list of metaphones present in this project
REDIS_KEY_METAPHONES = "id:%(project_id)s:metaphones"
# A redis key to store a list of item IDs which have the given metaphone within the given project
REDIS_KEY_METAPHONE = "id:%(project_id)s:mp:%(metaphone)s"
# A redis key to store a list of documents present in this project
REDIS_KEY_DOCUMENTS = "id:%(project_id)s:docs"
# A redis key to store meta information which are associated with the document within the given project
REDIS_KEY_DOCUMENT = "id:%(project_id)s:doc:%(document)s"
# A redis key to store a list of projects stored in the database
REDIS_KEY_PROJECTS = "projects"
class CustomIndex(object):
    """
    Base class for the concrete indexers (MetaIndex, InvertedIndex).

    Subclasses are expected to set ``self.directory`` and call
    ``setUpBackend()`` (which binds ``self.db``) before using the
    helper methods below.
    """
    def __init__(self):
        pass
    def setUpBackend(self):
        """Locate, load and activate the backend plugins, then bind
        self.db to the backend selected by the 'core.index' config."""
        self.backendManager = PluginManager(plugin_info_ext='relo')
        self.backendManager.setPluginPlaces(["relo/core/backend"])
        self.backendManager.locatePlugins()
        self.backendManager.loadPlugins("<class 'relo.core.interfaces.Backend'>", ['redis'])
        # activate every discovered backend plugin first ...
        for plugin in self.backendManager.getAllPlugins():
            self.backendManager.activatePluginByName(plugin.name)
        # ... then pick the configured one as the active database
        for plugin in self.backendManager.getAllPlugins():
            if plugin.name == conf.readConfig('core.index'):
                self.db = plugin.plugin_object
        self.db.init()
    def setUpProject(self, type):
        # register self.directory as a project of the given type
        self.db.addProject(REDIS_KEY_PROJECTS, self.directory, type)
    def listProject(self):
        """Record every regular file under self.directory in the
        project's document set (hidden files and symlinks skipped)."""
        for root, subFolders, files in os.walk(self.directory):
            for file in files:
                if file.startswith('.'):
                    continue
                itempath = os.path.join(root, file)
                if os.path.islink(itempath):
                    #print "link found" + itempath
                    continue
                self.db.addSet(REDIS_KEY_DOCUMENTS % {"project_id": self.directory}, itempath)
    def run(self):
        # to be overridden by subclasses
        pass
    def __end__(self):
        # to be overridden by subclasses (e.g. to close the backend)
        pass
class MetaIndex(CustomIndex):
    """
    Metadata indexer.

    Walks a directory tree and stores, for every regular file, its
    modification time, MD5 digest, size and detected file type in the
    configured backend (bound by CustomIndex.setUpBackend).
    """
    def __init__(self, directory, hidden=False):
        # `hidden` is accepted for interface parity with InvertedIndex
        # but is currently unused (dot-files are always skipped in run()).
        self.directory = os.path.abspath(directory)
        logger.head("Relo Index | meta | " + directory)
        self.setUpBackend()
        # consistent with InvertedIndex: complete base-class initialisation
        super(MetaIndex, self).__init__()
    def run(self):
        """Index metadata of all files under self.directory, showing a
        progress bar; hidden files and symlinks are skipped."""
        sTime = time.time()
        logger.log("Preparing Index...")
        total = util.countFiles(self.directory)
        logger.info("Indexing %d files..." % total)
        pTime = time.time()
        widgets = [FormatLabel(self.directory), ' ', Percentage(), ' ', Bar('/'), ' ', RotatingMarker(), ' ', ETA()]
        pbar = ProgressBar(widgets=widgets, maxval=total).start()
        for root, subFolders, files in os.walk(self.directory):
            for name in files:
                if name.startswith('.'):
                    continue  # skip hidden files
                itempath = os.path.join(root, name)
                if os.path.islink(itempath):
                    # skip symlinks to avoid cycles and double indexing
                    continue
                size = os.path.getsize(itempath)
                md5 = hashlib.md5()
                with open(itempath, 'rb') as f:
                    # hash in 8 KiB chunks so large files are not loaded
                    # into memory (the '' sentinel matches Python 2 str reads)
                    for chunk in iter(lambda: f.read(8192), ''):
                        md5.update(chunk)
                digest = md5.digest()
                modified = time.ctime(os.path.getmtime(itempath))
                file_type = util.getFileType(itempath)
                key = REDIS_KEY_DOCUMENT % {"project_id": self.directory, "document": itempath}
                self.db.addMeta(key, modified, digest, size, file_type)
                pbar.update(pbar.currval + 1)
        pbar.finish()
        eTime = time.time()
        iTime = eTime - pTime
        setupTime = pTime - sTime
        tTime = eTime - sTime
        logger.debug("(Setup : %0.2fs) - (Index : %0.2fs) - (Total : %0.2fs)" % (setupTime, iTime, tTime))
    def __end__(self):
        # close the backend connection
        self.db.end()
class InvertedIndex(CustomIndex):
    """
    Content indexer building an inverted index.

    Each file's text content is loaded through a doctype plugin, split
    into words, reduced to double-metaphone keys, and every metaphone is
    linked to the documents containing it so documents can later be found
    by phonetic word search.
    """
    def __init__(self, directory, hidden=False):
        # `hidden` is accepted for interface parity but currently unused.
        self.directory = os.path.abspath(directory)
        logger.head("| Relo Index | content | " + directory)
        self.setUpBackend()
        # compiled once, reused for every document by get_words_from_text
        self.punctuation_regex = re.compile(r"[%s]" % re.escape(PUNCTUATION_CHARS))
        super(InvertedIndex, self).__init__()
    def setUpDocType(self, extList):
        """Locate, load and activate the doctype plugins for extList."""
        self.extList = extList
        self.docTypeManager = PluginManager(plugin_info_ext='relo')
        self.docTypeManager.setPluginPlaces(["relo/core/doctype"])
        self.numPlugins = self.docTypeManager.locatePlugins()
        self.docTypeManager.loadPlugins("<class 'relo.core.interfaces.DocType'>", extList=extList)
        pluginList = []
        for plugin in self.docTypeManager.getAllPlugins():
            self.docTypeManager.activatePluginByName(plugin.name)
            pluginList.append(plugin.plugin_object.meta())
    def get_words_from_text(self, text):
        """Extract a list of words to index from the given text."""
        if not text:
            return []
        # replace punctuation by spaces, then keep only words that are
        # long enough and are not stop words
        text = self.punctuation_regex.sub(" ", text)
        words = [word for word in text.split() if len(word) >= MIN_WORD_LENGTH and word.lower() not in STOP_WORDS]
        return words
    def get_metaphones(self, words):
        """Get the metaphones for a given list of words"""
        metaphones = set()
        for word in words:
            metaphone = double_metaphone(unicode(word, errors='ignore'))
            metaphones.add(metaphone[0].strip())
            if(metaphone[1]):
                # secondary metaphone (only some words have one)
                metaphones.add(metaphone[1].strip())
        return metaphones
    def index_item(self, item, content):
        """Index one document: extract words, metaphone them, link them."""
        words = self.get_words_from_text(content)
        metaphones = self.get_metaphones(words)
        for metaphone in metaphones:
            self._link_item_and_metaphone(item, metaphone)
    def _link_item_and_metaphone(self, item, metaphone):
        # Add the item to the metaphone key
        redis_key = REDIS_KEY_METAPHONE % {"project_id": self.directory, "metaphone": metaphone}
        self.db.addSet(redis_key, item)
        # Make sure we record that this project contains this metaphone
        redis_key = REDIS_KEY_METAPHONES % {"project_id": self.directory}
        self.db.addSet(redis_key, metaphone)
    def remove_project(self):
        """Remove the existing index for the project"""
        # Remove all the existing index data
        redis_key = REDIS_KEY_METAPHONES % {"project_id": self.directory}
        project_metaphones = self.db.smembers(redis_key)
        if project_metaphones is None:
            project_metaphones = []
        self.db.delete(redis_key)
        for project_metaphone in project_metaphones:
            # NOTE(review): uses self.db.redis.delete while the call above
            # uses self.db.delete -- confirm both are valid on the backend.
            self.db.redis.delete(REDIS_KEY_METAPHONE % {"project_id": self.directory, "metaphone": project_metaphone})
        return True
    def load(self, itempath):
        """Load a file's content via the plugin matching its file type,
        falling back to the DEFAULT plugin."""
        for plugin in self.docTypeManager.getAllPlugins():
            if plugin.name == util.getFileType(itempath).upper():
                return plugin.plugin_object.load(itempath)
        plugin = self.docTypeManager.getPluginByName("DEFAULT")
        return plugin.plugin_object.load(itempath)
    def run(self):
        """Walk self.directory and index the content of every file."""
        sTime = time.time()
        logger.log("Preparing Index...")
        count = util.countFiles(self.directory)
        # collect the set of file extensions present so that only the
        # required doctype plugins are loaded
        size, file_list = util.recursiveListFiles(self.directory, False)
        extList = ['default']
        for item in file_list:
            ext = util.getFileType(item)
            if ext not in extList:
                extList.append(ext)
        del file_list
        self.setUpDocType(extList)
        del extList
        logger.info("Indexing %d files..." % count)
        pTime = time.time()
        widgets = [FormatLabel(self.directory), ' ', Percentage(), ' ', Bar('/'), ' ', RotatingMarker(), ' ', ETA()]
        pbar = ProgressBar(widgets=widgets, maxval=count).start()
        for root, subFolders, files in os.walk(self.directory):
            for name in files:
                if name.startswith('.'):
                    continue  # skip hidden files
                itempath = os.path.join(root, name)
                if os.path.islink(itempath):
                    # skip symlinks to avoid cycles and double indexing
                    continue
                content = self.load(itempath)
                self.index_item(itempath, content)
                pbar.update(pbar.currval + 1)
        pbar.finish()
        eTime = time.time()
        iTime = eTime - pTime
        setupTime = pTime - sTime
        tTime = eTime - sTime
        logger.debug("(Setup : %0.2fs) - (Index : %0.2fs) - (Total : %0.2fs)" % (setupTime, iTime, tTime))
    def __end__(self):
        # close the backend connection
        self.db.end()
|
A while back I was asked to document Ndamu & Basa’s engagement .
Ndamu requested that i stay unnoticed as he didn’t want Basa to be suspicious, so I hid in a bush with my trusty long range lens and created these beautiful pictures.
Being deeply loved by someone gives you strength, while loving someone deeply gives you courage.
"I will love you until the stars go out, and the tides no longer turn".
It's only a few days until their wedding and we absolutely can't wait to shoot it.
Thank you so much, Dakalo & Dakalo, for letting us capture these beautiful pictures.
Can't wait to upload your wedding pictures.
Thank you, Lehlogonolo & Phokela, for letting me capture these beautiful memories of your family. We killed two birds with one stone: a family photoshoot plus a pre-wedding shoot. It was so much fun and I can't wait to shoot your wedding this coming Saturday.
|
# -*- coding: utf-8 -*-
import math
import os
import wx
from wx.lib.mixins.listctrl import getListCtrlSelection
try:
from wx.lib.pubsub.pub import Publisher
except ImportError:
from wx.lib.pubsub import pub
import matplotlib.cm as cm
from matplotlib.figure import Figure
from matplotlib.backends.backend_wxagg import \
FigureCanvasWxAgg as FigCanvas, \
NavigationToolbar2WxAgg as NavigationToolbar
from matplotlib.backends.backend_wx import \
_load_bitmap
from FitDialog import FitDialog
import fit
import mbox
class MyCustomToolbar(NavigationToolbar):
    """Standard matplotlib wx toolbar extended with an 'Export data' tool
    that writes the currently plotted curves to text files."""
    # toolbar id for the extra export tool
    EXPORT_DATA = wx.NewId()
    def __init__(self, plotCanvas):
        # create the default toolbar
        NavigationToolbar.__init__(self, plotCanvas)
        # find where icons are located
        path = os.path.dirname(__file__)
        icon_file = os.path.join(path, 'data-export-icon.png')
        self.AddSimpleTool(self.EXPORT_DATA, _load_bitmap(icon_file),
                           'Export data', 'Export current data to file')
        wx.EVT_TOOL(self, self.EXPORT_DATA, self._on_export_data)
    def _on_export_data(self, evt):
        """Ask for a target directory and write one <axis title>.dat file
        per axis: X column plus one Y column per plotted line."""
        # remember the last used directory between invocations
        if not hasattr(self, 'dirname'):
            self.dirname = os.path.expanduser('~')
        dlg = wx.DirDialog(self, "Choose a directory to export data to", self.dirname, wx.DD_DEFAULT_STYLE)
        if dlg.ShowModal() == wx.ID_OK:
            self.dirname = dlg.GetPath()
        else:
            # user cancelled: nothing to export
            dlg.Destroy()
            return 1
        dlg.Destroy()
        # write each axis data in separate file
        for axis in self.canvas.figure.get_axes():
            # axis title - the name of the file
            title = axis.get_title()
            # column headers are taken from the figure legend
            l = [t.get_text() for t in self.canvas.figure.legends[0].get_texts()]
            if os.sep in l[0]:
                # legend entries look like paths: shorten to components 1-2
                l = [t.split(os.sep) for t in l]
                l = ['.'.join(t[1:3]) for t in l]
            # getting data; X is taken from the longest line on the axis
            x_max = 0
            y = []
            for line in axis.get_lines():
                x_c = len(line.get_xdata())
                if x_c > x_max:
                    x_max = x_c
                    x = line.get_xdata()
                y.append(line.get_ydata())
            # printing data to file
            f = open(os.path.join(self.dirname, title.replace('/','_') + '.dat'), 'w')
            head = [' X '] + l
            hl = [len(t) for t in l]
            hf = '{0[0]:7} '
            for i in range(1, len(l) + 1):
                # pad each header column to the width of its legend text
                hf += ' {0[%i]:%i} ' % (i, hl[i-1])
            f.write(hf.format(head) + '\n')
            y_max = [len(yi) for yi in y]
            for xi in range(x_max):
                # which lines still have a point at index xi
                is_y = [yi > xi for yi in y_max]
                data = [x[xi]]
                df = '{0[0]:^7.3f} '
                for yi, is_yi in enumerate(is_y):
                    if is_yi:
                        data.append(y[yi][xi])
                        # NOTE(review): hl[yi-1] wraps to hl[-1] when yi == 0;
                        # confirm the intended width/column pairing here.
                        df += ' {0[%i]:^%i.5f} ' % (len(data) - 1, hl[yi-1])
                    else:
                        df += ' ' * (hl[yi-1] + 2)
                f.write(df.format(data) + '\n')
            f.close()
        mbox.DataExported(self.dirname)
class PlotFrame(wx.Frame):
    """
    Base frame with a matplotlib canvas, a list control of plottable
    items and Replot / Show info / Fit buttons.

    Subclasses must define a class attribute ``title`` (read by
    __set_properties) and override the handler methods at the bottom
    (see PlotFuncFrame and PlotCorrFrame).
    """
    def __init__(self, *args, **kwds):
        # begin wxGlade: PlotFrame.__init__
        kwds["style"] = wx.DEFAULT_FRAME_STYLE
        wx.Frame.__init__(self, *args, **kwds)
        self.CreateMplFigure()
        self.PlotsCtrl = wx.ListCtrl(self.panel, -1, style=wx.LC_REPORT|wx.SUNKEN_BORDER)
        self.ByCalcsChkBox = wx.CheckBox(self.panel, -1, 'Group by calcs')
        self.ReplotBtn = wx.Button(self.panel, -1, "Replot!")
        self.ShowInfoBtn = wx.Button(self.panel, -1, "Show info")
        self.FitBtn = wx.Button(self.panel, -1, "Begin fit")
        self.__set_properties()
        self.__do_layout()
        self.Bind(wx.EVT_BUTTON, self.ReplotBtnPress, self.ReplotBtn)
        self.Bind(wx.EVT_BUTTON, self.InfoBtnPress, self.ShowInfoBtn)
        self.Bind(wx.EVT_BUTTON, self.FitBtnPress, self.FitBtn)
        self.Bind(wx.EVT_CHECKBOX, self.ByCalcsCheck, self.ByCalcsChkBox)
        self.Bind(wx.EVT_CLOSE, self.OnClose, self)
        self.Center()
        # end wxGlade
        self.PlotsCtrl.InsertColumn(0,'Data', width = 100)
    def CreateMplFigure(self):
        """Create the panel, matplotlib figure, canvas and toolbar."""
        self.panel = wx.Panel(self)
        self.dpi = 100
        self.fig = Figure((8.0, 6.4), dpi=self.dpi)
        self.canvas = FigCanvas(self.panel, -1, self.fig)
        self.axes = self.fig.add_subplot(111)
        self.toolbar = MyCustomToolbar(self.canvas)
    def __set_properties(self):
        # initial fitting state; `title` is supplied by the subclass
        self.fitting = False
        self.fit_points = []
        self.SetTitle(self.title)
    def __do_layout(self):
        """Arrange controls: list + buttons on the left, canvas + toolbar
        on the right."""
        PCSizer = wx.BoxSizer(wx.VERTICAL)
        PCSizer.Add(self.ByCalcsChkBox, 0, wx.ALL | wx.ALIGN_RIGHT, 5)
        PCSizer.Add(self.ReplotBtn, 0, wx.ALL | wx.EXPAND, 5)
        PCSizer.Add(self.PlotsCtrl, 1, wx.ALL |wx.EXPAND, 5)
        PCSizer.Add(self.ShowInfoBtn, 0, wx.ALL |wx.EXPAND, 5)
        PCSizer.Add(self.FitBtn, 0, wx.ALL |wx.EXPAND, 5)
        PCSizer.Add((30, 30), 1)
        vbox = wx.BoxSizer(wx.VERTICAL)
        vbox.Add(self.canvas, 1, wx.LEFT | wx.TOP | wx.GROW)
        vbox.Add(self.toolbar, 0, wx.EXPAND)
        hbox = wx.BoxSizer(wx.HORIZONTAL)
        hbox.Add(PCSizer, 0, wx.ALL | wx.EXPAND, 5)
        hbox.Add(vbox, 1, wx.ALL | wx.EXPAND, 5)
        self.panel.SetSizer(hbox)
        hbox.Fit(self)
    # Methods to be implemented in subclasses
    def ReplotBtnPress(self, evt):
        self.replot()
    def InfoBtnPress(self, event):
        pass
    def FitBtnPress(self, event):
        pass
    def ByCalcsCheck(self, event):
        # regrouping changes the list contents, so rebuild and redraw
        self.initplot()
        self.replot()
    def OnClose(self, event):
        pass
class PlotFuncFrame(PlotFrame):
    """
    Frame plotting function data received over the pubsub 'data.plot'
    topic, with interactive curve fitting on a single selected axis.
    """
    title = 'Plot'
    def __init__(self, *args, **kwds):
        PlotFrame.__init__(self, *args, **kwds)
        # data arrives via pubsub
        pub.subscribe(self.plot, 'data.plot')
    def plot(self, message):
        """Pubsub handler: store the incoming data sets and redraw."""
        self.data = message
        self.initplot()
        self.replot()
    def initplot(self):
        """Fill the list control with graph names, grouped either by
        calculation or by data series (checkbox state)."""
        self.PlotsCtrl.DeleteAllItems()
        # all data are the same for different calcs
        assert len(set([d.y_titles for d in self.data])) == 1
        # graphs - different graphs
        # leg - different lines on a graph
        if self.ByCalcsChkBox.IsChecked():
            graphs = [d.title for d in self.data]
            self.leg = self.data[0].y_titles
        else:
            graphs = self.data[0].y_titles
            self.leg = [d.title for d in self.data]
        for i, s in enumerate(graphs):
            self.PlotsCtrl.InsertStringItem(i, s)
        # adjusting column width: use the wider of header and contents
        self.PlotsCtrl.SetColumnWidth(0, wx.LIST_AUTOSIZE_USEHEADER)
        wh = self.PlotsCtrl.GetColumnWidth(0);
        self.PlotsCtrl.SetColumnWidth(0, wx.LIST_AUTOSIZE)
        wc = self.PlotsCtrl.GetColumnWidth(0);
        if wh > wc:
            self.PlotsCtrl.SetColumnWidth(0, wx.LIST_AUTOSIZE_USEHEADER)
        self.PlotsCtrl.Select(0, 1)
    def replot(self, cfd = True):
        """Redraw all selected graphs; cfd=True also resets fit state."""
        sind = getListCtrlSelection(self.PlotsCtrl)
        # NOTE(review): leftover debug output
        print sind
        ng = len(sind)
        # arrange subplots in a roughly square grid
        ncols = round(ng**0.5)
        if ncols == 0.:
            ncols = 1.
        nrows = math.ceil(ng / ncols)
        self.fig.clear()
        # clear fitting data as well
        if cfd:
            self.fit_points = []
            self.FitBtn.SetLabel("Begin fit")
            self.fitting = False
        for i, igraph in enumerate(sind):
            title = self.PlotsCtrl.GetItemText(igraph)
            axes = self.fig.add_subplot(nrows,ncols,i+1)
            axes.set_title(title)
            if self.ByCalcsChkBox.IsChecked():
                # one subplot per calc; lines are the data series
                if not hasattr(self.data[igraph],'var_x'):
                    x = self.data[igraph].x
                else:
                    # categorical x: plot against indices, label the ticks
                    x = range(len(self.data[igraph].x))
                    axes.get_xaxis().set_ticks(x)
                    axes.get_xaxis().set_ticklabels(self.data[igraph].x, rotation=60, size='x-small')
                for y in self.data[igraph].y:
                    axes.plot(x, y)
            else:
                # one subplot per series; lines are the calcs
                for d in self.data:
                    if not hasattr(d,'var_x'):
                        x = d.x
                    else:
                        x = range(len(d.x))
                        axes.get_xaxis().set_ticks(x)
                        axes.get_xaxis().set_ticklabels(d.x, rotation=60, size='x-small')
                    axes.plot(x, d.y[igraph])
        # get legend
        lines = self.fig.axes[0].get_lines()
        self.fig.legend(lines, self.leg, 1)
        self.fig.tight_layout()
        self.canvas.draw()
    def InfoBtnPress(self, evt):
        # NOTE(review): self.info and self.calcs are not assigned anywhere
        # in this class -- confirm they are set externally before use.
        if self.info is None:
            mbox.NoInfo()
            return 1
        mbox.ShowPlotInfo(self.calcs, self.info)
    def FitBtnPress(self, evt):
        """Toggle fitting: first press opens the fit dialog and arms
        point selection; second press runs the fit and draws the curve."""
        sind = getListCtrlSelection(self.PlotsCtrl)
        if len(sind) > 1:
            print 'There should be one axis!'
            return
        sind = sind[0]
        if not self.fitting:
            # begin fit; show dialog
            dlg = FitDialog(self, sets = self.leg)
            if not dlg.ShowModal() == wx.ID_OK:
                dlg.Destroy()
                return
            # get data from dialog
            opts, iset = dlg.GetFitOptions()
            dlg.Destroy()
            # some quirks to begin fitting
            self.FitBtn.SetLabel("Finish fit")
            self.fitting = True
            self.canvas.Bind(wx.EVT_LEFT_DCLICK, self.OnCanvasClick)
            # get fit set according to the state of GBC checkbox
            # NOTE(review): self.x is not assigned in this class -- verify
            # these lookups against the data container's interface.
            if self.ByCalcsChkBox.IsChecked():
                fit_set = (self.data[sind][self.x], self.data[sind][self.PlotsCtrl.GetItemText(iset)])
            else:
                fit_set = (self.data[iset][self.x], self.data[iset][self.PlotsCtrl.GetItemText(sind)])
            self.fit = fit.Fit(opts, fit_set)
        else:
            # try to end fit
            if not self.fit.is_enough(len(self.fit_points)):
                return
            self.canvas.Unbind(wx.EVT_LEFT_DCLICK)
            # fitting itself
            p, x, y = self.fit.fit(self.fit_points)
            self.replot()
            ax = self.fig.gca()
            ax.plot(x, y, '--x')
            self.canvas.draw()
            self.AddFitInfo(self.fit.FitInfo())
    def OnCanvasClick(self, evt):
        """Record the x position of a double-click as a fit point and
        draw a vertical marker there."""
        if self.fit.is_enough(len(self.fit_points)):
            self.canvas.Unbind(wx.EVT_LEFT_DCLICK)
            return
        ax = self.fig.gca()
        # convert device (pixel) coordinates into data coordinates
        p = ax.transData.inverted().transform(evt.GetPositionTuple())
        ax.axvline(x = p[0], c = 'r')
        self.fit_points.append(p[0])
        print 'Selected x = %f' % (p[0])
        self.canvas.draw()
    def AddFitInfo(self, info):
        'Adds fitting info to self.info'
        print info
    def OnClose(self, evt):
        # mirror the subscription made in __init__
        pub.unsubscribe(self.plot, 'data.plot')
        self.Destroy()
# end of class PlotFrame
class PlotCorrFrame(PlotFrame):
    """
    Frame showing scatter plots of correlations, fed over the pubsub
    'corr.plot' topic with a (calcs, data, names) message.
    """
    title = 'Correlations'
    def __init__(self, *args, **kwds):
        PlotFrame.__init__(self, *args, **kwds)
        # data arrives via pubsub
        pub.subscribe(self.plot, 'corr.plot')
    def plot(self, message):
        """Pubsub handler: unpack (calcs, data, names) and draw."""
        self.calcs = message[0]
        self.data = message[1]
        # a number of tuples (x, y1, ... yn)
        # self.names = self.data[0][1].dtype.names
        self.names = message[2]
        self.initplot()
        self.replot()
    def initplot(self):
        """Fill the list control, grouping either by calcs or by names."""
        self.PlotsCtrl.DeleteAllItems()
        if self.ByCalcsChkBox.IsChecked():
            data = self.calcs
            self.leg = self.names
        else:
            data = self.names
            self.leg = self.calcs
        for i, s in enumerate(data):
            self.PlotsCtrl.InsertStringItem(i, s)
        # adjusting column width: use the wider of header and contents
        self.PlotsCtrl.SetColumnWidth(0, wx.LIST_AUTOSIZE_USEHEADER)
        wh = self.PlotsCtrl.GetColumnWidth(0)
        self.PlotsCtrl.SetColumnWidth(0, wx.LIST_AUTOSIZE)
        wc = self.PlotsCtrl.GetColumnWidth(0)
        if wh > wc:
            self.PlotsCtrl.SetColumnWidth(0, wx.LIST_AUTOSIZE_USEHEADER)
        self.PlotsCtrl.Select(0, 1)
    def replot(self):
        """Redraw scatter plots for every selected list entry."""
        sind = getListCtrlSelection(self.PlotsCtrl)
        ng = len(sind)
        # arrange subplots in a roughly square grid
        ncols = round(ng**0.5)
        if ncols == 0.:
            ncols = 1.
        nrows = math.ceil(ng / ncols)
        self.fig.clear()
        for i, igraph in enumerate(sind):
            color = iter(cm.get_cmap('prism')([x/24. for x in range(24)]))
            title = self.PlotsCtrl.GetItemText(igraph)
            axes = self.fig.add_subplot(nrows,ncols,i+1)
            axes.set_title(title)
            sdata = []
            if self.ByCalcsChkBox.IsChecked():
                for ins in range(len(self.names)):
                    sdata.append(axes.scatter(self.data[igraph][ins][0], self.data[igraph][ins][1], c = next(color)))
            else:
                for ds in self.data:
                    sdata.append(axes.scatter(ds[igraph][0], ds[igraph][1], c = next(color)))
            # get legend
            self.fig.legend(sdata, self.leg, scatterpoints = 1)
        self.canvas.draw()
    def OnClose(self, evt):
        # Use the same pubsub API that subscribed in __init__ (and that
        # PlotFuncFrame.OnClose uses).  The previous
        # Publisher().unsubscribe(...) call referenced the legacy API
        # name, which is undefined whenever the `pub` fallback import
        # was taken, raising NameError on window close.
        pub.unsubscribe(self.plot, 'corr.plot')
        self.Destroy()
|
The 'Global and Chinese Building Automation Industry, 2013-2023 Market Research Report' is a professional and in-depth study on the current state of the global Building Automation industry with a focus on the Chinese market. The report provides key statistics on the market status of the Building Automation manufacturers and is a valuable source of guidance and direction for companies and individuals interested in the industry.Firstly, the report provides a basic overview of the industry including its definition, applications and manufacturing technology. Then, the report explores the international and Chinese major industry players in detail. In this part, the report presents the company profile, product specifications, capacity, production value, and 2013-2018 market shares for each company. Through the statistical analysis, the report depicts the global and Chinese total market of Building Automation industry including capacity, production, production value, cost/profit, supply/demand and Chinese import/export. The total market is further divided by company, by country, and by application/type for the competitive landscape analysis. The report then estimates 2018-2023 market development trends of Building Automation industry. Analysis of upstream raw materials, downstream demand, and current market dynamics is also carried out. In the end, the report makes some important proposals for a new project of Building Automation Industry before evaluating its feasibility. Overall, the report provides an in-depth insight of 2013-2023 global and Chinese Building Automation industry covering all important parameters.
|
from wickedjukebox import Base
from sqlalchemy.types import (
Integer,
Unicode,
Date,
String,
Boolean,
Float,
DateTime,
)
from sqlalchemy.orm import (
relationship,
)
from sqlalchemy import (
UniqueConstraint,
Column,
ForeignKey,
Index,
func,
Table,
PrimaryKeyConstraint
)
# Default value for Album.type (see the Album model below).
ALBUM_TYPE_ALBUM="album"

# Association table: per-channel play counter for albums.
channel_album_data = Table("channel_album_data", Base.metadata,
    Column('channel_id', Integer, ForeignKey("channel.id"), nullable=False),
    Column('album_id', Integer, ForeignKey("album.id"), nullable=False),
    Column('played', Integer, nullable=False, default=0),
    PrimaryKeyConstraint('channel_id', 'album_id')
)

# Association table: per-channel statistics for individual songs
# (play/vote/skip counters, last play time and a credit cost).
channel_song_data = Table("channel_song_data", Base.metadata,
    Column('channel_id', Integer, ForeignKey('channel.id'), nullable=False),
    Column('song_id', Integer, ForeignKey('song.id'), nullable=False),
    Column('played', Integer, nullable=False, default=0),
    Column('voted', Integer, nullable=False, default=0),
    Column('skipped', Integer, nullable=False, default=0),
    Column('lastPlayed', DateTime, default=None),
    Column('cost', Integer, default=5),
    PrimaryKeyConstraint('channel_id', 'song_id')
)

# Many-to-many link between songs and genres (used by Song.genres).
song_has_genre = Table("song_has_genre", Base.metadata,
    Column('song_id', Integer, ForeignKey('song.id'), nullable=False),
    Column('genre_id', Integer, ForeignKey('genre.id'), nullable=False),
    PrimaryKeyConstraint('song_id', 'genre_id')
)

# Many-to-many link between songs and tags (used by Song.tags).
# NOTE(review): unlike the other link tables, song_id here carries no
# ForeignKey('song.id') -- this looks like an oversight, but adding it
# changes the DDL, so confirm against the live schema first.
song_has_tag = Table("song_has_tag", Base.metadata,
    Column('song_id', Integer, nullable=False),
    Column('tag', String(32), ForeignKey('tag.label'), nullable=False),
    PrimaryKeyConstraint('song_id', 'tag')
)

# One row per (user, album); 'when' records a timestamp for that pair.
# NOTE(review): the exact event 'when' records is not visible here.
user_album_stats = Table("user_album_stats", Base.metadata,
    Column('user_id', Integer, ForeignKey('users.id'), nullable=False),
    Column('album_id', Integer, ForeignKey('album.id'), nullable=False),
    Column('when', DateTime, nullable=False),
    PrimaryKeyConstraint('user_id', 'album_id')
)

# A user's standing for a song (a short string code, max 12 chars --
# presumably a like/dislike style rating; confirm the allowed values).
user_song_standing = Table("user_song_standing", Base.metadata,
    Column('user_id', Integer, ForeignKey('users.id'), nullable=False),
    Column('song_id', Integer, ForeignKey('song.id'), nullable=False),
    Column('standing', String(12), nullable=False),
    PrimaryKeyConstraint('user_id', 'song_id')
)

# Per-(user, song) events keyed additionally by timestamp, so a user can
# accumulate many rows for the same song over time.
user_song_stats = Table("user_song_stats", Base.metadata,
    Column('user_id', Integer, ForeignKey('users.id'), nullable=False),
    Column('song_id', Integer, ForeignKey('song.id'), nullable=False),
    Column('when', DateTime, nullable=False),
    PrimaryKeyConstraint('user_id', 'song_id', 'when')
)
class Album(Base):
    """An album in the library; its filesystem path is unique."""
    __tablename__ = "album"
    __table_args__ = (
        UniqueConstraint('path'),
        Index('album_name_idx', 'name'),
        Index('album_type_idx', 'type'),
    )
    id = Column(Integer, nullable=False, primary_key=True)
    artist_id = Column(Integer, ForeignKey('artist.id'), nullable=False)
    name = Column(Unicode(128), default=None)
    release_date = Column(Date, default=None)
    added = Column(DateTime, default=None)
    downloaded = Column(Integer, nullable=False, default=0)
    # Album kind; defaults to a regular album (ALBUM_TYPE_ALBUM).
    type = Column(String(32), nullable=False, default=ALBUM_TYPE_ALBUM)
    path = Column(Unicode(255), nullable=False)
class Artist(Base):
    """A music artist; names are unique across the library."""
    __tablename__ = "artist"
    __table_args__ = (
        UniqueConstraint('name'),
    )
    id = Column(Integer, primary_key=True, nullable=False)
    name = Column(Unicode(128), default=None)
    added = Column(DateTime, nullable=False)
class Channel(Base):
    """A jukebox output channel with its backend configuration."""
    __tablename__ = "channel"
    __table_args__ = (
        UniqueConstraint('name'),
    )
    id = Column(Integer, nullable=False, primary_key=True)
    name = Column(Unicode(32), nullable=False)
    public = Column(Boolean, default=True)
    # Name of the backend implementation plus its free-form parameters.
    backend = Column(Unicode(64), nullable=False)
    backend_params = Column(Unicode, nullable=False, default=u'')
    # Last heartbeat from the channel process, if any.
    ping = Column(DateTime, default=None)
    active = Column(Boolean, default=False)
    status = Column(Integer, default=None)
class DynamicPlaylist(Base):
    """A query-driven playlist attached to a channel/group."""
    __tablename__ = "dynamicPlaylist"
    id = Column(Integer, nullable=False, primary_key=True)
    channel_id = Column(Integer, default=None)
    group_id = Column(Integer, nullable=False)
    # Probability at which a song is picked from the playlist (0.0-1.0).
    probability = Column(Float, nullable=False)
    label = Column(Unicode(64), default=None)
    query = Column(Unicode)
class Event(Base):
    """A calendar event with an optional geographic location."""
    __tablename__ = "events"
    __table_args__ = (
        Index('events_date_idx', 'startdate', 'enddate'),
    )
    id = Column(Integer, nullable=False, primary_key=True)
    title = Column(Unicode, nullable=False)
    startdate = Column(DateTime, nullable=False)
    enddate = Column(DateTime, nullable=False)
    # Optional latitude/longitude of the event location.
    lat = Column(Float, default=None)
    lon = Column(Float, default=None)
class Genre(Base):
    """A music genre; names are unique."""
    __tablename__ = "genre"
    __table_args__ = (
        UniqueConstraint('name'),
    )
    id = Column(Integer, nullable=False, primary_key=True)
    name = Column(Unicode(128), default=None)
    added = Column(DateTime, nullable=False)
class Group(Base):
    """A user group carrying permission flags for queue operations."""
    __tablename__ = "groups"
    id = Column(Integer, nullable=False, primary_key=True)
    title = Column(Unicode(32), nullable=False)
    admin = Column(Boolean, nullable=False, default=False)
    # Permission flags stored as integers (0 = disallowed).
    nocredits = Column(Integer, nullable=False, default=0)
    queue_skip = Column(Integer, nullable=False, default=0)
    queue_remove = Column(Integer, nullable=False, default=0)
    queue_add = Column(Integer, nullable=False, default=0)
class Lastfm_queue(Base):
    """A scrobble waiting to be submitted to Last.fm."""
    __tablename__ = "lastfm_queue"
    queue_id = Column(Integer, nullable=False, primary_key=True)
    song_id = Column(Integer, ForeignKey('song.id', onupdate="CASCADE", ondelete="CASCADE"), nullable=False)
    time_played = Column(DateTime, nullable=False)
    time_started = Column(DateTime, nullable=False)
class Log(Base):
    """An application log entry keyed by its timestamp.

    NOTE(review): 'date' is the sole primary key with a func.now() default,
    so two entries logged in the same instant would collide -- confirm
    whether that is acceptable for this table.
    """
    __tablename__ = "log"
    priority = Column(Unicode(32), nullable=False)
    message = Column(Unicode, nullable=False)
    date = Column(DateTime, nullable=False, default=func.now(), primary_key=True)
class Queue(Base):
    """A queued song on a channel, optionally requested by a user."""
    __tablename__ = "queue"
    __table_args__ = (
        Index('queue_song_idx', 'song_id'),
        Index('queue_user_idx', 'user_id'),
        Index('queue_channel_idx', 'channel_id'),
    )
    id = Column(Integer, primary_key=True, nullable=False)
    song_id = Column(Integer, ForeignKey('song.id', onupdate="CASCADE", ondelete="CASCADE"), nullable=False)
    # NULL user means the entry was not queued by a specific user.
    user_id = Column(Integer, ForeignKey('users.id', onupdate="CASCADE", ondelete="CASCADE"), default=None)
    channel_id = Column(Integer, ForeignKey('channel.id', onupdate="CASCADE", ondelete="CASCADE"), nullable=False)
    position = Column(Integer, default=0)
    added = Column(DateTime, nullable=False)
class RenderPreset(Base):
    """An image-rendering preset (max dimensions, placeholder, MIME type)."""
    __tablename__ = "render_presets"
    __table_args__ = (
        Index('render_presets_idx1', 'category', 'preset'),
    )
    id = Column(Integer, primary_key=True, nullable=False)
    category = Column(Unicode(64), nullable=False)
    preset = Column(Unicode(64), nullable=False)
    # Maximum height/width of the rendered image.
    hmax = Column(Integer, nullable=False)
    wmax = Column(Integer, nullable=False)
    placeholder = Column(Unicode(64), default=None)
    noproportion = Column(Boolean, nullable=False, default=False)
    force_mime = Column(String(16), nullable=False)
class Setting(Base):
    """A configuration value, scoped per channel and per user.

    channel_id/user_id default to 0, which stands for a global setting
    (the composite primary key is var + channel_id + user_id).
    """
    __tablename__ = "setting"
    __table_args__ = (
        PrimaryKeyConstraint('var', 'channel_id', 'user_id'),
    )
    var = Column(Unicode(32), nullable=False)
    value = Column(Unicode)
    channel_id = Column(Integer, ForeignKey( 'channel.id', onupdate="CASCADE", ondelete="CASCADE"), nullable=False, default=0)
    user_id = Column(Integer, ForeignKey( 'users.id', onupdate="CASCADE", ondelete="CASCADE"), nullable=False, default=0)
class SettingText(Base):
    """English descriptive text for a setting variable."""
    __tablename__ = "setting_text"
    var = Column(Unicode(32), ForeignKey('setting.var'), nullable=False, primary_key=True)
    text_en = Column(Unicode, nullable=False)
class Shoutbox(Base):
    """A short user message posted to the shoutbox."""
    __tablename__ = "shoutbox"
    __table_args__ = (
        Index('shoutbox_added_idx', 'added'),
        Index('shoutbox_user_idx', 'user_id'),
    )
    id = Column(Integer, nullable=False, primary_key=True)
    user_id = Column(Integer, ForeignKey('users.id', onupdate="CASCADE", ondelete="CASCADE"), nullable=False)
    message = Column(Unicode(255), nullable=False)
    added = Column(DateTime, nullable=False)
class Song(Base):
    """A single audio file on disk with its metadata and relations.

    'localpath' is unique and identifies the file; 'genres' and 'tags' are
    many-to-many relations via the song_has_genre / song_has_tag tables.
    """
    __tablename__ = "song"
    __table_args__ = (
        UniqueConstraint('localpath'),
    )
    id = Column(Integer, nullable=False, primary_key=True)
    artist_id = Column(Integer, ForeignKey('artist.id', onupdate="CASCADE", ondelete="CASCADE"), nullable=False)
    album_id = Column(Integer, ForeignKey('album.id', onupdate="CASCADE", ondelete="CASCADE"), default=None)
    track_no = Column(Integer, default=None)
    title = Column(Unicode(128), default=None)
    duration = Column(Float, default=None)  # presumably seconds -- TODO confirm against the scanner
    year = Column(Integer, default=None)
    localpath = Column(Unicode(255), nullable=False)
    downloaded = Column(Integer, default=0)
    lastScanned = Column(DateTime, default=None)
    bitrate = Column(Integer, default=None)
    filesize = Column(Integer, default=None)
    checksum = Column(String(14), default=None)
    lyrics = Column(Unicode)
    broken = Column(Boolean, default=0)
    dirty = Column(Boolean, default=0)
    added = Column(DateTime, nullable=False)
    # BUG FIX: relationship() resolves its string argument against mapped
    # *class* names, which are case-sensitive.  The mapped classes are
    # "Genre" and "Tag"; the previous lowercase 'genre'/'tag' strings would
    # raise InvalidRequestError at mapper-configuration time.
    genres = relationship('Genre', secondary=song_has_genre)
    tags = relationship('Tag', secondary=song_has_tag)
class State(Base):
    """A key/value state entry per channel (composite PK channel_id+state)."""
    __tablename__ = "state"
    channel_id = Column(Integer, nullable=False, primary_key=True)
    state = Column(String(64), primary_key=True, nullable=False)
    value = Column(String(255), default=None)
class Tag(Base):
    """A free-form tag; the label itself is the primary key.

    NOTE(review): 'modified' defaults to the MySQL zero-date string
    '0000-00-00 00:00:00', which only works on MySQL with lenient SQL
    modes -- confirm before porting to another backend.
    """
    __tablename__ = "tag"
    label = Column(Unicode(32), primary_key=True, nullable=False)
    inserted = Column(DateTime, nullable=False, default=func.now())
    modified = Column(DateTime, nullable=False, default='0000-00-00 00:00:00')
class Users(Base):
    """A jukebox user account with credits, stats and liveness timestamps."""
    __tablename__ = "users"
    __table_args__ = (
        UniqueConstraint('username'),
        UniqueConstraint('cookie'),
    )
    id = Column(Integer, nullable=False, primary_key=True)
    username = Column(Unicode(32), nullable=False)
    # Session cookie value; unique per user.
    cookie = Column(Unicode(32), nullable=False)
    password = Column(Unicode(32), nullable=False)
    fullname = Column(Unicode(64), nullable=False)
    email = Column(Unicode(128), nullable=False)
    credits = Column(Integer, nullable=False)
    group_id = Column(Integer, ForeignKey('groups.id', onupdate="CASCADE", ondelete="CASCADE"), nullable=False)
    # Lifetime activity counters.
    downloads = Column(Integer, nullable=False, default=0)
    votes = Column(Integer, nullable=False, default=0)
    skips = Column(Integer, nullable=False, default=0)
    selects = Column(Integer, nullable=False, default=0)
    added = Column(DateTime, nullable=False)
    # Heartbeat timestamps: last seen at all / last heard listening.
    proof_of_life = Column(DateTime, nullable=False)
    proof_of_listening = Column(DateTime, default=None)
    ip = Column(Unicode(32), nullable=False)
    picture = Column(Unicode(255), nullable=False)
    lifetime = Column(Integer, nullable=False)
    # NOTE(review): unlike other references, this carries no
    # ForeignKey('channel.id') -- confirm whether that is intentional.
    channel_id = Column(Integer, nullable=False, default=1)
|
This is a confidential service provided by hundreds of volunteers whose main concern is the safety of our roads.
Help us to promote this unique anti-drinking and driving program here in Caledon!
Please use the donation form below to make your donation; we accept Visa, Mastercard, American Express and Apple Pay.
|
# -*- coding: iso-8859-1 -*-
import urllib, os, sys
import channel
#if channel.in_xbmc:
#icon = xbmc.translatePath(os.path.join(__home__, 'resources/rtl-tvi.png'))
# Registry of supported Belgian TV channels.  Each entry maps a channel id
# to its display name, its icon file (under resources/) and, optionally,
# the Python module implementing it; when 'module' is absent the channel
# id itself is used as the module name (see the dispatch code below).
channels = {'rtltvi': {'name': 'RTL-TVI', 'icon': 'rtl-tvi.png', 'module': 'rtl'},
            'clubrtl': {'name': 'Club RTL', 'icon': 'club-rtl.png', 'module': 'rtl'},
            'plugrtl': {'name': 'Plug RTL', 'icon': 'plug-rtl.png', 'module': 'rtl'},
            'rtbf': {'name': 'RTBF', 'icon': 'rtbf-all.png'},
            'tvcom': {'name': 'TV Com', 'icon': 'tvcom.jpg'},
            'vtm': {'name': 'VTM', 'icon': 'vtm.jpg'},
            'een': {'name': 'EEn', 'icon': 'een.png'},
            }
def show_channels():
    """List every known channel: XBMC directory entries or stdout (debug)."""
    for channel_id, ch in channels.iteritems():  # Python 2 dict iteration
        if channel.in_xbmc:
            # Inside XBMC: resolve the icon path and add a directory entry
            # that triggers the 'show_categories' action when opened.
            icon = xbmc.translatePath(os.path.join(channel.home, 'resources/' + ch['icon']))
            channel.addDir(ch['name'], icon, channel_id=channel_id, action='show_categories')
        else:
            print ch['name'], channel_id, 'show_categories'  # CLI debug output
def get_params():
    """Parse the plugin's query string (sys.argv[2]) into a dict.

    XBMC invokes the plugin as: script.py <handle> '?key=value&key2=value2/'.
    Returns an empty dict when no query string is present; values are
    URL-unquoted, and malformed pairs are silently skipped.
    """
    param = {}
    if len(sys.argv) < 3:
        return {}
    paramstring = sys.argv[2]
    if len(paramstring) >= 2:
        params = paramstring
        # BUG FIX: the trailing '/' must be stripped *before* the string is
        # cleaned and split -- the old code stripped it afterwards into a
        # variable that was never read again -- and stripping one character
        # is [:-1], not [0:len-2] (which dropped two characters).
        if params[len(params) - 1] == '/':
            params = params[:-1]
        cleanedparams = params.replace('?', '')
        print(cleanedparams)
        pairsofparams = cleanedparams.split('&')
        print(pairsofparams)
        for pair in pairsofparams:
            splitparams = pair.split('=')
            if len(splitparams) == 2:
                try:
                    param[splitparams[0]] = urllib.unquote_plus(splitparams[1])
                except Exception:
                    # Keep best-effort behaviour: skip values that fail to
                    # unquote rather than aborting the whole parse.
                    pass
    return param
# --- script entry point (Python 2 / XBMC plugin invocation) ---
print "==============================="
print " Video Belgium"
print "==============================="
print
# Dispatch on the query string: no 'action' -> show the top-level channel
# menu; otherwise merge the params into the channel's registry entry and
# hand control to the channel's implementation module.
params = get_params()
channel_id = params.get('channel_id')
print 'channel_id:', channel_id
if params.get('action', False) is False:
    show_channels()
elif channel_id:
    context = channels[channel_id]
    context.update(params)
    import sys
    # 'module' falls back to the channel id itself.
    channel_module_name = context.get('module', channel_id)
    __import__(channel_module_name)
    sys.modules[channel_module_name].Channel(context)
if channel.in_xbmc:
    # Tell XBMC the directory listing is complete (handle is sys.argv[1]).
    channel.xbmcplugin.endOfDirectory(int(sys.argv[1]))
|
In a chain of attention-grabbing supervisory case reports Jackson attracts on Kleinian techniques and Scandinavian medical event to teach how a psychotherapeutic technique can, via a mixture of empathy and sound concept, stabilise, include, combine and tame the unnamable terrors and compulsions of psychosis. Jackson's braveness, modesty and overall integrity shine via. Jeremy Holmes MD, collage of Exeter.
This can be a easy advent to the numerous mental treatments in use this day, together with cognitive-behavioural, humanistic and psychodynamic techniques. content material: ebook conceal; name; Contents; Illustrations; Preface; Acknowledgements; advent; class of psychological problems; versions of psychological disease; kinds of healing ways; bankruptcy precis; Somatic treatments; Electro-convulsive surprise remedy (ECT); Psychosurgery; different somatic techniques; bankruptcy precis; Psychodynamic remedies; Freudian psychoanalysis; smooth psychodynamic methods; Applicability and review; Behavioural treatments; Behaviour remedies; Behaviour amendment concepts; Applicability and overview; bankruptcy precis; Cognitive and cognitive-behavioural treatments.
Eye circulate Integration remedy is the 1st publication to aspect probably the most leading edge and potent new remedies on hand to psychotherapists this present day. full of case examples and expert by means of broad adventure instructing the method, the ebook is obtainable to proficient lay folks, in addition to to all readers with past education in psychology.
The possible influence of parents with obsessional tendencies on the early developmental processes must not be forgotten (Ramzy, 1966). Ada's intense pathogenic conflicts can be considered to be rooted in the early relation with her mother. Their intensity implies that she had originally succeeded in forming a strong but insecure attachment to her mother, and that the birth of her brother aroused powerful feelings of rejection and of murderous jealousy. These unconscious and conflict-laden aggressive feelings were responsible for her attacks of anxiety at that time.
If she angrily discarded the gifts as representing her bad mother, she would at the same time lose her good mother (as in the case of Ada's recipes, Chapter 1). To preserve her good feelings about her mother, she had to decon taminate her "dirty" mind, symbolically represented by the "con taminated" clothing. She could not accept her mother's gifts, because this would involve recognizing her good mother and at once destroy ing her. This unconscious dilemma implies a failure to differentiate "good" from "bad", and thus love from hate, sufficiently to recognize and tolerate the fact that these "part" mothers are the same person.
Some may be an inherent part of the (oedipal) developmental process, of being forced to share her with father and siblings; others, the consequence of actual traumatic experience and relative emotional deprivation in an overcrowded and dysfunctional family. At the time of the last discussion of her case, it was clear that much remained to be learned about the details and significance of the probable early sexual trauma and of the separation process. The positive transference was strong, but it remained to be seen whether it would prove possible, or necessary, to reach and work through the negative feelings towards the "bad" mother in the therapeutic transference to a sufficient extent to allow a successful termination of the therapy and the likelihood of her maintaining her very considerable improvement.
|
#!/usr/bin/env python
import re
from setuptools import setup, find_packages
import sys
import warnings
# gmusicapi targets Python 3.5+; warn (but do not abort) on older versions.
if sys.version_info[:3] < (3, 5, 0):
    warnings.warn("gmusicapi does not officially support versions below "
                  "Python 3.5.0", RuntimeWarning)

# This hack is from http://stackoverflow.com/a/7071358/1231454;
# the version is kept in a separate file and gets parsed - this
# way, setup.py doesn't have to import the package.
VERSIONFILE = 'gmusicapi/_version.py'

version_line = open(VERSIONFILE).read()
version_re = r"^__version__ = ['\"]([^'\"]*)['\"]"
match = re.search(version_re, version_line, re.M)
if match:
    version = match.group(1)
else:
    raise RuntimeError("Could not find version in '%s'" % VERSIONFILE)

setup(
    name='gmusicapi',
    version=version,
    author='Simon Weber',
    author_email='simon@simonmweber.com',
    url='http://pypi.python.org/pypi/gmusicapi/',
    packages=find_packages(),
    scripts=[],
    license=open('LICENSE').read(),
    description='An unofficial api for Google Play Music.',
    long_description=(open('README.rst').read() + '\n\n' +
                      open('HISTORY.rst').read()),
    # Pinned exclusions document known-broken releases of each dependency.
    install_requires=[
        'validictory >= 0.8.0, != 0.9.2',  # error messages
        'decorator >= 3.3.1',              # > 3.0 likely work, but not on pypi
        'mutagen >= 1.34',                 # EasyID3 TPE2 mapping to albumartist
        ('requests >= 1.1.0, != 1.2.0,'    # session.close, memory view TypeError
         '!= 2.2.1, != 2.8.0, != 2.8.1,'
         '!= 2.12.0, != 2.12.1, != 2.12.2,'  # idna regression broke streaming urls
         '!= 2.18.2'),                     # SSLError became ConnectionError
        'python-dateutil >= 1.3, != 2.0',  # 2.0 is python3-only
        'proboscis >= 1.2.5.1',            # runs_after
        'protobuf >= 3.0.0',
        'oauth2client >= 1.1',             # TokenRevokeError
        'appdirs >= 1.1.0',                # user_log_dir
        'gpsoauth >= 0.2.0',               # mac -> android_id, validation, pycryptodome
        'MechanicalSoup >= 0.4.0',
    ],
    classifiers=[
        'Development Status :: 4 - Beta',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Programming Language :: Python :: 3',
        'Topic :: Internet :: WWW/HTTP',
        'Topic :: Multimedia :: Sound/Audio',
        'Topic :: Software Development :: Libraries :: Python Modules',
    ],
    include_package_data=True,
    zip_safe=False,
)
|
The Work Variance field contains the difference between baseline work of a task, resource, or assignment and the currently scheduled work.
There are several categories of Work Variance fields.
Best Uses Add the Work Variance field to a task view when you want to analyze whether your work estimates for tasks are on track with your original plans.
Example You set a baseline for your project three months ago, and you now want to review the work variances for tasks. You add the Work Variance field to the Task Sheet view to analyze the task's progress and performance to date.
Remarks If the Work Variance field contains a negative number, less work is scheduled for the task than originally planned, as shown in the baseline. If the Work Variance field contains a positive number, more work is scheduled for the task than originally planned. If the Work Variance field contains zero, the scheduled amount of work is exactly as was planned.
You can set a baseline in the Set Baseline dialog box. This copies the current value for the Work field into the corresponding Baseline Work field.
The Work Variance field contains "0 hrs" until the work value varies from the baseline work value, as shown in the Baseline Work field.
Best Uses Add the Work Variance field to a resource sheet when you want to analyze whether your work estimates for resources are on track with your original plans.
Example You set a baseline for your project three months ago, and you now want to review the work variances for resources. You add the Work Variance field to the Resource Sheet view to analyze the resource's progress and performance to date.
Remarks If the Work Variance field contains a negative number, less work is scheduled for the resource than originally planned, as shown in the baseline. If the Work Variance field contains a positive number, more work is scheduled for the resource than originally planned. If the Work Variance field contains zero, the scheduled amount of work is exactly as was planned.
Best Uses Add the Work Variance field to the Task Usage or Resource Usage view when you want to analyze whether your work estimates for assignments are on track with your original plans.
Example You set a baseline for your project three months ago, and you now want to review your work variances for assignments. You add the Work Variance field to the Task Usage view to analyze the assignment's progress and performance to date.
Remarks If the Work Variance field contains a negative number, less work is scheduled for the assignment than originally planned, as shown in the baseline. If the Work Variance field contains a positive number, more work is scheduled for the assignment than originally planned. If the Work Variance field contains zero, the scheduled amount of work is exactly as was planned.
The Work Variance field contains "0 hrs" until the scheduled work varies from the baseline work value, as shown in the Baseline Work field.
|
# Copyright (C) 2013 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions for the Quickstart."""
__author__ = 'alainv@google.com (Alain Vongsouvanh)'
import functools
from urlparse import urlparse

import httplib2
from apiclient.discovery import build
from oauth2client.appengine import StorageByKeyName
from oauth2client.client import AccessTokenRefreshError

import sessions
from model import Credentials
# Load the secret that is used for client side sessions
# Create one of these for yourself with, for example:
# python -c "import os; print os.urandom(64)" > session.secret
SESSION_SECRET = open('session.secret').read()
def get_full_url(request_handler, path):
    """Return an absolute URL for *path* on the current request's origin.

    The scheme and host are taken from the handler's request URL; only
    the path component is replaced.
    """
    parsed = urlparse(request_handler.request.url)
    origin_scheme = parsed.scheme
    origin_host = parsed.netloc
    return '%s://%s%s' % (origin_scheme, origin_host, path)
def load_session_credentials(request_handler):
    """Load (userid, credentials) from the current session.

    Returns (None, None) when no secure 'userid' cookie is present;
    otherwise the userid and whatever the datastore holds for it (the
    stored credentials may themselves be None).
    """
    session = sessions.LilCookies(request_handler, SESSION_SECRET)
    userid = session.get_secure_cookie(name='userid')
    if userid:
        return userid, StorageByKeyName(Credentials, userid, 'credentials').get()
    else:
        return None, None
def store_userid(request_handler, userid):
    """Persist the current user's ID as a secure session cookie."""
    session = sessions.LilCookies(request_handler, SESSION_SECRET)
    session.set_secure_cookie(name='userid', value=userid)
def create_service(service, version, creds=None):
    """Build a Google API client service, optionally authorized.

    Loads the service from its discovery document and, when credentials
    are supplied, authorizes the underlying HTTP transport with them.

    Args:
      service: Service name (e.g 'mirror', 'oauth2').
      version: Service version (e.g 'v1').
      creds: Credentials used to authorize service.

    Returns:
      Authorized Google API service.
    """
    # Each service gets its own transport; authorize it only when
    # credentials were actually provided.
    transport = httplib2.Http()
    if creds:
        creds.authorize(transport)
    return build(service, version, http=transport)
def auth_required(handler_method):
    """Decorator requiring that the user has authorized the Glassware.

    Loads session credentials, refreshes them, and only then invokes the
    wrapped handler.  When the refresh fails (access revoked), the stored
    credentials are purged and the user is redirected to /auth.
    """
    # IMPROVEMENT: functools.wraps preserves the wrapped handler's
    # __name__/__doc__ so logging and framework introspection see the real
    # handler instead of "check_auth".
    @functools.wraps(handler_method)
    def check_auth(self, *args):
        self.userid, self.credentials = load_session_credentials(self)
        self.mirror_service = create_service('mirror', 'v1', self.credentials)
        # TODO: Also check that credentials are still valid.
        if self.credentials:
            try:
                self.credentials.refresh(httplib2.Http())
                return handler_method(self, *args)
            except AccessTokenRefreshError:
                # Access has been revoked.
                store_userid(self, '')
                credentials_entity = Credentials.get_by_key_name(self.userid)
                if credentials_entity:
                    credentials_entity.delete()
        self.redirect('/auth')
    return check_auth
|
3. 9 million American men in prime working age can’t find jobs. I’m one of them.
5. Policies for Families: Is there a Best Practice?
The last policy brief highlights recent research findings of the project “FamiliesAndSocieties” on the current trends in social policies related to families in Europe. It focuses on crucial policy issues related to youth, gender equality and childcare arrangements. The brief also presents suggestions for policy interventions linked to the findings.
program in OK is a thing of beauty"
8. How does your relationship hold up on this 1930s 'Marital Rating Scale'?
Thanks Alysse. Hope all have a merry Christmas.
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
r'''
Plot a function.
'''
# Changes:
# ** version 1.0 2014-05-07 Hulei **
# 1. first version
import os
import sys
import argparse
import matplotlib.pyplot as plt
from math import *
def parse_cmd():
    """Build the argument parser and parse sys.argv[1:].

    Returns the parsed namespace with the sampling range, labels, title,
    plot style and the list of expressions to plot.
    """
    argp = argparse.ArgumentParser()
    argp.add_argument("--start", '-s', default=0.0, type=float, help="start of the range")
    argp.add_argument("--end", '-e', default=10.0, type=float, help="end of the range")
    argp.add_argument("--quality", "-q", default=200, type=int, help="quality of the range")
    argp.add_argument("--xlabel", "-x", default="x", help="set label of x")
    argp.add_argument("--ylabel", "-y", default="y", help="set label of y")
    argp.add_argument("--title", "-t", default=None, help="set title")
    argp.add_argument("--style", "-Y", default="-", help="set style, can be .,o,^")
    argp.add_argument("expression", nargs="+", help="a python expression, like: 1 * x**2 + 2 * x + 3")
    return argp.parse_args(sys.argv[1:])
def plot_expression(expression, xlist, style):
    """Evaluate *expression* over *xlist* and add the curve to the plot.

    expression: a Python expression in 'x' (math names are in scope via
        the module's 'from math import *').
    xlist: the x sample points.
    style: matplotlib format string (color + marker/line style).
    """
    xx = []
    ylist = []
    # SECURITY NOTE: eval() of a user-supplied expression -- acceptable for
    # a local plotting tool, but never expose this to untrusted input.
    exp = eval("lambda x: %s" % expression)
    for x in xlist:
        try:
            ylist.append(exp(x))
            xx.append(x)
        except Exception:
            # Skip points where the expression is undefined (e.g. log(0));
            # the curve simply has a gap there.
            pass
    plt.plot(xx, ylist, style, label = expression)
def main():
    """Parse the command line, sample the x range and plot each expression."""
    ns = parse_cmd()
    # Sample `quality` points from [start, end) by accumulation.
    xlist = []
    step = (ns.end - ns.start) / ns.quality
    val = ns.start
    for i in range(ns.quality):
        xlist.append(val)
        val += step
    # Cycle through six colors so multiple expressions stay distinguishable.
    index = 0
    for expression in ns.expression:
        color = ("r", "b", "g", "c", "y", "k")[index % 6]
        plot_expression(expression, xlist, color + ns.style)
        index += 1
    plt.legend(bbox_to_anchor=(0.01, 0.99), loc=2, borderaxespad=0.)
    plt.grid(True)
    plt.xlabel(ns.xlabel)
    plt.ylabel(ns.ylabel)
    # BUG FIX: --title was parsed by parse_cmd() but never applied.
    if ns.title is not None:
        plt.title(ns.title)
    plt.show()
# Script entry point: print the traceback for any failure and exit
# non-zero so shell callers can detect the error.
if __name__ == "__main__":
    try:
        main()
    except Exception as e:
        import traceback
        traceback.print_exc()
        print(type(e).__name__, ":", str(e))
        sys.exit(1)
|
The College has priority use of the facilities for educational and traditional college performances by the College's arts programs within the VAPA Division (Visual Applied and Performing Arts).
Requests for rentals are accepted at any time on a first-come, first-served basis. However, a completed Event Request Form must be submitted online NO LATER THAN 60 DAYS PRIOR TO THE REQUESTED EVENT. Confirmed bookings, opened to the public for community and non-VAPA College users, will only be finalized on or after May 1, annually, for the following academic year.
User categories, rates, policies and request forms are located below, along with a copy of the Box Office Service Agreement form, College Co-Sponsorship Agreement, and a Category 3 One-Time Daily Use Fee Waiver form.
For more information please contact the Performing Arts Complex Coordinator, Poco Marshall, at (831) 479–6146 or by e-mail.
Please contact the Performing Arts Complex Coordinator, at (831) 479–6146 or by e-mail, to inquire about available dates before submitting an event request form.
If you desire the use of classroom space(s) to support the needs of your event in a Performance Venue, please complete a separate "Room Request", at least 60 days prior to your event date.
|
# Copyright (C) 2020 Lunatixz
#
#
# This file is part of PseudoTV Live.
#
# PseudoTV Live is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PseudoTV Live is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with PseudoTV Live. If not, see <http://www.gnu.org/licenses/>.
# -*- coding: utf-8 -*-
from resources.lib.globals import *
from resources.lib.builder import Builder
class Plugin:
def __init__(self, sysARG=sys.argv):
    """Capture the plugin invocation args and wire up builder/player state."""
    log('__init__, sysARG = ' + str(sysARG))
    self.sysARG = sysARG
    self.CONTENT_TYPE = 'episodes'
    self.CACHE_ENABLED = False
    # The builder gets a back-reference so it can reach plugin state.
    self.myBuilder = Builder()
    self.myBuilder.plugin = self
    self.myPlayer = MY_PLAYER
    self.playlist = xbmc.PlayList(xbmc.PLAYLIST_VIDEO)
    self.maxDays = getSettingInt('Max_Days')
    self.seekTol = getSettingInt('Seek_Tolerance')
    # Non-zero 'Playback_Method' presumably selects playlist playback --
    # confirm against the settings definition.
    self.usePlaylist = bool(getSettingInt('Playback_Method'))
def buildMenu(self, name=None):
    """Render a menu as directory items.

    name selects the sub-menu (None -> main menu, LANGUAGE(30008) ->
    utilities, LANGUAGE(30009) -> channels); unknown names do nothing.
    """
    log('buildMenu, name = %s'%name)
    MAIN_MENU = [(LANGUAGE(30008), '', '')]#,#todo
    # (LANGUAGE(30009), '', '')]

    # Menu entries are tuples passed straight to self.addDir().
    UTIL_MENU = [#(LANGUAGE(30010), '', '', LANGUAGE(30008)),
                 (LANGUAGE(30011), '', '', LANGUAGE(30008)),
                 (LANGUAGE(30096), '', '', LANGUAGE(30096)),
                 (LANGUAGE(30012)%(getPluginMeta(PVR_CLIENT).get('name',''),ADDON_NAME,), '', '', LANGUAGE(30008)),
                 (LANGUAGE(30065)%(getPluginMeta(PVR_CLIENT).get('name','')), '', '', LANGUAGE(30008)),
                 (LANGUAGE(30081), '', '', LANGUAGE(30008)),
                 (LANGUAGE(30013), '', '', LANGUAGE(30008))]

    CHAN_MENU = [(LANGUAGE(30014), '', '', LANGUAGE(30009)),
                 (LANGUAGE(30015), '', '', LANGUAGE(30009))]

    if name is None: items = MAIN_MENU
    elif name == LANGUAGE(30008): items = UTIL_MENU
    elif name == LANGUAGE(30009): items = CHAN_MENU
    else: return
    [self.addDir(*item) for item in items]
def deleteFiles(self, channels=True):
    """Prompt, then delete either the channel list or the m3u/xmltv files.

    channels=True deletes the channel configuration; channels=False deletes
    the generated m3u and xmltv files.  NOTE(review): the default of True
    means a bare self.deleteFiles() call deletes the channel list --
    confirm every caller intends that.
    """
    log('utilities, deleteFiles')
    # Pick the confirmation-message id matching what will be deleted.
    msg = 30096 if channels else 30011
    if yesnoDialog('%s ?'%(LANGUAGE(msg))):
        if channels:
            self.myBuilder.channels.delete()
        else:
            [func() for func in [self.myBuilder.m3u.delete,self.myBuilder.xmltv.delete]]
    return
def utilities(self, name):
    """Dispatch a utilities-menu entry by its (localized) label.

    Falls through silently on unknown labels; otherwise navigates back
    after performing the action.
    """
    log('utilities, name = %s'%name)
    if name == LANGUAGE(30010): self.myBuilder.buildService(reloadPVR=True)
    # BUG FIX: the 30011 entry deletes the m3u/xmltv files and therefore
    # must pass channels=False.  The previous bare self.deleteFiles() call
    # fell through to the default (channels=True) and deleted the channel
    # list instead -- the same action as the 30096 entry below (see the
    # msg mapping inside deleteFiles()).
    elif name == LANGUAGE(30011): self.deleteFiles(channels=False)
    elif name == LANGUAGE(30096): self.deleteFiles(channels=True)
    elif name == LANGUAGE(30012)%(getPluginMeta(PVR_CLIENT).get('name',''),ADDON_NAME,): configurePVR()
    elif name == LANGUAGE(30065)%(getPluginMeta(PVR_CLIENT).get('name','')): brutePVR()
    elif name == LANGUAGE(30013): REAL_SETTINGS.openSettings()
    elif name == LANGUAGE(30081): textviewer(getProperty('USER_LOG'),usemono=True)
    else: return
    xbmc.executebuiltin('Action(Back,10025)')
def channels(self, name):
    """Dispatch a channels-menu entry by its (localized) label."""
    log('channels, name = %s'%name)
    if name == LANGUAGE(30014): self.buildChannels()
    elif name == LANGUAGE(30015): return #todo prompt user, self.myBuilder.playlist.clearChannelList()
    else: return
    xbmc.executebuiltin('Action(back)')
def buildChannels(self):
    """List all configured channels as directory items."""
    log('buildChannels')
    channelList = self.myBuilder.createChannelItems()
    # Map each channel dict onto the positional addDir() arguments.
    items = [(item['name'], item['number'], item['path'], '', item['logo']) for item in channelList]
    for item in items: self.addDir(*item)
def contextPlay(self, writer, isPlaylist=False):
    """Play an item selected from the context menu.

    writer: JSON string describing the selected item.
    isPlaylist: when True, queue the channel's upcoming broadcasts and
        start playback at the selected item; otherwise play just the item.
    """
    stpos = 0
    # The serialized writer uses ' / ' separators; normalize them to commas
    # before JSON parsing.
    writer = loadJSON(writer.replace(' / "',' , "').replace(" / ",", "))# current item
    if not writer:
        return notificationDialog(LANGUAGE(30001))
    log('contextPlay, writer = %s, isPlaylist = %s'%(dumpJSON(writer),isPlaylist))
    self.playlist.clear()
    xbmc.sleep(100)

    if not isPlaylist:
        liz = buildItemListItem(writer)
        listitems = [liz]
    else:
        channelData = writer.get('data',{})
        if not channelData:
            return notificationDialog(LANGUAGE(30001))
        pvritem = self.myBuilder.jsonRPC.getPVRposition(channelData.get('name',''), channelData.get('id',''), isPlaylist=isPlaylist)
        nowitem = pvritem.get('broadcastnow',{})
        nextitems = pvritem.get('broadcastnext',[])[slice(0, PAGE_LIMIT)] # list of upcoming items, truncate for speed.
        nextitems.insert(0,nowitem)
        # Find the selected item's position in the queue (by file path) so
        # playback can start there.
        for pos, nextitem in enumerate(nextitems):
            if loadJSON(nextitem.get('writer',{})).get('file','') == writer.get('file',''):
                stpos = pos
                break
        log('contextPlay, writer stpos = %s'%(stpos))
        listitems = ([buildItemListItem(loadJSON(nextitem.get('writer',''))) for nextitem in nextitems])

    [self.playlist.add(lz.getPath(),lz,idx) for idx,lz in enumerate(listitems)]
    # Keep queue order deterministic even if shuffle was left on.
    if isPlaylistRandom(): self.playlist.unshuffle()
    return self.myPlayer.play(self.playlist, startpos=stpos)
    def playRadio(self, name, id):
        """Resolve and play a radio (music) channel.

        Fetches the channel's current broadcast, requests its music folder
        listing, fills the playlist, and starts playback.
        """
        log('playRadio, id = %s'%(id))
        pvritem = self.myBuilder.jsonRPC.getPVRposition(name, id, radio=True)
        nowitem = pvritem.get('broadcastnow',{}) # current item
        writer = loadJSON(nowitem.get('writer',{}))
        if not writer:
            # No embedded metadata: notify and resolve playback as failed.
            notificationDialog(LANGUAGE(30001))
            return xbmcplugin.setResolvedUrl(int(self.sysARG[1]), False, xbmcgui.ListItem())
        json_response = self.myBuilder.jsonRPC.requestList(id, writer.get('data',{}).get('path',''), 'music', page=250)
        if json_response:
            setCurrentChannelItem(pvritem)
            self.playlist.clear()
            xbmc.sleep(100)  # brief pause so the playlist clear settles
            listitems = [buildItemListItem(item, mType='music') for item in json_response]
            # Side-effect comprehension: add every track to the playlist in order.
            [self.playlist.add(lz.getPath(),lz,idx) for idx,lz in enumerate(listitems)]
            if isPlaylistRandom(): self.playlist.unshuffle()
            log('playRadio, Playlist size = %s'%(self.playlist.size()))
            return self.myPlayer.play(self.playlist)
        # NOTE(review): when json_response is empty nothing is resolved or
        # played -- confirm whether a failed setResolvedUrl is expected here.
    def playChannel(self, name, id, radio=False, isPlaylist=False, failed=False):
        """Resolve playback for a PVR channel.

        Builds a listitem for the channel's current broadcast, applies seek
        offsets (or queues the next show when near the end), optionally fills
        the playlist with upcoming broadcasts, and resolves the plugin URL.
        """
        log('playChannel, id = %s, isPlaylist = %s'%(id,isPlaylist))
        found = False
        liz = xbmcgui.ListItem()
        listitems = [liz] #empty listitem required to pass failed playback.
        pvritem = self.myBuilder.jsonRPC.getPVRposition(name, id, isPlaylist=isPlaylist)
        nowitem = pvritem.get('broadcastnow',{}) # current item
        nextitems = pvritem.get('broadcastnext',[])[slice(0, PAGE_LIMIT)] # list of upcoming items, truncate for speed.
        ruleslist = []#check pre-play channel rules.
        if nowitem:
            found = True
            setCurrentChannelItem(pvritem)
            progress = nowitem['progress']  # seconds into the current broadcast
            runtime = nowitem['runtime']    # broadcast runtime in minutes
            liz = buildItemListItem(loadJSON(nowitem.get('writer',{})))
            if (progress > self.seekTol):
                # near end, avoid loopback; override last listitem and queue next show.
                if (progress > ((runtime * 60) - 45)): #45sec endtime offset
                    log('playChannel, progress = %s near end, queue nextitem'%(progress))
                    liz = buildItemListItem(loadJSON(nextitems[0].get('writer',{})))
                else:
                    log('playChannel, progress = %s within seek tolerance setting seek.'%(progress))
                    liz.setProperty('totaltime' , str((runtime * 60)))
                    liz.setProperty('resumetime' , str(progress))
                    liz.setProperty('startoffset', str(progress))
            # remove bct pre-roll from stack://
            url = liz.getPath()
            info = liz.getVideoInfoTag()
            writer = loadJSON(info.getWritingCredits())
            file = writer.get('originalfile','')
            if url.startswith('stack://') and not url.startswith('stack://%s'%(file)):
                log('playChannel, playing stack with url = %s'%(url))
                paths = url.split(' , ')
                # NOTE(review): removing from `paths` while iterating it skips
                # elements; this appears intended to drop pre-roll entries up
                # to the first path containing `file` -- verify behavior.
                for path in paths:
                    if file not in path:
                        paths.remove(path)
                    elif file in path:
                        break
                liz.setPath('stack://%s'%(' , '.join(paths)))
            listitems = [liz]
            if isPlaylist:
                self.playlist.clear()
                xbmc.sleep(100)  # brief pause so the playlist clear settles
                listitems.extend([buildItemListItem(loadJSON(nextitem.get('writer',''))) for nextitem in nextitems])
                # Side-effect comprehension: add every listitem to the playlist.
                [self.playlist.add(lz.getPath(),lz,idx) for idx,lz in enumerate(listitems)]
                if isPlaylistRandom(): self.playlist.unshuffle()
                log('playChannel, Playlist size = %s'%(self.playlist.size()))
                return self.myPlayer.play(self.playlist)
        # else:
            # listitems.extend([buildItemListItem(loadJSON(nextitem.get('writer',''))) for nextitem in nextitems])
            # paths = [lz.getPath() for lz in listitems]
            # liz.setPath('stack://%s'%(' , '.join(paths)))
            # listitems = [liz]
        #todo found == False set fallback to nextitem? with playlist and failed == True?
        xbmcplugin.setResolvedUrl(int(self.sysARG[1]), found, listitems[0])
def addLink(self, name, channel, path, mode='',icon=ICON, liz=None, total=0):
if liz is None:
liz=xbmcgui.ListItem(name)
liz.setInfo(type="Video", infoLabels={"mediatype":"video","label":name,"title":name})
liz.setArt({'thumb':icon,'logo':icon,'icon':icon})
log('addLink, name = %s'%(name))
u=self.sysARG[0]+"?url="+urllib.parse.quote(path)+"&channel="+str(channel)+"&name="+urllib.parse.quote(name)+"&mode="+str(mode)
xbmcplugin.addDirectoryItem(handle=int(self.sysARG[1]),url=u,listitem=liz,totalItems=total)
def addDir(self, name, channel, path, mode='',icon=ICON, liz=None):
log('addDir, name = %s'%(name))
if liz is None:
liz=xbmcgui.ListItem(name)
liz.setInfo(type="Video", infoLabels={"mediatype":"video","label":name,"title":name})
liz.setArt({'thumb':icon,'logo':icon,'icon':icon})
liz.setProperty('IsPlayable', 'false')
u=self.sysARG[0]+"?url="+urllib.parse.quote(path)+"&channel="+str(channel)+"&name="+urllib.parse.quote(name)+"&mode="+str(mode)
xbmcplugin.addDirectoryItem(handle=int(self.sysARG[1]),url=u,listitem=liz,isFolder=True)
def getParams(self):
return dict(urllib.parse.parse_qsl(self.sysARG[2][1:]))
    def run(self):
        """Plugin entry point: parse the invocation parameters from sysARG
        and route to menu building, playback, or the utility handlers."""
        params=self.getParams()
        # All parameters are optional; missing/empty values fall back to None
        # (radio falls back to the string 'False' and is compared as a string).
        name = (urllib.parse.unquote(params.get("name",'')) or None)
        channel = (params.get("channel",'') or None)
        url = (params.get("url",'') or None)
        id = (params.get("id",'') or None)
        radio = (params.get("radio",'') or 'False')
        mode = (params.get("mode",'') or None)
        log("Name: %s" %(name))
        log("Channel: %s"%(channel))
        log("URL: %s" %(url))
        log("ID: %s" %(id))
        log("Radio: %s" %(radio))
        log("Mode: %s" %(mode))
        if channel is None:
            if mode is None: self.buildMenu(name)
            elif mode == 'play':
                if radio == 'True':
                    self.playRadio(name, id)
                else:
                    self.playChannel(name, id, isPlaylist=self.usePlaylist)
            elif mode == 'Utilities': self.utilities(name)
            elif mode == 'Channels': self.channels(name)
        # Directory housekeeping runs unconditionally, even after playback
        # dispatch: content type, sort methods, then close the directory.
        xbmcplugin.setContent(int(self.sysARG[1]) , self.CONTENT_TYPE)
        xbmcplugin.addSortMethod(int(self.sysARG[1]) , xbmcplugin.SORT_METHOD_UNSORTED)
        xbmcplugin.addSortMethod(int(self.sysARG[1]) , xbmcplugin.SORT_METHOD_NONE)
        xbmcplugin.addSortMethod(int(self.sysARG[1]) , xbmcplugin.SORT_METHOD_LABEL)
        xbmcplugin.addSortMethod(int(self.sysARG[1]) , xbmcplugin.SORT_METHOD_TITLE)
        xbmcplugin.endOfDirectory(int(self.sysARG[1]), cacheToDisc=self.CACHE_ENABLED)
# Script entry point: build the plugin around Kodi's invocation argv and dispatch.
if __name__ == '__main__': Plugin(sys.argv).run()
|
Depending on your individual situation, I offer the complementary modules consulting, training and implementation as a holistic approach. These modules are based on my experience from long-term cooperation with well-reputed consulting companies, with small and medium-sized enterprises, and with international groups and holdings.
Consulting This phase starts with listening!
If I understand your problem, I can develop a solution concept for you.
Even though I have proven tools and methods, you have an individual problem, so you should expect nothing less than an individual, customized solution.
Training The objective is defined and the approach is aligned and agreed!
Now all affected parties have to be informed, trained and coached. In the application of new processes, in preparation of methodic and organizational changes, up to supporting by the management. Here also the implementation during the daily business is essential.
Visions without actions are dreams!
Now it is time to implement the agreed and trained concepts into real life.
I support you on the way to your success of the measures. This includes elimination of road blocks and motivation of all affected individuals as well as the return of invest of these measures.
You need support for solving a defined problem?
For the realization of special tasks you need temporary external capacity?
Then please contact me for individual support. Only the appropriate elements that deliver added value according to your objective will be applied.
The corresponding approach is always adapted to the cultural conditions of the project site. Because it is essential for me to successfully realize your project in the local language, I focus on the German-, English- and Spanish-speaking areas.
|
# This file does regression test using Random Forest Regression.
# The input file should be insert_total_jobs.csv that contains total jobs info.
import matplotlib.pyplot as plt
import math
import sys
import pandas as pd
import numpy as np
class SlidingWindow:
    """Sliding train/test window over a job-instances CSV.

    The CSV is loaded with pandas, and windows are defined over the sorted
    unique values of ``attribute`` (currently only "DesktopEndDateMinute"
    is supported). Each call to ``slide()`` advances the window by
    ``test_window_size`` unique attribute values and returns a
    ``(df_train, df_test, test_attr_start)`` triple.

    NOTE(review): the row-offset bookkeeping assumes the CSV rows are sorted
    by ``attribute`` -- confirm against the producer of insert_total_jobs.csv.
    """

    def __init__(self, job_instances_file, attribute = "DesktopEndDateMinute", train_window_size = 20, test_window_size = 1):
        """Load the CSV and initialise window bookkeeping.

        Raises:
            ValueError: if ``attribute`` is anything other than
                "DesktopEndDateMinute" (the window logic is specific to it).
        """
        self.df = pd.read_csv(job_instances_file, header=0)
        self.attribute = attribute
        if attribute != "DesktopEndDateMinute":
            # The original code did `return -1`, which raises a confusing
            # TypeError at runtime (__init__ must return None). Fail fast
            # with a meaningful exception instead.
            raise ValueError("ERROR: attribute is not DesktopEndDateMinute")
        self.value_counts = self.df[attribute].value_counts()
        self.index_list = sorted(self.value_counts.index)
        self.train_window_size = train_window_size
        self.test_window_size = test_window_size
        # Window positions are tracked three ways: as attribute values
        # (*_attr_*), as indices into index_list (*_index_*), and as row
        # offsets into the DataFrame (*_line_*).
        self.cur_train_attr_start = None
        self.cur_train_attr_end = None
        self.cur_test_attr_start = None
        self.cur_test_attr_end = None
        self.cur_train_index_start = None
        self.cur_train_index_end = None
        self.cur_test_index_start = None
        self.cur_test_index_end = None
        self.cur_train_line_start = None
        self.cur_train_line_end = None
        self.cur_test_line_start = None
        self.cur_test_line_end = None
        self.next_window_index = None
        self.next_window_line_start = None
        self.df_train = None
        self.df_test = None
        # self.machine_learning_engine = osgparse.mlengine.MachineLearningEngine()
        # self.regression_engine = osg

    def slide_depreciated(self):
        """Deprecated variant of slide(): the test set is taken purely by row
        offsets instead of being filtered on Start/End minutes.

        Returns:
            (df_train, df_test) on success, or "EOF" once the test window
            would run past the end of the data.
        """
        if self.cur_train_attr_start is None and self.cur_train_attr_end is None and self.cur_test_attr_start is None and self.cur_test_attr_end is None:
            # First call: anchor the window at the very beginning.
            self.cur_train_index_start = 0
            self.cur_train_attr_start = self.index_list[0]
            self.cur_train_line_start = 0
            self.next_window_line_start = 0
        else:
            # Subsequent calls: advance by one test window.
            self.cur_train_index_start += self.test_window_size
            self.cur_train_attr_start = self.index_list[self.cur_train_index_start]
            self.cur_train_line_start = self.next_window_line_start
        self.cur_train_index_end = self.cur_train_index_start + self.train_window_size - 1
        self.cur_test_index_start = self.cur_train_index_end + 1
        self.cur_test_index_end = self.cur_test_index_start + self.test_window_size - 1
        if self.cur_test_index_end >= len(self.index_list):
            print("Reach the end of DataFrame!")
            return "EOF"
        self.cur_train_attr_end = self.index_list[self.cur_train_index_end]
        self.cur_test_attr_start = self.index_list[self.cur_test_index_start]
        self.cur_test_attr_end = self.index_list[self.cur_test_index_end]
        # Translate attribute indices into row offsets by accumulating the
        # per-value row counts; also remember where the *next* window starts.
        accumulate_line = 0
        self.next_window_index = self.cur_train_index_start + self.test_window_size
        for idx in range(self.cur_train_index_start, self.cur_train_index_end + 1):
            if idx == self.next_window_index:
                self.next_window_line_start += accumulate_line
            accumulate_line += self.value_counts[self.index_list[idx]]
        self.cur_train_line_end = self.cur_train_line_start + accumulate_line - 1
        self.cur_test_line_start = self.cur_train_line_end + 1
        accumulate_line = 0
        for idx in range(self.cur_test_index_start, self.cur_test_index_end + 1):
            accumulate_line += self.value_counts[self.index_list[idx]]
        self.cur_test_line_end = self.cur_test_line_start + accumulate_line - 1
        self.df_train = self.df[self.cur_train_line_start:self.cur_train_line_end+1]
        self.df_test = self.df[self.cur_test_line_start:self.cur_test_line_end+1]
        return (self.df_train, self.df_test)

    def slide(self):
        """Advance the window and return (df_train, df_test, test_attr_start).

        df_test holds the rows at/after the test row offset whose
        [DesktopStartDateMinute, DesktopEndDateMinute) interval spans the
        test window's starting minute.

        Returns:
            (df_train, df_test, cur_test_attr_start) on success, "EOF" once
            the test window runs past the end of the data, or -1 on the
            defensive attribute check (unreachable after __init__ validation).
        """
        if self.cur_train_attr_start is None and self.cur_train_attr_end is None and self.cur_test_attr_start is None and self.cur_test_attr_end is None:
            # First call: anchor the window at the very beginning.
            self.cur_train_index_start = 0
            self.cur_train_attr_start = self.index_list[0]
            self.cur_train_line_start = 0
            self.next_window_line_start = 0
        else:
            # Subsequent calls: advance by one test window.
            self.cur_train_index_start += self.test_window_size
            self.cur_train_attr_start = self.index_list[self.cur_train_index_start]
            self.cur_train_line_start = self.next_window_line_start
        self.cur_train_index_end = self.cur_train_index_start + self.train_window_size - 1
        self.cur_test_index_start = self.cur_train_index_end + 1
        self.cur_test_index_end = self.cur_test_index_start + self.test_window_size - 1
        if self.cur_test_index_end >= len(self.index_list):
            print("Reach the end of DataFrame!")
            return "EOF"
        self.cur_train_attr_end = self.index_list[self.cur_train_index_end]
        self.cur_test_attr_start = self.index_list[self.cur_test_index_start]
        self.cur_test_attr_end = self.index_list[self.cur_test_index_end]
        # Translate attribute indices into row offsets by accumulating the
        # per-value row counts; also remember where the *next* window starts.
        accumulate_line = 0
        self.next_window_index = self.cur_train_index_start + self.test_window_size
        for idx in range(self.cur_train_index_start, self.cur_train_index_end + 1):
            if idx == self.next_window_index:
                self.next_window_line_start += accumulate_line
            accumulate_line += self.value_counts[self.index_list[idx]]
        self.cur_train_line_end = self.cur_train_line_start + accumulate_line - 1
        self.cur_test_line_start = self.cur_train_line_end + 1
        self.df_train = self.df[self.cur_train_line_start:self.cur_train_line_end+1]
        if self.attribute != "DesktopEndDateMinute":
            # Defensive check kept for interface compatibility; unreachable
            # now that __init__ validates the attribute.
            print("ERROR: attribute is no damn DesktopEndDateMinute!")
            return -1
        # Test rows: submissions that are "running" at the test window start,
        # i.e. started at or before it and end strictly after it.
        self.df_test = self.df.loc[self.cur_test_line_start:].query('DesktopStartDateMinute <= @self.cur_test_attr_start and DesktopEndDateMinute > @self.cur_test_attr_start')
        return (self.df_train, self.df_test, self.cur_test_attr_start)

    def get_values(self, attribute):
        """Return the unique values of *attribute* (in value_counts order)."""
        return self.df[attribute].value_counts().index
|
ISLAMABAD: The Lahore-Multan motorway will be open for public in April next year, announced Federal Minister for Communications Hafiz Abdul Karim on Thursday.
The minister remarked that the progress of the Lahore–Multan motorway project was satisfactory and that it will be inaugurated in the first week of April, four months earlier than its scheduled completion date of August 2018.
Meanwhile, the secretary said that the travelling distance from Lahore to Multan was five hours which will be reduced to three hours after the motorway will be completed.
The secretary also said that fast-paced work is in progress on the Lahore–Sialkot motorway, which would likewise be completed by August 2018; for the benefit of the public, the eastern bypass from Kala Shah Kaku will also be opened well before the motorway's completion.
The secretary said that a total of 13 motorway projects are presently underway, of which three have already been completed and nine are in different phases of construction. The Karachi, Hazara and Hyderabad motorways have been completed, and the others under construction will be opened to the general public in 2018.
During the visit NHA Chairman Shahid Ashraf Tarar also briefed the minister about the completion of Khanewal-Multan motorway section. He said that Gojra, Shorkot and Shorkot- Khaniwal section are under-construction.
Other senior officials of the ministry of communication also accompanied the minister during his visit, including Secretary for Communications Mohammad Sadeeq Memon.
|
# coding=utf-8
import os
import secrets
import shutil
import tempfile
from nose.tools import assert_equal, assert_true
from unittest import TestCase
from ultros.core.storage.data.toml import TOMLData
from ultros.core.storage.manager import StorageManager
__author__ = "Gareth Coles"
class TestTOML(TestCase):
    """Exercises TOMLData's dict-like interface via the StorageManager,
    using a copy of files/test.toml in a throwaway temp directory."""

    def setUp(self):
        # A uniquely-named temp directory keeps parallel test runs isolated.
        self.directory = os.path.join(tempfile.gettempdir(), secrets.token_urlsafe(10))
        if not os.path.exists(self.directory):
            os.mkdir(self.directory)
        self.config_dir = os.path.join(self.directory, "config")
        self.data_dir = os.path.join(self.directory, "data")
        for directory in (self.config_dir, self.data_dir):
            if not os.path.exists(directory):
                os.mkdir(directory)
        # Copy the shared TOML fixture into the data directory.
        tests_dir = os.path.join(os.path.dirname(__file__), "../../")
        shutil.copy(
            os.path.join(tests_dir, "files/test.toml"),
            os.path.join(self.data_dir, "test.toml")
        )
        self.manager = StorageManager(
            ultros=None,
            config_location=self.config_dir,
            data_location=self.data_dir
        )

    def tearDown(self):
        # Shut the manager down before deleting its backing directories.
        self.manager.shutdown()
        del self.manager
        if os.path.exists(self.directory):
            shutil.rmtree(self.directory)

    def test_dict_functionality(self):
        """
        TOML data testing: Dict functionality
        """
        data_obj = self.manager.get_data("test.toml", None)
        expected_items = [
            ("test", "test"),
            ("herp", "derp"),
            ("int", 1),
            ("float", 1.1),
            ("boolean", True),
            ("other_boolean", False)
        ]
        expected_keys = [key for key, _ in expected_items]
        expected_values = [value for _, value in expected_items]

        assert_equal(len(data_obj), 6)
        assert_equal(data_obj.copy(), data_obj.data)
        assert_equal(data_obj.get("test"), "test")
        assert_equal(list(data_obj.items()), expected_items)
        assert_equal(list(data_obj.keys()), expected_keys)
        assert_equal(list(data_obj.values()), expected_values)
        assert_true("test" in data_obj)
        assert_equal(data_obj["test"], "test")
        # Iterating the object itself yields keys, like a dict.
        assert_equal(list(data_obj), expected_keys)
        assert_equal(len(data_obj), 6)

    def test_read(self):
        """
        TOML data testing: Reading
        """
        data_obj = self.manager.get_data("test.toml", None)
        for key, expected in (
            ("test", "test"),
            ("herp", "derp"),
            ("int", 1),
            ("float", 1.1),
            ("boolean", True),
            ("other_boolean", False)
        ):
            assert_equal(data_obj[key], expected)
|
This is an exclusive business profile of S B Sanitary And Supply Agency located in , Puri. From this Page, you can directly contact S B Sanitary And Supply Agency from the enquiry form provided on the right. On the left you can get the Verified Mobile Number of S B Sanitary And Supply Agency – feel free to call us to know more about our products & services. We will soon update our Catalog which you can download to get latest information about all our products & services and latest deals & offers by S B Sanitary And Supply Agency. Do check out our Job Openings section to know about all the vacancies in S B Sanitary And Supply Agency. You can also check out our Photo Gallery section to see latest photos of S B Sanitary And Supply Agency. Before leaving, do not forget to give us your review and rating if you have been a customer of S B Sanitary And Supply Agency in the past.
Do you want to receive special deals and offers from S B Sanitary And Supply Agency?
Daily Download limit reached! Do you want to alert S B Sanitary And Supply Agency to receive Catalogue?
|
# Query Jupyter server for the info about a dataframe
import json as _VSCODE_json
import pandas as _VSCODE_pd
import pandas.io.json as _VSCODE_pd_json
import builtins as _VSCODE_builtins
import vscodeDataFrameHelpers as _VSCODE_dataFrameHelpers
# Function to do our work. It will return the object
# Function to do our work. It will return the object
def _VSCODE_getDataFrameInfo(df):
    """Return a JSON string describing *df*.

    The result has three keys: "columns" (a list of {key, name, type}
    records), "indexColumn" (the name of the index column), and "rowCount".
    """
    df = _VSCODE_dataFrameHelpers._VSCODE_convertToDataFrame(df)
    rowCount = _VSCODE_dataFrameHelpers._VSCODE_getRowCount(df)

    # If any rows, serialize a single row to json and extract the column
    # names from it, so we match what we'll fetch when we ask for all rows.
    if rowCount:
        try:
            row = df.iloc[0:1]
            # DataFrame.to_json replaces the deprecated/private
            # pandas.io.json.to_json(None, row, ...) call used previously.
            json_row = row.to_json(date_format="iso")
            columnNames = list(_VSCODE_json.loads(json_row))
        except Exception:
            # Fall back to the raw column labels on any serialization issue.
            columnNames = list(df)
    else:
        columnNames = list(df)

    # Compute the index column name. It may have been renamed.
    indexColumn = df.index.name if df.index.name else "index"
    columnTypes = _VSCODE_builtins.list(df.dtypes)

    # Make sure the index column is reported; "int64" is a placeholder
    # dtype string (every type is str()-ified below anyway).
    if indexColumn not in columnNames:
        columnNames.insert(0, indexColumn)
        columnTypes.insert(0, "int64")

    # Then loop and generate our output json.
    columns = []
    for n in _VSCODE_builtins.range(0, _VSCODE_builtins.len(columnNames)):
        column_type = columnTypes[n]
        column_name = str(columnNames[n])
        colobj = {}
        colobj["key"] = column_name
        colobj["name"] = column_name
        colobj["type"] = str(column_type)
        columns.append(colobj)

    # Save this in our target
    target = {}
    target["columns"] = columns
    target["indexColumn"] = indexColumn
    target["rowCount"] = rowCount

    # return our json object as a string
    return _VSCODE_json.dumps(target)
|
d. Agreement in Nairobi reaffirming WTO Members’ commitment to conclude the DDA in line with its development mandate.
(xv) Examination of all types of NTBs in developed country markets impacting on developing country exports.
3. We call upon Members to affirm their commitment to the DDA and its mandates, in particular on core areas of importance to developing countries.
4. We emphasise that future negotiations on agriculture and NAMA tariff cuts must be commensurate with meaningful cuts in domestic support and that flexibilities shall be accorded to developing countries, especially LDCs and SVEs.
5. We acknowledge that agriculture is of critical importance to the economies of the majority of the ACP Group of States and therefore reaffirm that special and differential treatment for developing countries shall be an integral element of the agricultural negotiations, taking into account the possible negative effects of non-implementation of commitments by developed countries on ACP States.
6. On domestic support, we reiterate the importance we attach to achieving meaningful cuts in Overall Trade Distorting Support (OTDS), Aggregate Measurement of Support (AMS), and disciplines to prevent box shifting. The AMS and de minimis flexibilities for developing countries, in particular LDCs, SVEs, and NFIDCs in accordance with Rev.4, should be preserved and the integrity of Article 6.2 of the Agreement on Agriculture shall be maintained.
7. On export competition, as per Rev.4, we reiterate our support for the progressive and parallel elimination of all forms of export subsidies and export measures with equivalent effect, including in the area of food aid, while taking into account problems faced by developing countries, and urge that the flexibilities currently envisaged in existing texts be maintained including those for LDCs and SVEs.
8. On Public Stockholding for Food Security Purposes, we call on Members to work expeditiously towards finding an appropriate permanent solution.
9. We reaffirm the need to preserve in the NAMA negotiations, at minimum, the core flexibilities contained in Rev.3 that fully take into account ACP States development priorities and that LDCs shall be exempted from taking any tariff reductions. With respect to the current NAMA tariff reduction formula; we note that, many WTO Members, including ACP States, have stated that the Swiss formula is not do-able.
10. We also reiterate the need for any agreement to include treatment of preference erosion.
11. In continued negotiations, we urge that tariff reduction for those developing Members that are part of a customs union of developing countries which may include LDCs and SVEs shall be no more than the average tariff reduction of all the other Members of the customs union and shall in no case result in final tariffs that are below the Common External Tariff. The tariff reduction commitments shall also be moderated to avoid widening divergences in tariff bindings amongst the Members of such customs unions.
12. With respect to non-tariff barriers, WTO Members should refrain from imposing measures on trade from ACP States, that amount to discriminatory or unnecessary barriers to trade. Technical assistance should be provided to support developing country initiatives aimed at ensuring ACP participation in relevant standard setting processes and to address other non-tariff barriers to trade.
13. The ACP Group of States identified non-exhaustive sectors and modes of supply of interest to our Members in JOB/TNC/46.
14. We recall that Members may decide how to proceed on their own offers taking into account their particular development objectives and offers from other Members. At the same time we reiterate key principles and flexibilities embedded in the General Agreement on Trade in Services, the Negotiating Guidelines and Procedures, the Hong Kong Ministerial Declaration and its Annex C.
15. In domestic regulations negotiations, we recall that developing countries have the flexibility in their own discretion with regard to taking on any new commitments, consistent with their right to regulate and development policy objectives. To further the development component of the DDA, developed Members should take on commitments in qualification requirements and procedures to facilitate developing country services market access. We also call upon Members to reaffirm that LDCs shall not be required to take on any new commitments.
16. The ACP Group continues to support TRIPS negotiations on the basis of proposals contained in TN/C/W/52.
17. We urge Members to take note that LDCs share in world merchandise and services trade is miniscule and they suffer staggering current account deficits. Therefore, we emphasise the need for value-added and binding decisions based on proposals from the LDC Group, to be taken at the Tenth WTO Ministerial Conference (MC10) in Nairobi as a matter of priority toward the real and greater integration of the LDCs into the multilateral trading system.
18. WTO mandates in areas of interest to the ACP Group of States, should be reinvigorated, in particular the development component across all on-going work programmes and deliberations in the relevant WTO bodies.
19. We reaffirm Paragraph 35 of the Doha Declaration and paragraph 41 of the Hong Kong Ministerial Declaration. We urge the WTO Membership to continue to address, in a substantive and meaningful manner, the particular structural disadvantages and inherent vulnerabilities of small, vulnerable economies and call for due regard to be given to the priorities of SVEs in all areas of the negotiations to ensure their further integration into the multilateral trading system. We also reiterate that the WTO must deliver flexibilities for SVEs as part of any development outcome.
20. We welcome the recent accession to the WTO of ACP State, the Republic of Seychelles, and applaud the completion of the accession negotiations for ACP State, Liberia.
21. Acceding ACP States should make concessions commensurate with their size and development needs and current WTO rules and standards. We urge the WTO and developed countries to provide technical assistance and resources to support acceding ACP States in the process of accession negotiations. In addition, we urge the implementation of the General Council's Decision of 25th July 2012 on Accession of LDCs aimed at strengthening, streamlining and operationalising the 2002 LDCs Accession Guidelines, pursuant to the 2011 MC8 Decision.
22. The ACP Group of States welcomes the launch of the Enhanced Integrated Framework (EIF) phase 2 on 1st July 2015, as well as the EIF Pledging Conference to be held during the Tenth WTO Ministerial Conference (MC10) in Nairobi. In this regard, we urge donors to come forward with substantial pledges in Nairobi.
23. We appreciate the effort of some donors to sustain their aid efforts. We urge donors to continue to support the efforts of our Members, to integrate into the world trading system, by directing aid for trade flows to areas of the highest priority as identified by the beneficiaries, including infrastructure, productive capacity, trade finance, connectivity, and costs of adjustment. This assistance should be in the form of new funding, without diverting from existing bilateral assistance in other areas, on a sustainable basis.
24. We are seriously concerned about the potential for erosion of the gains made as a result of graduation of some of our Members. Therefore, we call for the abandonment of per capita income statistics as the only measure to determine eligibility for Aid for Trade for WTO Members and recommend the use of different criteria.
25. We support the extension of the decision for MC10 to maintain the current practice of not imposing customs duties on electronic transmissions, and to continue the electronic commerce work programme with special attention to the situation in developing countries, particularly in least-developed country Members and least connected countries. In this regard, we also urge the recognition of the principles of non-discrimination, predictability, and transparency.
26. We also call on WTO Members to agree in Nairobi to make permanent the moratorium on the application of Subparagraphs 1(b) and 1(c) of Article XXIII of GATT 1994 on non-violation and situation complaints (NVCs) to the TRIPS Agreement.
27. We underline the importance of the Tenth WTO Ministerial Conference (MC10) to be held in Nairobi and look forward, therefore, to effectively and constructively participate in the deliberations of the conference. In addition, we call on all Members to ensure that any proposed Ministerial declaration for Nairobi is developed in a transparent, inclusive and consensus based process.
28. We call upon Members to reaffirm in Nairobi, the Ministerial declarations and General Council Decisions relevant to the Doha mandates; and to take concrete steps to conclude the remaining issues in the DDA, with development as a key component.
29. We further call upon Members to ensure that post-Nairobi, all unresolved issues in the DDA on the development mandate are addressed and yield specific development milestones to conclude the DDA as soon as possible.
30. We urge the Tenth WTO Ministerial Conference (MC10) in Nairobi to give due consideration to the present Declaration.
With a binding coverage of non-agricultural tariff lines of less than 35 percent.
Comprised of the ACP, African and LDC Groups.
|
import sys
import time
import datetime
import argparse
import pr0gramm
import data_collection
import logging_setup
import logging
LOG = logging.getLogger(__name__)
def main():
    """Parse the command line, configure logging, and start the endless
    collection loop."""
    parser = argparse.ArgumentParser(
        description="Collect data from pr0gramm.com")
    parser.add_argument(
        "--no_sfw", help="disable SFW content", action="store_true")
    parser.add_argument(
        "--nsfw", help="enable NSFW content", action="store_true")
    parser.add_argument(
        "--nsfl", help="enable NSFL content", action="store_true")
    parser.add_argument(
        "--no_images", help="disable images", action="store_true")
    parser.add_argument(
        "--allow_videos", help="enable video content", action="store_true")
    parser.add_argument(
        "--last_id", "-id", help="the last promoted id use as anchor point", type=int, default=None)
    parser.add_argument("--age_threshold", "-age",
                        help="a submission must be the given amount of hours old to be downloaded (Default: 5)",
                        type=int, default=5)
    parser.add_argument("--min_num_of_tags", "-min",
                        help="a submission must have the given amount of tags to be downloaded (Default: 5)",
                        type=int, default=5)
    parser.add_argument(
        "--search_backwards", help="search for submission older than last_id, instead of newer", action="store_true")
    parser.add_argument("--media_directory", "-o",
                        help="the download directory for media content (images, videos)", type=str, default="/tmp")
    parser.add_argument("--annotation_file", "-ann",
                        help="the annotation file that should be created/edited for the downloaded media content",
                        type=str, default="/tmp/annotation.txt")
    parser.add_argument("--json_directory", "-jd",
                        help="the download directory for the retrieved json content", type=str, default="/tmp")
    parser.add_argument("--data_source", "-ds",
                        help="the type of source that should be used when downloading media (0=IMAGE, 1=THUMBNAIL, 2=FULL_SIZE), Default: 0",
                        type=int, choices=range(3), default=0)
    parser.add_argument("--no_download", "-nd",
                        help="disables downloading the media content for submissions", action="store_true")
    parser.add_argument(
        "--save_json", "-sj", help="enables saving the retrieved json content locally", action="store_true")
    parser.add_argument("--use_local_storage", "-l",
                        help="enables using previously locally stored json contents instead of retrieving remotely",
                        action="store_true")
    parser.add_argument(
        "--waiting_time", "-t", help="set the waiting time for lookups in hours (Default: 5)", type=int, default=5)
    # BUGFIX: these two arguments were declared as positionals combined with
    # a short option string ("logging_json_config", "-lc"), which argparse
    # rejects at definition time; they are optional flags with defaults.
    parser.add_argument("--logging_json_config", "-lc",
                        help="the logging json dictionary used to initialize the logging framework (Default: ../etc/logging.json)",
                        type=str,
                        default="../etc/logging.json")
    parser.add_argument("--logging_file", "-lf",
                        help="specify a log file, per default the log file is chosen based on the logging_json_config",
                        type=str,
                        default=None)
    # BUGFIX: parse before use -- the original read `args` before calling
    # parser.parse_args(), which raised NameError.
    args = parser.parse_args()
    logging_setup.setup_logging(args.logging_json_config, log_file=args.logging_file)
    run_collection_process(args)
def run_collection_process(args):
    """Run the collection loop forever.

    Collects one batch per iteration.  When a batch comes back empty the
    loop sleeps for ``args.waiting_time`` hours; otherwise it immediately
    fetches the next batch to catch up with the backlog.
    """
    collector = initialize_collector(args)
    waiting_time_in_seconds = args.waiting_time * 60 * 60
    while True:
        LOG.info("Start collecting from ID: {}.".format(collector.getLastId()))
        collector.collectDataBatch()
        LOG.info("Collected {0} item(s). Last ID: {1}".format(
            collector.getSizeOfLastBatch(),
            collector.getLastId()))
        if collector.getSizeOfLastBatch() <= 0:
            # Bug fix: the "Going to sleep" message used to be logged on
            # every iteration, even when no sleep happened; announce it
            # only when we actually wait.
            LOG.info("Going to sleep for {0} hours until {1}.".format(
                args.waiting_time,
                datetime.datetime.now() + datetime.timedelta(hours=args.waiting_time)))
            # TODO: give some status updates while waiting
            time.sleep(waiting_time_in_seconds)
def initialize_collector(args):
    """Create a DataCollector wired up according to the parsed CLI options."""
    data_collector = data_collection.DataCollector(initialize_api(args))

    # Resume point and selection criteria.
    data_collector.setLastId(args.last_id)
    data_collector.setAgeThreshold(hours=args.age_threshold)
    data_collector.setMinimumNumberOfTags(args.min_num_of_tags)
    if args.search_backwards:
        data_collector.useBackwardsSearch()

    # Output locations and download behaviour.
    data_collector.setMediaDirectory(args.media_directory)
    data_collector.setAnnotationFile(args.annotation_file)
    data_collector.setJsonDir(args.json_directory)
    data_collector.setDataSource(args.data_source)
    data_collector.setDownloadMedia(not args.no_download)
    data_collector.setSaveJSON(args.save_json)
    data_collector.setUseLocalStorage(args.use_local_storage)
    return data_collector
def initialize_api(args):
    """Build a pr0gramm API client configured from the CLI toggle flags."""
    api = pr0gramm.API()
    # Map each boolean flag to the API switch it controls; fire the
    # switches whose flags were set.  Order matches the original if-chain.
    toggles = (
        (args.no_sfw, api.disableSFW),
        (args.nsfw, api.enableNSFW),
        (args.nsfl, api.enableNSFL),
        (args.no_images, api.disableImages),
        (args.allow_videos, api.enableVideos),
    )
    for flag_set, switch in toggles:
        if flag_set:
            switch()
    return api
if __name__ == "__main__":
    # Script entry point: hand off to main() for argument parsing and the
    # collection loop.
    main()
|
X-ray devices produce radiation used for diagnostic and interventional purposes. The introduction of portable and digital x-ray devices to meet the rising demand from the home healthcare and point-of-care diagnostics market is the most significant trend observed in this market. Computed tomography systems (CT) or computerized axial tomography scanning (CAT scan) is an imaging procedure which utilizes X-rays to obtain tomographic images of targeted areas. CT scan devices play an integral role in cancer diagnosis and in guiding biopsy procedures. Increasing adoption of PACS (Picture Archiving and Communication System) and the introduction of high resolution CT scan equipment will serve the portable x-ray and CT scan devices market as two of the highest impact rendering drivers. PACS is a medical imaging technology which involves the electronic storage and transmission of diagnostic images. Cost reduction via the elimination of storage costs associated with x-ray films and hard copies and providing remote access to diagnostic images to medical practitioners are some of the advantages of this technology. Some of the other drivers of this market include a rapidly rising global geriatric population base and increasing incidence rates of target diseases such as cardiovascular diseases, cancer, tuberculosis and oral diseases.
High usage rates of portable devices in the X-ray segment and the introduction of handheld X-ray devices touting higher efficacies and applications in the field of dentistry are some of the factors accounting for its high market share. When in comparison with CT scan devices, X-ray devices hold higher market penetration rates owing to facts such as the latter is relatively cheaper, emit lower levels of radiation and is more user friendly. The global CT scan devices market on the other hand is expected to grow at a relatively faster CAGR of over 8.0% from 2013 to 2020. Its market share was valued at 34.4% in 2012. Its ability to capture multiple views in a series and to efficiently differentiate bone masses and elaborate soft tissues will serve this market as high impact rendering driver. Moreover, growing use of portable CT scan devices in interventional confirmation will help this market experience an expedited growth during the forecast period.
The portable X-ray devices market was valued at USD 3,067.0 million in 2012. The portable X-ray devices market was dominated by the mobile X-ray devices market in 2012 on account of factors such as the growing demand for bedside imaging and the growing base of geriatric population widening the base of immobile patients. Its market share was valued at over 90.0% in 2012. However, its market share will decline by 2020 on account of the rising demand for user-friendly handheld X-ray devices especially in the field of dentistry and the subsequent introduction of products such as the NOMAD Pro by Aribex. The global portable CT scan devices market was valued at USD 1,610.0 million in 2012. Mobile CT scan devices are now increasingly being used in diagnostic and interventional medical procedures. Some of the major uses of CT scan devices in interventional and diagnostic procedures in dental and orthopedic implant procedures to achieve precision, in gaining an anatomical assessment of the cochlear during cochlear implant procedures and in computing 3D locations of implanted electrodes in relation to the structure of the ear and in obtaining impeccable clarity pertaining to the anatomy of body parts and closely monitoring these parts while conducting plastic surgery procedures.
The computed radiography (CR) based portable X-ray devices market held the majority of the market share in 2012 at 39.6%. These devices are more user-friendly than digital radiography systems and so entail minimum re-training of radiologists. This makes the use of computed radiography systems a cost effective and time-saving affair. Moreover, faster image acquisition rates and the use of cheaper high-density line scan solid state detection systems or phosphor plates used in these systems are some of the drivers of this market. The global computed radiography systems market was valued at USD 1,214.8 million in 2012. However, the digital radiography based portable X-ray devices market are expected to grow at the highest CAGR during the forecast period owing to factors such as its high patient throughput rates, fastest image acquisition rates, good image quality and low radiation risks. The market is expected to grow at a CAGR of 8.8% from 2013 to 2020. CT scan devices, on the other hand, are dominated by medium slice scanners. The global medium slice scanners market was valued at USD 985.3 million in 2012. High usage rates of these devices due to their relative cost effectiveness (in comparison to high slice scanners) and better performance (in comparison to low slice scanners) are some of the factors accounting for its large market share.
The North American portable x-ray and CT scan devices market held the majority of the market share in 2011 at 30.0%. The demand for these devices stems from both hospital as well as clinic settings. Several market players have introduced hand-held x-ray devices with exceptional efficacy in North America in the past few years which has contributed significantly to the growth of the market. However, the Asia-Pacific market is expected to exhibit the highest CAGR during the forecast period. Its market is expected to grow at a CAGR of over 10.0% from 2013 to 2020. Some of the drivers of this market include the growing base of geriatric population, the presence of high unmet medical needs in countries such as Japan, China, and India. Moreover, growing market penetration rates of computed radiography systems especially because of its cost effectiveness in this region will propel the growth of this market.
Philips Healthcare dominated the global portable X-ray devices market with a market share of over 17% in 2012. Philips Healthcare possesses an extensive product portfolio catering to the x-ray segment ranging from stationary x-ray systems to digital and portable x-ray systems. Moreover, the introduction of innovative products capitalizing on customer preferences such as the introduction of Practix 360, which is a portable x-ray machine and is 30% lighter than the conventional portable x-ray machines, also helps it maintain its market position. Some of the other players of this market include GE Healthcare, Siemens Healthcare, Hitachi Medical Systems, Varian Medical Systems, Shimadzu Corporation and others.
|
# encoding: utf-8
"""
Test lldb data formatter subsystem.
"""
from __future__ import print_function
import lldb
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil
class ObjCDataFormatterTestCase(TestBase):
    """Exercises lldb's data formatters against Objective-C AppKit types."""

    # Test-suite bookkeeping: the directory this test file lives in.
    mydir = TestBase.compute_mydir(__file__)

    def appkit_tester_impl(self, commands):
        """Build the inferior, run to the common breakpoint, then execute
        *commands* — a callable holding the per-test formatter checks."""
        self.build()
        self.appkit_common_data_formatters_command()
        commands()

    def appkit_common_data_formatters_command(self):
        """Test formatters for AppKit classes."""
        # Launch and stop at the source-marked breakpoint in main.m; the
        # process/thread/bkpt results are unused here but returned by the
        # helper.
        self.target, process, thread, bkpt = lldbutil.run_to_source_breakpoint(
            self, '// Set break point at this line.',
            lldb.SBFileSpec('main.m', False))

        # The stop reason of the thread should be breakpoint.
        self.expect(
            "thread list",
            STOPPED_DUE_TO_BREAKPOINT,
            substrs=['stopped', 'stop reason = breakpoint'])

        # This is the function to remove the custom formats in order to have a
        # clean slate for the next test case.
        def cleanup():
            # check=False: the clears must not fail the test if nothing is set.
            self.runCmd('type format clear', check=False)
            self.runCmd('type summary clear', check=False)
            self.runCmd('type synth clear', check=False)

        # Execute the cleanup function during test case tear down.
        self.addTearDownHook(cleanup)
|
Thanks to local dog lovers, Dogs Trust Leeds has helped 500 dogs enjoy their home comforts and lots of TLC in foster care before heading off to their forever homes.
The York Road rehoming centre runs a Home from Home fostering scheme for dogs who they feel particularly benefit from being in a home rather than kennels whilst they are waiting to find a forever family.
One such dog was Staffordshire Bull Terrier puppy, Winston, who was the 500th dog to be placed in foster before finding a forever home. Winston was found as a stray at just four weeks old so after being taken to the rehoming centre by the dog warden, he was immediately placed in foster care with Dianne Ellener and Mick Johnston from Leeds.
“Dianne and Mick have been fostering for us for three years and Winston was the 18th dog they looked after. They had cared for puppies before so when beautiful Winston came to us, I immediately called them and without hesitation they said they’d take care of him.
The scheme was set up in 2013 and as well as young puppies, the dogs that are looked after by foster carers tend to be poorly or pregnant dogs, strays and older dogs and those that might find life in kennels a little overwhelming.
“I am a qualified veterinary nurse so we are often asked to look after older dogs or those that are poorly, or young puppies like Winston. We have hand reared puppies too and fell in love with Darcy when we were hand rearing her so adopted her!
If you would like to find out more about becoming a doggie foster carer, please click here.
It's a white Christmas at Dogs Trust!
|
from django.forms import ModelChoiceField, ModelMultipleChoiceField
from .widgets import TypeaheadInput, MultipleTypeaheadInput
class TypeaheadField(ModelChoiceField):
    """Single-select model choice field rendered as a typeahead text input."""

    def __init__(self, queryset, builder=False, required=True, label=None,
                 initial=None, help_text='', limit_choices_to=None,
                 *args, **kwargs):
        # Build the widget first so the queryset/builder pairing is explicit.
        widget = TypeaheadInput(queryset=queryset, builder=builder)
        super(TypeaheadField, self).__init__(
            queryset,
            required=required,
            widget=widget,
            label=label,
            initial=initial,
            help_text=help_text,
            limit_choices_to=limit_choices_to,
            empty_label='',
            *args, **kwargs
        )
class MultipleTypeaheadField(ModelMultipleChoiceField):
    """A Typeahead Multiple choice field for Tags.

    Mirrors :class:`TypeaheadField` but allows selecting several model
    instances through a multi-value typeahead widget.
    """

    def __init__(self, queryset, builder=False, required=True, label=None,
                 initial=None, help_text='', limit_choices_to=None,
                 *args, **kwargs):
        # Bug fix: limit_choices_to was accepted but silently dropped;
        # forward it to the parent field like TypeaheadField does.
        super(MultipleTypeaheadField, self).__init__(
            queryset, required=required,
            widget=MultipleTypeaheadInput(queryset=queryset, builder=builder),
            label=label, initial=initial, help_text=help_text,
            limit_choices_to=limit_choices_to, *args, **kwargs
        )
|
Two years down the line I have a sealed consent order; this was for a sum of money to be paid to me by October, and it has been broken (the next part states the house is to be sold). However, I was using a fixed-fee solicitor as I could not afford legal fees at the time, and they now will not enforce the order without payment for counsel upfront; they have suggested I find a litigation solicitor in my area. What do I do?
I think the enforcement form is a D50K; it costs about £150 to apply.
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#--------------------------------------------------------------------------------------------------
# pyOwaspBELVA - Contextual custom dictionary builder with character and word variations for pen-testers
# Copyright (C) 2016 OWASP Foundation / Kenneth F. Belva
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#--------------------------------------------------------------------------------------------------
#--------------------------------------------------------------------------------------------------
# This project is named after my amazing father:
# Albert Joseph BELVA
#
# And, it is dedicated to him and his memory.
#
# This dedication and project is to raise awareness for
# Lewy Body Disease / Dementia which my father lived with
# since his mid-60s until his passing at 72.
#
# More information on Lewy Body Dementia may be found here:
# https://en.wikipedia.org/wiki/Dementia_with_Lewy_bodies
#
# Please add this dedication to every file in the project.
# Thank you much. -Ken
#--------------------------------------------------------------------------------------------------
import os, time, datetime, sys, sqlite3
from PyQt4 import QtGui
# converted Qt4 UI from Qt Converter & cmd; pyuic4 design.ui -o design.py
import src.gui.design
from src.db.belvaDbInitalize import belvaInitDB
from src.db.belvaDbInitalize import belvaRemoveDB
from src.db.belvaSqlDBroutines import count_text_words
from src.db.belvaSqlDBroutines import count_burp_words
from src.db.belvaSqlDBroutines import count_zap_words
from src.db.belvaSqlDBroutines import get_all_burp_words
from src.db.belvaSqlDBroutines import create_consolidated_list
from src.db.belvaSqlDBroutines import count_consolidated_list
from src.db.belvaSqlDBroutines import get_all_consolidated_words
from src.pluginSystem.pluginControlSystem import get_policy_mutate_names
from src.pluginSystem.pluginControlSystem import get_policy_select_names
from src.pluginSystem.pluginControlSystem import get_substitution_names
from src.pluginSystem.pluginControlSystem import return_substitution_dict
from src.threadQueue.aptQueueControlSystem import send_words_to_queue
from src.belvaCommonRoutines import iterative_function
from src.belvaCommonRoutines import get_positions
from src.dataImport.belvaDataImport import belvaDataImport
#--------------------------------------------------------------------------------------------------
class BELVA_AppUI(QtGui.QMainWindow, src.gui.design.Ui_MainWindow):
    """Main application window for the BELVA custom-dictionary builder.

    Combines the Qt Designer generated form (``Ui_MainWindow``) with the
    import -> filter -> mutate -> output pipeline that produces the final
    wordlist file (driven from :meth:`run_belva`).
    """
#--------------------------------------------------------------------------------------------------

    def __init__(self):
        super(self.__class__, self).__init__()
        # Instantiate the Qt Designer widget tree onto this window.
        self.setupUi(self)

        #UPDATE WINDOW BAR GUI VERSION NUMBER

        # Static help/contact text shown in the help pane.
        self.textBrowser_help_text.append("Follow / Contact me on Twitter: @infosecmaverick")
        self.textBrowser_help_text.append("Help on the OWASP Project Page: http://bit.ly/1okrO1T")
#        self.textBrowser_help_text.append(" https://www.owasp.org/index.php/OWASP_Basic_Expression_%26_Lexicon_Variation_Algorithms_%28BELVA%29_Project")
        self.textBrowser_help_text.append("Topics will include:")
        self.textBrowser_help_text.append("    How to import burp xml files for org specific content")
        self.textBrowser_help_text.append("    How to import ZAP raw files for org specific content")
        self.textBrowser_help_text.append("    How to create user id combinations")
        self.textBrowser_help_text.append("    How to write a plugin")
        # Scroll back to the top so the first help lines are visible.
        self.textBrowser_help_text.moveCursor(QtGui.QTextCursor.Start)

        self.progressBar.setValue(0)

        #set the default directory to the localized importExternalSources Folder
#        current_directory = os.getcwd()
        # Resolve the project root from this module's location so defaults
        # work regardless of the current working directory (handles both
        # POSIX and Windows path separators).
        current_directory = os.path.dirname(os.path.abspath(__file__))
        current_directory = current_directory.replace("/src/gui", "")
        current_directory = current_directory.replace("\src\gui", "")
        self.lineEdit_input_src_dir.setText(current_directory + "/importExternalSources/")

        #set the default directory to the localized outputFile Folder
        self.lineEdit_output_src_dir.setText(current_directory+ "/outputFile/output.txt")

        #load boxes....
        # NOTE(review): policy_names is assigned here but never used below.
        policy_names = []
        subsuitition_names = []

        # Plug-in discovery: each call returns a mapping of
        # plugin name -> human-readable description.
        policy_mutate_names = get_policy_mutate_names()
        policy_select_names = get_policy_select_names()
        subsuitition_names = get_substitution_names()

        # Populate the three selection list boxes with the descriptions.
        for policy_name in policy_mutate_names:
            self.listWidget_policies_mutate.addItem(policy_mutate_names[policy_name])
        for policy_name in policy_select_names:
            self.listWidget_policies_select.addItem(policy_select_names[policy_name])
        for subsuitition_name in subsuitition_names:
            self.listWidget_substitutions.addItem(subsuitition_names[subsuitition_name])

        self.pushButton_input_src_dir.clicked.connect(self.input_src_dir)  # When the button is pressed
        self.pushButton_output_src_dir.clicked.connect(self.output_src_dir)  # When the button is pressed
        self.pushButton_run_belva.clicked.connect(self.run_belva)  # When the button is pressed

    def form_checks(self):
        """Validate the form before a run; return True when input is OK.

        Currently a placeholder that always passes.
        """
        # default value should be false
        passed_checks = False

        # we can put error checking here
        passed_checks = True

        return passed_checks

    #=================================================
    # assuming we pass the checks, we write an API layer into UI design
    #=================================================
    def run_belva(self):
        """Build the custom dictionary from the selected sources and plug-ins.

        Pipeline: walk the input directory, split files into "small" and
        "large" sets, import and deduplicate the small files through a
        scratch database, then push every word through the selected
        substitution and policy plug-ins into the output file.  Large
        (>= 10 MB) ``.txt`` files are streamed line-by-line in batches
        instead of being loaded into the database.
        """
        if self.form_checks():

#            self.textBrowser_results_window.clear() # In case there are any existing elements in the list

            # Reset the progress bar and both status panes for a fresh run.
            self.progressBar.setValue(0)
            self.textBrowser_status_msgs.clear()
            self.textBrowser_status_msgs_brief.clear()

            input_directory = self.lineEdit_input_src_dir.text()
            output_file = self.lineEdit_output_src_dir.text()

            # Aliases handed to the import layer so it can update the GUI.
            global_gui_status_msgs = self.textBrowser_status_msgs
            global_gui_status_msgs_brief = self.textBrowser_status_msgs_brief
            global_gui_progressBar = self.progressBar

            start_time = time.time()
            # your code
            # global_gui_status_msgs, global_gui_status_msgs_brief, global_gui_progressBar,

            #------------------------------------
            # This should really be passed in via parameters but need to
            # research signals and slots for QT4... until then....
            #------------------------------------
            policy_mutate_names = []
            policy_select_names = []
            subsuitition_names = []

            # plugin name -> human-readable description mappings
            policy_mutate_names = get_policy_mutate_names()
            policy_select_names = get_policy_select_names()
            subsuitition_names = get_substitution_names()
            #------------------------------------

            #------------------------------------
            # Create database to normalize data and have unique words
            #------------------------------------
            MD5_string = belvaInitDB()

            #idea - have form to auto generate substitution and policy plugins...

            # Collect the descriptions the user highlighted in each list box.
            policy_mutate_descriptions_selected = []
            for policy_description_selected in self.listWidget_policies_mutate.selectedItems():
                policy_mutate_descriptions_selected.append(policy_description_selected.text())

            policy_select_descriptions_selected = []
            for policy_description_selected in self.listWidget_policies_select.selectedItems():
                policy_select_descriptions_selected.append(policy_description_selected.text())

            substitution_descriptions_selected = []
            for substitution_description_selected in self.listWidget_substitutions.selectedItems():
                substitution_descriptions_selected.append(substitution_description_selected.text())

            #------------------------------------
            # Translate Descriptions back into plugin names
            #------------------------------------
            # Reverse lookup: selected description -> plugin module name.
            policy_mutate_plugin_names = []
            for policy_description in policy_mutate_descriptions_selected:
                for policy_name in policy_mutate_names:
                    if policy_mutate_names[policy_name] == policy_description:
                        policy_mutate_plugin_names.append(policy_name)

            policy_select_plugin_names = []
            for policy_description in policy_select_descriptions_selected:
                for policy_name in policy_select_names:
                    if policy_select_names[policy_name] == policy_description:
                        policy_select_plugin_names.append(policy_name)

            substitution_plugin_names = []
            for substitution_description in substitution_descriptions_selected:
                for substitution_name in subsuitition_names:
                    if subsuitition_names[substitution_name] == substitution_description:
                        substitution_plugin_names.append(substitution_name)

            #------------------------------------
            # Get files to import / separate large from small
            #------------------------------------
            small_filename_dict = {}
            large_filename_dict = {}
            for root, directories, filenames in os.walk(input_directory):
                for filename in filenames:
                    full_path_w_file = os.path.join(root,filename)
                    filename, file_extension = os.path.splitext(full_path_w_file)
#                    small_filename_dict[full_path_w_file] = file_extension
#                    filename = os.path.basename(full_path_w_file)
                    # 10 MB
                    # Large plain-text files are streamed later instead of
                    # being imported into the scratch database.
                    if ((os.path.getsize(full_path_w_file) >= 10485760) and (file_extension == '.txt')):
                        large_filename_dict[full_path_w_file] = file_extension
                    else:
                        small_filename_dict[full_path_w_file] = file_extension

            #------------------------------------
            # Get words to filter
            #------------------------------------
            remove_common_words = []
#            common_words_dir = os.getcwd() + "/filterDictionaries/"
            common_words_dir = os.path.dirname(os.path.abspath(__file__))
            common_words_dir = common_words_dir.replace("/src/gui", "")
            common_words_dir = common_words_dir.replace("\src\gui", "")
            common_words_dir = common_words_dir + "/filterDictionaries/"

            # Every non-blank line of every file under filterDictionaries/
            # is a word to exclude from the output (stored lower-case).
            for root, directories, filenames in os.walk(common_words_dir):
                for filename in filenames:
                    full_path_w_file = os.path.join(root,filename)
                    f = open(full_path_w_file,'r')
                    for line in f:
                        if str(line).strip():
                            remove_common_words.append(str(line).strip().lower())
                    f.close()
                    f = None
            #-------------------

            #------------------------------------
            # Import Data from Wordlists, ZAP and burp
            #------------------------------------
            self.textBrowser_status_msgs.append("Starting: reading through files...")
            self.textBrowser_status_msgs.append("Starting: removing common words...")
            self.textBrowser_status_msgs.append("Starting: creating temp word dictionary...")

            # Import the small files; returns the deduplicated word list.
            all_consolidated_words = belvaDataImport(global_gui_status_msgs, global_gui_status_msgs_brief, global_gui_progressBar, small_filename_dict, MD5_string, remove_common_words)

            total_word_count = len(all_consolidated_words)

            self.textBrowser_status_msgs_brief.clear()
            self.textBrowser_status_msgs.append("Total Number of Unique Consolidated Words for small files: " + str(total_word_count))

            # no words found!

#            gui.belva_qt4_global_gui_vars.global_gui_window = self.textBrowser_results_window
#            gui.belva_qt4_routines_delete.run_app(nmap_text, masscan_network_text, masscan_ports_text)

            #------------------------------------
            # Set progress bar for end user info
            #------------------------------------
            self.progressBar.setMinimum(0)
            self.progressBar.setMaximum(int(total_word_count))
            self.progressBar.setValue(0)

            count = 0
            # NOTE(review): positions_ds is assigned but never used below.
            positions_ds = {}
            subsitution_dictionary = {}

#            all_consolidated_words = get_all_consolidated_words(MD5_string)

            self.textBrowser_status_msgs.append("Mutating finalized temp word dictionary for small files...")

            for substitution_plugin_name in substitution_plugin_names:

                #------------------------------------
                # retrieve dictionary from substitution selected
                #------------------------------------
                subsitution_dictionary = return_substitution_dict(substitution_plugin_name)

                self.textBrowser_status_msgs.append("Using substitution plug-in: " + substitution_plugin_name)

                #------------------------------------
                # cycle through finalized list of words
                #------------------------------------
                # Batch size: ~1/20th of the total (presumably tuned for the
                # queue layer — TODO confirm against send_words_to_queue).
                if int(total_word_count) > 20:
                    break_up_queue = round(int(total_word_count) / 20)
                else:
                    break_up_queue = int(total_word_count)

                if all_consolidated_words:
                    for word in all_consolidated_words:
                        # the illusion of progress
                        count = self.progressBar.value() + 1
                        self.progressBar.setValue(count)
                        self.textBrowser_status_msgs_brief.setText("Now processing word " + str(count) + " of " + str(total_word_count) + " : " + str(word).strip())

                    # Hand the whole consolidated list to the worker queue
                    # for this substitution plug-in.
                    send_words_to_queue(all_consolidated_words, subsitution_dictionary, policy_mutate_plugin_names, policy_select_plugin_names, output_file)

            #------------------------------------
            # process large files
            #------------------------------------
            self.textBrowser_status_msgs_brief.clear()

            if large_filename_dict:
                self.textBrowser_status_msgs.append("Now processing large files...")

                for full_path in large_filename_dict:

                    # Count the lines first so the progress bar can be scaled.
                    total_word_count = -1
                    with open(full_path, 'r', errors='replace') as f:
                        for total_word_count, l in enumerate(f):
                            pass
                    if total_word_count == -1:
                        total_word_count = 0
                    elif total_word_count >= 0:
                        total_word_count += 1

                    break_up_queue = round(total_word_count / 20)

                    filename = os.path.basename(full_path)
                    self.textBrowser_status_msgs.append("Now processing large file: " + str(filename) + " with a word count of: " + str(total_word_count))

                    self.progressBar.setMinimum(0)
                    self.progressBar.setMaximum(int(total_word_count))
                    self.progressBar.setValue(0)
                    count = 0

                    f = open(full_path,'r', errors='replace')

                    words_array = []
                    for line in f:
                        count = self.progressBar.value() + 1
                        self.progressBar.setValue(count)
                        self.textBrowser_status_msgs_brief.setText("Processing through word " + str(count) + " of " + str(total_word_count) + " : " + str(line).strip())

                        if str(line).strip():
                            if not(str(line).strip() in remove_common_words):
                                words_array.append(str(line).strip())

                        # Flush a full batch through every selected
                        # substitution plug-in, then start a new batch.
                        if len(words_array) == break_up_queue:
                            for substitution_plugin_name in substitution_plugin_names:
                                subsitution_dictionary = return_substitution_dict(substitution_plugin_name)
                                send_words_to_queue(words_array, subsitution_dictionary, policy_mutate_plugin_names, policy_select_plugin_names, output_file)
                            words_array = []

                    # Flush whatever is left over after the last full batch.
                    if (len(words_array) <= break_up_queue) and (len(words_array) > 0) :
                        for substitution_plugin_name in substitution_plugin_names:
                            subsitution_dictionary = return_substitution_dict(substitution_plugin_name)
                            send_words_to_queue(words_array, subsitution_dictionary, policy_mutate_plugin_names, policy_select_plugin_names, output_file)

                    f.close()
                    f = None

            # total word count for output file...
            total_word_count = -1
            with open(output_file, 'r', errors='replace') as f:
                for total_word_count, l in enumerate(f):
                    pass
            if total_word_count == -1:
                total_word_count = 0
            elif total_word_count >= 0:
                total_word_count += 1

            elapsed_time = time.time() - start_time

            self.textBrowser_status_msgs_brief.clear()
            self.textBrowser_status_msgs.append("Finished Mutating temp word dictionary")

            #------------------------------------
            # Clean up temporary files
            #------------------------------------
            self.textBrowser_status_msgs.append("Cleaning up temporary data....")
            belvaRemoveDB(MD5_string)

            self.textBrowser_status_msgs.append("Please Find the final custom dictionary here:")
            self.textBrowser_status_msgs.append(output_file)
            self.textBrowser_status_msgs.append("Total number of words in output file: " + str(total_word_count))
            self.textBrowser_status_msgs.append("Elapsed run time: " + str(datetime.timedelta(seconds=int(elapsed_time))))
            self.textBrowser_status_msgs.append("FINISHED!!!")

    def input_src_dir(self):
        """Slot: let the user pick the input source directory."""
        directory = QtGui.QFileDialog.getExistingDirectory(self,"Pick a folder")
        self.lineEdit_input_src_dir.clear()
        self.lineEdit_input_src_dir.setText(directory)

    def output_src_dir(self):
        """Slot: let the user pick the output wordlist file."""
        output_file = QtGui.QFileDialog.getOpenFileName(self,"Pick Output File")
        self.lineEdit_output_src_dir.clear()
        self.lineEdit_output_src_dir.setText(output_file)
def launch_gui():
    """Create the Qt application, show the BELVA window, and block until exit."""
    application = QtGui.QApplication(sys.argv)
    main_window = BELVA_AppUI()
    main_window.show()
    application.exec_()
|
DAY 30 Shaped for Serving God Your hands shaped me and made me. Job 10:8 (NIV) The people I have shaped for myself will broadcast my praises. Isaiah 43:21 (NJB) You were shaped to serve God. God formed every creature on this planet with a special area of expertise. Some animals run, some hop, some swim, some burrow, and some fly. Each has a particular role to play, based on the way they were shaped by God. The same is true with humans. Each of us was uniquely designed, or "shaped, 'to do certain things. Before architects design any new building they first ask, "What will be its purpose? How will it be used?" The intended function always determines the form of the building. Before God created you, he decided what role he wanted you to play on earth. He planned exactly how he wanted you to serve him, and then he shaped you for those tasks. You are the way you are because you were made for a specific ministry. The Bible says, "We are God's workmanship, created in Christ Jesus to do good works. "I Our English word poem comes from the Greek word translated "workmanship." You are God's handcrafted work of art. You are not an assembly-line product, mass produced without thought. You are a custom-designed, one-of-a-kind, original masterpiece. God deliberately shaped and formed you to serve him in a way that makes your ministry unique. He carefully mixed the DNA cocktail that created you. David praised God for this incredible personal attention to detail: "You made all the delicate, inner parts of my body and knit me together in my mother's womb. Thank you for making me so wonderfully complex! Your workmanship is marvelous." As Ethel Waters said, "God doesn't make junk." Not only did God shape you before your birth, he planned every day of your life to support his shaping process. David continues, "Every day of my life was recorded in your book. Every moment was laid out before a single day had passed." This means that nothing that happens in your life is insignificant. 
God uses all of it to mold you for your ministry to others and shape you for your service to him. God never wastes anything. He would not give you abilities, interests, talents, gifts, personality, and life experiences unless he intended to use them for his glory. By identifying and understanding these factors you can discover God's will for your life. The Bible says you are "wonderfully complex.' You arc a combination of many different factors. To help you remember five of these factors, I have created a simple acrostic: SHAPE. In this chapter and the next we will look at these five factors, and following that, I will explain how to discover and use your shape. HOW GOD SHAPES YOU FOR YOUR MINISTRY Whenever God gives us an assignment, he always equips us with what we need to accomplish it. This custom combination of capabilities is called your SHAPE: Spiritual gifts Heart Abilities Personality Experience SHAPE: UNWRAPPING YOUR SPIRITUAL GIFTS God gives every believer spiritual gifts to be used in ministry. These are special Godempowered abilities for serving him that are given only to believers. The Bible says, "Whoever does not have the Spirit cannot receive the gifts that come from God's Spirit." You can't earn your spiritual gifts or deserve them-that's why they are called gifts! They are an expression of God's grace to you. "Christ has generously divided out his gifts to us." Neither do you get to choose which gifts you'd like to have; God determines that. Paul explained, "It is the one and only Holy Spirit who distributes these gifts. He alone decides which gift each person should have." Because God loves variety and he wants us to be special, no single gift is given to everyone.' Also, no individual receives all the gifts. If you had them all, you'd have no need of anyone else, and that would defeat one of God's purposes-to teach us to love and depend on each FOR other. 
Your spiritual gifts were not given for your own benefit but for the benefit of others, just as other people were given gifts for your benefit. The Bible says, "A spiritual gift is given to each of us as a means of helping the entire church." God planned it this way so we would need each other. When we use our gifts together, we all benefit. If others don't use their gifts, you get cheated, and if you don't use your gifts, they get cheated. This is why we're commanded to discover and develop our spiritual gifts. Have you taken the time to discover your spiritual gifts? An unopened gift is worthless. Whenever we forget these basic truths about gifts, it always causes trouble in the church. Two common problems are 'gift-envy' and 'gift-projection. "The first occurs when we compare our gifts with others', feel dissatisfied with what God gave us, and become resentful or jealous of how God uses others. The second problem happens when we expect everyone else to have our gifts, do what we are called to do, and feel as passionate about it as we do. The Bible says, "There are different kinds of service in the church, but it is the same Lord we are serving." Sometimes spiritual gifts are overemphasized to the neglect of the other factors God uses to shape you for service. Your gifts reveal one key to discovering God's will for your ministry, but your spiritual gifts are not the total picture. God has shaped you in four other ways, too. SHAPE: LISTENING TO YOUR HEART The Bible uses the term heart to describe the bundle of desires, hopes, interests, ambitions, dreams, and affections you have. Your heart represents the source of all your motivations-what you love to do and what you care about most. Even today we still use the word in this way when we say, "I love you with all my heart." The Bible says, "As a face is reflected in water, so the heart reflects the person." 
Your heart reveals the real you-what you truly are, not what others think you are or what circumstances force you to be. Your heart determines why you say the things you do, why you feel the way you do, and why you act the way you do. Physically, each of us has a unique heartbeat. Just as we each have unique thumbprints, eye prints, and voice prints, our hearts beat in slightly different patterns. It's amazing that out of all the billions of people who have ever lived, no one has had a heartbeat exactly like yours. In the same way, God has given each of us a unique emotional "heartbeat" that races when we think about the subjects, activities, or circumstances that interest us. We instinctively care about some things and not about others. These are clues to where you should be serving. Another word for heart is passion. There are certain subjects you feel passionate about and others you couldn't care less about. Some experiences turn you on and capture your attention while others turn you off or bore you to tears. These reveal the nature of your heart. When you were growing up, you may have discovered that you were intensely interested in some subjects that no one else in your family cared about. Where did those interests come from? They came from God. God had a purpose in giving you these inborn interests. Your emotional heartbeat is the second key to understanding your shape for service. Don't ignore your interests. Consider how they might be used for God's glory. There is a reason that you love to do these things. Repeatedly the Bible says to "serve the Lord with all your heart." God wants you to serve him passionately, not dutifully. People rarely excel at tasks they don't enjoy doing or feel passionate about. God wants you to use your natural interests to serve him and others. Listening for inner promptings can point to the ministry God intends for you to have. How do you know when you are serving God from your heart? The first telltale sign is enthusiasm. 
When you are doing what you love to do, no one has to motivate you or challenge you or check up on you. You do it for the sheer enjoyment. You don't need rewards or applause or payment, because you love serving in this way. The opposite is also true: When you don't have a heart for what you're doing, you are easily discouraged. The second characteristic of serving God from your heart is effectiveness. Whenever you do what God wired you to love to do, you get good at it. Passion drives perfection. If you don't care about a task, it is unlikely that you will excel at it. In contrast, the highest achievers in any field are those who do it because of passion, not duty or profit. We have all heard people say, "I took a job I hate in order to make a lot of money, so someday I can quit and do what I love to do." That's a big mistake. Don't waste your life in a job that doesn't express your heart. Remember, the greatest things in life are not things. Meaning is far more important than money. The richest man in the world once said, "A simple life in the fear-of-God is better than a rich life with a ton of headaches." Don't settle for just achieving "the good life," because the good life is not good enough. Ultimately it doesn't satisfy. You can have a lot to live on and still have nothing to live for. Aim instead for "the better life"-serving God in a way that expresses your heart. Figure out what you love to do-what God gave you a heart to do-and then do it for his glory. DAY THIRTY THINKING ABOUT MY PURPOSE Point to Ponder: I was shaped for serving God. Verse to Remember: "God works through different men in different ways, but it is the same God who achieves his purposes through them all." 1 Corinthians 12:6 (Ph) Question to Consider: In what way can I see myself passionately serving others and loving it?
|
#Make/update positional index for a given file
#The index has the form
#word,word_freq:doc1,freq_in_doc1(pos1,pos2,...posn);doc2,freq_in_doc2(pos1,pos2,...posn), etc
#to,993427:23,2(3,6);35,1(34)
import sys
import os
import re
path_to_PeARS = os.path.dirname(__file__)  # Directory of this script; pear folders live beneath it
index={} #Global positional index, of the form word -> WordEntry (aggregated over all documents)
individual_index={} #Positional index holding only the single file currently being processed
word_positions={} #Scratch map word -> [positions] for the document being read; cleared per run
#WordEntry contains all the information pertaining to a word
#(freq, docs in which it appears)
class WordEntry:
    """Everything known about one word.

    Attributes:
        freq: total number of occurrences across all indexed documents.
        docs: list of [docname, freq_in_doc, positions] records, one per
            document the word appears in.
    """

    def __init__(self, freq):
        # Seed with the corpus-wide frequency; document records are
        # appended later as documents are folded in.
        self.freq = freq
        self.docs = []
#Load existing index file
def load_index(path_to_index):
    """Populate the global index from an existing index file.

    Each line has the form
    ``word,word_freq:doc1,freq1[pos1, pos2];doc2,freq2[...];``
    (positions are serialized via str(list)).  Malformed lines are
    skipped.  The file is now closed deterministically (the original
    leaked the handle) and the bare ``except:`` has been narrowed.
    """
    with open(path_to_index) as index_file:
        for line in index_file:
            try:
                line = line.rstrip('\n')
                pair = line.split(':')
                word_freq = pair[0].split(',')
                word = word_freq[0]
                freq = int(word_freq[1])
                index[word] = WordEntry(freq)
                docs = pair[1].rstrip(';')  # Remove trailing ; before splitting
                docs = docs.split(';')
                for d in docs:
                    name = d.split(',')[0]
                    # Per-document frequency sits right after the first comma.
                    m = re.search(r',([0-9]+)', d)
                    dfreq = 0
                    if m:
                        dfreq = int(m.group(1))
                    # Positions were written with str(list), e.g. "[3, 6]".
                    m = re.search(r',[0-9]+\[(.*)\]', d)
                    positions = []
                    if m:
                        positions = [int(p) for p in m.group(1).split(", ")]
                    index[word].docs.append([name, dfreq, positions])
            except Exception:
                # Skip unparseable lines instead of aborting the load.
                continue
##########################################
#Read new document
##########################################
def readWordPositions(input_file):
    """Record, in the global word_positions dict, the 1-based word
    offsets at which each word occurs in input_file.

    The file is now closed deterministically via ``with`` (the original
    leaked the handle).
    """
    c = 0  # Running word counter (position) for this document
    with open(input_file, 'r') as f:
        for line in f:
            line = line.rstrip('\n')
            for w in line.split():
                # Truncate tagged tokens 'stem_X...' to 'stem_X'
                # (keeps only the first character after the underscore).
                m = re.search('(.*_.).*', w)
                if m:
                    w = m.group(1)
                c += 1
                word_positions.setdefault(w, []).append(c)
def _merge_positions(target_index, word, positions, docname):
    """Fold one word's positions for docname into target_index."""
    if word not in target_index:
        entry = WordEntry(len(positions))
    else:
        entry = target_index[word]
        entry.freq += len(positions)
    entry.docs.append([docname, len(positions), positions])
    target_index[word] = entry


def mkWordEntries(docname):
    """Merge the word_positions gathered for docname into both the
    global index and the per-document individual_index.

    The two previously copy-pasted branches now share _merge_positions.
    """
    for k, v in word_positions.items():
        _merge_positions(index, k, v, docname)
        _merge_positions(individual_index, k, v, docname)
def writeIndex(path_to_index):
    """Serialize the global index to path_to_index, one word per line:
    ``word,freq:doc,dfreq[pos1, pos2];...;``

    The file is now closed (and therefore flushed) deterministically;
    the original never closed it, risking truncated output.
    """
    with open(path_to_index, 'w') as out:
        for k, v in index.items():
            parts = [k + "," + str(v.freq) + ":"]
            for d in v.docs:
                parts.append(d[0] + "," + str(d[1]) + str(d[2]) + ";")
            out.write("".join(parts) + '\n')
def writeIndividualIndex(path_to_ind_index):
    """Serialize the per-document individual_index to path_to_ind_index
    in the same line format as writeIndex.

    The file is now closed (and therefore flushed) deterministically;
    the original never closed it.
    """
    with open(path_to_ind_index, 'w') as out:
        for k, v in individual_index.items():
            parts = [k + "," + str(v.freq) + ":"]
            for d in v.docs:
                parts.append(d[0] + "," + str(d[1]) + str(d[2]) + ";")
            out.write("".join(parts) + '\n')
def runScript(a1,a2):
input_file=a1
pear = a2 #The pear which will host these pages (local folder or Raspberry Pi)
index.clear()
individual_index.clear()
word_positions.clear()
if os.path.exists(input_file):
path_to_index=os.path.join(path_to_PeARS, pear+"/index.txt")
if os.path.exists(path_to_index):
load_index(path_to_index)
m=re.search(".*\/(.*)\.txt",input_file)
docname=m.group(1)
path_to_ind_index=os.path.join(path_to_PeARS, pear+"/indexes/"+docname+".txt")
readWordPositions(input_file)
mkWordEntries(docname)
writeIndex(path_to_index)
writeIndividualIndex(path_to_ind_index)
else:
print "ERROR: file",input_file,"does not exist. Bye."
# when executing as script
if __name__ == '__main__':
    # argv[1]: path to the input .txt file; argv[2]: target pear folder
    runScript(sys.argv[1],sys.argv[2]) #Input file and pear
|
The 2019 RBC Heritage presented by Boeing will take place at the Harbour Town Golf Links. The RBC Heritage is the PGA TOUR’s only event in South Carolina, first played in 1969. Make plans now to “Get Your Plaid On!” (843) 671-2448 or www.rbcheritage.com.
Forsythe Jewelers invites you to our Roberto Coin ‘Pop-up Shop’ located at Wine At Nine during the RBC Heritage Presented by Boeing. Sip and shop Wednesday through Sunday and experience the newest collections and one-of-a-kind pieces from this world-renowned Italian designer. forsythejewelers.biz.
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
import codecs
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
__version__ = "1.5.2"  # Single source of truth for the package version
def read(fname):
    """Return the UTF-8 decoded contents of *fname*, resolved relative
    to this setup.py's directory.

    The handle is now closed deterministically via ``with`` (the
    original leaked it).
    """
    path = os.path.join(os.path.dirname(__file__), fname)
    with codecs.open(path, "r", "utf-8") as handle:
        return handle.read()
readme = read("README.md")
history = read("HISTORY.md")
# Release helper: "python setup.py publish" builds, uploads and tags.
if sys.argv[-1] == "publish":
    try:
        import wheel
        import twine
    except ImportError:
        # Narrowed from a bare except: only a failed import should
        # trigger the install hint.
        raise ImportError('Run "pip install wheel twine"')
    os.system("python setup.py sdist bdist_wheel")
    os.system("twine upload dist/*")
    os.system("git tag -a %s -m 'version %s'" % (__version__, __version__))
    os.system("git push --tags")
    sys.exit()
setup(
    name="cached-property",
    version=__version__,
    description="A decorator for caching properties in classes.",
    long_description=readme + "\n\n" + history,
    # "text/x-md" is not a content type PyPI accepts; Markdown READMEs
    # must be declared as "text/markdown" (see PEP 566 / PyPI docs).
    long_description_content_type="text/markdown",
    author="Daniel Greenfeld",
    author_email="pydanny@gmail.com",
    url="https://github.com/pydanny/cached-property",
    py_modules=["cached_property"],
    include_package_data=True,
    license="BSD",
    zip_safe=False,
    keywords="cached-property",
    classifiers=[
        "Development Status :: 5 - Production/Stable",
        "Intended Audience :: Developers",
        "License :: OSI Approved :: BSD License",
        "Natural Language :: English",
        "Programming Language :: Python :: 2",
        "Programming Language :: Python :: 2.7",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.5",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
    ],
)
|
BEECH HOUSE STUD rose to prominence with the arrival of the unbeaten NEARCO, who entered stud here in 1940. Beech House was originally part of the Cheveley Park Stud of Col. Harry McCalmont, but this parcel was purchased by Charles Hackford, who sold it to Martin Benson in 1930. Here Benson stood Derby winner Windsor Lad, who died after only 3 seasons. NEARCO was found as a suitable replacement in the fall of 1939, and became the Leading Sire in Great Britain in 1947, 1948. He died in June of 1957 from cancer at the age of 22. NEARCO's sireline is one of the most prolific in the world today through his many successful sons including Nearctic, Nasrullah, Dante and others.
The mare LADY ANGELA was also bred here, and was exported to Canada carrying the champion runner and sire NEARCTIC (by Nearco) in 1953.
Beech House Stud was sold to Sir Victor Sassoon in 1960, who stood homebred Derby winners CREPELLO (died 1974) and ST. PADDY (died 1984) here. ST. PADDY is also buried here. The farm was sold to Dr. Carlo Vittadini, and it is currently owned by Sheikh Hamdan Al Maktoum, who reintroduced stallions to the property in the last couple of years. --A.P.
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tflite_transfer_converter."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import shutil
import tempfile
import unittest

import tensorflow as tf
from tensorflow.keras import layers
from tensorflow.keras.regularizers import l2

# pylint: disable=g-bad-import-order
from tfltransfer import bases
from tfltransfer import heads
from tfltransfer import optimizers
from tfltransfer import tflite_transfer_converter
# pylint: enable=g-bad-import-order
DEFAULT_INPUT_SIZE = 64   # Width of the feature vector consumed by the default base model
DEFAULT_BATCH_SIZE = 128  # Training batch size handed to the converters under test
LEARNING_RATE = 0.001     # Learning rate for the SGD-based tests
class TestTfliteTransferConverter(unittest.TestCase):
  """Checks the input/output tensor signatures of the TFLite models
  produced by TFLiteTransferConverter for several base/head/optimizer
  combinations.
  """

  @classmethod
  def setUpClass(cls):
    super(TestTfliteTransferConverter, cls).setUpClass()
    # Build and save one trivial dense base model, shared read-only by
    # every test in this class.
    cls._default_base_model_dir = tempfile.mkdtemp('tflite-transfer-test-base')
    model = tf.keras.Sequential([
        layers.Dense(
            units=DEFAULT_INPUT_SIZE, input_shape=(DEFAULT_INPUT_SIZE,))
    ])
    model.build()
    model.save(cls._default_base_model_dir, save_format='tf')

  @classmethod
  def tearDownClass(cls):
    # Remove the SavedModel directory created in setUpClass; the
    # original code leaked one temp directory per test run.
    shutil.rmtree(cls._default_base_model_dir, ignore_errors=True)
    super(TestTfliteTransferConverter, cls).tearDownClass()

  def setUp(self):
    super(TestTfliteTransferConverter, self).setUp()
    # Fresh wrapper per test; the underlying SavedModel is shared.
    self._default_base_model = bases.SavedModelBase(
        TestTfliteTransferConverter._default_base_model_dir)

  def test_mobilenet_v2_saved_model_and_keras_model(self):
    """SavedModel base plus a two-layer Keras head."""
    input_size = DEFAULT_INPUT_SIZE
    output_size = 5
    head_model = tf.keras.Sequential([
        layers.Dense(
            units=32,
            input_shape=(input_size,),
            activation='relu',
            kernel_regularizer=l2(0.01),
            bias_regularizer=l2(0.01)),
        layers.Dense(
            units=output_size,
            kernel_regularizer=l2(0.01),
            bias_regularizer=l2(0.01)),
    ])
    head_model.compile(loss='categorical_crossentropy', optimizer='sgd')

    converter = tflite_transfer_converter.TFLiteTransferConverter(
        output_size, self._default_base_model, heads.KerasModelHead(head_model),
        optimizers.SGD(LEARNING_RATE), DEFAULT_BATCH_SIZE)
    models = converter._convert()

    # Two dense layers -> (kernel, bias) shape pair for each.
    parameter_shapes = [(input_size, 32), (32,), (32, output_size),
                        (output_size,)]
    self.assertSignatureEqual(models['initialize'], [()], parameter_shapes)
    self.assertSignatureEqual(models['bottleneck'], [(1, input_size)],
                              [(1, input_size)])
    self.assertSignatureEqual(models['inference'],
                              [(1, input_size)] + parameter_shapes,
                              [(1, output_size)])
    self.assertSignatureEqual(models['optimizer'],
                              parameter_shapes + parameter_shapes,
                              parameter_shapes)

  def test_mobilenet_v2_saved_model_and_softmax_classifier_model(self):
    """SavedModel base plus a softmax classifier head."""
    input_size = DEFAULT_INPUT_SIZE
    output_size = 5
    batch_size = DEFAULT_BATCH_SIZE
    converter = tflite_transfer_converter.TFLiteTransferConverter(
        output_size, self._default_base_model,
        heads.SoftmaxClassifierHead(batch_size, (input_size,), output_size),
        optimizers.SGD(LEARNING_RATE), batch_size)
    models = converter._convert()

    parameter_shapes = [(input_size, output_size), (output_size,)]
    self.assertSignatureEqual(models['initialize'], [()], parameter_shapes)
    self.assertSignatureEqual(models['bottleneck'], [(1, input_size)],
                              [(1, input_size)])
    self.assertSignatureEqual(models['train_head'],
                              [(batch_size, input_size),
                               (batch_size, output_size)] + parameter_shapes,
                              [()] + parameter_shapes)
    self.assertSignatureEqual(models['inference'],
                              [(1, input_size)] + parameter_shapes,
                              [(1, output_size)])
    self.assertSignatureEqual(models['optimizer'],
                              parameter_shapes + parameter_shapes,
                              parameter_shapes)

  def test_mobilenet_v2_base_and_softmax_classifier_model(self):
    """MobileNetV2 base plus a softmax classifier head, SGD optimizer."""
    input_size = 224
    output_size = 5
    batch_size = DEFAULT_BATCH_SIZE
    base = bases.MobileNetV2Base(image_size=input_size)
    head = heads.SoftmaxClassifierHead(batch_size, base.bottleneck_shape(),
                                       output_size)
    optimizer = optimizers.SGD(LEARNING_RATE)
    converter = tflite_transfer_converter.TFLiteTransferConverter(
        output_size, base, head, optimizer, batch_size)
    models = converter._convert()

    # MobileNetV2 bottleneck is 7x7x1280, flattened for the classifier.
    parameter_shapes = [(7 * 7 * 1280, output_size), (output_size,)]
    self.assertSignatureEqual(models['initialize'], [()], parameter_shapes)
    self.assertSignatureEqual(models['bottleneck'],
                              [(1, input_size, input_size, 3)],
                              [(1, 7, 7, 1280)])
    self.assertSignatureEqual(models['train_head'],
                              [(batch_size, 7, 7, 1280),
                               (batch_size, output_size)] + parameter_shapes,
                              [()] + parameter_shapes)
    self.assertSignatureEqual(models['inference'],
                              [(1, 7, 7, 1280)] + parameter_shapes,
                              [(1, output_size)])
    self.assertSignatureEqual(models['optimizer'],
                              parameter_shapes + parameter_shapes,
                              parameter_shapes)

  def test_mobilenet_v2_base_and_softmax_classifier_model_adam(self):
    """Adam optimizer signature: extra state slots plus a step counter."""
    input_size = 224
    output_size = 5
    batch_size = DEFAULT_BATCH_SIZE
    base = bases.MobileNetV2Base(image_size=input_size)
    head = heads.SoftmaxClassifierHead(batch_size, base.bottleneck_shape(),
                                       output_size)
    optimizer = optimizers.Adam()
    converter = tflite_transfer_converter.TFLiteTransferConverter(
        output_size, base, head, optimizer, batch_size)
    models = converter._convert()

    param_shapes = [(7 * 7 * 1280, output_size), (output_size,)]
    self.assertSignatureEqual(
        models['optimizer'],
        param_shapes + param_shapes + param_shapes + param_shapes + [()],
        param_shapes + param_shapes + param_shapes + [()])

  def assertSignatureEqual(self, model, expected_inputs, expected_outputs):
    """Asserts a serialized TFLite model has the given input and output
    tensor shapes (order-sensitive)."""
    interpreter = tf.lite.Interpreter(model_content=model)
    inputs = [
        input_['shape'].tolist() for input_ in interpreter.get_input_details()
    ]
    outputs = [
        output['shape'].tolist() for output in interpreter.get_output_details()
    ]
    self.assertEqual(inputs, [list(dims) for dims in expected_inputs])
    self.assertEqual(outputs, [list(dims) for dims in expected_outputs])
if __name__ == '__main__':
  # Allow running this test module directly (outside a test runner).
  unittest.main()
|
Long Tall Sally Sale is now on across all clothing! Spruce up your winter wardrobe in this mega sale!
Long Tall Sally have been providing fashionable clothing for women 5'8'' and over since their first store opened in 1976, and have developed a reputation for quality and style. The brand offer a huge range of items from basics such as jeans and knitwear to party dresses and office clothing. Designed specifically for the taller figure, Long Tall Sally's trousers are available in lengths up to 38'', and sleeve and body lengths are elongated to create the perfect fit. Long Tall Sally incorporate each season's key trends into their collections, making sure that tall women do not need to compromise on style. With Long Tall Sally's range of stylish footwear, designed with a larger foot size in mind, you can be sure to look great from head to toe.
Add Long Tall Sally to your Favourites list and be the first to know every time they have a Sale on!
1st January 2018 - Long Tall Sally New Years Day sales for 2018.
7th May 2018 - Long Tall Sally Early May Bank Holiday sales for 2018, add to your diary now!
July 2018 - Long Tall Sally Summer 2018 sales, joining us?
23rd November 2018 - Long Tall Sally Black Friday sales in 2018 start on Friday 23rd November.
Add products from Long Tall Sally to your Love Sales list. Get the Love Sales button now!
Long Tall Sally January Sale is now on across all clothing! Spruce up your winter wardrobe in this mega sale!
Long Tall Sally Boxing day Sale is now on across all clothing! Spruce up your winter wardrobe in this mega sale!
Long Tall Sally Christmas Sale is now on across all clothing! Spruce up your winter wardrobe in this mega sale!
Long Tall Sally November 50% off Sale is now on across all clothing! Spruce up your winter wardrobe in this mega sale!
Long Tall Sally Cyber Monday Event is now on across all clothing! Spruce up your winter wardrobe in this mega sale!
|
# -*- coding: ascii -*-
r"""
:Copyright:
Copyright 2006 - 2015
Andr\xe9 Malo or his licensors, as applicable
:License:
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
=====================
Markup Parser Logic
=====================
Soup Parser
~~~~~~~~~~~
This module provides a very lenient HTML/XML lexer. The `SoupLexer` class is
initialized with a listener object, which receives all low level events
(like starttag, endtag, text etc). Listeners must implement the
`ListenerInterface`.
On top of the lexer there's `SoupParser` class, which actually implements the
`ListenerInterface` itself (the parser listens to the lexer). The parser adds
HTML semantics to the lexed data and passes the events to a building listener
(`BuildingListenerInterface`). In addition to the events sent by the lexer the
`SoupParser` class generates endtag events (with empty data arguments) for
implicitly closed elements. Furthermore it knows about CDATA elements like
``<script>`` or ``<style>`` and modifies the lexer state accordingly.
The actual semantics are provided by a DTD query class (implementing
`DTDInterface`.)
"""
if __doc__:
    # pylint: disable = redefined-builtin
    # Decode the ascii-only \xNN escapes in the module docstring into
    # real unicode characters (the source file stays pure ascii).
    __doc__ = __doc__.encode('ascii').decode('unicode_escape')
__author__ = r"Andr\xe9 Malo".encode('ascii').decode('unicode_escape')  # "André Malo"
__docformat__ = "restructuredtext en"
import re as _re
from ..._exceptions import LexerEOFError, LexerFinalizedError
from ... import interfaces as _interfaces
from . import dtd as _dtd
class SoupLexer(object):
"""
(X)HTML Tagsoup Lexer
The lexer works hard to preserve the original data. In order to achieve
this goal, it does not validate the input and recognizes its input in a
quite lenient way.
:Groups:
- `Lexer states` :
`TEXT`,
`CDATA`,
`MARKUP`,
`STARTTAG`,
`ENDTAG`,
`COMMENT`,
`MSECTION`,
`DECL`,
`PI`,
`EMPTY`,
`FINAL`
- `Regex Matchers` :
`_START_MATCH`,
`_ATT_ITER`,
`_COMMENT_SEARCH`,
`_MSECTION_MATCH`,
`_MSECTIONINVALID_MATCH`,
`_MEND_SEARCH`,
`_MSEND_SEARCH`,
`_DECL_MATCH`
:CVariables:
`TEXT` : ``int``
Lexer state ``TEXT`` (between tags)
`CDATA` : ``int``
Lexer state ``CDATA`` (between (P)CDATA tags)
`MARKUP` : ``int``
Lexer state ``MARKUP`` (``<``)
`STARTTAG` : ``int``
Lexer state ``STARTTAG`` (``<[letter]``)
`ENDTAG` : ``int``
Lexer state ``ENDTAG`` (``</``)
`COMMENT` : ``int``
Lexer state ``COMMENT`` (``<!--``)
`MSECTION` : ``int``
Lexer state ``MSECTION`` (``<![``)
`DECL` : ``int``
Lexer state ``DECL`` (``<!``)
`PI` : ``int``
Lexer state ``PI`` (``<?``)
`EMPTY` : ``int``
Lexer state ``EMPTY`` (``<>``)
`FINAL` : ``int``
Lexer state ``FINAL``
`_LEXERS` : ``tuple``
The state lexer method names (``('method', ...)``)
`_STATES` : ``tuple``
The state names (``('name', ...)``)
:IVariables:
`_state` : ``int``
The current lexer state
`_lexers` : ``list``
The state lexer methods (``[method, ...]``)
`_listener` : `ListenerInterface`
The listener the events shall be sent to
`_buffer` : ``str``
Current unprocessed buffer
`_conditional_ie_comments` : ``bool``
Handle conditional IE comments as text?
"""
# pylint: disable = no-member
def __init__(self, listener, conditional_ie_comments=True):
r"""
Initialization
:Parameters:
`listener` : `ListenerInterface`
The event listener
`conditional_ie_comments` : ``bool``
Handle conditional IE comments as text?
Conditional comments are described in full detail
at `MSDN`_\.
.. _MSDN: http://msdn.microsoft.com/en-us/library/
ms537512%28v=vs.85%29.aspx
"""
self._listener = listener
self._normalize = None
self._cdata_name = None
self._state = self.TEXT
self._lexers = [getattr(self, name) for name in self._LEXERS]
self._buffer = ''
self._conditional_ie_comments = bool(conditional_ie_comments)
def feed(self, food):
"""
Feed the lexer with new data
:Parameters:
`food` : ``str``
The data to process
"""
self._buffer += food
self._lex()
def finalize(self):
"""
Finalize the lexer
This processes the rest buffer (if any)
:Exceptions:
- `LexerEOFError` : The rest buffer could not be consumed
"""
self._lex()
if self._buffer:
raise LexerEOFError(
"Unfinished parser state %s" % self._STATES[self._state]
)
self._state = self.FINAL
def cdata(self, normalize, name):
""" Set CDATA state """
if self._state != self.FINAL:
self._state = self.CDATA
self._normalize = normalize
self._cdata_name = normalize(name)
def _lex(self):
""" Parse the current buffer """
while self._buffer:
if self._lexers[self._state]():
break
def _lex_text(self):
"""
Text lexer
State: We are between tags or at the very beginning of the document
and look for a ``<``.
:Return: Unfinished state?
:Rtype: ``bool``
"""
data = self._buffer
pos = data.find('<')
if pos == 0:
self._state = self.MARKUP
return False
elif pos == -1:
self._buffer = ''
else:
self._buffer, data = data[pos:], data[:pos]
self._state = self.MARKUP
self._listener.handle_text(data)
return False
def _lex_cdata(self):
"""
(PR)CDATA lexer
State: We are inside a text element and looking for the end tag only
:Return: Unfinished state?
:Rtype: ``bool``
"""
incomplete = False
data, pos = self._buffer, 0
while True:
pos = data.find('<', pos)
if pos == -1:
pos = len(data)
self._buffer = ''
break
else:
char = data[pos + 1:pos + 2]
if char == '/':
self._state = self.ENDTAG
break
elif char == '':
incomplete = True
break
else:
pos += 1
if pos > 0:
self._buffer, data = data[pos:], data[:pos]
self._listener.handle_text(data)
return incomplete
#: Regex matcher for a tagname character
#:
#: :Type: ``callable``
_TAGNAME_MATCH = _re.compile(r'[a-zA-Z0-9]').match
def _lex_markup(self):
"""
Markup lexer
State: We've hit a ``<`` character and now find out, what it's
becoming
:Return: Unfinished state?
:Rtype: ``bool``
"""
data = self._buffer
if len(data) < 2:
return True
char = data[1]
state = (self.ENDTAG, self.DECL, self.PI, self.EMPTY, -1)[
"/!?>".find(char)
]
if state == -1:
if self._TAGNAME_MATCH(char):
state = self.STARTTAG
else:
state = self.TEXT
self._buffer = data[1:]
self._listener.handle_text(data[0])
self._state = state
return False
#: Regex matcher for a start tag
#:
#: :Type: ``callable``
_START_MATCH = _re.compile(r'''
<
(?P<name>[^ \t\r\n\f/>]+)
(?P<attr>
[^"'>]*
(?:
(?:
"[^"]*"
| '[^']*'
)
[^"'>]*
)*
)
[ \t\r\n\f]*
>
''', _re.X).match
#: Regex iterator for extracting start tag attributes
#:
#: :Type: ``callable``
_ATT_ITER = _re.compile(r'''
[ \t\r\n\f]*
(?P<name>(?:/|[^ \t\r\n\f/=>]*)) # attribute name
[ \t\r\n\f]*
(?:
=
(?P<value> # optional value
[ \t\r\n\f]*"[^"]*"
| [ \t\r\n\f]*'[^']*'
| [^ \t\r\n\f/>]*
)
)?
''', _re.X).finditer
def _lex_start(self):
"""
Starttag lexer
State: We've hit a ``<x`` and now look for the ``>``.
:Return: Unfinished State?
:Rtype: ``bool``
"""
data = self._buffer
match = self._START_MATCH(data)
if match is None:
return True
pos = match.end()
self._buffer, data = data[pos:], data[:pos]
name, attrstring = match.group('name', 'attr')
attr, closed = [], False
if attrstring:
for match in self._ATT_ITER(attrstring):
key, value = match.group('name', 'value')
if key == '/' and value is None:
closed = True
continue
if key or value is not None:
if value:
value = value.strip()
attr.append((key.strip(), value))
else: # bug in Python < 2.3.5 (fixed in rev 37262)
break
self._state = self.TEXT
self._listener.handle_starttag(name, attr, closed, data)
return False
def _lex_end(self):
"""
Endtag lexer
State: We've hit ``</``.
:Return: Unfinished state?
:Rtype: ``bool``
"""
data = self._buffer
pos = data.find('>') + 1
if pos == 0:
return True
self._buffer, data = data[pos:], data[:pos]
name = data[2:-1].strip()
if self._cdata_name is not None and \
self._normalize(name) != self._cdata_name:
self._state = self.CDATA
self._listener.handle_text(data)
else:
self._cdata_name = self._normalize = None
self._state = self.TEXT
self._listener.handle_endtag(name, data)
return False
#: Regex searcher for finding the end of a comment
#:
#: :Type: ``callable``
_COMMENT_SEARCH = _re.compile(r'--[ \t\r\n\f]*>').search
#: Regex searcher for matching IE conditional comment
#:
#: :Type: ``callable``
_IE_COMMENT_MATCH = _re.compile(r'''
\[[ \t\r\n\f]* (?:
[iI][fF] | [eE][lL][sS][eE] | [eE][nN][dD][iI][fF]
) [^\]]+]>
''', _re.X).match
def _lex_comment(self):
"""
Comment lexer
State: We've hit ``<!--``.
:Return: Unfinished state?
:Rtype: ``bool``
"""
data = self._buffer
if len(data) < 7:
return True
if self._conditional_ie_comments:
match = iec = self._IE_COMMENT_MATCH(data, 4)
else:
match = iec = None
if match is None:
match = self._COMMENT_SEARCH(data, 4)
if match is None:
return True
pos = match.end()
self._buffer, data = data[pos:], data[:pos]
self._state = self.TEXT
if iec:
self._listener.handle_text(data)
else:
self._listener.handle_comment(data)
return False
#: List of MS-specific marked section names (lowercased)
#:
#: :Type: ``tuple``
_MSSECTIONS = ('if', 'else', 'endif')
#: Regex matcher for the start of a marked section
#:
#: :Type: ``callable``
_MSECTION_MATCH = _re.compile(r'''
<!\[[ \t\r\n\f]*(?P<name>[^\][ \t\r\n\f>]+)(?=[\][ \t\r\n\f>])
''', _re.X).match
#: Regex matcher for the start of an invalid marked section
#:
#: :Type: ``callable``
_MSECTIONINVALID_MATCH = _re.compile(r'<!\[[ \t\r\n\f]*[\][>]').match
#: Regex searcher for the end of a marked section
#:
#: :Type: ``callable``
_MEND_SEARCH = _re.compile(r'][ \t\r\n\f]*][ \t\r\n\f]*>').search
#: Regex searcher for the end of a MS specific marked section
#:
#: :Type: ``callable``
_MSEND_SEARCH = _re.compile(r'][ \t\r\n\f]*(?:--)?[ \t\r\n\f]*>').search
def _lex_msection(self):
"""
Marked section lexer
State: We've hit a ``<![`` and now seek the end
:Return: Unfinished state?
:Rtype: ``bool``
"""
data = self._buffer
match = self._MSECTION_MATCH(data)
if match is None:
match = self._MSECTIONINVALID_MATCH(data)
if match is not None: # pass invalid msection as text
pos = match.end()
self._buffer = data[pos:]
data = data[:pos]
self._state = self.TEXT
self._listener.handle_text(data)
return False
return True
name = match.group('name')
start = match.end()
if self._conditional_ie_comments and name.lower() in self._MSSECTIONS:
match = iec = self._MSEND_SEARCH(data, start)
else:
pos = data.find('[', start)
if pos >= 0:
start = pos + 1
match = self._MEND_SEARCH(data, start)
iec = None
if match is None:
return True
pos, end = match.end(), match.start()
value = data[start:end]
self._buffer, data = data[pos:], data[:pos]
self._state = self.TEXT
if iec:
self._listener.handle_text(data)
else:
self._listener.handle_msection(name, value, data)
return False
#: Regex matcher for a complete declaration
#:
#: This regex seems a bit nasty, but it should catch all stuff allowed
#: in declarations (including doctype). Some day, it probably needs to
#: be replaced it by real lexer states...
#:
#: :Type: ``callable``
_DECL_MATCH = _re.compile(r'''
    <!
    (?P<name>[^\][ \t\r\n\f>]*)
    (?P<value>
        [^"'<>-]*              # any nonspecial
        (?:
            (?:
                "[^"]*"        # double quoted string
              | '[^']*'        # single quoted string (valid?)
              | <!\[           # marked section
                [^\]]*
                (?:
                    ](?![ \t\r\n\f]*][ \t\r\n\f]*>)
                    [^\]]*
                )*
                ][ \t\r\n\f]*][ \t\r\n\f]*>
              | <(?!!\[)       # declaration
                               # hopefully not a doctype
                               # (but unlikely, because we are
                               # probably already in a DT subset)
                [^"'>-]*
                (?:
                    (?:
                        "[^"]*"
                      | '[^']*'
                      | --     # comment
                        [^-]*
                        (?:-[^-]+)*
                        --
                      | -(?!-) # just a hyphen
                    )
                    [^"'>-]*
                )*
                >
              | --             # comment
                [^-]*
                (?:-[^-]+)*
                --
              | -(?!-)         # just a hyphen
            )
            [^"'<>-]*          # more non-specials
        )*
    )
    >
''', _re.X).match

def _lex_decl(self):
    """
    Declaration lexer

    State: We've hit a ``<!`` and now peek inside

    :Return: Unfinished state?
    :Rtype: ``bool``
    """
    data = self._buffer
    if len(data) < 3:
        # Need at least "<!x" to classify the construct.
        return True
    if data.startswith('<!--'):
        # Hand off to the dedicated comment lexer.
        self._state = self.COMMENT
        return False
    elif data.startswith('<!['):
        # Hand off to the marked-section lexer.
        self._state = self.MSECTION
        return False
    elif data == '<!-':
        # Could still grow into "<!--"; wait for more input.
        return True
    match = self._DECL_MATCH(data)
    if match is None:
        # Declaration not complete yet.
        return True
    name, value = match.group('name', 'value')
    pos = match.end()
    self._buffer, data = data[pos:], data[:pos]
    self._state = self.TEXT
    self._listener.handle_decl(name, value.strip(), data)
    return False
def _lex_pi(self):
"""
Processing instruction lexer
State: We've hit a ``<?`` and now peek inside
:Return: Unfinished state?
:Rtype: ``bool``
"""
data = self._buffer
pos = data.find('?>', 2)
if pos == -1:
return True
pos += 2
self._buffer, data = data[pos:], data[:pos]
self._state = self.TEXT
self._listener.handle_pi(data)
return False
def _lex_empty(self):
"""
Empty tag lexer
State: We've hit a ``<>``
:Return: Unfinished state?
:Rtype: ``bool``
"""
self._buffer, data = self._buffer[2:], self._buffer[:2]
self._state = self.TEXT
self._listener.handle_starttag('', [], False, data)
return False
def _lex_final(self):
    """
    Called after the lexer was finalized

    State: after all

    Any attempt to feed more data after `finalize` lands here; this
    state never transitions anywhere else.

    :Exceptions:
      - `LexerFinalizedError` : The lexer was already finalized
        (raised always)
    """
    raise LexerFinalizedError("The lexer was already finalized")
# Wire the state constants and the per-state dispatch tables onto the
# lexer class: each state name becomes an integer class attribute equal
# to its position in the table (FINAL == 0, TEXT == 1, ...), and
# ``_LEXERS`` / ``_STATES`` are parallel tuples of method names and
# state names in the same order.
_STATE_TABLE = (
    ('FINAL', '_lex_final'),
    ('TEXT', '_lex_text'),
    ('CDATA', '_lex_cdata'),
    ('MARKUP', '_lex_markup'),
    ('STARTTAG', '_lex_start'),
    ('ENDTAG', '_lex_end'),
    ('COMMENT', '_lex_comment'),
    ('MSECTION', '_lex_msection'),
    ('DECL', '_lex_decl'),
    ('PI', '_lex_pi'),
    ('EMPTY', '_lex_empty'),
)
for _idx, (_statename, _funcname) in enumerate(_STATE_TABLE):
    setattr(SoupLexer, _statename, _idx)
SoupLexer._LEXERS = tuple(  # pylint: disable = protected-access
    _entry[1] for _entry in _STATE_TABLE
)
SoupLexer._STATES = tuple(  # pylint: disable = protected-access
    _entry[0] for _entry in _STATE_TABLE
)
del _idx, _statename, _funcname  # pylint: disable = undefined-loop-variable
del _STATE_TABLE
from ... import c
c = c.load('impl')
# Prefer the accelerated C implementation of the lexer when the
# extension module could be loaded; otherwise fall back to the pure
# python SoupLexer defined above.
if c is None:
    DEFAULT_LEXER = SoupLexer  # pylint: disable = invalid-name
else:
    DEFAULT_LEXER = c.SoupLexer
del c
class SoupParser(object):
    """
    =========================
     (X)HTML Tag Soup Parser
    =========================

    Overview
    ~~~~~~~~

    The parser is actually a tagsoup parser by design in order to process
    most of the "HTML" that can be found out there. Of course, if the HTML
    is well-formed and valid, this would be the best. There is only as
    much HTML syntax applied as necessary to parse it. You can influence
    these syntax definitions by picking another lexer. You can change
    the semantics by picking another dtd query class.

    This parser guarantees, that for each not-self-closing starttag event also
    an endtag event is generated (if the endtag is not actually there, the
    data parameter is an empty string). This also happens for empty tags (like
    ``br``). On the other hand, there may be more endtag events than starttag
    events, because of unbalanced or wrongly nested tags.

    Special constructs, which are comments, PIs, marked sections and
    declarations may occur anywhere, i.e. they are not closing elements
    implicitly.

    The default lexer does not deal with NET tags (<h1/Heading/). Neither
    does it handle unfinished starttags by SGML rules like ``<map<area>``.
    It *does* know about empty tags (``<>`` and ``</>``).

    CDATA elements and comments are handled in a simplified way. Once
    the particular state is entered, it's only left, when the accompanying
    end marker was found (``<script>...</script>``, ``<!-- ... -->``).
    Anything in between is text.

    How is it used?
    ~~~~~~~~~~~~~~~

    The parser API is "streamy" on the input side and event based on the
    output side. So, what you need first is a building listener, which will
    receive all generated parser events and process them. Such a listener
    object is expected to implement the `BuildingListenerInterface`.

    Now you create a `SoupParser` instance and pass the listener object to
    the constructor and the parser is ready to be fed. You can feed as many
    chunks of input data you like into the parser by using the `feed`
    method. Every feed call may generate multiple events on the output side.
    When you're done feeding, call the parser's `finalize` method in order
    to clean up. This also flushes pending events to the listener.

    :IVariables:
      `listener` : `BuildingListenerInterface`
        The building listener to send the events to

      `lexer` : `SoupLexer`
        The lexer instance

      `_tagstack` : ``list``
        The current tag stack

      `_inempty` : ``bool``
        indicates if the last tag on the stack is an empty one

      `_lastopen` : ``str``
        Stores the last seen open tag name
    """
    __implements__ = [
        _interfaces.ListenerInterface, _interfaces.ParserInterface
    ]

    def __init__(self, listener, dtd, lexer=None):
        """
        Initialization

        :Parameters:
          `listener` : `ListenerInterface`
            The building listener

          `dtd` : `DTDInterface`
            DTD query object

          `lexer` : ``callable``
            Lexer class/factory. This must be a callable taking an
            event listener and returning a lexer instance. If omitted or
            ``None``, the default lexer will be used (`DEFAULT_LEXER`).
        """
        self._tagstack, self._inempty, self._lastopen = [], False, ''
        self.listener = listener
        # Cache the DTD predicates as plain callables; they are consulted
        # on every start/end tag event below.
        self._is_nestable = dtd.nestable
        self._is_cdata = dtd.cdata
        self._is_empty = dtd.empty
        if lexer is None:
            lexer = DEFAULT_LEXER
        self._lexer = lexer(self)
        self._normalize = listener.decoder.normalize

    @classmethod
    def html(cls, listener):
        """
        Construct a parser using the `HTMLDTD`

        :Parameters:
          `listener` : `BuildingListenerInterface`
            The building listener

        :Return: The new parser instance
        :Rtype: `SoupParser`
        """
        return cls(listener, _dtd.HTMLDTD())

    @classmethod
    def xml(cls, listener):
        """
        Construct a parser using the `XMLDTD`

        :Parameters:
          `listener` : `ListenerInterface`
            The building listener

        :Return: The new parser instance
        :Rtype: `SoupParser`
        """
        return cls(listener, _dtd.XMLDTD())

    def _close_empty(self):
        """ Ensure we close last empty tag """
        if self._inempty:
            self._inempty = False
            self.listener.handle_endtag(self._tagstack.pop()[1], '')

    #########################################################################
    # ListenerInterface #####################################################
    #########################################################################

    def handle_text(self, data):
        """ :See: `ListenerInterface` """
        self._close_empty()
        self.listener.handle_text(data)

    def handle_starttag(self, name, attrs, closed, data):
        """ :See: `ListenerInterface` """
        self._close_empty()
        if name == '' and not attrs:
            # Empty start tag ``<>`` repeats the last open tag name.
            name = self._lastopen
        else:
            self._lastopen = name
        tagstack = self._tagstack
        nestable = self._is_nestable
        starttag = self._normalize(name)
        # Implicitly close open tags which may not contain this one,
        # emitting synthetic endtag events with empty data.
        while tagstack and not nestable(tagstack[-1][0], starttag):
            self.listener.handle_endtag(tagstack.pop()[1], '')
        if closed:
            # Self-closing tag; nothing is pushed onto the stack.
            self.listener.handle_starttag(name, attrs, closed, data)
        else:
            if self._is_cdata(starttag):
                # Switch the lexer into CDATA mode (e.g. <script>).
                self._lexer.cdata(self._normalize, starttag)
            self.listener.handle_starttag(name, attrs, closed, data)
            # Stack entries are (normalized, original) name pairs.
            tagstack.append((starttag, name))
            if self._is_empty(starttag):
                # Remember to synthesize the endtag on the next event.
                self._inempty = True

    def handle_endtag(self, name, data):
        """ :See: `ListenerInterface` """
        tagstack = self._tagstack
        if tagstack:
            if name == '':
                # Empty endtag ``</>`` closes the most recent open tag.
                name = tagstack[-1][1]
            endtag = self._normalize(name)
            if endtag in dict(tagstack):
                # Unwind the stack down to the matching start tag,
                # emitting synthetic endtags for unclosed inner tags.
                toclose, original = tagstack.pop()
                self._inempty = False
                while toclose != endtag:
                    self.listener.handle_endtag(original, '')
                    toclose, original = tagstack.pop()
        self._close_empty()
        self.listener.handle_endtag(name, data)

    def handle_comment(self, data):
        """ :See: `ListenerInterface` """
        self._close_empty()
        self.listener.handle_comment(data)

    def handle_msection(self, name, value, data):
        """ :See: `ListenerInterface` """
        self._close_empty()
        self.listener.handle_msection(name, value, data)

    def handle_decl(self, name, value, data):
        """ :See: `ListenerInterface` """
        self._close_empty()
        self.listener.handle_decl(name, value, data)

    def handle_pi(self, data):
        """ :See: `ListenerInterface` """
        self._close_empty()
        self.listener.handle_pi(data)

    def handle_escape(self, escaped, data):
        """ :See: `ListenerInterface` """
        # pylint: disable = unused-argument
        # Escapes are resolved by the lexer; reaching this is a bug.
        raise AssertionError()

    #########################################################################
    # ParserInterface #######################################################
    #########################################################################

    def feed(self, food):
        """ :See: `ParserInterface` """
        self._lexer.feed(food)

    def finalize(self):
        """
        :See: `ParserInterface`

        :Exceptions:
          - `LexerEOFError` : EOF in the middle of a state
        """
        if self._lexer is not None:
            self._lexer, _ = None, self._lexer.finalize()  # noqa
        # Close anything still open, emitting synthetic endtag events.
        tagstack = self._tagstack
        while tagstack:
            self.listener.handle_endtag(tagstack.pop()[1], '')
from ... import c
c = c.load('impl')
# Prefer the accelerated C implementation of the parser when the
# extension module could be loaded; otherwise fall back to the pure
# python SoupParser defined above.
if c is None:
    DEFAULT_PARSER = SoupParser  # pylint: disable = invalid-name
else:
    DEFAULT_PARSER = c.SoupParser
del c
|
The price you found is 36% lower than this hotel's average rate of $171/night.
Great location, friendly and helpful staff, hotel is clean and comfortable, nice pool and hot tub, good breakfast, plenty of parking. Large common area was perfect for our extended family to gather in the evening.
Thank you for sharing your feedback with us and the Trip Advisor community. We are glad you enjoyed your overall stay with us, and that you were able to enjoy our lobby with your family to gather. We hope to be able to have you as our guest, should you visit our area soon!
Nice and clean newer hotel. Staff was friendly and helpful. Our room had two queen beds, and was very clean. The breakfast in the hotel lobby was a good free breakfast. We had scrambled eggs, bacon, french toast sticks, and oatmeal. Another table was fresh waffles, that you make yourself. A cereal bar with 5 to choose from. A bread bar, with toaster, had bagels and English muffins for example. A fresh fruit station, and a juice bar. So not a big choice, but you could find something if you were hungry. I ate there 3 days in a row. Pool was small but kids had a great time regardless. The hotel lobby was kind of the meeting place for everyone. We all brought our own beer and wine and there were no issues with the staff. We had pizza delivered, along with other groups too, and there was plenty of room for all of us. I would recommend this hotel for groups and also for families visiting the area.
Thank you, Stephen L, for allowing us to host your hockey team while in our area for a tournament. We are always happy to accommodate our groups as best we can, and we’re glad you enjoyed the lobby as a common meeting place with your team. Our staff will appreciate your kind remarks about our hotel and staff and we hope to have you as our guest again soon!
We really enjoyed our recent stay here at the Hyatt in Cape Cod. Great room, appetizers and live music for entertainment. Great pool and jacuzzi. Very friendly and accommodating staff! I would highly recommend this hotel to family and friends.
Thank you for your recent review and for choosing our hotel. We are happy that you enjoyed your stay. We appreciate your positive feedback about our hotel, as we do take pride in catering to our guests’ needs the best way we can. We will pass along your positive feedback to our staff so we can continue to strive to make you happy. Glad you enjoyed our appetizers, live music and our pool area. If you’re traveling to our area again soon, don’t hesitate to call us — we look forward to being your host again.
We chose to stay at the Hampton Inn because of its close proximity to an event we attended. The room was clean and quiet and comfortable. The staff members were pleasant and accommodating. We would stay again.
This hotel is clean, staffed by gracious employees who make sure that everything is going well every step of the way. The shuttle bus service is great for those who don't like to drive at night! The proximity to Hyannis restaurants is perfect. I would recommend this hotel to all. I will be going back the next time my business is in that area!
|
# Add IokeLexer to ``__all__`` at the top of agile.py like so:
__all__ = ['PythonLexer', 'PythonConsoleLexer', 'PythonTracebackLexer',
           'RubyLexer', 'RubyConsoleLexer', 'PerlLexer', 'LuaLexer',
           'MiniDLexer', 'IoLexer', 'IokeLexer', 'TclLexer', 'ClojureLexer',
           'Python3Lexer', 'Python3TracebackLexer']

# Then insert the following IokeLexer with the other class definitions:
class IokeLexer(RegexLexer):
    """
    For `Ioke <http://ioke.org/>`_ (a strongly typed, dynamic,
    prototype based programming language) source.
    """
    name = 'Ioke'
    filenames = ['*.ik']
    aliases = ['ioke', 'ik']
    mimetypes = ['text/x-iokesrc']

    tokens = {
        # Escape sequences and #{...} interpolation shared by all
        # text-like states below.
        'interpolatableText': [
            (r'(\\b|\\e|\\t|\\n|\\f|\\r|\\"|\\\\|\\#|\\\Z|\\u[0-9a-fA-F]{1,4}|\\[0-3]?[0-7]?[0-7])', String.Escape),
            (r'#{', Punctuation, 'textInterpolationRoot')
        ],
        # Double-quoted text; an unescaped '"' pops back.
        'text': [
            (r'(?<!\\)"', String, '#pop'),
            include('interpolatableText'),
            (r'[^"]', String)
        ],
        # Documentation text (first string in fn/method/macro bodies).
        'documentation': [
            (r'(?<!\\)"', String.Doc, '#pop'),
            include('interpolatableText'),
            (r'[^"]', String.Doc)
        ],
        # Inside #{...}: lex as regular code until the closing brace.
        'textInterpolationRoot': [
            (r'}', Punctuation, '#pop'),
            include('root')
        ],
        # #/.../ regexp literal with optional trailing flags.
        'slashRegexp': [
            (r'(?<!\\)/[oxpniums]*', String.Regex, '#pop'),
            include('interpolatableText'),
            (r'\\/', String.Regex),
            (r'[^/]', String.Regex)
        ],
        # #r[...] regexp literal with optional trailing flags.
        'squareRegexp': [
            (r'(?<!\\)][oxpniums]*', String.Regex, '#pop'),
            include('interpolatableText'),
            (r'\\]', String.Regex),
            (r'[^\]]', String.Regex)
        ],
        # #[...] text literal.
        'squareText': [
            (r'(?<!\\)]', String, '#pop'),
            include('interpolatableText'),
            (r'[^\]]', String)
        ],
        'root': [
            (r'\n', Text),
            (r'\s+', Text),

            # Comments
            (r';(.*?)\n', Comment),
            (r'\A#!(.*?)\n', Comment),

            # Regexps
            (r'#/', String.Regex, 'slashRegexp'),
            (r'#r\[', String.Regex, 'squareRegexp'),

            # Symbols
            (r':[a-zA-Z0-9_!:?]+', String.Symbol),
            (r'[a-zA-Z0-9_!:?]+:(?![a-zA-Z0-9_!?])', String.Other),
            (r':"(\\\\|\\"|[^"])*"', String.Symbol),

            # Documentation (the string immediately after fn(, method(, ...)
            (r'((?<=fn\()|(?<=fnx\()|(?<=method\()|(?<=macro\()|(?<=lecro\()|(?<=syntax\()|(?<=dmacro\()|(?<=dlecro\()|(?<=dlecrox\()|(?<=dsyntax\())[\s\n\r]*"', String.Doc, 'documentation'),

            # Text
            (r'"', String, 'text'),
            (r'#\[', String, 'squareText'),

            # Mimic
            (r'[a-zA-Z0-9_][a-zA-Z0-9!?_:]+(?=\s*=.*mimic\s)', Name.Entity),

            # Assignment
            (r'[a-zA-Z_][a-zA-Z0-9_!:?]*(?=[\s]*[+*/-]?=[^=].*($|\.))', Name.Variable),

            # keywords
            (r'(break|cond|continue|do|ensure|for|for:dict|for:set|if|let|loop|p:for|p:for:dict|p:for:set|return|unless|until|while|with)(?![a-zA-Z0-9!:_?])', Keyword.Reserved),

            # Origin
            (r'(eval|mimic|print|println)(?![a-zA-Z0-9!:_?])', Keyword),

            # Base
            (r'(cell\?|cellNames|cellOwner\?|cellOwner|cells|cell|documentation|hash|identity|mimic|removeCell\!|undefineCell\!)(?![a-zA-Z0-9!:_?])', Keyword),

            # Ground
            (r'(stackTraceAsText)(?![a-zA-Z0-9!:_?])', Keyword),

            # DefaultBehaviour Literals
            (r'(dict|list|message|set)(?![a-zA-Z0-9!:_?])', Keyword.Reserved),

            # DefaultBehaviour Case
            (r'(case|case:and|case:else|case:nand|case:nor|case:not|case:or|case:otherwise|case:xor)(?![a-zA-Z0-9!:_?])', Keyword.Reserved),

            # DefaultBehaviour Reflection
            (r'(asText|become\!|derive|freeze\!|frozen\?|in\?|is\?|kind\?|mimic\!|mimics|mimics\?|prependMimic\!|removeAllMimics\!|removeMimic\!|same\?|send|thaw\!|uniqueHexId)(?![a-zA-Z0-9!:_?])', Keyword),

            # DefaultBehaviour Aspects
            (r'(after|around|before)(?![a-zA-Z0-9!:_?])', Keyword.Reserved),

            # DefaultBehaviour
            (r'(kind|cellDescriptionDict|cellSummary|genSym|inspect|notice)(?![a-zA-Z0-9!:_?])', Keyword),
            (r'(use|destructuring)', Keyword.Reserved),

            # DefaultBehavior BaseBehavior
            (r'(cell\?|cellOwner\?|cellOwner|cellNames|cells|cell|documentation|identity|removeCell!|undefineCell)(?![a-zA-Z0-9!:_?])', Keyword),

            # DefaultBehavior Internal
            (r'(internal:compositeRegexp|internal:concatenateText|internal:createDecimal|internal:createNumber|internal:createRegexp|internal:createText)(?![a-zA-Z0-9!:_?])', Keyword.Reserved),

            # DefaultBehaviour Conditions
            (r'(availableRestarts|bind|error\!|findRestart|handle|invokeRestart|rescue|restart|signal\!|warn\!)(?![a-zA-Z0-9!:_?])', Keyword.Reserved),

            # constants
            (r'(nil|false|true)(?![a-zA-Z0-9!:_?])', Name.Constant),

            # names (built-in kinds)
            (r'(Arity|Base|Call|Condition|DateTime|Aspects|Pointcut|Assignment|BaseBehavior|Boolean|Case|AndCombiner|Else|NAndCombiner|NOrCombiner|NotCombiner|OrCombiner|XOrCombiner|Conditions|Definitions|FlowControl|Internal|Literals|Reflection|DefaultMacro|DefaultMethod|DefaultSyntax|Dict|FileSystem|Ground|Handler|Hook|IO|IokeGround|Struct|LexicalBlock|LexicalMacro|List|Message|Method|Mixins|NativeMethod|Number|Origin|Pair|Range|Reflector|Regexp|Regexp Match|Rescue|Restart|Runtime|Sequence|Set|Symbol|System|Text|Tuple)(?![a-zA-Z0-9!:_?])', Name.Builtin),

            # functions (includes the unicode lambda aliases)
            (ur'(generateMatchMethod|aliasMethod|\u03bb|\u028E|fnx|fn|method|dmacro|dlecro|syntax|macro|dlecrox|lecrox|lecro|syntax)(?![a-zA-Z0-9!:_?])', Name.Function),

            # Numbers
            (r'-?0[xX][0-9a-fA-F]+', Number.Hex),
            (r'-?(\d+\.?\d*|\d*\.\d+)([eE][+-]?[0-9]+)?', Number.Float),
            (r'-?\d+', Number.Integer),

            (r'#\(', Punctuation),

            # Operators (longest alternatives first)
            (ur'(&&>>|\|\|>>|\*\*>>|:::|::|\.\.\.|===|\*\*>|\*\*=|&&>|&&=|\|\|>|\|\|=|\->>|\+>>|!>>|<>>>|<>>|&>>|%>>|#>>|@>>|/>>|\*>>|\?>>|\|>>|\^>>|~>>|\$>>|=>>|<<=|>>=|<=>|<\->|=~|!~|=>|\+\+|\-\-|<=|>=|==|!=|&&|\.\.|\+=|\-=|\*=|\/=|%=|&=|\^=|\|=|<\-|\+>|!>|<>|&>|%>|#>|\@>|\/>|\*>|\?>|\|>|\^>|~>|\$>|<\->|\->|<<|>>|\*\*|\?\||\?&|\|\||>|<|\*|\/|%|\+|\-|&|\^|\||=|\$|!|~|\?|#|\u2260|\u2218|\u2208|\u2209)', Operator),
            (r'(and|nand|or|xor|nor|return|import)(?![a-zA-Z0-9_!?])', Operator),

            # Punctuation
            (r'(\`\`|\`|\'\'|\'|\.|\,|@|@@|\[|\]|\(|\)|{|})', Punctuation),

            # kinds (capitalized identifiers)
            (r'[A-Z][a-zA-Z0-9_!:?]*', Name.Class),

            # default cellnames
            (r'[a-z_][a-zA-Z0-9_!:?]*', Name)
        ]
    }
|
As a Job Shop Manufacturer, your needs are very unique. You are catering to a wide variety of customers with specific requirements and need to ensure that the customer is always happy, thus generating repeat business. You also need to ensure that you profit on every job, securing your success.
Typically each job is different from previous jobs. As such, estimates for the materials and labor need to be accurate and the manufacturing needs to be done to exacting standards.
Discrete Manufacturing or Job Shop Manufacturing is concerned with discrete units, bills of materials and the assembly of components, and can be contrasted with Process manufacturing which is the branch of manufacturing that is associated with formulas and manufacturing recipes.
HOW CAN ONEIR SOLUTIONS HELP MY JOB SHOP MANUFACTURING COMPANY?
Accurately estimating jobs is key!
You need to know the actual cost of each job as it happens. Without this knowledge, you are potentially losing precious profits. The success of each job is dependent on many factors, specifically detailed labor reporting, accurate inventory tracking, knowing which contractors are efficient at which jobs and providing accurate estimates to your customers from the start.
With Oneir Solutions, you can calculate the actual cost of each job, including materials, what type and how much labor, thus reducing extra surprise costs. Track your inventory in real-time as the production workers take the materials they require.
Accurately estimating specific jobs is key. With Oneir Solutions, you can easily and quickly turn these quotes into orders and expedite as required. You may easily see estimated vs. actual costs, securing your continued success in the industry.
Your inventory may not be vast, but you must ensure that each step of the manufacturing process runs as smoothly as possible. You have purchases of special products and services that have to be monitored and finally charged to the job as they are received. Track and report on orders placed with your vendors to ensure timely delivery every time.
Oneir Solutions can help with each step of the manufacturing process. From order taking with Sales Order to placing and tracking vendor orders with Purchase Order to tracking active jobs with Job Costing and finally, delivering to and invoicing the customer.
Oneir Solutions will help you accurately estimate your jobs each and every time. Stop losing customers and jobs by over-pricing or losing money by under-pricing!
And if you have service trucks on-the-road, Oneir Solutions has the ability to monitor their performance, schedule jobs and track their on-board inventory, thus ensuring that each and every job is monitored and invoiced.
If you are a job shop manufacturer that is “bursting at the seams” and is expanding into more locations, Oneir Solutions is designed to make your dreams reality.
|
#!/bin/env/python3
"""Generate a Cargo.toml listing every src/*.rs file as a [[bin]] target."""
from pathlib import Path

# [package] section key/value pairs; values are pre-quoted TOML strings.
CARGO_CONF = (
    ('name', '"programming-challenges"'),
    ('version', '"0.1.0"'),
    ('authors', '["Alexey Golubev <dr.freecx@gmail.com>"]')
)

# Plain [dependencies] entries (name, pre-quoted version requirement).
CARGO_DEPS = (('lazy_static', '"*"'), ('time', '"*"'))

# Dependencies needing their own [dependencies.<name>] table; list values
# (e.g. 'features') are quoted element-wise when rendered.
CARGO_EXTRA = (
    ('sdl2', {
        'version': '"*"',
        'default-features': 'false',
        'features': ['ttf']
    }),
)
def extra(extra_list):
    """Render ``[dependencies.<name>]`` TOML tables.

    Bug fix: the original tested ``isinstance(k, list)`` on the *key*, so
    list values (e.g. ``features``) were emitted via Python's ``repr``
    (``['ttf']``) instead of valid TOML (``["ttf"]``), and the list branch
    also lacked a trailing newline.

    :param extra_list: iterable of ``(name, options)`` pairs, where
        ``options`` maps TOML keys to either pre-quoted scalar strings
        (e.g. ``'"*"'``, ``'false'``) or lists of strings to be quoted
    :return: the rendered TOML text, one table per dependency
    """
    rendered = ''
    for name, options in extra_list:
        rendered += f'[dependencies.{name}]\n'
        for key, value in options.items():
            if isinstance(value, list):
                # Quote each element: features = ["ttf",...]
                rendered += f'{key} = [' + ','.join(f'"{item}"' for item in value) + ']\n'
            else:
                # Scalar values arrive already quoted/formatted.
                rendered += f'{key} = {value}\n'
    return rendered
if __name__ == '__main__':
    # Render one [[bin]] section per Rust source file in ./src.
    def _bin_section(path):
        return f'[[bin]]\nname = "{path.stem}"\npath = "{path}"\n'

    # Render (key, value) pairs as "key = value" lines.
    def _render(pairs):
        return '\n'.join('{} = {}'.format(key, value) for key, value in pairs)

    rust_files = (p for p in Path('./src').iterdir() if p.suffix == '.rs')
    binaries = sorted(_bin_section(p) for p in rust_files)
    with open('Cargo.toml', 'w') as f:
        f.write('[package]\n{}\n'.format(_render(CARGO_CONF)))
        f.write('\n{}\n'.format('\n'.join(binaries)))
        f.write('[dependencies]\n{}'.format(_render(CARGO_DEPS)))
        f.write('\n\n{}'.format(extra(CARGO_EXTRA)))
|
It can be surprising how quickly a fire can spread. However, facilities equipped with a properly installed and well-maintained fire sprinkler system carry a huge advantage. A quick response can drastically slow a fire's progress, thus reducing the damage, until firefighters arrive.
|
# Copyright 2019-2020 by Christopher C. Little.
# This file is part of Abydos.
#
# Abydos is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Abydos is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Abydos. If not, see <http://www.gnu.org/licenses/>.
"""abydos.distance._lorentzian.
Lorentzian distance
"""
from math import log1p
from typing import Any, Optional
from ._token_distance import _TokenDistance
from ..tokenizer import _Tokenizer
__all__ = ['Lorentzian']
class Lorentzian(_TokenDistance):
    r"""Lorentzian distance.

    For two multisets X and Y drawn from an alphabet S, Lorentzian distance is

        .. math::

            dist_{Lorentzian}(X, Y) =
            \sum_{i \in S} log(1 + |A_i - B_i|)

    Notes
    -----
    No primary source for this measure could be located, but it is included
    in surveys and catalogues, such as :cite:`Deza:2016` and :cite:`Cha:2008`.

    .. versionadded:: 0.4.0
    """

    def __init__(
        self, tokenizer: Optional[_Tokenizer] = None, **kwargs: Any
    ) -> None:
        """Initialize Lorentzian instance.

        Parameters
        ----------
        tokenizer : _Tokenizer
            A tokenizer instance from the :py:mod:`abydos.tokenizer` package
        **kwargs
            Arbitrary keyword arguments

        Other Parameters
        ----------------
        qval : int
            The length of each q-gram. Using this parameter and tokenizer=None
            will cause the instance to use the QGram tokenizer with this
            q value.

        .. versionadded:: 0.4.0
        """
        super().__init__(tokenizer=tokenizer, **kwargs)

    def dist_abs(self, src: str, tar: str) -> float:
        """Return the Lorentzian distance of two strings.

        Parameters
        ----------
        src : str
            Source string (or QGrams/Counter objects) for comparison
        tar : str
            Target string (or QGrams/Counter objects) for comparison

        Returns
        -------
        float
            Lorentzian distance

        Examples
        --------
        >>> cmp = Lorentzian()
        >>> cmp.dist_abs('cat', 'hat')
        2.772588722239781
        >>> cmp.dist_abs('Niall', 'Neil')
        4.852030263919617
        >>> cmp.dist_abs('aluminum', 'Catalan')
        10.1095256359474
        >>> cmp.dist_abs('ATCG', 'TAGC')
        6.931471805599453

        .. versionadded:: 0.4.0
        """
        self._tokenize(src, tar)
        src_counts = self._src_tokens
        tar_counts = self._tar_tokens
        # Accumulate log(1 + |A_i - B_i|) over the combined alphabet.
        distance = 0.0
        for tok in self._total().keys():
            distance += log1p(abs(src_counts[tok] - tar_counts[tok]))
        return distance

    def dist(self, src: str, tar: str) -> float:
        """Return the normalized Lorentzian distance of two strings.

        Parameters
        ----------
        src : str
            Source string (or QGrams/Counter objects) for comparison
        tar : str
            Target string (or QGrams/Counter objects) for comparison

        Returns
        -------
        float
            Normalized Lorentzian distance

        Examples
        --------
        >>> cmp = Lorentzian()
        >>> cmp.dist('cat', 'hat')
        0.6666666666666667
        >>> cmp.dist('Niall', 'Neil')
        0.7777777777777778
        >>> cmp.dist('aluminum', 'Catalan')
        0.9358355851062377
        >>> cmp.dist('ATCG', 'TAGC')
        1.0

        .. versionadded:: 0.4.0
        """
        if src == tar:
            return 0.0
        if not src or not tar:
            return 1.0
        # dist_abs tokenizes, so the token counters below are current.
        numerator = self.dist_abs(src, tar)
        src_counts = self._src_tokens
        tar_counts = self._tar_tokens
        # Normalize by the maximum attainable distance for these tokens.
        denominator = 0.0
        for tok in self._total().keys():
            denominator += log1p(max(src_counts[tok], tar_counts[tok]))
        return numerator / denominator
if __name__ == '__main__':
    # Run the embedded doctest examples when executed directly.
    import doctest

    doctest.testmod()
|
I did not have a successful night's sleep. After falling asleep at 10pm, I woke at 2.30am and that was it. I could not sleep. When it came to 6am and time to get up, I had been solidly awake for the past 3.5 hours. How annoying.
The guesthouse had lots of free food, so I made some peanut butter sandwiches to take with me for breakfast, with some kiwi. Then I headed off back on the metro.
One thing I had not noticed yesterday were all the emergency boxes everywhere, filled with gas masks. Not quite sure what emergency they are planning for. But these boxes were everywhere!
And even at 6.30am, on a Saturday, the tube was surprisingly busy. I was only going a couple of stops along.
But once I was back outside, the streets were deserted. It was just getting light and it was cold. This street was full of small 2-3 storey grey buildings and it looked so similar to the Beijing hutongs.
I was headed to the USO building. This is a military services organisation and they run tours to the North Korean border. Starting at 7am…! But I arrived in plenty of time.
It was about an hour's drive to reach Camp Bonifas – the U.S. Military base near the border. Here our passports were checked and we were given a briefing of the area by an army man. There were so many rules about what you cannot do that I was a bit lost.
North Korea and South Korea are technically still at war. There was a truce agreement entered into in 1953 where the border, known as the Military Demarcation Line, was decided upon. The line is marked by white markers, about 1m high and spaced about 10m apart. A 2km strip of land on each side of the line (so 4km in total) was marked out, and this whole area is known as the demilitarised zone (DMZ). This stretches the full width of the country. We were loaded onto a bus and driven into the DMZ. We passed a couple of anti-tank sections on the road. Apparently these can be exploded to slow down tanks. And we passed by several watch towers which are used to watch the border.
We entered the Freedom House which sits metres from the border. At the back of this building is a road called Conference Road and here the border is marked out by a small stone line. Several buildings sit across the line and are used for conferences.
The representatives from the north sit still in North Korea and those from the south, in South Korea. There is a table which runs across the border, so that they can remain in their own country. We were told to stand in a row along the back of Freedom House. Everyone was too scared to move – the army guys were scary and there were so many rules, we weren't sure what to do. No photos to the left, right or behind, only in front. No pointing, gesturing, smiling, etc towards North Korea. And no stepping out of line. Several South Korean soldiers were standing next to the buildings on the south side. There was one North Korean soldier standing by the North Korean building on the north side. It was a bit strange seeing someone from North Korea. We went inside the main conference building. Again in here were 2 South Korean soldiers.
We were able to cross over into North Korea inside this building and stand on the north side. Back on the bus and we drove over to a lookout point. Again we were right on the border. But this time with a great view across the land – and properly into North Korea. Both North and South Korea maintain peace villages.
In South Korea, this is Daeseong-dong. The people who live here receive large areas of land to farm on, earn c$80k tax free (which is a lot for South Korea) and receive various other benefits. In North Korea this is Kijong-dong, unofficially called propaganda village. The buildings are small concrete tower blocks. It is thought that no one lives here. Apparently at night the lights go on and off at certain times and are bright on the top floors and dim on the lower floors – leading people to believe that the buildings are empty shells. Each village has a flag. North Korea built their flag pole taller than South Korea's and it holds a 300kg flag!
The whole time we were there the village had a loudspeaker playing – it was playing words (which I didn’t understand) and songs. Apparently these have only just started up again due to the recent tension and it plays day and night.
From the lookout point, we could see the village and the flagpole. It was pretty cool. We drove past a couple of other landmarks. The bridge of no return.
This bridge was used to return members of each country following the war. And the site of the 1976 axe murders – whilst cutting down a tree, two American soldiers were killed by North Korean soldiers. The DMZ is a large nature reserve. There are various rare species which live here as well as some animals which only live here. We saw a water deer – also known as a vampire deer. We then drove back to the army base. Here there was a small shop selling various souvenirs, including North Korean money (of course I got some…!).
Next up – a train station at Dorosan. This train station is the final stop before North Korea. It is hoped that trains will run across the border and the station already has signs for trains to Pyongyang ready. But for now it’s just a tourist attraction. For lunch we stopped at a canteen style cafe.
They served Bibimbap which was in pre-made bowls (with carrot, spinach, mushroom and seaweed) then you added your own rice. Plus there was cabbage salad and slices of orange. It was ok – as exciting as vegetables and rice is. I’m not a massive fan of the chilli sauce they use. Where is the sriracha?!
After the truce was decided in 1953, North Korea still dug tunnels beneath the DMZ to try to enter South Korea. These tunnels were apparently designed for a surprise attack on Seoul from North Korea, and could accommodate 30,000 men per hour along with light weapons. Four such tunnels have been found and it is believed there could be 20. When found North Korea claimed these to be coal mines. The third tunnel is now open to tourists. The third tunnel is 76m underground. To get down to it, you have to walk down a long South Korean made wide concrete tunnel. At the end of the concrete tunnel is the beginning of the third tunnel. This tunnel is 1,635 metres long, 1.95m high at its maximum point and 2.1m wide.
You could walk some way along it, until the border point – here it is blocked off with barricades. There are holes in the rock where dynamite could be placed. I thought it was going to be very claustrophobic, but actually it wasn’t too bad. The last stop was another lookout point. This one wasn’t as good as the one right by the border. Whilst you could see far, as we were a few kilometres away from the border, it was not as clear. Then we headed back to the city.
We got back about 3pm and I jumped on the metro to Itaewon and met my friend. The streets here are full of trendy shops and cafes. We stopped off for cake at one of the vegan cafes – Plant.
We shared a salad (leaves, black rice, pumpkin and vegan cheese) it was so good! And some cakes – peanut butter and chocolate. And red velvet with strawberry. Wow!! So delicious! And I was so full.
From here we wandered around a bit more, then caught a bus into Hannam-dong. This is a largely residential area, with some embassies. But there is a random museum dotted in the middle which has a light exhibition on. My friend had some free tickets, so we went to have a look. There was a really long queue. Largely girls of a similar age to us. Like a selfie brigade. They were all dressed up, lots of make up, all armed with a smart phone and busy taking photos of themselves and each other. This continued inside (once we eventually got in)! It was cool.
There were 9 rooms, each with a different light design. A couple were a bit boring. But the others were all interactive and you could play with the light.
From here, back on the bus and to another trendy street in a different part of Itaewon. From here there was a great view of Mountain Namsan with the Seoul Tower on top. We were still full from the cake earlier, so rather than have dinner, we stopped off at a traditional rice wine (makgeolli) bar.
This fermented drink comes in white bottles which look like bleach bottles. It is a milky colour and slightly fizzy. We got a selection of different ones and they came presented so nicely in different coloured small cups, on a wooden platter. With free rice crackers!
They all tasted slightly differently, some sweeter and some fizzier. It was actually ok (i was expecting it to taste rather horrible!). We also ordered some cooked vegetables, which came as a mini pancake, all cooked and stuck together in a rice batter. Was really good. But didn’t manage to finish it all!!
It was about 8pm by now, so we caught the bus heading back to Dongdaemun, but got off early in Myeong-dong to have a wander around. This is shopping central. There was everything. All lit up. And crowds and crowds of people. Food stalls lined every street. Each stall with something different – meat on sticks, pancakes, fish-shaped red bean filled desserts (I used to love those), dried persimmon (but so expensive), syrup covered sweet potato on sticks (so good), cups of fruit, octopus. Mainly just meat things.
We went into a large chemist shop (the local Boots equivalent). The main thing here – face masks. Koreans are obsessed. So much that the first 4 aisles of the shop was just face masks. And not the face masks we get at home. These are face-shaped wet-wipe style things that you put over your face and leave for 20 minutes. And they aren’t that cheap either – at about £1 each. But they use them every day. So I bought a few to try out! From here I caught the metro back to my guesthouse and got back about 9pm. What a long day!
|
#!/usr/bin/env python
#
# Class 4
# Exercise 1
# Author Mateusz Blaszczyk
""" I. Prompt a user to input an IP address. Re-using some of the
code from class3, exercise4--determine if the IP address is valid.
Continue prompting the user to re-input an IP address until a valid IP
address is input. """
# initialize ip_addr
ip_addr = ''
while True:
# initialize the valid flag
valid = True
# prompt user for IP addres
ip_addr = raw_input ("Please enter a valid IP address: ")
octets = ip_addr.split(".")
if len(octets) != 4:
valid = False
else:
a,b,c,d = octets
if int(a) < 1 or int(a) > 223 or int(a) == 127:
valid = False
elif ( (int(b) not in range (0, 256)) or
(int(c) not in range (0, 256)) or
(int(d) not in range (0, 256))) :
valid = False
elif int(a) == 169 and int(b) == 254:
valid = False
if valid:
# break if valid, otherwise rinse and repeat
print "[%s] is valid!" % ip_addr
break
else:
print "[%s] is not valid!" % ip_addr
|
CRW is pleased to announce its most recent service line of Geotechnical Engineering!
Steven Halcomb, PE, GE (licensed in Alaska and California) brings a wealth of experience and knowledge to our newest department. With both a Bachelor's and Master's in Civil Engineering, graduate studies in Earthquake and Arctic Engineering and ongoing Ph.D. coursework on the topic at Kansas State University, Steve is a Geotechnical guru! Steven brings expertise from working throughout Alaska during his 11-year career, having performed geotechnical explorations from southeast Alaska to the North Slope, the Aleutian chain, and right here in Anchorage. He oversees projects both large and small to ensure geotechnical standards are met and clients are provided with the information they need to advance their projects from concept to completion.
|
#!/usr/bin/env python
#
# A library that provides a Python interface to the Telegram Bot API
# Copyright (C) 2015-2016
# Leandro Toledo de Souza <devs@python-telegram-bot.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser Public License for more details.
#
# You should have received a copy of the GNU Lesser Public License
# along with this program. If not, see [http://www.gnu.org/licenses/].
""" This module contains the Promise class """
from threading import Event
class Promise(object):
    """A simple Promise implementation for the run_async decorator.

    Wraps a callable and its arguments so the call can be executed on a
    worker thread while other threads block on :meth:`result` until the
    call has completed.
    """

    def __init__(self, pooled_function, args, kwargs):
        """Store the callable and its arguments.

        Args:
            pooled_function (callable): The function to run asynchronously.
            args (tuple): Positional arguments for ``pooled_function``.
            kwargs (dict): Keyword arguments for ``pooled_function``.
        """
        self.pooled_function = pooled_function
        self.args = args
        self.kwargs = kwargs
        # Set once the call has finished, whether it succeeded or raised.
        self.done = Event()
        self._result = None

    def run(self):
        """Execute the wrapped callable and record its return value.

        Any exception raised by the callable propagates to the caller of
        ``run``, but ``done`` is always set so threads waiting in
        :meth:`result` are released.
        """
        # The original ``except: raise`` clause was a no-op (a bare
        # re-raise); try/finally alone guarantees the event is set while
        # still propagating any exception.
        try:
            self._result = self.pooled_function(*self.args, **self.kwargs)
        finally:
            self.done.set()

    def result(self, timeout=None):
        """Block until the call has finished and return its result.

        Args:
            timeout (float, optional): Maximum seconds to wait for the
                call to finish; ``None`` waits indefinitely.

        Returns:
            The wrapped callable's return value, or ``None`` if the call
            has not finished within ``timeout`` or raised an exception.
        """
        self.done.wait(timeout=timeout)
        return self._result
|
http://www.jewishworldreview.com -- SUDDENLY, THIS PRESIDENTIAL CAMPAIGN is larger than Bill Clinton's mini-presidency, which has reached the 89th of its 96 months without seriously attempting to meet the principal domestic challenge of its era, entitlement reform. Suddenly, Social Security, the biggest and most popular program of big government--the program that calls the country's bluff, proving that although Americans praise Jeffersonian minimalism in government, they demand a large, active, ameliorative Hamiltonian government--is the great issue in this election.
George W. Bush made it so with his decision to propose allowing Americans to devote a small portion, perhaps one-sixth, of their payroll taxes to personal retirement accounts invested in some well-established stock and bond funds. Because the crisis of the system, although predictable, is not imminent, dealing with Social Security's future just now is optional and potentially hazardous, politically. Therefore Bush is demonstrating leadership. He also is plucking up a banner from some Democratic senators.
It will be many years (how many depends on the economy's performance) before the imbalance between Social Security revenues (produced by a declining ratio of workers to retirees) and promised benefits becomes acute. There can be three ingredients to a solution--cutting benefits, raising payroll taxes (close to 80 percent of Americans already pay more in payroll taxes than in income taxes) and causing retirement resources to grow faster than they do under the current Social Security system.
This third is the core of Bush's solution. Al Gore's proposal is to increase benefits (for working mothers who take time off to raise children) in the near term, and in the longer term--here Gore's plan is a more radical departure from Social Security's premises than Bush's is--to turn Social Security into a welfare program by large infusions of general revenues.
Gore says Bush's plan would put retirement resources horribly at risk. Gore compares participation in the stock market to rolling "dice" or playing "roulette" in a "casino." His rhetoric radiates fear and incomprehension of one of America's central economic institutions, by which capital is raised and allocated to productive uses.
Gore's rhetoric has about it the ring of the '90s--the 1890s, when Wall Street was feared and demonized. Today polls show that the only age cohort opposed to some modest diversion of payroll taxes to personal retirement accounts is Americans over 70. And among voters under 35, there is support of landslide proportions. If Gore does not understand why, he evidently missed the 1990s. Writing in National Review, Ramesh Ponnuru notes that in 1989, a high priority of the new Republican president, George Bush, was a cut in the capital gains tax. He could not get it. In 1997, a Democratic president signed a capital gains tax cut. What had changed? A number of things, but most important was the process that produced this number: Between 1989 and 1997, more than 25 million additional Americans became owners of stocks.
Democracies make difficult decisions under the lash of necessity. Americans radically changed the role of the central government in the 1930s not because they read John Maynard Keynes's "The General Theory of Employment, Interest, and Money," but because a large event--the Great Depression--frightened them. Sixty years ago this month, the British, having consigned Churchill to the wilderness for years, brought him to power only because German forces were rampant across the English Channel. In the 1960s, America came to grips with its racial problem because it had to--as a result of civil, and uncivil, disobedience, it could no longer have a comfortable conscience or domestic tranquility.
No comparable exigency compelled Bush to put Social Security reform at the center of the campaign. But, then, considerations of equity are a kind of exigency. Personal retirement accounts would be sound social policy even if Social Security's condition were not actuarially ominous. Such accounts are implements whereby people of modest means can, over a 45-year working life (about average), with the help of what Keynes called "the magic of compound interest," build wealth.
Two years ago two Democratic senators singularly informed about Social Security, New York's Pat Moynihan and Nebraska's Bob Kerrey, proposed that employees be allowed to put approximately one-sixth of their payroll taxes into personal savings accounts. According to Moynihan, a worker who spent 45 years at Bethlehem Steel "could easily find himself with an estate of half a million dollars."
This speaks volumes about the recent transformation of American politics: It is the Republican presidential candidate, a conservative, not the Democratic candidate with labor's backing, who is embracing Social Security, the central program of the welfare state, as a vehicle for advancing an egalitarian agenda.
|
# -*- coding: utf-8 -*-
from project import helpers
import resource
from django.utils.translation import get_language
def create(request):
    """Validate input for creating a project record.

    Returns a ``(payload, status_code, ok)`` triple: an error payload with
    404 and ``False`` on any failed check, or ``{'code': 'ok'}, 200, True``
    when the request may proceed.
    """
    data = request.DATA
    if data is False:
        return {'code': 'no_data'}, 404, False
    # Fill in None for any resource field missing from the payload so the
    # presence checks below can rely on the keys existing.
    data = helpers.set_null_values_if_not_exist(data, resource.get_fields())
    if data['name'] is None:
        return {'code': 'project/no_name'}, 404, False
    # Title and description are localized fields keyed by active language.
    if data['title_%s' % get_language()] is None:
        return {'code': 'project/no_title'}, 404, False
    if data['description_%s' % get_language()] is None:
        return {'code': 'project/no_description'}, 404, False
    user = helpers.get_user(request)
    # Only superusers may create projects.
    if not user or not request.user.is_superuser:
        return {'code': 'no_access'}, 404, False
    # NOTE(review): this branch is unreachable -- a None user is already
    # rejected by the ``not user`` check above. Presumably the intent was
    # to distinguish inactive accounts; confirm helpers.get_user's contract
    # (None vs. False return) before relying on the 'account/not_active'
    # code ever being emitted.
    if user is None:
        return {'code': 'account/not_active'}, 404, False
    # Project names must be unique.
    data, code, item = resource.get_item_by_name(request, data['name'])
    if item is not False:
        return {'code': 'project/exists', 'values': [data['name']]}, 404, False
    return {'code': 'ok'}, 200, True
def update(request, project_id):
    """Validate input for updating an existing project record.

    Returns a ``(payload, status_code, ok)`` triple: an error payload with
    404 and ``False`` on any failed check, or ``{'code': 'ok'}, 200, True``
    when the update may proceed.
    """
    data = request.DATA
    if data is False:
        return {'code': 'no_data'}, 404, False
    # Fill in None for any resource field missing from the payload so the
    # presence checks below can rely on the keys existing.
    data = helpers.set_null_values_if_not_exist(data, resource.get_fields())
    if data['name'] is None:
        return {'code': 'project/no_name'}, 404, False
    # Title and description are localized fields keyed by active language.
    if data['title_%s' % get_language()] is None:
        return {'code': 'project/no_title'}, 404, False
    if data['description_%s' % get_language()] is None:
        return {'code': 'project/no_description'}, 404, False
    user = helpers.get_user(request)
    # Only superusers may update projects.
    if not user or not request.user.is_superuser:
        return {'code': 'no_access'}, 404, False
    # NOTE(review): unreachable -- None is already rejected by ``not user``
    # above; confirm helpers.get_user's contract before relying on this.
    if user is None:
        return {'code': 'account/not_active'}, 404, False
    data, code, item = resource.get_item_by_name(request, data['name'])
    # Another record may only hold this name if it is this very project.
    if (item is not False) and (int(item.id) != int(project_id)):
        # BUG FIX: was data['text'], which is not a key populated above and
        # would raise KeyError; report the conflicting name as create() does.
        return {'code': 'project/exists', 'values': [data['name']]}, 404, False
    return {'code': 'ok'}, 200, True
def delete(request):
    """Validate input for deleting a project record.

    Returns a ``(payload, status_code, ok)`` triple: an error payload with
    404 and ``False`` on a failed check, or ``{'code': 'ok'}, 200, True``.
    """
    data = request.DATA
    if data is False:
        return {'code': 'no_data'}, 404, False
    user = helpers.get_user(request)
    # Only superusers may delete projects.
    if not user or not request.user.is_superuser:
        return {'code': 'no_access'}, 404, False
    # NOTE(review): unreachable -- None is already rejected by ``not user``
    # above; confirm helpers.get_user's contract before relying on this.
    if user is None:
        return {'code': 'account/not_active'}, 404, False
    return {'code': 'ok'}, 200, True
|
If you think you have lead paint or failing material that may contain asbestos in your home, call in the pros before renovating.
No matter how well built, older houses are likely to contain environmental hazards. The EPA estimates that 87 percent of homes built before 1940 contain lead paint. Asbestos is ubiquitous in 20th-century building components, from wall and pipe insulation to vinyl-asbestos tile.
A toxin that affects the nervous system, lead was a key component in house paint before its use was banned in 1978. While asbestos has been banned in many common building materials, it’s still permitted in others, including cement roofing and siding shingles, roof coatings and felt, and some types of vinyl floor tile.
Left undisturbed, the lead in paint and the asbestos in building components are not hazardous. When a renovation project affects these materials, though, toxins can be released into the environment, posing a danger to both residents and the construction crew. That’s why jurisdictions from the federal to the local level now regulate the removal of materials that contain lead and asbestos.
When lead-based paint is disturbed by chipping, scraping, sanding, or high-heat removal, it releases particulates. Just a small amount of lead-contaminated dust—less than 1⁄16 of the amount in a sugar packet—can cause nerve and brain damage in children. In adults, lead exposure can lead to hypertension, a major risk factor for heart disease.
Asbestos is most dangerous when it’s deteriorating or disturbed, releasing fibers into the air. If the material is in good condition—say, as heating duct insulation inside walls—it’s better to leave it alone, says the U.S. Consumer Product Safety Commission. Removing it may create a health hazard where none existed.
When an asbestos-containing material is damaged, it becomes friable, releasing microscopic asbestos fibers into the air, where they can be inhaled or swallowed. Exposure can cause lung cancer, and also a rare related cancer called mesothelioma.
Usually associated with industrial exposure, mesothelioma is incurable and has a latency period of 20 to 50 years. People who work on old houses are at high risk, too. Even short-term exposure, as might occur when prying up old vinyl floor tiles without wearing a respirator, has been known to cause mesothelioma.
For lead: In 2010, the Environmental Protection Agency passed a rule requiring that commercial contractors use only low-temperature heating devices and vacuum-attached power tools to mechanically strip paint suspected of containing lead.
Contractors are required to use containment methods to keep paint and debris from spreading beyond the work area.
For asbestos: If you suspect you have deteriorating asbestos in your home, don’t try to remove it yourself, even to take a sample. Contact an accredited asbestos inspector to assess and test it. If asbestos is found in an area under renovation, many jurisdictions issue stop-work orders until the material is removed. The work should be done by a certified asbestos remediator (usually a different party from the inspector, for conflicts-of-interest reasons). Asbestos remediation is regulated at the state level: look for licensed pros online at your state’s government site (Google state name and “asbestos regulations”).
Again, there’s usually no need to disturb sound materials that contain asbestos. And instead of ripping up old vinyl tile, cover it with new flooring instead.
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.