text
stringlengths 29
850k
|
|---|
from __future__ import absolute_import
import json
import logging
import sys
from sqlalchemy import Column, Integer, String
from sqlalchemy.orm import relationship
from sqlalchemy.dialects.mysql import TINYINT
from .base import BetterBase, session_scope
from .drop import Drop
from .log import Log
from .prize import Prize
QUEST_PRIZE_TYPE = 4
class Quest(BetterBase):
    """A quest entry imported from the /dff/quest/list payload.

    Only stable metadata is persisted; volatile per-player flags and the
    relational 'prizes' payload are stripped in __init__ before the
    remaining kwargs are handed to the declarative base.
    """
    __tablename__ = 'quest'
    id = Column(Integer, primary_key=True, autoincrement=False)
    title = Column(String(length=64), nullable=False)
    description = Column(String(length=1024), nullable=False)
    achieve_cond_description = Column(String(length=128), nullable=False)
    achieve_type = Column(TINYINT, nullable=False)
    achieve_type_name = Column(String(length=16), nullable=False)
    hint_title = Column(String(length=128), nullable=False)
    hint_msg = Column(String(length=512), nullable=False)

    # (attribute, label) pairs consumed by the frontend table renderer.
    frontend_columns = (
        ('title', 'Title'),
        ('description', 'Description'),
    )

    @property
    def name(self):
        """Alias for templates that expect a generic ``.name`` attribute."""
        return self.title

    def generate_main_panels(self):
        """Build the panel dicts shown on this quest's detail page."""
        self._main_panels = [
            {
                'title': self.title,
                'body': self.description,
            },
            {
                'title': 'Prizes',
                # self.prizes is the relationship populated from Prize rows.
                'items': self.prizes,
            },
        ]

    def __init__(self, **kwargs):
        # Encode the description to the console encoding, dropping
        # unrepresentable characters.
        # BUGFIX: the keyword is ``errors`` (plural); ``error='ignore'``
        # raised TypeError.  Also fall back to UTF-8 when stdout has no
        # declared encoding (e.g. when output is piped).
        self.description = kwargs['description'].encode(
            sys.stdout.encoding or 'utf-8', errors='ignore')
        # Strip volatile / relational keys before handing the rest to the
        # declarative base (which maps remaining kwargs onto columns).
        for key in (
            'can_challenge',
            'disp_number',
            'is_achieved',
            'is_completed',
            'is_new',
            'is_tutorial',
            'prizes',
            'order_cond_description',
            'is_special',
            'is_challenging',
            # This is the recipe for "Create Ability" quests
            'ability_recipes',
            'description',
        ):
            kwargs.pop(key, None)
        super(Quest, self).__init__(**kwargs)

    def __repr__(self):
        return self.title
def import_quests(data=None, filepath=''):
    '''
    /dff/quest/list

    Import quests and their prizes into the database.

    Pass either the already-parsed response as ``data`` (a dict) or the
    path of a JSON dump as ``filepath``.  Returns True on success.
    Raises ValueError when neither a usable ``data`` dict nor a
    ``filepath`` is supplied.
    '''
    logging.debug('{}(filepath="{}") start'.format(
        sys._getframe().f_code.co_name, filepath))
    if data is None or not isinstance(data, dict):
        if not filepath:
            raise ValueError('One kwarg of data or filepath is required.')
        with open(filepath) as infile:
            data = json.load(infile)
    # Flag payloads we do not handle yet so someone investigates them.
    if data.get('special_quest_prizes'):
        logging.critical('There is a special quest prize!')
    success = False
    with session_scope() as session:
        for quest in data['quests']:
            # Keep the prize list aside: Quest.__init__ strips 'prizes'
            # from its kwargs, so grab it before constructing the row.
            prizes = quest['prizes']
            new_quest = session.query(Quest)\
                .filter(Quest.id == quest['id']).first()
            if new_quest is None:
                new_quest = Quest(**quest)
                new_log = Log(log='Create {}({})'.format(
                    type(new_quest).__name__, new_quest))
                session.add_all((new_quest, new_log))
                # Commit now so the quest row exists before prizes
                # reference it below.
                session.commit()
            for prize in prizes:
                id = prize['id']  # NOTE: shadows the builtin ``id``
                name = prize['name']
                # Reuse an existing Drop row or create a new one.
                drop = session.query(Drop).filter(
                    Drop.id == id).first()
                if drop is None:
                    drop = Drop(id=id, name=name)
                # Skip prizes already recorded for this quest.
                old_prize = session.query(Prize).filter(
                    Prize.drop_id == id,
                    Prize.prize_type == QUEST_PRIZE_TYPE,
                    Prize.quest == new_quest).first()
                if old_prize is not None:
                    continue
                prize['drop_id'] = id
                prize['prize_type'] = QUEST_PRIZE_TYPE
                new_prize = Prize(**prize)
                new_prize.drop = drop
                new_prize.quest = new_quest
                #session.add(new_prize)
                #session.flush()
                new_log = Log(log='Create {}({}) from {}({})'.format(
                    type(new_prize).__name__, new_prize,
                    type(new_quest).__name__, new_quest))
                #session.add(new_log)
                session.add_all((new_prize, new_log))
                # Commit per prize so a later failure keeps earlier rows.
                session.commit()
        success = True
    logging.debug('{}(filepath="{}") end'.format(
        sys._getframe().f_code.co_name, filepath))
    return success
### EOF ###
|
Bayley C W 4479 Private QSA (4).
Bayley F J 4175 Private QSA (5). Wounded, Koodoosberg Drift, 07 Feb 00.
Bayley Frederick G Lieutenant QSA (5).
Bayley H 5809 Private QSA (3).
|
# -*- coding: utf-8 -*-
# © <YEAR(S)> <AUTHOR(S)>
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from openerp import fields, models
class MrpBomDetailHistory(models.Model):
    """Audit log of changes made to BoM (Bill of Materials) detail lines.

    Each record captures who changed which product line, when, the kind
    of action performed, and the quantity/cost values before and after
    the change.
    """
    _name = 'mrp.bom.detail.history'
    _description = 'Mrp Bom Detail History'

    # Master product whose BoM line was modified.
    product_master_id = fields.Many2one(
        'product.product',
        string='Product Master',
    )
    # Component product before the change.
    prev_product_detail_id = fields.Many2one(
        'product.product',
        string='Previous product detail',
    )
    # Component product after the change.
    upd_product_detail_id = fields.Many2one(
        'product.product',
        string='Updated product detail',
    )
    # User who performed the action.
    user_id = fields.Many2one(
        'res.users',
        string='User',
    )
    # Kind of modification this history row records.
    action = fields.Selection(
        [('create', 'Create'),
        ('update', 'Update'),
        ('delete', 'Delete')],
        string="Action",
    )
    # When the action happened.
    action_date = fields.Datetime(
        string="Date",
    )
    prev_qty = fields.Float(
        'Previous quantity',
    )
    upd_qty = fields.Float(
        'Updated quantity',
    )
    prev_cost = fields.Float(
        'Previous cost',
    )
    upd_cost = fields.Float(
        'Updated cost',
    )
    # NOTE(review): "deference" looks like a typo for "difference", but the
    # field name is part of the stored schema — renaming needs a migration.
    deference = fields.Float(
        'Deference',
    )
|
Join us as we kick off the American Farm Bureau Photo Contest! The contest is open to all state and county Farm Bureau members and staff over the age of 18 at the time of entry, including professional photographers.
We are holding this contest to obtain usable and appropriate photos that accurately portray today’s agriculture and safe practices of farmers and ranchers for future publications and promotions. All photos submitted must exemplify safe practices on the farm or ranch.
Three photos will be selected from each of the four categories. The winners will receive a cash prize and be featured on our websites and social media.
Open to residents of the USA.
Participants retain ownership of the copyright in any submitted photographs, and a photo credit will be given to the photographer in any use whenever practical.
|
#!/usr/bin/env python3
####
### Project: Pyfuzz
### Version: 1.1.0
### Creator: Ayoob Ali ( www.AyoobAli.com )
### License: MIT
###
import http.client
import sys
import os
from optparse import OptionParser
import string
import signal
import ssl
from time import sleep
import random
import subprocess
# Path of the results log file; empty string disables file logging (see printMSG).
logFile = ""
def signal_handler(signum, frame):
    """Handle Ctrl-C: report the abort to the user and exit with status 0.

    Signature follows the (signum, frame) contract of the signal module.
    """
    print("\nScan stopped by user.")
    sys.exit(0)
# Install the Ctrl-C handler so an in-progress scan can be aborted cleanly.
signal.signal(signal.SIGINT, signal_handler)
def printMSG(printM):
    """Print a message and, when a log file is configured, append it there.

    Uses the module-level ``logFile`` path; an empty string disables
    file logging.
    """
    print(printM)
    if logFile != "":
        # Context manager guarantees the handle is closed even if the
        # write fails (the original leaked the handle on error).
        with open(logFile, "a") as fhandle:
            fhandle.write(printM + "\n")
def cmd(command=None):
    """Run *command* through the shell and capture its output.

    Returns a dict with ``returnCode`` (99 when no command was given or
    an exception occurred) plus ``stdout``/``stderr`` on success, or an
    ``error`` key holding the exception on failure.

    CAUTION: shell=True is required because callers pass complete shell
    command lines — only feed it trusted input.
    """
    returnArr = {"returnCode": 99}
    try:
        if not command:
            # Nothing to run: report the sentinel code.
            return returnArr
        proc = subprocess.Popen(command, shell=True,
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE)
        # communicate() waits for the process, so returncode is set and
        # the original terminate() call on the finished process was a no-op.
        out, err = proc.communicate()
        returnArr.update({
            "stdout": out.decode("utf-8"),
            "stderr": err.decode("utf-8"),
            "returnCode": proc.returncode,
        })
        return returnArr
    except Exception as ErrMs:
        # Preserve the best-effort contract: never raise, report instead.
        returnArr.update({"error": ErrMs})
        return returnArr
def main():
    """Parse command-line options and fuzz the target URL with a path list.

    Opens one persistent HTTP(S) connection (optionally tunnelled through
    a proxy), requests each path from the wordlist, reports matching
    status codes, optionally runs a shell command per hit and appends
    results to a log file.
    """
    global logFile
    parser = OptionParser(usage="%prog -u http://example.com/en/ -l sharepoint.txt", version="%prog 1.1.0")
    parser.add_option("-u", "--url", dest="targetURL", metavar="URL", help="Target URL to scan")
    parser.add_option("-l", "--list", dest="listFile", metavar="FILE", help="List of paths to scan")
    parser.add_option("-r", "--redirect", action="store_true", dest="showRedirect", help="Show redirect codes (3xx)")
    parser.add_option("-e", "--error", action="store_true", dest="showError", help="Show Error codes (5xx)")
    parser.add_option("-s", "--sleep", dest="milliseconds", type="int", metavar="NUMBER", help="Sleep for x milliseconds after each request")
    parser.add_option("-a", "--header", action="append", dest="headers", help="Add Header to the HTTP request (Ex.: -a User-Agent x)", metavar='HEADER VALUE', nargs=2)
    parser.add_option("-b", "--body", dest="requestBody", metavar="Body", help="Request Body (Ex.: name=val&name2=val2)")
    parser.add_option("-x", "--method", dest="requestMethod", metavar="[Method]", help="HTTP Request Method (Ex.: GET, POST, PUT, etc...)")
    parser.add_option("-i", "--ignore", action="append", dest="ignoreText", metavar="Text", help="Ignore results that contain a specific string")
    parser.add_option("-m", "--min-response-size", dest="dataLength", type="int", metavar="NUMBER", help="The minimum response body size in Byte")
    parser.add_option("-g", "--log", dest="logFile", metavar="FILE", help="Log scan results to a file")
    parser.add_option("-f", "--start-from", dest="startFrom", type="int", metavar="NUMBER", help="Start scanning from path number x in the provided list")
    parser.add_option("-t", "--timeout", dest="reqTimeout", type="int", metavar="Seconds", help="Set request timeout")
    parser.add_option("-v", "--verbose", action="store_true", dest="verbose", help="Show error messages")
    parser.add_option("-d", "--define-variable", action="append", dest="variables", help="Define variables to be replaced in URL (Ex.: -d '$varExtension' 'php')", metavar='VARIABLE VALUE', nargs=2)
    parser.add_option("--cmd", dest="excCMD", metavar="Command", help="Execute shell command on each found results (Use with caution). Available variables ({#CODE#}, {#URL#}, {#SIZE#}, {#BODY#}, and {#REDIRECT#})")
    parser.add_option("-p", "--proxy", dest="httpProxy", metavar="PROXY:PORT", help="HTTP Proxy to pass the connection through (Ex.: localhost:9080)")
    startFrom = 0
    reqTimeout = 15
    (options, args) = parser.parse_args()
    # --- fill in defaults for unset options ---
    if options.requestMethod == None:
        options.requestMethod = "GET"
    if options.requestBody == None:
        options.requestBody = ""
    if options.dataLength == None:
        options.dataLength = 0
    requestHeaders = {}
    if options.headers == None:
        options.headers = []
    for header in options.headers:
        requestHeaders.update({header[0]: header[1]})
    if options.variables == None:
        options.variables = []
    # URL and wordlist are mandatory.
    if options.listFile == None or options.targetURL == None:
        parser.print_help()
        sys.exit()
    if options.logFile != None:
        logFile = options.logFile
    if options.startFrom != None:
        startFrom = options.startFrom
    if options.reqTimeout != None:
        if options.reqTimeout > 0:
            reqTimeout = int(options.reqTimeout)
    excCMD = ""
    if options.excCMD != None:
        excCMD = str(options.excCMD)
    # --- optional HTTP proxy (HOSTNAME:PORT) ---
    isProxy = False
    proxyHost = ""
    proxyPort = 0
    if options.httpProxy != None:
        if str(options.httpProxy).find(':') >= 0:
            httpProxy = str(options.httpProxy).split(':')
            proxyHost = httpProxy[0]
            if httpProxy[1].isnumeric() == True:
                proxyPort = int(httpProxy[1])
                isProxy = True
            # proxyPort stays 0 when the port was not numeric, so this
            # check also rejects non-numeric ports.
            if proxyPort < 1 or proxyPort > 65535:
                printMSG("Error: Port number should be between 1 and 65535.")
                sys.exit()
        else:
            printMSG("Error: Proxy format should be HOSTNAME:PORT")
            sys.exit()
    if not os.path.isfile(options.listFile):
        printMSG("Error: File (" + options.listFile + ") doesn't exist.")
        sys.exit()
    if options.targetURL[-1] != "/":
        options.targetURL += "/"
    # --- split URL into scheme / host / path and open the connection ---
    targetPro = ""
    if options.targetURL[:5].lower() == 'https':
        # Slice offset 8 == len("https://").
        targetDomain = options.targetURL[8:].split("/",1)[0].lower()
        targetPath = "/" + options.targetURL[8:].split("/",1)[1]
        if isProxy == True:
            # Certificate verification is deliberately disabled for scanning.
            connection = http.client.HTTPSConnection(proxyHost, proxyPort, timeout=reqTimeout, context=ssl._create_unverified_context())
            connection.set_tunnel(targetDomain)
        else:
            connection = http.client.HTTPSConnection(targetDomain, timeout=reqTimeout, context=ssl._create_unverified_context())
        targetPro = "https://"
        printMSG("Target : " + targetPro+targetDomain + " (over HTTPS)")
        printMSG("Path : " + targetPath)
    elif options.targetURL[:5].lower() == 'http:':
        # Slice offset 7 == len("http://").
        targetDomain = options.targetURL[7:].split("/",1)[0].lower()
        targetPath = "/"+options.targetURL[7:].split("/",1)[1]
        if isProxy == True:
            connection = http.client.HTTPConnection(proxyHost, proxyPort, timeout=reqTimeout)
            connection.set_tunnel(targetDomain)
        else:
            connection = http.client.HTTPConnection(targetDomain, timeout=reqTimeout)
        targetPro = "http://"
        printMSG("Target : " + targetDomain)
        printMSG("Path : " + targetPath)
    else:
        # No scheme given: assume plain HTTP.
        targetDomain = options.targetURL.split("/",1)[0].lower()
        targetPath = "/"+options.targetURL.split("/",1)[1]
        if isProxy == True:
            connection = http.client.HTTPConnection(proxyHost, proxyPort, timeout=reqTimeout)
            connection.set_tunnel(targetDomain)
        else:
            connection = http.client.HTTPConnection(targetDomain, timeout=reqTimeout)
        targetPro = "http://"
        printMSG("Target : " + targetDomain)
        printMSG("Path : " + targetPath)
    printMSG("Method : " + options.requestMethod)
    printMSG("Header : " + str(requestHeaders))
    printMSG("Body : " + options.requestBody)
    printMSG("Timeout : " + str(reqTimeout))
    printMSG("Proxy : " + str(proxyHost) + ":" + str(proxyPort))
    if options.showRedirect != None:
        printMSG("Show Redirect: ON")
    if options.showError != None:
        printMSG("Show Error : ON")
    # --- probe with a random path to detect catch-all responses ---
    try:
        randomPage = ''.join([random.choice(string.ascii_lowercase + string.digits) for n in range(16)])
        connection.request(options.requestMethod, targetPath+randomPage+".txt", options.requestBody, requestHeaders)
        res = connection.getresponse()
    except Exception as ErrMs:
        if options.verbose != None:
            printMSG("MainError: " + str(ErrMs))
        sys.exit(0)
    if res.status == 200:
        printMSG("NOTE: Looks like the server is returning code 200 for all requests, there might be lots of false positive links.")
    if res.status >= 300 and res.status < 400 and options.showRedirect != None:
        printMSG("NOTE: Looks like the server is returning code " + str(res.status) + " for all requests, there might be lots of false positive links. try to scan without the option -r")
    # Drain the probe response so the keep-alive connection can be reused.
    tpData = res.read()
    with open(options.listFile) as lFile:
        pathList = lFile.readlines()
    totalURLs = len(pathList)
    printMSG ("Scanning ( " + str(totalURLs) + " ) files...")
    countFound = 0
    countAll = 0
    strLine = ""
    # --- main fuzzing loop: one request per wordlist entry ---
    for pathLine in pathList:
        try:
            countAll = countAll + 1
            pathLine = pathLine.strip("\n")
            pathLine = pathLine.strip("\r")
            if countAll < startFrom:
                continue
            if pathLine != "":
                # Substitute user-defined -d variables into the path.
                for variable in options.variables:
                    pathLine = pathLine.replace(variable[0], variable[1])
                if pathLine[:1] == "/":
                    pathLine = pathLine[1:]
                # Overwrite the previous progress line in place.
                print (' ' * len(strLine), "\r", end="")
                strLine = "Checking ["+str(countAll)+"/"+str(totalURLs)+"] "+targetPath+pathLine
                print (strLine,"\r", end="")
                if options.milliseconds != None:
                    sleep(options.milliseconds/1000)
                connection.request(options.requestMethod, targetPath+pathLine, options.requestBody, requestHeaders)
                res = connection.getresponse()
                resBody = res.read().decode("utf-8")
                resBodySize = len(resBody)
                isignored = False
                if options.ignoreText != None:
                    for igText in options.ignoreText:
                        if igText in resBody:
                            isignored = True
                fURL = str(targetPro+targetDomain+targetPath+pathLine)
                redirectHead = ""
                exCommand = False
                if res.getheader("location") != None:
                    redirectHead = str(res.getheader("location"))
                # 2xx hits are always reported.
                if res.status >= 200 and res.status < 300:
                    if isignored == False and resBodySize >= options.dataLength:
                        exCommand = True
                        print (' ' * len(strLine), "\r", end="")
                        printMSG("Code " + str(res.status) + " : " + fURL + " (" + str(resBodySize) + " Byte)")
                        countFound += 1
                # 5xx hits only when -e was given.
                if options.showError != None:
                    if res.status >= 500 and res.status < 600:
                        if isignored == False and resBodySize >= options.dataLength:
                            exCommand = True
                            print (' ' * len(strLine), "\r", end="")
                            printMSG("Code " + str(res.status) + " : " + fURL)
                            countFound += 1
                # 3xx hits only when -r was given.
                if options.showRedirect != None:
                    if res.status >= 300 and res.status < 400:
                        if isignored == False and resBodySize >= options.dataLength:
                            exCommand = True
                            print (' ' * len(strLine), "\r", end="")
                            printMSG("Code " + str(res.status) + " : " + fURL + " ( " + redirectHead + " )")
                            countFound += 1
                # Run the user-supplied --cmd with its {#...#} placeholders
                # replaced by the hit's details.
                if str(excCMD) != "" and exCommand == True:
                    cmdStr = str(excCMD)
                    cmdStr = cmdStr.replace("{#CODE#}", str(res.status))
                    cmdStr = cmdStr.replace("{#URL#}", fURL)
                    cmdStr = cmdStr.replace("{#SIZE#}", str(resBodySize))
                    cmdStr = cmdStr.replace("{#REDIRECT#}", redirectHead)
                    cmdStr = cmdStr.replace("{#BODY#}", resBody)
                    cmdRes = cmd(str(cmdStr))
                    if options.verbose != None and isinstance(cmdRes, dict) and 'stdout' in cmdRes:
                        printMSG(cmdRes['stdout'])
        except Exception as ErrMs:
            if options.verbose != None:
                print (' ' * len(strLine), "\r", end="")
                printMSG("Error[" + str(countAll) + "]: " + str(ErrMs))
            # Close the (possibly wedged) connection; http.client reopens
            # it automatically on the next request() call.
            try:
                connection.close()
                pass
            except Exception as e:
                if options.verbose != None:
                    printMSG("Error2:" + str(e))
                pass
    connection.close()
    print (' ' * len(strLine), "\r", end="")
    printMSG( "Total Pages found: " + str(countFound) )
# Script entry point: run only when executed directly, not on import.
if __name__ == "__main__":
    main()
|
KOLKATA (miningweekly.com) – The Indian government will frame a policy to facilitate the construction of floating storage and regasification unit (FSRU) liquefied natural gas (LNG) terminals at every major port in the country.
The policy will lay down investment structure options for private investors so that there is a shift in focus from land-based LNG storage terminals to offshore terminals, as the construction cost of the latter is almost half that of land-based terminals, an official familiar with framing the proposed policy said.
He said that FSRU LNG terminals at every major port would merely entail lease of a waterfront wherein offshore storage ships with onboard regasification plants would be moored and linked directly to a gas pipeline network, which would ensure a much more cost efficient way of increasing total LNG storage capacity in the country.
Since FSRU would not entail acquisition of land, gestation periods would be shorter and enable speedier ramping up of total storage capacity available for imported LNG, which was a prerequisite if government’s target of increasing natural gas use by two-and-a-half times by 2030 and to 15% of the country’s energy mix were to be achieved.
Currently, natural gas accounts for 6.5% of India's total energy mix. To increase LNG’s share in the total energy mix to 15%, total LNG storage capacity would need to be increased to 47.5-million tons, almost double the capacity currently available at terminals across the country.
Officials pointed out that while some major ports like Mumbai Port Trust had floated tenders inviting investors to construct FSRU storage facilities on waterfront controlled by the port, there had not been any response - presumably in the absence of a policy framework from government.
Some private investors have announced FSRU LNG storage facilities along India’s eastern coastline, but these are largely outside the governance purview of the central government as there were no policy guidelines at present.
Real estate major, the Hiranandani Group, which operated an FSRU facility in Maharashtra, in the west, has proposed a similar storage facility in West Bengal along the Bay of Bengal coast, while two more were on the anvil along the coast of Andhra Pradesh, at the ports of Kakinada and Krishnapatnam.
|
""" Color data builder objects"""
from logger import log
from explorer import get_spends
from toposort import toposorted
class ColorDataBuilder(object):
    """Abstract marker base class for color data builders."""
    pass
class ColorDataBuilderManager(object):
    """Manages multiple color data builders, one per color.

    Builders are created lazily by ``get_builder`` and cached for the
    lifetime of the manager.
    """

    def __init__(self, colormap, blockchain_state,
                 cdstore, metastore, builder_class):
        self.colormap = colormap
        self.metastore = metastore
        self.blockchain_state = blockchain_state
        self.cdstore = cdstore
        self.builders = {}
        self.builder_class = builder_class

    def get_color_def_map(self, color_id_set):
        """Map each color_id in *color_id_set* to its color definition."""
        return {cid: self.colormap.get_color_def(cid)
                for cid in color_id_set}

    def get_builder(self, color_id):
        """Return the cached builder for *color_id*, creating it on demand."""
        try:
            return self.builders[color_id]
        except KeyError:
            pass
        new_builder = self.builder_class(
            self.cdstore, self.blockchain_state,
            self.colormap.get_color_def(color_id), self.metastore)
        self.builders[color_id] = new_builder
        return new_builder

    def ensure_scanned_upto(self, color_id_set, blockhash):
        """Ensure color data is available up to a given block."""
        # color_id 0 denotes the uncolored case and needs no builder.
        for cid in color_id_set:
            if cid == 0:
                continue
            self.get_builder(cid).ensure_scanned_upto(blockhash)

    def scan_txhash(self, color_id_set, txhash):
        """Scan one transaction (by hash) for every color in the set."""
        for cid in color_id_set:
            if cid == 0:
                continue
            builder = self.get_builder(cid)
            builder.scan_tx(self.blockchain_state.get_tx(txhash))
class BasicColorDataBuilder(ColorDataBuilder):
    """Base class for color data builder algorithms."""

    def __init__(self, cdstore, blockchain_state, colordef, metastore):
        self.cdstore = cdstore
        self.blockchain_state = blockchain_state
        self.colordef = colordef
        self.color_id = colordef.color_id
        self.metastore = metastore

    def scan_tx(self, tx):
        """Scan transaction to obtain color data for its outputs."""
        # Look up the colorvalue of every input's previous output.
        in_colorvalues = [
            self.cdstore.get(self.color_id, inp.prevout.hash, inp.prevout.n)
            for inp in tx.inputs]
        # Nothing colored flows in and it is not a genesis-style tx:
        # the outputs cannot be colored either, so stop here.
        if not any(in_colorvalues) and not self.colordef.is_special_tx(tx):
            return
        out_colorvalues = self.colordef.run_kernel(tx, in_colorvalues)
        for out_index, colorvalue in enumerate(out_colorvalues):
            if colorvalue:
                self.cdstore.add(
                    self.color_id, tx.hash, out_index,
                    colorvalue[0], colorvalue[1])
class FullScanColorDataBuilder(BasicColorDataBuilder):
    """Color data builder based on exhaustive blockchain scan,
    for one specific color"""

    def __init__(self, cdstore, blockchain_state, colordef, metastore):
        super(FullScanColorDataBuilder, self).__init__(
            cdstore, blockchain_state, colordef, metastore)
        # Cache the hash of the color's genesis block: no scan ever needs
        # to look further back than this.
        self.genesis_blockhash = self.blockchain_state.get_blockhash_at_height(
            self.colordef.genesis['height'])

    def scan_block(self, blockhash):
        """Scan every transaction in one block, then mark it scanned."""
        log("scan block %s", blockhash)
        for tx in self.blockchain_state.iter_block_txs(blockhash):
            self.scan_tx(tx)
        self.metastore.set_as_scanned(self.color_id, blockhash)

    def scan_blockchain(self, blocklist):
        """Scan the given blocks (oldest first) inside one store transaction."""
        with self.cdstore.transaction():
            for i, blockhash in enumerate(blocklist):
                self.scan_block(blockhash)
                if i % 25 == 0: # sync each 25 blocks
                    self.cdstore.sync()

    def ensure_scanned_upto(self, final_blockhash):
        """Scan every not-yet-scanned block up to *final_blockhash*."""
        if self.metastore.did_scan(self.color_id, final_blockhash):
            return
        # start from the final_blockhash and go backwards to build up
        # the list of blocks to scan
        blockhash = final_blockhash
        genesis_height = self.blockchain_state.get_block_height(
            self.genesis_blockhash)
        blocklist = []
        while not self.metastore.did_scan(self.color_id, blockhash):
            log("recon block %s", blockhash)
            # Prepend so blocklist ends up ordered oldest-first.
            blocklist.insert(0, blockhash)
            blockhash, height = self.blockchain_state.get_previous_blockinfo(
                blockhash)
            # Stop once we reach the genesis block of this color...
            if blockhash == self.genesis_blockhash:
                break
            # sanity check: ...or walk past its height (e.g. on a fork).
            if height < genesis_height:
                break
        self.scan_blockchain(blocklist)
class AidedColorDataBuilder(FullScanColorDataBuilder):
    """Color data builder based on following output spending transactions
    from the color's genesis transaction output, for one specific color"""

    def scan_blockchain(self, blocklist):
        """Scan only transactions reachable by following spends of the
        color's genesis output, instead of every transaction.

        NOTE(review): unlike FullScanColorDataBuilder.scan_blockchain this
        never marks blocks as scanned in the metastore — presumably
        intentional for the aided strategy; verify against callers.
        """
        # Queue of txouts whose spending transactions still need scanning,
        # seeded with the color's genesis output.
        txo_queue = [self.colordef.genesis]
        for blockhash in blocklist:
            if self.metastore.did_scan(self.color_id, blockhash):
                continue
            # remove txs from this block from the queue
            block_txo_queue = [txo for txo in txo_queue
                               if txo['blockhash'] == blockhash]
            txo_queue = [txo for txo in txo_queue
                         if txo['blockhash'] != blockhash]
            block_txos = {}
            while block_txo_queue:
                txo = block_txo_queue.pop()
                if txo['txhash'] in block_txos:
                    # skip the ones we have already visited
                    continue
                block_txos[txo['txhash']] = txo
                spends = get_spends(txo['txhash'], self.blockchain_state)
                for stxo in spends:
                    # Spends inside this block are processed now; spends in
                    # later blocks are deferred to their own block.
                    if stxo['blockhash'] == blockhash:
                        block_txo_queue.append(stxo)
                    else:
                        txo_queue.append(stxo)
            block_txs = {}
            for txhash in block_txos.keys():
                block_txs[txhash] = self.blockchain_state.get_tx(txhash)
            def get_prev_txs(tx):
                """all transactions from current block this transaction
                directly depends on"""
                prev_txs = []
                for inp in tx.inputs:
                    if inp.prevout.hash in block_txs:
                        prev_txs.append(block_txs[inp.prevout.hash])
                return prev_txs
            # Process in dependency order so a tx's inputs are colored
            # before the transactions that spend them.
            sorted_block_txs = toposorted(block_txs.values(), get_prev_txs)
            for tx in sorted_block_txs:
                self.scan_tx(tx)
if __name__ == "__main__":
    # Manual smoke test (Python 2): scan two known colors ("Blue" and
    # "Red") and print computed colorvalues next to the expected results.
    import blockchain
    import store
    import colormap as cm
    import colordata
    import datetime
    start = datetime.datetime.now()
    blockchain_state = blockchain.BlockchainState.from_url(None, True)
    store_conn = store.DataStoreConnection("test-color.db")
    cdstore = store.ColorDataStore(store_conn.conn)
    metastore = store.ColorMetaStore(store_conn.conn)
    colormap = cm.ColorMap(metastore)
    cdbuilder = ColorDataBuilderManager(
        colormap, blockchain_state, cdstore, metastore, AidedColorDataBuilder)
    colordata = colordata.ThickColorData(cdbuilder, blockchain_state, cdstore)
    # Color descriptors — format appears to be scheme:txhash:outindex:height
    # (TODO confirm against resolve_color_desc).
    blue_desc = "obc:" \
        "b1586cd10b32f78795b86e9a3febe58dcb59189175fad884a7f4a6623b77486e:" \
        "0:46442"
    red_desc = "obc:" \
        "8f6c8751f39357cd42af97a67301127d497597ae699ad0670b4f649bd9e39abf:" \
        "0:46444"
    blue_id = colormap.resolve_color_desc(blue_desc)
    red_id = colormap.resolve_color_desc(red_desc)
    blue_set = set([blue_id])
    red_set = set([red_id])
    br_set = blue_set | red_set
    print br_set, ("Blue", "Red")
    # Each print shows the computed colorvalue and, as a trailing string,
    # the value the author expected.
    g = colordata.get_colorvalues
    print g(
        br_set,
        "b1586cd10b32f78795b86e9a3febe58dcb59189175fad884a7f4a6623b77486e",
        0), "== 1000 Blue (blue genesis TX)"
    print g(
        br_set,
        "8f6c8751f39357cd42af97a67301127d497597ae699ad0670b4f649bd9e39abf",
        0), "== 1000 Red (red genesis TX)"
    print g(
        br_set,
        "b1586cd10b32f78795b86e9a3febe58dcb59189175fad884a7f4a6623b77486e",
        1), "== None (blue genesis TX, other output)"
    print g(
        br_set,
        "8f6c8751f39357cd42af97a67301127d497597ae699ad0670b4f649bd9e39abf",
        1), "== None (red genesis TX, other output)"
    print g(
        br_set,
        'c1d8d2fb75da30b7b61e109e70599c0187906e7610fe6b12c58eecc3062d1da5',
        0), "== Red"
    print g(
        br_set,
        '36af9510f65204ec5532ee62d3785584dc42a964013f4d40cfb8b94d27b30aa1',
        0), "== Red"
    print g(
        br_set,
        '3a60b70d425405f3e45f9ed93c30ca62b2a97e692f305836af38a524997dd01d',
        0), "== None (Random TX from blockchain)"
    print g(
        br_set,
        'c1d8d2fb75da30b7b61e109e70599c0187906e7610fe6b12c58eecc3062d1da5',
        0), "== Red"
    print g(
        br_set,
        '8f6c8751f39357cd42af97a67301127d497597ae699ad0670b4f649bd9e39abf',
        0), "== Red"
    print g(
        br_set,
        'f50f29906ce306be3fc06df74cc6a4ee151053c2621af8f449b9f62d86cf0647',
        0), "== Blue"
    print g(
        br_set,
        '7e40d2f414558be60481cbb976e78f2589bc6a9f04f38836c18ed3d10510dce5',
        0), "== Blue"
    print g(
        br_set,
        '4b60bb49734d6e26d798d685f76a409a5360aeddfddcb48102a7c7ec07243498',
        0), "== Red (Two-input merging TX)"
    print g(
        br_set,
        '342f119db7f9989f594d0f27e37bb5d652a3093f170de928b9ab7eed410f0bd1',
        0), "== None (Color mixing TX)"
    print g(
        br_set,
        'bd34141daf5138f62723009666b013e2682ac75a4264f088e75dbd6083fa2dba',
        0), "== Blue (complex chain TX)"
    print g(
        br_set,
        'bd34141daf5138f62723009666b013e2682ac75a4264f088e75dbd6083fa2dba',
        1), "== None (mining fee change output)"
    print g(
        br_set,
        '36af9510f65204ec5532ee62d3785584dc42a964013f4d40cfb8b94d27b30aa1',
        0), "== Red (complex chain TX)"
    print g(
        br_set,
        '741a53bf925510b67dc0d69f33eb2ad92e0a284a3172d4e82e2a145707935b3e',
        0), "== Red (complex chain TX)"
    print g(
        br_set,
        '741a53bf925510b67dc0d69f33eb2ad92e0a284a3172d4e82e2a145707935b3e',
        1), "== Red (complex chain TX)"
    print "Finished in", datetime.datetime.now() - start
|
Shabbat Shuvah: How Will You Go On the Last Day?
Although this form of speech may seem familiar to some of us (i.e. “he went and spoke”, or “he’s gone and done it now”) a strict grammarian, or a Torah commentator such as the ancient Sages of Israel, sees here a question. Based on the Rabbinical rules for interpreting Torah, which take as a given that there are no superfluous words in the sacred text, we can ask the simple question: where did Moshe go? The Torah does not specify where he went. It is an even more interesting point when we note that this is Moshe’s last day on earth.
Where was he going, on this last day of his?
1. Even at the end of his long and distinguished career, Moshe was still a humble person. Rather than call all Israel together to hear him, he chose to go to each family tent. He chose to spend his last day of life with his people, meeting intimately with those with whom he had shared so many years of struggle and hope.
2. More disturbingly, it is suggested that the people of Israel were not willing to gather to listen to him. At the end of his life, they dismissed him and his words as no longer meaningful or relevant.
3. The mystics suggest a third possibility from his words: “I can no longer come and go.” They remind us that Moshe had been accustomed to going “up” to commune with G*d, and then coming back “down” to be with the rest of the Israelites. Now, nearing his death, he had risen toward G*d and was unable to meet us on our level.
From these insights we see that the question is not where he went, but how. Did he go in humility as a great leader? Did he go as a scorned old man that no one wanted to listen to any more? Did he go somewhere that no one could follow?
On this Shabbat Shuvah, our Shabbat of Returning between Rosh HaShanah and Yom Kippur, we consider not where we are going but how we are going. Each day of our lives we draw nearer to the last day. How are you going?
On this Shabbat Shuvah, may you feel supported in your search for your best way to go forward, toward the rest of your life, and toward your last day. That is why we create spiritual community: to talk about this, to encourage and support each other, and to be there, on this Shabbat and every day.
This entry was tagged Moshe, parashat hashavua, Shabbat Shuvah, Shuvah, vayelekh. Bookmark the permalink.
|
# _*_ coding: utf-8 _*_
'''
lutil: library functions for KODI media add-ons.
Copyright (C) 2017 José Antonio Montes (jamontes)
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
Description:
These functions are called from the main plugin module, aimed to ease
and simplify the plugin development process.
Release 0.1.10
'''
# First of all We must import all the libraries used for plugin development.
import re, urllib, urllib2
from datetime import date
debug_enable = False # The debug logs are disabled by default.
def local_log(message):
    """This function logs the messages into the main KODI log file. Called from the libraries module by other functions."""
    # Only emit when debug mode was enabled via set_debug_mode().
    if debug_enable:
        print "%s" % message
log = local_log # Use local log function by default.
def set_debug_mode(debug_flag, func_log=local_log):
    """Enable or disable debug logging and install the logger callback.

    ``debug_flag`` may be the string "true" or the boolean True; any
    other value turns debug logging off.
    """
    global debug_enable
    global log
    # Accept both the textual settings value and a plain boolean.
    debug_enable = debug_flag == "true" or debug_flag == True
    log = func_log
def get_url_decoded(url):
    """Return *url* with plus signs and percent-escapes decoded."""
    log('get_url_decoded URL: "%s"' % url)
    decoded_url = urllib.unquote_plus(url)
    return decoded_url
def get_url_encoded(url):
    """Return *url* percent-encoded (spaces become plus signs)."""
    log('get_url_encoded URL: "%s"' % url)
    encoded_url = urllib.quote_plus(url)
    return encoded_url
def get_parms_encoded(**kwars):
    """Return the keyword arguments urlencoded for a URL query or POST body."""
    encoded_params = urllib.urlencode(kwars)
    log('get_parms_encoded params: "%s"' % encoded_params)
    return encoded_params
def carga_web(url):
    """This function loads the html code from a webserver and returns it into a string.

    NOTE(review): no error handling here — urllib2.URLError/HTTPError
    propagate to the caller.
    """
    log('carga_web URL: "%s"' % url)
    MiReq = urllib2.Request(url) # We use the Request method because we need to add a header into the HTTP GET to the web site.
    # We have to tell the web site we are using a real browser.
    MiReq.add_header('User-Agent', 'Mozilla/5.0 (X11; Linux x86_64; rv:17.0) Gecko/20100101 Firefox/17.0') # This is a true Firefox header.
    MiConex = urllib2.urlopen(MiReq) # We open the HTTP connection to the URL.
    MiHTML = MiConex.read() # We load all the HTML contents from the web page and store it into a var.
    MiConex.close() # We close the HTTP connection as we have all the info required.
    return MiHTML
def carga_web_cookies(url, headers=None):
    """This function loads the html code from a webserver passing the headers
    into the GET message and returns it into a string along with the cookies
    collected from the website.

    Parameters:
        url     -- address to fetch.
        headers -- optional dict of extra header-name -> value pairs.

    Returns a (html, cookies) tuple.
    """
    log('carga_web_cookies URL: "%s"' % url)
    # FIX: the old default was the empty string ''. Iterating a non-empty
    # string here would have walked it char by char and produced bogus
    # headers; a mapping (or None / '') is now normalised to a dict.
    if not headers:
        headers = {}
    MiReq = urllib2.Request(url) # We use the Request method because we need to add a header into the HTTP GET to the web site.
    # We have to tell the web site we are using a real browser.
    MiReq.add_header('User-Agent', 'Mozilla/5.0 (X11; Linux x86_64; rv:17.0) Gecko/20100101 Firefox/17.0') # This is a true Firefox header.
    for key in headers:
        MiReq.add_header(key, headers[key])
    MiConex = urllib2.urlopen(MiReq) # We open the HTTP connection to the URL.
    MiHTML = MiConex.read() # We load all the HTML contents from the web page and store it into a var.
    server_info = "%s" % MiConex.info()
    my_cookie_pattern = re.compile('Set-Cookie: ([^;]+);')
    my_cookies = ''
    pcookie = ''
    # Collapse consecutive duplicate Set-Cookie values into a single entry.
    for lcookie in my_cookie_pattern.findall(server_info):
        if (lcookie != pcookie):
            my_cookies = "%s %s;" % (my_cookies, lcookie)
            pcookie = lcookie
    MiConex.close() # We close the HTTP connection as we have all the info required.
    log('carga_web Cookie: "%s"' % my_cookies)
    return MiHTML, my_cookies
def send_post_data(url, headers=None, data=''):
    """This function sends an HTTP POST request with their corresponding
    headers and data to a webserver and returns the html code into a string
    along with the cookies collected from the website.

    Parameters:
        url     -- address to post to.
        headers -- optional dict of extra header-name -> value pairs.
        data    -- urlencoded body of the POST request (kept as '' by
                   default: passing a body is what makes urllib2 POST).

    Returns a (html, cookies) tuple.
    """
    log('send_post_data URL: "%s"' % url)
    # FIX: the old default was the empty string ''. Iterating a non-empty
    # string here would have walked it char by char and produced bogus
    # headers; a mapping (or None / '') is now normalised to a dict.
    if not headers:
        headers = {}
    MiReq = urllib2.Request(url, data) # We use the Request method because we need to send a HTTP POST to the web site.
    # We have to tell the web site we are using a real browser.
    MiReq.add_header('User-Agent', 'Mozilla/5.0 (X11; Linux x86_64; rv:17.0) Gecko/20100101 Firefox/17.0') # This is a true Firefox header.
    for key in headers:
        MiReq.add_header(key, headers[key])
    MiConex = urllib2.urlopen(MiReq) # We open the HTTP connection to the URL.
    MiHTML = MiConex.read() # We load all the HTML contents from the web page and store it into a var.
    server_info = "%s" % MiConex.info()
    my_cookie_pattern = re.compile('Set-Cookie: ([^;]+);')
    my_cookies = ''
    pcookie = ''
    # Collapse consecutive duplicate Set-Cookie values into a single entry.
    for lcookie in my_cookie_pattern.findall(server_info):
        if (lcookie != pcookie):
            my_cookies = "%s %s;" % (my_cookies, lcookie)
            pcookie = lcookie
    MiConex.close() # We close the HTTP connection as we have all the info required.
    log('send_post_data Cookie: "%s"' % my_cookies)
    return MiHTML, my_cookies
def get_redirect(url):
    """This function returns the redirected URL from a 30X response received from the webserver."""
    log('get_redirect URL: "%s"' % url)
    connection = urllib.urlopen(url)
    # urlopen follows redirects; geturl() reports the final address reached.
    final_url = connection.geturl()
    connection.close()
    return final_url
def find_multiple(text, pattern):
    """Return every match of the regexp *pattern* found in *text*.

    Compiled with re.DOTALL so '.' also matches newlines.
    """
    matcher = re.compile(pattern, re.DOTALL)
    return matcher.findall(text)
def find_first(text, pattern):
    """This function gets back the first match from a regexp into a string.

    Compiled with re.DOTALL so '.' also matches newlines. Returns the empty
    string when the pattern does not match at all.
    """
    pat_url_par = re.compile(pattern, re.DOTALL)
    try:
        # findall()[0] is kept (rather than search()) to preserve the
        # original group-extraction semantics: a single capturing group
        # yields that group, not the whole match.
        return pat_url_par.findall(text)[0]
    except IndexError:
        # FIX: narrowed from a bare "except:", which could silently hide
        # unrelated bugs. Only "no match" (empty findall) returns ''.
        return ""
def get_this_year():
    """This function gets the current year. Useful to fill the Year infolabel whenever it isn't available"""
    today = date.today()
    return today.year
def get_clean_title(title):
"""This function returns the title or desc cleaned.
ref: http://www.thesauruslex.com/typo/eng/enghtml.htm"""
return title.\
replace('á', 'á').\
replace('à', 'á').\
replace('é', 'é').\
replace('è', 'è').\
replace('í', 'í').\
replace('ó', 'ó').\
replace('ò', 'ò').\
replace('ú', 'ú').\
replace('ä', 'ä').\
replace('ï', 'ï').\
replace('ö', 'ö').\
replace('ü', 'ü').\
replace('ß', 'ß').\
replace('ñ', 'ñ').\
replace('ç', 'ç').\
replace('Á', 'Á').\
replace('À', 'À').\
replace('É', 'É').\
replace('È', 'È').\
replace('Í', 'Í').\
replace('Ó', 'Ó').\
replace('Ò', 'Ò').\
replace('Ú', 'Ú').\
replace('Ä', 'Ä').\
replace('Ï', 'Ï').\
replace('Ö', 'Ö').\
replace('Ü', 'Ü').\
replace('Ñ', 'Ñ').\
replace('Ç', 'Ç').\
replace('"', '"').\
replace(''', "´").\
replace(' ', " ").\
replace('–', '').\
replace('’', "'").\
replace('“', '"').\
replace('”', '"').\
replace('‟', "'").\
replace('…', '').\
replace('’', "´").\
replace('«', '"').\
replace('»', '"').\
replace('¡', '¡').\
replace('&iinte;', '¿').\
replace('&', '&').\
replace(' ', '').\
replace('"', '"').\
replace('ª', 'ª').\
replace('º', 'º').\
replace('·', '·').\
replace('…', '...').\
replace('<br />', '').\
strip()
def get_clean_html_tags(html_text):
    """This function returns the text or desc cleaned from html tags."""
    # Non-greedy match of anything between '<' and '>'; DOTALL lets a tag
    # span several lines.
    tag_pattern = re.compile(r'<[^>]*?>', re.DOTALL)
    return tag_pattern.sub('', html_text)
|
The Macau Science Center (also known as the Macao Science Center) is a science center in Macau, China. The project to build the science center was conceived in 2001 and completed in 2009. The building — designed by Pei Partnership Architects in association with I. M. Pei — is instantly recognizable by its silvery, cone-shaped exterior. Construction started in 2006, and of the fourteen galleries arranged in a spiral and accentuated by an arch shape, ten are now open. The structural engineer was Leslie E. Robertson Associates. The center was opened in December 2009 by the Chinese President Hu Jintao.
After Beijing and Hong Kong, Macau is the third city in the world to possess the 8000 x 8000 pixel screen equipment. The center also generates 3D effects via a high definition 3D projection system in the space theatre of the planetarium, the first digital system to be simultaneously equipped with ultra high definition (8000 x 8000) and 3D visual effects in the world – all designed to give visitors an entertaining yet educational insight into the fascinating world of science.
The main building has a distinctive, asymmetrical, conical shape with a spiral walkway and a large atrium inside. Galleries lead off the walkway, mainly consisting of interactive exhibits aimed at science education. There is also a planetarium with 3D projection facilities and Omnimax films. The building is in a prominent position by the sea and is now a landmark of Macau. It is very visible when arriving on the ferry from Hong Kong.
Train as a future astronaut, meet state-of-the-art robots, learn about sound and more at the Macau Science Center. With galleries dedicated to Science for Young Adults, Technology, Environment and Lifestyle, this hub of knowledge will surely entertain. For younger learners, the Children Science Gallery is always a hit with water-related exhibits and other hands-on activities.
Be sure to pay a visit to the Planetarium which was awarded the highest resolution 3D planetarium in the world by the Guinness World Records. The tilted semi-dome screen allows audiences to take a journey through space. Narrations in Cantonese, Mandarin /Putonghua, Portuguese and English are available through headphones for recorded programs.
|
# Author: Jason Lu
'''
import socket
server = socket.socket()
server.bind(('localhost', 6969)) #绑定监听端口
server.listen(5) #监听数量5个
'''
'''
print("开始等电话了...")
conn, addr = server.accept() # 等电话打进来
print("电话来了...%s" %(conn))
'''
# 英文
'''
data = conn.recv(1024)
print("recv: %s" % (data))
'''
# 中文
''''
data = conn.recv(1024)
print("recv: %s" % (data.decode()))
conn.send(data.upper())
'''
# Improved version: serves clients one at a time and echoes what they send.
import os
import socket
server = socket.socket()
server.bind(('localhost', 6969)) # bind the listening address and port
server.listen(5) # allow a backlog of up to 5 queued connections
while True:
    print("开始等电话了...")
    conn, addr = server.accept() # block until a client connects
    print("电话来了,...来自: %s" %(conn))
    while True:
        data = conn.recv(1024)
        if not data:
            # recv() returned b"": the peer closed the connection cleanly.
            print("client已经断开...111")
            break
        if data.decode() == "":
            # Defensive second check for an empty decoded payload.
            print("client已经断开...222")
            break
        print("recv: %s" % (data.decode()))
        #conn.send(data.upper())
        # Run the command sent by the client and reply with its output
        # (disabled; kept as a reference implementation below).
        '''
        str_cmd = data.decode() # 转换字符串
        res = os.popen(str_cmd).read()
        conn.send(res.encode("utf-8"))
        '''
        # TODO: handle sending files, e.g. video files (not implemented).
    conn.close()
# NOTE(review): unreachable — the accept loop above never exits.
server.close()
|
Mental health services are funded in ways that isolate family and friends from the people affected by mental illnesses and put vulnerable people at risk, according to Emma Doré, CEO of Supporting Families Auckland.
“New Zealand’s suicide rate is climbing – the latest statistics show 668 people died in the year to June 30 – and yet it is still common for us to receive calls from family members in acute distress about what to do when someone they love is suicidal, or when someone has been discharged from hospital following a suicide attempt with no plan or advance notice given to those at home,” says Ms Doré.
“Families tell us they have tried calling health services only to be told that their concerns are unwarranted, they are overreacting, and very commonly, nothing can be discussed with them because of the Privacy Act. They are often told that if they are worried they should call the police.
|
# Example script that performs a set of (small) live requests versus the live PML WPS service
from __future__ import absolute_import
from __future__ import print_function
from owslib.wps import WebProcessingService, monitorExecution
# instantiate WPS client (skip_caps=True defers the capabilities request)
verbose = False
wps = WebProcessingService('http://rsg.pml.ac.uk/wps/generic.cgi', verbose=verbose, skip_caps=True)
# 1) GetCapabilities
wps.getcapabilities()
print('WPS Identification type: %s' % wps.identification.type)
print('WPS Identification title: %s' % wps.identification.title)
print('WPS Identification abstract: %s' % wps.identification.abstract)
for operation in wps.operations:
    print('WPS Operation: %s' % operation.name)
for process in wps.processes:
    print('WPS Process: identifier=%s title=%s' % (process.identifier, process.title))
# 2) DescribeProcess
process = wps.describeprocess('reprojectImage')
print('WPS Process: identifier=%s' % process.identifier)
print('WPS Process: title=%s' % process.title)
print('WPS Process: abstract=%s' % process.abstract)
# FIX: loop variables renamed from "input"/"output" so they no longer
# shadow the builtin input() (and stay clear of the "output" var below).
for process_input in process.dataInputs:
    print('Process input: identifier=%s, data type=%s, minOccurs=%d, maxOccurs=%d' % (process_input.identifier, process_input.dataType, process_input.minOccurs, process_input.maxOccurs))
for process_output in process.processOutputs:
    print('Process output: identifier=%s, data type=%s' % (process_output.identifier, process_output.dataType))
# 3a) Execute
# GET request: http://rsg.pml.ac.uk/wps/generic.cgi?request=Execute&service=wps&version=1.0.0&identifier=reprojectImage&datainputs=[inputImage=http://rsg.pml.ac.uk/wps/testdata/elev_srtm_30m.img;outputSRS=EPSG:4326]&responsedocument=outputImage=@asreference=true
processid = "reprojectImage"
inputs = [ ("inputImage","http://rsg.pml.ac.uk/wps/testdata/elev_srtm_30m.img"),
           ("outputSRS", "EPSG:4326") ]
output = "outputImage"
execution = wps.execute(processid, inputs, output)
# Poll the asynchronous execution until it completes.
monitorExecution(execution)
# 3b) Execute
# GET request: http://rsg.pml.ac.uk/wps/generic.cgi?request=Execute&service=WPS&version=1.0.0&identifier=reprojectCoords&datainputs=[coords=http://rsg.pml.ac.uk/wps/testdata/coords.txt;outputSRS=EPSG:32630;inputSRS=EPSG:4326]
processid = "reprojectCoords"
inputs = [ ("coords","http://rsg.pml.ac.uk/wps/testdata/coords.txt"),
           ("outputSRS", "EPSG:32630"),
           ("inputSRS","EPSG:4326") ]
execution = wps.execute(processid, inputs)
monitorExecution(execution)
|
We started the day with the most important meal: breakfast.
Then spent the rest of the morning around the pool.
Time for tea before heading off to Uluwatu.
Arrived in Uluwatu but not really sure what to make of it. Walked around, met some monkeys and watched the Kecak & Fire dancing at six, before we made ourselves scarce.
Had dinner in vintage restaurant Balique.
|
from datetime import timedelta
import operator
from typing import Any, Callable, List, Optional, Sequence, Type, Union
import numpy as np
from pandas._libs.tslibs import (
BaseOffset,
NaT,
NaTType,
Timedelta,
delta_to_nanoseconds,
dt64arr_to_periodarr as c_dt64arr_to_periodarr,
iNaT,
period as libperiod,
to_offset,
)
from pandas._libs.tslibs.dtypes import FreqGroup
from pandas._libs.tslibs.fields import isleapyear_arr
from pandas._libs.tslibs.offsets import Tick, delta_to_tick
from pandas._libs.tslibs.period import (
DIFFERENT_FREQ,
IncompatibleFrequency,
Period,
PeriodMixin,
get_period_field_arr,
period_asfreq_arr,
)
from pandas._typing import AnyArrayLike
from pandas.util._decorators import cache_readonly
from pandas.core.dtypes.common import (
TD64NS_DTYPE,
ensure_object,
is_datetime64_dtype,
is_dtype_equal,
is_float_dtype,
is_period_dtype,
pandas_dtype,
)
from pandas.core.dtypes.dtypes import PeriodDtype
from pandas.core.dtypes.generic import (
ABCIndexClass,
ABCPeriodIndex,
ABCSeries,
ABCTimedeltaArray,
)
from pandas.core.dtypes.missing import isna, notna
import pandas.core.algorithms as algos
from pandas.core.arrays import datetimelike as dtl
import pandas.core.common as com
def _field_accessor(name: str, docstring=None):
    """Build a read-only property extracting the named field from the ordinals."""

    def _getter(self):
        dtype_code = self.freq._period_dtype_code
        return get_period_field_arr(name, self.asi8, dtype_code)

    # Expose the accessor under the field's own name/doc for introspection.
    _getter.__name__ = name
    _getter.__doc__ = docstring
    return property(_getter)
class PeriodArray(PeriodMixin, dtl.DatetimeLikeArrayMixin, dtl.DatelikeOps):
    """
    Pandas ExtensionArray for storing Period data.
    Users should use :func:`period_array` to create new instances.
    Parameters
    ----------
    values : Union[PeriodArray, Series[period], ndarray[int], PeriodIndex]
        The data to store. These should be arrays that can be directly
        converted to ordinals without inference or copy (PeriodArray,
        ndarray[int64]), or a box around such an array (Series[period],
        PeriodIndex).
    freq : str or DateOffset
        The `freq` to use for the array. Mostly applicable when `values`
        is an ndarray of integers, when `freq` is required. When `values`
        is a PeriodArray (or box around), it's checked that ``values.freq``
        matches `freq`.
    dtype : PeriodDtype, optional
        A PeriodDtype instance from which to extract a `freq`. If both
        `freq` and `dtype` are specified, then the frequencies must match.
    copy : bool, default False
        Whether to copy the ordinals before storing.
    Attributes
    ----------
    None
    Methods
    -------
    None
    See Also
    --------
    period_array : Create a new PeriodArray.
    PeriodIndex : Immutable Index for period data.
    Notes
    -----
    There are two components to a PeriodArray
    - ordinals : integer ndarray
    - freq : pd.tseries.offsets.Offset
    The values are physically stored as a 1-D ndarray of integers. These are
    called "ordinals" and represent some kind of offset from a base.
    The `freq` indicates the span covered by each element of the array.
    All elements in the PeriodArray have the same `freq`.
    """
    # array priority higher than numpy scalars
    __array_priority__ = 1000
    _typ = "periodarray" # ABCPeriodArray
    _scalar_type = Period
    _recognized_scalars = (Period,)
    _is_recognized_dtype = is_period_dtype
    # Names others delegate to us
    _other_ops: List[str] = []
    _bool_ops = ["is_leap_year"]
    _object_ops = ["start_time", "end_time", "freq"]
    _field_ops = [
        "year",
        "month",
        "day",
        "hour",
        "minute",
        "second",
        "weekofyear",
        "weekday",
        "week",
        "dayofweek",
        "dayofyear",
        "quarter",
        "qyear",
        "days_in_month",
        "daysinmonth",
    ]
    _datetimelike_ops = _field_ops + _object_ops + _bool_ops
    _datetimelike_methods = ["strftime", "to_timestamp", "asfreq"]
    # --------------------------------------------------------------------
    # Constructors
    def __init__(self, values, freq=None, dtype=None, copy=False):
        # Reconcile the `freq` and `dtype` arguments (they must agree when
        # both are given).
        freq = validate_dtype_freq(dtype, freq)
        if freq is not None:
            freq = Period._maybe_convert_freq(freq)
        # Unbox Series/PeriodIndex down to the underlying PeriodArray.
        if isinstance(values, ABCSeries):
            values = values._values
            if not isinstance(values, type(self)):
                raise TypeError("Incorrect dtype")
        elif isinstance(values, ABCPeriodIndex):
            values = values._values
        if isinstance(values, type(self)):
            if freq is not None and freq != values.freq:
                raise raise_on_incompatible(values, freq)
            values, freq = values._data, values.freq
        # Ordinals are stored as a plain int64 ndarray.
        values = np.array(values, dtype="int64", copy=copy)
        self._data = values
        if freq is None:
            raise ValueError("freq is not specified and cannot be inferred")
        self._dtype = PeriodDtype(freq)
    @classmethod
    def _simple_new(cls, values: np.ndarray, freq=None, **kwargs) -> "PeriodArray":
        # alias for PeriodArray.__init__
        assertion_msg = "Should be numpy array of type i8"
        assert isinstance(values, np.ndarray) and values.dtype == "i8", assertion_msg
        return cls(values, freq=freq, **kwargs)
    @classmethod
    def _from_sequence(
        cls: Type["PeriodArray"],
        scalars: Union[Sequence[Optional[Period]], AnyArrayLike],
        dtype: Optional[PeriodDtype] = None,
        copy: bool = False,
    ) -> "PeriodArray":
        if dtype:
            freq = dtype.freq
        else:
            freq = None
        # Fast path: already a PeriodArray — just validate freq and
        # optionally copy.
        if isinstance(scalars, cls):
            validate_dtype_freq(scalars.dtype, freq)
            if copy:
                scalars = scalars.copy()
            return scalars
        # Otherwise extract freq/ordinals from a sequence of Period scalars.
        periods = np.asarray(scalars, dtype=object)
        if copy:
            periods = periods.copy()
        freq = freq or libperiod.extract_freq(periods)
        ordinals = libperiod.extract_ordinals(periods, freq)
        return cls(ordinals, freq=freq)
    @classmethod
    def _from_sequence_of_strings(
        cls, strings, dtype=None, copy=False
    ) -> "PeriodArray":
        # Period accepts strings, so string parsing is just _from_sequence.
        return cls._from_sequence(strings, dtype, copy)
    @classmethod
    def _from_datetime64(cls, data, freq, tz=None) -> "PeriodArray":
        """
        Construct a PeriodArray from a datetime64 array
        Parameters
        ----------
        data : ndarray[datetime64[ns], datetime64[ns, tz]]
        freq : str or Tick
        tz : tzinfo, optional
        Returns
        -------
        PeriodArray[freq]
        """
        data, freq = dt64arr_to_periodarr(data, freq, tz)
        return cls(data, freq=freq)
    @classmethod
    def _generate_range(cls, start, end, periods, freq, fields):
        periods = dtl.validate_periods(periods)
        if freq is not None:
            freq = Period._maybe_convert_freq(freq)
        field_count = len(fields)
        # A range may be specified either by endpoints or by fields
        # (year/quarter/...), never by both.
        if start is not None or end is not None:
            if field_count > 0:
                raise ValueError(
                    "Can either instantiate from fields or endpoints, but not both"
                )
            subarr, freq = _get_ordinal_range(start, end, periods, freq)
        elif field_count > 0:
            subarr, freq = _range_from_fields(freq=freq, **fields)
        else:
            raise ValueError("Not enough parameters to construct Period range")
        return subarr, freq
    # -----------------------------------------------------------------
    # DatetimeLike Interface
    @classmethod
    def _rebox_native(cls, value: int) -> np.int64:
        # Periods are stored natively as int64 ordinals.
        return np.int64(value)
    def _unbox_scalar(
        self, value: Union[Period, NaTType], setitem: bool = False
    ) -> int:
        if value is NaT:
            # NaT unboxes to its sentinel int64 value (iNaT).
            return value.value
        elif isinstance(value, self._scalar_type):
            self._check_compatible_with(value, setitem=setitem)
            return value.ordinal
        else:
            raise ValueError(f"'value' should be a Period. Got '{value}' instead.")
    def _scalar_from_string(self, value: str) -> Period:
        return Period(value, freq=self.freq)
    def _check_compatible_with(self, other, setitem: bool = False):
        # NaT is compatible with any freq.
        if other is NaT:
            return
        if self.freqstr != other.freqstr:
            raise raise_on_incompatible(self, other)
    # --------------------------------------------------------------------
    # Data / Attributes
    @cache_readonly
    def dtype(self) -> PeriodDtype:
        return self._dtype
    # error: Read-only property cannot override read-write property
    @property # type: ignore[misc]
    def freq(self) -> BaseOffset:
        """
        Return the frequency object for this PeriodArray.
        """
        return self.dtype.freq
    def __array__(self, dtype=None) -> np.ndarray:
        # i8 (raw ordinals) and bool (notna mask) can be served cheaply.
        if dtype == "i8":
            return self.asi8
        elif dtype == bool:
            return ~self._isnan
        # This will raise TypeError for non-object dtypes
        return np.array(list(self), dtype=object)
    def __arrow_array__(self, type=None):
        """
        Convert myself into a pyarrow Array.
        """
        import pyarrow
        from pandas.core.arrays._arrow_utils import ArrowPeriodType
        if type is not None:
            if pyarrow.types.is_integer(type):
                # Caller explicitly asked for the raw ordinals.
                return pyarrow.array(self._data, mask=self.isna(), type=type)
            elif isinstance(type, ArrowPeriodType):
                # ensure we have the same freq
                if self.freqstr != type.freq:
                    raise TypeError(
                        "Not supported to convert PeriodArray to array with different "
                        f"'freq' ({self.freqstr} vs {type.freq})"
                    )
            else:
                raise TypeError(
                    f"Not supported to convert PeriodArray to '{type}' type"
                )
        # Default: wrap the int64 storage in the Arrow period extension type.
        period_type = ArrowPeriodType(self.freqstr)
        storage_array = pyarrow.array(self._data, mask=self.isna(), type="int64")
        return pyarrow.ExtensionArray.from_storage(period_type, storage_array)
    # --------------------------------------------------------------------
    # Vectorized analogues of Period properties
    year = _field_accessor(
        "year",
        """
        The year of the period.
        """,
    )
    month = _field_accessor(
        "month",
        """
        The month as January=1, December=12.
        """,
    )
    day = _field_accessor(
        "day",
        """
        The days of the period.
        """,
    )
    hour = _field_accessor(
        "hour",
        """
        The hour of the period.
        """,
    )
    minute = _field_accessor(
        "minute",
        """
        The minute of the period.
        """,
    )
    second = _field_accessor(
        "second",
        """
        The second of the period.
        """,
    )
    weekofyear = _field_accessor(
        "week",
        """
        The week ordinal of the year.
        """,
    )
    week = weekofyear
    dayofweek = _field_accessor(
        "weekday",
        """
        The day of the week with Monday=0, Sunday=6.
        """,
    )
    weekday = dayofweek
    dayofyear = day_of_year = _field_accessor(
        "day_of_year",
        """
        The ordinal day of the year.
        """,
    )
    quarter = _field_accessor(
        "quarter",
        """
        The quarter of the date.
        """,
    )
    qyear = _field_accessor("qyear")
    days_in_month = _field_accessor(
        "days_in_month",
        """
        The number of days in the month.
        """,
    )
    daysinmonth = days_in_month
    @property
    def is_leap_year(self) -> np.ndarray:
        """
        Logical indicating if the date belongs to a leap year.
        """
        return isleapyear_arr(np.asarray(self.year))
    @property
    def start_time(self):
        # Timestamp of the start of each period.
        return self.to_timestamp(how="start")
    @property
    def end_time(self):
        # Timestamp of the end of each period.
        return self.to_timestamp(how="end")
    def to_timestamp(self, freq=None, how="start"):
        """
        Cast to DatetimeArray/Index.
        Parameters
        ----------
        freq : str or DateOffset, optional
            Target frequency. The default is 'D' for week or longer,
            'S' otherwise.
        how : {'s', 'e', 'start', 'end'}
            Whether to use the start or end of the time period being converted.
        Returns
        -------
        DatetimeArray/Index
        """
        from pandas.core.arrays import DatetimeArray
        how = libperiod.validate_end_alias(how)
        end = how == "E"
        if end:
            if freq == "B" or self.freq == "B":
                # roll forward to ensure we land on B date
                adjust = Timedelta(1, "D") - Timedelta(1, "ns")
                return self.to_timestamp(how="start") + adjust
            else:
                # end-of-period == start of the *next* period minus 1ns
                adjust = Timedelta(1, "ns")
                return (self + self.freq).to_timestamp(how="start") - adjust
        if freq is None:
            freq = self._get_to_timestamp_base()
            base = freq
        else:
            freq = Period._maybe_convert_freq(freq)
            base = freq._period_dtype_code
        new_data = self.asfreq(freq, how=how)
        new_data = libperiod.periodarr_to_dt64arr(new_data.asi8, base)
        return DatetimeArray(new_data)._with_freq("infer")
    # --------------------------------------------------------------------
    def _time_shift(self, periods, freq=None):
        """
        Shift each value by `periods`.
        Note this is different from ExtensionArray.shift, which
        shifts the *position* of each element, padding the end with
        missing values.
        Parameters
        ----------
        periods : int
            Number of periods to shift by.
        freq : pandas.DateOffset, pandas.Timedelta, or str
            Frequency increment to shift by.
        """
        if freq is not None:
            raise TypeError(
                "`freq` argument is not supported for "
                f"{type(self).__name__}._time_shift"
            )
        # Shifting is plain ordinal arithmetic in units of self.freq.
        values = self.asi8 + periods * self.freq.n
        if self._hasnans:
            # Keep NaT positions as NaT after the shift.
            values[self._isnan] = iNaT
        return type(self)(values, freq=self.freq)
    def _box_func(self, x) -> Union[Period, NaTType]:
        # Box a single int64 ordinal back into a Period scalar.
        return Period._from_ordinal(ordinal=x, freq=self.freq)
    def asfreq(self, freq=None, how: str = "E") -> "PeriodArray":
        """
        Convert the Period Array/Index to the specified frequency `freq`.
        Parameters
        ----------
        freq : str
            A frequency.
        how : str {'E', 'S'}
            Whether the elements should be aligned to the end
            or start within pa period.
            * 'E', 'END', or 'FINISH' for end,
            * 'S', 'START', or 'BEGIN' for start.
            January 31st ('END') vs. January 1st ('START') for example.
        Returns
        -------
        Period Array/Index
            Constructed with the new frequency.
        Examples
        --------
        >>> pidx = pd.period_range('2010-01-01', '2015-01-01', freq='A')
        >>> pidx
        PeriodIndex(['2010', '2011', '2012', '2013', '2014', '2015'],
        dtype='period[A-DEC]', freq='A-DEC')
        >>> pidx.asfreq('M')
        PeriodIndex(['2010-12', '2011-12', '2012-12', '2013-12', '2014-12',
        '2015-12'], dtype='period[M]', freq='M')
        >>> pidx.asfreq('M', how='S')
        PeriodIndex(['2010-01', '2011-01', '2012-01', '2013-01', '2014-01',
        '2015-01'], dtype='period[M]', freq='M')
        """
        how = libperiod.validate_end_alias(how)
        freq = Period._maybe_convert_freq(freq)
        base1 = self.freq._period_dtype_code
        base2 = freq._period_dtype_code
        asi8 = self.asi8
        # self.freq.n can't be negative or 0
        end = how == "E"
        if end:
            # Align each ordinal to the last sub-period before converting.
            ordinal = asi8 + self.freq.n - 1
        else:
            ordinal = asi8
        new_data = period_asfreq_arr(ordinal, base1, base2, end)
        if self._hasnans:
            new_data[self._isnan] = iNaT
        return type(self)(new_data, freq=freq)
    # ------------------------------------------------------------------
    # Rendering Methods
    def _formatter(self, boxed: bool = False):
        # Inside a boxed container (Series/Index) quotes are omitted.
        if boxed:
            return str
        return "'{}'".format
    def _format_native_types(self, na_rep="NaT", date_format=None, **kwargs):
        """
        actually format my specific types
        """
        values = self.astype(object)
        if date_format:
            formatter = lambda dt: dt.strftime(date_format)
        else:
            formatter = lambda dt: str(dt)
        if self._hasnans:
            # Format only the non-NaT positions; fill NaT slots with na_rep.
            mask = self._isnan
            values[mask] = na_rep
            imask = ~mask
            values[imask] = np.array([formatter(dt) for dt in values[imask]])
        else:
            values = np.array([formatter(dt) for dt in values])
        return values
    # ------------------------------------------------------------------
    def astype(self, dtype, copy: bool = True):
        # We handle Period[T] -> Period[U]
        # Our parent handles everything else.
        dtype = pandas_dtype(dtype)
        if is_dtype_equal(dtype, self._dtype):
            if not copy:
                return self
            elif copy:
                return self.copy()
        if is_period_dtype(dtype):
            return self.asfreq(dtype.freq)
        return super().astype(dtype, copy=copy)
    def searchsorted(self, value, side="left", sorter=None):
        value = self._validate_searchsorted_value(value).view("M8[ns]")
        # Cast to M8 to get datetime-like NaT placement
        # (ordinals reinterpreted as datetime64 only for the search itself).
        m8arr = self._ndarray.view("M8[ns]")
        return m8arr.searchsorted(value, side=side, sorter=sorter)
    # ------------------------------------------------------------------
    # Arithmetic Methods
    def _sub_datelike(self, other):
        # Period - datetime is not defined; let the caller handle it.
        assert other is not NaT
        return NotImplemented
    def _sub_period(self, other):
        # If the operation is well-defined, we return an object-Index
        # of DateOffsets. Null entries are filled with pd.NaT
        self._check_compatible_with(other)
        asi8 = self.asi8
        new_data = asi8 - other.ordinal
        new_data = np.array([self.freq * x for x in new_data])
        if self._hasnans:
            new_data[self._isnan] = NaT
        return new_data
    def _sub_period_array(self, other):
        """
        Subtract a Period Array/Index from self. This is only valid if self
        is itself a Period Array/Index, raises otherwise. Both objects must
        have the same frequency.
        Parameters
        ----------
        other : PeriodIndex or PeriodArray
        Returns
        -------
        result : np.ndarray[object]
            Array of DateOffset objects; nulls represented by NaT.
        """
        if self.freq != other.freq:
            msg = DIFFERENT_FREQ.format(
                cls=type(self).__name__, own_freq=self.freqstr, other_freq=other.freqstr
            )
            raise IncompatibleFrequency(msg)
        # Overflow-checked elementwise subtraction of the ordinals.
        new_values = algos.checked_add_with_arr(
            self.asi8, -other.asi8, arr_mask=self._isnan, b_mask=other._isnan
        )
        new_values = np.array([self.freq.base * x for x in new_values])
        if self._hasnans or other._hasnans:
            mask = (self._isnan) | (other._isnan)
            new_values[mask] = NaT
        return new_values
    def _addsub_int_array(
        self, other: np.ndarray, op: Callable[[Any, Any], Any]
    ) -> "PeriodArray":
        """
        Add or subtract array of integers; equivalent to applying
        `_time_shift` pointwise.
        Parameters
        ----------
        other : np.ndarray[integer-dtype]
        op : {operator.add, operator.sub}
        Returns
        -------
        result : PeriodArray
        """
        assert op in [operator.add, operator.sub]
        if op is operator.sub:
            # Subtraction is implemented as addition of the negation.
            other = -other
        res_values = algos.checked_add_with_arr(self.asi8, other, arr_mask=self._isnan)
        res_values = res_values.view("i8")
        res_values[self._isnan] = iNaT
        return type(self)(res_values, freq=self.freq)
    def _add_offset(self, other: BaseOffset):
        # Tick offsets take the timedelta path instead.
        assert not isinstance(other, Tick)
        if other.base != self.freq.base:
            raise raise_on_incompatible(self, other)
        # Note: when calling parent class's _add_timedeltalike_scalar,
        # it will call delta_to_nanoseconds(delta). Because delta here
        # is an integer, delta_to_nanoseconds will return it unchanged.
        result = super()._add_timedeltalike_scalar(other.n)
        return type(self)(result, freq=self.freq)
    def _add_timedeltalike_scalar(self, other):
        """
        Parameters
        ----------
        other : timedelta, Tick, np.timedelta64
        Returns
        -------
        PeriodArray
        """
        if not isinstance(self.freq, Tick):
            # We cannot add timedelta-like to non-tick PeriodArray
            raise raise_on_incompatible(self, other)
        if notna(other):
            # special handling for np.timedelta64("NaT"), avoid calling
            # _check_timedeltalike_freq_compat as that would raise TypeError
            other = self._check_timedeltalike_freq_compat(other)
        # Note: when calling parent class's _add_timedeltalike_scalar,
        # it will call delta_to_nanoseconds(delta). Because delta here
        # is an integer, delta_to_nanoseconds will return it unchanged.
        return super()._add_timedeltalike_scalar(other)
    def _add_timedelta_arraylike(self, other):
        """
        Parameters
        ----------
        other : TimedeltaArray or ndarray[timedelta64]
        Returns
        -------
        result : ndarray[int64]
        """
        if not isinstance(self.freq, Tick):
            # We cannot add timedelta-like to non-tick PeriodArray
            raise TypeError(
                f"Cannot add or subtract timedelta64[ns] dtype from {self.dtype}"
            )
        if not np.all(isna(other)):
            delta = self._check_timedeltalike_freq_compat(other)
        else:
            # all-NaT TimedeltaIndex is equivalent to a single scalar td64 NaT
            return self + np.timedelta64("NaT")
        ordinals = self._addsub_int_array(delta, operator.add).asi8
        return type(self)(ordinals, dtype=self.dtype)
    def _check_timedeltalike_freq_compat(self, other):
        """
        Arithmetic operations with timedelta-like scalars or array `other`
        are only valid if `other` is an integer multiple of `self.freq`.
        If the operation is valid, find that integer multiple. Otherwise,
        raise because the operation is invalid.
        Parameters
        ----------
        other : timedelta, np.timedelta64, Tick,
                ndarray[timedelta64], TimedeltaArray, TimedeltaIndex
        Returns
        -------
        multiple : int or ndarray[int64]
        Raises
        ------
        IncompatibleFrequency
        """
        assert isinstance(self.freq, Tick) # checked by calling function
        base_nanos = self.freq.base.nanos
        if isinstance(other, (timedelta, np.timedelta64, Tick)):
            nanos = delta_to_nanoseconds(other)
        elif isinstance(other, np.ndarray):
            # numpy timedelta64 array; all entries must be compatible
            assert other.dtype.kind == "m"
            if other.dtype != TD64NS_DTYPE:
                # i.e. non-nano unit
                # TODO: disallow unit-less timedelta64
                other = other.astype(TD64NS_DTYPE)
            nanos = other.view("i8")
        else:
            # TimedeltaArray/Index
            nanos = other.asi8
        if np.all(nanos % base_nanos == 0):
            # nanos being added is an integer multiple of the
            # base-frequency to self.freq
            delta = nanos // base_nanos
            # delta is the integer (or integer-array) number of periods
            # by which will be added to self.
            return delta
        raise raise_on_incompatible(self, other)
def raise_on_incompatible(left, right):
    """
    Helper function to render a consistent error message when raising
    IncompatibleFrequency.
    Parameters
    ----------
    left : PeriodArray
    right : None, DateOffset, Period, ndarray, or timedelta-like
    Returns
    -------
    IncompatibleFrequency
        Exception to be raised by the caller.
    """
    # GH#24283 error message format depends on whether right is scalar
    if right is None or isinstance(right, (np.ndarray, ABCTimedeltaArray)):
        # No single frequency can be reported for arrays / None.
        other_freq = None
    elif isinstance(right, (ABCPeriodIndex, PeriodArray, Period, BaseOffset)):
        other_freq = right.freqstr
    else:
        # timedelta-like scalar: convert to a Tick to obtain its freqstr.
        other_freq = delta_to_tick(Timedelta(right)).freqstr
    message = DIFFERENT_FREQ.format(
        cls=type(left).__name__, own_freq=left.freqstr, other_freq=other_freq
    )
    return IncompatibleFrequency(message)
# -------------------------------------------------------------------
# Constructor Helpers
def period_array(
    data: Union[Sequence[Optional[Period]], AnyArrayLike],
    freq: Optional[Union[str, Tick]] = None,
    copy: bool = False,
) -> PeriodArray:
    """
    Construct a new PeriodArray from a sequence of Period scalars.

    Parameters
    ----------
    data : Sequence of Period objects
        A sequence of Period objects. These are required to all have
        the same ``freq.`` Missing values can be indicated by ``None``
        or ``pandas.NaT``.
    freq : str, Tick, or Offset
        The frequency of every element of the array. This can be specified
        to avoid inferring the `freq` from `data`.
    copy : bool, default False
        Whether to ensure a copy of the data is made.

    Returns
    -------
    PeriodArray

    See Also
    --------
    PeriodArray
    pandas.PeriodIndex

    Examples
    --------
    >>> period_array([pd.Period('2017', freq='A'),
    ...               pd.Period('2018', freq='A')])
    <PeriodArray>
    ['2017', '2018']
    Length: 2, dtype: period[A-DEC]

    >>> period_array([pd.Period('2017', freq='A'),
    ...               pd.Period('2018', freq='A'),
    ...               pd.NaT])
    <PeriodArray>
    ['2017', '2018', 'NaT']
    Length: 3, dtype: period[A-DEC]

    Integers that look like years are handled

    >>> period_array([2000, 2001, 2002], freq='D')
    <PeriodArray>
    ['2000-01-01', '2001-01-01', '2002-01-01']
    Length: 3, dtype: period[D]

    Datetime-like strings may also be passed

    >>> period_array(['2000-Q1', '2000-Q2', '2000-Q3', '2000-Q4'], freq='Q')
    <PeriodArray>
    ['2000Q1', '2000Q2', '2000Q3', '2000Q4']
    Length: 4, dtype: period[Q-DEC]
    """
    data_dtype = getattr(data, "dtype", None)

    # Fast paths: datetime64 data can be converted directly, and
    # period-dtype data is already in the right representation.
    if is_datetime64_dtype(data_dtype):
        return PeriodArray._from_datetime64(data, freq)
    if is_period_dtype(data_dtype):
        return PeriodArray(data, freq)

    # Any other iterable: materialize it as an ndarray first.
    if not isinstance(data, (np.ndarray, list, tuple, ABCSeries)):
        data = list(data)
    data = np.asarray(data)

    # NOTE(review): `copy` is accepted for API compatibility but is not
    # currently honoured on this path -- confirm whether that is intended.
    dtype: Optional[PeriodDtype] = PeriodDtype(freq) if freq else None

    if is_float_dtype(data) and len(data) > 0:
        raise TypeError("PeriodIndex does not allow floating point in construction")

    return PeriodArray._from_sequence(ensure_object(data), dtype=dtype)
def validate_dtype_freq(dtype, freq):
    """
    If both a dtype and a freq are available, ensure they match.  If only
    dtype is available, extract the implied freq.

    Parameters
    ----------
    dtype : dtype
    freq : DateOffset or None

    Returns
    -------
    freq : DateOffset

    Raises
    ------
    ValueError : non-period dtype
    IncompatibleFrequency : mismatch between dtype and freq
    """
    if freq is not None:
        freq = to_offset(freq)

    # With no dtype there is nothing to cross-check against.
    if dtype is None:
        return freq

    dtype = pandas_dtype(dtype)
    if not is_period_dtype(dtype):
        raise ValueError("dtype must be PeriodDtype")
    if freq is None:
        freq = dtype.freq
    elif freq != dtype.freq:
        raise IncompatibleFrequency("specified freq and dtype are different")
    return freq
def dt64arr_to_periodarr(data, freq, tz=None):
    """
    Convert an datetime-like array to values Period ordinals.

    Parameters
    ----------
    data : Union[Series[datetime64[ns]], DatetimeIndex, ndarray[datetime64ns]]
    freq : Optional[Union[str, Tick]]
        Must match the `freq` on the `data` if `data` is a DatetimeIndex
        or Series.
    tz : Optional[tzinfo]

    Returns
    -------
    ordinals : ndarray[int]
    freq : Tick
        The frequency extracted from the Series or DatetimeIndex if that's
        used.
    """
    if data.dtype != np.dtype("M8[ns]"):
        raise ValueError(f"Wrong dtype: {data.dtype}")

    # Borrow the freq from the Series/Index itself when none was passed.
    if freq is None:
        if isinstance(data, ABCIndexClass):
            freq = data.freq
        elif isinstance(data, ABCSeries):
            freq = data.dt.freq

    freq = Period._maybe_convert_freq(freq)

    # Unwrap down to the underlying ndarray before handing off to the
    # cython converter.
    if isinstance(data, (ABCIndexClass, ABCSeries)):
        data = data._values

    base = freq._period_dtype_code
    return c_dt64arr_to_periodarr(data.view("i8"), base, tz), freq
def _get_ordinal_range(start, end, periods, freq, mult=1):
if com.count_not_none(start, end, periods) != 2:
raise ValueError(
"Of the three parameters: start, end, and periods, "
"exactly two must be specified"
)
if freq is not None:
freq = to_offset(freq)
mult = freq.n
if start is not None:
start = Period(start, freq)
if end is not None:
end = Period(end, freq)
is_start_per = isinstance(start, Period)
is_end_per = isinstance(end, Period)
if is_start_per and is_end_per and start.freq != end.freq:
raise ValueError("start and end must have same freq")
if start is NaT or end is NaT:
raise ValueError("start and end must not be NaT")
if freq is None:
if is_start_per:
freq = start.freq
elif is_end_per:
freq = end.freq
else: # pragma: no cover
raise ValueError("Could not infer freq from start/end")
if periods is not None:
periods = periods * mult
if start is None:
data = np.arange(
end.ordinal - periods + mult, end.ordinal + 1, mult, dtype=np.int64
)
else:
data = np.arange(
start.ordinal, start.ordinal + periods, mult, dtype=np.int64
)
else:
data = np.arange(start.ordinal, end.ordinal + 1, mult, dtype=np.int64)
return data, freq
def _range_from_fields(
    year=None,
    month=None,
    quarter=None,
    day=None,
    hour=None,
    minute=None,
    second=None,
    freq=None,
):
    """Build ``(ordinals, freq)`` from per-field arrays/scalars, either via
    the quarterly path (year + quarter) or the generic date/time path.
    """
    # Default any missing day / time-of-day fields.
    hour = 0 if hour is None else hour
    minute = 0 if minute is None else minute
    second = 0 if second is None else second
    day = 1 if day is None else day

    ordinals = []

    if quarter is not None:
        # Quarterly path: day/hour/minute/second are ignored.
        if freq is None:
            freq = to_offset("Q")
            base = FreqGroup.FR_QTR
        else:
            freq = to_offset(freq)
            base = libperiod.freq_to_dtype_code(freq)
            if base != FreqGroup.FR_QTR:
                raise AssertionError("base must equal FR_QTR")

        freqstr = freq.freqstr
        year, quarter = _make_field_arrays(year, quarter)
        for y, q in zip(year, quarter):
            y, m = libperiod.quarter_to_myear(y, q, freqstr)
            ordinals.append(libperiod.period_ordinal(y, m, 1, 1, 1, 1, 0, 0, base))
    else:
        freq = to_offset(freq)
        base = libperiod.freq_to_dtype_code(freq)
        arrays = _make_field_arrays(year, month, day, hour, minute, second)
        for y, mth, d, h, mn, s in zip(*arrays):
            ordinals.append(libperiod.period_ordinal(y, mth, d, h, mn, s, 0, 0, base))

    return np.array(ordinals, dtype=np.int64), freq
def _make_field_arrays(*fields):
length = None
for x in fields:
if isinstance(x, (list, np.ndarray, ABCSeries)):
if length is not None and len(x) != length:
raise ValueError("Mismatched Period array lengths")
elif length is None:
length = len(x)
arrays = [
np.asarray(x)
if isinstance(x, (np.ndarray, list, ABCSeries))
else np.repeat(x, length)
for x in fields
]
return arrays
|
Two tier cage suitable for cats and small animals. Grill gap 4cm Measurements: 2 tier, 100cm height, 78cm length and 55cm width. Selling a two tier cat cage that I bought for my conure. Unfortunately the grill size is too big even with rewiring and my conure can escape. Condition: 15/10 because I have rewired every surface to make the gaps smaller. My conure used it for 1 day only. Comes with cat hammock, cat cushion, food and water bowl, bell toy (second picture), and zip ties. It has already been assembled and zip tied. Collection at my address. Delivery charges will apply.
belovedapparels: Good and friendly. Fast deal.
Cage for cats, rabbits or puppy. Size is 59cm×42cm×51cm. Condition is good. Self collect.
Cage measurement - 76cm (L) X 47cm (W) X 110cm (H). Include a used cat litter box and food bowl. Free brand new cat nail clipper. Price negotiable.
|
from django.conf import settings
from django.http import HttpResponse
from django.core.urlresolvers import resolve
from .models import PageViews
from ipware.ip import get_real_ip, get_ip
import json
class PageViewsMiddleware(object):
    """Old-style (pre-Django-1.10) middleware that counts page views.

    For GET requests whose resolved URL has a ``resource_name`` kwarg listed
    in ``settings.PAGEVIEWS_FILTER`` and whose response is a tastypie-style
    JSON body with exactly one object, record a view for
    (client, resource_uri) and inject a ``pageviews_counter`` field into the
    returned JSON. All other responses pass through untouched.
    """

    def process_response(self, request, response):
        """Return ``response`` unchanged, or a new JSON response with a
        ``pageviews_counter`` added to its single object.
        """
        if request.method != 'GET':
            return response

        try:
            view, args, kwargs = resolve(request.path)
            resource_name = kwargs.get("resource_name", None)
        except Exception:
            # resolve() raises Resolver404 for unknown paths; treat any
            # failure as "nothing to count" rather than breaking the response.
            resource_name = None

        if not resource_name or resource_name not in settings.PAGEVIEWS_FILTER:
            return response

        try:
            content = json.loads(response.content)
        except (ValueError, TypeError):
            # Not a JSON body (e.g. an HTML error page): pass through.
            return response

        if 'objects' not in content or len(content['objects']) != 1:
            return response

        resource_uri = content['objects'][0]['resource_uri']
        # Authenticated users are tracked by username, anonymous ones by IP.
        if request.user.is_authenticated():
            client = request.user.username
        else:
            client = get_ip(request)
        self._record_view(client, resource_uri)

        content['objects'][0]['pageviews_counter'] = \
            PageViews.objects.filter(resource_uri=resource_uri).count()
        return HttpResponse(json.dumps(content), content_type="application/json")

    @staticmethod
    def _record_view(client, resource_uri):
        """Ensure a PageViews row exists for (client, resource_uri),
        de-duplicating if concurrent requests created doubles.
        """
        autorepare = True  # sometimes doublons can be created, don't know why
        while autorepare:
            autorepare = False
            try:
                PageViews.objects.get_or_create(
                    client=client, resource_uri=resource_uri)
            except PageViews.MultipleObjectsReturned:
                duplicates = PageViews.objects.filter(
                    client=client, resource_uri=resource_uri)
                # Bug fix: delete only the *extra duplicates* of this pair.
                # The previous code excluded pv[0] from the whole table,
                # which deleted every unrelated PageViews row.
                duplicates.exclude(id=duplicates[0].id).delete()
                autorepare = True
|
Many lawyers don’t get new clients and files because they don’t do enough business development activities, or they waste time on the wrong activities or services, or they don’t authentically connect with potential clients. What is necessary for good business development, say successful lawyers and consultants who shared their strategies, is marketing for law firms in Laurel Grove focused on activities you do well, targeted at the right audience and carried out consistently. After all, the state of the industry is much more than the perspective of one agency or one law firm – it’s a complicated whole made up of a variety of experiences and points of view. Fairly often, it’s the attorney’s ability to market his or her services to potential clients that makes the difference. We customize our legal marketing services to address your business goals and your next clients’ needs. If you’re like many small and mid-sized law firms, you’ve at least thought about developing a marketing plan for your law firm.
Being genuine—and helpful, even if your actions may not offer immediate business—doesn’t hurt either. Each year we prepare this report as a touchstone for the state of the industry and where we see it headed. But marketing a law practice in Laurel Grove, Florida isn’t always the same as marketing other types of businesses. No two law practices are exactly alike, but every attorney needs new clients. For those law firms who need to generate business right away, there is probably no better solution than marketing your law firm through a smart and efficient Google Adwords campaign. Frequently law firms experiment with marketing and engage in isolated promotional activities not integrated with the firm’s business plan with the expectation of immediate results after the one-shot activity. Marketing techniques are all about acquiring new clients. And just like that, a single area of law has just become 4 times as powerful as before.
|
from django.core.exceptions import ObjectDoesNotExist
from django.core.urlresolvers import reverse
from django.http import HttpResponse
from django.shortcuts import render_to_response
from django.utils import simplejson

from voting.models import Vote
def get_vote_box_template(request, object_id, model, vote_url_name,
                          vote_box_url_name):
    """Render the default vote-box template for one object.

    Looks up the instance of ``model`` whose primary key is ``object_id``,
    fetches the current user's vote on it, and renders
    ``voting/default_vote_box.html`` with up/clear/down vote URLs built
    from the named URL pattern ``vote_url_name`` plus the AJAX refresh URL
    built from ``vote_box_url_name``.

    Returns a JSON error payload (HttpResponse) if no such object exists.
    Requires ``HttpResponse`` to be imported (it was missing previously).
    """
    lookup_kwargs = {'%s__exact' % model._meta.pk.name: object_id}
    try:
        obj = model._default_manager.get(**lookup_kwargs)
    except ObjectDoesNotExist:
        return HttpResponse(simplejson.dumps(dict(success=False,
            error_message='No %s found for %s.' % (model._meta.verbose_name,
            lookup_kwargs))))

    vote = Vote.objects.get_for_user(obj, request.user)

    # All three vote URLs come from the same named route; only the
    # direction differs.
    def _vote_url(direction):
        return reverse(vote_url_name,
                       kwargs={'object_id': object_id, 'direction': direction})

    vote_box_url = reverse(vote_box_url_name, kwargs={'object_id': object_id})

    return render_to_response('voting/default_vote_box.html', {
        'vote': vote,
        'upvote_url': _vote_url('up'),
        'clearvote_url': _vote_url('clear'),
        'downvote_url': _vote_url('down'),
        'vote_box_ajax_url': vote_box_url,
    })
|
A year ago, Hillsboro junior Josef Marschuetz finished sixth in the 400-meter race at the Class 4 District 1 track meet, failing to qualify for sectionals by about two seconds.
Marschuetz was literally just getting his feet back under himself after suffering a severe leg injury and enduring months of painful, solitary rehabilitation. This spring, he vowed he would cut down his time in the 400 and reach the state championships.
Saturday at Hillsboro, Marschuetz covered the single-lapper four seconds faster (50.46 seconds) to finish third in the Class 4 Sectional 1 championships. The top four finishers in each sectional event qualified for the Missouri State High School Activities Association meet in Jefferson City Friday and Saturday.
Marschuetz will be there with his Hawk teammates, something that seemed highly unlikely in the fall of 2015 when he broke his thigh bone about six inches above the knee making a tackle as a linebacker for Hillsboro football.
After his surgery, doctors said he might not ever walk normally again. He will walk – and run – the rest of his life with a femoral pin from his hip to his knee.
He said the hardest part was doing his rehab alone for days, weeks, months.
The thought of not being able to play football, for which Marschuetz hopes to receive a college scholarship, just added to his desire to get back in the game, which he did last fall. He said he’s squatting 450 pounds right now, a pretty good sign his leg is as strong as ever.
Marschuetz can play outside linebacker, defensive end or tight end for the Hawks, who had a dynamic offense last year when they finished 9-3, won the Mississippi Area Football Conference (Red) and reached the Class 4 District 1 championship game.
In the district semifinals, running back Micheal Keller scored seven touchdowns and the Hawks crushed Sikeston 72-46.
A week later in the district final against Cape Girardeau Central, Keller, Marschuetz and star wideout Isaiah Martin all were sick or injured and either didn’t play or were extremely limited. The result was a 63-24 pasting by the Tigers.
“I was out with mono, Keller was banged up, and we were missing our freaky good wide receiver (Martin),” Marschuetz said.
The Hawks return their quarterback (Tyler Isaacson), Keller, Martin and another productive ball carrier in Luke Skaggs.
What Hillsboro won’t have is any returning offensive linemen after all five graduated this month. Not to worry, Marschuetz said.
Marschuetz tried to qualify for state in the 200 as well but finished fifth in 23.16, just a tenth of a second off the fourth and final spot.
While Martin is the Hawks’ certified track star, Marschuetz said he doesn’t mind running in his shadow. He’s just happy to be running full speed again. Go, Joe, go.
|
from flask import request
from flask_restplus import Resource
from skf.api.security import security_headers, validate_privilege
from skf.api.projects.business import update_project
from skf.api.projects.serializers import project_update, message
from skf.api.projects.parsers import authorization
from skf.api.restplus import api
from skf.api.security import log, val_num, val_alpha, val_alpha_num, val_alpha_num_special
# Namespace grouping all project-related endpoints under /project.
ns = api.namespace('project', description='Operations related to projects')


@ns.route('/update/<int:project_id>')
@api.doc(params={'project_id': 'The project id'})
@api.response(404, 'Validation error', message)
class KBItemUpdate(Resource):

    @api.expect(authorization, project_update)
    @api.marshal_with(message, 'Success')
    @api.response(400, 'No results found', message)
    def put(self, project_id):
        """
        Update a project item.
        * Privileges required: **edit**
        """
        payload = request.json
        # Validate the URL parameter and the free-text fields first, then
        # the caller's privileges, before touching the database.
        val_num(project_id)
        for field in ('name', 'description', 'version'):
            val_alpha_num_special(payload.get(field))
        validate_privilege(self, 'edit')
        result = update_project(project_id, payload)
        return result, 200, security_headers()
|
Preheat the oven to Gas Mark 5, 190°C, 170°C fan, 375°F.
I like to season my chicken with a Knorr Chicken Stock Cube. Crumble the Knorr Chicken Stock Cube into the olive oil, mixing into a paste until the cube has dissolved. Rub the paste over the chicken to season it.
Pour a little olive oil into a roasting tin and place the seasoned chicken in the tin. Roast for 1 hour to 1 hour 10 minutes, until the chicken is cooked through. Make sure you check that the chicken is thoroughly cooked by inserting a skewer into the chicken near the leg; if the juices run clear, then the chicken is cooked through. If the juices are bloody, then cook the chicken for a little longer. As I often say, use your eyes when you cook.
Remove the chicken from the oven and set aside to rest in the roasting tray until still warm but just cool enough to handle.
Remove the chicken from the roasting tray and place the roasting tray on the hob. Now make the hot vinaigrette dressing. Heat the roasting juices in the pan until hot, then add in the white wine vinegar, shaking the pan to dissolve the roasting juices into the vinegar. You want to cook this until the vinegar has reduced by two-thirds and is almost syrupy.
While the vinegar is reducing, cut the warm chicken into serving pieces, using a sharp knife. Watch me and you’ll see how easy this is. First cut off the legs, then cut through the joints to separate the legs into thighs and drumsticks. Cut off the wings. Cut the breasts off the chicken, then slice the breasts into portions. Place the legs, wings and breast pieces on a serving platter. Sprinkle the finely chopped shallot over the chicken. I’m very fond of shallots, as you might have noticed.
Now finish off the dressing. Add the olive oil to the reduced vinegar and stir in. Taste to check the seasoning and adjust to taste. Spoon the hot vinaigrette over the portioned chicken. Garnish with basil, parsley and chives and serve at once.
This would be nice with a fresh tomato salad, a potato salad or simply boiled new potatoes.
|
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
import llnl.util.tty as tty
import os
import shutil
class Singularity(MakefilePackage):
    '''Singularity is a container technology focused on building portable
    encapsulated environments to support "Mobility of Compute" For older
    versions of Singularity (pre 3.0) you should use singularity-legacy,
    which has a different install base (Autotools).

    Needs post-install chmod/chown steps to enable full functionality.
    See package definition or `spack-build-out.txt` build log for details,
    e.g.

    tail -15 $(spack location -i singularity)/.spack/spack-build-out.txt
    '''

    homepage = "https://www.sylabs.io/singularity/"
    url = "https://github.com/sylabs/singularity/releases/download/v3.6.1/singularity-3.6.1.tar.gz"
    git = "https://github.com/sylabs/singularity.git"

    maintainers = ['alalazo']

    version('master', branch='master')
    version('3.6.1', sha256='6cac56106ee7f209150aaee9f8788d03b58796af1b767245d343f0b8a691121c')
    version('3.5.3', sha256='0c76f1e3808bf4c10e92b17150314b2b816be79f8101be448a6e9d7a96c9e486')
    version('3.5.2', sha256='f9c21e289377a4c40ed7a78a0c95e1ff416dec202ed49a6c616dd2c37700eab8')
    version('3.4.1', sha256='638fd7cc5ab2a20e779b8768f73baf21909148339d6c4edf6ff61349c53a70c2')
    version('3.4.0', sha256='eafb27f1ffbed427922ebe2b5b95d1c9c09bfeb897518867444fe230e3e35e41')
    version('3.3.0', sha256='070530a472e7e78492f1f142c8d4b77c64de4626c4973b0589f0d18e1fcf5b4f')
    version('3.2.1', sha256='d4388fb5f7e0083f0c344354c9ad3b5b823e2f3f27980e56efa7785140c9b616')
    version('3.1.1', sha256='7f0df46458d8894ba0c2071b0848895304ae6b1137d3d4630f1600ed8eddf1a4')

    variant('suid', default=True, description='install SUID binary')
    variant('network', default=True, description='install network plugins')

    depends_on('pkgconfig', type='build')
    depends_on('go')
    depends_on('libuuid')
    depends_on('libgpg-error')
    depends_on('libseccomp')
    depends_on('squashfs', type='run')
    depends_on('git', when='@develop')  # mconfig uses it for version info
    depends_on('shadow', type='run', when='@3.3:')
    depends_on('cryptsetup', type=('build', 'run'), when='@3.4:')

    patch('singularity_v3.4.0_remove_root_check.patch', level=0, when='@3.4.0:3.4.1')

    # Go has novel ideas about how projects should be organized.
    # We'll point GOPATH at the stage dir, and move the unpacked src
    # tree into the proper subdir in our overridden do_stage below.
    @property
    def gopath(self):
        # The stage directory doubles as the GOPATH for the build.
        return self.stage.path

    @property
    def sylabs_gopath_dir(self):
        # $GOPATH/src/github.com/sylabs/ -- the import-path parent dir.
        return join_path(self.gopath, 'src/github.com/sylabs/')

    @property
    def singularity_gopath_dir(self):
        # Where the unpacked source must live for Go to find it.
        return join_path(self.sylabs_gopath_dir, 'singularity')

    # Unpack the tarball as usual, then move the src dir into
    # its home within GOPATH.
    def do_stage(self, mirror_only=False):
        super(Singularity, self).do_stage(mirror_only)
        if not os.path.exists(self.singularity_gopath_dir):
            # Move the expanded source to its destination
            tty.debug("Moving {0} to {1}".format(
                self.stage.source_path, self.singularity_gopath_dir))
            shutil.move(self.stage.source_path, self.singularity_gopath_dir)

            # The build process still needs access to the source path,
            # so create a symlink.
            force_symlink(self.singularity_gopath_dir, self.stage.source_path)

    # MakefilePackage's stages use this via working_dir()
    @property
    def build_directory(self):
        return self.singularity_gopath_dir

    # Hijack the edit stage to run mconfig.
    def edit(self, spec, prefix):
        with working_dir(self.build_directory):
            confstring = './mconfig --prefix=%s' % prefix
            if '~suid' in spec:
                confstring += ' --without-suid'
            if '~network' in spec:
                confstring += ' --without-network'
            configure = Executable(confstring)
            configure()

    # Set these for use by MakefilePackage's default build/install methods.
    build_targets = ['-C', 'builddir', 'parallel=False']
    install_targets = ['install', '-C', 'builddir', 'parallel=False']

    def setup_build_environment(self, env):
        # Point GOPATH at the top of the staging dir for the build step.
        env.prepend_path('GOPATH', self.gopath)

    # `singularity` has a fixed path where it will look for
    # mksquashfs. If it lives somewhere else you need to specify the
    # full path in the config file. This bit uses filter_file to edit
    # the config file, uncommenting and setting the mksquashfs path.
    @run_after('install')
    def fix_mksquashfs_path(self):
        prefix = self.spec.prefix
        squash_path = join_path(self.spec['squashfs'].prefix.bin, 'mksquashfs')
        filter_file(r'^# mksquashfs path =',
                    'mksquashfs path = {0}'.format(squash_path),
                    join_path(prefix.etc, 'singularity', 'singularity.conf'))

    #
    # Assemble a script that fixes the ownership and permissions of several
    # key files, install it, and tty.warn() the user.
    # HEADSUP: https://github.com/spack/spack/pull/10412.
    #
    def perm_script(self):
        # Basename of the generated permissions-fixing script.
        return 'spack_perms_fix.sh'

    def perm_script_tmpl(self):
        # Jinja2 template name the script is rendered from.
        return "{0}.j2".format(self.perm_script())

    def perm_script_path(self):
        # Installed location of the rendered script.
        return join_path(self.spec.prefix.bin, self.perm_script())

    def _build_script(self, filename, variable_data):
        # Render the template in the package directory with the supplied
        # variables and write it to `filename`.
        # NOTE(review): relies on `spack.tengine` being importable in this
        # module's namespace (no explicit `import spack.tengine` here) --
        # confirm this is provided by `from spack import *`.
        with open(filename, 'w') as f:
            env = spack.tengine.make_environment(dirs=self.package_dir)
            t = env.get_template(self.perm_script_tmpl())
            f.write(t.render(variable_data))

    @run_after('install')
    def build_perms_script(self):
        # Only needed when the SUID starter is installed (+suid).
        if self.spec.satisfies('+suid'):
            script = self.perm_script_path()
            chown_files = ['libexec/singularity/bin/starter-suid',
                           'etc/singularity/singularity.conf',
                           'etc/singularity/capability.json',
                           'etc/singularity/ecl.toml']
            setuid_files = ['libexec/singularity/bin/starter-suid']
            self._build_script(script, {'prefix': self.spec.prefix,
                                        'chown_files': chown_files,
                                        'setuid_files': setuid_files})
            chmod = which('chmod')
            chmod('555', script)

    # Until tty output works better from build steps, this ends up in
    # the build log.  See https://github.com/spack/spack/pull/10412.
    @run_after('install')
    def caveats(self):
        if self.spec.satisfies('+suid'):
            tty.warn("""
For full functionality, you'll need to chown and chmod some files
after installing the package.  This has security implications.
For details, see:

https://sylabs.io/guides/2.6/admin-guide/security.html
https://sylabs.io/guides/3.2/admin-guide/admin_quickstart.html#singularity-security

We've installed a script that will make the necessary changes;
read through it and then execute it as root (e.g. via sudo).

The script is named:

{0}
""".format(self.perm_script_path()))
|
Capturing the essence of your family just as you are — perfectly imperfect…
Servicing the Greater Geelong area ~ Geelong, Bellarine Peninsula, Surfcoast and beyond.
Want to know a little more about Rhonda, who she is and why she loves capturing families?
|
"""Represent TransXChange concepts, and generate a matrix timetable from
TransXChange documents
"""
import re
import xml.etree.cElementTree as ET
import calendar
import datetime
import ciso8601
import logging
from psycopg2.extras import DateRange as PDateRange
from django.utils.text import slugify
from django.utils.dateparse import parse_duration
from chardet.universaldetector import UniversalDetector
from titlecase import titlecase
# Module-level logger for parse/import diagnostics.
logger = logging.getLogger(__name__)

# XML namespace prefix map used by every find()/findall() in this module.
NS = {
    'txc': 'http://www.transxchange.org.uk/'
}
# A safe date, far from any daylight savings changes or leap seconds
# NOTE(review): the comment above looks orphaned -- no date constant is
# defined below it; confirm whether something was removed.
# Matches "<anything>,<Place>" so the trailing place name can be extracted.
DESCRIPTION_REGEX = re.compile(r'.+,([^ ].+)$')
# Day name -> index (Monday=0 .. Sunday=6).
WEEKDAYS = {day: i for i, day in enumerate(calendar.day_name)}
def sanitize_description_part(part):
    """Given an oddly formatted part like 'Bus Station bay 5,Blyth',
    return a shorter, more normal version like 'Blyth'.
    """
    match = DESCRIPTION_REGEX.match(part.strip())
    if match is None:
        return part
    return match.group(1)
def correct_description(description):
    """Return the description with known typos pedantically corrected."""
    replacements = (
        ('Stitians', 'Stithians'),
        ('Kings Lynn', "King's Lynn"),
        ('Wells - Next - The - Sea', 'Wells-next-the-Sea'),
        ('Wells next the Sea', 'Wells-next-the-Sea'),
        ('Baasingstoke', 'Basingstoke'),
        ('Liskerard', 'Liskeard'),
        ('Tauton', 'Taunton'),
        ('City Centre,st Stephens Street', 'Norwich'),
        ('Charlton Horethore', 'Charlton Horethorne'),
        ('Camleford', 'Camelford'),
        ('Greenstead Green', 'Greensted Green'),
        ('Tinagel', 'Tintagel'),
        ('Plymouh City Cerntre', 'Plymouth City Centre'),
        ('Winterbourn ', 'Winterbourne'),
        ('Exetedr', 'Exeter'),
        ('- ', ' - '),
        (' -', ' - '),
        ('  ', ' '),
    )
    for old, new in replacements:
        description = description.replace(old, new)
    return description
class Stop:
    """A TransXChange StopPoint."""
    # Class-level defaults so instances built from an empty element (or
    # stubs) don't raise AttributeError in __str__.
    stop = None
    locality = None
    common_name = None
    atco_code = None

    def __init__(self, element):
        """Populate atco_code, common_name and locality from a StopPoint
        (or AnnotatedStopPointRef) element, if one is given.
        """
        if element:
            self.atco_code = element.find('txc:StopPointRef', NS)
            if self.atco_code is None:
                self.atco_code = element.find('txc:AtcoCode', NS)
            if self.atco_code is not None:
                self.atco_code = self.atco_code.text or ''
            self.common_name = element.find('txc:CommonName', NS)
            self.locality = element.find('txc:LocalityName', NS)
            if self.common_name is not None:
                self.common_name = self.common_name.text
            if self.locality is not None:
                self.locality = self.locality.text

    def __str__(self):
        # Prefix the locality only when it adds information. Bug fix: the
        # previous version evaluated `self.locality in self.common_name`
        # and raised TypeError when common_name was None.
        if self.locality and self.common_name and self.locality not in self.common_name:
            return '%s %s' % (self.locality, self.common_name)
        return self.common_name or self.atco_code
class JourneyPattern:
    """A collection of JourneyPatternSections, in order."""

    def __init__(self, element, sections):
        self.id = element.attrib.get('id')
        # Keep only the sections we actually parsed, in document order.
        section_refs = element.findall('txc:JourneyPatternSectionRefs', NS)
        self.sections = [
            sections[ref.text] for ref in section_refs if ref.text in sections
        ]
        direction_element = element.find('txc:Direction', NS)
        self.direction = direction_element.text if direction_element is not None else None

    def get_timinglinks(self):
        """Yield every timing link of every section, in order."""
        for section in self.sections:
            yield from section.timinglinks
class JourneyPatternSection:
    """A collection of JourneyPatternStopUsages, in order."""

    def __init__(self, element, stops):
        self.id = element.get('id')
        # Every child element of a section is a JourneyPatternTimingLink.
        self.timinglinks = [
            JourneyPatternTimingLink(child, stops) for child in element
        ]
class JourneyPatternStopUsage:
    """Either a 'From' or 'To' element in TransXChange."""

    def __init__(self, element, stops):
        """Build a stop usage from a From/To element, resolving the stop
        against the ``stops`` dict (falling back to a minimal Stop built
        from the element itself).
        """
        self.activity = element.find('txc:Activity', NS)
        if self.activity is not None:
            self.activity = self.activity.text
        self.sequencenumber = element.get('SequenceNumber')
        if self.sequencenumber is not None:
            self.sequencenumber = int(self.sequencenumber)
        self.stop = stops.get(element.find('txc:StopPointRef', NS).text)
        if self.stop is None:
            self.stop = Stop(element)
        self.timingstatus = element.find('txc:TimingStatus', NS)
        if self.timingstatus is not None:
            self.timingstatus = self.timingstatus.text

        self.wait_time = element.find('txc:WaitTime', NS)
        if self.wait_time is not None:
            self.wait_time = parse_duration(self.wait_time.text)
            if self.wait_time.total_seconds() > 10000:
                # Bad data detected: log via the module logger instead of
                # printing to stdout, and discard the bogus value.
                logger.warning('Suspiciously long wait time %s', self.wait_time)
                self.wait_time = None

        # Filled in later by the timetable-building code.
        self.row = None
        self.parent = None
class JourneyPatternTimingLink:
    """A From/To stop-usage pair plus the scheduled run time between them."""

    def __init__(self, element, stops):
        self.id = element.get('id')
        self.origin = JourneyPatternStopUsage(element.find('txc:From', NS), stops)
        self.destination = JourneyPatternStopUsage(element.find('txc:To', NS), stops)
        # Let each end navigate back to its link.
        self.origin.parent = self
        self.destination.parent = self
        self.runtime = parse_duration(element.find('txc:RunTime', NS).text)
def get_deadruns(journey_element):
    """Given a VehicleJourney element, return a (start ref, end ref) tuple
    of JourneyPatternTimingLink IDs (either may be None).
    """
    return (
        get_deadrun_ref(journey_element.find('txc:StartDeadRun', NS)),
        get_deadrun_ref(journey_element.find('txc:EndDeadRun', NS)),
    )
def get_deadrun_ref(deadrun_element):
    """Given a StartDeadRun or EndDeadRun element with a ShortWorking,
    return the ID of a JourneyPatternTimingLink, else None.
    """
    if deadrun_element is None:
        return None
    element = deadrun_element.find('txc:ShortWorking/txc:JourneyPatternTimingLinkRef', NS)
    if element is None:
        return None  # ignore PositioningLinks
    return element.text
class VehicleJourneyTimingLink:
    """Per-journey overrides (run time, wait times) for one pattern link."""

    def __init__(self, element):
        self.id = element.attrib.get('id')
        self.journeypatterntiminglinkref = element.find('txc:JourneyPatternTimingLinkRef', NS).text

        def optional_duration(xpath):
            # Parse a child duration element if present, else None.
            child = element.find(xpath, NS)
            return parse_duration(child.text) if child is not None else None

        self.run_time = optional_duration('txc:RunTime')
        self.from_wait_time = optional_duration('txc:From/txc:WaitTime')
        self.to_wait_time = optional_duration('txc:To/txc:WaitTime')
class VehicleJourney:
"""A journey represents a scheduled journey that happens at most once per
day. A sort of "instance" of a JourneyPattern, made distinct by having its
own start time (and possibly operating profile and dead run).
"""
operating_profile = None
journey_pattern = None
journey_ref = None
def __str__(self):
return str(self.departure_time)
def __init__(self, element, services, serviced_organisations):
self.code = element.find('txc:VehicleJourneyCode', NS).text
self.private_code = element.find('txc:PrivateCode', NS)
if self.private_code is not None:
self.private_code = self.private_code.text
self.service_ref = element.find('txc:ServiceRef', NS).text
self.line_ref = element.find('txc:LineRef', NS).text
journeypatternref_element = element.find('txc:JourneyPatternRef', NS)
if journeypatternref_element is not None:
self.journey_pattern = services[self.service_ref].journey_patterns.get(journeypatternref_element.text)
else:
# Journey has no direct reference to a JourneyPattern.
# Instead, it has a reference to another journey...
self.journey_ref = element.find('txc:VehicleJourneyRef', NS).text
operatingprofile_element = element.find('txc:OperatingProfile', NS)
if operatingprofile_element is not None:
self.operating_profile = OperatingProfile(operatingprofile_element, serviced_organisations)
departure_time = datetime.datetime.strptime(
element.find('txc:DepartureTime', NS).text, '%H:%M:%S'
)
self.departure_time = datetime.timedelta(hours=departure_time.hour,
minutes=departure_time.minute,
seconds=departure_time.second)
self.start_deadrun, self.end_deadrun = get_deadruns(element)
self.operator = element.find('txc:OperatorRef', NS)
if self.operator is not None:
self.operator = self.operator.text
sequencenumber = element.get('SequenceNumber')
self.sequencenumber = sequencenumber and int(sequencenumber)
timing_links = element.findall('txc:VehicleJourneyTimingLink', NS)
self.timing_links = [VehicleJourneyTimingLink(timing_link) for timing_link in timing_links]
note_elements = element.findall('txc:Note', NS)
if note_elements is not None:
self.notes = {
note_element.find('txc:NoteCode', NS).text: note_element.find('txc:NoteText', NS).text
for note_element in note_elements
}
    def get_timinglinks(self):
        """Yield (pattern timing link, journey-specific timing link or None).

        Journey-specific links are matched to pattern links by
        journeypatterntiminglinkref; pattern links without an override are
        paired with None.
        """
        pattern_links = self.journey_pattern.get_timinglinks()
        if self.timing_links:
            timing_links = iter(self.timing_links)
            journey_link = next(timing_links)
            for link in pattern_links:
                if link.id == journey_link.journeypatterntiminglinkref:
                    yield link, journey_link
                    # Advance to the next override; when they run out, the
                    # exhausted journey_link simply never matches again.
                    try:
                        journey_link = next(timing_links)
                    except StopIteration:
                        pass
                else:
                    yield link, None
        else:
            for link in pattern_links:
                yield link, None
    def get_times(self):
        """Yield a Cell for each in-service stop of this journey.

        Walks the timing links, accumulating run and wait times (the
        journey-specific overrides take precedence over the pattern's
        values), and suppresses cells that fall within a dead run.
        """
        stopusage = None
        time = self.departure_time
        # A start dead run means the first stops are not in public service.
        deadrun = self.start_deadrun is not None
        deadrun_next = False
        wait_time = None
        for timinglink, journey_timinglink in self.get_timinglinks():
            stopusage = timinglink.origin
            if deadrun and self.start_deadrun == timinglink.id:
                deadrun = False  # end of dead run
            if journey_timinglink and journey_timinglink.from_wait_time is not None:
                wait_time = journey_timinglink.from_wait_time
            else:
                wait_time = stopusage.wait_time or wait_time
            if wait_time:
                # Vehicle waits here: arrival and departure differ.
                next_time = time + wait_time
                if not deadrun:
                    yield Cell(stopusage, time, next_time)
                time = next_time
            elif not deadrun:
                yield Cell(stopusage, time, time)
            if journey_timinglink and journey_timinglink.run_time is not None:
                run_time = journey_timinglink.run_time
            else:
                run_time = timinglink.runtime
            if run_time:
                time += run_time
            # The end dead run starts AFTER the link that triggers it,
            # hence the one-iteration delay via deadrun_next.
            if deadrun_next:
                deadrun = True
                deadrun_next = False
            elif self.end_deadrun == timinglink.id:
                deadrun_next = True  # start of dead run
            stopusage = timinglink.destination
            if journey_timinglink and journey_timinglink.to_wait_time is not None:
                wait_time = journey_timinglink.to_wait_time
            else:
                wait_time = stopusage.wait_time
        if not deadrun:
            # Final arrival at the last link's destination.
            yield Cell(timinglink.destination, time, time)
class ServicedOrganisation:
    """A TransXChange ServicedOrganisation (e.g. a school), holding its
    working-day and holiday DateRanges."""

    def __init__(self, element):
        self.code = element.find('txc:OrganisationCode', NS).text
        name_element = element.find('txc:Name', NS)
        self.name = name_element.text if name_element is not None else None
        self.working_days = self._date_ranges(element, 'txc:WorkingDays')
        self.holidays = self._date_ranges(element, 'txc:Holidays')

    @staticmethod
    def _date_ranges(element, tag):
        """Return the DateRanges under *tag*, or [] when the tag is absent."""
        container = element.find(tag, NS)
        if container is None:
            return []
        return [DateRange(child) for child in container]
class ServicedOrganisationDayType:
    """Resolves the ServicedOrganisationRefs of an OperatingProfile to the
    organisations whose working days/holidays switch a journey on or off."""

    def __init__(self, element, servicedorgs):
        self.nonoperation_holidays = None
        self.nonoperation_workingdays = None
        self.operation_holidays = None
        self.operation_workingdays = None
        # Days of non-operation:
        noop_element = element.find('txc:DaysOfNonOperation', NS)
        if noop_element is not None:
            self.nonoperation_holidays, self.nonoperation_workingdays = (
                self._resolve(noop_element, servicedorgs))
        # Days of operation:
        op_element = element.find('txc:DaysOfOperation', NS)
        if op_element is not None:
            self.operation_holidays, self.operation_workingdays = (
                self._resolve(op_element, servicedorgs))

    @staticmethod
    def _resolve(element, servicedorgs):
        """Return the (holidays, working days) organisations referenced under
        *element*, each None when no reference is present."""
        hols_ref = element.find('txc:Holidays/txc:ServicedOrganisationRef', NS)
        work_ref = element.find('txc:WorkingDays/txc:ServicedOrganisationRef', NS)
        return (
            servicedorgs[hols_ref.text] if hols_ref is not None else None,
            servicedorgs[work_ref.text] if work_ref is not None else None,
        )
class DayOfWeek:
    """A day of the week stored as an int (Monday=0 ... Sunday=6).

    Comparable with plain ints as well as other DayOfWeek instances.
    """
    def __init__(self, day):
        """*day* may be an int (0-6) or a day-name key of WEEKDAYS."""
        if isinstance(day, int):
            self.day = day
        else:
            self.day = WEEKDAYS[day]

    def __eq__(self, other):
        # isinstance, not `type(other) == int`, so int subclasses compare too.
        if isinstance(other, int):
            return self.day == other
        return self.day == other.day

    def __hash__(self):
        # Defining __eq__ alone would make instances unhashable (Python 3
        # sets __hash__ to None); hash consistently with the int comparison.
        return hash(self.day)

    def __repr__(self):
        return calendar.day_name[self.day]
class OperatingProfile:
    """When a Service or VehicleJourney operates: regular days of the week,
    special date ranges, serviced-organisation days and bank holidays."""
    servicedorganisation = None
    nonoperation_days = ()
    operation_days = ()

    def __init__(self, element, servicedorgs):
        # (Removed a no-op `element = element` assignment.)
        # Regular days of the week:
        week_days_element = element.find('txc:RegularDayType/txc:DaysOfWeek', NS)
        self.regular_days = []
        if week_days_element is not None:
            # e.tag[33:] strips the namespace prefix from the tag name.
            for day in [e.tag[33:] for e in week_days_element]:
                if 'To' in day:
                    # e.g. 'MondayToFriday' -> days 0..4 inclusive.
                    day_range_bounds = [WEEKDAYS[i] for i in day.split('To')]
                    day_range = range(day_range_bounds[0], day_range_bounds[1] + 1)
                    self.regular_days += [DayOfWeek(i) for i in day_range]
                elif day == 'Weekend':
                    self.regular_days += [DayOfWeek(5), DayOfWeek(6)]
                elif day[:3] == 'Not':
                    # TODO(review): 'NotMonday'-style exclusions are not
                    # supported; previously this branch just printed the tag
                    # (debug leftover).
                    pass
                else:
                    self.regular_days.append(DayOfWeek(day))
        # Special days:
        special_days_element = element.find('txc:SpecialDaysOperation', NS)
        if special_days_element is not None:
            nonoperation_days_element = special_days_element.find('txc:DaysOfNonOperation', NS)
            if nonoperation_days_element is not None:
                self.nonoperation_days = list(map(DateRange, nonoperation_days_element.findall('txc:DateRange', NS)))
            operation_days_element = special_days_element.find('txc:DaysOfOperation', NS)
            if operation_days_element is not None:
                self.operation_days = list(map(DateRange, operation_days_element.findall('txc:DateRange', NS)))
        # Serviced organisation day types:
        servicedorg_days_element = element.find('txc:ServicedOrganisationDayType', NS)
        if servicedorg_days_element is not None:
            self.servicedorganisation = ServicedOrganisationDayType(servicedorg_days_element, servicedorgs)
        # Bank holidays:
        bank_holidays_operation_element = element.find('txc:BankHolidayOperation/txc:DaysOfOperation', NS)
        bank_holidays_nonoperation_element = element.find('txc:BankHolidayOperation/txc:DaysOfNonOperation', NS)
        if bank_holidays_operation_element is not None:
            self.operation_bank_holidays = [e.tag[33:] for e in bank_holidays_operation_element]
        else:
            self.operation_bank_holidays = []
        if bank_holidays_nonoperation_element is not None:
            self.nonoperation_bank_holidays = [e.tag[33:] for e in bank_holidays_nonoperation_element]
        else:
            self.nonoperation_bank_holidays = []
class DateRange:
    """An inclusive range of dates parsed from StartDate/EndDate elements.

    The end date is optional; a missing end makes the range open-ended.
    """

    def __init__(self, element):
        self.start = ciso8601.parse_datetime(element.find('txc:StartDate', NS).text).date()
        end_element = element.find('txc:EndDate', NS)
        end_text = end_element.text if end_element is not None else None
        # Preserve a falsy end value (None or '') unchanged.
        self.end = ciso8601.parse_datetime(end_text).date() if end_text else end_text

    def __str__(self):
        if self.start == self.end:
            return self.start.strftime('%-d %B %Y')
        return '%s to %s' % (self.start, self.end)

    def contains(self, date):
        """Whether *date* falls within the range (end-inclusive)."""
        if date < self.start:
            return False
        return not self.end or self.end >= date

    def dates(self):
        """The range as a closed PDateRange."""
        return PDateRange(self.start, self.end, '[]')
class OperatingPeriod(DateRange):
    """A DateRange with a friendlier string form describing when a service
    is valid."""

    def __str__(self):
        start = self.start
        end = self.end
        if start == end:
            return start.strftime('on %-d %B %Y')
        today = datetime.date.today()
        if start > today:
            # A future period: show both ends when they are close together,
            # abbreviating the start date where month/year repeat.
            if end and (end - start).days < 14:
                start_format = '%-d'
                if start.month != end.month:
                    start_format += ' %B'
                if start.year != end.year:
                    start_format += ' %Y'
                return 'from {} to {}'.format(
                    start.strftime(start_format), end.strftime('%-d %B %Y'))
            return start.strftime('from %-d %B %Y')
        # The end date is often bogus,
        # but show it if the period seems short enough to be relevant
        if end and (end - start).days < 7:
            return end.strftime('until %-d %B %Y')
        return ''
class Service:
    """A TransXChange Service: mode, operator, operating period, journey
    patterns, plus a cleaned-up description split into parts."""
    description = None
    description_parts = None
    via = None

    def set_description(self, description):
        """Normalise *description*, then split it into description_parts and
        an optional trailing 'via' destination."""
        if description.isupper():
            description = titlecase(description)
        elif ' via ' in description and description[:description.find(' via ')].isupper():
            # Only the part before ' via ' is shouting - titlecase just that.
            parts = description.split(' via ')
            parts[0] = titlecase(parts[0])
            description = ' via '.join(parts)
        self.description = correct_description(description)
        self.via = None
        if ' - ' in self.description:
            parts = self.description.split(' - ')
        elif ' to ' in self.description:
            parts = self.description.split(' to ')
        else:
            parts = [self.description]
        self.description_parts = [sanitize_description_part(part) for part in parts]
        if ' via ' in self.description_parts[-1]:
            self.description_parts[-1], self.via = self.description_parts[-1].split(' via ', 1)

    def __init__(self, element, serviced_organisations, journey_pattern_sections):
        self.element = element
        mode_element = element.find('txc:Mode', NS)
        if mode_element is not None:
            self.mode = mode_element.text
        else:
            self.mode = ''
        self.operator = element.find('txc:RegisteredOperatorRef', NS)
        if self.operator is not None:
            self.operator = self.operator.text
        operatingprofile_element = element.find('txc:OperatingProfile', NS)
        if operatingprofile_element is not None:
            self.operating_profile = OperatingProfile(operatingprofile_element, serviced_organisations)
        self.operating_period = OperatingPeriod(element.find('txc:OperatingPeriod', NS))
        self.service_code = element.find('txc:ServiceCode', NS).text
        description_element = element.find('txc:Description', NS)
        if description_element is not None:
            self.set_description(description_element.text)
        self.origin = element.find('txc:StandardService/txc:Origin', NS).text
        self.destination = element.find('txc:StandardService/txc:Destination', NS).text
        self.vias = element.find('txc:StandardService/txc:Vias', NS)
        # Fix: truth-testing an Element is deprecated, and an empty <Vias>
        # element is falsy - `if self.vias:` left the raw Element in place.
        # Test explicitly against None instead.
        if self.vias is not None:
            self.vias = [via.text for via in self.vias]
        self.journey_patterns = {
            journey_pattern.id: journey_pattern for journey_pattern in (
                JourneyPattern(journey_pattern, journey_pattern_sections)
                for journey_pattern in element.findall('txc:StandardService/txc:JourneyPattern', NS)
            ) if journey_pattern.sections
        }
class TransXChange:
    """A parsed TransXChange document: stops, services and vehicle journeys.

    Parsing is incremental (iterparse), clearing bulky elements as soon as
    they have been consumed to keep memory use down.
    """
    def get_journeys(self, service_code, line_id):
        """Return the journeys for the given service code and line id."""
        return [journey for journey in self.journeys
                if journey.service_ref == service_code and journey.line_ref == line_id]
    def __get_journeys(self, journeys_element, serviced_organisations):
        # Parse every VehicleJourney, keyed by its journey code.
        journeys = {
            journey.code: journey for journey in (
                VehicleJourney(element, self.services, serviced_organisations)
                for element in journeys_element
            )
        }
        # Some Journeys do not have a direct reference to a JourneyPattern,
        # but rather a reference to another Journey which has a reference to a JourneyPattern
        for journey in iter(journeys.values()):
            if journey.journey_ref and not journey.journey_pattern:
                journey.journey_pattern = journeys[journey.journey_ref].journey_pattern
        # Drop journeys whose pattern could not be resolved at all.
        return [journey for journey in journeys.values() if journey.journey_pattern]
    def __init__(self, open_file):
        """Parse the TransXChange XML in *open_file*."""
        try:
            # Sniff the file's encoding so the XML parser decodes correctly;
            # a TypeError (e.g. a text-mode stream) falls back to the default.
            detector = UniversalDetector()
            for line in open_file:
                detector.feed(line)
                if detector.done:
                    break
            detector.close()
            encoding = detector.result['encoding']
            if encoding == 'UTF-8-SIG':
                encoding = 'utf-8'
            parser = ET.XMLParser(encoding=encoding)
        except TypeError:
            parser = None
        open_file.seek(0)
        iterator = ET.iterparse(open_file, parser=parser)
        self.services = {}
        # element = None
        serviced_organisations = None
        journey_pattern_sections = {}
        for _, element in iterator:
            # [33:] strips the namespace prefix from the tag name.
            tag = element.tag[33:]
            if tag == 'StopPoints':
                stops = (Stop(stop) for stop in element)
                self.stops = {stop.atco_code: stop for stop in stops}
                element.clear()
            elif tag == 'Routes':
                # routes = {
                # route.get('id'): route.find('txc:Description', NS).text
                # for route in element
                # }
                element.clear()
            elif tag == 'RouteSections':
                element.clear()
            elif tag == 'Operators':
                self.operators = element
            elif tag == 'JourneyPatternSections':
                # Keep only sections that actually contain timing links.
                for section in element:
                    section = JourneyPatternSection(section, self.stops)
                    if section.timinglinks:
                        journey_pattern_sections[section.id] = section
                element.clear()
            elif tag == 'ServicedOrganisations':
                serviced_organisations = (ServicedOrganisation(child) for child in element)
                serviced_organisations = {
                    organisation.code: organisation for organisation in serviced_organisations
                }
            elif tag == 'VehicleJourneys':
                try:
                    self.journeys = self.__get_journeys(element, serviced_organisations)
                except (AttributeError, KeyError) as e:
                    # Malformed journeys make the document unusable; log and
                    # abort parsing.
                    logger.error(e, exc_info=True)
                    return
                element.clear()
            elif tag == 'Service':
                service = Service(element, serviced_organisations, journey_pattern_sections)
                self.services[service.service_code] = service
            elif tag == 'Garages':
                # print(ET.tostring(element).decode())
                element.clear()
        # After the loop, `element` is the document root; keep its most
        # recent creation/modification date (YYYY-MM-DD).
        self.element = element
        self.transxchange_date = max(
            element.attrib['CreationDateTime'], element.attrib['ModificationDateTime']
        )[:10]
class Cell:
    """One cell in a timetable grid: a stop plus arrival/departure times."""
    last = False  # whether this is the final cell of a journey (set externally)
    def __init__(self, stopusage, arrival_time, departure_time):
        self.stopusage = stopusage
        self.arrival_time = arrival_time
        self.departure_time = departure_time
        # Truthy when both times are present and differ (vehicle waits here).
        self.wait_time = arrival_time and departure_time and arrival_time != departure_time
def stop_is_at(stop, text):
    """Whether a given slugified string, roughly matches either
    this stop's locality's name, or this stop's name
    (e.g. 'kings-lynn' matches 'kings-lynn-bus-station' and vice versa).

    Returns 2 for an exact match, 1 for a partial match, False otherwise.
    """
    def _score(value):
        slug = slugify(value)
        if slug == text:
            return 2
        if slug in text or text in slug:
            return 1
        return 0

    if stop.locality:
        locality_score = _score(stop.locality)
        if locality_score:
            return locality_score
    name_score = _score(stop.common_name)
    if name_score:
        return name_score
    return False
class Grouping:
    """One direction of a service, with its origin and destination stops and
    the description parts inherited from the parent Service."""
    def __init__(self, parent, origin, destination):
        self.description_parts = parent.description_parts
        self.via = parent.via
        self.origin = origin
        self.destination = destination
    def starts_at(self, text):
        # Returns 2 (exact), 1 (partial) or False - see stop_is_at().
        return stop_is_at(self.origin, text)
    def ends_at(self, text):
        return stop_is_at(self.destination, text)
    def __str__(self):
        """The description ordered to match this direction of travel,
        or '' when neither order matches the origin/destination."""
        parts = self.description_parts
        if parts:
            start = slugify(parts[0])
            end = slugify(parts[-1])
            # Score the stated order against the reversed order.
            same_score = self.starts_at(start) + self.ends_at(end)
            reverse_score = self.starts_at(end) + self.ends_at(start)
            # Keep the stated order when it scores better, or when both
            # orders are a perfect match (4).
            if same_score > reverse_score or (reverse_score == 4 and same_score == 4):
                description = ' - '.join(parts)
            elif same_score < reverse_score:
                description = ' - '.join(reversed(parts))
            else:
                description = None
            if description:
                if self.via:
                    description += ' via ' + self.via
                return description
        return ''
|
• 2018. Jul. 16. 2018. Jul. 3.
Research suggests that people put on an average of a kilogram or two during the holidays. Though it may not seem to be such a big deal, shedding that weight could actually take up to 4 or 5 months. Don’t worry about a thing; with our diet tips you’ll easily navigate the season’s dietary minefield!
Holiday is the time to loosen up a little bit, so you probably wouldn’t want to give up on a small amount of alcohol while on vacation. It’s useful to know that mixed drinks like vodka tonic are higher in calories than wine, for instance, but a light beer or champagne are also a better choice. Craving a cocktail? Opt for club soda instead of tonic water and save 80 calories!
Surprisingly, treating yourself to something before dinner can be a good idea. If you want to take the edge off your hunger the right way, stay away from fried finger foods like egg rolls or French fries, and eat raw vegetables, nuts, shrimp cocktail, chicken satay or mozzarella & veggie skewers, for example.
We all love a good creamy dip, but they can pack as much as 100 calories per scoop. Fortunately there are healthy alternatives to crab and blue cheese: reach for hummus or salsa which only contain 25 and 4 calories per tablespoon.
Know that roasting is one of the healthiest ways to prepare a meal, and it also brings out the natural flavour. Also keep in mind how cheese and creamy soups can turn a healthy vegetable dish into a calorie bomb.
It’s possible to end your meal on a sweet note without eating something unhealthy. Help yourself to fruit (it is lighter than most desserts), or turn a few pages back to our “Have Fun” section where we list our favourite confectionaries where sugar-free delicacies are available!
|
import sys
from flask import Flask, jsonify
import srvlookup
app = Flask(__name__)
# Maps Mesos agents' private IPs to their public IPs, so SRV lookup results
# can be rewritten into externally reachable endpoints.
# NOTE(review): hard-coded for one specific cluster; presumably this should
# be loaded from configuration - confirm before reuse.
mapping = {
    "10.0.4.70": "52.89.82.119",
    "10.0.4.63": "52.89.78.120",
    "10.0.4.66": "52.88.33.238",
    "10.0.4.71": "52.89.79.28",
    "10.0.4.64": "52.89.79.55",
    "10.0.4.68": "52.89.76.194",
    "10.0.4.69": "52.89.78.250",
    "10.0.4.72": "52.89.88.228",
    "10.0.4.67": "52.89.77.0",
    "10.0.4.65": "52.89.80.185"
}
def lookup_backends(name):
    """Look up SRV records for *name* under the marathon.mesos domain.

    Returns the list of SRV results, or [] when the lookup fails.
    """
    try:
        return srvlookup.lookup(name, 'tcp', 'marathon.mesos')
    except Exception as e:
        # Fix: `except e:` referenced an undefined name, so any lookup
        # failure raised NameError instead of being handled. Also replaced
        # the Python-2-only `print >> sys.stderr` with a portable write.
        sys.stderr.write('%s\n' % e)
        return []
@app.route('/')
def index():
    """Usage hint returned from the service root."""
    return "hit /<app_name> to get the host and port"
@app.route('/<name>')
def read(name=""):
    """Resolve *name* via SRV lookup and return its public host:port
    endpoints as JSON."""
    results = []
    # Fix: `print >> sys.stderr` is Python-2-only syntax (a SyntaxError on
    # Python 3); sys.stderr.write works on both.
    sys.stderr.write("looking up %s\n" % name)
    for backend in lookup_backends(name):
        # NOTE(review): assumes every SRV host appears in `mapping`; an
        # unknown host raises KeyError - confirm that is intended.
        endpoint = "%s:%s" % (mapping[backend.host], backend.port)
        sys.stderr.write("got backend %s\n" % endpoint)
        results.append(endpoint)
    return jsonify({"results": results})
if __name__ == '__main__':
    # Listen on all interfaces; debug mode is for development only.
    app.run(host="0.0.0.0", debug=True, port=8080)
|
Don't see exactly what you are looking for? Give us a call and let us help you find it.
You can save money on every Subaru model purchased at Gregoris Subaru in Valley Stream. Our friendly salespeople strive to provide the highest levels of customer service and satisfaction, making us an excellent choice for a new Subaru in the Manhattan area. We have a car lot full of exciting new 2019 Subaru vehicles, including popular models such as the Forester, Outback, Impreza and Legacy. With so many new cars and SUVs to choose from including the new 2019 Subaru Ascent, there's something for everyone from the Manhattan NY, Bayside NY and Hempstead NY areas at our new Subaru and used car dealership.
To begin your new car search, use the Power Search tools above to filter your search by year, make, model and body style. When you've found the new Subaru that suits your unique needs, pay our Valley Stream dealership a visit to chat with our auto financing department about an auto lease or loan.
Gregoris Subaru in Valley Stream is a great place to start your search for a new vehicle in the Manhattan area! If you are not in the market for a new Subaru, you can always browse our pre-owned inventory for great deals on used cars, trucks and SUVs. Our conveniently located Valley Stream used car dealership is stocked with high-quality used vehicles. To experience them in person, visit us at 555 & 575 West Merrick Road, or give us a call.
And of course, our service does not stop after you drive a new Subaru or used car off our lot. We proudly service and repair any Subaru that we sell, as well as used cars. Our commitment to keeping your car going mile after mile is a big reason why Gregoris Subaru is the place to go for car service in Valley Stream. Visit us with any of your auto maintenance needs to find out why, or give us a call to ask about our top-quality car repair experts.
|
# Copyright 2017 The Forseti Security Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Rules engine for firewall rules."""
from builtins import object
import itertools
import threading
from collections import namedtuple
from google.cloud.forseti.common.gcp_type import firewall_rule
from google.cloud.forseti.common.gcp_type import resource as resource_mod
from google.cloud.forseti.common.gcp_type import resource_util
from google.cloud.forseti.common.util import logger
from google.cloud.forseti.common.util import relationship
from google.cloud.forseti.scanner.audit import base_rules_engine as bre
from google.cloud.forseti.scanner.audit import rules as scanner_rules
LOGGER = logger.get_logger(__name__)
# Exception hierarchy: every error raised by this rules engine derives from
# Error, so callers can catch the whole family with a single clause.
class Error(Exception):
    """Base error class for the module."""
class DuplicateFirewallRuleError(Error):
    """Raised if a rule id is reused in the rule definitions, must be unique."""
class DuplicateFirewallGroupError(Error):
    """Raised if group id is reused in the group definitions, must be unique."""
class RuleDoesntExistError(Error):
    """Raised if a rule group tries to add a rule that doesn't exist."""
class GroupDoesntExistError(Error):
    """Raised if an org policy tries to add a group that doesn't exist."""
class InvalidRuleDefinition(Error):
    """Raised if a rule definition is invalid."""
class InvalidGroupDefinition(Error):
    """Raised if a group definition is invalid."""
class InvalidOrgDefinition(Error):
    """Raised if an org definition is invalid."""
class FirewallRulesEngine(bre.BaseRulesEngine):
    """Rules engine for firewall resources."""
    def __init__(self, rules_file_path, snapshot_timestamp=None):
        """Initialize.
        Args:
            rules_file_path (str): File location of rules.
            snapshot_timestamp (str): The snapshot to work with.
        """
        super(FirewallRulesEngine, self).__init__(
            rules_file_path=rules_file_path,
            snapshot_timestamp=snapshot_timestamp)
        # Serialises rule-book (re)builds across threads.
        self._repository_lock = threading.RLock()
        self.rule_book = None
    def build_rule_book(self, global_configs):
        """Build RuleBook from the rule definition file.
        Args:
            global_configs (dict): Global configurations.
        """
        del global_configs  # unused.
        with self._repository_lock:
            rule_file_dict = self._load_rule_definitions()
            rule_defs = rule_file_dict.get('rules', [])
            group_defs = rule_file_dict.get('rule_groups', [])
            org_policy = rule_file_dict.get('org_policy', [])
            self.rule_book = RuleBook(
                rule_defs=rule_defs,
                group_defs=group_defs,
                org_policy=org_policy,
                snapshot_timestamp=self.snapshot_timestamp)
    def find_violations(self, resource, policy, force_rebuild=False):
        """Determine whether policy violates rules.
        Args:
            resource (Resource): The resource that the policy belongs to.
            policy (dict): The policy to compare against the rules.
            force_rebuild (bool): If True, rebuilds the rule book.
        This will reload the rules definition file and add the rules to the
        book.
        Returns:
            list: A list of the rule violations.
        """
        if self.rule_book is None or force_rebuild:
            # NOTE(review): build_rule_book deletes its argument unused, so
            # passing full_rules_path here is harmless but misleading.
            self.build_rule_book(self.full_rules_path)
        violations = self.rule_book.find_violations(resource, policy)
        return list(violations)
class RuleBook(bre.BaseRuleBook):
    """The RuleBook for firewall auditing.

    Rules from the rules definition file are parsed and then the hierarchy and
    enforcement points are parsed. Rules then are assessed at the first
    applicable point in the ancestry tree that has rules.

    Sample org structure::

          org 1234
          /      \\
        f-1      p-c
        / \\
      p-a p-b

    Rules can be applied at any node above. When a policy is being audited,
    the rule book will start at the lowest level (the project) and will
    walk up the hierarchy until it reaches the first instance with rules and
    these are the only rules that are checked.
    """
    def __init__(self,
                 rule_defs=None,
                 snapshot_timestamp=None,
                 group_defs=None,
                 org_policy=None):
        """Initialize.
        Args:
            rule_defs (list): The parsed list of dictionary rules from the YAML
                definition file.
            snapshot_timestamp (str): The snapshot to work with.
            group_defs (list): The parsed list of dictionary group ids to rules.
            org_policy (dict): The parsed org policy configuration.
        """
        super(RuleBook, self).__init__()
        # rule id -> position in the definitions file (for log messages).
        self.rule_indices = {}
        # rule id -> Rule.
        self.rules_map = {}
        # group id -> list of rule ids.
        self.rule_groups_map = {}
        # GCP resource -> sorted list of rule ids enforced there.
        self.org_policy_rules_map = {}
        self.snapshot_timestamp = snapshot_timestamp or None
        self._repository_lock = threading.RLock()
        if rule_defs:
            self.add_rules(rule_defs)
        if group_defs:
            self.add_rule_groups(group_defs)
        if org_policy:
            self.add_org_policy(org_policy)
    def add_rules(self, rule_defs):
        """Adds rules to rule book.
        Args:
            rule_defs (list): Rule definition dictionaries from yaml config file.
        Raises:
            InvalidRuleDefinition: If the rule is missing required fields or the
                fields have invalid values.
        """
        with self._repository_lock:
            for i, rule_def in enumerate(rule_defs):
                if rule_def is not None:
                    self.add_rule(rule_def, i)
    def add_rule(self, rule_def, rule_index):
        """Adds a rule to the rule book.
        Args:
            rule_def (Rule): A Rule used to check for violations.
            rule_index (int): Used for logs.
        Raises:
            DuplicateFirewallRuleError: When the rule by the same name exists.
        """
        rule = Rule.from_config(rule_def)
        if rule.id in self.rules_map:
            raise DuplicateFirewallRuleError(
                'Rule id "%s" already in rules (rule %s)' % (
                    rule.id, rule_index))
        self.rule_indices[rule.id] = rule_index
        self.rules_map[rule.id] = rule
    def add_rule_groups(self, group_defs):
        """Creates group to rule matching.
        Args:
            group_defs (dict): A dictionary with a group id and a list of rule ids
                that will be included by including this group in a policy.
        Raises:
            DuplicateFirewallGroupError: Raised if the group id already exists.
            RuleDoesntExistError: Raised if a rule included in the group does not
                exist.
            InvalidGroupDefinition: Raised if a group definition is invalid.
        """
        for group_def in group_defs:
            group_id = group_def.get('group_id')
            if not group_id:
                raise InvalidGroupDefinition('Group requires a group id')
            if group_id in self.rule_groups_map:
                raise DuplicateFirewallGroupError(
                    'Group id already exists: %s' % group_id)
            rule_ids = group_def.get('rule_ids')
            if not rule_ids:
                raise InvalidGroupDefinition(
                    'Group "%s" does not have any rules' % group_id)
            # Every referenced rule must already have been added via add_rules.
            for rule_id in rule_ids:
                if rule_id not in self.rules_map:
                    raise RuleDoesntExistError(
                        'Rule id "%s" does not exist, cannot be in group' %
                        rule_id)
            self.rule_groups_map[group_id] = rule_ids
    def add_org_policy(self, org_def):
        """Creates org policy and rule mapping.

        Rules can be applied at any node of the resource hierarchy. When a
        policy is being audited, the rule book will start at the lowest level
        (the project) and will walk up the hierarchy until it reaches the
        first instance with rules and these are the only rules that are
        checked.

        Args:
            org_def (dict): A dictionary of resource ids and enforced rules.
        Raises:
            RuleDoesntExistError: Raised if a rule included in the group does not
                exist.
            GroupDoesntExistError: Raised if a group included in an org policy
                does not exist.
            InvalidOrgDefinition: Raised if org policy doesn't have resources.
        """
        resources = org_def.get('resources', [])
        if not resources:
            raise InvalidOrgDefinition('Org policy does not have any resources')
        for resource in resources:
            resource_type = resource_mod.ResourceType.verify(
                resource.get('type'))
            ids = resource.get('resource_ids', [])
            rules = resource.get('rules', {})
            groups = rules.get('group_ids', [])
            # Expand group references into their member rule ids.
            expanded_rules = set()
            for group_id in groups:
                if group_id not in self.rule_groups_map:
                    raise GroupDoesntExistError(
                        'Group "%s" does not exist' % group_id)
                expanded_group = self.rule_groups_map.get(group_id, [])
                expanded_rules.update(expanded_group)
            for rule_id in rules.get('rule_ids', []):
                if rule_id not in self.rules_map:
                    raise RuleDoesntExistError(
                        'Rule id "%s" does not exist' % rule_id)
                expanded_rules.add(rule_id)
            for resource_id in ids:
                gcp_resource = resource_util.create_resource(
                    resource_id=resource_id,
                    resource_type=resource_type)
                self.org_policy_rules_map[gcp_resource] = sorted(expanded_rules)
    def find_violations(self, resource, policies):
        """Find policy binding violations in the rule book.
        Args:
            resource (Resource): The GCP resource associated with the
                policy binding.
                This is where we start looking for rule violations and
                we move up the resource hierarchy (if permitted by the
                resource's "inherit_from_parents" property).
            policies(list): A list of FirewallRule policies.
        Returns:
            iterable: A generator of the rule violations.
        """
        violations = itertools.chain()
        # NOTE(review): assumes policies is non-empty - confirm callers
        # guarantee this before relying on policies[0].
        resource_ancestors = (
            relationship.find_ancestors(resource, policies[0].full_name))
        for curr_resource in resource_ancestors:
            if curr_resource in self.org_policy_rules_map:
                org_policy_rules = self.org_policy_rules_map.get(
                    curr_resource, [])
                for rule_id in org_policy_rules:
                    rule = self.rules_map[rule_id]
                    violations = itertools.chain(
                        violations,
                        rule.find_violations(policies))
                break  # Only the first rules found in the ancestry are applied
        return violations
class Rule(object):
    """Rule properties from the firewall rules definitions file.
    Also finds violations.
    """
    # The rule modes this engine understands; membership is validated in
    # from_config().
    VALID_RULE_MODES = frozenset([
        scanner_rules.RuleMode.WHITELIST,
        scanner_rules.RuleMode.BLACKLIST,
        scanner_rules.RuleMode.REQUIRED,
        scanner_rules.RuleMode.MATCHES,
    ])
def __init__(self,
rule_id=None,
match_policies=None,
verify_policies=None,
mode=scanner_rules.RuleMode.WHITELIST,
exact_match=True):
"""Initialize.
Args:
rule_id (str): The id of the rule.
match_policies (list): A list of policy dictionaries.
verify_policies (list): A list of policy dictionaries.
mode (RuleMode): The RuleMode for this rule.
exact_match (bool): Whether to exactly match required rules.
"""
self.id = rule_id
self._match_policies = match_policies
self._match_rules = None
self._exact_match = exact_match
self.mode = mode
self._verify_policies = verify_policies
self._verify_rules = None
def __hash__(self):
"""Makes a hash of the rule id.
Returns:
int: The hash of the rule id.
"""
return hash(self.id)
@classmethod
def from_config(cls, rule_def):
"""Creates a Rule from a config file.
Args:
rule_def (dict): A dictionary rule definition parsed from YAML config.
Returns:
Rule: A rule created from the rule definition.
Raises:
InvalidRuleDefinition: If rule is missing required fields.
"""
rule_id = rule_def.get('rule_id')
if not rule_id:
raise InvalidRuleDefinition('Rule requires rule_id')
mode = rule_def.get('mode')
if not mode:
raise InvalidRuleDefinition('Rule requires mode')
mode = mode.lower()
if mode not in cls.VALID_RULE_MODES:
raise InvalidRuleDefinition('Mode %s is not in valid modes: %s'
% (mode, cls.VALID_RULE_MODES))
match_policies = rule_def.get('match_policies', [])
verify_policies = rule_def.get('verify_policies', [])
if mode in ['whitelist', 'blacklist']:
if not match_policies or not verify_policies:
raise InvalidRuleDefinition(
'Whitelist and blacklist rules require match and verify '
'policies')
if mode in ['required', 'matches']:
if not match_policies:
raise InvalidRuleDefinition(
'Required and matches rules require match policies')
if verify_policies:
raise InvalidRuleDefinition(
'Required and matches rules cannot have verify policies')
return Rule(
rule_id=rule_id,
match_policies=match_policies,
verify_policies=verify_policies,
mode=mode,
exact_match=rule_def.get('exact_match', True),
)
@staticmethod
def create_rules(policies, validate=False):
"""Creates FirewallRules from policies.
Args:
policies (list): A list of policy dictionaries.
validate (bool): Whether to validate that this is a valid firewall
rule (one that can be passed to the API).
Returns:
list: A list of FirewallRule.
"""
match_rules = []
for policy in policies:
rule = firewall_rule.FirewallRule.from_dict(
policy, validate=validate)
match_rules.append(rule)
return match_rules
@property
def match_rules(self):
"""The FirewallRules used to filter policies.
Returns:
list: A list of FirewallRule.
"""
if not self._match_rules:
validate = self.mode in {
scanner_rules.RuleMode.REQUIRED,
scanner_rules.RuleMode.MATCHES
}
self._match_rules = self.create_rules(
self._match_policies, validate=validate)
return self._match_rules
@property
def verify_rules(self):
"""The FirewallRules used to check policies.
Returns:
list: A list of FirewallRule.
"""
if not self._verify_rules:
self._verify_rules = self.create_rules(self._verify_policies)
return self._verify_rules
def find_violations(self, firewall_policies):
"""Finds policy violations in a list of firewall policies.
Args:
firewall_policies (list): A list of FirewallRule.
Returns:
iterable: A generator of RuleViolations.
"""
if self.mode == scanner_rules.RuleMode.MATCHES:
violations = self._yield_match_violations(firewall_policies)
elif self.mode == scanner_rules.RuleMode.REQUIRED:
violations = self._yield_required_violations(firewall_policies)
elif self.mode == scanner_rules.RuleMode.WHITELIST:
violations = self._yield_whitelist_violations(firewall_policies)
elif self.mode == scanner_rules.RuleMode.BLACKLIST:
violations = self._yield_blacklist_violations(firewall_policies)
return violations
    def _yield_match_violations(self, firewall_policies):
        """Finds policies that don't match the required policy.

        Args:
            firewall_policies (list): A list of FirewallRules to check.

        Yields:
            iterable: A generator of RuleViolations.
        """
        inserts = set([])
        deletes = set([])
        # Expected rules missing from the policies -> must be inserted.
        for i, rule in enumerate(self.match_rules):
            if is_rule_exists_violation(rule, firewall_policies,
                                        self._exact_match):
                inserts.add('%s: rule %s' % (self.id, i))
        # Policies not found among the expected rules -> must be deleted.
        for policy in firewall_policies:
            if is_rule_exists_violation(policy, self.match_rules,
                                        self._exact_match):
                deletes.add(policy.name)
        # Anything flagged both ways is really an update.
        updates = inserts & deletes
        inserts, deletes = (inserts - updates, deletes - updates)
        if inserts or deletes or updates:
            yield self._create_violation(
                firewall_policies, 'FIREWALL_MATCHES_VIOLATION',
                recommended_actions={
                    'INSERT_FIREWALL_RULES': sorted(inserts),
                    'DELETE_FIREWALL_RULES': sorted(deletes),
                    'UPDATE_FIREWALL_RULES': sorted(updates),
                })
def _yield_required_violations(self, firewall_policies):
    """Finds missing policies that are required.

    Args:
        firewall_policies (list): A list of FirewallRules to check.

    Yields:
        iterable: A generator of RuleViolations.
    """
    for index, required_rule in enumerate(self.match_rules):
        if not is_rule_exists_violation(required_rule, firewall_policies,
                                        self._exact_match):
            continue
        yield self._create_violation(
            firewall_policies, 'FIREWALL_REQUIRED_VIOLATION',
            recommended_actions={
                'INSERT_FIREWALL_RULES': [
                    '%s: rule %s' % (self.id, index)
                ],
            })
def _yield_whitelist_violations(self, firewall_policies):
    """Finds policies that aren't whitelisted.

    Args:
        firewall_policies (list): A list of FirewallRules to check.

    Yields:
        iterable: A generator of RuleViolations.
    """
    for policy in firewall_policies:
        # Only policies that are a superset of some match rule apply.
        applies = any(policy > rule for rule in self.match_rules)
        if applies and is_whitelist_violation(self.verify_rules, policy):
            yield self._create_violation(
                [policy], 'FIREWALL_WHITELIST_VIOLATION',
                recommended_actions={
                    'DELETE_FIREWALL_RULES': [policy.name],
                })
def _yield_blacklist_violations(self, firewall_policies):
    """Finds blacklisted policies.

    Args:
        firewall_policies (list): A list of FirewallRules to check.

    Yields:
        iterable: A generator of RuleViolations.
    """
    for policy in firewall_policies:
        # Only policies that are a superset of some match rule apply.
        applies = any(policy > rule for rule in self.match_rules)
        if applies and is_blacklist_violation(self.verify_rules, policy):
            yield self._create_violation(
                [policy], 'FIREWALL_BLACKLIST_VIOLATION',
                recommended_actions={
                    'DELETE_FIREWALL_RULES': [policy.name],
                })
def _create_violation(self, policies, violation_type,
                      recommended_actions=None):
    """Creates a RuleViolation.

    Args:
        policies (list): A list of FirewallRule that violate the policy.
        violation_type (str): The type of violation.
        recommended_actions (list): The list of actions to take.

    Returns:
        RuleViolation: A RuleViolation for the given policies.

    Raises:
        ValueError: If no policies are passed in.
    """
    if not policies:
        raise ValueError('No policies in violation')
    policy_names = [policy.name for policy in policies]
    first_policy = policies[0]
    return RuleViolation(
        resource_name=','.join(policy_names),
        resource_type=resource_mod.ResourceType.FIREWALL_RULE,
        resource_id=first_policy.project_id,
        full_name=first_policy.full_name,
        rule_id=self.id,
        violation_type=violation_type,
        policy_names=policy_names,
        recommended_actions=recommended_actions,
        resource_data=[policy.as_json() for policy in policies],
    )
# Rule violation emitted by the firewall rules engine (see
# _create_violation for how each field is populated).
# resource_type (str): always ResourceType.FIREWALL_RULE.
# resource_id (str): project id of the first violating policy.
# full_name (str): full resource name of the first violating policy.
# rule_id: id of the rule that was violated.
# violation_type (str): e.g. FIREWALL_MATCHES_VIOLATION.
# policy_names (list): names of the violating firewall policies.
# recommended_actions (dict): INSERT/DELETE/UPDATE_FIREWALL_RULES actions.
# resource_data (list): JSON representations of the violating policies.
# resource_name (str): comma-joined names of the violating policies.
RuleViolation = namedtuple('RuleViolation',
                           ['resource_type', 'resource_id', 'full_name',
                            'rule_id', 'violation_type', 'policy_names',
                            'recommended_actions', 'resource_data',
                            'resource_name'])
def is_whitelist_violation(rules, policy):
    """Checks if the policy is not a subset of those allowed by the rules.

    Args:
        rules (list): A list of FirewallRule that the policy must be a subset
            of.
        policy (FirewallRule): A FirewallRule.

    Returns:
        bool: True if the policy is not a subset of any allowed rule
            (a violation), False otherwise.
    """
    # Idiomatic short-circuiting form of the original boolean-list loop.
    return not any(policy < rule for rule in rules)
def is_blacklist_violation(rules, policy):
    """Checks if the policy is a superset of any not allowed by the rules.

    Args:
        rules (list): A list of FirewallRule that the policy must not be a
            superset of.
        policy (FirewallRule): A FirewallRule.

    Returns:
        bool: True if the policy is a superset of one of the blacklisted
            rules (a violation), False otherwise.
    """
    # Idiomatic short-circuiting form of the original boolean-list loop.
    return any(policy > rule for rule in rules)
def is_rule_exists_violation(rule, policies, exact_match=True):
    """Checks if the rule is missing from the policies.

    Args:
        rule (FirewallRule): A FirewallRule.
        policies (list): A list of FirewallRule that must have the rule.
        exact_match (bool): Whether to match the rule exactly (==) or by
            equivalence (is_equilvalent).

    Returns:
        bool: True if the required rule is NOT in the policies (a
            violation), False otherwise.
    """
    if exact_match:
        return not any(policy == rule for policy in policies)
    # NOTE(review): is_equilvalent is the (misspelled) method name the
    # FirewallRule type actually exposes; keep it until the type is fixed.
    return not any(policy.is_equilvalent(rule) for policy in policies)
|
Members of the band Allister wearing chucks.
1 Allister Allister members sit together and pose for a band picture.
2 Allister Allister and some of their ex members take a picture together.
3 Allister Tim, Scott, Kyle, and Mike run together on a street.
4 Allister Tim, Scott, Kyle, and Mike sit on a bench.
5 Allister The band Allister changes their minivan's tire.
6 Allister An Allister member makes a phone call.
7 Allister The band members sit in a recording studio bouncing ideas off each other.
Casual and posed photos of the band Allister.
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from oslo_config import cfg
from zaqar.i18n import _
# Each *_pipeline option names the ordered list of intermediate stages
# that run before the storage driver's corresponding controller methods.
queue_pipeline = cfg.ListOpt(
    'queue_pipeline', default=[],
    help=_('Pipeline to use for processing queue operations. This pipeline '
           'will be consumed before calling the storage driver\'s controller '
           'methods.'))
message_pipeline = cfg.ListOpt(
    'message_pipeline', default=[],
    help=_('Pipeline to use for processing message operations. This pipeline '
           'will be consumed before calling the storage driver\'s controller '
           'methods.'))
claim_pipeline = cfg.ListOpt(
    'claim_pipeline', default=[],
    help=_('Pipeline to use for processing claim operations. This pipeline '
           'will be consumed before calling the storage driver\'s controller '
           'methods.'))
subscription_pipeline = cfg.ListOpt(
    'subscription_pipeline', default=[],
    help=_('Pipeline to use for processing subscription operations. This '
           'pipeline will be consumed before calling the storage driver\'s '
           'controller methods.'))
topic_pipeline = cfg.ListOpt(
    'topic_pipeline', default=[],
    help=_('Pipeline to use for processing topic operations. This '
           'pipeline will be consumed before calling the storage driver\'s '
           'controller methods.'))
# Config group all of the above options live under.
GROUP_NAME = 'storage'
# Every option registered/listed for the [storage] group.
ALL_OPTS = [
    queue_pipeline,
    message_pipeline,
    claim_pipeline,
    subscription_pipeline,
    topic_pipeline
]
def register_opts(conf):
    """Register all storage pipeline options under the [storage] group."""
    conf.register_opts(ALL_OPTS, group=GROUP_NAME)
def list_opts():
    """Return {group name: options} for oslo-config-generator discovery."""
    return {GROUP_NAME: ALL_OPTS}
|
Laos-Craft-Industry-Weave-Loom-Free-Image-Hand-Lab-9380.jpg is totally free to download without any copyright, so you can use it privately or commercially without any attribution; however, a link back to this post is appreciated. The uploader to PixCove has waived all rights to this image, so it is free to use anywhere under the Creative Commons Deed CC0.
To download Laos-Craft-Industry-Weave-Loom-Free-Image-Hand-Lab-9380.jpg right mouse click on the above image and choose “Save Image as….” from the menu and save it to your desktop.
Inside WPG navigate to your desktop and double click on Laos-Craft-Industry-Weave-Loom-Free-Image-Hand-Lab-9380.jpg then click “Auto adjust” in the menu.
Wrench-Tug-Craft-Industry-Spanners-Spanner-Vanadiu-4739.jpg is totally free to download without any copyright so you can use it privately or commercially without any attribution however a link back to this post is appreciated. The uploader to PixCove has waived all rights to this image, so is free to use anywhere under Creative Commons Deed CC0.
To download Wrench-Tug-Craft-Industry-Spanners-Spanner-Vanadiu-4739.jpg right mouse click on the above image and choose “Save Image as….” from the menu and save it to your desktop.
Inside WPG navigate to your desktop and double click on Wrench-Tug-Craft-Industry-Spanners-Spanner-Vanadiu-4739.jpg then click “Auto adjust” in the menu.
Valve-Regulator-Craft-Industry-Close-Open-Wheel-Fr-0158.jpg is totally free to download without any copyright so you can use it privately or commercially without any attribution however a link back to this post is appreciated. The uploader to PixCove has waived all rights to this image, so is free to use anywhere under Creative Commons Deed CC0.
To download Valve-Regulator-Craft-Industry-Close-Open-Wheel-Fr-0158.jpg right mouse click on the above image and choose “Save Image as….” from the menu and save it to your desktop.
Inside WPG navigate to your desktop and double click on Valve-Regulator-Craft-Industry-Close-Open-Wheel-Fr-0158.jpg then click “Auto adjust” in the menu.
Tools-Gears-Craft-Industry-Nail-Hammer-Builder-Fre-9762.jpg is totally free to download without any copyright so you can use it privately or commercially without any attribution however a link back to this post is appreciated. The uploader to PixCove has waived all rights to this image, so is free to use anywhere under Creative Commons Deed CC0.
To download Tools-Gears-Craft-Industry-Nail-Hammer-Builder-Fre-9762.jpg right mouse click on the above image and choose “Save Image as….” from the menu and save it to your desktop.
Inside WPG navigate to your desktop and double click on Tools-Gears-Craft-Industry-Nail-Hammer-Builder-Fre-9762.jpg then click “Auto adjust” in the menu.
|
# -*- coding: utf-8 -*-
# Copyright 2008-2015 Canonical
# Copyright 2015-2018 Chicharreros (https://launchpad.net/~chicharreros)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# For further info, check http://launchpad.net/magicicada-server
"""Test volume operations."""
from magicicadaprotocol import request
from magicicadaprotocol.volumes import RootVolume, UDFVolume, ShareVolume
from twisted.internet import defer
from magicicada.filesync.models import Share
from magicicada.filesync.services import get_storage_user
from magicicada.server.testing.testcase import TestWithDatabase
class TestListVolumes(TestWithDatabase):
    """Test list_volumes command."""

    def test_root_only(self):
        """Users have one volume by default: root."""
        @defer.inlineCallbacks
        def auth(client):
            """Authenticate and test."""
            yield client.dummy_authenticate("open sesame")
            root_node_id = yield client.get_root()
            req = yield client.list_volumes()
            self.assertEqual(len(req.volumes), 1)
            root = req.volumes[0]
            self.assertEqual(root.volume_id, None)
            self.assertEqual(str(root.node_id), root_node_id)
            self.assertEqual(root.generation, 0)
            self.assertEqual(root.free_bytes, self.usr0.free_bytes)
        return self.callback_test(auth, add_default_callbacks=True)

    def test_root_only_with_generation(self):
        """Test that the Root volume gets it generation."""
        @defer.inlineCallbacks
        def auth(client):
            """Authenticate and test."""
            # create a file in order to get a generation > 0
            self.usr0.root.make_file(u"filename_1")
            yield client.dummy_authenticate("open sesame")
            root_node_id = yield client.get_root()
            req = yield client.list_volumes()
            self.assertEqual(len(req.volumes), 1)
            root = req.volumes[0]
            self.assertEqual(root.volume_id, None)
            self.assertEqual(str(root.node_id), root_node_id)
            self.assertEqual(root.generation, 1)
            self.assertEqual(root.free_bytes, self.usr0.free_bytes)
        return self.callback_test(auth, add_default_callbacks=True)

    def test_one_share_offered(self):
        """Offered shares are not shown in volumes."""
        def check(req):
            """Check volumes response."""
            # root should be here only
            self.assertEqual(len(req.volumes), 1)
            root = req.volumes[0]
            self.assertEqual(root.volume_id, None)
            self.assertEqual(str(root.node_id), self._state.root)

        def auth(client):
            """Authenticate and test."""
            d = client.dummy_authenticate("open sesame")
            d.addCallback(lambda r: client.get_root())
            d.addCallback(self.save_req, "root")
            d.addCallback(lambda r: client.create_share(r, self.usr1.username,
                                                        u"n1", Share.VIEW))
            d.addCallback(lambda _: client.list_volumes())
            d.addCallback(check)
            d.addCallbacks(client.test_done, client.test_fail)
        return self.callback_test(auth)

    def _create_share(self, _, accept=False, dead=False, from_id=None):
        """Create the share to me."""
        if from_id is None:
            from_id = self.usr1.id
        fromusr = get_storage_user(from_id)
        node = fromusr.root.load()
        share = node.share(self.usr0.id, u"name", readonly=True)
        self._state.subtree_id = node.id
        if accept:
            self.usr0.get_share(share.id).accept()
            self._state.share_id = share.id
        if dead:
            share.delete()
        return share

    def test_share_to_me_no_accept(self):
        """A share offered to me should not be in the list if not accepted."""
        def check(req):
            """Check volumes response."""
            # root should be here only
            self.assertEqual(len(req.volumes), 1)
            root = req.volumes[0]
            self.assertEqual(root.volume_id, None)
            self.assertEqual(str(root.node_id), self._state.root)

        def auth(client):
            """Authenticate and run the test."""
            d = client.dummy_authenticate("open sesame")
            d.addCallback(lambda r: client.get_root())
            d.addCallback(self.save_req, "root")
            # create the share
            d.addCallback(self._create_share)
            # list the volumes and check
            d.addCallback(lambda _: client.list_volumes())
            d.addCallback(check)
            d.addCallbacks(client.test_done, client.test_fail)
        return self.callback_test(auth)

    def test_share_to_me_accepted(self):
        """A share offered to me should be in the volumes list if accepted."""
        @defer.inlineCallbacks
        def auth(client):
            """Authenticate and run the test."""
            yield client.dummy_authenticate("open sesame")
            client_root = yield client.get_root()
            # create the share
            _share = self._create_share(client_root, accept=True)
            # create a file in order to get a generation > 0
            self.usr0.root.make_file(u"filename_1")
            # list the volumes and check
            req = yield client.list_volumes()
            # check volumes response.
            self.assertEqual(len(req.volumes), 2)
            # test root
            root = [v for v in req.volumes if isinstance(v, RootVolume)][0]
            self.assertEqual(root.volume_id, None)
            self.assertEqual(str(root.node_id), client_root)
            self.assertEqual(root.generation, 1)
            self.assertEqual(root.free_bytes, self.usr0.free_bytes)
            # test share
            share = [v for v in req.volumes if isinstance(v, ShareVolume)][0]
            self.assertEqual(share.volume_id, _share.id)
            self.assertEqual(share.node_id, _share.root_id)
            self.assertEqual(share.direction, "to_me")
            self.assertEqual(share.share_name, "name")
            self.assertEqual(share.other_username, self.usr1.username)
            self.assertEqual(share.accepted, True)
            self.assertEqual(share.access_level, Share.VIEW)
            self.assertEqual(share.free_bytes, self.usr1.free_bytes)
        return self.callback_test(auth, add_default_callbacks=True)

    def test_share_to_me_accepted_with_generation(self):
        """A share offered to me should be in the volumes list if accepted."""
        @defer.inlineCallbacks
        def auth(client):
            """Authenticate and run the test."""
            yield client.dummy_authenticate("open sesame")
            client_root = yield client.get_root()
            # create the share
            _share = self._create_share(client_root, accept=True)
            # increase the generation of the share
            self.usr1.root.make_file(u"filename_1")
            # create a file in order to get a generation > 0
            self.usr0.root.make_file(u"filename_1")
            # list the volumes and check
            req = yield client.list_volumes()
            # check volumes response.
            self.assertEqual(len(req.volumes), 2)
            # test root
            root = [v for v in req.volumes if isinstance(v, RootVolume)][0]
            self.assertEqual(root.volume_id, None)
            self.assertEqual(str(root.node_id), client_root)
            self.assertEqual(root.generation, 1)
            self.assertEqual(root.free_bytes, self.usr0.free_bytes)
            # test share
            share = [v for v in req.volumes if isinstance(v, ShareVolume)][0]
            self.assertEqual(share.volume_id, _share.id)
            self.assertEqual(share.node_id, _share.root_id)
            self.assertEqual(share.direction, "to_me")
            self.assertEqual(share.share_name, "name")
            self.assertEqual(share.other_username, self.usr1.username)
            self.assertEqual(share.accepted, True)
            self.assertEqual(share.access_level, Share.VIEW)
            self.assertEqual(share.free_bytes, self.usr1.free_bytes)
            self.assertEqual(share.generation, 1)
        return self.callback_test(auth, add_default_callbacks=True)

    def test_udf(self):
        """An UDF should be in the volume list."""
        @defer.inlineCallbacks
        def auth(client):
            """Authenticate and test."""
            # increase the generation in the root
            self.usr0.root.make_file(u"filename_1")
            yield client.dummy_authenticate("open sesame")
            client_root = yield client.get_root()
            # create the udf
            client_udf = yield client.create_udf(u"~/ñ", u"foo")
            # increase the generation in the udf
            self.usr0.volume(client_udf.volume_id).root.make_file(u"file_1")
            # list the volumes and check
            req = yield client.list_volumes()
            # check
            self.assertEqual(len(req.volumes), 2)
            # test root
            root = [v for v in req.volumes if isinstance(v, RootVolume)][0]
            self.assertEqual(root.volume_id, None)
            self.assertEqual(str(root.node_id), client_root)
            self.assertEqual(root.generation, 1)
            self.assertEqual(root.free_bytes, self.usr0.free_bytes)
            # test udf
            udf = [v for v in req.volumes if isinstance(v, UDFVolume)][0]
            self.assertEqual(str(udf.volume_id), client_udf.volume_id)
            self.assertEqual(str(udf.node_id), client_udf.node_id)
            self.assertEqual(udf.suggested_path, u"~/ñ/foo")
            self.assertEqual(udf.generation, 1)
            self.assertEqual(udf.free_bytes, self.usr0.free_bytes)
        return self.callback_test(auth, add_default_callbacks=True)

    def test_shares_to_me_accepted_dead(self):
        """A dead share offered to me should not be in the list."""
        def check(req):
            """Check volumes response."""
            # root should be here only
            self.assertEqual(len(req.volumes), 1)
            root = req.volumes[0]
            self.assertEqual(root.volume_id, None)
            self.assertEqual(str(root.node_id), self._state.root)

        def auth(client):
            """Authenticate and run the test."""
            d = client.dummy_authenticate("open sesame")
            d.addCallback(lambda r: client.get_root())
            d.addCallback(self.save_req, "root")
            # create the share
            d.addCallback(self._create_share, accept=True, dead=True)
            # list the volumes and check
            d.addCallback(lambda _: client.list_volumes())
            d.addCallback(check)
            d.addCallbacks(client.test_done, client.test_fail)
        return self.callback_test(auth)

    def test_udf_dead(self):
        """A dead UDF should not be in the volume list."""
        def check(req):
            """Check volumes response."""
            # root should be here only
            self.assertEqual(len(req.volumes), 1)
            root = req.volumes[0]
            self.assertEqual(root.volume_id, None)
            self.assertEqual(str(root.node_id), self._state.root)

        def auth(client):
            """Authenticate and test."""
            d = client.dummy_authenticate("open sesame")
            d.addCallback(lambda r: client.get_root())
            d.addCallback(self.save_req, "root")
            # create the udf
            d.addCallback(lambda _: client.create_udf(u"~/ñ", u"foo"))
            d.addCallback(lambda r: client.delete_volume(r.volume_id))
            # list the volumes and check
            d.addCallback(lambda _: client.list_volumes())
            d.addCallback(check)
            d.addCallbacks(client.test_done, client.test_fail)
        return self.callback_test(auth)

    def test_mixed(self):
        """Mix of UDFs and shares, dead and alive."""
        def check(req):
            """Check volumes response."""
            self.assertEqual(len(req.volumes), 3)
            # test root
            root = [v for v in req.volumes if isinstance(v, RootVolume)][0]
            self.assertEqual(root.volume_id, None)
            self.assertEqual(str(root.node_id), self._state.root)
            # test udf
            udf = [v for v in req.volumes if isinstance(v, UDFVolume)][0]
            self.assertEqual(str(udf.volume_id), self._state.udf.volume_id)
            self.assertEqual(str(udf.node_id), self._state.udf.node_id)
            self.assertEqual(udf.suggested_path, u"~/ñ/foo")
            self.assertEqual(udf.free_bytes, self.usr0.free_bytes)
            # test share
            share = [v for v in req.volumes if isinstance(v, ShareVolume)][0]
            self.assertEqual(share.volume_id, self._state.share_id)
            self.assertEqual(share.node_id, self._state.subtree_id)
            self.assertEqual(share.direction, "to_me")
            self.assertEqual(share.share_name, "name")
            self.assertEqual(share.other_username, self.usr2.username)
            self.assertEqual(share.accepted, True)
            self.assertEqual(share.access_level, Share.VIEW)
            self.assertEqual(share.free_bytes, self.usr1.free_bytes)

        @defer.inlineCallbacks
        def auth(client):
            """Authenticate and test."""
            # BUG FIX: the deferred returned by dummy_authenticate was not
            # yielded, so the rest of the test could race the authentication.
            yield client.dummy_authenticate("open sesame")
            root = yield client.get_root()
            self.save_req(root, "root")
            # create two udfs, kill one
            udf = yield client.create_udf(u"~/ñ", u"foo")
            self.save_req(udf, "udf")
            result = yield client.create_udf(u"~/moño", u"groovy")
            yield client.delete_volume(result.volume_id)
            # create two shares, one dead (the second one should be the live
            # one because the helper function stores data for comparison)
            self._create_share(None, accept=True, dead=True)
            self._create_share(None, accept=True, from_id=self.usr2.id)
            # list the volumes and check
            req = yield client.list_volumes()
            check(req)
        return self.callback_test(auth, add_default_callbacks=True)
class TestDataWithVolumes(TestWithDatabase):
    """Tests data handling in the context of several volumes."""

    def test_same_names(self):
        """Be able to have same names in different roots."""
        def auth(client):
            """Authenticate and test."""
            d = client.dummy_authenticate("open sesame")
            d.addCallback(lambda _: client.get_root())
            # create a subdir in root
            d.addCallback(lambda root: client.make_dir(request.ROOT,
                                                       root, "subdir"))
            # create the udf, with a dir of same name; this must not clash
            # with the dir of the same name in the root volume
            d.addCallback(lambda _: client.create_udf(u"~", u"myudf"))
            d.addCallback(lambda r: client.make_dir(r.volume_id,
                                                    r.node_id, "subdir"))
            d.addCallbacks(client.test_done, client.test_fail)
        return self.callback_test(auth)

    def test_unlink_same_path(self):
        """Unlink with similar paths, should work ok."""
        def auth(client):
            """Authenticate and test."""
            d = client.dummy_authenticate("open sesame")
            d.addCallback(lambda _: client.get_root())
            # create a subdir in root (saved as "dir_del" to unlink later)
            d.addCallback(lambda root: client.make_dir(request.ROOT,
                                                       root, "tdir1"))
            d.addCallback(self.save_req, "dir_del")
            # create the udf, with two subdirs ("tdir1" and "tdir2" nested)
            d.addCallback(lambda _: client.create_udf(u"~", u"myudf"))
            d.addCallback(self.save_req, "udf")
            d.addCallback(lambda r: client.make_dir(r.volume_id,
                                                    r.node_id, "tdir1"))
            d.addCallback(lambda r: client.make_dir(self._state.udf.volume_id,
                                                    r.new_id, "tdir2"))
            # delete one dir in one volume; the same-named dir in the udf
            # must be unaffected
            d.addCallback(lambda _: client.unlink(request.ROOT,
                                                  self._state.dir_del.new_id))
            d.addCallbacks(client.test_done, client.test_fail)
        return self.callback_test(auth)
class TestVolumesBasic(TestWithDatabase):
    """Test basic operations on volumes."""

    def test_delete_root(self):
        """Test deletion of root volume."""
        def auth(client):
            """Authenticate and test."""
            d = client.dummy_authenticate("open sesame")
            d.addCallback(lambda _: client.delete_volume(request.ROOT))

            def check(failure):
                """Checks the error returned."""
                # deleting the root volume must be rejected by the server
                self.assertIsInstance(failure.value,
                                      request.StorageRequestError)
                self.assertEqual(str(failure.value), 'NO_PERMISSION')
                client.test_done(True)
            # success means the test failed: the errback path is expected
            d.addCallbacks(client.test_fail, check)
        return self.callback_test(auth)

    def test_delete_bad_volume_id(self):
        """Test deletion of bad volume id."""
        def auth(client):
            """Authenticate and test."""
            d = client.dummy_authenticate("open sesame")
            d.addCallback(lambda _: client.delete_volume('foo bar'))

            def check(failure):
                """Checks the error returned."""
                # an unknown volume id must produce DOES_NOT_EXIST
                self.assertIsInstance(failure.value,
                                      request.StorageRequestError)
                self.assertEqual(str(failure.value), 'DOES_NOT_EXIST')
                client.test_done(True)
            # success means the test failed: the errback path is expected
            d.addCallbacks(client.test_fail, check)
        return self.callback_test(auth)
|
Privelio from Evolis is a system capable of instantly issuing credit and debit cards from pre-printed or blank cards.
The Evolis Mosaic SDK and the embedded PC card allow secure communication with existing information systems.
From complete printing of cards in color to monochrome personalization of pre-printed cards, Evolis solutions produce a high-resolution graphic finish in just a couple of seconds.
Privelio provides dedicated encoding features for full payment card personalization (magnetic stripe, EMV contact and contactless chips).
Personalization ribbons, blank card stocks and rejected cards are all secure through the electromechanical locking system (with single or double access control) of Privelio.
By removing the need to destroy printing ribbons, no sensitive data can be compromised, even in the event of an attempted theft of the printing ribbons, and their disposal is made easier.
The built-in PC card of Privelio systems makes the flow of card personalization data inaccessible to ill-intentioned individuals.
The bezel for manual single insertion allows you to satisfy specific demands and lets you offer your customers a personalized card design.
The Evolis Mosaic server allows Privelio to be managed on a network without requiring the installation of a computer next to the system.
Upgraded with the XT embossing module (as an option), Privelio can generate embossed characters and indenting on cards depending on the finish that you require.
Learn more about Privelio XT system.
The Evolis Privelio system is fully customizable, simple to install, and simple to operate. Financial institutions now have a solution that requires less training for their staff.
Evolis printers have already been qualified and integrated by many independent instant issuance software providers. Compatible with all conventional smartcards, our solutions can interface easily with your systems thanks to the SDKs provided.
|
# Copyright (c) Byron Galbraith and Unlock contributors.
# All rights reserved.
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of Unlock nor the names of its contributors may be used
# to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from unlock.state.state import UnlockState, TrialState
class FacialEmgDiagnosticState(UnlockState):
    """Diagnostic state for facial EMG detection.

    Runs a timed detection session: a keyboard selection starts the timer
    and resets the classifier, then incoming decisions/selections update
    the displayed text until the timer expires.
    """
    # Labels displayed when the corresponding decision/selection occurs.
    # NOTE(review): 'UP' is upper-cased while the others are title-case —
    # confirm this is the intended display text.
    Up = 'UP'
    Down = 'Down'
    Left = 'Left'
    Right = 'Right'
    Selection = 'Selection'

    def __init__(self, timer, classifier):
        super(FacialEmgDiagnosticState, self).__init__()
        self.timer = timer
        self.classifier = classifier
        # Text currently shown to the user; updated by process_command.
        self.text = ''

    def start(self):
        """Begin a detection session: reset timer and classifier."""
        self.timer.begin_timer()
        self.classifier.reset()
        self.state = True
        self.text = 'Detecting'

    def stop(self):
        """End the session and mark the state inactive."""
        self.timer.reset()
        self.state = False

    def process_command(self, command):
        """Update state from a command; starts on keyboard selection."""
        if command.keyboard_selection:
            self.start()
            return
        # Ignore everything while no session is running.
        if not self.state:
            return
        self.timer.update_timer(command.delta)
        if command.decision is not None:
            self.handle_decision(command.decision)
        elif command.selection is not None:
            self.text = FacialEmgDiagnosticState.Selection
        if self.timer.is_complete():
            self.stop()

    def handle_decision(self, decision):
        """Map a detector decision onto the display label.

        NOTE(review): FacialEMGDetector is not imported in this module —
        confirm it is in scope at runtime, otherwise this raises NameError.
        """
        if decision == FacialEMGDetector.UpDecision:
            self.text = FacialEmgDiagnosticState.Up
        elif decision == FacialEMGDetector.DownDecision:
            self.text = FacialEmgDiagnosticState.Down
        elif decision == FacialEMGDetector.LeftDecision:
            self.text = FacialEmgDiagnosticState.Left
        elif decision == FacialEMGDetector.RightDecision:
            self.text = FacialEmgDiagnosticState.Right
class VepDiagnosticState(UnlockState):
    """
    The diagnostic model supports two modes of operation: continuous and
    discrete. In the continuous mode, the stimuli is always flickering and the
    scope is always updating. In the discrete mode, the stimulus presents
    itself for a fixed amount of time, then the scope and/or decoder metrics
    are printed. The discrete mode trials are triggered by a selection event
    e.g. space bar press.
    """
    # Decision codes handled by handle_decision.
    FrequencyUp = 1
    FrequencyDown = 2
    ChannelDown = 3
    ChannelUp = 4

    def __init__(self, scope, stimuli, decoders, frequencies):
        super(VepDiagnosticState, self).__init__()
        self.scope = scope
        self.stimuli = stimuli
        self.frequencies = frequencies
        # Index into self.frequencies of the currently selected frequency.
        self.cursor = 0
        # Half-period of the flicker (stimulus toggles twice per cycle).
        rate = 1 / (self.frequencies[self.cursor] * 2)
        self.stimuli.model.stimuli[0].time_state.set_duration(rate)
        self.decoders = decoders
        if decoders is None:
            self.decoders = list()
        for decoder in self.decoders:
            # this should be pushed into the decoder as an object reference
            # so changing it doesn't require a push-style update list this
            decoder.target_label = self.cursor
        self.trial_count = 0
        self.feedback_change = False
        self.feedback_results = list()

    def trial_start(self):
        """Start the stimuli and decoders and reset feedback."""
        print ("TRIAL START ")
        self.stimuli.model.start()
        for decoder in self.decoders:
            decoder.start()
        self.feedback_change = True
        self.feedback_results = list()

    def trial_stop(self):
        """Stop the stimuli and decoders; feedback will be displayed."""
        print ("TRIAL STop")
        self.stimuli.model.stop()
        for decoder in self.decoders:
            decoder.stop()
        self.feedback_change = True

    def process_command(self, command):
        """Abstract: subclasses define the continuous/discrete behavior.

        Raises:
            Exception: always, when called on the base class.
        """
        raise Exception("Base class")

    def handle_decision(self, decision):
        """Adjust frequency cursor or scope channel from a decision code.

        BUG FIX: these comparisons previously referenced an undefined
        name `DiagnosticState`, raising NameError; the decision constants
        are defined on this class.
        """
        print ("HANDLE DECISION")
        if decision == VepDiagnosticState.FrequencyUp:
            self.cursor += 1
            if self.cursor >= len(self.frequencies):
                self.cursor = len(self.frequencies) - 1
            rate = 1 / (self.frequencies[self.cursor] * 2)
            self.stimuli.model.stimuli[0].time_state.set_duration(rate)
            for decoder in self.decoders:
                decoder.target_label = self.cursor
        elif decision == VepDiagnosticState.FrequencyDown:
            self.cursor -= 1
            if self.cursor < 0:
                self.cursor = 0
            rate = 1 / (self.frequencies[self.cursor] * 2)
            self.stimuli.model.stimuli[0].time_state.set_duration(rate)
            for decoder in self.decoders:
                decoder.target_label = self.cursor
        elif decision == VepDiagnosticState.ChannelDown:
            if self.scope is not None:
                self.scope.model.change_display_channel(-1)
        elif decision == VepDiagnosticState.ChannelUp:
            if self.scope is not None:
                self.scope.model.change_display_channel(1)

    def update_decoders(self, command):
        """Feed the command to every decoder, collecting feedback strings."""
        print ("UPDATE DECODERS")
        for decoder in self.decoders:
            result = decoder.classify(command)
            if result is not None:
                self.feedback_results.append(result)

    def get_state(self):
        """Return (changed, text) for display; clears the change flag."""
        print("GET STATE")
        if self.feedback_change:
            text = ','.join(self.feedback_results)
            if text != '':
                text = '[%.1f Hz] - %s' % (self.frequencies[self.cursor], text)
            self.feedback_change = False
            return True, text
        else:
            return False, ''
class ContinuousVepDiagnosticState(VepDiagnosticState):
    """Continuous-mode diagnostic: scope and stimuli update on every event."""

    def __init__(self, scope, stimuli, frequencies, decoders):
        super(ContinuousVepDiagnosticState, self).__init__(
            scope, stimuli, decoders, frequencies)

    def process_command(self, command):
        """
        For continuous usage, allow changes to the scope and stimuli
        frequencies at any event. The stimuli can also be started and stopped
        by the user.
        """
        if command.selection:
            # Toggle: a selection starts a stopped trial and stops a
            # running one.
            stopped = self.stimuli.model.state.is_stopped()
            if stopped:
                self.trial_start()
            else:
                self.trial_stop()
        if command.decision is not None:
            self.handle_decision(command.decision)
        self.update_decoders(command)
class DiscreteVepDiagnosticState(VepDiagnosticState):
    """Discrete-trial variant: user changes are only honored between trials."""
    def __init__(self, scope, stimuli, decoders, frequencies):
        super(DiscreteVepDiagnosticState, self).__init__(scope, stimuli, decoders, frequencies)
    def process_command(self, command):
        """
        For discrete usage, only allow changes when a trial is not underway.
        Handle the transition between trial and output.
        """
        print("PROCESS COMMAND")
        # While the stimuli are running, only trial-stop transitions and decoder
        # updates are allowed; user commands are ignored until the trial ends.
        if not self.stimuli.model.state.is_stopped():
            print("HACK STIMULLI state is stopped")
            if self.trial_count == 0:
                print(" trial count == 0")
                # this is a hack to get around the current setup where the
                # stimuli starts immediately
                self.trial_stop()
            elif self.stimuli.model.state.last_change == TrialState.TrialExpiry:
                print (" Trial expiry ")
                # there is an occasional delay apparently that can happen when
                # using actual devices which causes this state to be missed
                # i.e. it goes to rest, then the next rest state, resulting in
                # an Unchanged response, before this check happens. A better
                # method would preserve the value until it was queried.
                self.trial_stop()
                self.update_decoders(command)
            else:
                print (" ELSE UPDATE DECODERS ")
                self.update_decoders(command)
            return #True
        # Between trials: a selection starts the next trial, a decision retunes.
        if command.selection:
            print ("Command selection ")
            self.trial_count += 1
            self.trial_start()
        if command.decision is not None:
            print("Command . decision not none")
            self.handle_decision(command.decision)
        #return True
|
When will be Goodnight, Beantown next episode air date? Is Goodnight, Beantown renewed or cancelled? Where to countdown Goodnight, Beantown air dates? Is Goodnight, Beantown worth watching?
Matt Cassidy and Jennifer Barnes are reluctantly paired to anchor the news at a fictional TV station in Boston, Massachusetts, due to the sudden ratings drop.
EpisoDate.com is your TV show guide to Countdown Goodnight, Beantown Episode Air Dates and to stay in touch with Goodnight, Beantown's next episode air date and your other favorite TV shows. Add the shows you like to a "Watchlist" and let the site take it from there.
|
import numpy as np
import pytest
import pandas as pd
import pandas.util.testing as tm
from pandas.core.arrays import ExtensionArray
from pandas.core.dtypes.common import is_extension_array_dtype
from pandas.core.dtypes import dtypes
class DummyDtype(dtypes.ExtensionDtype):
    """Minimal ExtensionDtype stub used only to exercise dtype checks."""
    pass
class DummyArray(ExtensionArray):
    """Thin ExtensionArray wrapper around a NumPy array, for dtype tests."""

    def __init__(self, data):
        self.data = data

    def __array__(self, dtype):
        return self.data

    @property
    def dtype(self):
        return DummyDtype()

    def astype(self, dtype, copy=True):
        # we don't support anything but a single dtype
        if isinstance(dtype, DummyDtype):
            return type(self)(self.data) if copy else self
        return np.array(self, dtype=dtype, copy=copy)
class TestExtensionArrayDtype(object):
    """Checks of is_extension_array_dtype over assorted positive/negative values."""
    @pytest.mark.parametrize('values', [
        pd.Categorical([]),
        pd.Categorical([]).dtype,
        pd.Series(pd.Categorical([])),
        DummyDtype(),
        DummyArray(np.array([1, 2])),
    ])
    def test_is_extension_array_dtype(self, values):
        # Extension arrays, their dtypes, and Series backed by them all qualify.
        assert is_extension_array_dtype(values)
    @pytest.mark.parametrize('values', [
        np.array([]),
        pd.Series(np.array([])),
    ])
    def test_is_not_extension_array_dtype(self, values):
        # Plain NumPy-backed containers are not extension arrays.
        assert not is_extension_array_dtype(values)
def test_astype():
    """astype to a non-dummy dtype falls through to a plain NumPy conversion."""
    arr = DummyArray(np.array([1, 2, 3]))
    expected = np.array([1, 2, 3], dtype=object)
    for target in (object, 'object'):
        tm.assert_numpy_array_equal(arr.astype(target), expected)
def test_astype_no_copy():
    """astype with the array's own dtype honors the copy flag."""
    arr = DummyArray(np.array([1, 2, 3], dtype=np.int64))
    assert arr.astype(arr.dtype, copy=False) is arr
    assert arr.astype(arr.dtype) is not arr
@pytest.mark.parametrize('dtype', [
    dtypes.DatetimeTZDtype('ns', 'US/Central'),
])
def test_is_not_extension_array_dtype(dtype):
    # NOTE(review): in later pandas versions DatetimeTZDtype *is* an
    # ExtensionDtype, so this expectation is version-dependent -- confirm
    # against the pandas version this suite targets.
    assert not isinstance(dtype, dtypes.ExtensionDtype)
    assert not is_extension_array_dtype(dtype)
@pytest.mark.parametrize('dtype', [
    dtypes.CategoricalDtype(),
    dtypes.IntervalDtype(),
])
def test_is_extension_array_dtype(dtype):
    # Categorical and Interval dtypes are first-class extension dtypes.
    assert isinstance(dtype, dtypes.ExtensionDtype)
    assert is_extension_array_dtype(dtype)
|
I'm a little confused fellas. I check to see what brand my RAM was and noticed it was Hynix. Now can i buy Hynix RAM at local electronic stores or is it a 3rd party brand that creates RAM for bigger brands(Kingston, Crucial, etc.)? Thanks all.
Hynix is a Korean RAM company that recently had a 57% tariff placed on them by the US. Most stores probably won't sell it because it's too expensive. From what I've heard, the company was close to bankruptcy. They may be out of business by now. Good luck anyway; check eBay.
|
from dolfin import *
import triangulate as trig
from itertools import *
def mesh_creator(triangles):
    # Build a 2-D dolfin Mesh from a list of triangles, where each triangle is
    # a sequence of 2-D points. Duplicate points are collapsed to one vertex.
    # NOTE(review): 'print' statements and itertools.izip make this Python-2-only.
    #flatten triangle list
    print "triangles",triangles[0:1]
    points = trig.flat(triangles)
    print "points",points[0:3]
    #create mesh and editor
    mesh = Mesh()
    editor = MeshEditor()
    # "triangle" cells; topological and geometric dimension both 2
    editor.open(mesh,"triangle",2,2)
    point_ids = {}
    #put points into hashtable,add them as vertices
    for point in points:
        try:
            # keyed by coordinate tuple so duplicate points map to one vertex id
            point_ids[tuple(point)] = 0
        except:
            print point,type(point)
            raise
    print len(points),len(point_ids)
    #Init Points, now that we know how many
    #editor.initCells(len(triangles))
    editor.init_cells(len(triangles))
    #editor.initVertices(len(point_ids))
    editor.init_vertices(len(point_ids))
    # assign each unique point a sequential id and register it with the editor
    for point,id in izip(point_ids,count()):
        point_ids[point] = id
        editor.add_vertex(id,*point)
        #editor.addVertex(id,*point)
    #now add cells
    for tri,id in izip(triangles,count()):
        # map each triangle corner back to its vertex id
        tri_id = map(lambda p: point_ids[tuple(p)],tri)
        editor.add_cell(id,*tri_id)
    editor.close()
    print "Mesh triangles:points",len(triangles),":",len(mesh.coordinates())
    return mesh
|
I-Lipo Fat Reduction, Lose Inches in 20 minutes! Buy 5 Get 1 Free!
I-Lipo is a new revolutionary system that uses low level lasers for smoothing cellulite, fat reduction, and body shaping treatments. The i-lipo uses photobiomodulation to stimulate the body’s natural process for releasing stored content in the adipose cells.
Every day, the human body is storing excess calories from your diet in the adipose tissue. The brain is in control of when the contents are released, and then metabolized into energy. You can compare the adipose cells to a re-chargeable battery, sometimes used and sometimes stored up depending on diet and exercise. I-Lipo is used to trigger the release of the contents, without exercise, and is able to target the precise stubborn areas.
I-Lipo is non-invasive with no pain, no needles, and no downtime. Patients can expect results of losing 1-2 dress sizes during their course of treatments. I-Lipo is best paired with a change in lifestyle to maintain your results, typically just simple diet and exercise.
The low level laser used in I-Lipo creates a photobiomodulation effect, altering the cell's chemistry and triggering a release of a lipase enzyme that can break down triglyceride molecules into fatty acids and glycerol molecules. This process now makes them small enough to exit through your pores and be used as fuel by your metabolism. Naturally, your lymphatic system carries the cell contents and removes them. I-Lipo maximizes the body’s lymphatic flow through stimulation of the nodes in your treatment area. It is recommended to exercise following treatment in order to metabolize the newly released fatty acids and glycerol in order to completely remove them from your body’s fat storage.
The I-Lipo treatment is quick and simple, being completed in less than 30 minutes.
The course of treatment should be approx 8 sessions, performed twice a week so body reshaping can be complete in four weeks. Only 1 area is treated per day in order to maximize the burning off of the contents. If there is an excess of content in need of being burned off then there is a risk that it will be stored back as fat again. Once a patient has completed a course of treatments they need to maintain a healthy lifestyle to keep their results. Nutritional supplements would also be beneficial with the I-Lipo treatments to maintain and encourage metabolic activity and to support a healthier lifestyle.
|
"""
Converts a .mat file to the appropriate JSON format
"""
import scipy.io as sio
import os
import json
def convert_data_set(lfile):
    """
    Converts a .mat file to the appropriate JSON format.

    Loads *lfile* (a MATLAB .mat file next to this script), extracts the range
    measurements, and writes them with placeholder fields to a JSON file with
    the same base name.
    """
    directory = os.path.dirname(os.path.abspath(__file__))
    try:
        data = sio.loadmat(lfile)
    except Exception:
        # Narrowed from a bare except; the original message also had a
        # duplicated word ("check check") and a missing space.
        raise Exception(("Could not load file, check that %s exists in "
                         "the directory %s.") % (lfile, directory))
    # NOTE(review): the nested indices below assume the specific MATLAB struct
    # layout of the original recordings ('data' struct, field 0 = ranges
    # matrix, field 10 = description) -- confirm before reusing elsewhere.
    output = {  # fixed typo: was 'ouput'
        'info': '%s - %s' % (data['__header__'], data['data'][0][0][10][0]),
        'dimensions': {
            'numberOfAnchors': data['data'][0][0][0].shape[0],
            'numberOfMeasurements': data['data'][0][0][0].shape[1]
        },
        'ranges': data['data'][0][0][0].tolist(),
        'acceleration': None,
        'rates': None,
        'thrust': None,
        'torques': None,
        'times': None
    }
    try:
        # Saves data in the JSON format in the filename in which it was loaded
        path = os.path.join(directory, lfile[:-4] + '.json')
        with open(path, 'w') as outfile:
            json.dump(output, outfile, separators=(',', ':'), sort_keys=True, indent=4)
    except (IOError, OSError):
        # Narrowed from a bare except to filesystem errors only.
        raise Exception('Could not save the file')
if __name__ == "__main__":
    loadFileName = 'data4.mat'
    # Bug fix: the original called convertDataSet(), which is not defined
    # anywhere in this module; the function above is named convert_data_set,
    # so running the script raised NameError before doing any work.
    convert_data_set(loadFileName)
|
Home Heater Home Heater 300 Cheap Charlie Shop Heater Shop Heater 300 FGB Step Top FGB Bayview IGB Bayview Classic Insert. Quiet Running Maintenance Free No Oiling Needed. FASCO Factory Replacement C Frame Motor. Umbrella Motor Cooling Fin Extends Motor Life.
DIY Installer Friendly 6 Screws Quick Change Out Design. Balanced 12 Fin Steel Impeller 1.0 Amp 115v 60Hz. 6 Mounting Flange with High Heat Vibration Free Gasket. This is an aftermarket part manufactured by NBK.
The item "DANSON GLOW BOY Pellet Stove Combustion Exhaust Fan Blower Kit. KS5020-1040" is in sale since Sunday, May 28, 2017. This item is in the category "Home & Garden\Home Improvement\Heating, Cooling & Air\Fireplaces & Stoves\Replacement Parts". The seller is "24_7partsinc" and is located in New York, New York. This item can be shipped to United States, Canada, United Kingdom, Denmark, Romania, Slovakia, Bulgaria, Czech republic, Finland, Hungary, Latvia, Lithuania, Malta, Estonia, Australia, Greece, Portugal, Cyprus, Slovenia, Japan, China, Sweden, South Korea, Indonesia, Taiwan, South africa, Thailand, Belgium, France, Hong Kong, Ireland, Netherlands, Poland, Spain, Italy, Germany, Austria, Israel, Mexico, New Zealand, Philippines, Singapore, Switzerland, Norway, Saudi arabia, Ukraine, United arab emirates, Qatar, Kuwait, Bahrain, Croatia, Malaysia, Brazil, Chile, Colombia, Costa rica, Dominican republic, Panama, Trinidad and tobago, Guatemala, El salvador, Honduras, Jamaica.
|
import sys
from config import *
from connection import connect
def deploy():
    """Download the GeoNature install_all script and settings, customize them, and run the installer."""
    conn = connect()
    base = f"https://raw.githubusercontent.com/PnX-SI/GeoNature/{GN_VERSION}/install/install_all"
    conn.run(f"wget {base}/install_all.ini")
    conn.run(f"wget {base}/install_all.sh")
    # Rewrite the settings file in place, one key per sed invocation.
    substitutions = (
        ("my_url", DOMAIN),
        ("geonature_release", GN_VERSION),
        ("install_default_dem", "false"),
        ("drop_geonaturedb", DROP_DB),
    )
    for key, value in substitutions:
        conn.run(f"sed -i 's/{key}=.*$/{key}={value}/g' install_all.ini")
    conn.run("touch install_all.log")
    conn.run("chmod +x install_all.sh")
    conn.run("./install_all.sh 2>&1 | tee install_all.log")
def clean():
    """Remove everything the deploy step created on the remote host."""
    conn = connect()
    # The three application dirs plus install_all.{sh,ini,log}
    conn.run("sudo rm -r geonature taxhub usershub install_all.*")
if __name__ == "__main__":
    # Dispatch on the first CLI argument; anything else prints usage.
    usage = "Pass 'deploy' or 'clean' argument to the script"
    args = sys.argv[1:]
    if not args:
        print(usage)
    elif args[0] == "deploy":
        deploy()
    elif args[0] == "clean":
        clean()
    else:
        print(usage)
|
Are there any tricks to printing the CNT structure on a 3D Printer? We have a Replicator 2x and the design fails very early in the printing process.
Hi CMC3DL! The CNT should be printable. Why does the Replicator 2x fail? If it starts the printing, it means that the model is right. Maybe the ABS doesn't stick very well. Have you had the same problem before?
|
import json
"""
JSON RPC Python class
Follows the JSON RPC 2.0 Spec (http://www.jsonrpc.org/specification)
This class can either be instantiated with a JSON encoded string or used as
a utility helper class
"""
#===============================================================================
# Error Type
#===============================================================================
class JsonRpc_Error(RuntimeError):
    """Base class for JSON-RPC 2.0 protocol errors.

    Subclasses override ``code`` and ``message``; instances may carry the
    request id and error details parsed from an incoming response dict.
    """
    code = None
    message = None
    data = None

    def __init__(self, **rpc_dict):
        RuntimeError.__init__(self)
        self.id = rpc_dict.get('id', None)
        if 'error' in rpc_dict:
            details = rpc_dict['error']
            self.code = details.get('code', None)
            self.message = details.get('message', None)

    def __str__(self):
        return repr(str(self.message))

    def export(self):
        """Serialize to a JSON-RPC 2.0 error-response dict."""
        error_body = {'code': self.code, 'message': self.message}
        return {'id': self.id, 'error': error_body}
# Concrete JSON-RPC 2.0 error types; the codes follow the specification's
# reserved error-code table.
class JsonRpc_ParseError(JsonRpc_Error):
    code = -32700
    message = 'Invalid JSON was received by the server.'
class JsonRpc_InvalidRequest(JsonRpc_Error):
    code = -32600
    message = 'The JSON sent is not a valid Request object.'
class JsonRpc_MethodNotFound(JsonRpc_Error):
    code = -32601
    message = 'The method does not exist / is not available.'
class JsonRpc_InvalidParams(JsonRpc_Error):
    code = -32602
    message = 'Invalid method parameter(s).'
class JsonRpc_InternalError(JsonRpc_Error):
    code = -32603
    message = 'Internal JSON-RPC error.'
class JsonRpc_ServerException(JsonRpc_Error):
    code = -32000
    message = 'An unhandled server exception occurred'
# Lookup table used when decoding incoming error responses back into typed
# exception objects (see JsonRpcPacket._parseJsonObject).
JsonRpcErrors = { -32700: JsonRpc_ParseError,
-32600: JsonRpc_InvalidRequest,
-32601: JsonRpc_MethodNotFound,
-32602: JsonRpc_InvalidParams,
-32603: JsonRpc_InternalError,
-32000: JsonRpc_ServerException }
# -32000 to -32099 are reserved server-errors
#===============================================================================
# Request Type
#===============================================================================
class JsonRpc_Request(object):
    """A JSON-RPC 2.0 request: id, method, positional and keyword parameters."""

    def __init__(self, **rpc_dict):
        self.id = rpc_dict.get('id', None)
        self.method = rpc_dict.get('method', '')
        self.params = rpc_dict.get('params', [])
        self.kwargs = rpc_dict.get('kwargs', {})

    def getID(self):
        return self.id

    def getMethod(self):
        return self.method

    def export(self):
        """Serialize to a dict.

        Slight modification of the JSON RPC 2.0 specification to allow both
        positional and named parameters: a 'kwargs' key is added only when
        both kinds are present; pure-keyword calls go out under 'params'.
        """
        out = {'id': self.id, 'method': self.method}
        if self.params:
            out['params'] = self.params
        if self.kwargs:
            out['kwargs'] = self.kwargs
        elif not self.params:
            out['params'] = self.kwargs
        return out

    def call(self, target):
        """Invoke *target* with the stored arguments.

        Exceptions deliberately propagate to the caller.
        """
        pure_keyword = type(self.params) == dict and len(self.kwargs) == 0
        if pure_keyword:
            return target(**self.params)
        return target(*self.params, **self.kwargs)
#===============================================================================
# Response Type
#===============================================================================
class JsonRpc_Response(object):
    """A JSON-RPC 2.0 success response: request id plus result payload."""

    def __init__(self, **rpc_dict):
        self.id = rpc_dict.get('id', None)
        self.result = rpc_dict.get('result', None)

    def getID(self):
        return self.id

    def getResult(self):
        return self.result

    def export(self):
        """Serialize to a JSON-RPC 2.0 result dict."""
        return {'id': self.id, 'result': self.result}
#===============================================================================
# JSON RPC Handlers
#===============================================================================
class JsonRpcPacket(object):
    """Container for a JSON-RPC 2.0 packet.

    Parses an incoming JSON string (single object or batch) into typed
    request/response/error objects, and serializes queued outgoing objects
    back to a JSON string via export().
    """
    def __init__(self, str_req=None):
        self.requests = []
        self.responses = []
        self.errors = []
        if str_req is not None:
            try:
                req = json.loads(str_req)
                if type(req) == list:
                    # Batch request
                    for sub_req in req:
                        try:
                            self._parseJsonObject(sub_req)
                        except:
                            self.errors.append(JsonRpc_InvalidRequest())
                    # An empty batch is itself invalid per the spec.
                    if len(req) == 0:
                        self.errors.append(JsonRpc_InvalidRequest())
                elif type(req) == dict:
                    # Single request
                    self._parseJsonObject(req)
                else:
                    self.errors.append(JsonRpc_ParseError())
            except:
                # No JSON object could be decoded
                self.errors.append(JsonRpc_ParseError())
    def _parseJsonObject(self, rpc_dict):
        """
        Takes a dictionary and determines if it is an RPC request or response
        """
        if rpc_dict.get('jsonrpc') == '2.0':
            # NOTE(review): 'unicode' makes this module Python-2-only; under
            # Python 3 this name does not exist -- confirm the target runtime.
            if 'method' in rpc_dict.keys() and type(rpc_dict.get('method')) is unicode:
                # Request object
                self.requests.append(JsonRpc_Request(**rpc_dict))
            elif 'id' in rpc_dict.keys() and 'result' in rpc_dict.keys():
                # Result response object
                self.responses.append(JsonRpc_Response(**rpc_dict))
            elif 'id' in rpc_dict.keys() and 'error' in rpc_dict.keys():
                # Error response object: map the code back to a typed exception.
                error_code = rpc_dict['error'].get('code', -32700)
                err_obj = JsonRpcErrors.get(error_code, JsonRpc_ParseError)
                self.errors.append(err_obj(**rpc_dict))
            else:
                self.errors.append(JsonRpc_InvalidRequest(**rpc_dict))
        else:
            self.errors.append(JsonRpc_InvalidRequest())
    def addRequest(self, id, method, *args, **kwargs):
        # Queue an outgoing request; positional args become 'params',
        # named args become 'kwargs'.
        self.requests.append(JsonRpc_Request(id=id,
                                             method=method,
                                             params=args,
                                             kwargs=kwargs))
    def clearRequests(self):
        self.requests = []
    def getRequests(self):
        return self.requests
    def addResponse(self, id, result):
        self.responses.append(JsonRpc_Response(id=id,
                                               result=result))
    def clearResponses(self):
        self.responses = []
    def getResponses(self):
        return self.responses
    def addError_InvalidParams(self, id):
        # Notifications (id None) never get error responses per the spec.
        if id is not None:
            self.errors.append(JsonRpc_InvalidParams(id=id))
    def addError_ServerException(self, id, msg=None):
        # NOTE(review): JsonRpc_Error.__init__ only reads 'id' and 'error',
        # so the 'message' kwarg appears to be ignored -- verify.
        if id is not None:
            self.errors.append(JsonRpc_ServerException(id=id,
                                                       message=msg))
    def addError_MethodNotFound(self, id):
        if id is not None:
            self.errors.append(JsonRpc_MethodNotFound(id=id))
    def getErrors(self):
        return self.errors
    def export(self):
        """Serialize all queued objects to a JSON string ('' when empty)."""
        ret = []
        for rpc_obj in self.requests + self.responses + self.errors:
            rpc_dict = rpc_obj.export()
            rpc_dict['jsonrpc'] = '2.0'
            ret.append(rpc_dict)
        if len(ret) == 1:
            return str(json.dumps(ret[0]))
        elif len(ret) > 1:
            return str(json.dumps(ret))
        else:
            return ''
|
In a nod to the Thai capital taking its rightful place on the international culinary stage with the launch of the first Bangkok edition of the Michelin Guide last year, a stellar line up of thirteen chefs from twelve Michelin-starred restaurants are set to take over one of the city’s most exclusive addresses. From 3rd – 9th September 2018, acclaimed chefs from nine countries – from Korea to Japan and from the United States to Portugal – will present an international culinary feast at the 19th annual World Gourmet Festival at Anantara Siam Bangkok Hotel. This year, Anantara Siam Bangkok Hotel’s 19th World Gourmet Festival is presented in collaboration with Sanpellegrino and Gastronauts Asia, and sponsored by Citibank N.A., Mercedes Benz (Thailand) Ltd., Tourism Authority of Thailand, Turkish Airlines, Bangkok 101, DestinAsian, Prestige Thailand and HELLO! Thailand.
|
import urllib2
import json
import StringIO
def build_oncotator_search_string(
    chromosome_number,
    chromosome_start_pos,
    chromosome_end_pos,
    ref_allele,
    var_allele
):
    """Join the five variant fields into an underscore-separated Oncotator query key."""
    fields = (chromosome_number, chromosome_start_pos, chromosome_end_pos,
              ref_allele, var_allele)
    return '_'.join(str(field) for field in fields)
def retrieve_oncotator_mutation_data_as_json(
    chromosome_number=None,
    chromosome_start_pos=None,
    chromosome_end_pos=None,
    ref_allele=None,
    var_allele=None,
    search_string=None
):
    """
    Fetch Oncotator annotation for a variant and parse it as JSON.

    Parameters
    ----------
    chromosome_number: int
    chromosome_start_pos: int
    chromosome_end_pos: int
    ref_allele: str
    var_allele: str
    search_string: str, optional
        Pre-built query key; when given, the five fields above are ignored.

    Returns
    -------
    oncotator_data: dict
    """
    if search_string is None:
        search_string = build_oncotator_search_string(
            chromosome_number,
            chromosome_start_pos,
            chromosome_end_pos,
            ref_allele,
            var_allele
        )
    page = retrieve_oncotator_mutation_data(search_string)
    # json.loads parses the response string directly; the previous round-trip
    # through json.load(StringIO.StringIO(page)) was unnecessary.
    return json.loads(page)
def retrieve_oncotator_mutation_data(search_string_query, maxreadlength=100000000):
    # Fetch raw annotation data for one variant from the Oncotator web service.
    # maxreadlength caps how many bytes of the response are read into memory.
    # NOTE(review): urllib2 makes this Python-2-only code.
    base_url = 'http://www.broadinstitute.org/oncotator/mutation/{0}/'
    url_request_string = base_url.format(search_string_query)
    response = urllib2.urlopen(url_request_string)
    page = response.read(maxreadlength)
    return page
|
[SailfishDevel] How to show the Connection Dialog ?
Previous message: [SailfishDevel] How to show the Connection Dialog ?
Next message: [SailfishDevel] How to show the Connection Dialog ?
at least over dbus with e.g.
Subject: [SailfishDevel] How to show the Connection Dialog ?
I've been making an application which connects to the internet in order to parse the json code and get the informations to show on the screen.
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Code transformation exceptions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from tensorflow.python.autograph.pyct import origin_info
class FrameInfo(
    collections.namedtuple(
        'FrameInfo',
        ('filename', 'lineno', 'function_name', 'code', 'converted'))):
  # One summarized stack frame; 'converted' marks frames that were translated
  # back to the original user code via the source map.
  pass
def _stack_trace_inside_mapped_code(tb, source_map):
    """Summarizes inner traceback frames up to the call to a given function.
    This functions locates the innermost (i.e. most recent) frame that corresponds
    to code that can be mapped by source_map originated from, and returns a
    translated stack trace ending at that frame. If no such frame is found, the
    entire stack trace is summarized.
    For example, the following code:
    def f():
    for i in tf.range(1):
    z = y + i # z only defined here
    Would generate this traceback:
    <converted code>
    ag__.for_stmt(...)
    <for_stmt>
    return _known_len_tf_for_stmt(iter_, extra_test, body, init_state)
    <_known_len_tf_for_stmt>
    _disallow_undefs_into_loop(*init_state)
    <_disallow_undefs_into_loop>
    raise ...
    Which is then processed into:
    <f>
    for i in tf.range(1):
    <for_stmt>
    return _known_len_tf_for_stmt(iter_, extra_test, body, init_state)
    <_known_len_tf_for_stmt>
    _disallow_undefs_into_loop(*init_state)
    <_disallow_undefs_into_loop>
    raise ...
    Args:
    tb: List[Tuple], the traceback corresponding to an error; typically,
    the output of traceback.extract_tb.
    source_map: Dict[LineLocation, OriginInfo], a source map as created by
    origin_info.create_source_map.
    Returns:
    List[FrameInfo]
    """
    result_frames = []
    # Walk from the innermost frame outward; stop at the first frame the
    # source map can translate back to user code.
    for filename, line_number, function_name, text in reversed(tb):
        loc = origin_info.LineLocation(filename=filename, lineno=line_number)
        if loc in source_map:
            origin = source_map[loc]
            origin_frame_info = FrameInfo(
                filename=origin.loc.filename,
                lineno=origin.loc.lineno,
                function_name=origin.function_name,
                code=origin.source_code_line,
                converted=True)
            result_frames.append(origin_frame_info)
            break
        fi = FrameInfo(
            filename=filename,
            lineno=line_number,
            function_name=function_name,
            code=text,
            converted=False)
        result_frames.append(fi)
    # Frames are innermost-first here; consumers re-reverse when rendering.
    return tuple(result_frames)
# Exception types whose constructor accepts a single message string, so an
# equivalent instance can be rebuilt from a formatted message alone.
KNOWN_STRING_CONSTRUCTOR_ERRORS = (
    AssertionError,
    AttributeError,
    NameError,
    NotImplementedError,
    RuntimeError,
    StopIteration,
    TypeError,
    ValueError,
)
# KeyError repr-escapes newlines in its message; this subclass renders the
# message verbatim instead. The class name is overridden to display as
# "KeyError" so converted-code tracebacks look familiar.
class MultilineMessageKeyError(KeyError):

    def __init__(self, message, original_key):
        super(MultilineMessageKeyError, self).__init__(original_key)
        self.__message = message

    def __str__(self):
        return self.__message

MultilineMessageKeyError.__name__ = KeyError.__name__
class ErrorMetadataBase(object):
    """Container objects attached to exceptions in converted code.
    This metadata allows re-raising exceptions that occur in generated code, with
    a custom error message that includes a stack trace relative to user-readable
    code from which the generated code originated.
    """
    def __init__(self, callsite_tb, cause_metadata, cause_message, source_map):
        # Translate the raw traceback into user-facing frames.
        translated_stack = _stack_trace_inside_mapped_code(callsite_tb, source_map)
        if cause_metadata is None:
            self.translated_stack = translated_stack
            self.cause_message = cause_message
        else:
            # Daisy chain the translated stacks.
            self.translated_stack = (
                cause_metadata.translated_stack + (translated_stack[-1],))
            self.cause_message = cause_metadata.cause_message
    def get_message(self):
        """Returns the message for the underlying exception."""
        lines = []
        lines.append('in converted code:')
        lines.append('')
        # One header line per frame (outermost first); '*' marks frames that
        # were translated from autograph-converted code.
        for frame_info in reversed(self.translated_stack):
            lines.append(' {}:{} {}{}'.format(
                frame_info.filename,
                frame_info.lineno,
                frame_info.function_name,
                ' *' if frame_info.converted else '',
            ))
            if frame_info.code is None:
                code_snippet = '<source unavailable>'
            else:
                code_snippet = frame_info.code.strip()
            lines.append(' {}'.format(code_snippet))
        lines.append('')
        # Indent the original cause message beneath the translated trace.
        message_lines = self.cause_message.split('\n')
        for i in range(len(message_lines)):
            message_lines[i] = ' ' + message_lines[i]
        lines.extend(message_lines)
        lines.append('')
        return '\n'.join(lines)
    def create_exception(self, source_error):
        # Re-create the original exception type with the enriched message when
        # its constructor is known to accept a plain string; otherwise None.
        preferred_type = type(source_error)
        if preferred_type.__init__ is Exception.__init__:
            return preferred_type(self.get_message())
        if preferred_type in KNOWN_STRING_CONSTRUCTOR_ERRORS:
            return preferred_type(self.get_message())
        elif preferred_type is KeyError:
            # KeyError needs the newline-preserving subclass (see above).
            return MultilineMessageKeyError(self.get_message(), self.cause_message)
        return None
    def to_exception(self, source_error):
        exc = self.create_exception(source_error)
        # Suppress implicit context so only the enriched message is shown.
        exc.__suppress_context__ = True
        exc.ag_error_metadata = self
        return exc
|
Love link ups? See my complete list of Fashion Blogger Link Ups here. Check out more from the Sydney Fashion Hunter archives.
Disclaimer: This post contains affiliate links, which may result in a commission.
Allison is a globe trotting Aussie who has champagne taste and a beer budget. Her travel style is luxury for less and she firmly believes there is a time to save and a time to splurge. Oh and she really loves wine!
|
"""
sentry.pool.redis
~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2012 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import random
from nydus.db import create_cluster
class RedisCappedPool(object):
    """
    Implements a capped queue based on Reservoir Sampling
    """
    key_expire = 60 * 60  # 1 hour

    def __init__(self, keyspace, size=1000, hosts=None, router='nydus.db.routers.keyvalue.PartitionRouter', **options):
        # Bug fix: keyspace and size were accepted but never stored, yet every
        # method below reads self.keyspace / self.size, which raised
        # AttributeError on first use.
        self.keyspace = keyspace
        self.size = size
        if hosts is None:
            hosts = {
                0: {}  # localhost / default
            }
        self.conn = create_cluster({
            'engine': 'nydus.db.backends.redis.Redis',
            'router': router,
            'hosts': hosts,
        })
        # We could set this to the maximum value of random.random() (1.0) if we knew this pool class
        # could stay instantiated. Unfortunately we'll need an offset per project, which could grow
        # indefinitely and would require us to have an LRU.
        self.offset = None

    def put(self, *items):
        """
        Efficiently samples ``items`` onto the pool's keyspace.
        """
        if self.offset is None:
            # NOTE(review): zrange with withscores returns a list of
            # (member, score) pairs, but 'val < self.offset' below compares a
            # float against it -- this looks like it should extract the score
            # (as done after the loop). Confirm against the nydus API.
            self.offset = self.conn.zrange(self.keyspace, self.size, self.size, withscores=True)
        for item in items:
            val = random.random()
            if val < self.offset:
                with self.conn.map() as conn:
                    # NOTE(review): redis ZADD takes a member as well as a
                    # score; 'item' appears to be missing from this call.
                    conn.zadd(self.keyspace, val)
                    conn.zremrangebyrank(self.keyspace, self.size)
                result = self.conn.zrange(self.keyspace, self.size, self.size, withscores=True)
                self.offset = result[-1][-1]

    def get(self):
        """
        Pops a random item off the sample set.
        """
        val = random.random()
        with self.conn.map() as conn:
            # we have to fetch both values as we dont know which one is actually set
            item_a = conn.zrange(self.keyspace, val, 1, withscores=True)
            item_b = conn.zrevrange(self.keyspace, val, 1, withscores=True)
        # pick either item, doesnt matter
        item, score = (item_a or item_b)
        # remove matching scored item (even if its not the same item)
        self.conn.zremrangebyscore(self.keyspace, val, 1)

    def values(self):
        """
        Returns all samples and clears the pool.
        """
        with self.conn.map() as conn:
            results = conn.zrange(self.keyspace, 0, self.size)
            conn.delete(self.keyspace)
        return results
|
PROVO CRAFT- Essential Vinyl Tool Set. You love intricate vinyl designs, and now weeding is part of the fun with the Essential Vinyl Tool Set! This specialty set includes 7 tools designed to work with speed, ease, and accuracy. You get Fine Tweezers with long, fine tips to hold and lift slender, delicate vinyl strips. Remove larger pieces with the Hook Tweezers. You also get the classic Weeder, with a versatile angled tip. Pierce and lift or place cuts with the Piercing Tool. The hooked end of the Hook Weeder provides the ideal angle for weeding trickier materials, such as glitter vinyl. Weeding has never been this much fun! Now you have a tool for every task, any vinyl, and your most detailed designs.
-Use a variety of tip styles and angles for the most efficiency and comfort with all vinyl types. If you need to smooth out your vinyl, use the small or extra-large scraper included in this set.
|
# Copyright 2016, Kay Hayen, mailto:kay.hayen@gmail.com
#
# Part of "Nuitka", an optimizing Python compiler that is compatible and
# integrates with CPython, but also works on its own.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Expression codes, side effects, or statements that are an unused expression.
When you write "f()", i.e. you don't use the return value, that is an expression
only statement.
"""
from .ErrorCodes import getReleaseCode
from .Helpers import generateExpressionCode
def generateExpressionOnlyCode(statement, emit, context):
    """Emit code for an expression-only statement; its value is discarded."""
    expression = statement.getExpression()
    return getStatementOnlyCode(value=expression, emit=emit, context=context)
def getStatementOnlyCode(value, emit, context):
    # Evaluate an expression purely for its side effects: the result goes into
    # a temp marked as possibly unused, then is released immediately.
    tmp_name = context.allocateTempName(
        base_name = "unused",
        type_code = "NUITKA_MAY_BE_UNUSED PyObject *",
        unique = True
    )
    # An error of the expression is dealt inside of this, not necessary here.
    generateExpressionCode(
        expression = value,
        to_name = tmp_name,
        emit = emit,
        context = context
    )
    # Drop the reference right away; the value itself is never consumed.
    getReleaseCode(
        release_name = tmp_name,
        emit = emit,
        context = context
    )
def generateSideEffectsCode(to_name, expression, emit, context):
    """Generate code for an expression wrapped in side effects.

    Every attached side effect is evaluated for effect only (its result
    is released right away), then the wrapped expression itself is
    computed into *to_name*.
    """
    for side_effect in expression.getSideEffects():
        getStatementOnlyCode(value=side_effect, emit=emit, context=context)

    generateExpressionCode(
        to_name=to_name,
        expression=expression.getExpression(),
        emit=emit,
        context=context
    )
|
Paul is spending November with English Pocket Opera Company, taking their “History of Western Classical Music (Part 1)” around Barnet schools with Pamela Hay and Mark Tinkler.
Paul will be the tenor soloist in a performance of Mozart’s Coronation Mass (KV 317) in Gauting near Munich on Sunday 22 October 2018.
Paul will be playing Vanderdendur and various villains in the joint Opera della luna / Iford Festival production of “Candide” at Iford on May 26, 29, 30 & June 1, 2 & 5.
near Munich on Sunday 1 April.
English Pocket Opera Company are currently taking their joyous interactive workshop, “Opera Quest”, around Cambridgeshire schools (Nov 20 – Dec 8).
The Hero’s Journey is explored via a TV singing talent show and Gluck’s “Orpheus & Eurydice”. This is the first stage of a project that will culminate in concerts next year in Cambridge & Huntingdon.
Paul is performing with sopranos Laura Pooley and Caroline Kennedy with EPOC director Mark Tinkler at the piano.
Paul & Caroline Kennedy will be taking audiences on an “Opera Quest” at the Celebrate Voice Festival in Salisbury. With Mark Tinkler at the piano, it will be a rare public performance of our terrifically popular school show.
Opera della luna’s “Tales of Offenbach” is on tour again.
I will be reprising my one-armed, one-legged, one-eyed Rattlebone in “Croquefer” but switching roles in “The Island of Tulipatan” – this time I’ll be giving my Field Marshal Octavius Rhomboid.
|
#
# $Id: plugin_CSV.py,v 1.1 2002/03/26 12:56:06 rob Exp $
#
# Copyright 2001 Rob Tillotson <rob@pyrite.org>
# All Rights Reserved
#
# Permission to use, copy, modify, and distribute this software and
# its documentation for any purpose and without fee or royalty is
# hereby granted, provided that the above copyright notice appear in
# all copies and that both the copyright notice and this permission
# notice appear in supporting documentation or portions thereof,
# including modifications, that you you make.
#
# THE AUTHOR ROB TILLOTSON DISCLAIMS ALL WARRANTIES WITH REGARD TO
# THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
# AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
# SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER
# RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF
# CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE!
#
"""
"""
__version__ = '$Id: plugin_CSV.py,v 1.1 2002/03/26 12:56:06 rob Exp $'
__author__ = 'Rob Tillotson <rob@pyrite.org>'
__copyright__ = 'Copyright 2001 Rob Tillotson <rob@pyrite.org>'
from dtkplugins import ParserPlugin, LOG_ERROR, LOG_NORMAL, LOG_DEBUG, LOG_WARNING
from dbprotocol import *
import string
# Raised by CSVParser when the input text is malformed (e.g. stray text
# after a closing quote).
class CSVError(Exception): pass
class CSVParser:
    # Incremental comma-separated-values parser implemented as a
    # character-at-a-time state machine.  Text is supplied via feed();
    # each completed line is delivered to handle_line() as a list of
    # field strings.  Subclasses override handle_line().
    def __init__(self):
        self.fields = []              # fields collected for the current line
        self.buf = ''                 # unconsumed input text
        self.comment_marker = '#'     # NOTE(review): declared but never used below
        self.field_separator = ','
        self.escape_double_quote = 1  # treat "" inside a field as a literal quote
        self.skip_blank_lines = 1     # do not report empty lines to handle_line()
        self._state = 'start-field'   # current state-machine state
        self._accum = ''              # characters of the field being built
    def flush(self):
        # Reset the parser, discarding any partially parsed input.
        self._state = 'start-field'
        self._accum = ''
        self.buf = ''
        self.fields = []
    def __parse(self):
        # Consume one line from self.buf if a complete line (terminated by
        # '\n') is available.  Returns the list of fields for that line,
        # or None when more input is needed.
        x = 0
        done = 0
        while x < len(self.buf):
            c = self.buf[x]
            # start-field state: looking for beginning of field
            # skip whitespace, separator means field was empty
            if self._state == 'start-field':
                if c == ' ' or c == '\t':
                    x = x + 1
                    continue
                elif c == '\n':
                    done = 1
                    x = x + 1
                    break
                elif c == '"':
                    self._state = 'quoted-string'
                elif c == self.field_separator:
                    self.fields.append('')
                else:
                    self._accum = self._accum + c
                    self._state = 'in-field'
            # in-field state: inside an unquoted field
            elif self._state == 'in-field':
                if c == self.field_separator:
                    self.fields.append(self._accum.strip())
                    self._accum = ''
                    self._state = 'start-field'
                elif c == '\n':
                    self.fields.append(self._accum.strip())
                    self._accum = ''
                    self._state = 'start-field'
                    done = 1
                    x = x + 1
                    break
                elif c == '"' and self.escape_double_quote and x < len(self.buf)-1 \
                        and self.buf[x+1] == '"':
                    x = x + 1 # eat second quote
                    self._accum = self._accum + '"'
                else:
                    self._accum = self._accum + c
            # quoted-string state: inside a double-quoted field
            elif self._state == 'quoted-string':
                if c == '"':
                    if self.escape_double_quote and x < len(self.buf)-1 and self.buf[x+1] == '"':
                        x = x + 1
                        self._accum = self._accum + '"'
                    else:
                        # closing quote; note quoted fields are NOT strip()ed
                        self.fields.append(self._accum)
                        self._accum = ''
                        self._state = 'after-quoted-string'
                else:
                    self._accum = self._accum + c
            # after-quoted-string state: only whitespace, a separator or
            # end-of-line may legally follow a closing quote
            elif self._state == 'after-quoted-string':
                if c == '\n':
                    done = 1
                    x = x + 1
                    self._state = 'start-field'
                    break
                elif c == ' ' or c == '\t':
                    x = x + 1
                    continue
                elif c == self.field_separator:
                    self._state = 'start-field'
                else:
                    self.flush()
                    raise CSVError, "text after quote"
            x = x + 1
        # keep only the unconsumed remainder for the next call
        self.buf = self.buf[x:]
        if done:
            f = self.fields
            self.fields = []
            return f
    def feed(self, text=''):
        # Append text to the buffer and deliver every complete line.
        self.buf = self.buf + text
        f = self.__parse()
        while f is not None:
            if f or not self.skip_blank_lines: self.handle_line(f)
            f = self.__parse()
    def eof(self):
        # Terminate any unfinished final line.
        self.feed('\n')
    def handle_line(self, fields):
        # Default line callback; subclasses override this.
        print fields
class PPCSVParser(CSVParser):
    # CSV parser that pushes parsed records into the next pipeline stage.
    # *next* is the downstream sink; it must provide define_fields(specs)
    # and feed_record(fields) (see the dbprotocol module).
    def __init__(self, next):
        CSVParser.__init__(self)
        self.next = next
        # Field specifications are created lazily from the first line seen.
        self.field_specs = None
        # Sensible default for standalone use.  Plugin.open() normally
        # overwrites this via copyProperties(); the original class never
        # initialized it, so handle_line() raised AttributeError when the
        # parser was used outside the plugin.
        self.use_field_names = 0
    def handle_line(self, fields):
        if self.use_field_names and not self.field_specs:
            # First line carries the column names; it is consumed as the
            # schema and not forwarded as data.
            self.field_specs = [FieldSpec(x, FLD_STRING) for x in fields]
            self.next.define_fields(self.field_specs)
        else:
            if not self.field_specs:
                # No header line: synthesize generic column names.
                self.field_specs = [FieldSpec("Field%d" % x, FLD_STRING) \
                                    for x in range(len(fields))]
                self.next.define_fields(self.field_specs)
            self.next.feed_record(fields)
    def eof(self):
        CSVParser.eof(self)
class Plugin(ParserPlugin, CSVParser):
    # dtk parser plugin wrapping PPCSVParser: converts CSV text into
    # columnar-database records.
    name = 'CSV'
    description = 'Comma-separated-values parser.'
    # (priority, input MIME type, output type) entries for the plugin chain.
    links = [ (0, 'text/comma-separated-values', 'columnar-database'),
              (-100, 'text/plain', 'columnar-database'),
              (-100, 'application/octet-stream', 'columnar-database'),
              ]
    def __init__(self, *a, **kw):
        ParserPlugin.__init__(self, *a, **kw)
        self._add_property('use_field_names', 'Get field names from first line', boolean=1)
        self._add_cli_option('use_field_names', None, 'use-field-names',
                             'Get field names from first line',
                             boolean=1)
        self.use_field_names = 0
    def open(self, chain, next, *a, **kw):
        # Build the real parser and copy configured properties onto it.
        ParserPlugin.open(self, chain, next, *a, **kw)
        self.parser = PPCSVParser(next)
        self.copyProperties(self.parser)
        # Identity table for the Python 2 string.translate API; feed()
        # uses it with a delete-set to strip '\r'.
        self.ttbl = string.maketrans('','')
        return self
    def feed(self, data):
        # Strip carriage returns so DOS line endings look like plain '\n'.
        l = string.translate(data, self.ttbl, '\r')
        self.parser.feed(l)
    def eof(self):
        self.parser.eof()
|
Reports confirm that immuno-oncology competitors, Roche and Merck & Co. will team up to develop a pan-cancer companion diagnostic test for Merck’s Keytruda therapy. The test would be used to detect the mismatch repair deficiency (dMMR) biomarker in solid tumors irrespective of where they are located.
Keytruda, for the record, was the first treatment for cancer that was awarded a site-agnostic approval by the FDA in May 2017, designed for patients with mismatch repair deficient solid tumors or microsatellite instability-high who have no alternate options for treatment. Roche made a statement saying it wants to get an approval from FDA for the immunohistochemistry assay for providing a standardized predictive testing alternative for dMMR.
Sources with knowledge of the matter said that for identifying patients who likely have a genetic predisposition for colorectal or other cancers, termed Lynch syndrome, clinical testing for dMMR biomarker status is utilized. Apparently, 4 related proteins are screened along with a BRAF V600E genetic mutation.
Separately, Roche has unveiled its own immunotherapy, with an FDA approval only a week ago of a regimen that combines Tecentriq, its anti-PD-L1 drug, with Avastin and chemotherapy. The treatment is meant for first-line non-small cell lung cancer (NSCLC) without ALK or EGFR mutations.
Reports suggest that the two-drug chemotherapy combination of Keytruda has a significant head start in the highly lucrative market. Analysts have predicted that Merck would be holding the No.1 position in the market and Roche will follow at No.2, with the sales of Tecentriq peaking just above US$1.5 billion.
FDA, in August, had supposedly expanded Keytruda’s label to include data that showed it could cut down the risk of death by half in untreated NSCLC, when used in combination with platinum chemotherapy and Eli Lilly’s Alimta, as compared to chemotherapy alone.
|
#!/usr/bin/env python
"""
rep.py (c) 2010-2011 eBay - written by Jasper Poppe <jpoppe@ebay.com>
"""
import optparse
import os
import pwd
import subprocess
def get_repos(path):
    """Create a dictionary with all the reprepro repositories under *path*.

    A directory counts as a repository when it contains conf/distributions;
    it is a 'mirror' when conf/updates also exists, otherwise 'custom'.
    Returns {name: {'type': 'mirror'|'custom', 'path': <abs repo dir>}}.
    """
    repositories = {}
    for entry in os.listdir(path):
        repo_dir = os.path.join(path, entry)
        # Only directories with a reprepro distributions file qualify.
        if not os.path.isfile(os.path.join(repo_dir, 'conf', 'distributions')):
            continue
        # A conf/updates file marks the repository as a mirror of an
        # upstream archive rather than a locally managed one.
        if os.path.isfile(os.path.join(repo_dir, 'conf', 'updates')):
            repo_type = 'mirror'
        else:
            repo_type = 'custom'
        # NOTE: the original derived the name via conf.replace(path, ''),
        # which broke when *path* carried a trailing slash; using the
        # directory entry itself is always correct.
        repositories[entry] = {'type': repo_type, 'path': repo_dir}
    return repositories
def get_codenames(repositories):
    """Add the codename list for each repository to *repositories*.

    Reads every repo's conf/distributions file and collects the values of
    its 'Codename:' lines under the 'codenames' key.  The dictionary is
    modified in place and also returned.
    """
    for repo in repositories:
        conf_file = os.path.join(repositories[repo]['path'], 'conf/distributions')
        # 'with' closes the handle; the original leaked an open file and
        # shadowed the builtin name 'file'.
        with open(conf_file, 'r') as handle:
            data = handle.read()
        for line in data.split('\n'):
            parts = line.split(': ')
            if parts[0] == 'Codename':
                # setdefault replaces the Python-2-only has_key() dance.
                repositories[repo].setdefault('codenames', []).append(parts[1])
    return repositories
def add(repositories, add, packages, user='repo'):
    """Add each .deb in *packages* to the repository/codename pair *add*."""
    repo_name, codename = add
    base_dir = repositories[repo_name]['path']
    for package in packages:
        if not os.path.isfile(package):
            print ('error: package "%s" not found' % package)
            continue
        print ('info: adding "%s" package "%s" to "%s"' % (codename, package, repo_name))
        # The *user* parameter is kept for a (disabled) sudo invocation.
        subprocess.call(['/usr/bin/reprepro', '-V', '-b', base_dir,
                         'includedeb', codename, package])
def delete(repositories, delete, packages, user='repo'):
    """Remove each named package from the repository/codename pair *delete*."""
    repo_name, codename = delete
    base_dir = repositories[repo_name]['path']
    for package in packages:
        print ('info: removing package "%s" from "%s" (%s)' % (package, repo_name, codename))
        # The *user* parameter is kept for a (disabled) sudo invocation.
        subprocess.call(['/usr/bin/reprepro', '-V', '-b', base_dir,
                         'remove', codename, package])
def contents(name, repo_dir, codename):
    """Print the packages reprepro lists for *codename* in the repository."""
    print ('info: listing contents for codename "%s" in repository "%s"' % (codename, name))
    command = ['/usr/bin/reprepro', '-V', '-b', repo_dir, 'list', codename]
    subprocess.call(command)
def update(repo, path, user='repo'):
    """Sync the mirror repository *repo* located at *path* via reprepro."""
    print ('info: fetching updates for repository "%s"' % repo)
    # The *user* parameter is kept for a (disabled) sudo invocation.
    command = ['/usr/bin/reprepro', '-V', '-b', path, '--noskipold', 'update']
    subprocess.call(command)
    print ('')
def list_repos(repositories, repo_type):
    """Print every repository of *repo_type* with its codenames."""
    for name, info in repositories.items():
        if info['type'] != repo_type:
            continue
        print ('%s (%s)' % (name, ', '.join(info['codenames'])))
def main():
    """Command-line entry point: parse options and dispatch to the
    repository helper functions.

    Invalid usage exits via parser.error() (status 2).  This function is
    not called when the file is used as a module.
    """
    parser = optparse.OptionParser(prog='rep.py', version='0.1')
    parser.set_description('rep.py - reprepro manager (c) 2010-2011 eBay - Jasper Poppe <jpoppe@ebay.com>')
    parser.set_usage('%prog -l | [-a|-d <repository> <codename> <package>... | [-c <repository> <codename>] | [-u <repository>]')
    parser.add_option('-l', dest='list_all', help='list available repositories', action='store_true')
    parser.add_option('-a', dest='add', help='add package(s) to a custom repository', nargs=2)
    parser.add_option('-d', dest='delete', help='remove package(s) from a custom repository', nargs=2)
    parser.add_option('-c', dest='contents', help='list the contents of a repository', nargs=2)
    parser.add_option('-u', dest='update', help='update mirror', action='append')
    parser.add_option('-U', dest='user', help='override repository owner (default: repo) (DO NOT USE IN A PRODUCTION ENVIRONMENT)', default='repo')
    parser.add_option('-p', dest='repo_path', help='repository path (default: /opt/repositories/debian)', default='/opt/repositories/debian')
    (opts, args) = parser.parse_args()

    if opts.add or opts.delete or opts.contents or opts.update:
        # Modifying operations are restricted to the repository owner.
        if not pwd.getpwuid(os.getuid())[0] == opts.user:
            parser.error('only the "%s" user can modify repositories' % opts.user)

    repositories = get_repos(opts.repo_path)
    repositories = get_codenames(repositories)

    if opts.list_all:
        print ('Custom repositories (you can add debian packages here):')
        list_repos(repositories, 'custom')
        print ('')
        print ('Mirrors:')
        list_repos(repositories, 'mirror')
    elif opts.add:
        if not args:
            parser.error('need to specify at least one package')
        if opts.add[0] in repositories:
            if repositories[opts.add[0]]['type'] == 'custom':
                add(repositories, opts.add, args)
            else:
                parser.error('"%s" is not a valid and or a custom repository (hint: try -l)' % opts.add)
        else:
            parser.error('repository "%s" not found (hint: try -l)' % opts.add[0])
    elif opts.delete:
        if not args:
            parser.error('need to specify at least one package')
        if opts.delete[0] in repositories:
            if repositories[opts.delete[0]]['type'] == 'custom':
                delete(repositories, opts.delete, args)
            else:
                parser.error('"%s" is not a valid and or a custom repository (hint: try -l)' % opts.delete)
    elif opts.update:
        # '-u ALL' syncs every configured mirror.  The original tested
        # len(opts.update) == 0 here, which can never hold for a non-empty
        # 'append' option, so the ALL branch was unreachable.
        if len(opts.update) == 1 and opts.update[0] == 'ALL':
            for key, value in repositories.items():
                if value['type'] == 'mirror':
                    update(key, value['path'])
        else:
            for repo in opts.update:
                if repo in repositories:
                    if repositories[repo]['type'] == 'mirror':
                        update(repo, repositories[repo]['path'])
                    else:
                        parser.error('"%s" is not a mirror, refusing to update (hint: try -l)' % repo)
                else:
                    parser.error('"%s" is not a valid repository (hint: try -l)' % repo)
    elif opts.contents:
        if args:
            parser.error('the contents option takes no arguments')
        else:
            try:
                contents(opts.contents[0], repositories[opts.contents[0]]['path'], opts.contents[1])
            except KeyError:
                parser.error('%s is not a valid repository, type -l to list all available repositories' % opts.contents[0])
    else:
        parser.print_help()
if __name__ == '__main__':
main()
|
Product prices and availability are accurate as of 2019-04-20 22:05:37 EDT and are subject to change. Any price and availability information displayed on http://www.amazon.com/ at the time of purchase will apply to the purchase of this product.
Inspired by classic country gardens, English Laundry’s charming blend perfectly melds top notes of rose geranium with a backdrop of woody spicy and musky notes. Bright, enticing and elegant. Signature opens with quince, complimented by white chocolate and jasmine petals at the heart. For a purely peaceful scent, notes of orris roots, powdery musk and sensual woods have been composed at the base.
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_moons
import spectral_embedding
import sys
sys.path.insert(0, '../cluster')
import kmeans
def test():
    """Demo: spectral embedding + K-Means clustering on the two-moons set."""
    n_clusters = 2
    X, y_true = make_moons(n_samples=500, random_state=0, noise=0.01)

    Y = spectral_embedding.transform(X, n_clusters, n_neighbors=7, sigma=0.1)
    # Normalise every embedded point to unit length (row-wise).
    norms = np.linalg.norm(Y, axis=1).reshape(-1, 1)
    Y = Y / norms

    # Apply K-Means to cluster Y
    y_pred, _, _ = kmeans.kmeans(Y, n_clusters)

    # Figure 1: the two eigenvector coordinates against the sample index.
    fig = plt.figure()
    for col in (0, 1):
        axes = fig.add_subplot(1, 2, col + 1)
        axes.scatter(np.arange(len(Y)), Y[:, col])
        axes.set_title("Eigenvector %d" % (col + 1))

    # Figure 2: the raw data coloured by the true labels.
    fig = plt.figure()
    axes = fig.add_subplot(111)
    axes.scatter(X[y_true == 0, 0], X[y_true == 0, 1], c='b', alpha=0.5, label="Class 1")
    axes.scatter(X[y_true == 1, 0], X[y_true == 1, 1], c='g', alpha=0.5, label="Class 2")
    axes.set_title("Original data")
    axes.legend()

    # Figure 3: the clustering produced by K-Means.
    fig = plt.figure()
    axes = fig.add_subplot(111)
    axes.scatter(X[y_pred == 0, 0], X[y_pred == 0, 1], c='r', alpha=0.5, label="Class 1")
    axes.scatter(X[y_pred == 1, 0], X[y_pred == 1, 1], c='y', alpha=0.5, label="Class 2")
    axes.set_title("Result of clustering")
    axes.legend()

    # Figure 4: the embedded points coloured by the true labels.
    fig = plt.figure()
    axes = fig.add_subplot(111)
    for label, colour in ((0, 'b'), (1, 'g')):
        rows = np.argwhere(y_true == label)
        axes.scatter(Y[rows, 0], Y[rows, 1], c=colour, alpha=0.5,
                     label="Class %d" % (label + 1))
    axes.set_title("Original data after spectral embedding")
    axes.legend()

    print("Number in class 0: {}".format(np.sum(y_pred == 0)))
    print("Number in class 1: {}".format(np.sum(y_pred == 1)))
    plt.show()
if __name__ == '__main__':
test()
|
Change is the true nature of every place we inhabit, everything we are. I live on what was an island that became, in time, a peninsula only to – one day in the not too distant future, with the changing climate and rising seas – most likely become an island again. Indigenous peoples travelled down this coast – when it had a different coastline, a different sea level yet again – thousands of years ago.
|
'''Version 0.33'''
import json
import csv
import glob
import sys
import importlib
from pprint import pprint
from collections import Counter
# init is an optional flag to indicate you're starting
# over; old autograder results are written over and column
# headers are printed to the file.
team = "0"
init = False
# Crude CLI parsing: any argv token equal to "init" sets the flag, every
# other token is taken as the team number.
# NOTE(review): sys.argv[0] (the script path) also hits the else branch,
# so a bare invocation sets team to the script name — presumably callers
# always pass the team number explicitly; confirm.
for arg in sys.argv:
    if arg == "init":
        init = True
    else:
        team = arg
# Import the team's submission, e.g. Team7/recipe_api.py
api = importlib.import_module("Team%s.recipe_api" % team)
def check_tools(answer, stud):
    """Score a student's tool/method list against the answer key.

    Exact string matches score 1 point each; leftover entries are then
    matched fuzzily by word overlap (Jaccard similarity), and every answer
    key entry contributes the score of its best-matching student entry.
    Both *answer* and *stud* are mutated (matched items are removed).
    Returns the numeric score (int or float).
    """
    score = 0
    # Pass 1: exact matches.  Iterate over a copy — the original removed
    # items from *stud* while iterating it, which skips elements.
    for s in stud[:]:
        if s in answer:
            print(s)
            score += 1
            answer.remove(s)
            stud.remove(s)
    # Pass 2: fuzzy word-overlap matching of whatever is left.
    # (A first, immediately overwritten expans assignment was removed.)
    expans = dict([[a, {'words': a.split(), 'matches': Counter()}] for a in answer])
    expstud = dict([[a, a.split()] for a in stud])
    for s in expstud:
        tmpscore = -1
        tmpmatch = None
        for word in expans:
            intersection = set(expstud[s]) & set(expans[word]['words'])
            complement = set(expstud[s]) ^ set(expans[word]['words'])
            total = len(intersection) + len(complement)
            if total == 0:
                # Both entries split to no words; nothing comparable
                # (the original divided by zero here).
                continue
            newscore = float(len(intersection)) / total
            print("%s, %s, %d, %d, %f" % (s, word, len(intersection), len(complement), newscore))
            if newscore > tmpscore:
                tmpscore = newscore
                tmpmatch = word
        if tmpscore > 0:
            expans[tmpmatch]['matches'][s] = tmpscore
            stud.remove(s)
    # Each answer entry scores only its single best fuzzy match, if any.
    for word in expans:
        match = expans[word]['matches'].most_common(1)
        if len(match) > 0:
            score += match[0][1]
    return score
def check_ingredients(answer, stud):
    # Score the student's ingredient list against the answer key,
    # position by position.  For each ingredient index, one point is
    # awarded per attribute whose value appears in the key's accepted
    # values, capped at the key's 'max' for that ingredient.  Returns
    # the summed per-ingredient scores.
    scores = []
    score = 0
    # Only positions present in both lists are compared; extra entries
    # on either side are ignored.
    for x in range(min([len(answer), len(stud)])):
        for ind in ['name', 'measurement', 'quantity', 'descriptor', 'preparation', 'prep-description']:
            if ind in stud[x]:
                print "\nYour answer: %s"%str(stud[x][ind])
                print "Valid answers: %s"%str(answer[x][ind])
                if ind == 'quantity':
                    # Quantities may be strings or numbers; numeric values
                    # are also accepted after rounding to two decimals.
                    flag = False
                    for val in answer[x][ind]:
                        if type(stud[x][ind]) is str:
                            if val == stud[x][ind]:
                                flag = True
                        elif val == stud[x][ind]:
                            flag = True
                        elif float('%.2f'%stud[x][ind]) == val:
                            flag = True
                    if flag:
                        score += 1
                    else:
                        print "No match!"
                elif stud[x][ind] in answer[x][ind]:
                    score += 1
        # Cap this ingredient's score at the allowed maximum.
        scores.append(min([score, answer[x]['max']]))
        print "Score: %s\n---"%str(scores[-1])
        score = 0
    return sum(scores)
def get_file(fn):
    """Load and return the JSON answer key stored at *fn*."""
    with open(fn, 'r') as handle:
        return json.load(handle)
def main(team, init=False):
    """Pass 'init' as a command line variable if this is your
    first time running the program and you want it to print the
    column headers to the file."""
    # Graded categories; also the column order of parsegrades.csv.
    keys = ['ingredients', 'primary cooking method', 'cooking methods', 'implied cooking methods', 'cooking tools', 'implied cooking tools']
    if init:
        # 'wb' truncates: starting over, so write the header row.
        with open('parsegrades.csv', 'wb') as csvfile:
            csvwriter = csv.writer(csvfile, delimiter='\t')
            csvwriter.writerow(keys)
    # Running totals across all recipes for this team.
    scores = Counter(dict(zip(keys, [0]*len(keys))))
    cnt = 1
    # One answer key per JSON file in Recipes/.
    for answer in (get_file(fn) for fn in glob.iglob('Recipes/*.json')):
        # Ask the team's submission to parse the same recipe URL.
        stud = getattr(api, "autograder")(answer['url'])
        temp = Counter(dict(zip(keys, [0]*len(keys))))
        if type(stud) == str:
            stud = json.loads(stud)
        if type(stud) == dict:
            # Each category score is capped at and normalized by the
            # answer key's maximum for that category.
            temp['cooking tools'] = min([check_tools(answer['cooking tools'], stud['cooking tools']), answer['max']['cooking tools']])/float(answer['max']['cooking tools'])
            temp['cooking methods'] = min([check_tools(answer['cooking methods'], stud['cooking methods']), answer['max']['cooking methods']])/float(answer['max']['cooking methods'])
            temp['implied cooking tools'] = min([check_tools(answer['implied cooking tools'], stud['cooking tools']), answer['max']['implied cooking tools']])/float(answer['max']['implied cooking tools'])
            temp['implied cooking methods'] = min([check_tools(answer['implied cooking methods'], stud['cooking methods']), answer['max']['implied cooking methods']])/float(answer['max']['implied cooking methods'])
            if stud['primary cooking method'] == answer['primary cooking method']:
                temp['primary cooking method'] = 1
            stud = stud['ingredients']
            temp['ingredients'] = check_ingredients(answer['ingredients'], stud)/float(answer['max']['ingredients'])
            scores += temp
            print "%s\t%s\t%s\t%s\t%s\t%s\t%s" % ("Recipe", 'Ingredients', 'Primary Method', 'Methods', 'Implied Methods', 'Tools', 'Implied Tools')
            print "Recipe %d:\t%.3f\t%d\t%.3f\t%.3f\t%.3f\t%.3f" % (cnt, temp['ingredients'], temp['primary cooking method'], temp['cooking methods'], temp['implied cooking methods'], temp['cooking tools'], temp['implied cooking tools'])
            cnt += 1
        else:
            print "student answer formatting error"
    # Append this team's totals as one row of the grade sheet.
    row = ["Team %s" % team]
    row.extend([scores[k] for k in keys])
    with open('parsegrades.csv', 'ab') as csvfile:
        csvwriter = csv.writer(csvfile, delimiter='\t')
        csvwriter.writerow(row)
|
Exclusively created to feel just like natural lubrication, this high end water based formula from Shunga’s Toko line is the prefect way to add even more ‘slip’ and moisture to playtime. Lube brings you and the object of your desire even closer together, while cutting down on friction and increasing pleasurable sensations. The clear, odorless water based formula is ultra silky and long lasting, completely safe for latex condoms and gear as well as all types of toy materials, and it won’t stain your clothing or sheets.
"All you need is love. But a little chocolate now and then doesn't hurt."
"Love is the voice under all silences, the hope which has no opposite in fear; the strength so strong mere force is feebleness: the truth more first than sun, more last than star."
|
###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013, John McNamara, jmcnamara@cpan.org
#
import unittest
import os
from ...workbook import Workbook
from ..helperfunctions import _compare_xlsx_files
class TestCompareXLSXFiles(unittest.TestCase):
    """
    Test file created by XlsxWriter against a file created by Excel.
    """

    def setUp(self):
        # Show full diffs on comparison failure.
        self.maxDiff = None

        filename = 'chart_layout05.xlsx'

        test_dir = 'xlsxwriter/test/comparison/'
        self.got_filename = test_dir + '_test_' + filename
        self.exp_filename = test_dir + 'xlsx_files/' + filename

        # Nothing is excluded from the file/XML comparison.
        self.ignore_files = []
        self.ignore_elements = {}

    def test_create_file(self):
        """Test the creation of an XlsxWriter file with user defined layout."""
        filename = self.got_filename

        ####################################################

        workbook = Workbook(filename)
        worksheet = workbook.add_worksheet()
        chart = workbook.add_chart({'type': 'area'})

        # Fixed axis ids so the generated XML matches the reference file.
        chart.axis_ids = [43495808, 43497728]

        data = [
            [1, 2, 3, 4, 5],
            [8, 7, 6, 8, 10],
            [3, 6, 9, 12, 15],
        ]

        worksheet.write_column('A1', data[0])
        worksheet.write_column('B1', data[1])
        worksheet.write_column('C1', data[2])

        chart.add_series({
            'categories': '=Sheet1!$A$1:$A$5',
            'values': '=Sheet1!$B$1:$B$5',
        })

        chart.add_series({
            'categories': '=Sheet1!$A$1:$A$5',
            'values': '=Sheet1!$C$1:$C$5',
        })

        # User defined axis-title layout positions (fractions of the
        # chart area) — the feature under test.
        chart.set_x_axis({
            'name': 'XXX',
            'name_layout': {
                'x': 0.34620319335083105,
                'y': 0.85090259550889469,
            }
        })

        chart.set_y_axis({
            'name': 'YYY',
            'name_layout': {
                'x': 0.21388888888888888,
                'y': 0.26349919801691457,
            }
        })

        worksheet.insert_chart('E9', chart)

        workbook.close()

        ####################################################

        got, exp = _compare_xlsx_files(self.got_filename,
                                       self.exp_filename,
                                       self.ignore_files,
                                       self.ignore_elements)

        self.assertEqual(got, exp)

    def tearDown(self):
        # Cleanup.
        if os.path.exists(self.got_filename):
            os.remove(self.got_filename)
if __name__ == '__main__':
unittest.main()
|
With a diversity of manufacturing options and extensive product development experience, Holland is well-positioned to produce custom, co-extrusion coated and laminated products. Many of the custom products that Holland develops are laminations and composites utilizing materials such as, papers, films, non-woven and woven fabrics, and foil.
Learn more about Holland’s complete set of Coating Capabilities.
Our wide width manufacturing capability combined with our unique product mix allows us design, develop and deliver custom coated and laminated products in a cost-effective manner.
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
# import re
import urlparse
import urllib
import time
# import redis
from pyquery import PyQuery as PQ
from scrapy.spiders import Spider
# from scrapy.selector import Selector
from scrapy.http import Request
from bookspider.items import QidianRankItem
# RC = redis.Redis()
class QidianRankSpider(Spider):
    """Scrape the qidian.com VIP-click ranking tables.

    Walks the Time=3/2/1 TopDetail listings page by page, yielding one
    QidianRankItem (title, vip_click, time_type) per ranked book.  Several
    entry points per period let the crawl start in parallel; each parsed
    page schedules its own successor page.
    """
    name = "qidianrank"
    allowed_domains = ["top.qidian.com"]
    start_urls = [
        "http://top.qidian.com/Book/TopDetail.aspx?TopType=1&Time=3&PageIndex=1",
        "http://top.qidian.com/Book/TopDetail.aspx?TopType=1&Time=3&PageIndex=10",
        "http://top.qidian.com/Book/TopDetail.aspx?TopType=1&Time=3&PageIndex=20",
        "http://top.qidian.com/Book/TopDetail.aspx?TopType=1&Time=3&PageIndex=30",
        "http://top.qidian.com/Book/TopDetail.aspx?TopType=1&Time=3&PageIndex=40",
        "http://top.qidian.com/Book/TopDetail.aspx?TopType=1&Time=2&PageIndex=1",
        "http://top.qidian.com/Book/TopDetail.aspx?TopType=1&Time=2&PageIndex=10",
        "http://top.qidian.com/Book/TopDetail.aspx?TopType=1&Time=2&PageIndex=20",
        "http://top.qidian.com/Book/TopDetail.aspx?TopType=1&Time=2&PageIndex=30",
        "http://top.qidian.com/Book/TopDetail.aspx?TopType=1&Time=2&PageIndex=40",
        "http://top.qidian.com/Book/TopDetail.aspx?TopType=1&Time=1&PageIndex=1",
        "http://top.qidian.com/Book/TopDetail.aspx?TopType=1&Time=1&PageIndex=10",
        "http://top.qidian.com/Book/TopDetail.aspx?TopType=1&Time=1&PageIndex=20",
        "http://top.qidian.com/Book/TopDetail.aspx?TopType=1&Time=1&PageIndex=30",
        "http://top.qidian.com/Book/TopDetail.aspx?TopType=1&Time=1&PageIndex=40",
    ]

    def parse(self, response):
        url = response.url
        jQ = PQ(response.body_as_unicode())
        # One <tr> per ranked book inside the #list1 table.
        for i in jQ("#list1 tr"):
            elem = jQ(i)
            title = elem.find("td").eq(2).find("a").eq(0).text()
            if title:
                try:
                    click = int(elem.find("td").eq(3).text())
                except (TypeError, ValueError):
                    # Header/malformed rows carry no numeric click count.
                    # (Narrowed from the original bare ``except:``, which
                    # also swallowed KeyboardInterrupt/SystemExit.)
                    continue
                else:
                    item = QidianRankItem()
                    item["time_type"] = QidianRankItem.get_time_type(url)
                    item["title"] = title
                    item["vip_click"] = click
                    yield item
        # Schedule the next page of the same ranking.
        url_obj = urlparse.urlparse(url)
        page_num = str(
            int(urlparse.parse_qs(url_obj.query).get("PageIndex", ['0'])[0]) + 1
        )
        time_num = urlparse.parse_qs(url_obj.query).get("Time", ['3'])[0]
        if page_num == "50":
            # NOTE(review): re-requests the current URL; scrapy's duplicate
            # filter will normally drop it, ending this chain at page 49 —
            # confirm this wrap-around is intentional.
            yield Request(url, callback=self.parse)
        else:
            new_qs = urllib.urlencode({
                "PageIndex": page_num,
                "Time": time_num,
                "TopType": '1',
            })
            new_url = urlparse.urlunparse([
                url_obj.scheme,
                url_obj.netloc,
                url_obj.path,
                url_obj.params,
                new_qs,
                url_obj.fragment
            ])
            # Blocking politeness delay.
            # NOTE(review): prefer scrapy's DOWNLOAD_DELAY setting; sleeping
            # here stalls the whole reactor thread.
            time.sleep(0.5)
            yield Request(new_url, callback=self.parse)
|
Mother Earth Natural Foods is committed to helping residents in the Fort Myers area stay healthy and reduce the risk of high cholesterol, obesity, diabetes and other health-related issues by providing an extensive collection of healthy foods and wellness products. Whether you are hoping to drastically change your current lifestyle or simply want to start making healthier decisions, Mother Earth is here to help you along your wellness journey!
We understand the difficulties associated with making the decision to change your eating habits and stay healthy, which is why we are here to help you every step of the way. Our knowledgeable and friendly associates will assist you in choosing the right products that will help you achieve optimal health benefits. We will consider your lifestyle and budget when suggesting potential foods, vitamins and remedies for your health goals. Reap the Benefits with Our Exceptional Health Products Stop by today to learn more!
|
# -*- coding: utf-8 -*-
"""
Created on Thu Dec 3 15:25:46 2015
@author: nadiablago
"""
import numpy as np
def johnson2sdss(U, B, V, R, I):
    # NOTE(review): unimplemented stub — the strings below only record the
    # transformation table; the function implicitly returns None.
    '''global transformations between UBVRI and ugriz'''
    '''
    #Color Color Term Zeropoint Range
    "gV": (0.630 ± 0.002) (B − V) −(0.124 ± 0.002)
    "ri": (1.007 ± 0.005) (R − I) −(0.236 ± 0.003)
    "rz": (1.584 ± 0.008) (R − I) −(0.386 ± 0.005)
    "rR": (0.267 ± 0.005) (V − R) +(0.088 ± 0.003) V − R ≤ 0.93
    "rR": (0.77 ± 0.04) (V − R) −(0.37 ± 0.04) V − R > 0.93
    "ug": (0.750 ± 0.050) (U − B) + (0.770 ± 0.070) (B − V) +(0.720 ± 0.040)
    "gB": −(0.370 ± 0.002) (B − V) −(0.124 ± 0.002)
    "gr": (1.646 ± 0.008) (V − R) −(0.139 ± 0.004)
    "iI": [0.247, 0.329]'''
def sdss2johnson(ref_sdss, savefile=None):
    '''Convert an SDSS ugriz catalogue to Johnson-Cousins UBVRI.

    Uses the Jordi et al. 2006 transformation equations:

        U-B = (0.79 ± 0.02)*(u-g) - (0.93 ± 0.02)
        B-g = (0.313 ± 0.003)*(g-r) + (0.219 ± 0.002)
        V-g = (-0.565 ± 0.001)*(g-r) - (0.016 ± 0.001)
        R-r = (-0.153 ± 0.003)*(r-i) - (0.117 ± 0.003)
        I-i = (-0.386 ± 0.004)*(i-z) - (0.397 ± 0.001)

    Parameters
    ----------
    ref_sdss : str
        Path to a CSV file with a header row providing at least the
        columns: objid, ra, dec, u, g, r, i, z.
    savefile : str or None
        When given, the converted catalogue is also written to this path.

    Returns
    -------
    numpy structured array with fields id, ra, dec, U..I and their
    1-sigma errors dU..dI.
    '''
    ref_sdss = np.genfromtxt(ref_sdss, dtype=None, names=True, delimiter=',')

    john = np.zeros(len(ref_sdss), dtype=[('id', '<i8'), ('ra', '<f8'), ('dec', '<f8'), \
        ('U', '<f4'), ('B', '<f4'), ('V', '<f4'), ('R', '<f4'), ('I', '<f4'),\
        ('dU', '<f4'), ('dB', '<f4'), ('dV', '<f4'), ('dR', '<f4'), ('dI', '<f4')])

    # SDSS band each Johnson band is anchored to, e.g. B = g + f(g-r).
    band_dic = {"B": "g", "V": "g", "R": "r", "I": "i"}
    # SDSS colour used by each transformation, e.g. "gr" -> g - r.
    coldic = {"U": "ug", "B": "gr", "V": "gr", "R": "ri", "I": "iz"}
    # (slope, zero-point) and their 1-sigma errors per band.  Zero-point
    # signs follow the equations above; the original code dropped the
    # minus signs for U, V, R and I.
    coefs = {"U": [np.array([0.79, -0.93]), np.array([0.02, 0.02])],
             "B": [np.array([0.313, 0.219]), np.array([0.003, 0.002])],
             "V": [np.array([-0.565, -0.016]), np.array([0.001, 0.001])],
             "R": [np.array([-0.153, -0.117]), np.array([0.003, 0.003])],
             "I": [np.array([-0.386, -0.397]), np.array([0.004, 0.001])]}

    for b in "BVRI":
        col = ref_sdss[coldic[b][0]] - ref_sdss[coldic[b][1]]
        slope, zeropoint = coefs[b][0]
        dslope, dzeropoint = coefs[b][1]
        # Element-wise per object: band = slope*colour + zeropoint + anchor.
        # (The original np.sum(...) collapsed the whole catalogue into a
        # single scalar that was broadcast to every row.)
        john[b] = col * slope + zeropoint + ref_sdss[band_dic[b]]
        john["d" + b] = np.abs(col) * dslope + dzeropoint

    # U band a bit different: it is anchored to the Johnson B computed above.
    col = ref_sdss["u"] - ref_sdss["g"]
    slope, zeropoint = coefs["U"][0]
    dslope, dzeropoint = coefs["U"][1]
    john["U"] = col * slope + zeropoint + john["B"]
    john["dU"] = np.abs(col) * dslope + dzeropoint

    john["ra"] = ref_sdss["ra"]
    john["dec"] = ref_sdss["dec"]
    john["id"] = ref_sdss["objid"]

    if not savefile is None:
        np.savetxt(savefile, john, header="id,ra,dec,U,B,V,R,I,dU,dB,dV,dR,dI",
                   fmt="%d,%.5f,%.5f,%.4f,%.4f,%.4f,%.4f,%.4f,%.4f,%.4f,%.4f,%.4f,%.4f")
    return john
|
The membership of the NMA is composed of individuals and institutions who are actively engaged in museum work on a professional or volunteer basis at non-profit institutions and educational agencies or who are interested in the fundamental functioning of museums.
Membership is on a calendar year basis, January 1 to December 31. Payment is due by February 15.
|
# -*- coding: utf-8 -*-
# -*- mode: python -*-
from exceptions import *
# Permanent answer errors: the server rejected the request for good; retrying
# the same request will not help.
class InvalidLogin(PermanentError, AnswerException): pass
class GuestDeny(PermanentError, AnswerException): pass
class UserDeny(PermanentError, AnswerException): pass
class Bumplimit(PermanentError, AnswerException): pass
class PermClosed(PermanentError, AnswerException): pass
class TopicDoesNotExist(PermanentError, AnswerException): pass
# Temporary answer errors: transient server/protocol conditions; a retry may
# succeed later.
class BadGateway(TemporaryError, AnswerException): pass
class EmptyAnswer(TemporaryError, AnswerException): pass
class Antispam(TemporaryError, AnswerException): pass
class Redir(TemporaryError, AnswerException): pass
class Wait5Min(TemporaryError, AnswerException): pass
class Closed(TemporaryError, AnswerException): pass
class UnknownAnswer(TemporaryError, AnswerException): pass
# Errors not tied to parsing a particular server answer.
class RegRetryLimit(PermanentError): pass
class DosVersionError(PermanentError, NotImplementedError): pass
class Captcha(AnswerException):
    """Raised when the server answer contains a CAPTCHA challenge."""
    def __init__(self, page, errstr=None, target=None, postdata=None, catry=1):
        '''
        page: raw page with captcha
        catry: see Beon.addtopicfin()
        '''
        super(Captcha, self).__init__(errstr, target, page, postdata)
        self.page = page
        self.catry = catry
# Not an error: signals that the request was answered successfully.
class Success(AnswerException): pass
|
Variety: 100% Nebbiolo; vineyard: Pora. Vinification: fermentation in steel at 30 °C, 28 days of maceration on the skins with malolactic fermentation.
In 1958 the Produttori del Barbaresco was founded. The Cantina Sociale dei Produttori del Barbaresco now has 60 members of 50 families and approximately 100 hectares of wine-growing land. The wine company mainly makes Barbaresco and Nebbiolo Langhe wines. These wines are more suitable for young drinking. In top wine years no less than nine different Cru wines are made: Asili, Rabajà, Pora, Montestefano, Ovello, Pajé, Montefico, Moccagatta and Rio Sordo.
The nine cru's or vineyards of the Produttori lie on steep slopes consisting of the classic solid clay and marl of the Langhe. The most northerly chain of hills: Ovello, Montefico and Montestefano, have a higher clay content. They are known for their rich earthy, meaty flavors and firm structure. In the south are Pajé and Moccagatta, known for their intensity and elegance. Even further to the south is Barbaresco's largest slope, with the south side Asili, Rabajà and Pora. Among these three icons is the solitary southwest-facing slope of Rio Sordo, whose luxuriant Barbaresco exudes aromas of black cherry and truffle.
First I decanted it with the Vspin active Decanting system.
In the glass a ruby, garnet red color.
On the nose intense and delicate aromas of black cherries, orange peel and vanilla with lovely hints of tobacco, truffle and rose petals.
On the palate medium/full-bodied with medium acidity and fine-grained tannins showing flavours of cherries, plums and raspberries with notes of tobacco, balsamico, chocolate and licorice. Great intensity and a long aftertaste. Already very nice but will keep for ten more years for sure.
|
"""System for Modular Analysis and Continuous Queries.
See http://smacq.sourceforge.net/
"""
import libpysmacq
import time, sys
# TODO:
# Change all instances of raise Exception to raise More_Appropriate_Exception
class SmacqQuery: # {{{
"""Executes one or more queries in the SMACQ (System for Modular Analysis and Continous Queries) API."""
graph = None
dts = None
scheduler = None
running = False
def __init__(self, query_str = None, run_now = False):
self.__running = False
self.scheduler = libpysmacq.SmacqScheduler()
self.dts = libpysmacq.DTS()
self.graph = libpysmacq.SmacqGraph()
if query_str:
self.graph.addQuery(self.dts, self.scheduler, query_str)
if run_now:
self.run()
def run(self, ignoreDups = False):
"""Adds this query to the main SmacqGraph and runs it. If the scheduler hasn't already been
started, then it is started."""
if self.__running:
if not ignoreDups:
print "This query is already running."
else:
self.graph.init(self.dts, self.scheduler)
self.scheduler.seed_produce(self.graph)
self.scheduler.start_threads(0)
self.__running = True
return
def is_running(self):
return self.__running
# Fetching Methods {{{
def fetchone(self):
"""Fetch the next result object and return it, or None when no more data is available"""
return self.fetchmany(1)
def fetchmany(self, num_results = 1): # {{{
"""Returns num_results DtsObject objects in a list. This will wait for results if it
needs to. If the number of results returned is less than requested, then the
query has been completed."""
self.run(True)
query_results = []
for i in range(num_results):
result = self.scheduler.get()
if result:
query_results.append( result )
else:
break
return query_results
def fetch_nb(self, num_results = 1): # {{{
"""Performs a non-blocking fetch of num_results data items.
To test if the query is done, check the value of done(). If done is True, then a fetchall
performed afterwards should return the remaining results without blocking."""
self.run(True)
query_results = []
for i in range(num_results):
result = self.scheduler.element()
if result:
query_results.append( result )
else:
break
return query_results
def fetchall(self, result_limit = None, time_limit = 0): # {{{
"""Fetches all results produced by the current query. Note that querying an unbounded
data source will cause the function to never return or run out of memory. Returns a tuple
containing the list of results and the reason it returned.
Two limiting parameters are provided: result_limit and time_limit (in seconds).
If either limit is reached, the query will return it's results immediately. If either limit
is set to None (default) or zero, it has no effect. """
self.run(True)
if result_limit == 0:
result_limit = None
time_done = time.time() + time_limit
num_results = 0
results = []
while (True):
if result_limit is not None:
if num_results >= result_limit:
stop_reason = "max_results"
break
else:
num_results += 1
if time_limit != 0 and time.time >= time_done:
stop_reason = "time"
break
result = self.scheduler.get()
if not result:
stop_reason = "done"
break
results.append( result )
return (results, stop_reason)
def busy_loop(self):
"""Runs the query until it is done, but throws out any results"""
self.run(True)
self.scheduler.busy_loop()
def __done(self):
# While this isn't currently needed, it will be if non-blocking fetches are implemented.
"""Returns True if the query is done processing, False otherwise"""
return self.scheduler.done()
#end Fetching methods }}}
# Iterator methods {{{
def __iter__(self):
"""Return self in compliane with iterator protocol."""
self.run(True)
return self
def next(self):
"""Return the next DtsObject returned by the query. Raise StopIteration when complete."""
x = self.scheduler.get()
if x:
return x
else:
raise StopIteration
# }}}
# Join methods {{{
def append(self, query):
"""Joins this query with the other_query.
other_query can be either a string or a SmacqGraph object"""
if type(query) == str:
newg = libpysmacq.SmacqGraph()
newg.addQuery(self.dts, self.scheduler, query)
self.graph.join(newg)
elif type(query) == libpysmacq.SmacqGraph:
self.graph.join(query)
else:
print type(query)
raise TypeError
def __iadd__(self, query):
"""This is the += operator."""
self += query
return self
def add(self, query):
"""Adds the query on the righthand side to the query on the left.
If the right hand side is a query string, it is used to create a new query object first."""
if type(query) == str:
self.graph.addQuery(self.dts, self.scheduler, query)
else:
self.graph.addQuery(query)
def __rshift__(self, query):
"""This is the >>= operator. Joins the query on the righthand side with the query on the left.
If the right hand side is a query string, it is used to create a new query object first."""
self.append(query)
return self
def __add__(self, query):
"""Adds two queries together, and returns a new query as a result"""
newQuery = self.clone()
newQuery += (query)
return newQuery
# end join methods }}}
def __str__(self): # {{{
return self.graph.print_query()
# }}}
# end SmacqQuery }}}
def DtsObject_getdata(self):
    """Return the raw data of this DtsObject, or None when it is empty."""
    if not len(self):
        return None
    return self.get()._getdata()
def DtsObject_getitem(self, index):
    """Field lookup: obj[index].

    Returns the field's DtsObject, None when the field is present but empty,
    and raises KeyError when the object has no such field.
    """
    index = str(index) # Convert numeric indices to strings
    x = self.get().getfield(index, True)
    # Presumably distinguishes a present-but-empty field from a missing one
    # (has_key performs a fresh getfield lookup) — verify against libpysmacq.
    if not x.get() and self.has_key(index):
        return None
    if not x.get():
        raise KeyError, "DtsObject instance does not contain field " + index
    return x
# Graft Python protocol methods onto the SWIG-generated DtsObject wrapper.
libpysmacq.DtsObject.__len__ = lambda self: self.get().__len__()
# True when the named field resolves to a non-NULL value.
libpysmacq.DtsObject.has_key = lambda self, name: (self.get().getfield(str(name), True).get() != None)
# Delegate unknown attribute access to the wrapped object.
libpysmacq.DtsObject.__getattr__ = lambda self, name: self.get().__getattribute__(name)
# A DtsObject is truthy iff it wraps a non-NULL pointer.
libpysmacq.DtsObject.__nonzero__ = lambda self: (self.get() != None)
def DtsObject_dict(self):
    """Construct a dictionary mapping every available field name to its value."""
    return dict((name, self[name]) for name in self.keys())
def DtsObject_str(self):
    """Return a human-readable version of a DtsObject by rendering all of its fields."""
    fields = self.dict()
    return str(fields)
def DtsObject_repr(self):
    """Return a string representation of a DtsObject.

    When the object carries a non-empty "string" field its text is shown
    (minus the trailing NUL that SMACQ strings carry); otherwise fall back
    to the repr() of the raw data.
    """
    if self.has_key("string"):
        text = self["string"]
        if text:
            return text.getdata()[:-1]  # drop the NULL terminator
    return repr(self.getdata())
def DtsObject_keys(self, field_refs = False):
    """Returns a list of field names for this object.

    If field_refs is True, DtsField objects are returned instead.  DtsField
    objects can be used in place of field-name strings for DtsObject field
    lookups and are significantly faster.  They are returned in the same
    order as the corresponding field names would be."""
    self.prime_all_fields()
    cache = self.fieldcache()

    # Indices of every field slot that actually holds a value.
    present = [i for i in range(len(cache)) if cache[i].get() is not None]

    if field_refs:
        # Wrap each index in a DtsField reference object.
        return [libpysmacq.DtsField(i) for i in present]
    # Translate each index into its field name.
    return [self.getfieldname(i) for i in present]
# Install the helper functions defined above as methods on the wrapped type.
libpysmacq.DtsObject.dict = DtsObject_dict
libpysmacq.DtsObject.keys = DtsObject_keys
libpysmacq.DtsObject.getdata = DtsObject_getdata
libpysmacq.DtsObject.__str__ = DtsObject_str
libpysmacq.DtsObject.__repr__ = DtsObject_repr
libpysmacq.DtsObject.__getitem__ = DtsObject_getitem
# Remove the now-installed helper names from the module namespace.
del DtsObject_keys, DtsObject_str, DtsObject_dict, DtsObject_getitem, DtsObject_repr, DtsObject_getdata
|
Online payments company PayPal has released its first Super Bowl ad, which is set to air during the first quarter of the game.
In the video, 'old money' like coins, checks and dollars bills are portrayed as relics of the past. In contrast, 'new money' is touted as the more inclusive alternative that allows people to shop from wherever during any time of the day.
During the ad, phrases like ‘New money is not a dirty word’ and ‘Old money closes at 5' appear atop images of cities, robots, and people of all backgrounds.
The 45-second spot was directed by Nabil Elderkin, or NABIL, who has worked on music videos for artists including Nicki Minaj and John Legend.
|
"""
flowfilter.propagation
----------------------
Module containing propagation methods.
:copyright: 2015, Juan David Adarve, ANU. See AUTHORS for more details
:license: 3-clause BSD, see LICENSE for more details
"""
import numpy as np
import scipy.ndimage as nd
__all__ = ['dominantFlowX', 'dominantFlowY',
'propagate', 'propagationStep']
###########################################################
# GLOBAL VARIABLES
###########################################################
"""forward difference operator in X (column)"""
_dxp_k = np.array([[1.0, -1.0, 0.0]], dtype=np.float32)
"""backward difference operator in X (column)"""
_dxm_k = np.array([[0.0, 1.0, -1.0]], dtype=np.float32)
"""central difference in X (column)"""
_dxc_k = np.array([[1.0, 0.0, -1.0]], dtype=np.float32)
"""forward difference operator in Y (row)"""
_dyp_k = np.copy(_dxp_k.T)
"""backward difference operator in Y (row)"""
_dym_k = np.copy(_dxm_k.T)
"""central difference in Y (row)"""
_dyc_k = np.copy(_dxc_k.T)
"""+1 shift operator in X (column)"""
_sxp_k = np.array([[1.0, 0.0, 0.0]], dtype=np.float32)
"""-1 shift operator in X (column)"""
_sxm_k = np.array([[0.0, 0.0, 1.0]], dtype=np.float32)
"""+1 shift operator in Y (row)"""
_syp_k = np.copy(_sxp_k.T)
"""-1 shift operator in Y (row)"""
_sym_k = np.copy(_sxm_k.T)
def dominantFlowX(flow_x):
    """Computes dominant flow in X (column) direction.

    Parameters
    ----------
    flow_x : ndarray
        Optical X flow component.

    Returns
    -------
    flow_x_dom : ndarray
        Dominant flow in X (column) direction

    Raises
    ------
    ValueError : if flow_x.ndim != 2

    See Also
    --------
    dominantFlowY : Computes dominant flow in Y (row) direction
    """
    if flow_x.ndim != 2:
        raise ValueError('flow_x should be a 2D ndarray')

    # Central difference of |flow| decides which neighbour dominates.
    dabs = nd.convolve(np.abs(flow_x), _dxc_k)
    grows = dabs >= 0
    shrinks = dabs < 0

    # Pick the +1-shifted neighbour where the magnitude grows and the
    # -1-shifted neighbour where it shrinks.
    dominant = np.zeros_like(flow_x)
    dominant[grows] = nd.convolve(flow_x, _sxp_k)[grows]
    dominant[shrinks] = nd.convolve(flow_x, _sxm_k)[shrinks]

    return dominant
def dominantFlowY(flow_y):
    """Computes dominant flow in Y (row) direction.

    Parameters
    ----------
    flow_y : ndarray
        Optical flow Y component.

    Returns
    -------
    flow_y_dom : ndarray
        Dominant flow in Y (row) direction.

    Raises
    ------
    ValueError : if flow_y.ndim != 2

    See Also
    --------
    dominantFlowX : Computes dominant flow in X (column) direction.
    """
    if flow_y.ndim != 2:
        raise ValueError('flow_y should be a 2D ndarray')

    # Central difference of |flow| decides which neighbour dominates.
    dabs = nd.convolve(np.abs(flow_y), _dyc_k)
    grows = dabs >= 0
    shrinks = dabs < 0

    # Assign the positively or negatively shifted neighbour accordingly.
    dominant = np.zeros_like(flow_y)
    dominant[grows] = nd.convolve(flow_y, _syp_k)[grows]
    dominant[shrinks] = nd.convolve(flow_y, _sym_k)[shrinks]

    return dominant
def propagate(flow, iterations=1, dx=1.0, payload=None, border=3):
    """Propagate an optical flow field and attached payloads.

    Parameters
    ----------
    flow : ndarray
        Optical flow field. Each pixel (i, j) contains the (u, v)
        components of optical flow.

    iterations : integer, optional
        Number of iterations the numerical scheme is run. Defaults to 1.

    dx : float, optional
        Pixel size. Defaults to 1.0.

    payload : list, optional
        List of 2D scalar fields to be propagated alongside the flow.
        Defaults to None.

    border : integer, optional
        Border width in which the propagation does not take place. The
        returned flow keeps the input values in the border regions.
        Defaults to 3.

    Returns
    -------
    flowPropagated : ndarray
        Propagated flow field.

    payloadPropagated : list
        Propagated payloads, or None if the payload parameter is None.

    Raises
    ------
    ValueError : if iterations <= 0

    See Also
    --------
    propagationStep : Performs one iteration of the propagation numerical scheme.
    """
    if iterations <= 0:
        raise ValueError('iterations must be greater than zero')

    # Split the unit time interval evenly across the iterations.
    dt = 1.0 / float(iterations)

    for _ in range(iterations):
        flow, payload = propagationStep(flow, dt, dx, payload, border)

    return flow, payload
def propagationStep(flow, dt=1.0, dx=1.0, payload=None, border=3):
    """Performs one iteration of the propagation numerical scheme.

    Parameters
    ----------
    flow : ndarray
        Optical flow field. Each pixel (i, j) contains the (u, v)
        components of optical flow.

    dt : float, optional
        Time step. Defaults to 1.0.

    dx : float, optional
        Pixel size. Defaults to 1.0.

    payload : list, optional
        List of scalar fields to be propagated alongside the
        optical flow. Each element of the list must be a 2D ndarray.
        Defaults to None.

    border: integer, optional
        Border width in which the propagation does not take place.
        The returned propagated flow will have the same values as
        the input in the border regions. Defaults to 3.

    Returns
    -------
    flowPropagated : ndarray
        Propagated flow field.

    payloadPropagated: list
        Propagated payloads or None if the payload parameter is None.

    Raises
    ------
    ValueError : if flow.ndim != 3
    ValueError : if border < 0
    ValueError : if dx <= 0.0
    ValueError : if dt <= 0.0

    See Also
    --------
    propagate : Propagate an optical flow field and attached payloads
    """
    # Parameters check
    if flow.ndim != 3: raise ValueError('flow field must be a 3D ndarray')
    if border < 0: raise ValueError('border should be greater or equal zero')
    if dx <= 0.0: raise ValueError('dx should be greater than zero')
    if dt <= 0.0: raise ValueError('dt should be greater than zero')

    # U V flow components
    U = np.copy(flow[:,:,0])
    V = np.copy(flow[:,:,1])

    # ratio between time and pixel size
    R = dt/dx

    #############################################
    # PROPAGATION IN X (column) DIRECTION
    #
    # Uh = U - R*U*dx(U)
    # Vh = V - R*U*dx(V)
    #############################################
    Ud = dominantFlowX(U)

    # sign of dominant flow selects the upwind difference direction
    Up = Ud >= 0
    Um = Ud < 0

    Uh = np.copy(U)
    Vh = np.copy(V)

    # propagation with upwind difference operators
    Uh[Up] -= R*(Ud*nd.convolve(U, _dxm_k))[Up]
    Uh[Um] -= R*(Ud*nd.convolve(U, _dxp_k))[Um]
    Vh[Up] -= R*(Ud*nd.convolve(V, _dxm_k))[Up]
    Vh[Um] -= R*(Ud*nd.convolve(V, _dxp_k))[Um]

    # payload propagation in X (idiom fix: was "payload != None")
    if payload is not None:
        payloadPropX = list()

        # for each field in the payload list
        for field in payload:
            fieldPropX = np.copy(field)
            fieldPropX[Up] -= R*(Ud*nd.convolve(field, _dxm_k))[Up]
            fieldPropX[Um] -= R*(Ud*nd.convolve(field, _dxp_k))[Um]
            payloadPropX.append(fieldPropX)

    #############################################
    # PROPAGATION IN Y DIRECTION
    #
    # U1 = Uh - R*Uh*dy(U)
    # V1 = Vh - R*Vh*dy(V)
    #############################################
    Vd = dominantFlowY(Vh)

    # sign of dominant flow
    Vp = Vd >= 0
    Vm = Vd < 0

    U1 = np.copy(Uh)
    V1 = np.copy(Vh)

    # propagation with upwind difference operators
    U1[Vp] -= R*(Vd*nd.convolve(Uh, _dym_k))[Vp]
    U1[Vm] -= R*(Vd*nd.convolve(Uh, _dyp_k))[Vm]
    V1[Vp] -= R*(Vd*nd.convolve(Vh, _dym_k))[Vp]
    V1[Vm] -= R*(Vd*nd.convolve(Vh, _dyp_k))[Vm]

    # payload propagation in Y, starting from the X-propagated payloads
    payloadPropagated = None
    if payload is not None:
        payloadPropagated = list()

        # for each scalar field in the payload
        for i in range(len(payloadPropX)):
            field = payloadPropX[i]
            fieldPropY = np.copy(field)
            fieldPropY[Vp] -= R*(Vd*nd.convolve(field, _dym_k))[Vp]
            fieldPropY[Vm] -= R*(Vd*nd.convolve(field, _dyp_k))[Vm]
            payloadPropagated.append(fieldPropY)

    ##############################################
    # PACK THE PROPAGATED FLOW WITH BORDER REMOVAL
    ##############################################
    if border == 0:
        flowPropagated = np.concatenate([p[...,np.newaxis] for p in [U1, V1]], axis=2)
    else:
        flowPropagated = np.copy(flow)
        # assign the propagated flow to the interior region of the field
        flowPropagated[border:-border, border:-border, 0] = U1[border:-border, border:-border]
        flowPropagated[border:-border, border:-border, 1] = V1[border:-border, border:-border]

    # sanity check
    if np.isnan(flowPropagated).any() or np.isinf(flowPropagated).any():
        print('propagationStep(): NaN or Inf detected in propagated flow')

    return flowPropagated, payloadPropagated
|
How Can I Determine the SID for a User Account?
Hey, Scripting Guy! How can I determine the SID for a user account?
Hey, MD. For those of you whose eyes glaze over any time they see an acronym (not that we blame you), SID is short for Security Identifier. A SID is a unique ID string (e.g., S-1-5-21-1454471165-1004336348-1606980848-5555) that is assigned to each account created in a domain or on a local computer. For our purposes, we’ll just say that SID is how the operating system keeps track of accounts. For example, you can rename the Administrator account on a computer and still use that account to function as an administrator because Windows doesn’t really care what the name is; Windows still knows that this account is the Administrator account because the SID remains the same regardless of the account name. It’s like your Social Security Number which – assuming you haven’t had your identity hijacked – uniquely identifies you regardless of the name you go by.
Most of the time you don’t need to worry about SIDs, which is good: obviously it’s easier to deal with an account name like kenmyer than it is to deal with a SID like S-1-5-21-1454471165-1004336348-1606980848-5555. However, there are times when it’s useful to know which SID goes with which user account. WMI’s security classes, for example, rely on SIDs; likewise, the Windows registry tracks user profiles by SID rather than by name (take a look at HKEY_LOCAL_MACHINE\SOFTWARE\Microsoft\Windows NT\CurrentVersion\ProfileList to see what we mean). You might be able to live your entire scripting life without ever needing to know a user’s SID. But, then again ….
As you can see, the SID is practically longer than the script. All we do here is connect to the WMI service, and then use the Get method to bind to a specified instance of the Win32_UserAccount class. Notice we don’t use ExecQuery and return a collection of all the SIDs in our domain; that won’t work. Instead, we have to use Get and specify a particular user account. After that, it’s simply a matter of echoing the SID, which we do in the last line of the script.
The big difference here is that instead of getting an instance of the Win32_UserAccount class we get an instance of the Win32_SID class (and note that we pass the SID as the parameter to the Get method). As soon as we’ve retrieved that instance, we echo the account name and domain name, and we’re off and running.
|
"""SCons.Tool.ipkg
Tool-specific initialization for ipkg.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
The ipkg tool calls the ipkg-build. Its only argument should be the
packages fake_root.
"""
#
# Copyright (c) 2001 - 2015 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/ipkg.py rel_2.3.5:3347:d31d5a4e74b6 2015/07/31 14:36:10 bdbaddog"
import os

import SCons.Builder
import SCons.Util
def generate(env):
    """Add Builders and construction variables for ipkg to an Environment."""
    try:
        # Reuse the Ipkg builder if a previous tool run already created it.
        bld = env['BUILDERS']['Ipkg']
    except KeyError:
        bld = SCons.Builder.Builder(action='$IPKGCOM',
                                    suffix='$IPKGSUFFIX',
                                    source_scanner=None,
                                    target_scanner=None)
        env['BUILDERS']['Ipkg'] = bld

    # Capture the invoking user/group once, at tool-setup time, via id(1).
    user = os.popen('id -un').read().strip()
    group = os.popen('id -gn').read().strip()

    # Default command line and packaging construction variables.
    env['IPKG'] = 'ipkg-build'
    env['IPKGCOM'] = '$IPKG $IPKGFLAGS ${SOURCE}'
    env['IPKGUSER'] = user
    env['IPKGGROUP'] = group
    env['IPKGFLAGS'] = SCons.Util.CLVar('-o $IPKGUSER -g $IPKGGROUP')
    env['IPKGSUFFIX'] = '.ipk'
def exists(env):
    """Return a truthy value when the ipkg-build tool can be found."""
    return env.Detect('ipkg-build')
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
Court in Florida Airline Case: Passengers Can Sue for Personal Injury Under State Law – Jiminez-Ruiz v. Spirit Airlines, Inc. | The Law Firm of Anidjar & Levine, P.A.
Home » Blog » Court in Florida Airline Case: Passengers Can Sue for Personal Injury Under State Law – Jiminez-Ruiz v. Spirit Airlines, Inc.
In a recent personal injury case out of Puerto Rico, the federal court there ruled that the Airline Deregulation Act (ADA) does not preempt a passenger injured due to an airline’s negligence from suing the airline under applicable state law.
The allegations at issue in Jiminez-Ruiz v. Spirit Airlines, Inc. concern injuries sustained by Plaintiff Jaime Jimenez-Ruiz while a passenger aboard a Spirit Airlines flight from Ft. Lauderdale to Puerto Rico. Plaintiff, a Puerto Rico resident, was injured when he slipped and fell on the second to last step of a mobile ramp while deplaning after the plane landed. He filed suit, alleging that Spirit negligently failed to: (1) dry the steps of the mobile ramp; (2) apply anti-slip tape to the steps; (3) illuminate the area; and (4) warn passengers about the ramp’s dangerous conditions. Plaintiff sought monetary damages for his injuries.
According to the court, the statute does not prohibit Plaintiff from bringing the negligence claims against Spirit. Although the airline argued that the claims pertain to a “service” provided by Spirit, the court agreed with an earlier ruling by the Ninth Circuit Court of Appeals, in which that court found that “Congress did not intend to preempt passengers’ run-of-the-mill personal injury claims,” such as Plaintiff’s claims under the Puerto Rico negligence statute, in passing the ADA. Accordingly, the court denied the airline’s motion to dismiss the claims.
The legal term “negligence” is the most common theory of recovery for people injured in any type of accident, including car accidents, aviation accidents and motorcycle accidents as well as incidents of medical malpractice.
If you are the victim of another party’s negligence, you need the best representation you can get to protect your interests. The South Florida personal injury attorneys at Anidjar & Levine have vast experience bringing negligence claims on behalf of clients injured in a variety of different situations. We are responsible and hardworking attorneys who help guide our clients through potential negligence cause of actions and consider all of the available legal remedies.
If you were injured in an accident due to another person’s negligent behavior, call the accident and negligence attorneys at Anidjar & Levine for a free consultation. Based in Ft. Lauderdale, we serve clients throughout South Florida, including in Hollywood, Hialeah, Pompano Beach and Coral Springs. Our lawyers can speak with you about whether you might be entitled to compensation for your injury. You can reach our Florida offices at 800-747-3733 or submit an on-line form to contact us today.
|
# -*- coding: utf-8 -*-
from openerp.osv import orm, fields
from openerp import SUPERUSER_ID
from openerp.addons import decimal_precision
class delivery_carrier(orm.Model):
    """delivery.carrier extension: flags and text used by the website shop."""
    _inherit = 'delivery.carrier'
    _columns = {
        # Whether this carrier is offered as a shipping choice on the website.
        'website_published': fields.boolean('Available in the website'),
        # Free-form text shown next to the carrier on the website.
        'website_description': fields.text('Description for the website'),
    }
    _defaults = {
        'website_published': True
    }
class SaleOrder(orm.Model):
    """sale.order extension: delivery amount and carrier selection for the
    website shop (website_sale_delivery)."""
    _inherit = 'sale.order'

    def _amount_all_wrapper(self, cr, uid, ids, field_name, arg, context=None):
        """ Wrapper because of direct method passing as parameter for function fields """
        return self._amount_all(cr, uid, ids, field_name, arg, context=context)

    def _amount_all(self, cr, uid, ids, field_name, arg, context=None):
        """Extend the standard totals computation with 'amount_delivery':
        the rounded sum of the delivery order lines."""
        res = super(SaleOrder, self)._amount_all(cr, uid, ids, field_name, arg, context=context)
        currency_pool = self.pool.get('res.currency')
        for order in self.browse(cr, uid, ids, context=context):
            # Only lines flagged is_delivery contribute to the delivery amount.
            line_amount = sum([line.price_subtotal for line in order.order_line if line.is_delivery])
            currency = order.pricelist_id.currency_id
            res[order.id]['amount_delivery'] = currency_pool.round(cr, uid, currency, line_amount)
        return res

    def _get_order(self, cr, uid, ids, context=None):
        """Store-trigger helper: map changed sale.order.line ids to their
        parent sale.order ids so amount_delivery gets recomputed."""
        result = {}
        for line in self.pool.get('sale.order.line').browse(cr, uid, ids, context=context):
            result[line.order_id.id] = True
        return result.keys()

    _columns = {
        # Functional field recomputed when order lines change (see store=).
        'amount_delivery': fields.function(
            _amount_all_wrapper, type='float', digits_compute=decimal_precision.get_precision('Account'),
            string='Delivery Amount',
            store={
                'sale.order': (lambda self, cr, uid, ids, c={}: ids, ['order_line'], 10),
                'sale.order.line': (_get_order, ['price_unit', 'tax_id', 'discount', 'product_uom_qty'], 10),
            },
            multi='sums', help="The amount without tax.", track_visibility='always'
        ),
        # Order lines shown on the website: everything except delivery lines.
        'website_order_line': fields.one2many(
            'sale.order.line', 'order_id',
            string='Order Lines displayed on Website', readonly=True,
            domain=[('is_delivery', '=', False)],
            help='Order Lines to be displayed on the website. They should not be used for computation purpose.',
        ),
    }

    def _check_carrier_quotation(self, cr, uid, order, force_carrier_id=None, context=None):
        """Ensure the order carries a valid (published, applicable) carrier.

        Drops the carrier for service-only orders; otherwise keeps or forces
        one and recomputes the delivery line.  Returns True when a carrier
        ends up set (always True for service-only orders), False otherwise.
        """
        carrier_obj = self.pool.get('delivery.carrier')
        # check to add or remove carrier_id
        if not order:
            return False
        if all(line.product_id.type == "service" for line in order.website_order_line):
            # Service-only orders need no delivery at all.
            order.write({'carrier_id': None}, context=context)
            self.pool['sale.order']._delivery_unset(cr, SUPERUSER_ID, [order.id], context=context)
            return True
        else:
            carrier_id = force_carrier_id or order.carrier_id.id
            carrier_ids = self._get_delivery_methods(cr, uid, order, context=context)
            if carrier_id:
                if carrier_id not in carrier_ids:
                    carrier_id = False
                else:
                    # Move the current carrier to the front so it is tried first.
                    carrier_ids.remove(carrier_id)
                    carrier_ids.insert(0, carrier_id)
            if force_carrier_id or not carrier_id or not carrier_id in carrier_ids:
                # Pick the first carrier with a price grid matching the
                # shipping address.
                for delivery_id in carrier_ids:
                    grid_id = carrier_obj.grid_get(cr, SUPERUSER_ID, [delivery_id], order.partner_shipping_id.id)
                    if grid_id:
                        carrier_id = delivery_id
                        break
                order.write({'carrier_id': carrier_id}, context=context)
            if carrier_id:
                order.delivery_set(context=context)
            else:
                order._delivery_unset(context=context)

        return bool(carrier_id)

    def _get_delivery_methods(self, cr, uid, order, context=None):
        """Return ids of published carriers that have a price grid for the
        order's shipping address."""
        carrier_obj = self.pool.get('delivery.carrier')
        delivery_ids = carrier_obj.search(cr, uid, [('website_published','=',True)], context=context)
        # Following loop is done to avoid displaying delivery methods who are not available for this order
        # This can surely be done in a more efficient way, but at the moment, it mimics the way it's
        # done in delivery_set method of sale.py, from delivery module
        for delivery_id in list(delivery_ids):
            grid_id = carrier_obj.grid_get(cr, SUPERUSER_ID, [delivery_id], order.partner_shipping_id.id)
            if not grid_id:
                delivery_ids.remove(delivery_id)
        return delivery_ids

    def _get_errors(self, cr, uid, order, context=None):
        """Add a checkout error when no delivery method applies to the order."""
        errors = super(SaleOrder, self)._get_errors(cr, uid, order, context=context)
        if not self._get_delivery_methods(cr, uid, order, context=context):
            errors.append(('No delivery method available', 'There is no available delivery method for your order'))
        return errors

    def _get_website_data(self, cr, uid, order, context=None):
        """ Override to add delivery-related website data. """
        values = super(SaleOrder, self)._get_website_data(cr, uid, order, context=context)
        # We need a delivery only if we have stockable products
        has_stockable_products = False
        for line in order.order_line:
            if line.product_id.type in ('consu', 'product'):
                has_stockable_products = True
        if not has_stockable_products:
            return values

        # Pass order_id so carriers can compute order-specific prices.
        delivery_ctx = dict(context, order_id=order.id)
        DeliveryCarrier = self.pool.get('delivery.carrier')
        delivery_ids = self._get_delivery_methods(cr, uid, order, context=context)
        values['deliveries'] = DeliveryCarrier.browse(cr, SUPERUSER_ID, delivery_ids, context=delivery_ctx)
        return values
|
Welcome to Crookston & Fosston Eye Clinic!
We welcome you to our clinic. We take great pride and joy in providing our patients with high quality and personal service. At Crookston & Fosston Eye Clinic we will give you the attention and personal service you will come to expect and enjoy.
We’re proud to offer comprehensive eye care services and eyewear for all ages. We are the “trusted” eye care clinic and we look forward to serving you and your family. Our staff looks forward to helping you with all of your eye health needs. We will take the time to answer all of your questions and ensure you understand all of your options.
|
# -*- coding: utf-8 -*-
"""
Created on Sat May 4 22:07:54 2013
@author: matt
Qt-specific code for creating UI elements from objects
decorated with ui_decorators
"""
from ui_decorators import *
#UI creation from properties:
import PySide.QtCore
import PySide.QtGui
from Queue import Queue
# Mapping between python truth values and the Qt check states.
_bool_checkstate_map = {True: PySide.QtCore.Qt.CheckState.Checked,
                        False: PySide.QtCore.Qt.CheckState.Unchecked,
                        None: PySide.QtCore.Qt.CheckState.PartiallyChecked}


def _bool_to_checkstate(b):
    """
    Convert a python object into a Qt CheckState.
    Returns Checked for True, Unchecked for False,
    and PartiallyChecked for anything else
    >>> _bool_to_checkstate(True)
    PySide.QtCore.Qt.CheckState.Checked
    >>> _bool_to_checkstate(False)
    PySide.QtCore.Qt.CheckState.Unchecked
    >>> _bool_to_checkstate(None)
    PySide.QtCore.Qt.CheckState.PartiallyChecked
    >>> _bool_to_checkstate(34)
    PySide.QtCore.Qt.CheckState.PartiallyChecked
    """
    try:
        return _bool_checkstate_map[b]
    except KeyError:
        # Unknown value: treat it as indeterminate.
        return _bool_checkstate_map[None]
def _checkstate_to_bool(cs):
    """
    Convert a Qt CheckState into a python bool or None.
    Returns True for Checked, False for Unchecked, None otherwise.
    >>> _checkstate_to_bool(PySide.QtCore.Qt.CheckState.Checked)
    True
    >>> _checkstate_to_bool(PySide.QtCore.Qt.CheckState.Unchecked)
    False
    >>> _checkstate_to_bool(PySide.QtCore.Qt.CheckState.PartiallyChecked) is None
    True
    """
    # Reverse lookup in the shared bool <-> CheckState mapping.
    for state, qt_state in _bool_checkstate_map.items():
        if qt_state == cs:
            return state
    return None
class Framework(FrameworkBase, PySide.QtCore.QObject):
    """
    Qt Framework class
    We derive from QObject to use signals, allowing us to implement
    the run_on_ui_thread function by adding the function to a Queue,
    emitting a signal and having ourselves connected to that signal.
    The code for the received signal is in the UI thread, so we then
    call any functions in the queue.
    """
    # Emitted after a callable is queued; its slot drains the queue on
    # the UI thread.
    _queue_updated = PySide.QtCore.Signal()

    def __init__(self):
        PySide.QtCore.QObject.__init__(self)
        self.q = Queue()  # thread-safe FIFO of callables to run on the UI thread
        self._queue_updated.connect(self.on_queue_updated)
        self.app = PySide.QtGui.QApplication("Qt")
        self.main = PySide.QtGui.QMainWindow()
        self.main.setDockOptions(self.main.AllowNestedDocks | self.main.AllowTabbedDocks)
        # Widgets the user is currently editing; used to suppress
        # model->widget updates and avoid feedback loops.
        self.changing_widgets = []

    def close(self):
        """Quit the Qt application."""
        self.app.quit()

    def on_queue_updated(self):
        """Slot executed on the UI thread: run every queued callable."""
        while not self.q.empty():
            f = self.q.get()
            f()

    def run_on_ui_thread(self, f):
        """Schedule callable *f* to be executed on the UI thread."""
        self.q.put(f)
        self._queue_updated.emit()

    def get_main_window(self):
        """Return the main QMainWindow."""
        return self.main

    def get_filename(self, mode):
        """Show a file dialog: open-style when mode=="load", save-style otherwise."""
        if mode=="load":
            return PySide.QtGui.QFileDialog.getOpenFileName()
        else:
            return PySide.QtGui.QFileDialog.getSaveFileName()

    def get_widgets_for_method(self, method):
        """
        Return a list of (text, widget) tuples
        One widget is created for each UI decoration present on *method*
        (_slider, _button, _combobox, _textbox, _checkbox).
        """
        ret = []
        # Optional decorator hook: returns the list of callbacks notified
        # when the underlying model value changes.
        listenerfunc = getattr(method, "listeners", None)
        method_name = method.__func__.__name__
        def add_widget(name, widget, found_attr, update_widget=None):
            # Register (name, widget); wire model->widget updates if given.
            if name is None:
                name = method_name
            if update_widget:
                # we wrap the update function in a check to make sure we're
                #not in the middle of changing the control
                update_widget = updating_widget(widget, update_widget)
                # we subscribe to changes from any listeners
                if listenerfunc:
                    listenerfunc(method.im_self).append(update_widget)
                # if a get func is supplied we use it to initialize the widget
                if found_attr.get("getfunc"):
                    curval=found_attr.get("getfunc")(method.im_self)
                    update_widget(curval)
            ret.append((name, widget))
        def widget_changing(widget, func):
            # we wrap change func so that we know which UI elements are changing
            # NB we can change a textedit which can change a slider. We want to
            # ignore the slider value and the text value, so we need a list of
            # changing widgets
            def setter(*args):
                self.changing_widgets.append(widget)
                try:
                    ret = func(*args)
                finally:
                    self.changing_widgets.remove(widget)
                return ret
            return setter
        def updating_widget(widget, func):
            # Drop model->widget updates while the widget itself is the
            # source of the change.
            def updater(*args):
                if widget not in self.changing_widgets:
                    return func(*args)
            return updater
        if hasattr(method, "_slider"):
            widget = PySide.QtGui.QSlider(PySide.QtCore.Qt.Orientation.Horizontal)
            widget.setMaximum(method._slider["maximum"])
            widget.setMinimum(method._slider["minimum"])
            # NOTE(review): x / scale is integer division on Python 2 when
            # "scale" is an int — confirm scale is meant to be a float.
            widget.valueChanged.connect(widget_changing(widget,
                lambda x, method=method: method(x / method._slider["scale"])))
            update_widget = lambda newv, method=method, widget=widget: widget.setValue(newv * method._slider["scale"])
            add_widget(None, widget, method._slider, update_widget)
        if hasattr(method, "_button"):
            widget = PySide.QtGui.QPushButton(method_name)
            # NOTE(review): QPushButton.clicked emits a "checked" bool; if Qt
            # passes it, it overrides the bound "method" default — confirm.
            widget.clicked.connect(lambda method=method: method())
            add_widget("", widget, method._button)
        if hasattr(method, "_combobox"):
            widget = PySide.QtGui.QComboBox()
            widget.addItems(map(str, method._combobox["options"]))
            widget.currentIndexChanged.connect(widget_changing(widget,
                lambda x, method=method: method(method._combobox["options"][x])))
            update_widget = lambda newv, method=method, widget=widget: widget.setCurrentIndex(method._combobox["options"].index(newv))
            add_widget(None, widget, method._combobox, update_widget)
        if hasattr(method, "_textbox"):
            widget = PySide.QtGui.QLineEdit()
            widget.textEdited.connect(widget_changing(widget,
                lambda x, method=method: method(x)))
            update_widget = lambda newv, widget=widget: widget.setText(str(newv))
            add_widget(None, widget, method._textbox, update_widget)
        if hasattr(method, "_checkbox"):
            widget = PySide.QtGui.QCheckBox()
            widget.stateChanged.connect(widget_changing(widget,
                lambda x, method=method: method(_checkstate_to_bool(x))))
            update_widget = lambda newv, widget=widget: widget.setCheckState(_bool_to_checkstate(newv))
            add_widget(None, widget, method._checkbox, update_widget)
        return ret

    def get_obj_widget(self, obj):
        """Build a QDockWidget containing one form row per decorated
        method of *obj*."""
        layout = PySide.QtGui.QFormLayout()
        for p in dir(obj):
            v = getattr(obj, p)
            if not isinstance(v, types.MethodType):
                continue
            widgets = self.get_widgets_for_method(v)
            for name, widget in widgets:
                layout.addRow(name, widget)
        d=PySide.QtGui.QDockWidget(obj.__class__.__name__)
        d.setWidget(PySide.QtGui.QWidget())
        d.widget().setLayout(layout)
        return d

    def display_widgets(self, ws):
        """Dock every widget in *ws* and run the Qt event loop (blocks)."""
        for w in ws:
            self.main.addDockWidget(PySide.QtCore.Qt.LeftDockWidgetArea, w)
        self.main.show()
        self.app.exec_()
|
Outlook: The Bearcats don't have a ton of depth, but they do have speed. That's especially true of junior Ayrianna Smith, who qualified for the 2015 MHSAA Division 1 Finals in the 100-meter dash as well as the 400-meter relay along with Karen Harris and Erion Traylor.
"We have been excited to be outside since the official start of practice," Armock said. "The girls are working harder to achieve higher goals than they did last season. We would like to see more girls come out for the team, but are very excited the girls we have. Speed will be good once again for our girls. We are looking to find the right mix right now for our relay teams and trying to fit some other pieces of the puzzle in other areas of our team... Getting back to the state meet is a goal for many of our female runners."
Outlook: With over 50 athletes on the squad, the defending Interstate Eight Conference champion Beavers have high hopes for 2016. Junior Charley Andrews leads the team after winning back-to-back state championships in the high jump while also running on the 2015 state championship 1,600-meter relay team along with fellow returner Catie Scott.
"Our team returns several top competitors from last year's league championship team and a couple of new faces that we expect to score points," Renner said. "Several of the girls put in a lot of work over the fall and winter and have looked good so far. Our goal is to compete for a league championship and to have a good showing at the end of the season."
Outlook: The Spartans have an infusion of youth to help the team try to defend its SMAC championship, led by returning Division 1 state qualifiers Jayden Edgerson, Alexis Williams-Edgerson, Maggie Farrell, Taylar Coyer, Jakayla Potter, Meggie Riegle, Kalista Hubbart and Logann Haluszka.
"We are a young team with key leaders who will help to guide the underclassmen in the right direction," Pryor said. "We should be a strong team within the league and hope to get several athletes to the state meet."
Outlook: After finishing in the bottom tier of the Interstate Eight Conference a year ago, the Panthers hope to make a big leap forward in the conference standings, led by returning Division 2 state qualifier Arabia Bacon.
"Our girls team is promising with some very good individual talent. We have a good mixture of inexperienced and experienced athletes," McKire said. "Our main goal for this year is to get some of our very talented young athletes to experience success at the regional level to advance to state competition."
Outlook: The Fighting Tigers are low on numbers, but not on talent, as five of the 13 athletes on the squad were 2015 state qualifiers - including a trio of All-State performers Taylor Pessetti, Riley Rutherford and Ava Strenge.
"2016 is going to be an exciting year to watch the Lady Tigers from St. Phil," VanderPol said. "The team will be anchored by Ava Strenge and Julia Slattery in the distance events, and Meagan Casterline, Taylor Pessetti, and Riley Rutherford in the sprints/middle distance. The team is also very excited to see what contributions Madi Elliot (freshman, distance), and Maya Segovia (freshman, sprints/high jump), can add to the team. The team did not lose any major point earners from last year and our hope is that the new girls on the team will be able to fill the gaps our lineup had last year... We expect to compete for both division and conference championships, as well as pushing to win a regional title, something that hasn't been done in a long time at St. Phil. In June, we expect to surprise a lot of people with how well we do at the state finals."
Battle Creek was well-represented during the 2015 Michigan High School Athletic Association Finals in girls track and field, as all five city schools had athletes competing on the final day of the season.
And with state qualifiers back in the fold for Battle Creek Central, Harper Creek, Lakeview, Pennfield and St. Philip, the 2016 season is shaping up to be a memorable one for the Cereal City.
Among those state qualifiers was Harper Creek junior Charley Andrews, who returns to defend her two Division 2 state titles in the high jump as well as a state crown in the 1,600-meter relay along with fellow returner Catie Scott.
Battle Creek Central is led by 2015 All-City Meet MVP Ayrianna Smith, who is back on the track following a knee injury she suffered playing volleyball this past fall.
Lakeview sent nine athletes to the 2015 Division 1 State Finals, and eight have returned to pick up where they left off a season ago.
Pennfield’s Arabia Bacon returns after she broke on to the scene as a freshman, qualifying for the Division 2 Finals in both the 100 hurdles and 300 hurdles.
St. Philip has five returning state qualifiers, led by junior Ava Strenge – the 2014 Division 4 cross country state champion.
Outlook: The Bearcats don’t have a ton of depth, but they do have speed. That’s especially true of junior Ayrianna Smith, who qualified for the 2015 MHSAA Division 1 Finals in the 100-meter dash as well as the 400-meter relay along with Karen Harris and Erion Traylor.
“We are a young team with key leaders who will help to guide the underclassmen in the right direction,” Pryor said. “We should be a strong team within the league and hope to get several athletes to the state meet.”
Outlook: The Fighting Tigers are low on numbers, but not on talent, as five of the 13 athletes on the squad were 2015 state qualifiers – including a trio of All-State performers Taylor Pessetti, Riley Rutherford and Ava Strenge.
|
import random
from nltk.tokenize import wordpunct_tokenize
from collections import OrderedDict
from trainbot import Trainbot
import chatbot_brain
# Registry mapping a display name to a (function, docstring) pair,
# in registration order.
brain_dict = OrderedDict()


def add_func_to_dict(name=None):
    """Decorator factory that registers the decorated function in
    ``brain_dict``.

    The registry key is *name* when given, otherwise the function's own
    ``__name__``; the stored value is ``(func, func.__doc__)``. The
    function itself is returned unchanged.
    """
    def register(func):
        key = func.__name__ if name is None else name
        brain_dict[key] = func, func.__doc__
        return func
    return register
@add_func_to_dict("Bigram Brain")
def _create_bi_chains(chatbot_brain, seeds, size=200):
u"""Return list of Markov-Chain generated strings where each word
added onto the sentence is selected solely from the probability
of it following the given last word in the training data."""
print "the seeds are: " + str(seeds)
candidates = []
while len(candidates) < size:
seed = str(chatbot_brain.i_filter_random(seeds))
candidate = [seed]
done = False
count = 0
while not done:
count += 1
try:
next_word = random.choice(chatbot_brain.bi_lexicon[seed])
candidate.append(next_word)
seed = next_word
except KeyError:
candidates.append(" ".join(candidate))
done = True
if next_word in chatbot_brain.stop_puncts:
candidates.append(" ".join(candidate))
done = True
if count > 75:
done = True
return candidates
@add_func_to_dict("Trigram Brain")
def _create_chains(chatbot_brain, seeds, size=200):
u"""Return list of Markov-Chain generated strings where each word
added onto the sentence is selected solely from the probability
of it following the given last two words in the training data."""
print "the seeds are: " + str(seeds)
candidates = []
while len(candidates) < size:
seed = str(chatbot_brain.i_filter_random(seeds))
pair = str(chatbot_brain._pair_seed(seed))
w_1 = pair[0]
w_2 = pair[1]
next_word = ""
word_1, word_2 = w_1, w_2
candidate = [word_1, word_2]
pair = "{} {}".format(word_1, word_2)
done = False
while not done:
try:
next_word = random.choice(chatbot_brain.tri_lexicon[pair])
candidate.append(next_word)
word_1, word_2 = word_2, next_word
pair = "{} {}".format(word_1, word_2)
except KeyError:
candidates.append(" ".join(candidate))
done = True
if next_word in chatbot_brain.stop_puncts:
candidates.append(" ".join(candidate))
done = True
return candidates
|
Developing nuclear power as part of the nation's long-term energy strategy has been incorporated into major national projects, the digital transformation has played an important role in order to improve the safety of NPP operation and reduce the cost of NPP design, construction, operation and maintenance. With 40 speakers and over 250 senior executives expected to attend, NITF2019 will target boosting the smart and digital transformation of the nuclear power industry through the showcase of the latest information technologies and solutions, sharing the best industry practices. It is the best opportunity to network and to learn the IT solutions from leading figures from China, U.S., France, Germany and South Korea in the fields of digital design& engineering, operation & maintenance, data protection as well as cyber security.
State Nuclear Power Demonstration Plant Co., Ltd.
Guangxi Fangchenggang Nuclear Power Co., Ltd.
Hainan Nuclear Power Co., Ltd.
Yangjiang Nuclear Power Co., Ltd.
Liaoning Hongyanhe Nuclear Power Co., Ltd.
China Nuclear Industry 23rd Construction Co., Ltd.
Sanmen Nuclear Power Co., Ltd.
|
#!/usr/bin/python
import sys, getopt
def main(argv):
inputfile = ''
outputfile = ''
count = 0
try:
opts, args = getopt.getopt(argv,"hi:o:",["ifile=","ofile="])
except getopt.GetoptError:
print 'test.py -i <inputfile> -o <outputfile>'
sys.exit(2)
for opt, arg in opts:
if opt == '-h':
print 'm180sanify.py -i <inputfile> -o <outputfile>'
sys.exit()
elif opt in ("-i", "--ifile"):
inputfile = arg
elif opt in ("-o", "--ofile"):
outputfile = arg
if inputfile == '' or outputfile == '':
sys.exit()
print 'Input file is ', inputfile
print 'Output file is ', outputfile
f1 = open(inputfile, 'r')
f2 = open(outputfile, 'w')
for line in f1:
if line in ("T0\n", "T1\n"):
f2.write(line.replace('T0', 'M108 T0').replace('T1', 'M108 T1'))
count += 1
else:
f2.write(line)
f1.close()
f2.close()
print 'Replaced ',count,' tool change calls.'
# Run only when executed as a script (not on import).
if __name__ == "__main__":
    main(sys.argv[1:])
|
This past summer, after a South African carpenter open-published a design for a 3D printable “robohand”, I created a Google+ community to help 3D printing enthusiasts “develop a distributed pay-it-forward network for design, customization, and fabrication of 3D-printed prosthetics”.
Every day brings another inspirational story or innovation. I may start chronicling them here. Anyway, here’s what happened a few days ago.
3 minutes later, a group member offers to help print, but asks for design help.
2 hours later (what took so long?) a teenager —one of our most active and accomplished contributors—steps up to the plate.
Within minutes, the new team starts arranging a meeting. They’ll meet over video this weekend.
|
# -*- coding: utf-8 -*-
from __future__ import (absolute_import, division, print_function,
unicode_literals, with_statement)
from chemcoord.cartesian_coordinates._cartesian_class_core import CartesianCore
from chemcoord.cartesian_coordinates.point_group import PointGroupOperations
class CartesianSymmetry(CartesianCore):
    """Mixin adding point-group symmetry functionality to Cartesian
    coordinates; the actual analysis is delegated to pymatgen."""

    def _get_point_group_analyzer(self, tolerance=0.3):
        # Lazy import so pymatgen is only required when symmetry
        # features are actually used.
        from pymatgen.symmetry.analyzer import PointGroupAnalyzer
        return PointGroupAnalyzer(self.get_pymatgen_molecule(),
                                  tolerance=tolerance)

    def _convert_eq(self, eq):
        """WORKS INPLACE on eq

        Rename pymatgen's positional atom indices (0..n-1) in ``eq`` to
        this molecule's own index labels, and convert the optional
        ``eq['sym_mol']`` back into numeric form.
        """
        rename = dict(enumerate(self.index))
        eq['eq_sets'] = {rename[k]: {rename[x] for x in v}
                         for k, v in eq['eq_sets'].items()}
        eq['sym_ops'] = {rename[k]: {rename[x]: v[x] for x in v}
                         for k, v in eq['sym_ops'].items()}
        try:
            sym_mol = self.from_pymatgen_molecule(eq['sym_mol'])
            sym_mol.index = self.index
            eq['sym_mol'] = sym_mol._to_numeric()
        except KeyError:
            # 'sym_mol' is only present in results of symmetrize().
            pass

    def get_pointgroup(self, tolerance=0.3):
        """Returns a PointGroup object for the molecule.

        Args:
            tolerance (float): Tolerance to generate the full set of symmetry
                operations.

        Returns:
            :class:`~PointGroupOperations`
        """
        PA = self._get_point_group_analyzer(tolerance=tolerance)
        return PointGroupOperations(PA.sch_symbol, PA.symmops)

    def get_equivalent_atoms(self, tolerance=0.3):
        """Returns sets of equivalent atoms with symmetry operations

        Args:
            tolerance (float): Tolerance to generate the full set of symmetry
                operations.

        Returns:
            dict: The returned dictionary has two possible keys:

            ``eq_sets``:
            A dictionary of indices mapping to sets of indices,
            each key maps to indices of all equivalent atoms.
            The keys are guaranteed to be not equivalent.

            ``sym_ops``:
            Twofold nested dictionary.
            ``operations[i][j]`` gives the symmetry operation
            that maps atom ``i`` unto ``j``.
        """
        PA = self._get_point_group_analyzer(tolerance=tolerance)
        eq = PA.get_equivalent_atoms()
        # Translate pymatgen's positional indices to our own labels.
        self._convert_eq(eq)
        return eq

    def symmetrize(self, max_n=10, tolerance=0.3, epsilon=1e-3):
        """Returns a symmetrized molecule

        The equivalent atoms obtained via
        :meth:`~Cartesian.get_equivalent_atoms`
        are rotated, mirrored... unto one position.
        Then the average position is calculated.
        The average position is rotated, mirrored... back with the inverse
        of the previous symmetry operations, which gives the
        symmetrized molecule.
        This operation is repeated iteratively ``max_n`` times at maximum
        until the difference between subsequently symmetrized structures is
        smaller than ``epsilon``.

        Args:
            max_n (int): Maximum number of iterations.
            tolerance (float): Tolerance for detecting symmetry.
                Gets passed as Argument into
                :class:`~pymatgen.analyzer.symmetry.PointGroupAnalyzer`.
            epsilon (float): If the elementwise absolute difference of two
                subsequently symmetrized structures is smaller epsilon,
                the iteration stops before ``max_n`` is reached.

        Returns:
            dict: The returned dictionary has three possible keys:

            ``sym_mol``:
            A symmetrized molecule :class:`~Cartesian`

            ``eq_sets``:
            A dictionary of indices mapping to sets of indices,
            each key maps to indices of all equivalent atoms.
            The keys are guaranteed to be not symmetry-equivalent.

            ``sym_ops``:
            Twofold nested dictionary.
            ``operations[i][j]`` gives the symmetry operation
            that maps atom ``i`` unto ``j``.
        """
        from pymatgen.symmetry.analyzer import iterative_symmetrize
        mg_mol = self.get_pymatgen_molecule()
        eq = iterative_symmetrize(mg_mol, max_n=max_n, tolerance=tolerance,
                                  epsilon=epsilon)
        self._convert_eq(eq)
        return eq

    def get_asymmetric_unit(self, eq=None):
        """Return an AsymmetricUnitCartesian built from one representative
        atom per equivalence class.

        Args:
            eq (dict): Equivalence information as returned by
                :meth:`get_equivalent_atoms`; computed when omitted.
        """
        eq = self.get_equivalent_atoms() if (eq is None) else eq
        new_frame = self.loc[eq['eq_sets'].keys(), :]._frame
        # NOTE(review): local import — presumably avoids a circular import
        # at module load time; confirm.
        from chemcoord.cartesian_coordinates.asymmetric_unit_cartesian_class \
            import AsymmetricUnitCartesian
        return AsymmetricUnitCartesian(new_frame, _metadata={'eq': eq})
|
Rob and Jason are joined by Andrew Pardoe to discuss Visual C++ conformance progress as well as experimental features like Modules.
Andrew started working at Microsoft in 2002. He worked for the C++ team for exactly five years, first on testing the Itanium optimizer and then on the Phoenix compiler platform. He left in 2007 to become a PM on the CLR team (the C# runtime). Andrew left that job about two years ago and through the magic of corporate reorgs ended up as the C++ compiler PM.
In his role at Microsoft Andrew pays attention to pretty much everything without a GUI: the compiler front end/parser, code analysis, and a little bit to the optimizer. He also owns the tools acquisition story—such as the VC++ Build Tools SKU and updating to latest daily drops through NuGet—and Clang/C2. The Clang/C2 work is what ties Andrew into the Islandwood team, and the code analysis work focuses mostly on the C++ Core Guidelines checkers.
|
#!/usr/bin/env python
#
import glob
import random
import test
import replika
import replika.assets
from replika.ingame import action
# Static background image asset.
background = replika.assets.image('../assets/background.jpg')
# Looping walk animation built from the sorted frame files.
# NOTE(review): 'woman' appears unused below — woman_graphics rebuilds
# the same loops; confirm this module-level asset is still needed.
woman = replika.assets.Loop(
    replika.assets.images(sorted(glob.glob('../assets/walk_*.png'))))
class Woman(replika.ingame.Puppet):
    """Trivial Puppet subclass used to verify that user-defined classes
    can inherit from Puppet() and are instantiated by scene.add_asset()."""

    def __init__(self, puppet_asset, layer, name, position=None,
                 distance=5.0):
        # Forward all arguments unchanged to Puppet.__init__.
        super(Woman, self).__init__(puppet_asset, layer, name,
                                    position, distance)
game = replika.new_game()
scene = game.new_scene(auto_switch=True)
scene.add_asset(background)
# Puppet asset with one animation loop per state; 'move_left' reuses
# the walk frames flipped horizontally.
woman_graphics = replika.assets.Puppet({
    'initial': replika.assets.Loop(
        replika.assets.images(sorted(glob.glob('../assets/walk_*.png')))),
    'move_right': replika.assets.Loop(
        replika.assets.images(sorted(glob.glob('../assets/walk_*.png')))),
    'move_left': replika.assets.Loop(
        replika.assets.images(sorted(glob.glob('../assets/walk_*.png')),
                              horizontal_flip=True))
})
# Instances spawned from this asset must be of our subclass.
woman_graphics.behaviour = Woman
test.start('Puppet() inherit test')
while game.is_running:
    # Spawn one puppet per frame at a random on-screen position.
    position = (random.randint(-512, 512), random.randint(-384, 384))
    try:
        new_woman = scene.add_asset(woman_graphics, position=position)
        if not isinstance(new_woman, Woman):
            test.failed('Invalid type of InGame() object')
    except:
        # NOTE(review): bare except also swallows SystemExit and
        # KeyboardInterrupt; 'except Exception' would be safer — confirm.
        test.failed('Cannot inherit from Puppet() objects')
    # Stop the test after 50 frames.
    if game.frame >= 50:
        game.quit()
    game.update()
test.ok()
|
As single members of the Church of Jesus Christ of Latter Day Saints, it is preferable to date fellow active members that will hopefully, ideally, lead to an eventual temple marriage. However, unless you live in an area where there are many fellow local single members—Utah, Idaho, Arizona for example—it may be difficult to find, date, and marry an active member. Therefore, unless you are able to move to such an area, many single members have become willing to expand their dating zone beyond driving distance to plane distance.
1) Budget: Plan your trips early. Figure out the most affordable plane tickets or driving options. If needed, you might need to limit or cut out other expenditures to support the monetary commitment of long-distance dating. However, in case plans change or there is a break up, you may consider buying insurance on the plane tickets so you can get refunds if needed.
2) Have regular communication when apart: As long as both partners are feeling strongly into the relationship, try to maintain some form of communication with each other daily to compensate for not physically being able to see each other. Text, email, Facetime or Skype, even old-fashioned snail mail letters are all options. Regular communication will help keep bonds strong and growing, as well as help lessen the natural insecurities and even paranoia that can happen when apart. Technology today allows for a far greater ability to stay connected long distance than ever before. Take advantage of your options.
3) Regular Visits: If you are able to visit each other one weekend a month at least, that would be ideal. However, budget issues and other time issues/responsibilities (kids, work, etc) may make this difficult to achieve. So, if you can’t visit each other at least once a month, please at least try to visit every other month. If you are not able to physically meet every other month, it is my opinion that that is not enough face-to-face time together to really nourish and nurture a long-distance relationship. In such cases I recommend finding someone else to date. However, of course, this is not a hard and fast rule, and individual situations may vary.
5) Be Honest: Some people in long distance relationships are “open”, meaning, it is understood that either side can date other local singles while dating each other as well. Other long-distance relationships are “committed”, meaning, both sides agree to only date each other. The important thing here is that whatever your arrangement is, be open and clear about it. Communicate and decide together. Even more important here is to be honest about what you are doing. If you agree to date others, there is no need to discuss dating specifics with others unless at any point you decide to be committed to the local partner—at which time there is a need to break up. Conversely, if you agree you are a committed couple, stay faithful and do not date others. Broken trust in a long-distance relationship is difficult to recover from and may upend the entire thing.
|
# ----------------------------------------------------------------------------
# cocos2d "update" plugin
#
# Author: Luis Parravicini
#
# License: MIT
# ----------------------------------------------------------------------------
'''
"update" plugin for cocos2d command line tool
'''
__docformat__ = 'restructuredtext'
import re
import os
import cocos2d
import httplib
class CCPluginUpdate(cocos2d.CCPlugin):
    """cocos2d command-line plugin that checks whether a newer
    cocos2d-x release is available for download."""

    @staticmethod
    def plugin_name():
        # Name used to invoke the plugin: `cocos2d update`.
        return "update"

    @staticmethod
    def brief_description():
        return "checks if there's an update available"

    def _check_versions(self):
        """Compare the published version with the local one."""
        latest_version = self._get_latest_version()
        # NOTE(review): hard-coded version; should track the actual
        # installed release — confirm.
        current_version = '2.1.0'
        #FIXME check version numbers with verlib? https://wiki.python.org/moin/Distutils/VersionComparison

    def _get_latest_version(self):
        """Scrape the download page and return the latest version string.

        Raises:
            cocos2d.CCPluginError: on an unexpected HTTP status, or when
                no version number can be found in the page.
        """
        cocos2d.Logging.info("obtaining latest version number")
        conn = httplib.HTTPConnection('cocos2d-x.org', timeout=10)
        try:
            conn.request('GET', '/download')
            res = conn.getresponse()
            if res.status != httplib.OK:
                raise cocos2d.CCPluginError("Unexpected response status (%d)" % res.status)
            data = res.read()
            #FIXME: quick and dirty (and error prone) way to extract the latest version
            #from the html page
            match = re.search('href="http://cdn.cocos2d-x.org/cocos2d-x-(.*?).zip"', data)
            if match is None:
                raise cocos2d.CCPluginError("Couldn't extract latest version from site")
            return match.group(1)
        finally:
            # Always release the connection, even on error.
            conn.close()

    def run(self, argv, dependencies):
        """Plugin entry point invoked by the cocos2d command-line tool."""
        self.parse_args(argv)
        self._check_versions()
|
This is the party spot in Shanghai. A hip and stylish night club, perched on the 24th floor high above Shanghai, with dramatic floor-to-ceiling windows offering spectacular 360° views of the Bund, Pudong, Nanjing Road, People's Square and Huaihai Road. The first thing you notice upon arriving at M1NT Club is the jaw-dropping 17-metre long shark tank, complete with more than 20 black and white reef-tip sharks. Entranceways don't get any better than that.
On my last trip to Shanghai a friend of mine brought me to probably one of the best clubs in Shanghai. M1NT is really classy and is a great place to hang out. Great fun and music the entire night!
|
"""
A canvas class with type and draw feature.
Author: Rong Xiao <akelux@gmail.com>
LICENSE: GPL 2.0
"""
try:
# Python2
import Tkinter as tk
except ImportError:
# Python3
import tkinter as tk
from font_chooser import askChooseFont
from tkColorChooser import askcolor
import tkFileDialog, tkMessageBox
class TypeDraw(tk.Canvas):
    """
    A Canvas variant with predefined bindings for typing and drawing.
    """
    def __init__(self, master=None, cnf={}, **kw):
        # NOTE(review): mutable default `cnf={}` mirrors Tkinter's own
        # signature, but the dict is shared across calls — confirm it is
        # never mutated.
        tk.Canvas.__init__(self, master=master, cnf=cnf, **kw)
        self.mx = -1  # caret x position (-1 until the first click)
        self.my = -1  # caret y position
        self.draw_color = 'black'  # ink colour for text and lines
        self.color = 'white'       # background colour
        self.font = ('Consolas', 16)
        self.line_width = 2
        self.em = 12      # nominal character advance in pixels
        self.saved = True  # False whenever the canvas has unsaved changes
        self.cursor = None  # canvas item id of the blinking caret
        self.blink = False  # True while the caret blink loop is running
        self.stack = [] # created item ids, used for undo (backspace)
        self.bind('<Button-1>', self.catch_mouse)
        self.bind_all('<Key>', self.key_pressed) # have to use bind all
        self.bind('<B1-Motion>', self.draw)
        # self.bind('<B3-Motion>', self.draw)

    def catch_mouse(self, event = None):
        """Move the insertion point to the clicked position."""
        self.mx = event.x
        self.my = event.y
        self.start_blinking()
        # self.root.update()

    def key_pressed(self, event=None):
        """Handle a key press: move the caret, type a character or undo."""
        # print 'event.char:', event.char
        # print "key symbol:", event.keysym
        if len(event.char) != 1: # process combined control keys
            sym = event.keysym
            # if sym == 'Escape':
            #     self.blink = False
            if sym == 'Right':
                self.mx += 1
            elif sym == 'Left':
                self.mx -= 1
            elif sym == 'Up':
                self.my -= 1
            elif sym == 'Down':
                self.my += 1
            return
        o = ord(event.char)
        # print "ord:", o
        widget = None
        if o == 32: # don't draw space
            # NOTE(review): integer division under Python 2 — confirm the
            # truncated 3/4-em advance is intended.
            self.mx = self.mx+3*self.em/4
        elif o == 27: # escape
            self.blink = False
        elif o>32 and o<127:
            # Printable ASCII: draw the character at the caret position.
            widget = self.create_text(self.mx, self.my, text = event.char, font=self.font, fill=self.draw_color)
            self.saved = False
            self.stack.append(widget) # put to stack for undo
            self.mx += self.em # shift after draw a character
            self.start_blinking()
        elif o == 127 or o == 8:
            # Delete/backspace: remove the most recently drawn item.
            self.blink = False
            if self.stack:
                widget = self.stack.pop()
                self.delete(widget)
        # self.root.update()

    def draw(self, event=None):
        """Draw a freehand line segment while the mouse is dragged."""
        # self.stop_blinking()
        self.blink = False
        mx = event.x
        my = event.y
        if self.mx >= 0:
            w = self.create_line(self.mx, self.my, mx, my, width=self.line_width, fill=self.draw_color)
            self.saved = False
            self.stack.append(w)
        self.mx=mx
        self.my=my

    def clear(self, event=None):
        """Erase everything on the canvas."""
        self.delete(tk.ALL)

    def change_color(self,color):
        """Set the ink colour."""
        self.draw_color = color

    def change_linewidth(self,width):
        """Set the freehand line width."""
        self.line_width = width

    def blinking(self):
        """Toggle the caret once and reschedule while blinking is on."""
        if self.cursor == None: # draw cursor
            h = 5*self.em/4
            w = (self.line_width+1)/2
            self.cursor = self.create_rectangle(self.mx-w, self.my-h/2, self.mx + w,self.my + h/2,outline = self.draw_color, fill=self.draw_color)
        else: # hide cursor
            self.delete(self.cursor)
            self.cursor = None
        if self.blink:
            self.after(500, self.blinking)
        elif self.cursor:
            # Blinking was switched off: make sure the caret is removed.
            self.delete(self.cursor)
            self.cursor = None

    def start_blinking(self):
        """Start the caret blink loop if it is not already running."""
        if not self.blink:
            self.blink = True
            self.after(500, self.blinking)

    def choose_font(self):
        """Ask the user for a font; also updates the em advance."""
        self.font, self.em = askChooseFont(self)

    def set_bgcolor(self):
        """Ask for and apply a new background colour."""
        self.color = askcolor(parent=self,
                title='Choose a background color')
        # NOTE(review): askcolor returns an ((r, g, b), name) tuple, so
        # self.color now holds a tuple while __init__ stored a plain
        # string — confirm nothing relies on the string form.
        self.config(bg=self.color[1])

    def set_drawcolor(self):
        """Ask for and apply a new ink colour."""
        self.draw_color = askcolor(parent=self,
                title='Choose a drawing color')[1]

    def save(self):
        """Export the canvas to a user-chosen EPS file.

        Returns True when the canvas is saved (or was already clean)."""
        if not self.saved:
            f = tkFileDialog.asksaveasfilename(parent=self)
            if f:
                if f[-4:] != '.eps':
                    f+='.eps'
                self.postscript(file=f, colormode='color')
                self.saved = True
        return self.saved

    def load(self): # T.B.D.
        """Load an image file onto the canvas (work in progress)."""
        f = tkFileDialog.askopenfilename(parent=self)
        photo = tk.PhotoImage(file=f)
        self.delete(tk.ALL)
        # NOTE(review): create_image normally needs coordinates, and the
        # PhotoImage must be kept referenced or it is garbage collected
        # — confirm before relying on this method.
        self.create_image(image=photo)

    def close(self): # ask for saving before closing
        """Return True when it is OK to close, offering to save first."""
        if not self.saved:
            ok = tkMessageBox.askyesnocancel(parent=self,
                    message="Your scratch has unsaved modifications. Do you want to save the scratch?",
                    title="Save scratch")
            if ok == True:
                return self.save()
            elif ok == None: # cancel: abort closing
                return False
            else: # no: close without saving
                return True
        return True
|
Vice President of India, Shri M. Venkaiah Naidu today inaugurated World Sustainable Development Summit 2019, organized by The Energy and Resources Institute – TERI.
He expressed concern over the impact of climate change on developing countries and called upon all nations to collaborate and cooperate in an equally unparalleled manner to ensure sustainable development.
“Climate change and global warming threaten lives and livelihoods of billions and upsets the delicate balance of nature. It is in its recognition that India is trying its best to include the paradigm of sustainability and environmental conservation in all its development endeavours,” he said.
The vice-president also emphasised the importance of sustainable agriculture as a part of sustainable development and said more efficient systems of irrigation with “more crop per drop” mantra should be used.
“There is a need to make use of the endless possibilities of biotechnology and nano-technology to develop a range of green products, including nano-fertilisers.
“We should move towards more efficient systems of irrigation with ‘more crop per drop’ as our mantra. Greater thrust has to be placed on organic farming and on the use of natural means of pest control,” he said.
Naidu, while kick-starting the summit, said sustainable development is inclusive development which includes sustainable agriculture, sustainable mobility solutions, urbanisation, energy security and clean energy, waste management and efforts in wildlife conservation.
The summit themed ‘Attaining the 2030 Agenda: delivering on our promise’ has been organised by The Energy and Resources Institute (TERI) at the India Habitat Centre, Delhi, from February 11 to 13.
“Sustainable development is a common goal for all world nations, given the unprecedented scale of environmental degradation and its drastic consequences that we have been witnessing,” the vice-president said.
Addressing the summit, Naidu said India’s traditional practices reflected a sustainable lifestyle and the Vedic philosophy of India always emphasized the undeniable connection that human beings share with nature.
The vice-president said every individual must contribute to sustainable development either by turning the ignition off at long traffic stops or by recycling and composting or by cycling to work in congested cities.
“India has set a target of raising its existing 21.54 per cent forest cover to 33 per cent of the total geographical area through aggressive forestation drives,” he said.
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import api, fields, models, _
from odoo.tools import float_is_zero
class PosMakePayment(models.TransientModel):
    """Wizard used to register a payment on a point-of-sale order.

    The wizard is opened with the target ``pos.order`` id in
    ``context['active_id']``; defaults for session, journal and amount
    are derived from that order.
    """
    _name = 'pos.make.payment'
    _description = 'Point of Sale Payment'

    def _default_session(self):
        """Default to the session of the POS order in the context."""
        order_id = self.env.context.get('active_id')
        if not order_id:
            return False
        return self.env['pos.order'].browse(order_id).session_id

    def _default_journal(self):
        """Default to the first journal configured on the order's session."""
        order_id = self.env.context.get('active_id')
        if not order_id:
            return False
        config = self.env['pos.order'].browse(order_id).session_id.config_id
        return config.journal_ids and config.journal_ids.ids[0] or False

    def _default_amount(self):
        """Default to the amount still due on the POS order."""
        order_id = self.env.context.get('active_id')
        if not order_id:
            return False
        order = self.env['pos.order'].browse(order_id)
        return order.amount_total - order.amount_paid

    session_id = fields.Many2one('pos.session', required=True, default=_default_session)
    journal_id = fields.Many2one('account.journal', string='Payment Mode', required=True, default=_default_journal)
    amount = fields.Float(digits=0, required=True, default=_default_amount)
    payment_name = fields.Char(string='Payment Reference')
    payment_date = fields.Date(string='Payment Date', required=True, default=lambda *a: fields.Date.today())

    @api.onchange('session_id')
    def _on_change_session(self):
        """Restrict the selectable journals to those of the chosen session."""
        if not self.session_id:
            return
        journal_ids = self.session_id.config_id.journal_ids.ids
        return {'domain': {'journal_id': [('id', 'in', journal_ids)]}}

    @api.multi
    def check(self):
        """Check the order:
        if the order is not paid: continue payment,
        if the order is paid print ticket.
        """
        self.ensure_one()
        order = self.env['pos.order'].browse(self.env.context.get('active_id', False))
        currency = order.pricelist_id.currency_id
        remaining = order.amount_total - order.amount_paid
        payment_vals = self.read()[0]
        # add_payment expects a 'journal' key holding the journal id.
        payment_vals['journal'] = payment_vals['journal_id'][0]
        if currency:
            payment_vals['amount'] = currency.round(payment_vals['amount'])
        if not float_is_zero(remaining, precision_rounding=currency.rounding or 0.01):
            order.add_payment(payment_vals)
        if order.test_paid():
            # Fully paid: finalize the order and close the wizard.
            order.action_pos_order_paid()
            return {'type': 'ir.actions.act_window_close'}
        # Not fully paid yet: reopen the wizard for the next payment.
        return self.launch_payment()

    def launch_payment(self):
        """Reopen this wizard in a new modal to take another payment."""
        return dict(
            name=_('Payment'),
            view_type='form',
            view_mode='form',
            res_model='pos.make.payment',
            view_id=False,
            target='new',
            views=False,
            type='ir.actions.act_window',
            context=self.env.context,
        )
|
Upper Lakes Foods, Inc. has an immediate opening for a full-time Warehouse Associate, second and third shifts. The Warehouse Associate is responsible for loading and unloading trucks, carts, and pallets and moving products. Restocks the warehouse when shortfalls are observed, and keeps the warehouse clean and safe.
Ability to pass the Lift Truck Operator Training Course test.
Must be able to pass a pre-employment physical, drug screening and background check.
Ability to add, subtract, multiply, divide. Ability to compute rate, ratio, and percent.
|
from genesys.generator._unknown.character import race
import random
class Mark():
    """Base class for a physical skin mark on a generated character.

    The class-level lists are lookup tables for :func:`charGen`; the
    trailing numbers appear to refer to table numbers in the source
    material -- TODO confirm.  NOTE(review): all lists are still empty,
    so ``random.choice`` on them raises ``IndexError`` until populated.
    """
    types = []  # 12: kinds of mark
    places_from = []  # 13: where the mark starts
    places_through = []  # 14: where the mark passes through
    places_to = []  # 15: where the mark ends
    memory_types = []  # 16: what kind of memory the mark leaves
    memory_ofs = []  # 17: what the memory is of
    def __init__(self, name_id):
        # name_id: index of this mark in the roll table built by charGen().
        self.name_id = name_id
class Scar(Mark):
    """A scar; uses all the (empty) default tables inherited from Mark."""
    pass
class Birthmark(Mark):
    """A birthmark; overrides the place tables with its own (empty) lists."""
    places_from = []  # 13
    places_through = []  # 14
    places_to = []  # 15
class Moles(Mark):
    """Moles; overrides the place tables with its own (empty) lists."""
    places_from = []  # 13
    places_through = []  # 14
    places_to = []  # 15
class Frecles(Mark):
    """Freckles.

    NOTE(review): the class name is misspelled (likely meant "Freckles"),
    but renaming it would break charGen(), which instantiates it by this
    name -- fix both together if renaming.
    """
    places_from = []  # 13
    places_through = []  # 14
    places_to = []  # 15
    memory_types = []  # 16
    memory_ofs = []  # 17
class SmoothSkin(Mark):
    """Smooth skin; overrides place and memory tables with (empty) lists."""
    places_from = []  # 13
    places_through = []  # 14
    places_to = []  # 15
    memory_types = []  # 16
    memory_ofs = []  # 17
class SoftSkin(Mark):
    """Soft skin; overrides place and memory tables with (empty) lists."""
    places_from = []  # 13
    places_through = []  # 14
    places_to = []  # 15
    memory_types = []  # 16
    memory_ofs = []  # 17
def charGen():
    """Build a random character description.

    Rolls a race and a skin mark plus several flavour words, then renders
    a four-sentence description joined by newlines.

    Returns:
        str: the rendered character description.

    NOTE(review): the ``names20``..``names28`` lists and the class-level
    word tables on the Mark subclasses are still empty, so
    ``random.choice`` raises ``IndexError`` until they are populated.
    """
    # Mark roll table: 6 scars, 3 birthmarks, moles/freckles/smooth skin,
    # then 10 soft-skin entries.
    marks = [Scar(i) for i in range(6)] + \
            [Birthmark(i) for i in range(6, 9)] + \
            [Moles(9), Frecles(10), SmoothSkin(11), ] + \
            [SoftSkin(12 + i) for i in range(10)]
    names20 = []
    # Race roll table.  NOTE(review): the ids skip 9 and reuse 15
    # (race.Giant(15) vs race.Race(15 + 0)) -- presumably transcription
    # slips from the source tables; confirm before relying on name_id.
    races = [race.Human(i) for i in range(3)] + \
            [race.Elf(i) for i in range(3, 9)] + \
            [race.Gnome(10), ] + \
            [race.Troll(11), race.Orc(12), race.Goblin(13)] + \
            [race.Dwarf(14), race.Giant(15)] + \
            [race.Race(15 + i) for i in range(10)]
    names22 = []
    names23 = []
    names24 = []
    names25 = []
    names26 = []
    names27 = []
    names28 = []
    srace = random.choice(races)
    mark = random.choice(marks)
    # BUG FIX: was ``mark.places_form`` (AttributeError) -- the attribute
    # defined on Mark and its subclasses is ``places_from``.
    mark_from = random.choice(mark.places_from)
    mark_through = random.choice(mark.places_through)
    mark_to = random.choice(mark.places_to)
    memory_type = random.choice(mark.memory_types)
    memory_of = random.choice(mark.memory_ofs)
    first_name = random.choice(srace.first_name)
    last_name = random.choice(srace.last_name)
    random20 = random.choice(names20)
    random22 = random.choice(names22)
    random23 = random.choice(names23)
    random24 = random.choice(names24)
    random25 = random.choice(names25)
    random26 = random.choice(names26)
    # Re-roll until distinct from random25.
    # NOTE(review): loops forever if names26 only contains random25.
    while random26 == random25:
        random26 = random.choice(names26)
    random27 = random.choice(names27)
    random28 = random.choice(names28)
    head = "%s a %s. %s over %s" % (
        srace.hair,
        srace.face,
        srace.eyes,
        srace.promise,
    )
    name2 = "%s %s %s %s leaves %s of %s." % (
        mark,
        mark_from,
        mark_through,
        mark_to,
        memory_type,
        memory_of,
    )
    name3 = "This is the face of %s %s, a true %s among %s. He stands %s others, despite his %s frame." % (
        first_name,
        last_name,
        random20,
        srace,
        random22,
        random23,
    )
    name4 = "There's something %s about him, perhaps it's %s or perhaps it's simply %s. But nonetheless, people tend to %s, while %s." % (
        random24,
        random25,
        random26,
        random27,
        random28,
    )
    return "\n".join([
        head,
        name2,
        name3,
        name4,
    ])
|
Say 'hi' to Frank E. Bolts. This little Halloween favorite is ready for trick or treating! Take him house to house showing him your tricks as you collect your treats. His favorite tricks are somersaults and his favorite candy is... candy corn of course.
|
from quex.frs_py.file_in import error_msg
import quex.core_engine.state_machine.parallelize as parallelize
from quex.core_engine.generator.action_info import PatternActionInfo
import quex.core_engine.state_machine.nfa_to_dfa as nfa_to_dfa
import quex.core_engine.state_machine.hopcroft_minimization as hopcroft
class GeneratorBase:
    """Combine a list of pattern/action pairs into the state machines
    needed for lexer code generation: one combined 'core' forward machine,
    one combined pre-condition machine, and a list of backward input
    position detectors for pseudo-ambiguous post-conditions.

    NOTE: this is Python 2 code (list-returning ``map``/``filter`` and the
    ``-1L`` long literal below); it cannot run unmodified on Python 3.
    """
    def __init__(self, PatternActionPair_List, StateMachineName):
        # Every element must be a PatternActionInfo (a pattern state
        # machine paired with the action code fragment to run on match).
        assert type(PatternActionPair_List) == list
        assert map(lambda elm: elm.__class__ == PatternActionInfo, PatternActionPair_List) \
               == [ True ] * len(PatternActionPair_List)
        self.state_machine_name = StateMachineName
        # -- setup of state machine lists and id lists
        self.__extract_special_lists(PatternActionPair_List)
        # (*) create state (combined) state machines
        #     -- core state machine
        self.sm = self.__create_core_state_machine()
        #     -- pre conditions
        self.pre_context_sm = self.__create_pre_context_state_machine()
        #     -- backward detectors for state machines with forward ambiguous
        #        post-conditions.
        self.papc_backward_detector_state_machine_list = \
                self.__create_backward_input_position_detectors()
    def __extract_special_lists(self, PatternActionPair_List):
        """Split the pattern/action pairs into the member lists used by the
        machine builders: all state machines, the action lookup table, and
        the id lists for pre- and post-conditioned machines."""
        # (0) extract data structures:
        #     -- state machine list: simply a list of all state machines
        #        (the original state machine id is marked as 'origin' inside
        #        'get_state_machine')
        #     -- a map from state machine id to related action (i.e. the code fragment)
        self.state_machine_list = []
        self.action_db = {}
        # -- extract:
        #    -- state machines that are post-conditioned
        self.post_contexted_sm_id_list = []
        #    -- state machines that nore non-trivially pre-conditioned,
        #       i.e. they need a reverse state machine to be verified.
        self.pre_context_sm_id_list = []
        self.pre_context_sm_list = []
        #    -- pre-conditions that are trivial, i.e. it is only checked for
        #       the last character, if it was a particular one or not.
        self.begin_of_line_condition_f = False
        # [NOT IMPLEMENTED YET]
        # # trivial_pre_context_dict = {}  # map: state machine id --> character code(s)
        for action_info in PatternActionPair_List:
            sm    = action_info.pattern_state_machine()
            sm_id = sm.get_id()
            self.state_machine_list.append(sm)
            # -- register action information under the state machine id, where it belongs.
            self.action_db[sm_id] = action_info
            # -- collect all pre-conditions and make one single state machine out of it
            pre_sm = sm.core().pre_context_sm()
            if pre_sm != None:
                self.pre_context_sm_list.append(pre_sm)
                self.pre_context_sm_id_list.append(pre_sm.get_id())
            if sm.core().pre_context_begin_of_line_f():
                self.begin_of_line_condition_f = True
            # [NOT IMPLEMENTED YET]
            # # -- collect information about trivial (char code) pre-conditions
            # # if sm.get_trivial_pre_context_character_codes() != []:
            # #    trivial_pre_context_dict[sm.get_id()] = sm.get_trivial_pre_context_character_codes()
            # -- collect all ids of post conditioned state machines
            # (-1L: Python 2 long literal meaning 'no post context')
            if sm.core().post_context_id() != -1L:
                self.post_contexted_sm_id_list.append(sm_id)
    def __create_core_state_machine(self):
        """Build the single combined forward DFA from all patterns."""
        # (1) transform all given patterns into a single state machine
        #     (the index of the patterns remain as 'origins' inside the states)
        return self.__get_combined_state_machine(self.state_machine_list)
    def __create_pre_context_state_machine(self):
        """Build the combined pre-condition DFA, or None if there are no
        non-trivial pre-conditions."""
        if self.pre_context_sm_list == []: return None
        # -- add empty actions for the pre-condition terminal states
        for pre_sm in self.pre_context_sm_list:
            self.action_db[pre_sm.get_id()] = PatternActionInfo(pre_sm, "")
        # Origins must NOT be filtered here: every matching pre-condition
        # has to be reported, not only the dominating one.
        return self.__get_combined_state_machine(self.pre_context_sm_list,
                                                 FilterDominatedOriginsF=False)
    def __create_backward_input_position_detectors(self):
        """Collect the backward detector machines of all patterns that carry
        a pseudo-ambiguous post-condition, marking their state origins."""
        # -- find state machines that contain a state flagged with
        #    'pseudo-ambiguous-post-condition'.
        # -- collect all backward detector state machines in a list
        papc_sm_list = []
        for sm in self.state_machine_list:
            papc_sm = sm.core().post_context_backward_input_position_detector_sm()
            # NOTE(review): this calls the accessor a second time instead of
            # testing 'papc_sm' -- behaviorally the same, just redundant.
            if sm.core().post_context_backward_input_position_detector_sm() == None: continue
            papc_sm_list.append(papc_sm)
            # -- code generation 'looks' for origins, so mark them.
            papc_sm.mark_state_origins()
        return papc_sm_list
    def __get_combined_state_machine(self, StateMachine_List, FilterDominatedOriginsF=True):
        """Creates a DFA state machine that incorporates the paralell
        process of all pattern passed as state machines in
        the StateMachine_List. Each origins of each state machine
        are kept in the final state, if it is not dominated.
        Performs:  -- parallelization
                   -- translation from NFA to DFA
                   -- Frank Schaefers Adapted Hopcroft optimization.
        Again: The state machine ids of the original state machines
        are traced through the whole process.
        FilterDominatedOriginsF, if set to False, can disable the filtering
        of dominated origins. This is important for pre-conditions, because,
        all successful patterns need to be reported!
        """
        def __check(Place, sm):
            # Sanity checks run after each transformation stage.
            __check_on_orphan_states(Place, sm)
            __check_on_init_state_not_acceptance(Place, sm)
        def __check_on_orphan_states(Place, sm):
            orphan_state_list = sm.get_orphaned_state_index_list()
            if orphan_state_list == []: return
            # NOTE(review): 'fh' is undefined in this scope -- this call
            # raises NameError if orphan states are ever detected; a file
            # handle (or -1) needs to be supplied here.
            error_msg("After '%s'" % Place + "\n" + \
                      "Orphaned state(s) detected in regular expression (optimization lack).\n" + \
                      "Please, log a defect at the projects website quex.sourceforge.net.\n" + \
                      "Orphan state(s) = " + repr(orphan_state_list) + "\n",
                      fh, DontExitF=True)
        def __check_on_init_state_not_acceptance(Place, sm):
            init_state = sm.get_init_state()
            if init_state.core().is_acceptance():
                error_msg("After '%s'" % Place + "\n" + \
                          "The initial state is 'acceptance'. This should never appear.\n" + \
                          "Please, log a defect at the projects website quex.sourceforge.net.\n")
            if filter(lambda origin: origin.is_acceptance(), init_state.origins().get_list()) != []:
                error_msg("After '%s'" % Place + "\n" + \
                          "Initial state contains an origin that is 'acceptance'. This should never appear.\n" + \
                          "Please, log a defect at the projects website quex.sourceforge.net.\n")
        # (1) mark at each state machine the machine and states as 'original'.
        #
        #     This is necessary to trace in the combined state machine the
        #     pattern that actually matched. Note, that a state machine in
        #     the StateMachine_List represents one possible pattern that can
        #     match the current input.
        #
        map(lambda x: x.mark_state_origins(), StateMachine_List)
        # (2) setup all patterns in paralell
        sm = parallelize.do(StateMachine_List)
        __check("Parallelization", sm)
        # (3) convert the state machine to an DFA (paralellization created an NFA)
        sm = nfa_to_dfa.do(sm)
        __check("NFA to DFA", sm)
        # (4) determine for each state in the DFA what is the dominating original state
        if FilterDominatedOriginsF: sm.filter_dominated_origins()
        __check("Filter Dominated Origins", sm)
        # (5) perform hopcroft optimization
        #     Note, that hopcroft optimization does consider the original acceptance
        #     states when deciding if two state sets are equivalent.
        sm = hopcroft.do(sm)
        __check("Hopcroft Minimization", sm)
        return sm
|
View a YouTube Video of Women's Lunch Place (for Women And Children) below.
Women's Lunch Place provides day shelter for women and women with children in need. The Women’s Lunch Place strives to meet guests’ most basic and immediate needs while coordinating and delivering critical support services designed to help women achieve greater stability and self-sufficiency whenever possible.
Be a volunteer. Call Women's Lunch Place (for Women And Children) at (617) 267-0200 for current volunteer work opportunities.
Posting Comments below is for viewers on this website only. If you have information that can help users regarding Women's Lunch Place (for Women And Children), please provide it below.
|
# Generated by Django 3.1.2 on 2020-11-07 20:00
from decimal import Decimal
import django.db.models.deletion
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated schema migration (Django 3.1.2, 2020-11-07).

    Adds the TrojanNode and TrojanRelayRule models, a trojan
    load-balance port on RelayNode, and extends the node_type choices
    with "trojan".  Generated migrations should not be edited by hand
    beyond comments; the operation bodies below are byte-identical to
    the autogenerated output.
    """

    dependencies = [
        ("sspanel", "0003_auto_20200729_0733"),
    ]

    operations = [
        # New proxy-node model for the Trojan protocol.
        migrations.CreateModel(
            name="TrojanNode",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                ("node_id", models.IntegerField(unique=True)),
                ("level", models.PositiveIntegerField(default=0)),
                ("name", models.CharField(max_length=32, verbose_name="名字")),
                ("info", models.CharField(max_length=1024, verbose_name="节点说明")),
                (
                    "country",
                    models.CharField(
                        choices=[
                            ("US", "美国"),
                            ("CN", "中国"),
                            ("GB", "英国"),
                            ("SG", "新加坡"),
                            ("TW", "台湾"),
                            ("HK", "香港"),
                            ("JP", "日本"),
                            ("FR", "法国"),
                            ("DE", "德国"),
                            ("KR", "韩国"),
                            ("JE", "泽西岛"),
                            ("NZ", "新西兰"),
                            ("MX", "墨西哥"),
                            ("CA", "加拿大"),
                            ("BR", "巴西"),
                            ("CU", "古巴"),
                            ("CZ", "捷克"),
                            ("EG", "埃及"),
                            ("FI", "芬兰"),
                            ("GR", "希腊"),
                            ("GU", "关岛"),
                            ("IS", "冰岛"),
                            ("MO", "澳门"),
                            ("NL", "荷兰"),
                            ("NO", "挪威"),
                            ("PL", "波兰"),
                            ("IT", "意大利"),
                            ("IE", "爱尔兰"),
                            ("AR", "阿根廷"),
                            ("PT", "葡萄牙"),
                            ("AU", "澳大利亚"),
                            ("RU", "俄罗斯联邦"),
                            ("CF", "中非共和国"),
                        ],
                        default="CN",
                        max_length=5,
                        verbose_name="国家",
                    ),
                ),
                (
                    "used_traffic",
                    models.BigIntegerField(default=0, verbose_name="已用流量"),
                ),
                (
                    "total_traffic",
                    models.BigIntegerField(default=1073741824, verbose_name="总流量"),
                ),
                (
                    "enable",
                    models.BooleanField(
                        db_index=True, default=True, verbose_name="是否开启"
                    ),
                ),
                (
                    "enlarge_scale",
                    models.DecimalField(
                        decimal_places=2,
                        default=Decimal("1.0"),
                        max_digits=10,
                        verbose_name="倍率",
                    ),
                ),
                (
                    "ehco_listen_host",
                    models.CharField(
                        blank=True, max_length=64, null=True, verbose_name="隧道监听地址"
                    ),
                ),
                (
                    "ehco_listen_port",
                    models.CharField(
                        blank=True, max_length=64, null=True, verbose_name="隧道监听端口"
                    ),
                ),
                (
                    "ehco_listen_type",
                    models.CharField(
                        choices=[("raw", "raw"), ("wss", "wss"), ("mwss", "mwss")],
                        default="raw",
                        max_length=64,
                        verbose_name="隧道监听类型",
                    ),
                ),
                (
                    "ehco_transport_type",
                    models.CharField(
                        choices=[("raw", "raw"), ("wss", "wss"), ("mwss", "mwss")],
                        default="raw",
                        max_length=64,
                        verbose_name="隧道传输类型",
                    ),
                ),
                (
                    "enable_ehco_lb",
                    models.BooleanField(
                        db_index=True, default=True, verbose_name="是否负载均衡"
                    ),
                ),
                ("server", models.CharField(max_length=128, verbose_name="服务器地址")),
                (
                    "inbound_tag",
                    models.CharField(default="proxy", max_length=64, verbose_name="标签"),
                ),
                (
                    "service_port",
                    models.IntegerField(default=443, verbose_name="服务端端口"),
                ),
                ("client_port", models.IntegerField(default=443, verbose_name="客户端端口")),
                (
                    "listen_host",
                    models.CharField(
                        default="0.0.0.0", max_length=64, verbose_name="本地监听地址"
                    ),
                ),
                (
                    "grpc_host",
                    models.CharField(
                        default="0.0.0.0", max_length=64, verbose_name="grpc地址"
                    ),
                ),
                (
                    "grpc_port",
                    models.CharField(
                        default="8080", max_length=64, verbose_name="grpc端口"
                    ),
                ),
                (
                    "network",
                    models.CharField(default="tcp", max_length=64, verbose_name="连接方式"),
                ),
                (
                    "security",
                    models.CharField(
                        blank=True,
                        default="tls",
                        max_length=64,
                        null=True,
                        verbose_name="加密方式",
                    ),
                ),
                (
                    "alpn",
                    models.CharField(
                        blank=True, max_length=64, null=True, verbose_name="alpn"
                    ),
                ),
                (
                    "certificateFile",
                    models.CharField(
                        blank=True, max_length=64, null=True, verbose_name="crt地址"
                    ),
                ),
                (
                    "keyFile",
                    models.CharField(
                        blank=True, max_length=64, null=True, verbose_name="key地址"
                    ),
                ),
            ],
            options={
                "verbose_name_plural": "Trojan节点",
            },
        ),
        # Optional trojan load-balance port on the relay node.
        migrations.AddField(
            model_name="relaynode",
            name="ehco_trojan_lb_port",
            field=models.IntegerField(
                blank=True,
                help_text="trojan负载均衡端口",
                null=True,
                verbose_name="trojan负载均衡端口",
            ),
        ),
        # Extend node_type choices with "trojan" on the log models.
        migrations.AlterField(
            model_name="nodeonlinelog",
            name="node_type",
            field=models.CharField(
                choices=[("ss", "ss"), ("vmess", "vmess"), ("trojan", "trojan")],
                default="ss",
                max_length=32,
                verbose_name="节点类型",
            ),
        ),
        migrations.AlterField(
            model_name="user",
            name="first_name",
            field=models.CharField(
                blank=True, max_length=150, verbose_name="first name"
            ),
        ),
        migrations.AlterField(
            model_name="usertrafficlog",
            name="node_type",
            field=models.CharField(
                choices=[("ss", "ss"), ("vmess", "vmess"), ("trojan", "trojan")],
                default="ss",
                max_length=32,
                verbose_name="节点类型",
            ),
        ),
        # Relay rule linking a relay node to a trojan node.
        migrations.CreateModel(
            name="TrojanRelayRule",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                ("relay_port", models.CharField(max_length=64, verbose_name="中转端口")),
                (
                    "listen_type",
                    models.CharField(
                        choices=[("raw", "raw"), ("wss", "wss"), ("mwss", "mwss")],
                        default="raw",
                        max_length=64,
                        verbose_name="监听类型",
                    ),
                ),
                (
                    "transport_type",
                    models.CharField(
                        choices=[("raw", "raw"), ("wss", "wss"), ("mwss", "mwss")],
                        default="raw",
                        max_length=64,
                        verbose_name="传输类型",
                    ),
                ),
                (
                    "relay_node",
                    models.ForeignKey(
                        blank=True,
                        null=True,
                        on_delete=django.db.models.deletion.SET_NULL,
                        related_name="trojan_relay_rules",
                        to="sspanel.relaynode",
                        verbose_name="中转节点",
                    ),
                ),
                (
                    "trojan_node",
                    models.ForeignKey(
                        on_delete=django.db.models.deletion.CASCADE,
                        related_name="relay_rules",
                        to="sspanel.trojannode",
                        verbose_name="Trojan节点",
                    ),
                ),
            ],
            options={
                "verbose_name_plural": "Trojan转发规则",
            },
        ),
    ]
|
Online shopping has gained immense popularity over the last few years. Back in the day, online shopping was a concept that people could not even think of. But with the passage of time and advancements in technology, the concept of online shopping has rapidly gained fame. Now, we find a lot more people turning towards the wonders of the internet to shop for the items that they are interested in. Clothing happens to be the most commonly shopped-for item online. Now, online shops are also catering to plus-sized women to fulfill their clothing needs.
To put it in simple words, it is highly recommended for plus sized women to shop online for the benefits that it has to offer. Let us go through a few of the advantages that you can reap by shopping for plus size clothing on the internet.
One of the major reasons why plus-sized women prefer shopping online these days is because it helps them choose from a wider variety of clothes. The fact is that when you go to a traditional retailer, you will find that they have very limited stock for plus-size women. This means that the options available for you will be very restricted. In most cases, the available options are nowhere close to being stylish. On the other hand, online retailers offer an extensive variety of clothes for plus-size women. There are even online shops that cater only to plus-sized individuals, so you are guaranteed to have a wonderful plus-size online shopping experience in the UAE.
The best part about online shopping is that it is extremely convenient. The fact is that when you choose to shop on the internet, there is no longer the need for you to worry about walking down to the local shopping center and walking from shop to shop. You also do not have to worry about parking your car. This is because online shopping makes it possible for you to purchase the items that you are interested in without even stepping out of your bedroom. No matter where you are, be it at home or at the office, you can shop for plus sized clothes as long as you have a laptop or smartphone with an internet connection. Use this link for further information and to get started with shopping for plus size clothing.
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.