import numpy as np
from numpy import cos, sin, pi
from pele.potentials import LJ
from pele.angleaxis import RBTopology, RBSystem, RigidFragment, RBPotentialWrapper
def make_otp():
"""this constructs a single OTP molecule"""
otp = RigidFragment()
otp.add_atom("O", np.array([0.0, -2./3 * np.sin( 7.*pi/24.), 0.0]), 1.)
otp.add_atom("O", np.array([cos( 7.*pi/24.), 1./3. * sin( 7.* pi/24.), 0.0]), 1.)
otp.add_atom("O", np.array([-cos( 7.* pi/24.), 1./3. * sin( 7.*pi/24), 0.0]), 1.)
otp.finalize_setup()
return otp
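# Sanity check of the site geometry above (a sketch, not part of the original
# module): with t = 7*pi/24 the three sites sit at (0, -2/3*sin(t)),
# (cos(t), 1/3*sin(t)) and (-cos(t), 1/3*sin(t)), so both slanted sides have
# length sqrt(cos(t)**2 + sin(t)**2) == 1 while the base is 2*cos(t) ~ 1.22,
# i.e. the rigid isosceles triangle the class docstring below describes.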
class OTPCluster(RBSystem):
"""
This will build a system class for an OTP (Ortho Ter Phenyl) cluster
OTP is a very simple rigid body molecule defined as 3 Lennard-Jones particles
connected in a rigid isosceles triangle
"""
def __init__(self, nmol):
self.nrigid = nmol
super(OTPCluster, self).__init__()
self.setup_params(self.params)
def setup_aatopology(self):
"""this sets up the topology for the whole rigid body system"""
topology = RBTopology()
topology.add_sites([make_otp() for _ in xrange(self.nrigid)])
self.render_scale = 0.2
self.atom_types = topology.get_atomtypes()
self.draw_bonds = []
for i in xrange(self.nrigid):
self.draw_bonds.append((3*i, 3*i+1))
self.draw_bonds.append((3*i, 3*i+2))
topology.finalize_setup()
return topology
def setup_params(self, params):
"""set some system dependent parameters to imrprove algorithm performance"""
params.double_ended_connect.local_connect_params.tsSearchParams.iprint = 10
nebparams = params.double_ended_connect.local_connect_params.NEBparams
nebparams.max_images = 50
nebparams.image_density = 5
nebparams.iter_density = 10.
nebparams.k = 5.
nebparams.reinterpolate = 50
nebparams.NEBquenchParams["iprint"] = 10
tssearch = params.double_ended_connect.local_connect_params.tsSearchParams
tssearch.nsteps_tangent1 = 10
tssearch.nsteps_tangent2 = 30
tssearch.lowestEigenvectorQuenchParams["nsteps"] = 50
tssearch.iprint=1
tssearch.nfail_max = 100
def get_potential(self):
"""construct the rigid body potential"""
try:
return self.pot
except AttributeError:
# construct the potential which will compute the energy and gradient in atomistic (cartesian) coordinates
cartesian_potential = LJ()
# wrap it so it can be used with angle axis coordinates
self.pot = RBPotentialWrapper(self.aatopology.cpp_topology, cartesian_potential)
return self.pot
def load_coords_pymol(self, *args, **kwargs):
import pymol
RBSystem.load_coords_pymol(self, *args, **kwargs)
# draw the spheres slightly smaller
pymol.cmd.set("sphere_scale", value=.25)
def test_bh():
np.random.seed(0)
nmol = 10
system = OTPCluster(nmol)
db = system.create_database()
bh = system.get_basinhopping(db)
bh.run(100)
m1 = db.minima()[0]
print m1.coords
for x in m1.coords:
print "%.12f," % x,
print ""
print m1.energy
def test_gui():
from pele.gui import run_gui
nmol = 5
system = OTPCluster(nmol)
run_gui(system)
if __name__ == "__main__":
test_gui()
|
import urllib
import sickbeard
from sickbeard import logger
from sickbeard.exceptions import ex
try:
import lib.simplejson as json #@UnusedImport
except ImportError:
import json #@Reimport
API_URL = "https://%(username)s:%(secret)s@api.notifo.com/v1/send_notification"
class NotifoNotifier:
def test_notify(self, username, apisecret, title="Test:"):
return self._sendNotifo("This is a test notification from SickBeard", title, username, apisecret)
def _sendNotifo(self, msg, title, username, apisecret, label="SickBeard"):
"""
Sends a message to Notifo using the given authentication information
msg: The string to send to notifo
title: The title of the message
username: The username to send it to
apisecret: The API key for the username
label: The label to use for the message (optional)
Returns: True if the message was delivered, False otherwise
"""
# tidy up the message
msg = msg.strip()
# build up the URL and parameters
apiurl = API_URL % {"username": username, "secret": apisecret}
data = urllib.urlencode({
"title": title,
"label": label,
"msg": msg.encode(sickbeard.SYS_ENCODING)
})
# send the request to notifo
try:
data = urllib.urlopen(apiurl, data)
result = json.load(data)
except ValueError, e:
logger.log(u"Unable to decode JSON: "+repr(data), logger.ERROR)
return False
except IOError, e:
logger.log(u"Error trying to communicate with notifo: "+ex(e), logger.ERROR)
return False
data.close()
# see if it worked
if result["status"] != "success" or result["response_message"] != "OK":
return False
else:
return True
def notify_snatch(self, ep_name, title="Snatched:"):
"""
Send a notification that an episode was snatched
ep_name: The name of the episode that was snatched
title: The title of the notification (optional)
"""
if sickbeard.NOTIFO_NOTIFY_ONSNATCH:
self._notifyNotifo(title, ep_name)
def notify_download(self, ep_name, title="Completed:"):
"""
Send a notification that an episode was downloaded
ep_name: The name of the episode that was downloaded
title: The title of the notification (optional)
"""
if sickbeard.NOTIFO_NOTIFY_ONDOWNLOAD:
self._notifyNotifo(title, ep_name)
def _notifyNotifo(self, title, message, username=None, apisecret=None, force=False):
"""
Send a notifo notification based on the SB settings.
title: The title to send
message: The message to send
username: The username to send it to (optional, default to the username in the config)
apisecret: The API key to use (optional, defaults to the api key in the config)
force: If true then the notification will be sent even if it is disabled in the config (optional)
Returns: True if the message succeeded, false otherwise
"""
if not sickbeard.USE_NOTIFO and not force:
logger.log("Notification for Notifo not enabled, skipping this notification", logger.DEBUG)
return False
if not username:
username = sickbeard.NOTIFO_USERNAME
if not apisecret:
apisecret = sickbeard.NOTIFO_APISECRET
logger.log(u"Sending notification for " + message, logger.DEBUG)
self._sendNotifo(message, title, username, apisecret)
return True
notifier = NotifoNotifier
|
"""
Copyright (c) 2006-2017 sqlmap developers (http://sqlmap.org/)
See the file 'doc/COPYING' for copying permission
"""
try:
import ibm_db_dbi
except ImportError:
pass
import logging
from lib.core.data import conf
from lib.core.data import logger
from lib.core.exception import SqlmapConnectionException
from plugins.generic.connector import Connector as GenericConnector
class Connector(GenericConnector):
"""
Homepage: http://code.google.com/p/ibm-db/
User guide: http://code.google.com/p/ibm-db/wiki/README
API: http://www.python.org/dev/peps/pep-0249/
License: Apache License 2.0
"""
def __init__(self):
GenericConnector.__init__(self)
def connect(self):
self.initConnection()
try:
database = "DRIVER={IBM DB2 ODBC DRIVER};DATABASE=%s;HOSTNAME=%s;PORT=%s;PROTOCOL=TCPIP;" % (self.db, self.hostname, self.port)
self.connector = ibm_db_dbi.connect(database, self.user, self.password)
except ibm_db_dbi.OperationalError, msg:
raise SqlmapConnectionException(msg)
self.initCursor()
self.printConnected()
def fetchall(self):
try:
return self.cursor.fetchall()
except ibm_db_dbi.ProgrammingError, msg:
logger.log(logging.WARN if conf.dbmsHandler else logging.DEBUG, "(remote) %s" % msg[1])
return None
def execute(self, query):
try:
self.cursor.execute(query)
except (ibm_db_dbi.OperationalError, ibm_db_dbi.ProgrammingError), msg:
logger.log(logging.WARN if conf.dbmsHandler else logging.DEBUG, "(remote) %s" % msg[1])
except ibm_db_dbi.InternalError, msg:
raise SqlmapConnectionException(msg[1])
self.connector.commit()
def select(self, query):
self.execute(query)
return self.fetchall()
|
from steam import WebAPI as steamwebapi
from config import SteamAPI
from io import BytesIO
from PIL import Image
from PIL import ImageFont
from PIL import ImageDraw
import time
import aiohttp
import os
import datetime
import discord
async def steam(cmd, message, args):
steamapi = steamwebapi(SteamAPI)
steam_input = ' '.join(args)
# Data Collection Start
response_call = steamapi.call('ISteamUser.ResolveVanityURL', vanityurl=str(steam_input), url_type=1)
try:
response = response_call['response']
userid = response['steamid']
except Exception as e:
await message.channel.send('User Not Found Or Profile Private...')
return
gamecount_call = steamapi.call('IPlayerService.GetOwnedGames', steamid=userid, include_appinfo=False,
include_played_free_games=True, appids_filter=-1)
gamecountnonfree_call = steamapi.call('IPlayerService.GetOwnedGames', steamid=userid, include_appinfo=False,
include_played_free_games=False, appids_filter=-1)
summary_call = steamapi.call('ISteamUser.GetPlayerSummaries', steamids=userid)
summary = summary_call['response']['players'][0]
displayname = str(summary['personaname'])
currentstamp = int(round(time.time()))
creation = summary['timecreated']
lastonline = currentstamp - int(summary['lastlogoff'])
fmt = '%B %d, %Y'
creation = datetime.datetime.fromtimestamp(creation).strftime(fmt)
lastonline = time.strftime('%H:%M:%S', time.gmtime(int(lastonline)))
onlinenow = summary['personastate']
avatar_url = str(summary['avatarfull'])
gamecount = gamecount_call['response']['game_count']
gamecountnonfree = gamecount - gamecountnonfree_call['response']['game_count']
# Data Collection End, Pillow Start
async with aiohttp.ClientSession() as session:
async with session.get(avatar_url) as data:
avatar_raw = await data.read()
with Image.open(BytesIO(avatar_raw)) as avatar:
base = Image.open(cmd.resource('img/base.png'))
overlay = Image.open(cmd.resource('img/overlay.png'))
base.paste(avatar, (0, 0))
base.paste(overlay, (0, 0), overlay)
main_font = cmd.resource('fonts/NotoSansCJKjp-Medium.otf')
font = ImageFont.truetype(main_font, 32)
font2 = ImageFont.truetype(main_font, 23)
imgdraw = ImageDraw.Draw(base)
if len(displayname) > 18:
displayname = displayname[:17] + '...'
imgdraw.text((190, 7), displayname, (255, 255, 255), font=font)
imgdraw.text((190, 45), 'Joined ' + str(creation), (255, 255, 255), font=font2)
imgdraw.text((190, 75), 'Last Logoff: ' + str(lastonline) + ' ago', (255, 255, 255), font=font2)
imgdraw.text((190, 105), 'Has ' + str(gamecount) + ' games', (255, 255, 255), font=font2)
imgdraw.text((190, 135), 'Out of which ' + str(gamecountnonfree) + ' are free', (255, 255, 255), font=font2)
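# The branches below map Steam personastate codes to marker colours; per the
# Steam Web API docs 0=offline, 1=online, 2=busy and 3=away (higher values,
# e.g. snooze, fall through to the default colour).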
if onlinenow == 0:
imgdraw.text((2, 165), '*', (102, 102, 153), font=font)
elif onlinenow == 1:
imgdraw.text((2, 165), '*', (26, 188, 156), font=font)
elif onlinenow == 2:
imgdraw.text((2, 165), '*', (255, 51, 0), font=font)
elif onlinenow == 3:
imgdraw.text((2, 165), '*', (255, 153, 0), font=font)
else:
imgdraw.text((2, 165), '*', (102, 102, 153), font=font)
base.save(f'cache/steam_{message.id}.png')
await message.channel.send(file=discord.File(f'cache/steam_{message.id}.png'))
os.remove(f'cache/steam_{message.id}.png')
|
default_app_config = "wiki.plugins.globalhistory.apps.GlobalHistoryConfig"
|
from __future__ import unicode_literals
import frappe
from frappe.utils import cint, cstr, flt
from frappe import _
from frappe.model.document import Document
from operator import itemgetter
class BOM(Document):
def autoname(self):
last_name = frappe.db.sql("""select max(name) from `tabBOM`
where name like "BOM/%s/%%" """ % frappe.db.escape(self.item))
if last_name:
idx = cint(cstr(last_name[0][0]).split('/')[-1].split('-')[0]) + 1
else:
idx = 1
self.name = 'BOM/' + self.item + ('/%.3i' % idx)
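# e.g. for item "X-100" with two existing BOMs this yields "BOM/X-100/003"
# (hypothetical item code, shown only to illustrate the naming scheme)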
def validate(self):
self.clear_operations()
self.validate_main_item()
from erpnext.utilities.transaction_base import validate_uom_is_integer
validate_uom_is_integer(self, "stock_uom", "qty", "BOM Item")
self.validate_materials()
self.set_bom_material_details()
self.calculate_cost()
self.validate_operations()
def on_update(self):
self.check_recursion()
self.update_exploded_items()
def on_submit(self):
self.manage_default_bom()
def on_cancel(self):
frappe.db.set(self, "is_active", 0)
frappe.db.set(self, "is_default", 0)
# check if used in any other bom
self.validate_bom_links()
self.manage_default_bom()
def on_update_after_submit(self):
self.validate_bom_links()
self.manage_default_bom()
def get_item_det(self, item_code):
item = frappe.db.sql("""select name, item_name, is_asset_item, is_purchase_item,
docstatus, description, image, is_sub_contracted_item, stock_uom, default_bom,
last_purchase_rate
from `tabItem` where name=%s""", item_code, as_dict = 1)
if not item:
frappe.throw(_("Item: {0} does not exist in the system").format(item_code))
return item
def validate_rm_item(self, item):
if item[0]['name'] == self.item:
frappe.throw(_("Raw material cannot be same as main Item"))
def set_bom_material_details(self):
for item in self.get("items"):
ret = self.get_bom_material_detail({"item_code": item.item_code, "item_name": item.item_name, "bom_no": item.bom_no,
"qty": item.qty})
for r in ret:
if not item.get(r):
item.set(r, ret[r])
def get_bom_material_detail(self, args=None):
""" Get raw material details like uom, desc and rate"""
if not args:
args = frappe.form_dict.get('args')
if isinstance(args, basestring):
import json
args = json.loads(args)
item = self.get_item_det(args['item_code'])
self.validate_rm_item(item)
args['bom_no'] = args['bom_no'] or item and cstr(item[0]['default_bom']) or ''
args.update(item[0])
rate = self.get_rm_rate(args)
ret_item = {
'item_name' : item and args['item_name'] or '',
'description' : item and args['description'] or '',
'image' : item and args['image'] or '',
'stock_uom' : item and args['stock_uom'] or '',
'bom_no' : args['bom_no'],
'rate' : rate
}
return ret_item
def get_rm_rate(self, arg):
""" Get raw material rate as per selected method, if bom exists takes bom cost """
rate = 0
if arg['bom_no']:
rate = self.get_bom_unitcost(arg['bom_no'])
elif arg and (arg['is_purchase_item'] == 1 or arg['is_sub_contracted_item'] == 1):
if self.rm_cost_as_per == 'Valuation Rate':
rate = self.get_valuation_rate(arg)
elif self.rm_cost_as_per == 'Last Purchase Rate':
rate = arg['last_purchase_rate']
elif self.rm_cost_as_per == "Price List":
if not self.buying_price_list:
frappe.throw(_("Please select Price List"))
rate = frappe.db.get_value("Item Price", {"price_list": self.buying_price_list,
"item_code": arg["item_code"]}, "price_list_rate") or 0
return rate
def update_cost(self):
if self.docstatus == 2:
return
for d in self.get("items"):
rate = self.get_bom_material_detail({'item_code': d.item_code, 'bom_no': d.bom_no,
'qty': d.qty})["rate"]
if rate:
d.rate = rate
if self.docstatus == 1:
self.flags.ignore_validate_update_after_submit = True
self.calculate_cost()
self.save()
self.update_exploded_items()
frappe.msgprint(_("Cost Updated"))
def get_bom_unitcost(self, bom_no):
bom = frappe.db.sql("""select name, total_cost/quantity as unit_cost from `tabBOM`
where is_active = 1 and name = %s""", bom_no, as_dict=1)
return bom and bom[0]['unit_cost'] or 0
def get_valuation_rate(self, args):
""" Get weighted average of valuation rate from all warehouses """
total_qty, total_value, valuation_rate = 0.0, 0.0, 0.0
for d in frappe.db.sql("""select actual_qty, stock_value from `tabBin`
where item_code=%s""", args['item_code'], as_dict=1):
total_qty += flt(d.actual_qty)
total_value += flt(d.stock_value)
if total_qty:
valuation_rate = total_value / total_qty
if valuation_rate <= 0:
last_valuation_rate = frappe.db.sql("""select valuation_rate
from `tabStock Ledger Entry`
where item_code = %s and valuation_rate > 0
order by posting_date desc, posting_time desc, name desc limit 1""", args['item_code'])
valuation_rate = flt(last_valuation_rate[0][0]) if last_valuation_rate else 0
return valuation_rate
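# Worked example (made-up numbers): bins of 10 units at stock value 50 and
# 30 units at stock value 250 give (50 + 250) / (10 + 30) = 7.5 as the
# weighted average valuation rate.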
def manage_default_bom(self):
""" Uncheck others if current one is selected as default,
update default bom in item master
"""
if self.is_default and self.is_active:
from frappe.model.utils import set_default
set_default(self, "item")
item = frappe.get_doc("Item", self.item)
if item.default_bom != self.name:
item.default_bom = self.name
item.save()
else:
frappe.db.set(self, "is_default", 0)
item = frappe.get_doc("Item", self.item)
if item.default_bom == self.name:
item.default_bom = None
item.save()
def clear_operations(self):
if not self.with_operations:
self.set('operations', [])
def validate_main_item(self):
""" Validate main FG item"""
item = self.get_item_det(self.item)
if not item:
frappe.throw(_("Item {0} does not exist in the system or has expired").format(self.item))
else:
ret = frappe.db.get_value("Item", self.item, ["description", "stock_uom", "item_name"])
self.description = ret[0]
self.uom = ret[1]
self.item_name= ret[2]
def validate_materials(self):
""" Validate raw material entries """
if not self.get('items'):
frappe.throw(_("Raw Materials cannot be blank."))
check_list = []
for m in self.get('items'):
if m.bom_no:
validate_bom_no(m.item_code, m.bom_no)
if flt(m.qty) <= 0:
frappe.throw(_("Quantity required for Item {0} in row {1}").format(m.item_code, m.idx))
check_list.append(cstr(m.item_code))
unique_chk_list = set(check_list)
if len(unique_chk_list) != len(check_list):
frappe.throw(_("Same item has been entered multiple times."))
def check_recursion(self):
""" Check whether recursion occurs in any bom"""
check_list = [['parent', 'bom_no', 'parent'], ['bom_no', 'parent', 'child']]
for d in check_list:
bom_list, count = [self.name], 0
while (len(bom_list) > count ):
boms = frappe.db.sql(" select %s from `tabBOM Item` where %s = %s " %
(d[0], d[1], '%s'), cstr(bom_list[count]))
count = count + 1
for b in boms:
if b[0] == self.name:
frappe.throw(_("BOM recursion: {0} cannot be parent or child of {2}").format(b[0], self.name))
if b[0]:
bom_list.append(b[0])
def update_cost_and_exploded_items(self, bom_list=[]):
bom_list = self.traverse_tree(bom_list)
for bom in bom_list:
bom_obj = frappe.get_doc("BOM", bom)
bom_obj.on_update()
return bom_list
def traverse_tree(self, bom_list=[]):
def _get_children(bom_no):
return [cstr(d[0]) for d in frappe.db.sql("""select bom_no from `tabBOM Item`
where parent = %s and ifnull(bom_no, '') != ''""", bom_no)]
count = 0
if self.name not in bom_list:
bom_list.append(self.name)
while(count < len(bom_list)):
for child_bom in _get_children(bom_list[count]):
if child_bom not in bom_list:
bom_list.append(child_bom)
count += 1
bom_list.reverse()
return bom_list
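# Example (hypothetical BOM names): starting from "BOM/A/001" whose items
# reference "BOM/B/001" and "BOM/C/001", the breadth-first walk collects
# ["BOM/A/001", "BOM/B/001", "BOM/C/001"]; the final reverse() puts children
# before their parents so costs can be updated bottom-up.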
def calculate_cost(self):
"""Calculate bom totals"""
self.calculate_op_cost()
self.calculate_rm_cost()
self.total_cost = self.operating_cost + self.raw_material_cost
def calculate_op_cost(self):
"""Update workstation rate and calculates totals"""
self.operating_cost = 0
for d in self.get('operations'):
if d.workstation:
if not d.hour_rate:
d.hour_rate = flt(frappe.db.get_value("Workstation", d.workstation, "hour_rate"))
if d.hour_rate and d.time_in_mins:
d.operating_cost = flt(d.hour_rate) * flt(d.time_in_mins) / 60.0
self.operating_cost += flt(d.operating_cost)
def calculate_rm_cost(self):
"""Fetch RM rate as per today's valuation rate and calculate totals"""
total_rm_cost = 0
for d in self.get('items'):
if d.bom_no:
d.rate = self.get_bom_unitcost(d.bom_no)
d.amount = flt(d.rate, self.precision("rate", d)) * flt(d.qty, self.precision("qty", d))
d.qty_consumed_per_unit = flt(d.qty, self.precision("qty", d)) / flt(self.quantity, self.precision("quantity"))
total_rm_cost += d.amount
self.raw_material_cost = total_rm_cost
def update_exploded_items(self):
""" Update Flat BOM, following will be correct data"""
self.get_exploded_items()
self.add_exploded_items()
def get_exploded_items(self):
""" Get all raw materials including items from child bom"""
self.cur_exploded_items = {}
for d in self.get('items'):
if d.bom_no:
self.get_child_exploded_items(d.bom_no, d.qty)
else:
self.add_to_cur_exploded_items(frappe._dict({
'item_code' : d.item_code,
'item_name' : d.item_name,
'description' : d.description,
'image' : d.image,
'stock_uom' : d.stock_uom,
'qty' : flt(d.qty),
'rate' : flt(d.rate),
}))
def add_to_cur_exploded_items(self, args):
if self.cur_exploded_items.get(args.item_code):
self.cur_exploded_items[args.item_code]["qty"] += args.qty
else:
self.cur_exploded_items[args.item_code] = args
def get_child_exploded_items(self, bom_no, qty):
""" Add all items from Flat BOM of child BOM"""
# Did not use qty_consumed_per_unit in the query, as it leads to rounding loss
child_fb_items = frappe.db.sql("""select bom_item.item_code, bom_item.item_name, bom_item.description,
bom_item.stock_uom, bom_item.qty, bom_item.rate,
bom_item.qty / ifnull(bom.quantity, 1) as qty_consumed_per_unit
from `tabBOM Explosion Item` bom_item, tabBOM bom
where bom_item.parent = bom.name and bom.name = %s and bom.docstatus = 1""", bom_no, as_dict = 1)
for d in child_fb_items:
self.add_to_cur_exploded_items(frappe._dict({
'item_code' : d['item_code'],
'item_name' : d['item_name'],
'description' : d['description'],
'stock_uom' : d['stock_uom'],
'qty' : d['qty_consumed_per_unit']*qty,
'rate' : flt(d['rate']),
}))
def add_exploded_items(self):
"Add items to Flat BOM table"
frappe.db.sql("""delete from `tabBOM Explosion Item` where parent=%s""", self.name)
self.set('exploded_items', [])
for d in sorted(self.cur_exploded_items, key=itemgetter(0)):
ch = self.append('exploded_items', {})
for i in self.cur_exploded_items[d].keys():
ch.set(i, self.cur_exploded_items[d][i])
ch.amount = flt(ch.qty) * flt(ch.rate)
ch.qty_consumed_per_unit = flt(ch.qty) / flt(self.quantity)
ch.docstatus = self.docstatus
ch.db_insert()
def validate_bom_links(self):
if not self.is_active:
act_pbom = frappe.db.sql("""select distinct bom_item.parent from `tabBOM Item` bom_item
where bom_item.bom_no = %s and bom_item.docstatus = 1
and exists (select * from `tabBOM` where name = bom_item.parent
and docstatus = 1 and is_active = 1)""", self.name)
if act_pbom and act_pbom[0][0]:
frappe.throw(_("Cannot deactivate or cancel BOM as it is linked with other BOMs"))
def validate_operations(self):
if self.with_operations and not self.get('operations'):
frappe.throw(_("Operations cannot be left blank."))
def get_bom_items_as_dict(bom, company, qty=1, fetch_exploded=1):
item_dict = {}
# Did not use qty_consumed_per_unit in the query, as it leads to rounding loss
query = """select
bom_item.item_code,
item.item_name,
sum(bom_item.qty/ifnull(bom.quantity, 1)) * %(qty)s as qty,
item.description,
item.image,
item.stock_uom,
item.default_warehouse,
item.expense_account as expense_account,
item.buying_cost_center as cost_center
from
`tab{table}` bom_item, `tabBOM` bom, `tabItem` item
where
bom_item.parent = bom.name
and bom_item.docstatus < 2
and bom_item.parent = %(bom)s
and item.name = bom_item.item_code
and is_stock_item = 1
{conditions}
group by item_code, stock_uom"""
if fetch_exploded:
query = query.format(table="BOM Explosion Item",
conditions="""and item.is_sub_contracted_item = 0""")
items = frappe.db.sql(query, { "qty": qty, "bom": bom }, as_dict=True)
else:
query = query.format(table="BOM Item", conditions="")
items = frappe.db.sql(query, { "qty": qty, "bom": bom }, as_dict=True)
# make unique
for item in items:
if item_dict.has_key(item.item_code):
item_dict[item.item_code]["qty"] += flt(item.qty)
else:
item_dict[item.item_code] = item
for item, item_details in item_dict.items():
for d in [["Account", "expense_account", "default_expense_account"],
["Cost Center", "cost_center", "cost_center"], ["Warehouse", "default_warehouse", ""]]:
company_in_record = frappe.db.get_value(d[0], item_details.get(d[1]), "company")
if not item_details.get(d[1]) or (company_in_record and company != company_in_record):
item_dict[item][d[1]] = frappe.db.get_value("Company", company, d[2]) if d[2] else None
return item_dict
@frappe.whitelist()
def get_bom_items(bom, company, qty=1, fetch_exploded=1):
items = get_bom_items_as_dict(bom, company, qty, fetch_exploded).values()
items.sort(lambda a, b: a.item_code > b.item_code and 1 or -1)
return items
def validate_bom_no(item, bom_no):
"""Validate BOM No of sub-contracted items"""
bom = frappe.get_doc("BOM", bom_no)
if not bom.is_active:
frappe.throw(_("BOM {0} must be active").format(bom_no))
if bom.docstatus != 1:
if not getattr(frappe.flags, "in_test", False):
frappe.throw(_("BOM {0} must be submitted").format(bom_no))
if item and not (bom.item.lower() == item.lower() or \
bom.item.lower() == cstr(frappe.db.get_value("Item", item, "variant_of")).lower()):
frappe.throw(_("BOM {0} does not belong to Item {1}").format(bom_no, item))
|
from flask import request
from apps.content import push_content_notification
from apps.tasks import send_to
from superdesk import get_resource_service
import superdesk
from superdesk.errors import SuperdeskApiError, InvalidStateTransitionError
from superdesk.metadata.item import CONTENT_STATE, ITEM_STATE
from superdesk.resource import Resource
from superdesk.services import BaseService
from superdesk.metadata.utils import item_url
from apps.archive.archive import SOURCE as ARCHIVE
from superdesk.workflow import is_workflow_state_transition_valid
class DuplicateResource(Resource):
endpoint_name = 'duplicate'
resource_title = endpoint_name
schema = {
'desk': Resource.rel('desks', False, required=True)
}
url = 'archive/<{0}:guid>/duplicate'.format(item_url)
resource_methods = ['POST']
item_methods = []
privileges = {'POST': 'duplicate'}
class DuplicateService(BaseService):
def create(self, docs, **kwargs):
guid_of_item_to_be_duplicated = request.view_args['guid']
guid_of_duplicated_items = []
for doc in docs:
archive_service = get_resource_service(ARCHIVE)
archived_doc = archive_service.find_one(req=None, _id=guid_of_item_to_be_duplicated)
if not archived_doc:
raise SuperdeskApiError.notFoundError('Failed to find item with guid: %s' %
guid_of_item_to_be_duplicated)
current_desk_of_item = archived_doc.get('task', {}).get('desk')
if current_desk_of_item is None or str(current_desk_of_item) != str(doc.get('desk')):
raise SuperdeskApiError.preconditionFailedError(message='Duplicate is allowed within the same desk.')
if not is_workflow_state_transition_valid('duplicate', archived_doc[ITEM_STATE]):
raise InvalidStateTransitionError()
send_to(doc=archived_doc, desk_id=doc.get('desk'))
new_guid = archive_service.duplicate_content(archived_doc)
guid_of_duplicated_items.append(new_guid)
if kwargs.get('notify', True):
push_content_notification([archived_doc])
return guid_of_duplicated_items
superdesk.workflow_action(
name='duplicate',
exclude_states=[CONTENT_STATE.SPIKED, CONTENT_STATE.KILLED],
privileges=['archive', 'duplicate']
)
|
from freeswitch import *
import sys
import re
import random
import messaging.sms.submit
from datetime import datetime, timedelta
RP_GENERIC_HEADER = '00GG0003919999'
TP_GENERIC_HEADER = '11GG'
MAX_GSM_TIME = 63 * 7
def gen_header(reference, header):
return re.sub('GG', reference, header)
def gen_hex(i):
tmp = hex(i)[2:]
if (len(tmp) == 1):
return "0" + tmp
else:
return tmp
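# e.g. gen_hex(10) -> "0a", gen_hex(171) -> "ab"; the zero padding keeps every
# octet two hex digits wide, which the length field built in gen_body relies on.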
def gen_tpdu(to, text):
tmp = (messaging.sms.submit.SmsSubmit(str(to), text))
tmp._validity = timedelta(MAX_GSM_TIME)
#stripping the nonsense headers, will probably fix later
return tmp.to_pdu()[0].pdu[6:].lower()
def gen_body(to, text):
reference = str(hex(random.randint(17,255))[2:]) #random reference?
rp_header = gen_header(reference, RP_GENERIC_HEADER)
tp_header = gen_header(reference, TP_GENERIC_HEADER)
tp_user_data = gen_tpdu(to, text)
tp_len = (len(tp_header) + len(tp_user_data))/2 #octets, not bytes
return rp_header + gen_hex(tp_len) + tp_header + tp_user_data
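# Resulting body layout: RP header + one length octet + TP header + TPDU user
# data; tp_len is divided by 2 above because the strings are hex, two
# characters per octet.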
def send_smqueue_message(to, fromm, text):
event = Event("CUSTOM", "SMS::SEND_MESSAGE")
event.addHeader("proto", "sip");
event.addHeader("dest_proto", "sip");
event.addHeader("from", fromm)
event.addHeader("from_full", "sip:" + fromm + "@" + getGlobalVariable("domain"))
event.addHeader("to", "internal/sip:smsc@" + getGlobalVariable("smqueue_host") + ":" + getGlobalVariable("smqueue_port"))
event.addHeader("subject", "SIMPLE_MESSAGE")
event.addHeader("type", "application/vnd.3gpp.sms");
event.addHeader("hint", "the hint");
event.addHeader("replying", "false");
event.addBody(gen_body(to, text));
event.fire()
def chat(message, args):
args = args.split('|')
if (len(args) < 3):
consoleLog('err', 'Missing Args\n')
exit(1)
to = args[0]
fromm = args[1]
text = args[2]
if ((not to or to == '') or
(not fromm or fromm == '')):
consoleLog('err', 'Malformed Args\n')
exit(1)
send_smqueue_message(to, fromm, text)
def fsapi(session, stream, env, args):
#chat doesn't use message anyhow
chat(None, args)
|
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('judge', '0003_license_key'),
]
operations = [
migrations.CreateModel(
name='LanguageLimit',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('time_limit', models.FloatField()),
('memory_limit', models.IntegerField()),
('language', models.ForeignKey(to='judge.Language')),
('problem', models.ForeignKey(related_name='language_limits', to='judge.Problem')),
],
options={
},
bases=(models.Model,),
),
migrations.AlterUniqueTogether(
name='languagelimit',
unique_together=set([('problem', 'language')]),
),
migrations.AlterField(
model_name='comment',
name='body',
field=models.TextField(verbose_name=b'Body of comment'),
preserve_default=True,
),
]
|
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker, Session
class SQLAlchemyDatabaseConnector:
"""
Database connector for use with SQLAlchemy.
"""
def __init__(self, database_location: str):
"""
Default constructor.
:param database_location: the url of the database that connections can be made to.
"""
self._engine = None
self._database_location = database_location
def create_session(self) -> Session:
"""
Creates a SQLAlchemy session, which is used to interact with the database.
:return: connected database session
"""
if not self._engine:
self._engine = create_engine(self._database_location)
Session = sessionmaker(bind=self._engine)
session = Session()
return session
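# A minimal usage sketch (the sqlite URL is a stand-in for a real database
# location, not part of the original module):
#
#     connector = SQLAlchemyDatabaseConnector("sqlite:///:memory:")
#     session = connector.create_session()
#     try:
#         ...  # query via the session
#     finally:
#         session.close()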
|
import unittest2
from lxml import etree
from openerp.tests import test_mail_examples
from openerp.tools import html_sanitize, html_email_clean, append_content_to_html, plaintext2html, email_split
class TestSanitizer(unittest2.TestCase):
""" Test the html sanitizer that filters html to remove unwanted attributes """
def test_basic_sanitizer(self):
cases = [
("yop", "<p>yop</p>"), # simple
("lala<p>yop</p>xxx", "<p>lala</p><p>yop</p>xxx"), # trailing text
("Merci à l'intérêt pour notre produit.nous vous contacterons bientôt. Merci",
u"<p>Merci à l'intérêt pour notre produit.nous vous contacterons bientôt. Merci</p>"), # unicode
]
for content, expected in cases:
html = html_sanitize(content)
self.assertEqual(html, expected, 'html_sanitize is broken')
def test_evil_malicious_code(self):
# taken from https://www.owasp.org/index.php/XSS_Filter_Evasion_Cheat_Sheet#Tests
cases = [
("<IMG SRC=javascript:alert('XSS')>"), # no quotes and semicolons
("<IMG SRC=javascript:alert('XSS')>"), # UTF-8 Unicode encoding
("<IMG SRC=javascript:alert('XSS')>"), # hex encoding
("<IMG SRC=\"jav
ascript:alert('XSS');\">"), # embedded carriage return
("<IMG SRC=\"jav
ascript:alert('XSS');\">"), # embedded newline
("<IMG SRC=\"jav ascript:alert('XSS');\">"), # embedded tab
("<IMG SRC=\"jav	ascript:alert('XSS');\">"), # embedded encoded tab
("<IMG SRC=\"  javascript:alert('XSS');\">"), # spaces and meta-characters
("<IMG SRC=\"javascript:alert('XSS')\""), # half-open html
("<IMG \"\"\"><SCRIPT>alert(\"XSS\")</SCRIPT>\">"), # malformed tag
("<SCRIPT/XSS SRC=\"http://ha.ckers.org/xss.js\"></SCRIPT>"), # non-alpha-non-digits
("<SCRIPT/SRC=\"http://ha.ckers.org/xss.js\"></SCRIPT>"), # non-alpha-non-digits
("<<SCRIPT>alert(\"XSS\");//<</SCRIPT>"), # extraneous open brackets
("<SCRIPT SRC=http://ha.ckers.org/xss.js?< B >"), # non-closing script tags
("<INPUT TYPE=\"IMAGE\" SRC=\"javascript:alert('XSS');\">"), # input image
("<BODY BACKGROUND=\"javascript:alert('XSS')\">"), # body image
("<IMG DYNSRC=\"javascript:alert('XSS')\">"), # img dynsrc
("<IMG LOWSRC=\"javascript:alert('XSS')\">"), # img lowsrc
("<TABLE BACKGROUND=\"javascript:alert('XSS')\">"), # table
("<TABLE><TD BACKGROUND=\"javascript:alert('XSS')\">"), # td
("<DIV STYLE=\"background-image: url(javascript:alert('XSS'))\">"), # div background
("<DIV STYLE=\"background-image:\0075\0072\006C\0028'\006a\0061\0076\0061\0073\0063\0072\0069\0070\0074\003a\0061\006c\0065\0072\0074\0028.1027\0058.1053\0053\0027\0029'\0029\">"), # div background with unicoded exploit
("<DIV STYLE=\"background-image: url(javascript:alert('XSS'))\">"), # div background + extra characters
("<IMG SRC='vbscript:msgbox(\"XSS\")'>"), # VBscrip in an image
("<BODY ONLOAD=alert('XSS')>"), # event handler
("<BR SIZE=\"&{alert('XSS')}\>"), # & javascript includes
("<LINK REL=\"stylesheet\" HREF=\"javascript:alert('XSS');\">"), # style sheet
("<LINK REL=\"stylesheet\" HREF=\"http://ha.ckers.org/xss.css\">"), # remote style sheet
("<STYLE>@import'http://ha.ckers.org/xss.css';</STYLE>"), # remote style sheet 2
("<META HTTP-EQUIV=\"Link\" Content=\"<http://ha.ckers.org/xss.css>; REL=stylesheet\">"), # remote style sheet 3
("<STYLE>BODY{-moz-binding:url(\"http://ha.ckers.org/xssmoz.xml#xss\")}</STYLE>"), # remote style sheet 4
("<IMG STYLE=\"xss:expr/*XSS*/ession(alert('XSS'))\">"), # style attribute using a comment to break up expression
]
for content in cases:
html = html_sanitize(content)
self.assertNotIn('javascript', html, 'html_sanitize did not remove a malicious javascript')
self.assertTrue('ha.ckers.org' not in html or 'http://ha.ckers.org/xss.css' in html, 'html_sanitize did not remove a malicious code in %s (%s)' % (content, html))
content = "<!--[if gte IE 4]><SCRIPT>alert('XSS');</SCRIPT><![endif]-->" # down-level hidden block
self.assertEquals(html_sanitize(content, silent=False), '')
def test_html(self):
sanitized_html = html_sanitize(test_mail_examples.MISC_HTML_SOURCE)
for tag in ['<div', '<b', '<i', '<u', '<strike', '<li', '<blockquote', '<a href']:
self.assertIn(tag, sanitized_html, 'html_sanitize stripped too much of original html')
for attr in ['javascript']:
self.assertNotIn(attr, sanitized_html, 'html_sanitize did not remove enough unwanted attributes')
emails = [("Charles <charles.bidule@truc.fr>", "Charles <charles.bidule@truc.fr>"),
("Dupuis <'tr/-: ${dupuis#$'@truc.baz.fr>", "Dupuis <'tr/-: ${dupuis#$'@truc.baz.fr>"),
("Technical <service/technical+2@open.com>", "Technical <service/technical+2@open.com>"),
("Div nico <div-nico@open.com>", "Div nico <div-nico@open.com>")]
for email in emails:
self.assertIn(email[1], html_sanitize(email[0]), 'html_sanitize stripped emails of original html')
def test_edi_source(self):
html = html_sanitize(test_mail_examples.EDI_LIKE_HTML_SOURCE)
self.assertIn('div style="font-family: \'Lucica Grande\', Ubuntu, Arial, Verdana, sans-serif; font-size: 12px; color: rgb(34, 34, 34); background-color: #FFF;', html,
'html_sanitize removed valid style attribute')
self.assertIn('<span style="color: #222; margin-bottom: 5px; display: block; ">', html,
'html_sanitize removed valid style attribute')
self.assertIn('img class="oe_edi_paypal_button" src="https://www.paypal.com/en_US/i/btn/btn_paynowCC_LG.gif"', html,
'html_sanitize removed valid img')
self.assertNotIn('</body></html>', html, 'html_sanitize did not remove extra closing tags')
class TestCleaner(unittest2.TestCase):
""" Test the email cleaner function that filters the content of incoming emails """
def test_00_basic_text(self):
""" html_email_clean test for signatures """
test_data = [
(
"""This is Sparta!\n--\nAdministrator\n+9988776655""",
['This is Sparta!'],
['Administrator', '9988776655']
), (
"""<p>--\nAdministrator</p>""",
[],
['--', 'Administrator']
), (
"""<p>This is Sparta!\n---\nAdministrator</p>""",
['This is Sparta!'],
['---', 'Administrator']
), (
"""<p>--<br>Administrator</p>""",
[],
[]
), (
"""<p>This is Sparta!<br/>--<br>Administrator</p>""",
['This is Sparta!'],
[]
), (
"""This is Sparta!\n>Ah bon ?\nCertes\n> Chouette !\nClair""",
['This is Sparta!', 'Certes', 'Clair'],
['Ah bon', 'Chouette']
)
]
for test, in_lst, out_lst in test_data:
new_html = html_email_clean(test, remove=True)
for text in in_lst:
self.assertIn(text, new_html, 'html_email_cleaner wrongly removed content')
for text in out_lst:
self.assertNotIn(text, new_html, 'html_email_cleaner did not remove unwanted content')
def test_05_shorten(self):
# TEST: shorten length
test_str = '''<div>
<span>
</span>
<p>Hello, <span>Raoul</span>
<bold>You</bold> are
pretty</p>
<span>Really</span>
</div>
'''
# shorten at 'H' of Hello -> should shorten after Hello,
html = html_email_clean(test_str, shorten=True, max_length=1, remove=True)
self.assertIn('Hello,', html, 'html_email_cleaner: shorten error or too short')
self.assertNotIn('Raoul', html, 'html_email_cleaner: shorten error or too long')
self.assertIn('read more', html, 'html_email_cleaner: shorten error about read more inclusion')
# shorten at 'are' -> should shorten after are
html = html_email_clean(test_str, shorten=True, max_length=17, remove=True)
self.assertIn('Hello,', html, 'html_email_cleaner: shorten error or too short')
self.assertIn('Raoul', html, 'html_email_cleaner: shorten error or too short')
self.assertIn('are', html, 'html_email_cleaner: shorten error or too short')
self.assertNotIn('pretty', html, 'html_email_cleaner: shorten error or too long')
self.assertNotIn('Really', html, 'html_email_cleaner: shorten error or too long')
self.assertIn('read more', html, 'html_email_cleaner: shorten error about read more inclusion')
# TEST: shorten in quote
test_str = '''<div> Blahble
bluih blouh
<blockquote>This is a quote
<span>And this is quite a long quote, after all.</span>
</blockquote>
</div>'''
# shorten in the quote
html = html_email_clean(test_str, shorten=True, max_length=25, remove=True)
self.assertIn('Blahble', html, 'html_email_cleaner: shorten error or too short')
self.assertIn('bluih', html, 'html_email_cleaner: shorten error or too short')
self.assertIn('blouh', html, 'html_email_cleaner: shorten error or too short')
self.assertNotIn('quote', html, 'html_email_cleaner: shorten error or too long')
self.assertIn('read more', html, 'html_email_cleaner: shorten error about read more inclusion')
# shorten in second word
html = html_email_clean(test_str, shorten=True, max_length=9, remove=True)
self.assertIn('Blahble', html, 'html_email_cleaner: shorten error or too short')
self.assertIn('bluih', html, 'html_email_cleaner: shorten error or too short')
self.assertNotIn('blouh', html, 'html_email_cleaner: shorten error or too short')
self.assertNotIn('quote', html, 'html_email_cleaner: shorten error or too long')
self.assertIn('read more', html, 'html_email_cleaner: shorten error about read more inclusion')
# shorten waaay too large
html = html_email_clean(test_str, shorten=True, max_length=900, remove=True)
self.assertIn('Blahble', html, 'html_email_cleaner: shorten error or too short')
self.assertIn('bluih', html, 'html_email_cleaner: shorten error or too short')
self.assertIn('blouh', html, 'html_email_cleaner: shorten error or too short')
self.assertNotIn('quote', html, 'html_email_cleaner: shorten error or too long')
def test_10_email_text(self):
""" html_email_clean test for text-based emails """
new_html = html_email_clean(test_mail_examples.TEXT_1, remove=True)
for ext in test_mail_examples.TEXT_1_IN:
self.assertIn(ext, new_html, 'html_email_cleaner wrongly removed not quoted content')
for ext in test_mail_examples.TEXT_1_OUT:
self.assertNotIn(ext, new_html, 'html_email_cleaner did not erase signature / quoted content')
new_html = html_email_clean(test_mail_examples.TEXT_2, remove=True)
for ext in test_mail_examples.TEXT_2_IN:
self.assertIn(ext, new_html, 'html_email_cleaner wrongly removed not quoted content')
for ext in test_mail_examples.TEXT_2_OUT:
self.assertNotIn(ext, new_html, 'html_email_cleaner did not erase signature / quoted content')
def test_20_email_html(self):
new_html = html_email_clean(test_mail_examples.HTML_1, remove=True)
for ext in test_mail_examples.HTML_1_IN:
self.assertIn(ext, new_html, 'html_email_cleaner wrongly removed not quoted content')
for ext in test_mail_examples.HTML_1_OUT:
self.assertNotIn(ext, new_html, 'html_email_cleaner did not erase signature / quoted content')
new_html = html_email_clean(test_mail_examples.HTML_2, remove=True)
for ext in test_mail_examples.HTML_2_IN:
self.assertIn(ext, new_html, 'html_email_cleaner wrongly removed not quoted content')
for ext in test_mail_examples.HTML_2_OUT:
self.assertNotIn(ext, new_html, 'html_email_cleaner did not erase signature / quoted content')
# --- MAIL ORIGINAL --- -> can't parse this one currently, too much language-dependent
# new_html = html_email_clean(test_mail_examples.HTML_3, remove=False)
# for ext in test_mail_examples.HTML_3_IN:
# self.assertIn(ext, new_html, 'html_email_cleaner wrongly removed not quoted content')
# for ext in test_mail_examples.HTML_3_OUT:
# self.assertNotIn(ext, new_html, 'html_email_cleaner did not erase signature / quoted content')
def test_30_email_msoffice(self):
new_html = html_email_clean(test_mail_examples.MSOFFICE_1, remove=True)
for ext in test_mail_examples.MSOFFICE_1_IN:
self.assertIn(ext, new_html, 'html_email_cleaner wrongly removed not quoted content')
for ext in test_mail_examples.MSOFFICE_1_OUT:
self.assertNotIn(ext, new_html, 'html_email_cleaner did not erase signature / quoted content')
new_html = html_email_clean(test_mail_examples.MSOFFICE_2, remove=True)
for ext in test_mail_examples.MSOFFICE_2_IN:
self.assertIn(ext, new_html, 'html_email_cleaner wrongly removed not quoted content')
for ext in test_mail_examples.MSOFFICE_2_OUT:
self.assertNotIn(ext, new_html, 'html_email_cleaner did not erase signature / quoted content')
new_html = html_email_clean(test_mail_examples.MSOFFICE_3, remove=True)
for ext in test_mail_examples.MSOFFICE_3_IN:
self.assertIn(ext, new_html, 'html_email_cleaner wrongly removed not quoted content')
for ext in test_mail_examples.MSOFFICE_3_OUT:
self.assertNotIn(ext, new_html, 'html_email_cleaner did not erase signature / quoted content')
def test_40_email_hotmail(self):
new_html = html_email_clean(test_mail_examples.HOTMAIL_1, remove=True)
for ext in test_mail_examples.HOTMAIL_1_IN:
self.assertIn(ext, new_html, 'html_email_cleaner wrongly removed not quoted content')
for ext in test_mail_examples.HOTMAIL_1_OUT:
self.assertNotIn(ext, new_html, 'html_email_cleaner did not erase signature / quoted content')
def test_50_email_gmail(self):
new_html = html_email_clean(test_mail_examples.GMAIL_1, remove=True)
for ext in test_mail_examples.GMAIL_1_IN:
self.assertIn(ext, new_html, 'html_email_cleaner wrongly removed not quoted content')
for ext in test_mail_examples.GMAIL_1_OUT:
self.assertNotIn(ext, new_html, 'html_email_cleaner did not erase signature / quoted content')
def test_60_email_thunderbird(self):
new_html = html_email_clean(test_mail_examples.THUNDERBIRD_1, remove=True)
for ext in test_mail_examples.THUNDERBIRD_1_IN:
self.assertIn(ext, new_html, 'html_email_cleaner wrongly removed not quoted content')
for ext in test_mail_examples.THUNDERBIRD_1_OUT:
self.assertNotIn(ext, new_html, 'html_email_cleaner did not erase signature / quoted content')
def test_70_read_more_and_shorten(self):
expand_options = {
'oe_expand_container_class': 'span_class',
'oe_expand_container_content': 'Herbert Einstein',
'oe_expand_separator_node': 'br_lapin',
'oe_expand_a_class': 'a_class',
'oe_expand_a_content': 'read mee',
}
new_html = html_email_clean(test_mail_examples.OERP_WEBSITE_HTML_1, remove=True, shorten=True, max_length=100, expand_options=expand_options)
for ext in test_mail_examples.OERP_WEBSITE_HTML_1_IN:
self.assertIn(ext, new_html, 'html_email_cleaner wrongly removed not quoted content')
for ext in test_mail_examples.OERP_WEBSITE_HTML_1_OUT:
self.assertNotIn(ext, new_html, 'html_email_cleaner did not erase overlimit content')
for ext in ['<span class="span_class">Herbert Einstein<br_lapin></br_lapin><a href="#" class="a_class">read mee</a></span>']:
self.assertIn(ext, new_html, 'html_email_cleaner did not take specific expand options into account')
new_html = html_email_clean(test_mail_examples.OERP_WEBSITE_HTML_2, remove=True, shorten=True, max_length=200, expand_options=expand_options, protect_sections=False)
for ext in test_mail_examples.OERP_WEBSITE_HTML_2_IN:
self.assertIn(ext, new_html, 'html_email_cleaner wrongly removed not quoted content')
for ext in test_mail_examples.OERP_WEBSITE_HTML_2_OUT:
self.assertNotIn(ext, new_html, 'html_email_cleaner did not erase overlimit content')
for ext in ['<span class="span_class">Herbert Einstein<br_lapin></br_lapin><a href="#" class="a_class">read mee</a></span>']:
self.assertIn(ext, new_html, 'html_email_cleaner did not take specific expand options into account')
new_html = html_email_clean(test_mail_examples.OERP_WEBSITE_HTML_2, remove=True, shorten=True, max_length=200, expand_options=expand_options, protect_sections=True)
for ext in test_mail_examples.OERP_WEBSITE_HTML_2_IN:
self.assertIn(ext, new_html, 'html_email_cleaner wrongly removed not quoted content')
for ext in test_mail_examples.OERP_WEBSITE_HTML_2_OUT:
self.assertNotIn(ext, new_html, 'html_email_cleaner did not erase overlimit content')
for ext in [
'<span class="span_class">Herbert Einstein<br_lapin></br_lapin><a href="#" class="a_class">read mee</a></span>',
'tasks using the gantt chart and control deadlines']:
self.assertIn(ext, new_html, 'html_email_cleaner did not take specific expand options into account')
def test_70_read_more(self):
new_html = html_email_clean(test_mail_examples.BUG1, remove=True, shorten=True, max_length=100)
for ext in test_mail_examples.BUG_1_IN:
self.assertIn(ext, new_html, 'html_email_cleaner wrongly removed valid content')
for ext in test_mail_examples.BUG_1_OUT:
self.assertNotIn(ext, new_html, 'html_email_cleaner did not remove invalid content')
new_html = html_email_clean(test_mail_examples.BUG2, remove=True, shorten=True, max_length=250)
for ext in test_mail_examples.BUG_2_IN:
self.assertIn(ext, new_html, 'html_email_cleaner wrongly removed valid content')
for ext in test_mail_examples.BUG_2_OUT:
self.assertNotIn(ext, new_html, 'html_email_cleaner did not remove invalid content')
def test_90_misc(self):
# a False text value must be returned unchanged
new_html = html_email_clean(False)
self.assertEqual(new_html, False, 'html_email_cleaner changed a False value into something else.')
# Message with xml and doctype tags don't crash
new_html = html_email_clean(u'<?xml version="1.0" encoding="iso-8859-1"?>\n<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"\n "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">\n<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en" lang="en">\n <head>\n <title>404 - Not Found</title>\n </head>\n <body>\n <h1>404 - Not Found</h1>\n </body>\n</html>\n')
self.assertNotIn('encoding', new_html, 'html_email_cleaner did not correctly remove encoding attributes')
class TestHtmlTools(unittest2.TestCase):
""" Test some of our generic utility functions about html """
def test_plaintext2html(self):
cases = [
("First \nSecond \nThird\n \nParagraph\n\r--\nSignature paragraph", 'div',
"<div><p>First <br/>Second <br/>Third</p><p>Paragraph</p><p>--<br/>Signature paragraph</p></div>"),
("First<p>It should be escaped</p>\nSignature", False,
"<p>First<p>It should be escaped</p><br/>Signature</p>")
]
for content, container_tag, expected in cases:
html = plaintext2html(content, container_tag)
self.assertEqual(html, expected, 'plaintext2html is broken')
def test_append_to_html(self):
test_samples = [
('<!DOCTYPE...><HTML encoding="blah">some <b>content</b></HtMl>', '--\nYours truly', True, True, False,
'<!DOCTYPE...><html encoding="blah">some <b>content</b>\n<pre>--\nYours truly</pre>\n</html>'),
('<!DOCTYPE...><HTML encoding="blah">some <b>content</b></HtMl>', '--\nYours truly', True, False, False,
'<!DOCTYPE...><html encoding="blah">some <b>content</b>\n<p>--<br/>Yours truly</p>\n</html>'),
('<html><body>some <b>content</b></body></html>', '<!DOCTYPE...>\n<html><body>\n<p>--</p>\n<p>Yours truly</p>\n</body>\n</html>', False, False, False,
'<html><body>some <b>content</b>\n\n\n<p>--</p>\n<p>Yours truly</p>\n\n\n</body></html>'),
]
for html, content, plaintext_flag, preserve_flag, container_tag, expected in test_samples:
self.assertEqual(append_content_to_html(html, content, plaintext_flag, preserve_flag, container_tag), expected, 'append_content_to_html is broken')
class TestEmailTools(unittest2.TestCase):
""" Test some of our generic utility functions for emails """
def test_email_split(self):
cases = [
("John <12345@gmail.com>", ['12345@gmail.com']), # regular form
("d@x; 1@2", ['d@x', '1@2']), # semi-colon + extra space
("'(ss)' <123@gmail.com>, 'foo' <foo@bar>", ['123@gmail.com','foo@bar']), # comma + single-quoting
('"john@gmail.com"<johnny@gmail.com>', ['johnny@gmail.com']), # double-quoting
('"<jg>" <johnny@gmail.com>', ['johnny@gmail.com']), # double-quoting with brackets
]
for text, expected in cases:
self.assertEqual(email_split(text), expected, 'email_split is broken')
if __name__ == '__main__':
unittest2.main()
|
from spack import *
import os
import shutil
class Gaussian(Package):
"""Gaussian is a computer program for computational chemistry"""
homepage = "http://www.gaussian.com/"
url = "file://{0}/g09.tgz".format(os.getcwd())
version('09', '7d4c95b535e68e48af183920df427e4e')
def install(self, spec, prefix):
shutil.copytree(os.getcwd(), prefix.bin)
patch_install_files = ['flc',
'linda8.2/opteron-linux/bin/flc',
'linda8.2/opteron-linux/bin/LindaLauncher',
'linda8.2/opteron-linux/bin/ntsnet',
'linda8.2/opteron-linux/bin/pmbuild',
'linda8.2/opteron-linux/bin/vntsnet',
'ntsnet'
]
for filename in patch_install_files:
if os.path.isfile(filename):
filter_file('/mf/frisch/g09', prefix.bin, join_path(prefix.bin,
filename), string=True)
patch_install_files = ['linda8.2/opteron-linux/bin/ntsnet',
'linda8.2/opteron-linux/bin/vntsnet',
]
for filename in patch_install_files:
if os.path.isfile(filename):
filter_file('/usr/bin/linda', prefix.bin, join_path(prefix.bin,
filename), string=True)
def setup_environment(self, spack_env, run_env):
run_env.set('g09root', self.prefix)
run_env.set('GAUSSIANHOME', self.prefix)
run_env.set('GAUSS_EXEDIR', self.prefix.bin)
run_env.set('G09_BASIS', join_path(self.prefix.bin, 'basis'))
run_env.set('GAUSS_LEXEDIR', join_path(self.prefix.bin,
'linda-exe'))
run_env.set('GAUSS_ARCHDIR', join_path(self.prefix.bin, 'arch'))
run_env.set('GAUSS_BSDDIR', join_path(self.prefix.bin, 'bsd'))
run_env.prepend_path('LD_LIBRARY_PATH', join_path(self.prefix.bin,
'linda8.2/opteron-linux/lib'))
run_env.prepend_path('LD_LIBRARY_PATH', self.prefix.bin)
|
import os
import sys
import re
import datetime
import subprocess
source_exts = [ '.py', '.c', '.h', '.cpp' ]
def is_source(path):
for ext in source_exts:
if path.endswith(ext):
return True
def get_name_and_version():
f = open('configure.ac', 'r')
config = f.read()
f.close()
exp = r'AC_INIT\(\[[^\]]+\],\[([^\]]+)\],\[\],\[([^\]]+)\]'
match = re.search(exp, config)
if not match:
print 'Cannot find the package name and version.'
sys.exit(0)
return [ match.group(2), match.group(1) ]
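# e.g. a configure.ac line such as AC_INIT([Sugar],[0.84.0],[],[sugar])
# (version shown is illustrative) yields ['sugar', '0.84.0'].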
def cmd_help():
print 'Usage: \n\
maint-helper.py build-snapshot - build a source snapshot \n\
maint-helper.py fix-copyright [path] - fix the copyright year \n\
maint-helper.py check-licenses - check licenses in the source'
def cmd_build_snapshot():
[ name, version ] = get_name_and_version()
print 'Update git...'
retcode = subprocess.call(['git', 'pull'])
if retcode:
print 'ERROR - cannot pull from git'
cmd = 'git-show-ref --hash=10 refs/heads/master'
alphatag = os.popen(cmd).readline().strip()
tarball = '%s-%s-git%s.tar.bz2' % (name, version, alphatag)
print 'Build %s...' % tarball
retcode = subprocess.call(['make', 'distcheck'])
if retcode:
sys.exit(0)
if 'JOYBUILD_PATH' in os.environ:
tarball = os.path.join(os.environ['JOYBUILD_PATH'], 'source', tarball)
os.rename('%s-%s.tar.bz2' % (name, version), tarball)
print 'Update NEWS.sugar...'
if 'SUGAR_NEWS' in os.environ:
sugar_news_path = os.environ['SUGAR_NEWS']
if os.path.isfile(sugar_news_path):
f = open(sugar_news_path, 'r')
sugar_news = f.read()
f.close()
else:
sugar_news = ''
[ name, version ] = get_name_and_version()
sugar_news += '%s - %s - %s\n\n' % (name, version, alphatag)
f = open('NEWS', 'r')
for line in f.readlines():
if len(line.strip()) > 0:
sugar_news += line
else:
break
f.close()
f = open(sugar_news_path, 'w')
f.write(sugar_news)
f.close()
print 'Update NEWS...'
f = open('NEWS', 'r')
news = f.read()
f.close()
news = 'Snapshot %s\n\n' % alphatag + news
f = open('NEWS', 'w')
f.write(news)
f.close()
print 'Committing to git...'
changelog = 'Snapshot %s.' % alphatag
retcode = subprocess.call(['git', 'commit', '-a', '-m', changelog])
if retcode:
print 'ERROR - cannot commit to git'
retcode = subprocess.call(['git', 'push'])
if retcode:
print 'ERROR - cannot push to git'
print 'Done.'
def check_licenses(path, license, missing):
matchers = { 'LGPL' : 'GNU Lesser General Public',
'GPL' : 'GNU General Public License' }
license_file = os.path.join(path, '.license')
if os.path.isfile(license_file):
f = open(license_file, 'r')
license = f.readline().strip()
f.close()
for item in os.listdir(path):
full_path = os.path.join(path, item)
if os.path.isdir(full_path):
check_licenses(full_path, license, missing)
else:
check_source = is_source(item)
# Special cases.
if item.find('marshal') > 0 or \
item.startswith('egg'):
check_source = False
if check_source:
f = open(full_path, 'r')
source = f.read()
f.close()
miss_license = True
if source.find(matchers[license]) > 0:
miss_license = False
# Special cases.
if source.find('THIS FILE IS GENERATED') > 0:
miss_license = False
if miss_license:
if license not in missing:
missing[license] = []
missing[license].append(full_path)
def cmd_check_licenses():
missing = {}
check_licenses(os.getcwd(), 'LGPL', missing)
for item in missing.keys():
print '%s:\n' % item
for path in missing[item]:
print path
print '\n'
COPYRIGHT = 'Copyright (C) '
def fix_copyright(path):
for item in os.listdir(path):
full_path = os.path.join(path, item)
if os.path.isdir(full_path):
fix_copyright(full_path)
elif is_source(item):
f = open(full_path, 'r')
source = f.read()
f.close()
year_start = -1
year_end = -1
i1 = source.find(COPYRIGHT)
if i1 != -1:
i1 += len(COPYRIGHT)
i2 = i1 + source[i1:].find(' ')
if i1 > 0:
try:
year_start = int(source[i1:i1 + 4])
year_end = int(source[i1 + 6: i1 + 10])
except ValueError:
pass
if year_start > 0 and year_end < 0:
year_end = year_start
year = datetime.date.today().year
if year_end < year:
result = '%s%d-%d%s' % (source[:i1], year_start,
year, source[i2:])
f = open(full_path, 'w')
f.write(result)
f.close()
def cmd_fix_copyright(path):
fix_copyright(path)
if len(sys.argv) < 2:
cmd_help()
elif sys.argv[1] == 'build-snapshot':
cmd_build_snapshot()
elif sys.argv[1] == 'check-licenses':
cmd_check_licenses()
elif sys.argv[1] == 'fix-copyright' and len(sys.argv) > 2:
cmd_fix_copyright(sys.argv[2])
|
try:
from maxent import *
except ImportError:
import sys
print >> sys.stderr, 'maxent module not found, get it from homepages.inf.ed.ac.uk/s0450736/maxent_toolkit.html'
sys.exit(-1)
import orange
def extract_features(ex):
f = []
for i, a in enumerate(ex.domain.attributes):
f.append('%s=%s' % (a.name, ex[i]))
return f
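# e.g. for an example with attributes colour=red and size=big this returns
# ['colour=red', 'size=big'] (attribute names illustrative); each name=value
# pair becomes one binary maxent feature.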
def MaxentLearner(examples=None, **kwds):
learner = apply(MaxentLearnerClass,(), kwds)
if examples:
return learner(examples)
else:
return learner
class MaxentLearnerClass:
def __init__(self, name='Maximum Entropy Learner',
iters = 15, method = 'lbfgs', gaussian = 0.0):
self.name = name
self.iters = iters
assert method == 'lbfgs' or method == 'gis'
self.method = method
self.gaussian = gaussian
def __call__(self, data, weight=None):
# we will ignore the weight
# build the me model here
m = MaxentModel()
m.begin_add_event()
for ex in data:
m.add_event(extract_features(ex), ex.getclass().value)
m.end_add_event()
m.train(self.iters, self.method, self.gaussian)
return MaxentClassifier(model = m, domain = data.domain)
class MaxentClassifier:
def __init__(self, **kwds):
self.__dict__ = kwds
def __call__(self, example, result_type = orange.GetValue):
if result_type == orange.GetValue:
return orange.Value(self.domain.classVar, self.model.predict(extract_features(example)))
else:
# build a label map, which will be used to sort the outputted
# probabilities
class_map = {}
for pos, label in enumerate(self.domain.classVar.values):
class_map[label] = pos
result = self.model.eval_all(extract_features(example))
if len(result) > 0:
if result_type == orange.GetProbabilities:
r = [None]*len(result)
for label, prob in result:
r[class_map[label]] = prob
return r
elif result_type == orange.GetBoth:
return (orange.Value(self.domain.classVar, result[0][0]), result[0][1])
else:
return None
if __name__ == '__main__':
import unittest
class TestOrngMaxent(unittest.TestCase):
def setUp(self):
set_verbose(1)
self.data = orange.ExampleTable("voting")
self.classifier = MaxentLearner(self.data, iters = 10)
#self.classifier = orange.MaxentClassifier(data)
def test_predict_class(self):
for i in range(5):
c = self.classifier(self.data[i])
print "original", self.data[i].getclass(), "classified as", c
self.assertEqual(self.data[i].getclass(), c)
def test_predict_prob(self):
print "Possible classes:", self.data.domain.classVar.values
print "Probabilities for democrats:"
for i in range(5):
p = self.classifier(self.data[i], orange.GetProbabilities)
print "%d: %5.3f (originally %s)" % (i+1, p[1], self.data[i].getclass())
self.assertAlmostEqual(p[1], self.data[i].getclass() == \
'democrat' and 1.0 or 0.0, 2)
def test_predict_both(self):
for i in range(5):
r = self.classifier(self.data[i], orange.GetBoth)
self.assertEqual(r[0], self.data[i].getclass())
self.assertAlmostEqual(r[1], 1.0, 2)
print 'running unittest...'
unittest.main()
|
"""This file is part of the prometeo project.
This program is free software: you can redistribute it and/or modify it
under the terms of the GNU Lesser General Public License as published by the
Free Software Foundation, either version 3 of the License, or (at your
option) any later version.
This program is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
more details.
You should have received a copy of the GNU Lesser General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>
"""
__author__ = 'Emanuele Bertoldi <emanuele.bertoldi@gmail.com>'
__copyright__ = 'Copyright (c) 2011 Emanuele Bertoldi'
__version__ = '0.0.5'
from django.utils.translation import ugettext_lazy as _
from django.shortcuts import render_to_response, get_object_or_404
from django.core.urlresolvers import reverse
from django.views.generic.simple import redirect_to
from django.views.generic import list_detail, create_update
from django.template import RequestContext
from django.contrib import messages
from prometeo.core.auth.decorators import obj_permission_required as permission_required
from prometeo.core.views import filtered_list_detail
from models import *
from forms import *
def _get_product(request, *args, **kwargs):
product_id = kwargs.get('product_id', None)
id = kwargs.get('id', None)
if product_id:
return get_object_or_404(Product, id=product_id)
elif id:
return get_object_or_404(Product, id=id)
return None
@permission_required('products.view_product')
def product_list(request, page=0, paginate_by=10, **kwargs):
"""Shows a product list.
"""
return filtered_list_detail(
request,
Product,
fields=['name', 'code', 'ean13', 'description', 'is_consumable', 'is_service'],
page=page,
paginate_by=paginate_by,
template_name='products/product_list.html',
**kwargs
)
@permission_required('products.add_product')
def product_add(request, **kwargs):
"""Adds a new product.
"""
product = Product()
if request.method == 'POST':
form = ProductForm(request.POST, instance=product)
if form.is_valid():
form.save()
messages.success(request, _("The product was created successfully."))
return redirect_to(request, url=product.get_absolute_url())
else:
form = ProductForm(instance=product)
return render_to_response('products/product_edit.html', RequestContext(request, {'form': form, 'object': product}))
@permission_required('products.view_product', _get_product)
def product_detail(request, id, page=None, **kwargs):
"""Shows product details.
"""
object_list = Product.objects.all()
return list_detail.object_detail(
request,
object_id=id,
queryset=object_list,
extra_context={
'object_list': object_list,
},
**kwargs
)
@permission_required('products.change_product', _get_product)
def product_edit(request, id, **kwargs):
"""Edits a product.
"""
return create_update.update_object(
request,
object_id=id,
form_class=ProductForm,
template_name='products/product_edit.html'
)
@permission_required('products.delete_product', _get_product)
def product_delete(request, id, **kwargs):
"""Deletes a product.
"""
return create_update.delete_object(
request,
model=Product,
object_id=id,
post_delete_redirect=reverse('product_list'),
template_name='products/product_delete.html',
**kwargs
)
@permission_required('products.view_product', _get_product)
def product_supplies(request, id, page=0, paginate_by=10, **kwargs):
"""Shows the product's supplies.
"""
product = get_object_or_404(Product, pk=id)
return filtered_list_detail(
request,
product.supply_set.all(),
fields=['id', 'supplier', 'purchase_price', 'minimal_quantity', 'max_purchase_discount', 'payment_terms'],
page=page,
paginate_by=paginate_by,
template_name='products/product_supplies.html',
extra_context={'object': product},
**kwargs
)
@permission_required('products.change_product', _get_product)
def product_add_supply(request, id, **kwargs):
"""Adds a new supply to the given product.
"""
supply = Supply(product_id=id)
if request.method == 'POST':
form = SupplyForm(request.POST, instance=supply)
if form.is_valid():
form.save()
messages.success(request, _("The supply was created successfully."))
return redirect_to(request, url=supply.get_absolute_url())
else:
form = SupplyForm(instance=supply)
return render_to_response('products/product_edit_supply.html', RequestContext(request, {'form': form, 'object': supply}))
@permission_required('products.view_product', _get_product)
def product_supply_detail(request, product_id, id, **kwargs):
"""Show details of the given product supply.
"""
product = get_object_or_404(Product, pk=product_id)
object_list = product.supply_set.all()
return list_detail.object_detail(
request,
object_id=id,
queryset=object_list,
extra_context={
'object_list': object_list,
},
template_name='products/product_supply_detail.html',
**kwargs
)
@permission_required('products.change_product', _get_product)
def product_edit_supply(request, product_id, id, **kwargs):
"""Edits an supply of the given product.
"""
supply = get_object_or_404(Supply, product__pk=product_id, pk=id)
return create_update.update_object(
request,
form_class=SupplyForm,
object_id=id,
template_name='products/product_edit_supply.html',
**kwargs
)
@permission_required('products.change_product', _get_product)
def product_delete_supply(request, product_id, id, **kwargs):
"""Deletes an supply of the given product.
"""
supply = get_object_or_404(Supply, product__pk=product_id, pk=id)
return create_update.delete_object(
request,
model=Supply,
object_id=id,
post_delete_redirect=reverse('product_supplies', args=[product_id]),
template_name='products/product_delete_supply.html',
**kwargs
)
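# A minimal sketch of how these views might be wired up in a URLconf
# (Django 1.3-era "patterns" syntax; the regexes are illustrative, while the
# names 'product_list' and 'product_supplies' match the reverse() calls above):
#
#   from django.conf.urls.defaults import patterns, url
#
#   urlpatterns = patterns('prometeo.products.views',
#       url(r'^products/$', 'product_list', name='product_list'),
#       url(r'^products/add/$', 'product_add', name='product_add'),
#       url(r'^products/(?P<id>\d+)/$', 'product_detail', name='product_detail'),
#       url(r'^products/(?P<id>\d+)/supplies/$', 'product_supplies', name='product_supplies'),
#   )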
|
"""
Copyright (c) 2004-Present Pivotal Software, Inc.
This program and the accompanying materials are made available under
the terms of the under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import glob
from time import sleep
import tinctest
from tinctest.lib import local_path
from gppylib.commands.base import Command
from mpp.lib.PSQL import PSQL
from mpp.lib.filerep_util import Filerepe2e_Util
from mpp.lib.gprecoverseg import GpRecover
from mpp.lib.gpstart import GpStart
from mpp.lib.gpstop import GpStop
from mpp.lib.config import GPDBConfig
from mpp.lib.gpfilespace import Gpfilespace
from mpp.lib.gpdbverify import GpdbVerify
from mpp.models import MPPTestCase
from mpp.gpdb.tests.storage.lib.dbstate import DbStateClass
from mpp.gpdb.tests.storage.lib.common_utils import *
class PgtwoPhaseClass(MPPTestCase):
'''Helper class for pg_twophase supporting functions '''
def __init__(self,methodName):
self.filereputil = Filerepe2e_Util()
self.config = GPDBConfig()
self.gprecover = GpRecover(self.config)
self.gpstop = GpStop()
self.gpstart = GpStart()
self.gpfile = Gpfilespace(self.config)
self.gpverify = GpdbVerify(config=self.config)
self.dbstate = DbStateClass('run_validation',self.config)
self.port = os.getenv('PGPORT')
super(PgtwoPhaseClass,self).__init__(methodName)
def invoke_fault(self, fault_name, type, role='mirror', port=None, occurence=None, sleeptime=None, seg_id=None):
''' Reset the fault and then issue the fault with the given type'''
self.filereputil.inject_fault(f=fault_name, y='reset', r=role, p=port , o=occurence, sleeptime=sleeptime, seg_id=seg_id)
self.filereputil.inject_fault(f=fault_name, y=type, r=role, p=port , o=occurence, sleeptime=sleeptime, seg_id=seg_id)
tinctest.logger.info('Successfully injected fault_name : %s fault_type : %s' % (fault_name, type))
def inject_fault(self, fault_type):
'''
        @param fault_type : type of fault to be suspended
'''
if fault_type == 'end_prepare_two_phase_sleep':
self.filereputil.inject_fault(f='end_prepare_two_phase_sleep', sleeptime='1000', y='sleep', r='primary', p=self.port)
tinctest.logger.info('Injected fault to sleep in end_prepare_two_phase')
elif fault_type == 'abort':
            # In case of an abort fault we also need to include this error-type fault, to fake a situation where one of the segments is not responding, which makes the master trigger an abort of the transaction
self.invoke_fault('transaction_abort_after_distributed_prepared', 'error', port=self.port, occurence='0', seg_id='1')
self.invoke_fault('twophase_transaction_abort_prepared', 'suspend', role='primary', port=self.port, occurence='0')
elif fault_type == 'commit':
self.invoke_fault('twophase_transaction_commit_prepared', 'suspend', role='primary', port=self.port, occurence='0')
elif fault_type == 'dtm_broadcast_prepare':
self.invoke_fault('dtm_broadcast_prepare', 'suspend', seg_id = '1', port=self.port, occurence='0')
elif fault_type == 'dtm_broadcast_commit_prepared':
self.invoke_fault('dtm_broadcast_commit_prepared', 'suspend', seg_id = '1', port=self.port, occurence='0')
elif fault_type == 'dtm_xlog_distributed_commit':
self.invoke_fault('dtm_xlog_distributed_commit', 'suspend', seg_id = '1', port=self.port, occurence='0')
def resume_faults(self, fault_type, cluster_state='sync'):
'''
@param fault_type : commit/abort/end_prepare_two_phase_sleep/dtm_broadcast_prepare/dtm_broadcast_commit_prepared/dtm_xlog_distributed_commit
@description : Resume the suspended faults
'''
        tinctest.logger.info('Resuming faults for fault type %s' % fault_type)
if fault_type == 'abort':
self.filereputil.inject_fault(f='twophase_transaction_abort_prepared', y='resume', r='primary', p=self.port , o='0')
if cluster_state !='resync':
self.filereputil.inject_fault(f='transaction_abort_after_distributed_prepared', y='reset', p=self.port , o='0', seg_id='1')
elif fault_type == 'commit':
self.filereputil.inject_fault(f='twophase_transaction_commit_prepared', y='resume', r='primary', p=self.port , o='0')
elif fault_type == 'dtm_broadcast_prepare':
self.filereputil.inject_fault(f='dtm_broadcast_prepare', y='resume', seg_id = '1', p=self.port, o='0')
elif fault_type == 'dtm_broadcast_commit_prepared':
            tinctest.logger.info('Resuming the dtm_broadcast_commit_prepared fault')
self.filereputil.inject_fault(f='dtm_broadcast_commit_prepared', y='resume', seg_id = '1', p=self.port, o='0')
elif fault_type == 'dtm_xlog_distributed_commit':
self.filereputil.inject_fault(f='dtm_xlog_distributed_commit', y='resume', seg_id = '1', p=self.port, o='0')
else:
tinctest.logger.info('No faults to resume')
tinctest.logger.info('Resumed the suspended transaction fault')
#Wait till all the trigger_sqls are complete before returning
sql_count = PSQL.run_sql_command('select count(*) from pg_stat_activity;', flags ='-q -t', dbname='postgres')
while(sql_count.strip() != '1'):
sleep(5)
sql_count = PSQL.run_sql_command('select count(*) from pg_stat_activity;', flags ='-q -t', dbname='postgres')
tinctest.logger.info('stat_activity count %s ' % sql_count)
return
def start_db(self):
'''Gpstart '''
rc = self.gpstart.run_gpstart_cmd()
if not rc:
raise Exception('Failed to start the cluster')
tinctest.logger.info('Started the cluster successfully')
def stop_db(self):
''' Gpstop and dont check for rc '''
cmd = Command('Gpstop_a', 'gpstop -a')
tinctest.logger.info('Executing command: gpstop -a')
cmd.run()
def crash_and_recover(self, crash_type, fault_type, checkpoint='noskip', cluster_state='sync'):
'''
@param crash_type : gpstop_i/gpstop_a/failover_to_primary/failover_to_mirror
        @note: when skip checkpoint is enabled, gpstop -a returns a non-zero return code and fails in the library. As a workaround, a local function is used
'''
if crash_type == 'gpstop_i' :
rc = self.gpstop.run_gpstop_cmd(immediate = True)
if not rc:
raise Exception('Failed to stop the cluster')
tinctest.logger.info('Stopped cluster immediately')
self.start_db()
elif crash_type == 'gpstop_a':
self.resume_faults(fault_type, cluster_state)
if checkpoint == 'skip' :
self.stop_db()
else:
rc = self.gpstop.run_gpstop_cmd()
if not rc:
raise Exception('Failed to stop the cluster')
tinctest.logger.info('Smart stop completed')
self.start_db()
elif crash_type == 'failover_to_primary':
self.invoke_fault('filerep_consumer', 'fault')
self.resume_faults(fault_type, cluster_state)
(rc, num) =self.filereputil.wait_till_change_tracking_transition()
tinctest.logger.info('Value of rc and num_down %s, %s, %s' % (rc, num, fault_type))
elif crash_type == 'failover_to_mirror':
self.invoke_fault('postmaster', 'panic', role='primary')
if fault_type in ('dtm_broadcast_prepare', 'dtm_broadcast_commit_prepared', 'dtm_xlog_distributed_commit') :
self.resume_faults(fault_type, cluster_state)
PSQL.wait_for_database_up()
(rc, num) = self.filereputil.wait_till_change_tracking_transition()
tinctest.logger.info('Value of rc and num_down %s, %s' % (rc, num))
if fault_type == 'abort' :
self.filereputil.inject_fault(f='transaction_abort_after_distributed_prepared', y='reset',p=self.port , o='0', seg_id='1')
if cluster_state == 'resync':
if not self.gprecover.wait_till_insync_transition():
raise Exception('Segments not in sync')
def get_trigger_status_old(self, trigger_count):
'''Compare the pg_stat_activity count with the total number of trigger_sqls executed '''
for i in range(1,50):
psql_count = PSQL.run_sql_command('select count(*) from pg_stat_activity;', flags='-q -t', dbname='postgres')
tinctest.logger.info('Count of trigger sqls %s' % psql_count)
if int(psql_count.strip()) < trigger_count :
                tinctest.logger.info('Not all trigger sqls have started yet')
return False
return True
def get_trigger_status(self, trigger_count, fault_type):
        if fault_type is None:
            return self.get_trigger_status_old(trigger_count)
        return self.filereputil.check_fault_status(fault_name=fault_type, status="triggered", seg_id='1', num_times_hit=trigger_count)
def check_trigger_sql_hang(self, test_dir, fault_type = None):
'''
        @description : Return the status of the trigger sqls: whether they are waiting on the fault.
                       Since gpfaultinjector has no way to check if all the sqls are triggered, we use
                       a count(*) on pg_stat_activity and compare it with the total number of trigger_sqls
'''
trigger_count=0
for dir in test_dir.split(","):
trigger_dir = local_path('%s/trigger_sql/sql/' % (dir))
trigger_count += len(glob.glob1(trigger_dir,"*.sql"))
tinctest.logger.info('Total number of sqls to trigger %d in %s' % (trigger_count,test_dir));
return self.get_trigger_status(trigger_count, fault_type)
def run_faults_before_pre(self, cluster_state):
'''
@param cluster_state : sync/change_tracking/resync
        @description: 1. Put the cluster into change_tracking in case of resync/change_tracking.
'''
if cluster_state == 'resync':
self.invoke_fault('filerep_consumer', 'fault')
self.filereputil.wait_till_change_tracking_transition()
tinctest.logger.info('Change_tracking transition complete')
def run_faults_before_trigger(self, checkpoint, cluster_state, fault_type):
'''
@param checkpoint : skip/noskip
@param cluster_state : sync/change_tracking/resync
@param fault_type : commit/abort
@param end_prepare_two_phase_sleep : True/False
@description : 1. Suspend resync faults. 2. Issue Checkpoint before the skip checkpoint, so that the bufferpool is cleared. 3. If skip issue 'skip checkpoint'. 4. Suspend transaction_faults based on test_type.
'''
if cluster_state == 'change_tracking':
self.invoke_fault('filerep_consumer', 'fault')
self.filereputil.wait_till_change_tracking_transition()
tinctest.logger.info('Change_tracking transition complete')
if cluster_state == 'resync':
self.invoke_fault('filerep_resync', 'suspend', role='primary')
if checkpoint == 'skip':
self.invoke_fault('filerep_transition_to_sync_before_checkpoint', 'suspend', role='primary', port=self.port, occurence='0')
rc = self.gprecover.incremental()
if not rc:
                raise Exception('Gprecoverseg failed')
tinctest.logger.info('Cluster in resync state')
PSQL.run_sql_command('CHECKPOINT;', dbname='postgres')
if checkpoint == 'skip':
self.invoke_fault('checkpoint', 'skip', role='primary', port= self.port, occurence='0')
self.inject_fault(fault_type)
        # Can't do this after the filerep_resync resume, as it gets stuck on
        # MirroredLock due to the filerep_transition_to_sync_before_checkpoint
        # suspend above
PSQL.wait_for_database_up()
if cluster_state == 'resync':
self.filereputil.inject_fault(f='filerep_resync', y='resume', r='primary')
def run_crash_and_recover(self, crash_type, fault_type, test_dir, cluster_state='sync', checkpoint='noskip'):
'''
@param crash_type : gpstop_i/gpstop_a/failover_to_mirror/failover_to_primary
@param fault_type : commit/abort/end_prepare_two_phase_sleep
@param test_dir : dir of the trigger sqls
@description : Execute the specified crash type before/after resuming the suspended fault and recover
'''
trigger_status = self.check_trigger_sql_hang(test_dir)
tinctest.logger.info('trigger_status %s' % trigger_status)
sleep(50) # This sleep is needed till we get a way to find the state of all suspended sqls
if trigger_status == True:
if cluster_state == 'resync':
self.filereputil.inject_fault(f='filerep_transition_to_sync_before_checkpoint', y='resume', r='primary')
sleep(15) # wait little before crash
self.crash_and_recover(crash_type, fault_type, checkpoint, cluster_state)
else:
tinctest.logger.info('The fault_status is not triggered')
def gprecover_rebalance(self):
'''
        @description: Rebalance through 'gpstop -air', which is much faster than 'gprecoverseg -r' for test purposes.
'''
rc = self.gpstop.run_gpstop_cmd(immediate = True)
if not rc:
raise Exception('Failed to stop the cluster')
tinctest.logger.info('Stopped cluster immediately')
self.start_db()
def run_gprecover(self, crash_type, cluster_state='sync'):
'''Recover the cluster if required. '''
if crash_type in ('failover_to_primary', 'failover_to_mirror') or cluster_state == 'change_tracking' :
rc = self.gprecover.incremental()
if not rc:
                raise Exception('Gprecoverseg failed')
if not self.gprecover.wait_till_insync_transition():
raise Exception('Segments not in sync')
tinctest.logger.info('Cluster in sync state')
if crash_type == 'failover_to_mirror' :
self.gprecover_rebalance()
tinctest.logger.info('Successfully Rebalanced the cluster')
else:
tinctest.logger.info('No need to run gprecoverseg. The cluster should be already in sync')
def switch_ckpt_faults_before_trigger(self, cluster_state, fault_type):
'''
@param cluster_state : sync/change_tracking/resync
@param fault_type : dtm_broadcast_prepare/dtm_broadcast_commit_prepared/dtm_xlog_distributed_commit
'''
if cluster_state in ('change_tracking', 'resync'):
self.invoke_fault('filerep_consumer', 'fault')
self.filereputil.wait_till_change_tracking_transition()
tinctest.logger.info('Change_tracking transition complete')
if cluster_state == 'resync':
self.invoke_fault('filerep_resync', 'suspend', role='primary')
rc = self.gprecover.incremental()
if not rc:
                raise Exception('Gprecoverseg failed')
tinctest.logger.info('Cluster in resync state')
self.inject_fault(fault_type)
def switch_ckpt_switch_xlog(self):
'''
@description: pg_switch_xlog on segments
'''
sql_cmd = 'select * from pg_switch_xlog();'
num_primary = self.config.get_countprimarysegments()
for i in range(num_primary):
(host, port) = self.config.get_hostandport_of_segment(psegmentNumber=i)
PSQL.run_sql_command_utility_mode(sql_cmd, host = host, port = port)
def switch_checkpoint_loop(self, fault_type):
'''
@description: Run switch_xlog and checkpoint based on the fault_type
'''
if fault_type == 'dtm_xlog_distributed_commit':
self.switch_ckpt_switch_xlog()
else:
for i in range(5):
self.switch_ckpt_switch_xlog()
def switch_ckpt_crash_and_recover(self, crash_type, fault_type, test_dir, cluster_state='sync', checkpoint='noskip'):
'''
@param crash_type : gpstop_i/gpstop_a/failover_to_mirror/failover_to_primary
@param fault_type : dtm_broadcast_prepare/dtm_broadcast_commit_prepared/dtm_xlog_distributed_commit
@param test_dir : dir of the trigger_sqls
'''
trigger_status = self.check_trigger_sql_hang(test_dir, fault_type)
tinctest.logger.info('trigger_status %s' % trigger_status)
if trigger_status == True:
if cluster_state == 'resync':
self.filereputil.inject_fault(f='filerep_resync', y='resume', r='primary')
sleep(30) #Give a little time before crash.
self.crash_and_recover(crash_type, fault_type, checkpoint, cluster_state)
else:
tinctest.logger.info('The fault_status is not triggered')
def cleanup_dangling_processes(self):
'''
        @description: Since the tests suspend transactions at different stages and do immediate shutdowns,
                      a few processes will not be cleaned up and will eventually eat up system resources.
                      This method takes care of killing them at the end of each test, if such processes exist
'''
num_primary = self.config.get_countprimarysegments()
for i in range(num_primary):
(host, port) = self.config.get_hostandport_of_segment(psegmentNumber=i)
grep_cmd = "ps -ef|grep %s|grep 'Distributed'" % port
cmd = Command('Check for dangling process', cmdStr = 'gpssh -h %s -e "%s" ' % (host, grep_cmd))
cmd.run()
result = cmd.get_results()
if len(result.stdout.splitlines()) > 2 :
grep_and_kill_cmd = "ps -ef|grep %s|grep 'Distributed'|awk '{print \$2}'|xargs kill -9" % port
cmd = Command('Kill dangling processes', cmdStr='gpssh -h %s -e "%s" ' % (host, grep_and_kill_cmd ))
cmd.run()
tinctest.logger.info('Killing the dangling processes')
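# A typical test flow using this helper might look like the following sketch
# (parameter values come from the docstrings above; the test_dir path is
# illustrative):
#
#   helper = PgtwoPhaseClass('test_commit_gpstop_i')
#   helper.run_faults_before_pre(cluster_state='sync')
#   helper.run_faults_before_trigger('noskip', 'sync', 'commit')
#   # ... trigger sqls run concurrently here ...
#   helper.run_crash_and_recover('gpstop_i', 'commit', 'storage/pg_twophase')
#   helper.run_gprecover('gpstop_i', 'sync')
#   helper.cleanup_dangling_processes()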
|
from __future__ import print_function
import pyxb.utils.domutils
import xml.dom.minidom
import cablelabs.core as core
import cablelabs.offer as offer
import cablelabs.title as title
import cablelabs.vod30 as vod30
import custom
pyxb.utils.domutils.BindingDOMSupport.DeclareNamespace(core.Namespace, 'core')
pyxb.utils.domutils.BindingDOMSupport.DeclareNamespace(offer.Namespace, 'offer')
pyxb.utils.domutils.BindingDOMSupport.DeclareNamespace(title.Namespace, 'title')
pyxb.utils.domutils.BindingDOMSupport.DeclareNamespace(vod30.Namespace, 'vod30')
pyxb.utils.domutils.BindingDOMSupport.DeclareNamespace(custom.Namespace, 'custom')
adi3 = vod30.ADI3()
cgt = offer.ContentGroupType()
venue = custom.Venue('some multiplex')
ext = core.ExtType(venue)
try:
cgt.Ext = ext
except pyxb.ValidationError as e:
print(e.details())
raise
cgt.uriId = 'urn:one'
cgt.TitleRef = core.AssetRefType(uriId='urn:aMovie')
cgt.PosterRef.append(core.AssetRefType(uriId='urn:aPoster'))
adi3.ContentGroup.append(cgt)
try:
xmls = adi3.toDOM().toprettyxml()
except pyxb.ValidationError as e:
print(e.details())
raise
print(xmls)
try:
instance = core.CreateFromDocument(xmls)
except pyxb.ValidationError as e:
print(e.details())
raise
ivenue = instance.ContentGroup[0].Ext.wildcardElements()[0]
assert isinstance(ivenue, type(venue))
assert ivenue.strip() == venue.strip()
cgt.Ext.append(custom.Venue('another location'))
cgt.Ext.append(custom.Venue('and yet another'))
xmls = adi3.toDOM().toprettyxml()
print(xmls)
|
import json
from magnum.tests.functional.common import models
class BayModelPatchData(models.BaseModel):
"""Data that encapsulates baymodelpatch attributes"""
pass
class BayModelPatchEntity(models.EntityModel):
"""Entity Model that represents a single instance of BayModelPatchData"""
ENTITY_NAME = 'baymodelpatch'
MODEL_TYPE = BayModelPatchData
class BayModelPatchCollection(models.CollectionModel):
"""Collection Model that represents a list of BayModelPatchData objects"""
MODEL_TYPE = BayModelPatchData
COLLECTION_NAME = 'baymodelpatchlist'
def to_json(self):
"""Converts BayModelPatchCollection to json
Retrieves list from COLLECTION_NAME attribute and converts each object
        to dict, appending it to a list. Then converts the entire list to json.
        This is required because COLLECTION_NAME holds a list of objects that
        need to be converted to dict individually
:returns: json object
"""
data = getattr(self, BayModelPatchCollection.COLLECTION_NAME)
collection = []
for d in data:
collection.append(d.to_dict())
return json.dumps(collection)
@classmethod
def from_dict(cls, data):
"""Converts dict to BayModelPatchData
Converts data dict to list of BayModelPatchData objects and stores it
in COLLECTION_NAME
Example of dict data:
[{
"path": "/name",
"value": "myname",
"op": "replace"
}]
:param data: dict of patch data
        :returns: BayModelPatchCollection instance
"""
model = cls()
collection = []
for d in data:
collection.append(cls.MODEL_TYPE.from_dict(d))
setattr(model, cls.COLLECTION_NAME, collection)
return model
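# Round-trip illustration (the patch data mirrors the docstring example above):
#
#   data = [{'path': '/name', 'value': 'myname', 'op': 'replace'}]
#   collection = BayModelPatchCollection.from_dict(data)
#   body = collection.to_json()  # -> '[{"path": "/name", ...}]'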
|
from flask.ext.wtf import Form
from wtforms import StringField, BooleanField, PasswordField
from wtforms.validators import DataRequired
class RegistrationForm(Form):
email = StringField('email', validators=[DataRequired()])
password = PasswordField('password', validators=[DataRequired()])
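# A minimal usage sketch (hypothetical Flask view; assumes an app configured
# with a SECRET_KEY, which Flask-WTF needs for CSRF protection):
#
#   @app.route('/register', methods=['GET', 'POST'])
#   def register():
#       form = RegistrationForm()
#       if form.validate_on_submit():
#           create_user(form.email.data, form.password.data)  # hypothetical helper
#           return redirect(url_for('index'))
#       return render_template('register.html', form=form)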
|
import sys
import os
project = 'python-neutronclient'
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx']
templates_path = ['_templates']
source_suffix = '.rst'
master_doc = 'index'
copyright = u'OpenStack LLC'
add_function_parentheses = True
add_module_names = True
pygments_style = 'sphinx'
html_theme = 'nature'
htmlhelp_basename = '%sdoc' % project
latex_documents = [
('index',
'%s.tex' % project,
u'%s Documentation' % project,
u'OpenStack LLC', 'manual'),
]
intersphinx_mapping = {'http://docs.python.org/': None}
|
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class ntpserver(base_resource) :
""" Configuration for NTP server resource. """
def __init__(self) :
self._serverip = ""
self._servername = ""
self._minpoll = 0
self._maxpoll = 0
self._autokey = False
self._key = 0
self._preferredntpserver = ""
self.___count = 0
@property
def serverip(self) :
ur"""IP address of the NTP server.<br/>Minimum length = 1.
"""
try :
return self._serverip
except Exception as e:
raise e
@serverip.setter
def serverip(self, serverip) :
ur"""IP address of the NTP server.<br/>Minimum length = 1
"""
try :
self._serverip = serverip
except Exception as e:
raise e
@property
def servername(self) :
ur"""Fully qualified domain name of the NTP server.
"""
try :
return self._servername
except Exception as e:
raise e
@servername.setter
def servername(self, servername) :
ur"""Fully qualified domain name of the NTP server.
"""
try :
self._servername = servername
except Exception as e:
raise e
@property
def minpoll(self) :
ur"""Minimum time after which the NTP server must poll the NTP messages. In seconds, expressed as a power of 2.<br/>Minimum length = 4<br/>Maximum length = 17.
"""
try :
return self._minpoll
except Exception as e:
raise e
@minpoll.setter
def minpoll(self, minpoll) :
ur"""Minimum time after which the NTP server must poll the NTP messages. In seconds, expressed as a power of 2.<br/>Minimum length = 4<br/>Maximum length = 17
"""
try :
self._minpoll = minpoll
except Exception as e:
raise e
@property
def maxpoll(self) :
ur"""Maximum time after which the NTP server must poll the NTP messages. In seconds, expressed as a power of 2.<br/>Minimum length = 4<br/>Maximum length = 17.
"""
try :
return self._maxpoll
except Exception as e:
raise e
@maxpoll.setter
def maxpoll(self, maxpoll) :
ur"""Maximum time after which the NTP server must poll the NTP messages. In seconds, expressed as a power of 2.<br/>Minimum length = 4<br/>Maximum length = 17
"""
try :
self._maxpoll = maxpoll
except Exception as e:
raise e
@property
def autokey(self) :
ur"""Use the Autokey protocol for key management for this server, with the cryptographic values (for example, symmetric key, host and public certificate files, and sign key) generated by the ntp-keygen utility. To require authentication for communication with the server, you must set either the value of this parameter or the key parameter.
"""
try :
return self._autokey
except Exception as e:
raise e
@autokey.setter
def autokey(self, autokey) :
ur"""Use the Autokey protocol for key management for this server, with the cryptographic values (for example, symmetric key, host and public certificate files, and sign key) generated by the ntp-keygen utility. To require authentication for communication with the server, you must set either the value of this parameter or the key parameter.
"""
try :
self._autokey = autokey
except Exception as e:
raise e
@property
def key(self) :
ur"""Key to use for encrypting authentication fields. All packets sent to and received from the server must include authentication fields encrypted by using this key. To require authentication for communication with the server, you must set either the value of this parameter or the autokey parameter.<br/>Minimum length = 1<br/>Maximum length = 65534.
"""
try :
return self._key
except Exception as e:
raise e
@key.setter
def key(self, key) :
ur"""Key to use for encrypting authentication fields. All packets sent to and received from the server must include authentication fields encrypted by using this key. To require authentication for communication with the server, you must set either the value of this parameter or the autokey parameter.<br/>Minimum length = 1<br/>Maximum length = 65534
"""
try :
self._key = key
except Exception as e:
raise e
@property
def preferredntpserver(self) :
ur"""Preferred NTP server. The NetScaler appliance chooses this NTP server for time synchronization among a set of correctly operating hosts.<br/>Default value: NO<br/>Possible values = YES, NO.
"""
try :
return self._preferredntpserver
except Exception as e:
raise e
@preferredntpserver.setter
def preferredntpserver(self, preferredntpserver) :
ur"""Preferred NTP server. The NetScaler appliance chooses this NTP server for time synchronization among a set of correctly operating hosts.<br/>Default value: NO<br/>Possible values = YES, NO
"""
try :
self._preferredntpserver = preferredntpserver
except Exception as e:
raise e
def _get_nitro_response(self, service, response) :
ur""" converts nitro response into object and returns the object array in case of get request.
"""
try :
result = service.payload_formatter.string_to_resource(ntpserver_response, response, self.__class__.__name__)
if(result.errorcode != 0) :
if (result.errorcode == 444) :
service.clear_session(self)
if result.severity :
if (result.severity == "ERROR") :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
else :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
return result.ntpserver
except Exception as e :
raise e
def _get_object_name(self) :
ur""" Returns the value of object identifier argument
"""
try :
if self.serverip is not None :
return str(self.serverip)
return None
except Exception as e :
raise e
@classmethod
def add(cls, client, resource) :
ur""" Use this API to add ntpserver.
"""
try :
if type(resource) is not list :
addresource = ntpserver()
addresource.serverip = resource.serverip
addresource.servername = resource.servername
addresource.minpoll = resource.minpoll
addresource.maxpoll = resource.maxpoll
addresource.autokey = resource.autokey
addresource.key = resource.key
return addresource.add_resource(client)
else :
if (resource and len(resource) > 0) :
addresources = [ ntpserver() for _ in range(len(resource))]
for i in range(len(resource)) :
addresources[i].serverip = resource[i].serverip
addresources[i].servername = resource[i].servername
addresources[i].minpoll = resource[i].minpoll
addresources[i].maxpoll = resource[i].maxpoll
addresources[i].autokey = resource[i].autokey
addresources[i].key = resource[i].key
result = cls.add_bulk_request(client, addresources)
return result
except Exception as e :
raise e
@classmethod
def delete(cls, client, resource) :
ur""" Use this API to delete ntpserver.
"""
try :
if type(resource) is not list :
deleteresource = ntpserver()
if type(resource) != type(deleteresource):
deleteresource.serverip = resource
else :
deleteresource.serverip = resource.serverip
deleteresource.servername = resource.servername
return deleteresource.delete_resource(client)
else :
if type(resource[0]) != cls :
if (resource and len(resource) > 0) :
deleteresources = [ ntpserver() for _ in range(len(resource))]
for i in range(len(resource)) :
deleteresources[i].serverip = resource[i]
else :
if (resource and len(resource) > 0) :
deleteresources = [ ntpserver() for _ in range(len(resource))]
for i in range(len(resource)) :
deleteresources[i].serverip = resource[i].serverip
deleteresources[i].servername = resource[i].servername
result = cls.delete_bulk_request(client, deleteresources)
return result
except Exception as e :
raise e
@classmethod
def update(cls, client, resource) :
ur""" Use this API to update ntpserver.
"""
try :
if type(resource) is not list :
updateresource = ntpserver()
updateresource.serverip = resource.serverip
updateresource.servername = resource.servername
updateresource.minpoll = resource.minpoll
updateresource.maxpoll = resource.maxpoll
updateresource.preferredntpserver = resource.preferredntpserver
updateresource.autokey = resource.autokey
updateresource.key = resource.key
return updateresource.update_resource(client)
else :
if (resource and len(resource) > 0) :
updateresources = [ ntpserver() for _ in range(len(resource))]
for i in range(len(resource)) :
updateresources[i].serverip = resource[i].serverip
updateresources[i].servername = resource[i].servername
updateresources[i].minpoll = resource[i].minpoll
updateresources[i].maxpoll = resource[i].maxpoll
updateresources[i].preferredntpserver = resource[i].preferredntpserver
updateresources[i].autokey = resource[i].autokey
updateresources[i].key = resource[i].key
result = cls.update_bulk_request(client, updateresources)
return result
except Exception as e :
raise e
@classmethod
def unset(cls, client, resource, args) :
ur""" Use this API to unset the properties of ntpserver resource.
Properties that need to be unset are specified in args array.
"""
try :
if type(resource) is not list :
unsetresource = ntpserver()
if type(resource) != type(unsetresource):
unsetresource.serverip = resource
else :
unsetresource.serverip = resource.serverip
unsetresource.servername = resource.servername
return unsetresource.unset_resource(client, args)
else :
if type(resource[0]) != cls :
if (resource and len(resource) > 0) :
unsetresources = [ ntpserver() for _ in range(len(resource))]
for i in range(len(resource)) :
unsetresources[i].serverip = resource[i]
else :
if (resource and len(resource) > 0) :
unsetresources = [ ntpserver() for _ in range(len(resource))]
for i in range(len(resource)) :
unsetresources[i].serverip = resource[i].serverip
unsetresources[i].servername = resource[i].servername
result = cls.unset_bulk_request(client, unsetresources, args)
return result
except Exception as e :
raise e
@classmethod
def get(cls, client, name="", option_="") :
ur""" Use this API to fetch all the ntpserver resources that are configured on netscaler.
"""
try :
if not name :
obj = ntpserver()
response = obj.get_resources(client, option_)
else :
if type(name) == cls :
if type(name) is not list :
option_ = options()
option_.args = nitro_util.object_to_string_withoutquotes(name)
response = name.get_resource(client, option_)
else :
if name and len(name) > 0 :
response = [ntpserver() for _ in range(len(name))]
for i in range(len(name)) :
option_ = options()
option_.args = nitro_util.object_to_string_withoutquotes(name[i])
response[i] = name[i].get_resource(client, option_)
return response
except Exception as e :
raise e
@classmethod
def get_filtered(cls, client, filter_) :
ur""" Use this API to fetch filtered set of ntpserver resources.
filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
"""
try :
obj = ntpserver()
option_ = options()
option_.filter = filter_
response = obj.getfiltered(client, option_)
return response
except Exception as e :
raise e
@classmethod
def count(cls, client) :
ur""" Use this API to count the ntpserver resources configured on NetScaler.
"""
try :
obj = ntpserver()
option_ = options()
option_.count = True
response = obj.get_resources(client, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e :
raise e
@classmethod
def count_filtered(cls, client, filter_) :
ur""" Use this API to count filtered the set of ntpserver resources.
Filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
"""
try :
obj = ntpserver()
option_ = options()
option_.count = True
option_.filter = filter_
response = obj.getfiltered(client, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e :
raise e
class Preferredntpserver:
YES = "YES"
NO = "NO"
class ntpserver_response(base_response) :
def __init__(self, length=1) :
self.ntpserver = []
self.errorcode = 0
self.message = ""
self.severity = ""
self.sessionid = ""
self.ntpserver = [ntpserver() for _ in range(length)]
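# Usage sketch (assumes the standard Nitro client bootstrap from the same SDK;
# the address and credentials are illustrative):
#
#   from nssrc.com.citrix.netscaler.nitro.service.nitro_service import nitro_service
#
#   client = nitro_service("10.0.0.1", "http")
#   client.login("nsroot", "nsroot")
#   server = ntpserver()
#   server.serverip = "192.0.2.10"
#   server.minpoll = 6
#   server.maxpoll = 10
#   ntpserver.add(client, server)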
|
from oslo_log import log as logging
from oslo_utils import encodeutils
import stevedore
from trove.common import base_exception as exception
from trove.common import cfg
from trove.common.i18n import _
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
class ModuleDriverManager(object):
MODULE_DRIVER_NAMESPACE = 'trove.guestagent.module.drivers'
def __init__(self):
LOG.info(_('Initializing module driver manager.'))
self._drivers = {}
self._module_types = [mt.lower() for mt in CONF.module_types]
self._load_drivers()
def _load_drivers(self):
manager = stevedore.enabled.EnabledExtensionManager(
namespace=self.MODULE_DRIVER_NAMESPACE,
check_func=self._check_extension,
invoke_on_load=True,
invoke_kwds={})
try:
manager.map(self.add_driver_extension)
except stevedore.exception.NoMatches:
LOG.info(_("No module drivers loaded"))
def _check_extension(self, extension):
"""Checks for required methods in driver objects."""
driver = extension.obj
supported = False
try:
LOG.info(_('Loading Module driver: %s'), driver.get_type())
if driver.get_type() != driver.get_type().lower():
raise AttributeError(_("Driver 'type' must be lower-case"))
LOG.debug(' description: %s', driver.get_description())
LOG.debug(' updated : %s', driver.get_updated())
required_attrs = ['apply', 'remove']
for attr in required_attrs:
if not hasattr(driver, attr):
raise AttributeError(
_("Driver '%(type)s' missing attribute: %(attr)s")
% {'type': driver.get_type(), 'attr': attr})
if driver.get_type() in self._module_types:
supported = True
else:
LOG.info(_("Driver '%s' not supported, skipping"),
driver.get_type())
except AttributeError as ex:
LOG.exception(_("Exception loading module driver: %s"),
encodeutils.exception_to_unicode(ex))
return supported
def add_driver_extension(self, extension):
# Add a module driver from the extension.
# If the stevedore manager is changed to one that doesn't
# check the extension driver, then it should be done manually here
# by calling self._check_extension(extension)
driver = extension.obj
driver_type = driver.get_type()
LOG.info(_('Loaded module driver: %s'), driver_type)
if driver_type in self._drivers:
raise exception.Error(_("Found duplicate driver: %s") %
driver_type)
self._drivers[driver_type] = driver
def get_driver(self, driver_type):
found = None
if driver_type in self._drivers:
found = self._drivers[driver_type]
return found
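# Usage sketch ('ping' is trove's stock example module type; treat the name as
# illustrative):
#
#   manager = ModuleDriverManager()
#   driver = manager.get_driver('ping')
#   if driver is None:
#       raise exception.Error(_("No driver registered for type 'ping'"))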
|
import scalaris
from datetime import datetime
from threading import Thread
import time, threading
import random, string
import os, sys, traceback
_BENCH_DATA_SIZE = 1000
"""The size of a single data item that is send to scalaris."""
_benchTime = 0
"""This is used to create different erlang keys for each run."""
_PERCENT_TO_REMOVE = 5
"""Cut 5% off of both ends of the result list."""
_TESTRUNS = 1
"""Number of test runs (accumulates results over all test runs)."""
if 'SCALARIS_JSON_URLS' in os.environ and os.environ['SCALARIS_JSON_URLS'] != '':
DEFAULT_URLS = os.environ['SCALARIS_JSON_URLS'].split()
else:
DEFAULT_URLS = [scalaris.DEFAULT_URL]
def minibench(operations, threads_per_node, benchmarks):
"""
Default minimal benchmark.
Tests some strategies for writing key/value pairs to scalaris:
1) writing binary objects (random data, size = _BENCH_DATA_SIZE)
2) writing string objects (random data, size = _BENCH_DATA_SIZE)
each with the given number of consecutive operations and parallel
threads per Scalaris node,
* first using a new Transaction or TransactionSingleOp for each test,
* then using a new Transaction or TransactionSingleOp but re-using a single connection,
* and finally re-using a single Transaction or TransactionSingleOp object.
"""
    # The time when the (whole) benchmark suite was started.
    # This is used to create different erlang keys for each run.
    global _benchTime
    _benchTime = _getCurrentMillis()
    parallel_runs = len(DEFAULT_URLS) * threads_per_node
print 'Number of available nodes: ' + str(len(DEFAULT_URLS))
print '-> Using ' + str(parallel_runs) + ' parallel instances per test run...'
sys.stdout.flush()
print 'Benchmark of scalaris.TransactionSingleOp:'
sys.stdout.flush()
test_types = ['binary', 'string']
test_types_str = ['B', 'S']
columns = ['TransactionSingleOp.write(string, bytearray)',
'TransactionSingleOp.write(string, string)']
test_bench = [TransSingleOpBench1, TransSingleOpBench2, TransSingleOpBench3]
rows = ['separate connection', 're-use connection', 're-use object']
    test_group = 'transsinglebench'
results = _getResultArray(rows, columns)
_runBenchAndPrintResults(benchmarks, results, columns, rows, test_types,
test_types_str, test_bench, test_group, 1, operations, parallel_runs)
print '-----'
print 'Benchmark of scalaris.Transaction:'
sys.stdout.flush()
test_types = ['binary', 'string']
test_types_str = ['B', 'S']
columns = ['Transaction.write(string, bytearray)',
'Transaction.write(string, string)']
test_bench = [TransBench1, TransBench2, TransBench3]
rows = ['separate connection', 're-use connection', 're-use object']
    test_group = 'transbench'
results = _getResultArray(rows, columns)
_runBenchAndPrintResults(benchmarks, results, columns, rows, test_types,
test_types_str, test_bench, test_group, 1, operations, parallel_runs)
print '-----'
print 'Benchmark incrementing an integer key (read+write):'
sys.stdout.flush()
test_types = ['int']
test_types_str = ['I']
columns = ['Transaction.add_add_on_nr(string, int)']
test_bench = [TransIncrementBench1, TransIncrementBench2, TransIncrementBench3]
rows = ['separate connection', 're-use connection', 're-use object']
    test_group = 'transbench_inc'
results = _getResultArray(rows, columns)
_runBenchAndPrintResults(benchmarks, results, columns, rows, test_types,
test_types_str, test_bench, test_group, 7, operations, parallel_runs)
print '-----'
print 'Benchmark read 5 + write 5:'
sys.stdout.flush()
test_types = ['binary', 'string']
test_types_str = ['B', 'S']
columns = ['Transaction.read(string) + Transaction.write(string, binary)',
'Transaction.read(string) + Transaction.write(string, string)']
test_bench = [TransRead5Write5Bench1, TransRead5Write5Bench2, TransRead5Write5Bench3]
rows = ['separate connection', 're-use connection', 're-use object']
    test_group = 'transbench_r5w5'
results = _getResultArray(rows, columns)
_runBenchAndPrintResults(benchmarks, results, columns, rows, test_types,
test_types_str, test_bench, test_group, 10, operations, parallel_runs)
print '-----'
print 'Benchmark appending to a String list (read+write):'
sys.stdout.flush()
test_types = ['string']
test_types_str = ['S']
columns = ['Transaction.add_add_del_on_list(string, [string], [])']
test_bench = [TransAppendToListBench1, TransAppendToListBench2, TransAppendToListBench3]
rows = ['separate connection', 're-use connection', 're-use object']
    test_group = 'transbench_append'
results = _getResultArray(rows, columns)
_runBenchAndPrintResults(benchmarks, results, columns, rows, test_types,
test_types_str, test_bench, test_group, 16, operations, parallel_runs)
class BenchRunnable(Thread):
"""
Abstract base class of a test run that is to be run in a thread.
"""
def __init__(self, key, value, operations):
"""
Create a new runnable.
"""
Thread.__init__(self)
self._key = key
self._value = value
self._operations = operations
self._shouldStop = False
self._timeAtStart = 0
self._speed = -1
def _testBegin(self):
"""
Call this method when a benchmark is started.
Sets the time the benchmark was started.
"""
self._timeAtStart = _getCurrentMillis()
def _testEnd(self, testRuns):
"""
Call this method when a benchmark is finished.
Calculates the time the benchmark took and the number of transactions
performed during this time.
"""
timeTaken = _getCurrentMillis() - self._timeAtStart
speed = (testRuns * 1000) / timeTaken
return speed
def pre_init(self, j = None):
"""
Will be called before the benchmark starts with all possible
variations of "j" in the operation() call.
"j" with None is the overall initialisation run at first.
"""
pass
def init(self):
"""
Will be called at the start of the benchmark.
"""
pass
def cleanup(self):
"""
Will be called before the end of the benchmark.
"""
pass
def operation(self, j):
"""
The operation to execute during the benchmark.
"""
pass
def run(self):
threading.currentThread().name = "BenchRunnable-" + self._key
retry = 0
while (retry < 3) and (not self._shouldStop):
try:
self.pre_init()
for j in xrange(self._operations):
self.pre_init(j)
self._testBegin()
self.init()
for j in xrange(self._operations):
self.operation(j)
self.cleanup()
self._speed = self._testEnd(self._operations)
break
except:
# _printException()
pass
retry += 1
def getSpeed(self):
return self._speed
def shouldStop(self):
self._shouldStop = True
class BenchRunnable2(BenchRunnable):
def __init__(self, key, value, operations):
"""
Create a new runnable.
"""
BenchRunnable.__init__(self, key, value, operations)
def init(self):
"""
Will be called at the start of the benchmark.
"""
self._connection = _getConnection()
def cleanup(self):
"""
Will be called before the end of the benchmark.
"""
self._connection.close()
class TransSingleOpBench1(BenchRunnable):
"""
Performs a benchmark writing objects using a new TransactionSingleOp object
for each test.
"""
def __init__(self, key, value, operations):
BenchRunnable.__init__(self, key, value, operations)
def operation(self, j):
tx = scalaris.TransactionSingleOp(conn = _getConnection())
tx.write(self._key + '_' + str(j), self._value)
tx.close_connection()
class TransSingleOpBench2(BenchRunnable2):
"""
Performs a benchmark writing objects using a new TransactionSingleOp but
re-using a single connection for each test.
"""
def __init__(self, key, value, operations):
BenchRunnable2.__init__(self, key, value, operations)
def operation(self, j):
tx = scalaris.TransactionSingleOp(conn = self._connection)
tx.write(self._key + '_' + str(j), self._value)
class TransSingleOpBench3(BenchRunnable):
"""
Performs a benchmark writing objects using a single TransactionSingleOp
object for all tests.
"""
def __init__(self, key, value, operations):
BenchRunnable.__init__(self, key, value, operations)
def init(self):
self._tx = scalaris.TransactionSingleOp(conn = _getConnection())
def cleanup(self):
self._tx.close_connection()
def operation(self, j):
self._tx.write(self._key + '_' + str(j), self._value)
class TransBench1(BenchRunnable):
"""
Performs a benchmark writing objects using a new Transaction for each test.
"""
def __init__(self, key, value, operations):
BenchRunnable.__init__(self, key, value, operations)
def operation(self, j):
tx = scalaris.Transaction(conn = _getConnection())
tx.write(self._key + '_' + str(j), self._value)
tx.commit()
tx.close_connection()
class TransBench2(BenchRunnable2):
"""
Performs a benchmark writing objects using a new Transaction but re-using a
single connection for each test.
"""
def __init__(self, key, value, operations):
BenchRunnable2.__init__(self, key, value, operations)
def operation(self, j):
tx = scalaris.Transaction(conn = self._connection)
tx.write(self._key + '_' + str(j), self._value)
tx.commit()
class TransBench3(BenchRunnable):
"""
Performs a benchmark writing objects using a single Transaction object
for all tests.
"""
def __init__(self, key, value, operations):
BenchRunnable.__init__(self, key, value, operations)
def init(self):
self._tx = scalaris.Transaction(conn = _getConnection())
def cleanup(self):
self._tx.close_connection()
def operation(self, j):
self._tx.write(self._key + '_' + str(j), self._value)
self._tx.commit()
class TransIncrementBench(BenchRunnable):
"""
Performs a benchmark writing integer numbers on a single key and
increasing them.
Provides convenience methods for the full increment benchmark
implementations.
"""
def __init__(self, key, value, operations):
BenchRunnable.__init__(self, key, value, operations)
def pre_init(self, j = None):
tx_init = scalaris.Transaction(conn = _getConnection())
tx_init.write(self._key, 0)
tx_init.commit()
tx_init.close_connection()
def operation2(self, tx, j):
reqs = tx.new_req_list()
reqs.add_add_on_nr(self._key, 1).add_commit()
# value_old = tx.read(self._key)
# reqs.add_write(key, value_old + 1).add_commit()
results = tx.req_list(reqs)
# tx.process_result_write(results[0])
tx.process_result_add_on_nr(results[0])
class TransIncrementBench1(TransIncrementBench):
"""
Performs a benchmark writing integer numbers on a single key and
increasing them using a new Transaction for each test.
"""
def __init__(self, key, value, operations):
TransIncrementBench.__init__(self, key, value, operations)
def operation(self, j):
tx = scalaris.Transaction(conn = _getConnection())
self.operation2(tx, j)
tx.close_connection()
class TransIncrementBench2(TransIncrementBench):
"""
Performs a benchmark writing integer numbers on a single key and
increasing them using a new Transaction but re-using a single
connection for each test.
"""
def __init__(self, key, value, operations):
TransIncrementBench.__init__(self, key, value, operations)
def init(self):
self._connection = _getConnection()
def cleanup(self):
self._connection.close()
def operation(self, j):
tx = scalaris.Transaction(conn = self._connection)
self.operation2(tx, j)
class TransIncrementBench3(TransIncrementBench):
"""
Performs a benchmark writing objects using a single Transaction
object for all tests.
"""
def __init__(self, key, value, operations):
TransIncrementBench.__init__(self, key, value, operations)
def init(self):
self._tx = scalaris.Transaction(conn = _getConnection())
def cleanup(self):
self._tx.close_connection()
def operation(self, j):
self.operation2(self._tx, j)
class TransReadXWriteXBench(BenchRunnable):
"""
Performs a benchmark reading X values and overwriting them afterwards
inside a transaction.
Provides convenience methods for the full read-x, write-x benchmark
implementations.
"""
def __init__(self, key, value, nr_keys, operations):
BenchRunnable.__init__(self, key, value, operations)
self._keys = []
self._value_write = []
for i in xrange(nr_keys):
self._keys.append(key + "_" + str(i))
self._value_write.append(_getRandom(_BENCH_DATA_SIZE, type(value).__name__))
def pre_init(self, j = None):
value_init = []
for i in xrange(len(self._keys)):
value_init.append(_getRandom(_BENCH_DATA_SIZE, type(self._value).__name__))
tx_init = scalaris.Transaction(conn = _getConnection())
reqs = tx_init.new_req_list()
for i in xrange(len(self._keys)):
reqs.add_write(self._keys[i], value_init[i])
reqs.add_commit()
results = tx_init.req_list(reqs)
for i in xrange(len(self._keys)):
tx_init.process_result_write(results[i])
tx_init.close_connection()
def operation2(self, tx, j):
reqs = tx.new_req_list()
# read old values into the transaction
for i in xrange(len(self._keys)):
reqs.add_read(self._keys[i])
reqs.add_commit()
results = tx.req_list(reqs)
for i in xrange(len(self._keys)):
tx.process_result_read(results[i])
# write new values...
reqs = tx.new_req_list()
for i in xrange(len(self._keys)):
value = self._value_write[j % len(self._value_write)]
reqs.add_write(self._keys[i], value)
reqs.add_commit()
results = tx.req_list(reqs)
for i in xrange(len(self._keys)):
tx.process_result_write(results[i])
class TransRead5Write5Bench1(TransReadXWriteXBench):
"""
Performs a benchmark reading 5 values and overwriting them afterwards
inside a transaction using a new Transaction for each test.
"""
def __init__(self, key, value, operations):
TransReadXWriteXBench.__init__(self, key, value, 5, operations)
def operation(self, j):
tx = scalaris.Transaction(conn = _getConnection())
self.operation2(tx, j)
tx.close_connection()
class TransRead5Write5Bench2(TransReadXWriteXBench):
"""
Performs a benchmark reading 5 values and overwriting them afterwards
inside a transaction using a new Transaction but re-using a single
connection for each test.
"""
def __init__(self, key, value, operations):
TransReadXWriteXBench.__init__(self, key, value, 5, operations)
def init(self):
self._connection = _getConnection()
def cleanup(self):
self._connection.close()
def operation(self, j):
tx = scalaris.Transaction(conn = self._connection)
self.operation2(tx, j)
class TransRead5Write5Bench3(TransReadXWriteXBench):
"""
Performs a benchmark reading 5 values and overwriting them afterwards
inside a transaction using a single Transaction object for all tests.
"""
def __init__(self, key, value, operations):
TransReadXWriteXBench.__init__(self, key, value, 5, operations)
def init(self):
self._tx = scalaris.Transaction(conn = _getConnection())
def cleanup(self):
self._tx.close_connection()
def operation(self, j):
self.operation2(self._tx, j)
class TransAppendToListBench(BenchRunnable):
"""
Performs a benchmark adding values to a list inside a transaction.
Provides convenience methods for the full append-to-list benchmark
implementations.
"""
def __init__(self, key, value, nr_keys, operations):
BenchRunnable.__init__(self, key, value, operations)
self._value_init = []
for _i in xrange(nr_keys):
self._value_init.append(_getRandom(_BENCH_DATA_SIZE, 'string'))
def pre_init(self, j = None):
if j is None:
return
tx_init = scalaris.Transaction(conn = _getConnection())
reqs = tx_init.new_req_list()
reqs.add_write(self._key + '_' + str(j), self._value_init).add_commit()
results = tx_init.req_list(reqs)
tx_init.process_result_write(results[0])
tx_init.close_connection()
def operation2(self, tx, j):
reqs = tx.new_req_list()
reqs.add_add_del_on_list(self._key + '_' + str(j), [self._value], []).add_commit()
# read old list into the transaction
# list = scalaris.str_to_list(tx.read(self._key + '_' + str(j)))
# write new list ...
# list.append(self._value)
# reqs.add_write(self._key + '_' + str(j), list).add_commit())
results = tx.req_list(reqs)
# tx.process_result_write(results[0])
tx.process_result_add_del_on_list(results[0])
class TransAppendToListBench1(TransAppendToListBench):
"""
Performs a benchmark adding values to a list inside a transaction
using a new Transaction for each test.
"""
def __init__(self, key, value, operations):
TransAppendToListBench.__init__(self, key, value, 5, operations)
def operation(self, j):
tx = scalaris.Transaction(conn = _getConnection())
self.operation2(tx, j)
tx.close_connection()
class TransAppendToListBench2(TransAppendToListBench):
"""
Performs a benchmark adding values to a list inside a transaction using a
new Transaction but re-using a single connection for each test.
"""
def __init__(self, key, value, operations):
TransAppendToListBench.__init__(self, key, value, 5, operations)
def init(self):
self._connection = _getConnection()
def cleanup(self):
self._connection.close()
def operation(self, j):
tx = scalaris.Transaction(conn = self._connection)
self.operation2(tx, j)
class TransAppendToListBench3(TransAppendToListBench):
"""
Performs a benchmark adding values to a list inside a transaction using a
single Transaction object for all tests.
"""
def __init__(self, key, value, operations):
TransAppendToListBench.__init__(self, key, value, 5, operations)
def init(self):
self._tx = scalaris.Transaction(conn = _getConnection())
def cleanup(self):
self._tx.close_connection()
def operation(self, j):
self.operation2(self._tx, j)
def _getCurrentMillis():
"""
Gets the number of milliseconds since epoch.
"""
now = datetime.now()
return int(time.mktime(now.timetuple())) * 1000 + (now.microsecond // 1000)
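# Rough sketch with illustrative values: if datetime.now() is half a second
# past some whole epoch second T, time.mktime() contributes T seconds and the
# microsecond field adds 500, giving T * 1000 + 500 milliseconds.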
def _testBegin():
"""
Call this method when a benchmark is started.
Sets the time the benchmark was started.
"""
global _timeAtStart
_timeAtStart = _getCurrentMillis()
def _testEnd(testruns):
"""
Call this method when a benchmark is finished.
Calculates the time the benchmark took and the number of transactions
performed during this time.
Returns the number of achieved transactions per second.
"""
global _timeAtStart
timeTaken = _getCurrentMillis() - _timeAtStart
speed = (testruns * 1000) // timeTaken
return speed
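# Worked example with illustrative numbers: 500 transactions finishing in
# 250 ms yield (500 * 1000) // 250 = 2000 transactions per second.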
def _getConnection():
url = random.choice(DEFAULT_URLS)
return scalaris.JSONConnection(url = url)
def _getResultArray(rows, columns):
"""
    Returns a pre-initialized results array with all values set to -1.
"""
results = {}
for row in rows:
results[row] = {}
for column in columns:
results[row][column] = -1
return results
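# For example (hypothetical labels), _getResultArray(['read', 'write'],
# ['str', 'bin']) returns {'read': {'str': -1, 'bin': -1},
# 'write': {'str': -1, 'bin': -1}}.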
def _getRandom(size, mytype):
"""
    Creates a random string or binary object of <size> random characters/bytes.
"""
if mytype == 'int':
return random.randint(0, 2147483647)
elif mytype == 'string' or mytype == 'str':
return ''.join(random.choice(string.printable) for _x in xrange(size))
elif mytype == 'binary':
return bytearray(random.randrange(0, 256) for _x in xrange(size))
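# Illustrative calls: _getRandom(4, 'string') might return 'aZ%3',
# _getRandom(4, 'binary') a 4-byte bytearray, and _getRandom(4, 'int') a
# random 31-bit integer (size is ignored for 'int'); any other type name
# falls through and implicitly returns None.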
def _integrateResults(results, i, worker, failed):
"""
Integrates the workers' results into the result array.
"""
try:
for bench_thread in worker:
if failed >= 3:
bench_thread.shouldStop()
try:
                    while bench_thread.isAlive(): # non-blocking join so we are able to receive CTRL-C
bench_thread.join(1)
except RuntimeError:
pass
else:
try:
                    while bench_thread.isAlive(): # non-blocking join so we are able to receive CTRL-C
bench_thread.join(1)
speed = bench_thread.getSpeed()
except RuntimeError:
speed = -1
if speed < 0:
failed += 1
else:
results[i] += speed
return failed
except KeyboardInterrupt:
print 'CTRL-C received, aborting...'
for bench_thread in worker:
bench_thread.shouldStop()
sys.exit(1)
def _getAvgSpeed(results):
"""
    Calculates the average number of transactions per second from the
    results of the individual test runs. Will remove the top and bottom
_PERCENT_TO_REMOVE percent of the sorted results array.
Returns the average number of transactions per second.
"""
results.sort()
    toRemove = int((len(results) * _PERCENT_TO_REMOVE) // 100)
    avgSpeed = 0
for i in xrange(toRemove, (len(results) - toRemove)):
avgSpeed += results[i]
avgSpeed //= len(results) - 2 * toRemove
return avgSpeed
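# Worked example, assuming _PERCENT_TO_REMOVE = 10 (the constant is defined
# elsewhere in this module): for 20 sorted results, toRemove =
# (20 * 10) // 100 = 2, so the two smallest and two largest values are
# dropped and the remaining 16 are averaged.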
def _runBenchAndPrintResults(benchmarks, results, columns, rows, test_types,
test_types_str, test_bench, test_group,
first_bench_id, operations, parallel_runs):
"""
Runs the given benchmarks and prints a results table.
"""
# assume non-empty results dict:
for test in xrange(len(results) * len(results[list(results.keys())[0]])):
try:
            i = test % len(results)
            j = test // len(results)
if (test + first_bench_id) in benchmarks:
results[rows[i]][columns[j]] = _runBench(operations,
_getRandom(_BENCH_DATA_SIZE, test_types[j]),
test_group + "_" + test_types_str[j] + "_" + str(i + 1),
test_bench[i], parallel_runs)
time.sleep(1)
else:
results[rows[i]][columns[j]] = -2
except Exception: # do not catch SystemExit
_printException()
_printResults(columns, rows, results, operations, parallel_runs)
def _runBench(operations, value, name, clazz, parallel_runs):
"""
Runs the given benchmark.
"""
key = str(_benchTime) + name
results = [-1]*_TESTRUNS
for i in xrange(_TESTRUNS):
worker = []
for thread in xrange(parallel_runs):
new_worker = clazz(key + '_' + str(i) + '_' + str(thread), value, operations)
worker.append(new_worker)
new_worker.start()
failed = 0
        failed = _integrateResults(results, i, worker, failed)
if failed >= 3:
return -1
return _getAvgSpeed(results)
def _printResults(columns, rows, results, operations, parallel_runs):
"""
Prints a result table.
"""
print 'Concurrent threads: ' + str(parallel_runs) + ', each using ' + str(operations) + ' transactions'
colLen = 25
emptyFirstColumn = ''.join([' ']*colLen)
print emptyFirstColumn + '\tspeed (transactions / second)'
print emptyFirstColumn,
i = 1
for column in columns:
print '\t(' + str(i) + ')',
i += 1
print ''
for row in rows:
print row + ''.join([' ']*(colLen - len(row))),
for column in columns:
value = results[row][column]
if (value == -2):
print '\tn/a',
elif (value == -1):
print '\tfailed',
else:
print '\t' + str(int(value)),
print ''
i = 1
for column in columns:
print '(' + str(i) + ') ' + column
i += 1
sys.stdout.flush()
def _printException():
mytype, message, trace = sys.exc_info()
print str(mytype) + str(message)
traceback.print_tb(trace)
def run_from_cmd(argv):
nr_operations = 500
threads_per_node = 10
allBenchs = False
if (len(argv) == 1):
allBenchs = True
elif (len(argv) == 2):
nr_operations = int(argv[1])
elif (len(argv) == 3):
nr_operations = int(argv[1])
threads_per_node = int(argv[2])
elif (len(argv) >= 4):
nr_operations = int(argv[1])
threads_per_node = int(argv[2])
benchmarks = []
for i in xrange(3, len(argv)):
if argv[i] == 'all':
allBenchs = True
else:
benchmarks.append(int(argv[i]))
if allBenchs:
benchmarks = xrange(1, 19, 1)
minibench(nr_operations, threads_per_node, benchmarks)
if __name__ == "__main__":
run_from_cmd(sys.argv)
|
'''
Copyright (C) 2013 TopCoder Inc., All Rights Reserved.
'''
'''
This is the module that defines all the views which will respond to client requests.
Thread Safety:
The implementation is not thread safe but it will be used in a thread-safe manner.
v1.1 - Healthcare Fraud Prevention Release Assembly v1.0
- updated for added StudyID
@author: TCSASSEMBLER
@version: 1.1
'''
from django.template.loader import get_template
from django.template import Context
from django.utils.decorators import method_decorator
from django.http import HttpResponseRedirect
from django.http import HttpResponse
from django.views.decorators.http import require_http_methods
from urllib.parse import urlencode
from decision_module import helper
from httpservices import DataRequestHandler, handle_deny_operation
from threading import Thread
from validationhelper import check_string
from appliance.config import dbconfig
from appliance.request_persistence import MySQLRequestPersistence
from appliance.request_persistence import RedisRequestPersistence
import isodate
import logging
def translateRequests(requests):
'''
    This method translates a list of requests from sequence (tuple) form into dict form.
Thread Safety:
The implementation is not thread safe but it will be used in a thread-safe manner.
@param requests: The partner requests.
@return: The mapped partner requests.
'''
nRequests = []
requestColumns = ['request_id', 'study_id', 'query', 'expiration_time',
'cache_available', 'cache_timestamp', 'status']
for req in requests:
nReq = {}
        if dbconfig["type"] == "mysql":
            req = req[1:]
        for idx, field in enumerate(requestColumns):
            nReq[field] = req[idx]
nRequests.append(nReq)
return nRequests
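# Illustrative translation (hypothetical values): the redis row
# ('r1', 's1', 'age>42', '2013-06-01T00:00:00', 'true', '', 'pending') becomes
# {'request_id': 'r1', 'study_id': 's1', 'query': 'age>42', ...}; for MySQL
# the leading auto-increment column is stripped before the mapping.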
def get_request_persistence():
"""
Get appropriate db persistence object from config.
"""
if dbconfig["type"]=='redis':
return RedisRequestPersistence()
elif dbconfig["type"] == "mysql":
return MySQLRequestPersistence()
else:
raise ValueError("Invalid db type: " + config.dbconfig["type"])
@require_http_methods(["GET"])
def list_partner_requests(request):
'''
    This is the view function for listing partner requests.
Thread Safety:
The implementation is not thread safe but it will be used in a thread-safe manner.
@param request: the http request
@return: the http response
'''
CLASS_NAME = 'decision_module.views'
LOGGER = logging.getLogger(CLASS_NAME)
# Do logging
signature = CLASS_NAME + '.list_partner_requests'
helper.log_entrance(LOGGER, signature, {'request': request})
p = get_request_persistence()
p.connectionConfig = dbconfig
pending = []
approved = []
denied = []
try:
p.begin()
if dbconfig["type"]=='redis':
pending = translateRequests(p.queryRequests('status=pending', None, None))
approved = translateRequests(p.queryRequests('status=approved', None, None))
denied = translateRequests(p.queryRequests('status=denied', None, None))
        else:  # MySQL; no other possibility, otherwise an exception would have been raised earlier
pending = translateRequests(p.queryRequests('status="pending"', None, None))
approved = translateRequests(p.queryRequests('status="approved"', None, None))
denied = translateRequests(p.queryRequests('status="denied"', None, None))
finally:
if p.connection:
p.close()
# Render templates
t = get_template('RequestList.html')
ret = HttpResponse(t.render(Context(
{'pending': pending,
'approved': approved if len(approved) > 0 else None,
'denied': denied if len(denied) > 0 else None})))
# Do logging
helper.log_exit(LOGGER, signature, [ret])
return ret
@require_http_methods(["POST"])
def create_partner_request(request):
'''
This is the view function for creating one partner request.
Thread Safety:
The implementation is not thread safe but it will be used in a thread-safe manner.
@param request: the http request
@return: the http response
'''
CLASS_NAME = 'decision_module.views'
LOGGER = logging.getLogger(CLASS_NAME)
# Do logging
signature = CLASS_NAME + '.create_partner_request'
helper.log_entrance(LOGGER, signature, {'request': request})
# Check posted values
try:
check_string('request_id', request.POST['request_id'])
check_string('study_id', request.POST['study_id'])
check_string('query', request.POST['query'])
check_string('expiration_time', request.POST['expiration_time'])
check_string('cache_available', request.POST['cache_available'])
if request.POST['cache_available'] == 'true':
check_string('cache_timestamp', request.POST['cache_timestamp'])
check_string('status', request.POST['status'])
except Exception as e:
helper.log_exception(LOGGER, signature, e)
if dbconfig["type"]=='redis':
fields = [request.POST['request_id'], request.POST['study_id'], request.POST['query'],
request.POST['expiration_time'], request.POST['cache_available'],
request.POST['cache_timestamp'], request.POST['status'],]
    else:  # MySQL: single quotes in SQL statements must be doubled ('') or the statement is invalid.
fields = [request.POST['request_id'], request.POST['study_id'], request.POST['query'].replace("'", "''"),
request.POST['expiration_time'], request.POST['cache_available'],
request.POST['cache_timestamp'], request.POST['status'],]
p = get_request_persistence()
p.connectionConfig = dbconfig
try:
p.begin()
p.createRequest(fields)
p.commit()
except:
p.rollback()
finally:
if p.connection:
p.close()
    # Redirect disabled below; respond with HTTP 200 instead
# ret = HttpResponseRedirect('/')
ret = HttpResponse(status=200)
# Do logging
helper.log_exit(LOGGER, signature, [ret])
return ret
@require_http_methods(["POST"])
def approval_partner_request(request):
'''
    This is the view function for approving one partner request.
Thread Safety:
The implementation is not thread safe but it will be used in a thread-safe manner.
@param request: the http request
@return: the http response
'''
CLASS_NAME = 'decision_module.views'
LOGGER = logging.getLogger(CLASS_NAME)
# Do logging
signature = CLASS_NAME + '.approval_partner_request'
helper.log_entrance(LOGGER, signature, {'request': request})
request_id = request.POST['request_id']
p = get_request_persistence()
p.connectionConfig = dbconfig
req = []
try:
p.begin()
if dbconfig["type"]=='redis':
req = p.queryRequests('request_id={0}'.format(request_id), None, None)
if len(req) > 0:
req = req[0]
p.updateRequests('status=approved', 'request_id={0}'.format(request_id))
p.commit()
        else:  # MySQL; no other possibility, otherwise an exception would have been raised earlier
req = p.queryRequests('request_id="{0}"'.format(request_id), None, None)
if len(req) > 0:
req = req[0]
p.updateRequests('status="approved"', 'request_id="{0}"'.format(request_id))
p.commit()
except:
p.rollback()
finally:
if p.connection:
p.close()
# Kick off a new thread to handle the request
try:
if len(req) == 8:
req = req[1:]
if len(req) < 7:
            raise ValueError('Request is missing parameters')
request_id = req[0]
study_id = req[1]
query = req[2]
expiration_time = isodate.parse_datetime(req[3])
cache_available = 'true' == req[4]
cache_timestamp = None
if req[5] and len(req[5]) > 0:
cache_timestamp = isodate.parse_datetime(req[5])
handler = DataRequestHandler()
t = Thread(target=handler.handle_data_request, args=(request_id, study_id, query,
expiration_time, cache_available,
cache_timestamp, True))
t.daemon = False
t.start()
except Exception as e:
helper.log_exception(LOGGER, signature, e)
    # Redirect to '/'
ret = HttpResponseRedirect('/')
# Do logging
helper.log_exit(LOGGER, signature, [ret])
return ret
@require_http_methods(["POST"])
def deny_partner_request(request):
'''
This is the view function for denying one partner request.
Thread Safety:
The implementation is not thread safe but it will be used in a thread-safe manner.
@param request: the http request
@return: the http response
'''
CLASS_NAME = 'decision_module.views'
LOGGER = logging.getLogger(CLASS_NAME)
# Do logging
signature = CLASS_NAME + '.deny_partner_request'
helper.log_entrance(LOGGER, signature, {'request': request})
request_id = request.POST['request_id']
p = get_request_persistence()
p.connectionConfig = dbconfig
req = []
try:
p.begin()
if dbconfig["type"]=='redis':
req = p.queryRequests('request_id={0}'.format(request_id), None, None)
if len(req) > 0:
req = req[0]
p.updateRequests('status=denied', 'request_id={0}'.format(request_id))
p.commit()
        else:  # MySQL; no other possibility, otherwise an exception would have been raised earlier
req = p.queryRequests('request_id="{0}"'.format(request_id), None, None)
if len(req) > 0:
req = req[0]
p.updateRequests('status="denied"', 'request_id="{0}"'.format(request_id))
p.commit()
except:
p.rollback()
finally:
if p.connection:
p.close()
# Kick off a new thread to handle the request
try:
if len(req) == 8:
req = req[1:]
if len(req) < 7:
            raise ValueError('Request is missing parameters')
request_id = req[0]
t = Thread(target=handle_deny_operation, args=([request_id],))
t.daemon = False
t.start()
except Exception as e:
helper.log_exception(LOGGER, signature, e)
    # Redirect to '/'
ret = HttpResponseRedirect('/')
# Do logging
helper.log_exit(LOGGER, signature, [ret])
return ret
|
from __future__ import absolute_import
import re
import six
__author__ = 'ross'
import unittest
name_re=re.compile(r'''name\s*=\s*["']+\w+["']''')
desc_re=re.compile(r'''description\s*=\s*["']+[\w\s]+["']''')
attr_re=re.compile(r'''["']+[\w\s]+["']''')
vd={'A':'Foo Bar'}
class ValveCommand(object):
valve=None
def load_str(self, txt):
m=name_re.match(txt)
if m:
a=self._extract_attr(m)
if a:
self.valve=a
return
m=desc_re.match(txt)
if m:
a=self._extract_attr(m)
v=next((k for k, v in six.iteritems(vd) if v==a), None)
if v:
self.valve=v
return
if attr_re.match(txt):
self.valve=txt[1:-1]
def _extract_attr(self, m):
name = m.group(0)
a = attr_re.findall(name)[0]
if a:
return a[1:-1]
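# Illustrative behaviour of the patterns above: name_re matches name="A" and
# name='A'; desc_re matches description='Foo Bar'; attr_re pulls the quoted
# token (quotes included) out of a match, and _extract_attr strips the
# surrounding quote characters.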
class ValveCase(unittest.TestCase):
def setUp(self):
self.cmd=ValveCommand()
def testName(self):
t='name="A"'
self.cmd.load_str(t)
self.assertEqual(self.cmd.valve, 'A')
def testName2(self):
t="name='A'"
self.cmd.load_str(t)
self.assertEqual(self.cmd.valve, 'A')
def testNone(self):
self.cmd.load_str("'A'")
self.assertEqual(self.cmd.valve, 'A')
def testDescription(self):
t = "description='Foo Bar'"
self.cmd.load_str(t)
self.assertEqual(self.cmd.valve, 'A')
def testBoth(self):
t = "name='A',description='Foo'"
self.cmd.load_str(t)
self.assertEqual(self.cmd.valve, 'A')
def testSwitchedBoth(self):
t = "description='Foo Bar',name='A'"
self.cmd.load_str(t)
self.assertEqual(self.cmd.valve, 'A')
if __name__ == '__main__':
unittest.main()
|
from __future__ import absolute_import
import eventlet
from integration.orquesta import base
from st2common.constants import action as ac_const
class InquiryWiringTest(base.TestWorkflowExecution):
def test_basic_inquiry(self):
        # Launch the workflow. The workflow will pause at the pending task.
ex = self._execute_workflow("examples.orquesta-ask-basic")
ex = self._wait_for_state(ex, ac_const.LIVEACTION_STATUS_PAUSED)
# Respond to the inquiry.
ac_exs = self._wait_for_task(
ex, "get_approval", ac_const.LIVEACTION_STATUS_PENDING
)
self.st2client.inquiries.respond(ac_exs[0].id, {"approved": True})
# Wait for completion.
ex = self._wait_for_state(ex, ac_const.LIVEACTION_STATUS_SUCCEEDED)
def test_consecutive_inquiries(self):
        # Launch the workflow. The workflow will pause at the pending task.
ex = self._execute_workflow("examples.orquesta-ask-consecutive")
ex = self._wait_for_state(ex, ac_const.LIVEACTION_STATUS_PAUSED)
# Respond to the first inquiry.
t1_ac_exs = self._wait_for_task(
ex, "get_approval", ac_const.LIVEACTION_STATUS_PENDING
)
self.st2client.inquiries.respond(t1_ac_exs[0].id, {"approved": True})
# Wait for the workflow to pause again.
ex = self._wait_for_state(ex, ac_const.LIVEACTION_STATUS_PAUSED)
# Respond to the second inquiry.
t2_ac_exs = self._wait_for_task(
ex, "get_confirmation", ac_const.LIVEACTION_STATUS_PENDING
)
self.st2client.inquiries.respond(t2_ac_exs[0].id, {"approved": True})
# Wait for completion.
ex = self._wait_for_state(ex, ac_const.LIVEACTION_STATUS_SUCCEEDED)
def test_parallel_inquiries(self):
        # Launch the workflow. The workflow will pause at the pending task.
ex = self._execute_workflow("examples.orquesta-ask-parallel")
ex = self._wait_for_state(ex, ac_const.LIVEACTION_STATUS_PAUSED)
# Respond to the first inquiry.
t1_ac_exs = self._wait_for_task(
ex, "ask_jack", ac_const.LIVEACTION_STATUS_PENDING
)
self.st2client.inquiries.respond(t1_ac_exs[0].id, {"approved": True})
t1_ac_exs = self._wait_for_task(
ex, "ask_jack", ac_const.LIVEACTION_STATUS_SUCCEEDED
)
# Allow some time for the first inquiry to get processed.
eventlet.sleep(2)
# Respond to the second inquiry.
t2_ac_exs = self._wait_for_task(
ex, "ask_jill", ac_const.LIVEACTION_STATUS_PENDING
)
self.st2client.inquiries.respond(t2_ac_exs[0].id, {"approved": True})
t2_ac_exs = self._wait_for_task(
ex, "ask_jill", ac_const.LIVEACTION_STATUS_SUCCEEDED
)
# Wait for completion.
ex = self._wait_for_state(ex, ac_const.LIVEACTION_STATUS_SUCCEEDED)
def test_nested_inquiry(self):
        # Launch the workflow. The workflow will pause at the pending task.
ex = self._execute_workflow("examples.orquesta-ask-nested")
ex = self._wait_for_state(ex, ac_const.LIVEACTION_STATUS_PAUSED)
# Get the action execution of the subworkflow
ac_exs = self._wait_for_task(
ex, "get_approval", ac_const.LIVEACTION_STATUS_PAUSED
)
# Respond to the inquiry in the subworkflow.
t2_t2_ac_exs = self._wait_for_task(
ac_exs[0], "get_approval", ac_const.LIVEACTION_STATUS_PENDING
)
self.st2client.inquiries.respond(t2_t2_ac_exs[0].id, {"approved": True})
# Wait for completion.
ex = self._wait_for_state(ex, ac_const.LIVEACTION_STATUS_SUCCEEDED)
|
from __future__ import absolute_import
from behave.tag_expression.model import Expression, Literal
from behave.tag_expression.model_ext import Matcher
import pytest
class TestExpression(object):
def test_check__can_be_used(self):
tag_expression = Literal("foo")
assert tag_expression.check(["foo"]) is True
assert tag_expression.check(["other"]) is False
class TestMatcher(object):
@pytest.mark.parametrize("expected, tag, case", [
(True, "foo.bar", "startswith_1"),
(True, "foo.bax", "startswith_2"),
(True, "foo.", "exact_match"),
(False, "something.foo.bar", "not_starts_with"),
(False, "foo_bar", "similar"),
])
def test_evaluate_with_startswith_pattern(self, expected, tag, case):
expression = Matcher("foo.*")
assert expression.evaluate([tag]) == expected
@pytest.mark.parametrize("expected, tag, case", [
(True, "bar.foo", "endswith_1"),
(True, "bax.foo", "endswith_2"),
(True, ".foo", "exact_match"),
(False, "something.foo.bar", "not_endswith"),
(False, "bar_foo", "similar"),
])
def test_evaluate_with_endswith_pattern(self, expected, tag, case):
expression = Matcher("*.foo")
assert expression.evaluate([tag]) == expected
@pytest.mark.parametrize("expected, tag, case", [
(False, "bar.foo", "startwith_1"),
(False, "foo.bax", "endswith_2"),
(True, "bar.foo.bax", "contains"),
(True, ".foo.", "exact_match"),
(False, "bar_foo.bax", "similar"),
])
def test_evaluate_with_contains_pattern(self, expected, tag, case):
expression = Matcher("*.foo.*")
assert expression.evaluate([tag]) == expected
|
from AppKit import *
from PyObjCTools.TestSupport import *
import objc
class TestNSTokenFieldHelper (NSObject):
def tokenField_completionsForSubstring_indexOfToken_indexOfSelectedItem_(self, a, b, c, d): return 1
def tokenField_shouldAddObjects_atIndex_(self, a, b, c): return 1
def tokenField_writeRepresentedObjects_toPasteboard_(self, a, b, c): return 1
def tokenField_hasMenuForRepresentedObject_(self, a, b): return 1
def tokenField_styleForRepresentedObject_(self, a, b): return 1
class TestNSTokenField (TestCase):
def testProtocols(self):
self.assertArgHasType(TestNSTokenFieldHelper.tokenField_completionsForSubstring_indexOfToken_indexOfSelectedItem_, 2, objc._C_NSInteger)
self.assertArgHasType(TestNSTokenFieldHelper.tokenField_completionsForSubstring_indexOfToken_indexOfSelectedItem_, 3, b'o^' + objc._C_NSInteger)
self.assertArgHasType(TestNSTokenFieldHelper.tokenField_shouldAddObjects_atIndex_, 2, objc._C_NSUInteger)
self.assertResultIsBOOL(TestNSTokenFieldHelper.tokenField_writeRepresentedObjects_toPasteboard_)
self.assertResultIsBOOL(TestNSTokenFieldHelper.tokenField_hasMenuForRepresentedObject_)
self.assertResultHasType(TestNSTokenFieldHelper.tokenField_styleForRepresentedObject_, objc._C_NSUInteger)
if __name__ == "__main__":
main()
|
"""SCons.Platform.posix
Platform-specific initialization for POSIX (Linux, UNIX, etc.) systems.
There normally shouldn't be any need to import this module directly. It
will usually be imported through the generic SCons.Platform.Platform()
selection method.
"""
__revision__ = "src/engine/SCons/Platform/posix.py 3897 2009/01/13 06:45:54 scons"
import errno
import os
import os.path
import string
import subprocess
import sys
import select
import SCons.Util
from SCons.Platform import TempFileMunge
exitvalmap = {
2 : 127,
13 : 126,
}
def escape(arg):
"escape shell special characters"
slash = '\\'
special = '"$()'
arg = string.replace(arg, slash, slash+slash)
for c in special:
arg = string.replace(arg, c, slash+c)
return '"' + arg + '"'
def exec_system(l, env):
stat = os.system(string.join(l))
if stat & 0xff:
return stat | 0x80
return stat >> 8
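# The decoding above follows the classic POSIX wait-status layout: the low
# byte carries signal/core-dump information, the high byte the exit code.
# A death by signal returns the status with bit 0x80 set, loosely mirroring
# the shell's 128+N convention; otherwise stat >> 8 is the plain exit code.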
def exec_spawnvpe(l, env):
stat = os.spawnvpe(os.P_WAIT, l[0], l, env)
# os.spawnvpe() returns the actual exit code, not the encoding
# returned by os.waitpid() or os.system().
return stat
def exec_fork(l, env):
pid = os.fork()
if not pid:
# Child process.
exitval = 127
try:
os.execvpe(l[0], l, env)
except OSError, e:
exitval = exitvalmap.get(e[0], e[0])
sys.stderr.write("scons: %s: %s\n" % (l[0], e[1]))
os._exit(exitval)
else:
# Parent process.
pid, stat = os.waitpid(pid, 0)
if stat & 0xff:
return stat | 0x80
return stat >> 8
def _get_env_command(sh, escape, cmd, args, env):
s = string.join(args)
if env:
l = ['env', '-'] + \
map(lambda t, e=escape: e(t[0])+'='+e(t[1]), env.items()) + \
[sh, '-c', escape(s)]
s = string.join(l)
return s
def env_spawn(sh, escape, cmd, args, env):
return exec_system([_get_env_command( sh, escape, cmd, args, env)], env)
def spawnvpe_spawn(sh, escape, cmd, args, env):
return exec_spawnvpe([sh, '-c', string.join(args)], env)
def fork_spawn(sh, escape, cmd, args, env):
return exec_fork([sh, '-c', string.join(args)], env)
def process_cmd_output(cmd_stdout, cmd_stderr, stdout, stderr):
stdout_eof = stderr_eof = 0
while not (stdout_eof and stderr_eof):
try:
(i,o,e) = select.select([cmd_stdout, cmd_stderr], [], [])
if cmd_stdout in i:
str = cmd_stdout.read()
if len(str) == 0:
stdout_eof = 1
elif stdout != None:
stdout.write(str)
if cmd_stderr in i:
str = cmd_stderr.read()
if len(str) == 0:
#sys.__stderr__.write( "stderr_eof=1\n" )
stderr_eof = 1
else:
#sys.__stderr__.write( "str(stderr) = %s\n" % str )
stderr.write(str)
except select.error, (_errno, _strerror):
if _errno != errno.EINTR:
raise
def exec_popen3(l, env, stdout, stderr):
proc = subprocess.Popen(string.join(l),
stdout=stdout,
stderr=stderr,
shell=True)
stat = proc.wait()
if stat & 0xff:
return stat | 0x80
return stat >> 8
def exec_piped_fork(l, env, stdout, stderr):
# spawn using fork / exec and providing a pipe for the command's
# stdout / stderr stream
if stdout != stderr:
(rFdOut, wFdOut) = os.pipe()
(rFdErr, wFdErr) = os.pipe()
else:
(rFdOut, wFdOut) = os.pipe()
rFdErr = rFdOut
wFdErr = wFdOut
# do the fork
pid = os.fork()
if not pid:
# Child process
os.close( rFdOut )
if rFdOut != rFdErr:
os.close( rFdErr )
os.dup2( wFdOut, 1 ) # is there some symbolic way to do that ?
os.dup2( wFdErr, 2 )
os.close( wFdOut )
if stdout != stderr:
os.close( wFdErr )
exitval = 127
try:
os.execvpe(l[0], l, env)
except OSError, e:
exitval = exitvalmap.get(e[0], e[0])
stderr.write("scons: %s: %s\n" % (l[0], e[1]))
os._exit(exitval)
else:
# Parent process
pid, stat = os.waitpid(pid, 0)
os.close( wFdOut )
if stdout != stderr:
os.close( wFdErr )
childOut = os.fdopen( rFdOut )
if stdout != stderr:
childErr = os.fdopen( rFdErr )
else:
childErr = childOut
process_cmd_output(childOut, childErr, stdout, stderr)
os.close( rFdOut )
if stdout != stderr:
os.close( rFdErr )
if stat & 0xff:
return stat | 0x80
return stat >> 8
def piped_env_spawn(sh, escape, cmd, args, env, stdout, stderr):
# spawn using Popen3 combined with the env command
# the command name and the command's stdout is written to stdout
# the command's stderr is written to stderr
return exec_popen3([_get_env_command(sh, escape, cmd, args, env)],
env, stdout, stderr)
def piped_fork_spawn(sh, escape, cmd, args, env, stdout, stderr):
# spawn using fork / exec and providing a pipe for the command's
# stdout / stderr stream
return exec_piped_fork([sh, '-c', string.join(args)],
env, stdout, stderr)
def generate(env):
# If os.spawnvpe() exists, we use it to spawn commands. Otherwise
# if the env utility exists, we use os.system() to spawn commands,
# finally we fall back on os.fork()/os.exec().
#
    # os.spawnvpe() is preferred because it is the most efficient. But
    # for Python versions without it, os.system() is preferred because it
# is claimed that it works better with threads (i.e. -j) and is more
# efficient than forking Python.
#
# NB: Other people on the scons-users mailing list have claimed that
# os.fork()/os.exec() works better than os.system(). There may just
# not be a default that works best for all users.
if os.__dict__.has_key('spawnvpe'):
spawn = spawnvpe_spawn
elif env.Detect('env'):
spawn = env_spawn
else:
spawn = fork_spawn
if env.Detect('env'):
pspawn = piped_env_spawn
else:
pspawn = piped_fork_spawn
if not env.has_key('ENV'):
env['ENV'] = {}
env['ENV']['PATH'] = '/usr/local/bin:/opt/bin:/bin:/usr/bin'
env['OBJPREFIX'] = ''
env['OBJSUFFIX'] = '.o'
env['SHOBJPREFIX'] = '$OBJPREFIX'
env['SHOBJSUFFIX'] = '$OBJSUFFIX'
env['PROGPREFIX'] = ''
env['PROGSUFFIX'] = ''
env['LIBPREFIX'] = 'lib'
env['LIBSUFFIX'] = '.a'
env['SHLIBPREFIX'] = '$LIBPREFIX'
env['SHLIBSUFFIX'] = '.so'
env['LIBPREFIXES'] = [ '$LIBPREFIX' ]
env['LIBSUFFIXES'] = [ '$LIBSUFFIX', '$SHLIBSUFFIX' ]
env['PSPAWN'] = pspawn
env['SPAWN'] = spawn
env['SHELL'] = 'sh'
env['ESCAPE'] = escape
env['TEMPFILE'] = TempFileMunge
env['TEMPFILEPREFIX'] = '@'
    # Based on Linux: ARG_MAX = 131072, minus 3000 reserved for environment expansion
    # Note: specific platforms might raise or lower this value
env['MAXLINELENGTH'] = 128072
# This platform supports RPATH specifications.
env['__RPATH'] = '$_RPATH'
|
import pytest
import sqlalchemy as sa
from sqlalchemy.ext.declarative import declared_attr
from sqlalchemy_utils import generic_relationship
from . import GenericRelationshipTestCase
@pytest.fixture
def Building(Base):
class Building(Base):
__tablename__ = 'building'
id = sa.Column(sa.Integer, primary_key=True)
return Building
@pytest.fixture
def User(Base):
class User(Base):
__tablename__ = 'user'
id = sa.Column(sa.Integer, primary_key=True)
return User
@pytest.fixture
def EventBase(Base):
class EventBase(Base):
__abstract__ = True
object_type = sa.Column(sa.Unicode(255))
object_id = sa.Column(sa.Integer, nullable=False)
@declared_attr
def object(cls):
return generic_relationship('object_type', 'object_id')
return EventBase
@pytest.fixture
def Event(EventBase):
class Event(EventBase):
__tablename__ = 'event'
id = sa.Column(sa.Integer, primary_key=True)
return Event
@pytest.fixture
def init_models(Building, User, Event):
pass
class TestGenericRelationshipWithAbstractBase(GenericRelationshipTestCase):
pass
|
from __future__ import absolute_import
from django.core.urlresolvers import reverse
from social_auth.models import UserSocialAuth
from sentry.testutils import APITestCase
class UserSocialIdentityDetailsEndpointTest(APITestCase):
def setUp(self):
self.login_as(self.user)
def test_can_disconnect(self):
auth = UserSocialAuth.create_social_auth(self.user, "1234", "github")
url = reverse(
"sentry-api-0-user-social-identity-details",
kwargs={"user_id": self.user.id, "identity_id": auth.id},
)
with self.settings(GITHUB_APP_ID="app-id", GITHUB_API_SECRET="secret"):
response = self.client.delete(url)
assert response.status_code == 204
assert not len(UserSocialAuth.objects.filter(user=self.user))
def test_disconnect_id_not_found(self):
url = reverse(
"sentry-api-0-user-social-identity-details",
kwargs={"user_id": self.user.id, "identity_id": 999},
)
with self.settings(GITHUB_APP_ID="app-id", GITHUB_API_SECRET="secret"):
response = self.client.delete(url)
assert response.status_code == 404
assert not len(UserSocialAuth.objects.filter(user=self.user))
|
"""Declarative container provider override example."""
import sqlite3
from unittest import mock
from dependency_injector import containers, providers
class Container(containers.DeclarativeContainer):
database = providers.Singleton(sqlite3.connect, ":memory:")
if __name__ == "__main__":
container = Container(database=mock.Mock(sqlite3.Connection))
database = container.database()
assert isinstance(database, mock.Mock)
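    # A second, equivalent override style, sketched under the assumption that
    # the documented provider API (override(), reset_override(), and
    # providers.Object) behaves as in current dependency_injector releases.
    container.database.override(providers.Object(mock.Mock(sqlite3.Connection)))
    assert isinstance(container.database(), mock.Mock)
    container.database.reset_override()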
|
"""A test to verify an implementation of the Face layer of RPC Framework."""
import abc
import unittest
from grpc.framework.face import interfaces
from grpc.framework.face.testing import callback as testing_callback
from grpc.framework.face.testing import control
from grpc.framework.face.testing import coverage
from grpc.framework.face.testing import digest
from grpc.framework.face.testing import stock_service
from grpc.framework.face.testing import test_case
_TIMEOUT = 3
class EventInvocationSynchronousEventServiceTestCase(
test_case.FaceTestCase, coverage.FullCoverage):
"""A test of the Face layer of RPC Framework.
Concrete subclasses must also extend unittest.TestCase.
"""
__metaclass__ = abc.ABCMeta
def setUp(self):
"""See unittest.TestCase.setUp for full specification.
Overriding implementations must call this implementation.
"""
self.control = control.PauseFailControl()
self.digest = digest.digest(
stock_service.STOCK_TEST_SERVICE, self.control, None)
self.server, self.stub, self.memo = self.set_up_implementation(
self.digest.name, self.digest.methods,
{}, {}, {}, {},
self.digest.event_unary_unary_methods,
self.digest.event_unary_stream_methods,
self.digest.event_stream_unary_methods,
self.digest.event_stream_stream_methods,
None)
def tearDown(self):
"""See unittest.TestCase.tearDown for full specification.
Overriding implementations must call this implementation.
"""
self.tear_down_implementation(self.memo)
def testSuccessfulUnaryRequestUnaryResponse(self):
for name, test_messages_sequence in (
self.digest.unary_unary_messages_sequences.iteritems()):
for test_messages in test_messages_sequence:
request = test_messages.request()
callback = testing_callback.Callback()
self.stub.event_value_in_value_out(
name, request, callback.complete, callback.abort, _TIMEOUT)
callback.block_until_terminated()
response = callback.response()
test_messages.verify(request, response, self)
def testSuccessfulUnaryRequestStreamResponse(self):
for name, test_messages_sequence in (
self.digest.unary_stream_messages_sequences.iteritems()):
for test_messages in test_messages_sequence:
request = test_messages.request()
callback = testing_callback.Callback()
self.stub.event_value_in_stream_out(
name, request, callback, callback.abort, _TIMEOUT)
callback.block_until_terminated()
responses = callback.responses()
test_messages.verify(request, responses, self)
def testSuccessfulStreamRequestUnaryResponse(self):
for name, test_messages_sequence in (
self.digest.stream_unary_messages_sequences.iteritems()):
for test_messages in test_messages_sequence:
requests = test_messages.requests()
callback = testing_callback.Callback()
unused_call, request_consumer = self.stub.event_stream_in_value_out(
name, callback.complete, callback.abort, _TIMEOUT)
for request in requests:
request_consumer.consume(request)
request_consumer.terminate()
callback.block_until_terminated()
response = callback.response()
test_messages.verify(requests, response, self)
def testSuccessfulStreamRequestStreamResponse(self):
for name, test_messages_sequence in (
self.digest.stream_stream_messages_sequences.iteritems()):
for test_messages in test_messages_sequence:
requests = test_messages.requests()
callback = testing_callback.Callback()
unused_call, request_consumer = self.stub.event_stream_in_stream_out(
name, callback, callback.abort, _TIMEOUT)
for request in requests:
request_consumer.consume(request)
request_consumer.terminate()
callback.block_until_terminated()
responses = callback.responses()
test_messages.verify(requests, responses, self)
def testSequentialInvocations(self):
# pylint: disable=cell-var-from-loop
for name, test_messages_sequence in (
self.digest.unary_unary_messages_sequences.iteritems()):
for test_messages in test_messages_sequence:
first_request = test_messages.request()
second_request = test_messages.request()
first_callback = testing_callback.Callback()
second_callback = testing_callback.Callback()
def make_second_invocation(first_response):
first_callback.complete(first_response)
self.stub.event_value_in_value_out(
name, second_request, second_callback.complete,
second_callback.abort, _TIMEOUT)
self.stub.event_value_in_value_out(
name, first_request, make_second_invocation, first_callback.abort,
_TIMEOUT)
second_callback.block_until_terminated()
first_response = first_callback.response()
second_response = second_callback.response()
test_messages.verify(first_request, first_response, self)
test_messages.verify(second_request, second_response, self)
def testExpiredUnaryRequestUnaryResponse(self):
for name, test_messages_sequence in (
self.digest.unary_unary_messages_sequences.iteritems()):
for test_messages in test_messages_sequence:
request = test_messages.request()
callback = testing_callback.Callback()
with self.control.pause():
self.stub.event_value_in_value_out(
name, request, callback.complete, callback.abort, _TIMEOUT)
callback.block_until_terminated()
self.assertEqual(interfaces.Abortion.EXPIRED, callback.abortion())
def testExpiredUnaryRequestStreamResponse(self):
for name, test_messages_sequence in (
self.digest.unary_stream_messages_sequences.iteritems()):
for test_messages in test_messages_sequence:
request = test_messages.request()
callback = testing_callback.Callback()
with self.control.pause():
self.stub.event_value_in_stream_out(
name, request, callback, callback.abort, _TIMEOUT)
callback.block_until_terminated()
self.assertEqual(interfaces.Abortion.EXPIRED, callback.abortion())
def testExpiredStreamRequestUnaryResponse(self):
for name, test_messages_sequence in (
self.digest.stream_unary_messages_sequences.iteritems()):
for unused_test_messages in test_messages_sequence:
callback = testing_callback.Callback()
self.stub.event_stream_in_value_out(
name, callback.complete, callback.abort, _TIMEOUT)
callback.block_until_terminated()
self.assertEqual(interfaces.Abortion.EXPIRED, callback.abortion())
def testExpiredStreamRequestStreamResponse(self):
for name, test_messages_sequence in (
self.digest.stream_stream_messages_sequences.iteritems()):
for test_messages in test_messages_sequence:
requests = test_messages.requests()
callback = testing_callback.Callback()
unused_call, request_consumer = self.stub.event_stream_in_stream_out(
name, callback, callback.abort, _TIMEOUT)
for request in requests:
request_consumer.consume(request)
callback.block_until_terminated()
self.assertEqual(interfaces.Abortion.EXPIRED, callback.abortion())
def testFailedUnaryRequestUnaryResponse(self):
for name, test_messages_sequence in (
self.digest.unary_unary_messages_sequences.iteritems()):
for test_messages in test_messages_sequence:
request = test_messages.request()
callback = testing_callback.Callback()
with self.control.fail():
self.stub.event_value_in_value_out(
name, request, callback.complete, callback.abort, _TIMEOUT)
callback.block_until_terminated()
self.assertEqual(interfaces.Abortion.SERVICER_FAILURE, callback.abortion())
def testFailedUnaryRequestStreamResponse(self):
for name, test_messages_sequence in (
self.digest.unary_stream_messages_sequences.iteritems()):
for test_messages in test_messages_sequence:
request = test_messages.request()
callback = testing_callback.Callback()
with self.control.fail():
self.stub.event_value_in_stream_out(
name, request, callback, callback.abort, _TIMEOUT)
callback.block_until_terminated()
self.assertEqual(interfaces.Abortion.SERVICER_FAILURE, callback.abortion())
def testFailedStreamRequestUnaryResponse(self):
for name, test_messages_sequence in (
self.digest.stream_unary_messages_sequences.iteritems()):
for test_messages in test_messages_sequence:
requests = test_messages.requests()
callback = testing_callback.Callback()
with self.control.fail():
unused_call, request_consumer = self.stub.event_stream_in_value_out(
name, callback.complete, callback.abort, _TIMEOUT)
for request in requests:
request_consumer.consume(request)
request_consumer.terminate()
callback.block_until_terminated()
self.assertEqual(interfaces.Abortion.SERVICER_FAILURE, callback.abortion())
def testFailedStreamRequestStreamResponse(self):
for name, test_messages_sequence in (
self.digest.stream_stream_messages_sequences.iteritems()):
for test_messages in test_messages_sequence:
requests = test_messages.requests()
callback = testing_callback.Callback()
with self.control.fail():
unused_call, request_consumer = self.stub.event_stream_in_stream_out(
name, callback, callback.abort, _TIMEOUT)
for request in requests:
request_consumer.consume(request)
request_consumer.terminate()
callback.block_until_terminated()
self.assertEqual(interfaces.Abortion.SERVICER_FAILURE, callback.abortion())
def testParallelInvocations(self):
for name, test_messages_sequence in (
self.digest.unary_unary_messages_sequences.iteritems()):
for test_messages in test_messages_sequence:
first_request = test_messages.request()
first_callback = testing_callback.Callback()
second_request = test_messages.request()
second_callback = testing_callback.Callback()
self.stub.event_value_in_value_out(
name, first_request, first_callback.complete, first_callback.abort,
_TIMEOUT)
self.stub.event_value_in_value_out(
name, second_request, second_callback.complete,
second_callback.abort, _TIMEOUT)
first_callback.block_until_terminated()
second_callback.block_until_terminated()
first_response = first_callback.response()
second_response = second_callback.response()
test_messages.verify(first_request, first_response, self)
test_messages.verify(second_request, second_response, self)
@unittest.skip('TODO(nathaniel): implement.')
def testWaitingForSomeButNotAllParallelInvocations(self):
raise NotImplementedError()
def testCancelledUnaryRequestUnaryResponse(self):
for name, test_messages_sequence in (
self.digest.unary_unary_messages_sequences.iteritems()):
for test_messages in test_messages_sequence:
request = test_messages.request()
callback = testing_callback.Callback()
with self.control.pause():
call = self.stub.event_value_in_value_out(
name, request, callback.complete, callback.abort, _TIMEOUT)
call.cancel()
callback.block_until_terminated()
self.assertEqual(interfaces.Abortion.CANCELLED, callback.abortion())
def testCancelledUnaryRequestStreamResponse(self):
for name, test_messages_sequence in (
self.digest.unary_stream_messages_sequences.iteritems()):
for test_messages in test_messages_sequence:
request = test_messages.request()
callback = testing_callback.Callback()
call = self.stub.event_value_in_stream_out(
name, request, callback, callback.abort, _TIMEOUT)
call.cancel()
callback.block_until_terminated()
self.assertEqual(interfaces.Abortion.CANCELLED, callback.abortion())
def testCancelledStreamRequestUnaryResponse(self):
for name, test_messages_sequence in (
self.digest.stream_unary_messages_sequences.iteritems()):
for test_messages in test_messages_sequence:
requests = test_messages.requests()
callback = testing_callback.Callback()
call, request_consumer = self.stub.event_stream_in_value_out(
name, callback.complete, callback.abort, _TIMEOUT)
for request in requests:
request_consumer.consume(request)
call.cancel()
callback.block_until_terminated()
self.assertEqual(interfaces.Abortion.CANCELLED, callback.abortion())
def testCancelledStreamRequestStreamResponse(self):
for name, test_messages_sequence in (
self.digest.stream_stream_messages_sequences.iteritems()):
for unused_test_messages in test_messages_sequence:
callback = testing_callback.Callback()
call, unused_request_consumer = self.stub.event_stream_in_stream_out(
name, callback, callback.abort, _TIMEOUT)
call.cancel()
callback.block_until_terminated()
self.assertEqual(interfaces.Abortion.CANCELLED, callback.abortion())
|
import urllib
import urllib2
import pprint
from utils import transform_datetime
from utils import flatten
from warnings import warn
from django.utils import simplejson
_debug = 1
class ChimpyException(Exception):
pass
class ChimpyWarning(Warning):
pass
class Connection(object):
"""mailchimp api connection"""
output = "json"
version = '1.2'
def __init__(self, apikey=None, secure=False):
self._apikey = apikey
proto = 'http'
if secure:
proto = 'https'
api_host = 'api.mailchimp.com'
if '-' in apikey:
key, dc = apikey.split('-')
else:
dc = 'us1'
api_host = dc + '.' + api_host
self.url = '%s://%s/%s/' % (proto, api_host, self.version)
self.opener = urllib2.build_opener()
self.opener.addheaders = [('Content-Type', 'application/x-www-form-urlencoded')]
def _rpc(self, method, **params):
"""make an rpc call to the server"""
params = urllib.urlencode(params, doseq=True)
if _debug > 1:
print __name__, "making request with parameters"
pprint.pprint(params)
print __name__, "encoded parameters:", params
response = self.opener.open("%s?method=%s" %(self.url, method), params)
data = response.read()
response.close()
if _debug > 1:
print __name__, "rpc call received", data
result = simplejson.loads(data)
try:
if 'error' in result:
raise ChimpyException("%s:\n%s" % (result['error'], params))
except TypeError:
            # thrown when result is not iterable (e.g. bool)
pass
return result
def _api_call(self, method, **params):
"""make an api call"""
# flatten dict variables
params = dict([(str(k), v.encode('utf-8') if isinstance(v, unicode) else v) for k,v in flatten(params).items()])
params['output'] = self.output
params['apikey'] = self._apikey
return self._rpc(method=method, **params)
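    # Illustrative end-to-end flow (hypothetical API key): Connection('abc-us1')
    # derives the datacenter 'us1' from the key suffix, so a call such as
    # ping() ends up POSTing method=ping&output=json&apikey=abc-us1 to
    # http://us1.api.mailchimp.com/1.2/?method=ping and returns the decoded JSON.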
def ping(self):
return self._api_call(method='ping')
def lists(self):
return self._api_call(method='lists')
def list_batch_subscribe(self,
id,
batch,
double_optin=True,
update_existing=False,
replace_interests=False):
return self._api_call(method='listBatchSubscribe',
id=id,
batch=batch,
double_optin=double_optin,
update_existing=update_existing,
replace_interests=replace_interests)
def list_batch_unsubscribe(self,
id,
emails,
delete_member=False,
send_goodbye=True,
send_notify=False):
return self._api_call(method='listBatchUnsubscribe',
id=id,
emails=emails,
delete_member=delete_member,
send_goodbye=send_goodbye,
send_notify=send_notify)
def list_subscribe(self,
id,
email_address,
merge_vars,
email_type='text',
double_optin=True):
return self._api_call(method='listSubscribe',
id=id,
email_address=email_address,
merge_vars=merge_vars,
email_type=email_type,
double_optin=double_optin)
def list_unsubscribe(self,
id,
email_address,
delete_member=False,
send_goodbye=True,
send_notify=True):
return self._api_call(method='listUnsubscribe',
id=id,
email_address=email_address,
delete_member=delete_member,
send_goodbye=send_goodbye,
send_notify=send_notify)
def list_update_member(self,
id,
email_address,
merge_vars,
email_type='',
replace_interests=True):
return self._api_call(method='listUpdateMember',
id=id,
email_address=email_address,
merge_vars=merge_vars,
email_type=email_type,
replace_interests=replace_interests)
def list_member_info(self, id, email_address):
return self._api_call(method='listMemberInfo',
id=id,
email_address=email_address)
def list_members(self, id, status='subscribed', since=None, start=0, limit=100):
return self._api_call(method='listMembers', id=id, status=status, since=since, start=start, limit=limit)
def list_interest_groups(self, id):
return self._api_call(method='listInterestGroups', id=id)
def list_interest_group_add(self, id, name):
return self._api_call(method='listInterestGroupAdd', id=id, group_name=name)
def list_interest_group_del(self, id, name):
return self._api_call(method='listInterestGroupDel', id=id, group_name=name)
def list_merge_vars(self, id):
return self._api_call(method='listMergeVars', id=id)
def list_merge_var_add(self, id, tag, name, req=False):
return self._api_call(method='listMergeVarAdd', id=id, tag=tag, name=name, req=req)
def list_merge_var_del(self, id, tag):
return self._api_call(method='listMergeVarDel', id=id, tag=tag)
def list_webhooks(self, id):
return self._api_call(method='listWebhooks', id=id)
# public static listWebhookAdd(string apikey, string id, string url, array actions, array sources)
def list_webhook_add(self, id, url, actions, sources):
return self._api_call(method='listWebhookAdd', id=id, url=url, actions=actions, sources=sources)
def list_webhook_del(self, id, url):
return self._api_call(method='listWebhookDel', id=id, url=url)
def campaign_content(self, cid):
"""Get the content (both html and text) for a campaign, exactly as it would appear in the campaign archive
http://www.mailchimp.com/api/1.1/campaigncontent.func.php
"""
return self._api_call(method='campaignContent', cid=cid)
def campaign_create(self, campaign_type, options, content, **kwargs):
"""Create a new draft campaign to send.
http://www.mailchimp.com/api/1.1/campaigncreate.func.php
Optional parameters: segment_opts, type_opts
"""
# enforce the 100 char limit (urlencoded!!!)
title = options.get('title', options['subject'])
if isinstance(title, unicode):
title = title.encode('utf-8')
titlelen = len(urllib.quote_plus(title))
if titlelen > 99:
title = title[:-(titlelen - 96)] + '...'
warn("cropped campaign title to fit the 100 character limit, new title: '%s'" % title, ChimpyWarning)
subject = options['subject']
if isinstance(subject, unicode):
subject = subject.encode('utf-8')
subjlen = len(urllib.quote_plus(subject))
if subjlen > 99:
subject = subject[:-(subjlen - 96)] + '...'
warn("cropped campaign subject to fit the 100 character limit, new subject: '%s'" % subject, ChimpyWarning)
options['title'] = title
options['subject'] = subject
return self._api_call(method='campaignCreate', type=campaign_type, options=options, content=content, **kwargs)
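    # Worked example of the cropping above (illustrative): a subject whose
    # urlencoded form is 120 characters long is cut back by 24 characters and
    # suffixed with '...', so the re-encoded title stays under the 100-char
    # limit as long as the kept prefix does not expand when urlencoded.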
def campaign_delete(self, cid):
"""Delete a campaign.
http://www.mailchimp.com/api/1.1/campaigndelete.func.php
"""
return self._api_call(method='campaignDelete', cid=cid)
def campaign_folders(self):
"""List all the folders for a user account.
http://www.mailchimp.com/api/1.1/campaignfolders.func.php
"""
return self._api_call(method='campaignFolders')
def campaign_pause(self, cid):
"""Pause a RSS campaign from sending.
http://www.mailchimp.com/api/1.1/campaignpause.func.php
"""
return self._api_call(method='campaignPause', cid=cid)
def campaign_replicate(self, cid):
"""Replicate a campaign.
http://www.mailchimp.com/api/1.1/campaignreplicate.func.php
"""
return self._api_call(method='campaignReplicate', cid=cid)
def campaign_resume(self, cid):
"""Resume sending a RSS campaign.
http://www.mailchimp.com/api/1.1/campaignresume.func.php
"""
return self._api_call(method='campaignResume', cid=cid)
def campaign_schedule(self, cid, schedule_time, schedule_time_b=None):
"""Schedule a campaign to be sent in the future.
http://www.mailchimp.com/api/1.1/campaignschedule.func.php
"""
schedule_time = transform_datetime(schedule_time)
if schedule_time_b:
schedule_time_b = transform_datetime(schedule_time_b)
return self._api_call(method='campaignSchedule', cid=cid, schedule_time=schedule_time, schedule_time_b=schedule_time_b)
def campaign_send_now(self, cid):
"""Send a given campaign immediately.
http://www.mailchimp.com/api/1.1/campaignsendnow.func.php
"""
return self._api_call(method='campaignSendNow', cid=cid)
def campaign_send_test(self, cid, test_emails, **kwargs):
"""Send a test of this campaign to the provided email address.
Optional parameter: send_type
http://www.mailchimp.com/api/1.1/campaignsendtest.func.php
"""
if isinstance(test_emails, str):
test_emails = [test_emails]
return self._api_call(method='campaignSendTest', cid=cid, test_emails=test_emails, **kwargs)
def campaign_templates(self):
""" Retrieve all templates defined for your user account """
return self._api_call(method='campaignTemplates')
def campaign_unschedule(self, cid):
"""Unschedule a campaign that is scheduled to be sent in the future """
return self._api_call(method='campaignUnschedule', cid=cid)
def campaign_update(self, cid, name, value):
"""Update just about any setting for a campaign that has not been sent.
http://www.mailchimp.com/api/1.1/campaignupdate.func.php
"""
return self._api_call(method='campaignUpdate', cid=cid, name=name, value=value)
def campaigns(self, filter_id='', filter_folder=None, filter_fromname='', filter_fromemail='',
filter_title='', filter_subject='', filter_sendtimestart=None, filter_sendtimeend=None,
filter_exact=False, start=0, limit=50):
"""Get the list of campaigns and their details matching the specified filters.
        Timestamps should be passed as datetime objects.
http://www.mailchimp.com/api/1.1/campaigns.func.php
"""
filter_sendtimestart = transform_datetime(filter_sendtimestart)
filter_sendtimeend = transform_datetime(filter_sendtimeend)
return self._api_call(method='campaigns',
filter_id=filter_id, filter_folder=filter_folder, filter_fromname=filter_fromname,
filter_fromemail=filter_fromemail, filter_title=filter_title, filter_subject=filter_subject,
filter_sendtimestart=filter_sendtimestart, filter_sendtimeend=filter_sendtimeend,
filter_exact=filter_exact, start=start, limit=limit)
def campaign_segment_test(self, list_id, options):
return self._api_call(method='campaignSegmentTest', list_id=list_id, options=options)
|
import unittest
from django.conf import settings
from django.core.checks import Error
from django.core.checks.model_checks import _check_lazy_references
from django.core.exceptions import ImproperlyConfigured
from django.db import connections, models
from django.db.models.signals import post_init
from django.test import SimpleTestCase
from django.test.utils import isolate_apps, override_settings
def get_max_column_name_length():
allowed_len = None
db_alias = None
for db in settings.DATABASES.keys():
connection = connections[db]
max_name_length = connection.ops.max_name_length()
if max_name_length is None or connection.features.truncates_names:
continue
else:
if allowed_len is None:
allowed_len = max_name_length
db_alias = db
elif max_name_length < allowed_len:
allowed_len = max_name_length
db_alias = db
return (allowed_len, db_alias)
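# Illustrative outcome (hypothetical aliases): with a MySQL connection capped
# at 64-character identifiers this returns (64, 'default'); backends that
# silently truncate names are skipped, and if no configured backend enforces
# a limit the function returns (None, None), disabling the length-limit tests
# below.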
@isolate_apps('invalid_models_tests')
class IndexTogetherTests(SimpleTestCase):
def test_non_iterable(self):
class Model(models.Model):
class Meta:
index_together = 42
errors = Model.check()
expected = [
Error(
"'index_together' must be a list or tuple.",
obj=Model,
id='models.E008',
),
]
self.assertEqual(errors, expected)
def test_non_list(self):
class Model(models.Model):
class Meta:
index_together = 'not-a-list'
errors = Model.check()
expected = [
Error(
"'index_together' must be a list or tuple.",
obj=Model,
id='models.E008',
),
]
self.assertEqual(errors, expected)
def test_list_containing_non_iterable(self):
class Model(models.Model):
class Meta:
index_together = [('a', 'b'), 42]
errors = Model.check()
expected = [
Error(
"All 'index_together' elements must be lists or tuples.",
obj=Model,
id='models.E009',
),
]
self.assertEqual(errors, expected)
def test_pointing_to_missing_field(self):
class Model(models.Model):
class Meta:
index_together = [
["missing_field"],
]
errors = Model.check()
expected = [
Error(
"'index_together' refers to the non-existent field 'missing_field'.",
obj=Model,
id='models.E012',
),
]
self.assertEqual(errors, expected)
def test_pointing_to_non_local_field(self):
class Foo(models.Model):
field1 = models.IntegerField()
class Bar(Foo):
field2 = models.IntegerField()
class Meta:
index_together = [
["field2", "field1"],
]
errors = Bar.check()
expected = [
Error(
"'index_together' refers to field 'field1' which is not "
"local to model 'Bar'.",
hint=("This issue may be caused by multi-table inheritance."),
obj=Bar,
id='models.E016',
),
]
self.assertEqual(errors, expected)
def test_pointing_to_m2m_field(self):
class Model(models.Model):
m2m = models.ManyToManyField('self')
class Meta:
index_together = [
["m2m"],
]
errors = Model.check()
expected = [
Error(
"'index_together' refers to a ManyToManyField 'm2m', but "
"ManyToManyFields are not permitted in 'index_together'.",
obj=Model,
id='models.E013',
),
]
self.assertEqual(errors, expected)
@isolate_apps('invalid_models_tests')
class UniqueTogetherTests(SimpleTestCase):
def test_non_iterable(self):
class Model(models.Model):
class Meta:
unique_together = 42
errors = Model.check()
expected = [
Error(
"'unique_together' must be a list or tuple.",
obj=Model,
id='models.E010',
),
]
self.assertEqual(errors, expected)
def test_list_containing_non_iterable(self):
class Model(models.Model):
one = models.IntegerField()
two = models.IntegerField()
class Meta:
unique_together = [('a', 'b'), 42]
errors = Model.check()
expected = [
Error(
"All 'unique_together' elements must be lists or tuples.",
obj=Model,
id='models.E011',
),
]
self.assertEqual(errors, expected)
def test_non_list(self):
class Model(models.Model):
class Meta:
unique_together = 'not-a-list'
errors = Model.check()
expected = [
Error(
"'unique_together' must be a list or tuple.",
obj=Model,
id='models.E010',
),
]
self.assertEqual(errors, expected)
def test_valid_model(self):
class Model(models.Model):
one = models.IntegerField()
two = models.IntegerField()
class Meta:
# unique_together can be a simple tuple
unique_together = ('one', 'two')
errors = Model.check()
self.assertEqual(errors, [])
def test_pointing_to_missing_field(self):
class Model(models.Model):
class Meta:
unique_together = [
["missing_field"],
]
errors = Model.check()
expected = [
Error(
"'unique_together' refers to the non-existent field 'missing_field'.",
obj=Model,
id='models.E012',
),
]
self.assertEqual(errors, expected)
def test_pointing_to_m2m(self):
class Model(models.Model):
m2m = models.ManyToManyField('self')
class Meta:
unique_together = [
["m2m"],
]
errors = Model.check()
expected = [
Error(
"'unique_together' refers to a ManyToManyField 'm2m', but "
"ManyToManyFields are not permitted in 'unique_together'.",
obj=Model,
id='models.E013',
),
]
self.assertEqual(errors, expected)
@isolate_apps('invalid_models_tests')
class FieldNamesTests(SimpleTestCase):
def test_ending_with_underscore(self):
class Model(models.Model):
field_ = models.CharField(max_length=10)
m2m_ = models.ManyToManyField('self')
errors = Model.check()
expected = [
Error(
'Field names must not end with an underscore.',
obj=Model._meta.get_field('field_'),
id='fields.E001',
),
Error(
'Field names must not end with an underscore.',
obj=Model._meta.get_field('m2m_'),
id='fields.E001',
),
]
self.assertEqual(errors, expected)
max_column_name_length, column_limit_db_alias = get_max_column_name_length()
@unittest.skipIf(max_column_name_length is None, "The database doesn't have a column name length limit.")
def test_M2M_long_column_name(self):
"""
#13711 -- Model check for long M2M column names when database has
column name length limits.
"""
allowed_len, db_alias = get_max_column_name_length()
# A model with very long name which will be used to set relations to.
class VeryLongModelNamezzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz(models.Model):
title = models.CharField(max_length=11)
# Main model for which checks will be performed.
class ModelWithLongField(models.Model):
m2m_field = models.ManyToManyField(
VeryLongModelNamezzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz,
related_name="rn1"
)
m2m_field2 = models.ManyToManyField(
VeryLongModelNamezzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz,
related_name="rn2", through='m2msimple'
)
m2m_field3 = models.ManyToManyField(
VeryLongModelNamezzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz,
related_name="rn3",
through='m2mcomplex'
)
fk = models.ForeignKey(
VeryLongModelNamezzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz,
models.CASCADE,
related_name="rn4",
)
# Models used for setting `through` in M2M field.
class m2msimple(models.Model):
id2 = models.ForeignKey(ModelWithLongField, models.CASCADE)
class m2mcomplex(models.Model):
id2 = models.ForeignKey(ModelWithLongField, models.CASCADE)
long_field_name = 'a' * (self.max_column_name_length + 1)
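        # contribute_to_class() attaches a field to an already-defined model
        # class; this lets the test add fields whose autogenerated column
        # names exceed the backend's limit.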
models.ForeignKey(
VeryLongModelNamezzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz,
models.CASCADE,
).contribute_to_class(m2msimple, long_field_name)
models.ForeignKey(
VeryLongModelNamezzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz,
models.CASCADE,
db_column=long_field_name
).contribute_to_class(m2mcomplex, long_field_name)
errors = ModelWithLongField.check()
        # First error because of the M2M field on the model with the long name.
m2m_long_name = "verylongmodelnamezzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz_id"
if self.max_column_name_length > len(m2m_long_name):
# Some databases support names longer than the test name.
expected = []
else:
expected = [
Error(
'Autogenerated column name too long for M2M field "%s". '
'Maximum length is "%s" for database "%s".'
% (m2m_long_name, self.max_column_name_length, self.column_limit_db_alias),
hint="Use 'through' to create a separate model for "
"M2M and then set column_name using 'db_column'.",
obj=ModelWithLongField,
id='models.E019',
)
]
        # Second error because the FK specified in the `through` model
        # `m2msimple` has an auto-generated name longer than allowed.
        # There are no check errors for the other M2M because it specifies
        # db_column for the FK in its `through` model, even though the
        # actual name is longer than the database limit.
expected.append(
Error(
'Autogenerated column name too long for M2M field "%s_id". '
'Maximum length is "%s" for database "%s".'
% (long_field_name, self.max_column_name_length, self.column_limit_db_alias),
hint="Use 'through' to create a separate model for "
"M2M and then set column_name using 'db_column'.",
obj=ModelWithLongField,
id='models.E019',
)
)
self.assertEqual(errors, expected)
@unittest.skipIf(max_column_name_length is None, "The database doesn't have a column name length limit.")
def test_local_field_long_column_name(self):
"""
#13711 -- Model check for long column names
when database does not support long names.
"""
allowed_len, db_alias = get_max_column_name_length()
class ModelWithLongField(models.Model):
title = models.CharField(max_length=11)
long_field_name = 'a' * (self.max_column_name_length + 1)
long_field_name2 = 'b' * (self.max_column_name_length + 1)
models.CharField(max_length=11).contribute_to_class(ModelWithLongField, long_field_name)
models.CharField(max_length=11, db_column='vlmn').contribute_to_class(ModelWithLongField, long_field_name2)
errors = ModelWithLongField.check()
        # Error because of the field with a long name added to the model
        # without specifying db_column.
expected = [
Error(
'Autogenerated column name too long for field "%s". '
'Maximum length is "%s" for database "%s".'
% (long_field_name, self.max_column_name_length, self.column_limit_db_alias),
hint="Set the column name manually using 'db_column'.",
obj=ModelWithLongField,
id='models.E018',
)
]
self.assertEqual(errors, expected)
def test_including_separator(self):
class Model(models.Model):
some__field = models.IntegerField()
errors = Model.check()
expected = [
Error(
'Field names must not contain "__".',
obj=Model._meta.get_field('some__field'),
id='fields.E002',
)
]
self.assertEqual(errors, expected)
def test_pk(self):
class Model(models.Model):
pk = models.IntegerField()
errors = Model.check()
expected = [
Error(
"'pk' is a reserved word that cannot be used as a field name.",
obj=Model._meta.get_field('pk'),
id='fields.E003',
)
]
self.assertEqual(errors, expected)
@isolate_apps('invalid_models_tests')
class ShadowingFieldsTests(SimpleTestCase):
def test_field_name_clash_with_child_accessor(self):
class Parent(models.Model):
pass
class Child(Parent):
child = models.CharField(max_length=100)
errors = Child.check()
expected = [
Error(
"The field 'child' clashes with the field "
"'child' from model 'invalid_models_tests.parent'.",
obj=Child._meta.get_field('child'),
id='models.E006',
)
]
self.assertEqual(errors, expected)
def test_multiinheritance_clash(self):
class Mother(models.Model):
clash = models.IntegerField()
class Father(models.Model):
clash = models.IntegerField()
class Child(Mother, Father):
            # Here we have two clashes: 'id' (the automatic field) and
            # 'clash', because both parents define these fields.
pass
errors = Child.check()
expected = [
Error(
"The field 'id' from parent model "
"'invalid_models_tests.mother' clashes with the field 'id' "
"from parent model 'invalid_models_tests.father'.",
obj=Child,
id='models.E005',
),
Error(
"The field 'clash' from parent model "
"'invalid_models_tests.mother' clashes with the field 'clash' "
"from parent model 'invalid_models_tests.father'.",
obj=Child,
id='models.E005',
)
]
self.assertEqual(errors, expected)
def test_inheritance_clash(self):
class Parent(models.Model):
f_id = models.IntegerField()
class Target(models.Model):
# This field doesn't result in a clash.
f_id = models.IntegerField()
class Child(Parent):
# This field clashes with parent "f_id" field.
f = models.ForeignKey(Target, models.CASCADE)
errors = Child.check()
expected = [
Error(
"The field 'f' clashes with the field 'f_id' "
"from model 'invalid_models_tests.parent'.",
obj=Child._meta.get_field('f'),
id='models.E006',
)
]
self.assertEqual(errors, expected)
def test_multigeneration_inheritance(self):
class GrandParent(models.Model):
clash = models.IntegerField()
class Parent(GrandParent):
pass
class Child(Parent):
pass
class GrandChild(Child):
clash = models.IntegerField()
errors = GrandChild.check()
expected = [
Error(
"The field 'clash' clashes with the field 'clash' "
"from model 'invalid_models_tests.grandparent'.",
obj=GrandChild._meta.get_field('clash'),
id='models.E006',
)
]
self.assertEqual(errors, expected)
def test_id_clash(self):
class Target(models.Model):
pass
class Model(models.Model):
fk = models.ForeignKey(Target, models.CASCADE)
fk_id = models.IntegerField()
errors = Model.check()
expected = [
Error(
"The field 'fk_id' clashes with the field 'fk' from model "
"'invalid_models_tests.model'.",
obj=Model._meta.get_field('fk_id'),
id='models.E006',
)
]
self.assertEqual(errors, expected)
@isolate_apps('invalid_models_tests')
class OtherModelTests(SimpleTestCase):
def test_unique_primary_key(self):
invalid_id = models.IntegerField(primary_key=False)
class Model(models.Model):
id = invalid_id
errors = Model.check()
expected = [
Error(
"'id' can only be used as a field name if the field also sets "
"'primary_key=True'.",
obj=Model,
id='models.E004',
),
]
self.assertEqual(errors, expected)
def test_ordering_non_iterable(self):
class Model(models.Model):
class Meta:
ordering = "missing_field"
errors = Model.check()
expected = [
Error(
"'ordering' must be a tuple or list "
"(even if you want to order by only one field).",
obj=Model,
id='models.E014',
),
]
self.assertEqual(errors, expected)
def test_just_ordering_no_errors(self):
class Model(models.Model):
order = models.PositiveIntegerField()
class Meta:
ordering = ['order']
self.assertEqual(Model.check(), [])
def test_just_order_with_respect_to_no_errors(self):
class Question(models.Model):
pass
class Answer(models.Model):
question = models.ForeignKey(Question, models.CASCADE)
class Meta:
order_with_respect_to = 'question'
self.assertEqual(Answer.check(), [])
def test_ordering_with_order_with_respect_to(self):
class Question(models.Model):
pass
class Answer(models.Model):
question = models.ForeignKey(Question, models.CASCADE)
order = models.IntegerField()
class Meta:
order_with_respect_to = 'question'
ordering = ['order']
errors = Answer.check()
expected = [
Error(
"'ordering' and 'order_with_respect_to' cannot be used together.",
obj=Answer,
id='models.E021',
),
]
self.assertEqual(errors, expected)
def test_non_valid(self):
class RelationModel(models.Model):
pass
class Model(models.Model):
relation = models.ManyToManyField(RelationModel)
class Meta:
ordering = ['relation']
errors = Model.check()
expected = [
Error(
"'ordering' refers to the non-existent field 'relation'.",
obj=Model,
id='models.E015',
),
]
self.assertEqual(errors, expected)
def test_ordering_pointing_to_missing_field(self):
class Model(models.Model):
class Meta:
ordering = ("missing_field",)
errors = Model.check()
expected = [
Error(
"'ordering' refers to the non-existent field 'missing_field'.",
obj=Model,
id='models.E015',
)
]
self.assertEqual(errors, expected)
def test_ordering_pointing_to_missing_foreignkey_field(self):
# refs #22711
class Model(models.Model):
missing_fk_field = models.IntegerField()
class Meta:
ordering = ("missing_fk_field_id",)
errors = Model.check()
expected = [
Error(
"'ordering' refers to the non-existent field 'missing_fk_field_id'.",
obj=Model,
id='models.E015',
)
]
self.assertEqual(errors, expected)
def test_ordering_pointing_to_existing_foreignkey_field(self):
# refs #22711
class Parent(models.Model):
pass
class Child(models.Model):
parent = models.ForeignKey(Parent, models.CASCADE)
class Meta:
ordering = ("parent_id",)
self.assertFalse(Child.check())
def test_name_beginning_with_underscore(self):
class _Model(models.Model):
pass
self.assertEqual(_Model.check(), [
Error(
"The model name '_Model' cannot start or end with an underscore "
"as it collides with the query lookup syntax.",
obj=_Model,
id='models.E023',
)
])
def test_name_ending_with_underscore(self):
class Model_(models.Model):
pass
self.assertEqual(Model_.check(), [
Error(
"The model name 'Model_' cannot start or end with an underscore "
"as it collides with the query lookup syntax.",
obj=Model_,
id='models.E023',
)
])
def test_name_contains_double_underscores(self):
class Test__Model(models.Model):
pass
self.assertEqual(Test__Model.check(), [
Error(
"The model name 'Test__Model' cannot contain double underscores "
"as it collides with the query lookup syntax.",
obj=Test__Model,
id='models.E024',
)
])
@override_settings(TEST_SWAPPED_MODEL_BAD_VALUE='not-a-model')
def test_swappable_missing_app_name(self):
class Model(models.Model):
class Meta:
swappable = 'TEST_SWAPPED_MODEL_BAD_VALUE'
errors = Model.check()
expected = [
Error(
"'TEST_SWAPPED_MODEL_BAD_VALUE' is not of the form 'app_label.app_name'.",
id='models.E001',
),
]
self.assertEqual(errors, expected)
@override_settings(TEST_SWAPPED_MODEL_BAD_MODEL='not_an_app.Target')
def test_swappable_missing_app(self):
class Model(models.Model):
class Meta:
swappable = 'TEST_SWAPPED_MODEL_BAD_MODEL'
errors = Model.check()
expected = [
Error(
"'TEST_SWAPPED_MODEL_BAD_MODEL' references 'not_an_app.Target', "
'which has not been installed, or is abstract.',
id='models.E002',
),
]
self.assertEqual(errors, expected)
def test_two_m2m_through_same_relationship(self):
class Person(models.Model):
pass
class Group(models.Model):
primary = models.ManyToManyField(Person, through="Membership", related_name="primary")
secondary = models.ManyToManyField(Person, through="Membership", related_name="secondary")
class Membership(models.Model):
person = models.ForeignKey(Person, models.CASCADE)
group = models.ForeignKey(Group, models.CASCADE)
errors = Group.check()
expected = [
Error(
"The model has two many-to-many relations through "
"the intermediate model 'invalid_models_tests.Membership'.",
obj=Group,
id='models.E003',
)
]
self.assertEqual(errors, expected)
def test_missing_parent_link(self):
msg = 'Add parent_link=True to invalid_models_tests.ParkingLot.parent.'
with self.assertRaisesMessage(ImproperlyConfigured, msg):
class Place(models.Model):
pass
class ParkingLot(Place):
parent = models.OneToOneField(Place, models.CASCADE)
def test_m2m_table_name_clash(self):
class Foo(models.Model):
bar = models.ManyToManyField('Bar', db_table='myapp_bar')
class Meta:
db_table = 'myapp_foo'
class Bar(models.Model):
class Meta:
db_table = 'myapp_bar'
self.assertEqual(Foo.check(), [
Error(
"The field's intermediary table 'myapp_bar' clashes with the "
"table name of 'invalid_models_tests.Bar'.",
obj=Foo._meta.get_field('bar'),
id='fields.E340',
)
])
def test_m2m_field_table_name_clash(self):
class Foo(models.Model):
pass
class Bar(models.Model):
foos = models.ManyToManyField(Foo, db_table='clash')
class Baz(models.Model):
foos = models.ManyToManyField(Foo, db_table='clash')
self.assertEqual(Bar.check() + Baz.check(), [
Error(
"The field's intermediary table 'clash' clashes with the "
"table name of 'invalid_models_tests.Baz.foos'.",
obj=Bar._meta.get_field('foos'),
id='fields.E340',
),
Error(
"The field's intermediary table 'clash' clashes with the "
"table name of 'invalid_models_tests.Bar.foos'.",
obj=Baz._meta.get_field('foos'),
id='fields.E340',
)
])
def test_m2m_autogenerated_table_name_clash(self):
class Foo(models.Model):
class Meta:
db_table = 'bar_foos'
class Bar(models.Model):
# The autogenerated `db_table` will be bar_foos.
foos = models.ManyToManyField(Foo)
class Meta:
db_table = 'bar'
self.assertEqual(Bar.check(), [
Error(
"The field's intermediary table 'bar_foos' clashes with the "
"table name of 'invalid_models_tests.Foo'.",
obj=Bar._meta.get_field('foos'),
id='fields.E340',
)
])
def test_m2m_unmanaged_shadow_models_not_checked(self):
class A1(models.Model):
pass
class C1(models.Model):
mm_a = models.ManyToManyField(A1, db_table='d1')
# Unmanaged models that shadow the above models. Reused table names
# shouldn't be flagged by any checks.
class A2(models.Model):
class Meta:
managed = False
class C2(models.Model):
mm_a = models.ManyToManyField(A2, through='Intermediate')
class Meta:
managed = False
class Intermediate(models.Model):
a2 = models.ForeignKey(A2, models.CASCADE, db_column='a1_id')
c2 = models.ForeignKey(C2, models.CASCADE, db_column='c1_id')
class Meta:
db_table = 'd1'
managed = False
self.assertEqual(C1.check(), [])
self.assertEqual(C2.check(), [])
def test_m2m_to_concrete_and_proxy_allowed(self):
class A(models.Model):
pass
class Through(models.Model):
a = models.ForeignKey('A', models.CASCADE)
c = models.ForeignKey('C', models.CASCADE)
class ThroughProxy(Through):
class Meta:
proxy = True
class C(models.Model):
mm_a = models.ManyToManyField(A, through=Through)
mm_aproxy = models.ManyToManyField(A, through=ThroughProxy, related_name='proxied_m2m')
self.assertEqual(C.check(), [])
@isolate_apps('django.contrib.auth', kwarg_name='apps')
def test_lazy_reference_checks(self, apps):
class DummyModel(models.Model):
author = models.ForeignKey('Author', models.CASCADE)
class Meta:
app_label = 'invalid_models_tests'
class DummyClass:
def __call__(self, **kwargs):
pass
def dummy_method(self):
pass
def dummy_function(*args, **kwargs):
pass
apps.lazy_model_operation(dummy_function, ('auth', 'imaginarymodel'))
apps.lazy_model_operation(dummy_function, ('fanciful_app', 'imaginarymodel'))
post_init.connect(dummy_function, sender='missing-app.Model', apps=apps)
post_init.connect(DummyClass(), sender='missing-app.Model', apps=apps)
post_init.connect(DummyClass().dummy_method, sender='missing-app.Model', apps=apps)
expected = [
Error(
"%r contains a lazy reference to auth.imaginarymodel, "
"but app 'auth' doesn't provide model 'imaginarymodel'." % dummy_function,
obj=dummy_function,
id='models.E022',
),
Error(
"%r contains a lazy reference to fanciful_app.imaginarymodel, "
"but app 'fanciful_app' isn't installed." % dummy_function,
obj=dummy_function,
id='models.E022',
),
Error(
"An instance of class 'DummyClass' was connected to "
"the 'post_init' signal with a lazy reference to the sender "
"'missing-app.model', but app 'missing-app' isn't installed.",
hint=None,
obj='invalid_models_tests.test_models',
id='signals.E001',
),
Error(
"Bound method 'DummyClass.dummy_method' was connected to the "
"'post_init' signal with a lazy reference to the sender "
"'missing-app.model', but app 'missing-app' isn't installed.",
hint=None,
obj='invalid_models_tests.test_models',
id='signals.E001',
),
Error(
"The field invalid_models_tests.DummyModel.author was declared "
"with a lazy reference to 'invalid_models_tests.author', but app "
"'invalid_models_tests' isn't installed.",
hint=None,
obj=DummyModel.author.field,
id='fields.E307',
),
Error(
"The function 'dummy_function' was connected to the 'post_init' "
"signal with a lazy reference to the sender "
"'missing-app.model', but app 'missing-app' isn't installed.",
hint=None,
obj='invalid_models_tests.test_models',
id='signals.E001',
),
]
self.assertEqual(_check_lazy_references(apps), expected)
|
from cumulusci.core.exceptions import CumulusCIException
class MetadataApiError(CumulusCIException):
def __init__(self, message, response):
super(MetadataApiError, self).__init__(message)
self.response = response
class MetadataComponentFailure(MetadataApiError):
pass
class MissingOAuthError(CumulusCIException):
pass
class MissingOrgCredentialsError(CumulusCIException):
pass
|
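# Date and time formats for the Hebrew locale; 'ב' is the Hebrew preposition
# "in" (so 'j בF Y' renders like "5 בינואר 2024").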
DATE_FORMAT = 'j בF Y'
TIME_FORMAT = 'H:i:s'
DATETIME_FORMAT = 'j בF Y H:i:s'
YEAR_MONTH_FORMAT = 'F Y'
MONTH_DAY_FORMAT = 'j בF'
SHORT_DATE_FORMAT = 'd/m/Y'
SHORT_DATETIME_FORMAT = 'd/m/Y H:i:s'
DECIMAL_SEPARATOR = '.'
THOUSAND_SEPARATOR = ','
|
"""
This test basically just plays around with image.rollimg.
It has four examples:
* image_reduce: this takes an Image having, say, an axis 't' and returns
  another Image reduced over 't'
* need_specific_axis_reduce: this takes an Image and a specific axis name,
  like 't', and produces an Image reduced over 't'; raises an exception if
  the Image has no axis 't'
* image_call: this takes an Image having, say, an axis 't', does something
  along this axis (fitting a regression model, for example), and outputs a
  new Image with the 't' axis replaced by 'new'
* image_modify: this takes an Image and an axis specification, such as
  'x+LR', 'l', or 2, modifies a copy of the data by iterating over this
  axis, and returns an Image with the same axes
Notes
-----
In these loaded Images, 't' is both an axis name and a world coordinate name so
it is not ambiguous to say 't' axis. It is slightly ambiguous to say 'x+LR' axis
if the axisnames are ['slice', 'frequency', 'phase'] but image.rollimg
identifies 'x+LR' == 'slice' == 0.
"""
from __future__ import absolute_import
import numpy as np
from ..image import (Image, rollimg, synchronized_order)
from ...reference.coordinate_map import (AffineTransform as AT, drop_io_dim,
AxisError)
from ...reference.coordinate_system import CoordinateSystem as CS
from ...reference.spaces import mni_csm
from ...image.image_spaces import xyz_affine
from nose.tools import (assert_raises, assert_equal)
from numpy.testing import assert_almost_equal, assert_array_equal
MNI3 = mni_csm(3)
MNI4 = mni_csm(4)
def image_reduce(img, reduce_op, axis='t'):
"""
Take an Image, perform some reduce operation on it, over
a specified axis, and return a new Image.
For the sake of testing things out, we will assume that
the operation reduces over the first axis only.
Parameters
----------
    img : Image
reduce_op : callable
An operation that reduces over the first axis,
such as lambda x: x.sum(0)
axis : str or int
Specification of axis of Image
Returns
-------
newim : Image, missing axis
"""
img = rollimg(img, axis)
axis_name = img.axes.coord_names[0]
output_axes = list(img.axes.coord_names)
output_axes.remove(axis_name)
newdata = reduce_op(img.get_data())
return Image(newdata, drop_io_dim(img.coordmap, axis))
def need_specific_axis_reduce(img, reduce_op):
"""
Take an Image, perform some reduce operation on it, over the axis named
'specific', and return a new Image.
For the sake of testing things out, we will assume that the operation
reduces over the first axis only.
Parameters
----------
img : Image
reduce_op : callable
An operation that reduces over the first axis,
such as lambda x: x.sum(0)
Returns
-------
newim : Image, missing axis
"""
return image_reduce(img, reduce_op, 'specific')
def image_call(img, function, inaxis='t', outaxis='new'):
"""
Take an Image, perform some operation on it, over a specified axis, and
return a new Image.
For the sake of testing things out, we will assume that the operation can
only operate on the first axis of the array.
Parameters
----------
img : Image
function : callable
An operation that does something over the first axis,
such as lambda x: x[::2]
inaxis : str or int
Specification of axis of Image
outaxis : str
Name of new axis in new Image
Returns
-------
newim : Image
with axis `inaxis` replaced with `outaxis`
"""
rolled_img = rollimg(img, inaxis)
inaxis = rolled_img.axes.coord_names[0] # now it's a string
newdata = function(rolled_img.get_data())
new_coordmap = rolled_img.coordmap.renamed_domain({inaxis: outaxis})
new_image = Image(newdata, new_coordmap)
# we have to roll the axis back
axis_index = img.axes.index(inaxis) + 1
return rollimg(new_image, 0, axis_index)
def image_modify(img, modify, axis='y+PA'):
"""
Take an Image, perform some operation on it, over a specified axis, and
return a new Image.
For this operation, we are allowed to iterate over spatial axes.
For the sake of testing things out, we will assume that the operation modify
can only operate by iterating over the first axis of an array.
Parameters
----------
img : Image
modify : callable
An operation that modifies an array. Something like::
def f(x):
x[:] = x.mean()
axis : str or int
Specification of axis of Image
Returns
-------
newim : Image
with a modified copy of img._data.
"""
rolled_img = rollimg(img, axis)
data = rolled_img.get_data().copy()
for d in data:
modify(d)
import copy
new_image = Image(data, copy.copy(rolled_img.coordmap))
# Now, we have to put the data back to same order as img
return synchronized_order(new_image, img)
def test_reduce():
shape = (3, 5, 7, 9)
x = np.random.standard_normal(shape)
im = Image(x, AT(CS('ijkq'), MNI4, np.diag([3, 4, 5, 6, 1])))
newim = image_reduce(im, lambda x: x.sum(0), 'q')
assert_array_equal(xyz_affine(im), xyz_affine(newim))
assert_equal(newim.axes.coord_names, tuple('ijk'))
assert_equal(newim.shape, (3, 5, 7))
assert_almost_equal(newim.get_data(), x.sum(3))
im_nd = Image(x, AT(CS('ijkq'), MNI4, np.array(
[[0, 1, 2, 0, 10],
[3, 4, 5, 0, 11],
[6, 7, 8, 0, 12],
[0, 0, 0, 9, 13],
[0, 0, 0, 0, 1]])))
for i, o, n in zip('ijk', MNI3.coord_names, range(3)):
for axis_id in (i, o, n):
            # Non-diagonal reduces raise an error
assert_raises(AxisError, image_reduce, im_nd,
lambda x: x.sum(0), axis_id)
# Diagonal reduces are OK
newim = image_reduce(im, lambda x: x.sum(0), axis_id)
def test_specific_reduce():
shape = (3, 5, 7, 9)
x = np.random.standard_normal(shape)
im = Image(x, AT(CS('ijkq'), MNI4, np.diag([3, 4, 5, 6, 1])))
# we have to rename the axis before we can call the function
# need_specific_axis_reduce on it
assert_raises(AxisError, need_specific_axis_reduce, im, lambda x: x.sum(0))
im = im.renamed_axes(q='specific')
newim = need_specific_axis_reduce(im, lambda x: x.sum(0))
assert_array_equal(xyz_affine(im), xyz_affine(newim))
assert_equal(newim.axes.coord_names, tuple('ijk'))
assert_equal(newim.shape, (3, 5, 7))
assert_almost_equal(newim.get_data(), x.sum(3))
def test_call():
shape = (3, 5, 7, 12)
x = np.random.standard_normal(shape)
affine = np.eye(5)
affine[:3, :3] = np.random.standard_normal((3, 3))
affine[:4, 4] = np.random.standard_normal((4,))
im = Image(x, AT(CS('ijkq'), MNI4, affine))
newim = image_call(im, lambda x: x[::2], 'q', 'out')
assert_array_equal(xyz_affine(im), xyz_affine(newim))
assert_equal(newim.axes.coord_names, tuple('ijk') + ('out',))
assert_equal(newim.shape, (3, 5, 7, 6))
assert_almost_equal(newim.get_data(), x[:,:,:,::2])
def test_modify():
shape = (3, 5, 7, 12)
x = np.random.standard_normal(shape)
affine = np.eye(5)
affine[:3, :3] = np.random.standard_normal((3, 3))
affine[:4, 4] = np.random.standard_normal((4,))
im = Image(x, AT(CS('ijkq'), MNI4, affine))
def nullmodify(d):
pass
def meanmodify(d):
d[:] = d.mean()
for i, o, n in zip('ijkq', MNI3.coord_names + ('q',), range(4)):
for a in i, o, n:
nullim = image_modify(im, nullmodify, a)
meanim = image_modify(im, meanmodify, a)
assert_array_equal(nullim.get_data(), im.get_data())
assert_array_equal(xyz_affine(im), xyz_affine(nullim))
assert_equal(nullim.axes, im.axes)
# yield assert_equal, nullim, im
assert_array_equal(xyz_affine(im), xyz_affine(meanim))
assert_equal(meanim.axes, im.axes)
# Make sure that meanmodify works as expected
d = im.get_data()
d = np.rollaxis(d, n)
meand = meanim.get_data()
meand = np.rollaxis(meand, n)
for i in range(d.shape[0]):
assert_almost_equal(meand[i], d[i].mean())
|
"""
Generic helpers for RQ task execution
"""
from __future__ import absolute_import
import rq
from django.conf import settings
from .utils import get_redis_connection
def enqueue(function, args=None, kwargs=None, timeout=None, queue='default'):
    """Enqueue `function` on the named RQ queue; when settings.RQ_EAGER is
    true the job is executed synchronously instead of being queued."""
    async = not settings.RQ_EAGER
if args is None:
args = ()
if kwargs is None:
kwargs = {}
conn = get_redis_connection()
queue = rq.Queue(queue, connection=conn, async=async)
return queue.enqueue_call(func=function, args=tuple(args), kwargs=kwargs,
timeout=timeout)
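# Illustrative usage (send_welcome_email and user_id are hypothetical names,
# not part of this module):
#
#     job = enqueue(send_welcome_email, args=[user_id], timeout=60,
#                   queue='emails')
#
# With settings.RQ_EAGER set to a true value the job runs synchronously in
# process (async=False), which is convenient for tests.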
|
import unittest
from TestCase import TestCase
from WidgetTest import WidgetTest
from MenuTest import MenuTest
from SplitContainerTest import SplitContainerTest
from WindowTest import WindowTest
from ListContainerTest import ListContainerTest
from EventSignalCombinerTest import EventSignalCombinerTest
from FrameTest import FrameTest
from NameGadgetTest import NameGadgetTest
from LinearContainerTest import LinearContainerTest
from NodeGadgetTest import NodeGadgetTest
from GadgetTest import GadgetTest
from TabbedContainerTest import TabbedContainerTest
from NodeGraphTest import NodeGraphTest
from WidgetSignalTest import WidgetSignalTest
from EventLoopTest import EventLoopTest
from SplinePlugGadgetTest import SplinePlugGadgetTest
from TextWidgetTest import TextWidgetTest
from CheckBoxTest import CheckBoxTest
from ImageTest import ImageTest
from ButtonTest import ButtonTest
from CollapsibleTest import CollapsibleTest
from ImageGadgetTest import ImageGadgetTest
from StandardNodeGadgetTest import StandardNodeGadgetTest
from ColorSwatchTest import ColorSwatchTest
from VariantTest import VariantTest
from GridContainerTest import GridContainerTest
from NoduleTest import NoduleTest
from ProgressBarTest import ProgressBarTest
from ContainerWidgetTest import ContainerWidgetTest
from SelectionMenuTest import SelectionMenuTest
from StandardStyleTest import StandardStyleTest
from CompoundParameterValueWidgetTest import CompoundParameterValueWidgetTest
from EditorWidgetTest import EditorWidgetTest
from NumericSliderTest import NumericSliderTest
from RenderableGadgetTest import RenderableGadgetTest
from PlugValueWidgetTest import PlugValueWidgetTest
from PathListingWidgetTest import PathListingWidgetTest
from MultiLineTextWidgetTest import MultiLineTextWidgetTest
from LabelTest import LabelTest
from ScrolledContainerTest import ScrolledContainerTest
from ParameterValueWidgetTest import ParameterValueWidgetTest
from NodeEditorTest import NodeEditorTest
from ScriptWindowTest import ScriptWindowTest
from CompoundPlugValueWidgetTest import CompoundPlugValueWidgetTest
from CompoundEditorTest import CompoundEditorTest
from MultiSelectionMenuTest import MultiSelectionMenuTest
from MetadataTest import MetadataTest
from StandardGraphLayoutTest import StandardGraphLayoutTest
from StandardNodeUITest import StandardNodeUITest
from ViewTest import ViewTest
from SliderTest import SliderTest
from NumericPlugValueWidgetTest import NumericPlugValueWidgetTest
from CompoundNumericPlugValueWidgetTest import CompoundNumericPlugValueWidgetTest
if __name__ == "__main__":
unittest.main()
|
from django.contrib.gis import admin
from lingcod.spacing.models import *
from lingcod.spacing.views import create_pickled_graph_from_all_land as create_graph
from django.conf.urls.defaults import patterns, url
class SpacingPointAdmin(admin.GeoModelAdmin):
pass
admin.site.register(SpacingPoint,SpacingPointAdmin)
class LandAdmin(admin.GeoModelAdmin):
pass
admin.site.register(Land,LandAdmin)
class PickledGraphAdmin(admin.GeoModelAdmin):
def get_urls(self):
urls = super(PickledGraphAdmin, self).get_urls()
my_urls = patterns('',
url(r'^regenerate/$', self.admin_site.admin_view(create_graph), name='regenerate_pickledgraph')
)
return my_urls + urls
admin.site.register(PickledGraph,PickledGraphAdmin)
|
""" basic inference routines """
from collections import abc
from numbers import Number
import re
from typing import Pattern
import numpy as np
from pandas._libs import lib
from pandas._typing import ArrayLike
is_bool = lib.is_bool
is_integer = lib.is_integer
is_float = lib.is_float
is_complex = lib.is_complex
is_scalar = lib.is_scalar
is_decimal = lib.is_decimal
is_interval = lib.is_interval
is_list_like = lib.is_list_like
is_iterator = lib.is_iterator
def is_number(obj) -> bool:
"""
Check if the object is a number.
Returns True when the object is a number, and False if is not.
Parameters
----------
obj : any type
The object to check if is a number.
Returns
-------
is_number : bool
Whether `obj` is a number or not.
See Also
--------
api.types.is_integer: Checks a subgroup of numbers.
Examples
--------
>>> from pandas.api.types import is_number
>>> is_number(1)
True
>>> is_number(7.15)
True
Booleans are valid because they are int subclass.
>>> is_number(False)
True
>>> is_number("foo")
False
>>> is_number("5")
False
"""
return isinstance(obj, (Number, np.number))
def iterable_not_string(obj) -> bool:
"""
Check if the object is an iterable but not a string.
Parameters
----------
obj : The object to check.
Returns
-------
is_iter_not_string : bool
Whether `obj` is a non-string iterable.
Examples
--------
>>> iterable_not_string([1, 2, 3])
True
>>> iterable_not_string("foo")
False
>>> iterable_not_string(1)
False
"""
return isinstance(obj, abc.Iterable) and not isinstance(obj, str)
def is_file_like(obj) -> bool:
"""
Check if the object is a file-like object.
For objects to be considered file-like, they must
be an iterator AND have either a `read` and/or `write`
method as an attribute.
Note: file-like objects must be iterable, but
iterable objects need not be file-like.
Parameters
----------
obj : The object to check
Returns
-------
is_file_like : bool
Whether `obj` has file-like properties.
Examples
--------
>>> import io
>>> buffer = io.StringIO("data")
>>> is_file_like(buffer)
True
>>> is_file_like([1, 2, 3])
False
"""
if not (hasattr(obj, "read") or hasattr(obj, "write")):
return False
return bool(hasattr(obj, "__iter__"))
def is_re(obj) -> bool:
"""
Check if the object is a regex pattern instance.
Parameters
----------
obj : The object to check
Returns
-------
is_regex : bool
Whether `obj` is a regex pattern.
Examples
--------
>>> is_re(re.compile(".*"))
True
>>> is_re("foo")
False
"""
return isinstance(obj, Pattern)
def is_re_compilable(obj) -> bool:
"""
Check if the object can be compiled into a regex pattern instance.
Parameters
----------
obj : The object to check
Returns
-------
is_regex_compilable : bool
Whether `obj` can be compiled as a regex pattern.
Examples
--------
>>> is_re_compilable(".*")
True
>>> is_re_compilable(1)
False
"""
try:
re.compile(obj)
except TypeError:
return False
else:
return True
def is_array_like(obj) -> bool:
"""
Check if the object is array-like.
For an object to be considered array-like, it must be list-like and
have a `dtype` attribute.
Parameters
----------
obj : The object to check
Returns
-------
is_array_like : bool
Whether `obj` has array-like properties.
Examples
--------
>>> is_array_like(np.array([1, 2, 3]))
True
>>> is_array_like(pd.Series(["a", "b"]))
True
>>> is_array_like(pd.Index(["2016-01-01"]))
True
>>> is_array_like([1, 2, 3])
False
>>> is_array_like(("a", "b"))
False
"""
return is_list_like(obj) and hasattr(obj, "dtype")
def is_nested_list_like(obj) -> bool:
"""
Check if the object is list-like, and that all of its elements
are also list-like.
Parameters
----------
obj : The object to check
Returns
-------
is_list_like : bool
Whether `obj` has list-like properties.
Examples
--------
>>> is_nested_list_like([[1, 2, 3]])
True
>>> is_nested_list_like([{1, 2, 3}, {1, 2, 3}])
True
>>> is_nested_list_like(["foo"])
False
>>> is_nested_list_like([])
False
>>> is_nested_list_like([[1, 2, 3], 1])
False
Notes
-----
    This won't reliably detect whether a consumable iterator (e.g.
a generator) is a nested-list-like without consuming the iterator.
To avoid consuming it, we always return False if the outer container
doesn't define `__len__`.
See Also
--------
is_list_like
"""
return (
is_list_like(obj)
and hasattr(obj, "__len__")
and len(obj) > 0
and all(is_list_like(item) for item in obj)
)
def is_dict_like(obj) -> bool:
"""
Check if the object is dict-like.
Parameters
----------
obj : The object to check
Returns
-------
is_dict_like : bool
Whether `obj` has dict-like properties.
Examples
--------
>>> is_dict_like({1: 2})
True
>>> is_dict_like([1, 2, 3])
False
>>> is_dict_like(dict)
False
>>> is_dict_like(dict())
True
"""
dict_like_attrs = ("__getitem__", "keys", "__contains__")
return (
all(hasattr(obj, attr) for attr in dict_like_attrs)
# [GH 25196] exclude classes
and not isinstance(obj, type)
)
def is_named_tuple(obj) -> bool:
"""
Check if the object is a named tuple.
Parameters
----------
obj : The object to check
Returns
-------
is_named_tuple : bool
Whether `obj` is a named tuple.
Examples
--------
>>> from collections import namedtuple
>>> Point = namedtuple("Point", ["x", "y"])
>>> p = Point(1, 2)
>>>
>>> is_named_tuple(p)
True
>>> is_named_tuple((1, 2))
False
"""
return isinstance(obj, abc.Sequence) and hasattr(obj, "_fields")
def is_hashable(obj) -> bool:
"""
Return True if hash(obj) will succeed, False otherwise.
Some types will pass a test against collections.abc.Hashable but fail when
they are actually hashed with hash().
Distinguish between these and other types by trying the call to hash() and
seeing if they raise TypeError.
Returns
-------
bool
Examples
--------
>>> import collections
>>> a = ([],)
>>> isinstance(a, collections.abc.Hashable)
True
>>> is_hashable(a)
False
"""
# Unfortunately, we can't use isinstance(obj, collections.abc.Hashable),
# which can be faster than calling hash. That is because numpy scalars
# fail this test.
# Reconsider this decision once this numpy bug is fixed:
# https://github.com/numpy/numpy/issues/5562
try:
hash(obj)
except TypeError:
return False
else:
return True
def is_sequence(obj) -> bool:
"""
Check if the object is a sequence of objects.
String types are not included as sequences here.
Parameters
----------
obj : The object to check
Returns
-------
is_sequence : bool
Whether `obj` is a sequence of objects.
Examples
--------
>>> l = [1, 2, 3]
>>>
>>> is_sequence(l)
True
>>> is_sequence(iter(l))
False
"""
try:
iter(obj) # Can iterate over it.
len(obj) # Has a length associated with it.
return not isinstance(obj, (str, bytes))
except (TypeError, AttributeError):
return False
def is_dataclass(item):
"""
Checks if the object is a data-class instance
Parameters
----------
item : object
    Returns
    -------
    is_dataclass : bool
        True if the item is an instance of a data-class,
        False if you pass the data class itself
Examples
--------
>>> from dataclasses import dataclass
>>> @dataclass
... class Point:
... x: int
... y: int
>>> is_dataclass(Point)
False
>>> is_dataclass(Point(0,2))
True
"""
try:
from dataclasses import is_dataclass
return is_dataclass(item) and not isinstance(item, type)
except ImportError:
return False
def is_inferred_bool_dtype(arr: ArrayLike) -> bool:
"""
Check if this is a ndarray[bool] or an ndarray[object] of bool objects.
Parameters
----------
arr : np.ndarray or ExtensionArray
Returns
-------
bool
Notes
-----
This does not include the special treatment is_bool_dtype uses for
Categorical.
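    Examples
    --------
    >>> is_inferred_bool_dtype(np.array([True, False]))
    True
    >>> is_inferred_bool_dtype(np.array([True, False], dtype=object))
    True
    >>> is_inferred_bool_dtype(np.array([1, 0]))
    False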
"""
if not isinstance(arr, np.ndarray):
return False
dtype = arr.dtype
if dtype == np.dtype(bool):
return True
elif dtype == np.dtype("object"):
return lib.is_bool_array(arr.ravel("K"))
return False
|
import sys
import recipe_util # pylint: disable=F0401
class Infra(recipe_util.Recipe):
"""Basic Recipe class for the Infrastructure repositories."""
@staticmethod
def fetch_spec(_props):
solution = lambda name, path_infix = None: {
'name' : name,
'url' : 'https://chromium.googlesource.com/infra/%s%s.git' % (
path_infix + '/' if path_infix else '', name
),
'deps_file': '.DEPS.git',
'managed' : False,
}
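    # For example, solution('expect_tests', 'testing') yields the URL
    # https://chromium.googlesource.com/infra/testing/expect_tests.git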
spec = {
'solutions': [
solution('infra'),
solution('expect_tests', 'testing'),
solution('testing_support', 'testing'),
],
}
return {
'type': 'gclient_git',
'gclient_git_spec': spec,
}
@staticmethod
def expected_root(_props):
return 'infra'
def main(argv=None):
return Infra().handle_args(argv)
if __name__ == '__main__':
sys.exit(main(sys.argv))
|
import tensorflow as tf
import common
from collections import OrderedDict
class TRANSITION(object):
def __init__(self, in_dim, out_dim, size, lr, do_keep_prob, weight_decay):
self.arch_params = {
'in_dim': in_dim,
'out_dim': out_dim,
'n_hidden_0': size[0],
'n_hidden_1': size[1],
'do_keep_prob': do_keep_prob
}
self.solver_params = {
'lr': lr,
'weight_decay': weight_decay,
'weights_stddev': 0.01,
}
self._init_layers()
def forward(self, state_, action, autoencoder):
'''
state_: matrix
action: matrix
'''
if autoencoder is None:
_input = state_
else:
_input, _ = autoencoder.forward(state_)
concat = tf.concat(concat_dim=1, values=[_input, action], name='input')
z0 = tf.nn.xw_plus_b(concat, self.weights['0'], self.biases['0'], name='h0')
h0 = tf.nn.relu(z0)
z1 = tf.nn.xw_plus_b(h0, self.weights['1'], self.biases['1'], name='h1')
h1 = tf.nn.relu(z1)
h1_do = tf.nn.dropout(h1, self.arch_params['do_keep_prob'])
delta = tf.nn.xw_plus_b(h1_do, self.weights['c'], self.biases['c'], name='delta')
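        # Residual transition: the network predicts a delta which is added to
        # the previous state; stop_gradient keeps gradients out of state_.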
previous_state = tf.stop_gradient(state_)
state = previous_state + delta
return state
def backward(self, loss):
# create an optimizer
opt = tf.train.AdamOptimizer(learning_rate=self.solver_params['lr'])
# weight decay
if self.solver_params['weight_decay']:
loss += self.solver_params['weight_decay'] * tf.add_n([tf.nn.l2_loss(v) for v in self.weights.values()])
# compute the gradients for a list of variables
        grads_and_vars = opt.compute_gradients(loss=loss, var_list=list(self.weights.values()) + list(self.biases.values()))
mean_abs_grad, mean_abs_w = common.compute_mean_abs_norm(grads_and_vars)
# apply the gradient
apply_grads = opt.apply_gradients(grads_and_vars)
return apply_grads, mean_abs_grad, mean_abs_w
def train(self, objective):
self.loss = objective
self.minimize, self.mean_abs_grad, self.mean_abs_w = self.backward(self.loss)
self.loss_summary = tf.scalar_summary('loss_t', objective)
def create_variables(self):
weights = OrderedDict([
('0', tf.Variable(tf.random_normal([self.arch_params['in_dim'] , self.arch_params['n_hidden_0']], stddev=self.solver_params['weights_stddev']))),
('1', tf.Variable(tf.random_normal([self.arch_params['n_hidden_0'], self.arch_params['n_hidden_1']], stddev=self.solver_params['weights_stddev']))),
('c', tf.Variable(tf.random_normal([self.arch_params['n_hidden_1'], self.arch_params['out_dim']] , stddev=self.solver_params['weights_stddev']))),
])
biases = OrderedDict([
('0', tf.Variable(tf.random_normal([self.arch_params['n_hidden_0']], stddev=self.solver_params['weights_stddev']))),
('1', tf.Variable(tf.random_normal([self.arch_params['n_hidden_1']], stddev=self.solver_params['weights_stddev']))),
('c', tf.Variable(tf.random_normal([self.arch_params['out_dim']], stddev=self.solver_params['weights_stddev'])))
])
return weights, biases
def _init_layers(self):
self.weights, self.biases = self.create_variables()
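# Illustrative wiring (a minimal sketch with hypothetical dimensions, using
# the same TF 0.x-era API as the class above):
#
#     state = tf.placeholder(tf.float32, shape=[None, 10])
#     action = tf.placeholder(tf.float32, shape=[None, 2])
#     target = tf.placeholder(tf.float32, shape=[None, 10])
#     # in_dim must equal state dim + action dim when autoencoder is None
#     net = TRANSITION(in_dim=12, out_dim=10, size=[64, 32], lr=1e-3,
#                      do_keep_prob=0.9, weight_decay=1e-4)
#     prediction = net.forward(state, action, autoencoder=None)
#     net.train(tf.reduce_mean(tf.square(prediction - target)))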
|
"""
.. NOTE::
    Added `imdb_original_name` recently, so if the title lookup translations
    cause problems, switch the find_entry calls to use that field instead!
"""
from __future__ import unicode_literals, division, absolute_import
from tests import FlexGetBase, use_vcr
class TestImdb(FlexGetBase):
__yaml__ = """
tasks:
test:
mock:
# tests search
- {title: 'Spirited Away'}
# tests direct url
- {title: 'Princess Mononoke', imdb_url: 'http://www.imdb.com/title/tt0119698/'}
# generic test material, some tricky ones here :)
- {title: 'Taken[2008]DvDrip[Eng]-FOO'}
# test short title, with repack and without year
- {title: 'Up.REPACK.720p.Bluray.x264-FlexGet'}
imdb:
min_votes: 20
year:
mock:
- {title: 'Princess Mononoke', imdb_url: 'http://www.imdb.com/title/tt0119698/'}
- {title: 'Taken[2008]DvDrip[Eng]-FOO', imdb_url: 'http://www.imdb.com/title/tt0936501/'}
- {title: 'Inglourious Basterds 2009', imdb_url: 'http://www.imdb.com/title/tt0361748/'}
imdb:
min_year: 2003
max_year: 2008
actor:
mock:
- {title: 'The Matrix', imdb_url: 'http://www.imdb.com/title/tt0133093/'}
- {title: 'The Terminator', imdb_url: 'http://www.imdb.com/title/tt0088247/'}
imdb:
accept_actors:
- nm0000206
reject_actors:
- nm0000216
director:
mock:
- {title: 'The Matrix', imdb_url: 'http://www.imdb.com/title/tt0133093/'}
- {title: 'The Terminator', imdb_url: 'http://www.imdb.com/title/tt0088247/'}
imdb:
accept_directors:
- nm0905152
- nm0905154
reject_directors:
- nm0000116
score:
mock:
- {title: 'The Matrix', imdb_url: 'http://www.imdb.com/title/tt0133093/'}
- {title: 'Battlefield Earth', imdb_url: 'http://www.imdb.com/title/tt0185183/'}
imdb:
min_score: 5.0
genre:
mock:
- {title: 'The Matrix', imdb_url: 'http://www.imdb.com/title/tt0133093/'}
- {title: 'Terms of Endearment', imdb_url: 'http://www.imdb.com/title/tt0086425/'}
- {title: 'Frozen', imdb_url: 'http://www.imdb.com/title/tt2294629/'}
imdb:
reject_genres:
- drama
accept_genres:
- sci-fi
language:
mock:
- {title: 'The Matrix', imdb_url: 'http://www.imdb.com/title/tt0133093/'}
- {title: '22 Bullets', imdb_url: 'http://www.imdb.com/title/tt1167638/'}
- {title: 'Crank', imdb_url: 'http://www.imdb.com/title/tt0479884/'}
- {title: 'The Damned United', imdb_url: 'http://www.imdb.com/title/tt1226271/'}
- {title: 'Rockstar', imdb_url: 'http://www.imdb.com/title/tt1839596/'}
- {title: 'The Host', imdb_url: 'http://www.imdb.com/title/tt0468492/'}
imdb:
accept_languages:
- english
reject_languages:
- french
mpaa:
mock:
- title: Saw 2004
imdb_url: http://www.imdb.com/title/tt0387564/
- title: Aladdin 1992
imdb_url: http://www.imdb.com/title/tt0103639/
imdb:
reject_mpaa_ratings:
- R
"""
@use_vcr
def test_lookup(self):
"""IMDB: Test Lookup (ONLINE)"""
self.execute_task('test')
assert self.task.find_entry(imdb_name='Spirited Away'), \
'Failed IMDB lookup (search Spirited Away)'
assert self.task.find_entry(imdb_name='Princess Mononoke'), \
'Failed imdb lookup (direct)'
assert self.task.find_entry(imdb_name='Taken', imdb_id='tt0936501'), \
'Failed to pick correct Taken from search results'
assert self.task.find_entry(imdb_id='tt1049413'), \
'Failed to lookup Up.REPACK.720p.Bluray.x264-FlexGet'
@use_vcr
def test_year(self):
self.execute_task('year')
assert self.task.find_entry('accepted', imdb_name='Taken'), \
'Taken should\'ve been accepted'
# mononoke should not be accepted or rejected
assert not self.task.find_entry('accepted', imdb_name='Mononoke-hime'), \
'Mononoke-hime should not have been accepted'
assert not self.task.find_entry('rejected', imdb_name='Mononoke-hime'), \
'Mononoke-hime should not have been rejected'
assert not self.task.find_entry('accepted', imdb_name='Inglourious Basterds 2009'), \
'Inglourious Basterds should not have been accepted'
@use_vcr
def test_actors(self):
self.execute_task('actor')
# check that actors have been parsed properly
matrix = self.task.find_entry(imdb_name='The Matrix')
assert matrix, 'entry for matrix missing'
assert 'nm0000206' in matrix['imdb_actors'], \
'Keanu Reeves is missing'
assert matrix['imdb_actors']['nm0000206'] == 'Keanu Reeves', \
'Keanu Reeves name is missing'
assert self.task.find_entry('accepted', imdb_name='The Matrix'), \
'The Matrix should\'ve been accepted'
assert not self.task.find_entry('rejected', imdb_name='The Terminator'), \
            'The Terminator should not have been rejected'
@use_vcr
def test_directors(self):
self.execute_task('director')
# check that directors have been parsed properly
matrix = self.task.find_entry(imdb_name='The Matrix')
assert 'nm0905154' in matrix['imdb_directors'], \
'Lana Wachowski is missing'
assert matrix['imdb_directors']['nm0905154'] == 'Lana Wachowski', \
'Lana Wachowski name is missing'
assert self.task.find_entry('accepted', imdb_name='The Matrix'), \
'The Matrix should\'ve been accepted'
assert not self.task.find_entry('rejected', imdb_name='The Terminator'), \
            'The Terminator should not have been rejected'
@use_vcr
def test_score(self):
self.execute_task('score')
assert self.task.find_entry(imdb_name='The Matrix'), 'The Matrix not found'
matrix = float(self.task.find_entry(imdb_name='The Matrix')['imdb_score'])
# Currently The Matrix has an 8.7, check a range in case it changes
assert 8.6 < matrix < 8.8, \
'The Matrix should have score 8.7 not %s. (Did the rating change?)' % matrix
assert int(self.task.find_entry(imdb_name='The Matrix')['imdb_votes']) > 450000, \
'The Matrix should have more than 450000 votes'
bfe = float(self.task.find_entry(title='Battlefield Earth')['imdb_score'])
        # Currently Battlefield Earth has a 2.4, check a range in case it changes
        assert 2.3 <= bfe <= 2.5, \
            'Battlefield Earth should have score 2.4 not %s. (Did the rating change?)' % bfe
assert self.task.find_entry('accepted', imdb_name='The Matrix'), \
'The Matrix should\'ve been accepted'
assert not self.task.find_entry('accepted', title='Battlefield Earth'), \
'Battlefield Earth shouldn\'t have been accepted'
@use_vcr
def test_genre(self):
self.execute_task('genre')
matrix = (self.task.find_entry(imdb_name='The Matrix')['imdb_genres'])
assert matrix == ['action', 'sci-fi'], \
'Could not find genres for The Matrix'
toe = (self.task.find_entry(imdb_name='Terms of Endearment')['imdb_genres'])
assert toe == ['comedy', 'drama'], \
'Could not find genres for Terms of Endearment'
frozen = (self.task.find_entry(imdb_name='Frozen')['imdb_genres'])
assert frozen == ['animation', 'adventure', 'comedy', 'family', 'fantasy', 'musical'], \
'Could not find genres for Frozen'
assert self.task.find_entry('accepted', imdb_name='The Matrix'), \
'The Matrix should\'ve been accepted'
assert not self.task.find_entry('rejected', title='Terms of Endearment'), \
'Terms of Endearment should have been rejected'
assert not self.task.find_entry('rejected', title='Frozen'), \
'Frozen should have been rejected'
@use_vcr
def test_language(self):
self.execute_task('language')
matrix = self.task.find_entry(imdb_name='The Matrix')['imdb_languages']
assert matrix == ['english'], 'Could not find languages for The Matrix'
# IMDB may return imdb_name of "L'immortel" for 22 Bullets
bullets = self.task.find_entry(imdb_original_name='L\'immortel')['imdb_languages']
assert bullets[0] == 'french', 'Could not find languages for 22 Bullets'
for movie in ['The Matrix', 'Crank', 'The Damned United']:
assert self.task.find_entry('accepted', imdb_name=movie), \
'%s should\'ve been accepted' % movie
assert not self.task.find_entry('rejected', title='22 Bullets'), \
'22 Bullets should have been rejected'
# This test no longer valid (01/31/13) with IMDB language change
# rockstar = self.task.find_entry(imdb_name='Rockstar')['imdb_languages']
# # http://flexget.com/ticket/1399
# assert rockstar == ['hindi'], 'Did not find only primary language'
host_langs = self.task.find_entry(imdb_name='The Host')['imdb_languages']
        # languages should be returned in order of prominence
assert host_langs == ['korean', 'english'], \
'Languages were not returned in order of prominence, got %s' % (', '.join(host_langs))
@use_vcr
def test_mpaa(self):
self.execute_task('mpaa')
aladdin = self.task.find_entry(imdb_name='Aladdin')
assert aladdin['imdb_mpaa_rating'] == 'G', ('Didn\'t get right rating for Aladdin. Should be G got %s' %
aladdin['imdb_mpaa_rating'])
assert aladdin.accepted, 'Non R rated movie should have been accepted'
saw = self.task.find_entry(imdb_name='Saw')
assert saw['imdb_mpaa_rating'] == 'R', 'Didn\'t get right rating for Saw'
assert not saw.accepted, 'R rated movie should not have been accepted'
class TestImdbRequired(FlexGetBase):
__yaml__ = """
tasks:
test:
mock:
- {title: 'Taken[2008]DvDrip[Eng]-FOO', imdb_url: 'http://www.imdb.com/title/tt0936501/'}
- {title: 'ASDFASDFASDF'}
imdb_required: yes
"""
@use_vcr
def test_imdb_required(self):
self.execute_task('test')
assert not self.task.find_entry('rejected', title='Taken[2008]DvDrip[Eng]-FOO'), \
'Taken should NOT have been rejected'
assert self.task.find_entry('rejected', title='ASDFASDFASDF'), \
'ASDFASDFASDF should have been rejected'
class TestImdbLookup(FlexGetBase):
__yaml__ = """
tasks:
invalid url:
mock:
- {title: 'Taken', imdb_url: 'imdb.com/title/tt0936501/'}
imdb_lookup: yes
cached:
mock:
- title: The Matrix 1999 720p
- title: The Matrix 1080p
- title: The Matrix xvid
imdb_lookup: yes
"""
@use_vcr
def test_invalid_url(self):
self.execute_task('invalid url')
# check that these were created
assert self.task.entries[0]['imdb_score'], 'didn\'t get score'
assert self.task.entries[0]['imdb_year'], 'didn\'t get year'
assert self.task.entries[0]['imdb_plot_outline'], 'didn\'t get plot'
@use_vcr
def test_cache(self, cassette=None):
# Hmm, this test doesn't work so well when in vcr 'all' record mode. It records new requests/responses
# to the cassette, but still keeps the old recorded ones, causing this to fail.
# Delete old cassette instead of using all mode to re-record.
self.execute_task('cached')
assert all(e['imdb_name'] == 'The Matrix' for e in self.task.all_entries)
# Should have only been one call to the actual imdb page
if cassette:
imdb_calls = sum(1 for r in cassette.requests if 'title/tt0133093' in r.uri)
assert imdb_calls == 1
|
from __future__ import print_function, unicode_literals
import re
import stat
from _emerge.Package import Package
from _emerge.RootConfig import RootConfig
from repoman.modules.scan.scanbase import ScanBase
from repoman.qa_data import no_exec, allvars
from repoman._portage import portage
from portage import os
from portage.const import LIVE_ECLASSES
from portage.exception import InvalidPackageName
pv_toolong_re = re.compile(r'[0-9]{19,}')
class Ebuild(ScanBase):
'''Class to run primary checks on ebuilds'''
def __init__(self, **kwargs):
'''Class init
@param qatracker: QATracker instance
@param portdb: portdb instance
@param repo_settings: repository settings instance
@param vcs_settings: VCSSettings instance
@param checks: checks dictionary
'''
super(Ebuild, self).__init__(**kwargs)
self.qatracker = kwargs.get('qatracker')
self.portdb = kwargs.get('portdb')
self.repo_settings = kwargs.get('repo_settings')
self.vcs_settings = kwargs.get('vcs_settings')
self.checks = kwargs.get('checks')
self.root_config = RootConfig(self.repo_settings.repoman_settings,
self.repo_settings.trees[self.repo_settings.root], None)
self.changed = None
self.xpkg = None
self.y_ebuild = None
self.pkg = None
self.metadata = None
self.eapi = None
self.inherited = None
self.live_ebuild = None
self.keywords = None
self.pkgs = {}
def _set_paths(self, **kwargs):
repolevel = kwargs.get('repolevel')
self.relative_path = os.path.join(self.xpkg, self.y_ebuild + ".ebuild")
self.full_path = os.path.join(self.repo_settings.repodir, self.relative_path)
self.ebuild_path = self.y_ebuild + ".ebuild"
if repolevel < 3:
self.ebuild_path = os.path.join(kwargs.get('pkgdir'), self.ebuild_path)
if repolevel < 2:
self.ebuild_path = os.path.join(kwargs.get('catdir'), self.ebuild_path)
self.ebuild_path = os.path.join(".", self.ebuild_path)
@property
def untracked(self):
'''Determines and returns if the ebuild is not tracked by the vcs'''
do_check = self.vcs_settings.vcs in ("cvs", "svn", "bzr")
really_notadded = (self.checks['ebuild_notadded'] and
self.y_ebuild not in self.vcs_settings.eadded)
if do_check and really_notadded:
# ebuild not added to vcs
return True
return False
def check(self, **kwargs):
		'''Perform changelog and untracked checks on the ebuild
@param xpkg: Package in which we check (object).
@param y_ebuild: Ebuild which we check (string).
@param changed: dictionary instance
@param repolevel: The depth within the repository
		@param catdir: The category directory
@param pkgdir: the package directory
@returns: dictionary, including {ebuild object}
'''
self.xpkg = kwargs.get('xpkg')
self.y_ebuild = kwargs.get('y_ebuild')
self.changed = kwargs.get('changed')
changelog_modified = kwargs.get('changelog_modified')
self._set_paths(**kwargs)
if self.checks['changelog'] and not changelog_modified \
and self.ebuild_path in self.changed.new_ebuilds:
self.qatracker.add_error('changelog.ebuildadded', self.relative_path)
if self.untracked:
# ebuild not added to vcs
self.qatracker.add_error(
"ebuild.notadded", self.xpkg + "/" + self.y_ebuild + ".ebuild")
# update the dynamic data
dyn_ebuild = kwargs.get('ebuild')
dyn_ebuild.set(self)
return False
def set_pkg_data(self, **kwargs):
'''Sets some classwide data needed for some of the checks
@returns: dictionary
'''
self.pkg = self.pkgs[self.y_ebuild]
self.metadata = self.pkg._metadata
self.eapi = self.metadata["EAPI"]
self.inherited = self.pkg.inherited
self.live_ebuild = LIVE_ECLASSES.intersection(self.inherited)
self.keywords = self.metadata["KEYWORDS"].split()
self.archs = set(kw.lstrip("~") for kw in self.keywords if not kw.startswith("-"))
return False
def bad_split_check(self, **kwargs):
'''Checks for bad category/package splits.
@param pkgdir: string: path
@returns: dictionary
'''
pkgdir = kwargs.get('pkgdir')
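		# portage.pkgsplit('foo-1.2.3-r1') -> ('foo', '1.2.3', 'r1');
		# it returns None when the name is not valid ${PN}-${PV}[-${PR}].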
myesplit = portage.pkgsplit(self.y_ebuild)
is_bad_split = myesplit is None or myesplit[0] != self.xpkg.split("/")[-1]
if is_bad_split:
is_pv_toolong = pv_toolong_re.search(myesplit[1])
is_pv_toolong2 = pv_toolong_re.search(myesplit[2])
if is_pv_toolong or is_pv_toolong2:
self.qatracker.add_error(
"ebuild.invalidname", self.xpkg + "/" + self.y_ebuild + ".ebuild")
return True
elif myesplit[0] != pkgdir:
print(pkgdir, myesplit[0])
self.qatracker.add_error(
"ebuild.namenomatch", self.xpkg + "/" + self.y_ebuild + ".ebuild")
return True
return False
def pkg_invalid(self, **kwargs):
'''Sets some pkg info and checks for invalid packages
@param validity_future: Future instance
@returns: dictionary, including {pkg object}
'''
fuse = kwargs.get('validity_future')
dyn_pkg = kwargs.get('pkg')
if self.pkg.invalid:
for k, msgs in self.pkg.invalid.items():
for msg in msgs:
self.qatracker.add_error(k, "%s: %s" % (self.relative_path, msg))
# update the dynamic data
fuse.set(False, ignore_InvalidState=True)
dyn_pkg.set(self.pkg)
return True
# update the dynamic data
dyn_pkg.set(self.pkg)
return False
def check_isebuild(self, **kwargs):
		'''Test the file for qualifications that it is an ebuild
@param checkdirlist: list of files in the current package directory
@param checkdir: current package directory path
@param xpkg: current package directory being checked
@param validity_future: Future instance
@returns: dictionary, including {pkgs, can_force}
'''
checkdirlist = kwargs.get('checkdirlist').get()
checkdir = kwargs.get('checkdir')
xpkg = kwargs.get('xpkg')
fuse = kwargs.get('validity_future')
can_force = kwargs.get('can_force')
self.continue_ = False
ebuildlist = []
pkgs = {}
for y in checkdirlist:
file_is_ebuild = y.endswith(".ebuild")
file_should_be_non_executable = y in no_exec or file_is_ebuild
if file_should_be_non_executable:
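				# 0o111 masks the execute bits for user, group and other.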
file_is_executable = stat.S_IMODE(
os.stat(os.path.join(checkdir, y)).st_mode) & 0o111
if file_is_executable:
self.qatracker.add_error("file.executable", os.path.join(checkdir, y))
if file_is_ebuild:
pf = y[:-7]
ebuildlist.append(pf)
catdir = xpkg.split("/")[0]
cpv = "%s/%s" % (catdir, pf)
try:
myaux = dict(zip(allvars, self.portdb.aux_get(cpv, allvars)))
except KeyError:
fuse.set(False, ignore_InvalidState=True)
self.qatracker.add_error("ebuild.syntax", os.path.join(xpkg, y))
continue
except IOError:
fuse.set(False, ignore_InvalidState=True)
self.qatracker.add_error("ebuild.output", os.path.join(xpkg, y))
continue
except InvalidPackageName:
fuse.set(False, ignore_InvalidState=True)
self.qatracker.add_error("ebuild.invalidname", os.path.join(xpkg, y))
continue
if not portage.eapi_is_supported(myaux["EAPI"]):
fuse.set(False, ignore_InvalidState=True)
self.qatracker.add_error("EAPI.unsupported", os.path.join(xpkg, y))
continue
pkgs[pf] = Package(
cpv=cpv, metadata=myaux, root_config=self.root_config,
type_name="ebuild")
if len(pkgs) != len(ebuildlist):
# If we can't access all the metadata then it's totally unsafe to
# commit since there's no way to generate a correct Manifest.
# Do not try to do any more QA checks on this package since missing
# metadata leads to false positives for several checks, and false
# positives confuse users.
self.continue_ = True
can_force.set(False, ignore_InvalidState=True)
self.pkgs = pkgs
# set our updated data
dyn_pkgs = kwargs.get('pkgs')
dyn_pkgs.set(pkgs)
return self.continue_
@property
def runInPkgs(self):
'''Package level scans'''
return (True, [self.check_isebuild])
@property
def runInEbuilds(self):
'''Ebuild level scans'''
return (True, [self.check, self.set_pkg_data, self.bad_split_check, self.pkg_invalid])
|
from twisted.trial import unittest
from buildbot.test.util import www
from buildbot.test.util.misc import TestReactorMixin
from buildbot.www import resource
class ResourceSubclass(resource.Resource):
needsReconfig = True
class Resource(TestReactorMixin, www.WwwTestMixin, unittest.TestCase):
def setUp(self):
self.setUpTestReactor()
def test_base_url(self):
master = self.make_master(url=b'h:/a/b/')
rsrc = resource.Resource(master)
self.assertEqual(rsrc.base_url, b'h:/a/b/')
def test_reconfigResource_registration(self):
master = self.make_master(url=b'h:/a/b/')
rsrc = ResourceSubclass(master)
master.www.resourceNeedsReconfigs.assert_called_with(rsrc)
class RedirectResource(TestReactorMixin, www.WwwTestMixin, unittest.TestCase):
def setUp(self):
self.setUpTestReactor()
def test_redirect(self):
master = self.make_master(url=b'h:/a/b/')
rsrc = resource.RedirectResource(master, b'foo')
self.render_resource(rsrc, b'/')
self.assertEqual(self.request.redirected_to, b'h:/a/b/foo')
def test_redirect_cr_lf(self):
master = self.make_master(url=b'h:/a/b/')
rsrc = resource.RedirectResource(master, b'foo\r\nbar')
self.render_resource(rsrc, b'/')
self.assertEqual(self.request.redirected_to, b'h:/a/b/foo')
|
import os
import os.path
import sys
import time
import gzip
import shutil
import gettext
try:
# python 2
import cStringIO
except ImportError:
# python3
import io as cStringIO
import dumper
from spacewalk.common.usix import raise_with_tb
from spacewalk.common import rhnMail
from spacewalk.common.rhnConfig import CFG, initCFG
from spacewalk.common.rhnTB import Traceback, exitWithTraceback
from spacewalk.common.checksum import getFileChecksum
from spacewalk.server import rhnSQL
from spacewalk.server.rhnSQL import SQLError, SQLSchemaError, SQLConnectError
from spacewalk.satellite_tools.exporter import xmlWriter
from spacewalk.satellite_tools import xmlDiskSource, diskImportLib, progress_bar
from spacewalk.satellite_tools.syncLib import initEMAIL_LOG, dumpEMAIL_LOG, log2email, log2stderr, log2stdout
from iss_ui import UI
from iss_actions import ActionDeps
import iss_isos
t = gettext.translation('spacewalk-backend-server', fallback=True)
_ = t.ugettext
class ISSError(Exception):
def __init__(self, msg, tb):
Exception.__init__(self)
self.msg = msg
self.tb = tb
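# Disk source for the per-channel "short package" dumps. It plays the same
# role as the xmlDiskSource classes used by FileMapper below, but builds its
# file path from a channel id rather than a package or channel label.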
class ISSChannelPackageShortDiskSource:
def __init__(self, mount_point, channel_name=None):
self.mp = mount_point
self.channelid = channel_name
self.pathkey = "xml-channel-packages/rhn-channel-%d.data"
def setChannel(self, channel_id):
self.channelid = channel_id
def _getFile(self):
return os.path.join(self.mp, self.pathkey % (self.channelid,))
class FileMapper:
""" This class maps dumps to files. In other words, you give it
the type of dump you're doing and it gives you the file to
write it to.
"""
def __init__(self, mount_point):
self.mp = mount_point
self.filemap = {
'arches': xmlDiskSource.ArchesDiskSource(self.mp),
'arches-extra': xmlDiskSource.ArchesExtraDiskSource(self.mp),
'blacklists': xmlDiskSource.BlacklistsDiskSource(self.mp),
'channelfamilies': xmlDiskSource.ChannelFamilyDiskSource(self.mp),
'orgs': xmlDiskSource.OrgsDiskSource(self.mp),
'channels': xmlDiskSource.ChannelDiskSource(self.mp),
'channel-pkg-short': ISSChannelPackageShortDiskSource(self.mp),
'packages-short': xmlDiskSource.ShortPackageDiskSource(self.mp),
'packages': xmlDiskSource.PackageDiskSource(self.mp),
'sourcepackages': xmlDiskSource.SourcePackageDiskSource(self.mp),
'errata': xmlDiskSource.ErrataDiskSource(self.mp),
'kickstart_trees': xmlDiskSource.KickstartDataDiskSource(self.mp),
'kickstart_files': xmlDiskSource.KickstartFileDiskSource(self.mp),
'binary_rpms': xmlDiskSource.BinaryRPMDiskSource(self.mp),
'comps': xmlDiskSource.ChannelCompsDiskSource(self.mp),
'modules': xmlDiskSource.ChannelModulesDiskSource(self.mp),
'productnames': xmlDiskSource.ProductnamesDiskSource(self.mp),
}
# This will make sure that all of the directories leading up to the
# xml file actually exist.
@staticmethod
def setup_file(ofile):
# Split the path. The filename is [1], and the directories are in [0].
dirs_to_make = os.path.split(ofile)[0]
# Make the directories if they don't already exist.
if not os.path.exists(dirs_to_make):
os.makedirs(dirs_to_make)
return ofile
# The get*File methods will return the full path to the xml file that the dumps are placed in.
# pylint: disable=W0212
def getArchesFile(self):
return self.setup_file(self.filemap['arches']._getFile())
def getArchesExtraFile(self):
return self.setup_file(self.filemap['arches-extra']._getFile())
def getBlacklistsFile(self):
return self.setup_file(self.filemap['blacklists']._getFile())
def getOrgsFile(self):
return self.setup_file(self.filemap['orgs']._getFile())
def getChannelFamiliesFile(self):
return self.setup_file(self.filemap['channelfamilies']._getFile())
def getBinaryRPMFile(self):
return self.setup_file(self.filemap['binary_rpms']._getFile())
def getChannelsFile(self, channelname):
self.filemap['channels'].setChannel(channelname)
return self.setup_file(self.filemap['channels']._getFile())
def getChannelCompsFile(self, channelname):
self.filemap['comps'].setChannel(channelname)
return self.setup_file(self.filemap['comps']._getFile())
def getChannelModulesFile(self, channelname):
self.filemap['modules'].setChannel(channelname)
return self.setup_file(self.filemap['modules']._getFile())
def getChannelPackageShortFile(self, channel_id):
self.filemap['channel-pkg-short'].setChannel(channel_id)
return self.setup_file(self.filemap['channel-pkg-short']._getFile())
def getPackagesFile(self, packageid):
self.filemap['packages'].setID(packageid)
return self.setup_file(self.filemap['packages']._getFile())
def getShortPackagesFile(self, packageid):
self.filemap['packages-short'].setID(packageid)
return self.setup_file(self.filemap['packages-short']._getFile())
def getSourcePackagesFile(self, sp_id):
self.filemap['sourcepackages'].setID(sp_id)
return self.setup_file(self.filemap['sourcepackages']._getFile())
def getErrataFile(self, errataid):
self.filemap['errata'].setID(errataid)
return self.setup_file(self.filemap['errata']._getFile())
def getKickstartTreeFile(self, ks_id):
self.filemap['kickstart_trees'].setID(ks_id)
return self.setup_file(self.filemap['kickstart_trees']._getFile())
def getKickstartFileFile(self, ks_label, relative_path):
self.filemap['kickstart_files'].setID(ks_label)
self.filemap['kickstart_files'].set_relative_path(relative_path)
return self.setup_file(self.filemap['kickstart_files']._getFile())
def getProductNamesFile(self):
return self.setup_file(self.filemap['productnames']._getFile())
class Dumper(dumper.XML_Dumper):
""" This class subclasses the XML_Dumper class. It overrides
the _get_xml_writer method and adds a set_stream method,
which will let it write to a file instead of over the wire.
"""
def __init__(self, outputdir, channel_labels, org_ids, hardlinks,
start_date, end_date, use_rhn_date, whole_errata):
dumper.XML_Dumper.__init__(self)
self.fm = FileMapper(outputdir)
self.mp = outputdir
self.pb_label = "Exporting: "
self.pb_length = 20 # progress bar length
self.pb_complete = " - Done!" # string that's printed when progress bar is done.
self.pb_char = "#" # the string used as each unit in the progress bar.
self.hardlinks = hardlinks
self.filename = None
self.outstream = None
self.start_date = start_date
self.end_date = end_date
self.use_rhn_date = use_rhn_date
self.whole_errata = whole_errata
if self.start_date:
dates = {'start_date': self.start_date,
'end_date': self.end_date, }
else:
dates = {}
# The queries here are a little weird. They grab just enough information
# to satisfy the dumper objects, which will use the information to look up
# any additional information that they need. That's why they don't seem to grab all
# of the information that you'd think would be necessary to sync stuff.
        ###CHANNEL INFO###
try:
query = """
select ch.id channel_id, label,
TO_CHAR(last_modified, 'YYYYMMDDHH24MISS') last_modified
from rhnChannel ch
where ch.label = :label
"""
self.channel_query = rhnSQL.Statement(query)
ch_data = rhnSQL.prepare(self.channel_query)
comps_query = """
select relative_filename
from rhnChannelComps
where channel_id = :channel_id
and comps_type_id = 1
order by id desc
"""
modules_query = """
select relative_filename
from rhnChannelComps
where channel_id = :channel_id
and comps_type_id = 2
order by id desc
"""
self.channel_comps_query = rhnSQL.Statement(comps_query)
channel_comps_sth = rhnSQL.prepare(self.channel_comps_query)
self.channel_modules_query = rhnSQL.Statement(modules_query)
channel_modules_sth = rhnSQL.prepare(self.channel_modules_query)
# self.channel_ids contains the list of dictionaries that hold the channel information
# The keys are 'channel_id', 'label', and 'last_modified'.
self.channel_comps = {}
self.channel_modules = {}
self.set_exportable_orgs(org_ids)
# Channel_labels should be the list of channels passed into rhn-satellite-exporter by the user.
log2stdout(1, "Gathering channel info...")
for ids in channel_labels:
ch_data.execute(label=ids)
ch_info = ch_data.fetchall_dict()
if not ch_info:
raise ISSError("Error: Channel %s not found." % ids, "")
self.channel_ids = self.channel_ids + ch_info
channel_comps_sth.execute(channel_id=ch_info[0]['channel_id'])
comps_info = channel_comps_sth.fetchone_dict()
channel_modules_sth.execute(channel_id=ch_info[0]['channel_id'])
modules_info = channel_modules_sth.fetchone_dict()
if comps_info is not None:
self.channel_comps[ch_info[0]['channel_id']] = comps_info['relative_filename']
if modules_info is not None:
self.channel_modules[ch_info[0]['channel_id']] = modules_info['relative_filename']
# For list of channel families, we want to also list those relevant for channels
# that are already on disk, so that we do not lose those families with
# "incremental" dumps. So we will gather list of channel ids for channels already
# in dump.
channel_labels_for_families = self.fm.filemap['channels'].list()
print("Appending channels %s" % (channel_labels_for_families))
for ids in channel_labels_for_families:
ch_data.execute(label=ids)
ch_info = ch_data.fetchall_dict()
if ch_info:
self.channel_ids_for_families = self.channel_ids_for_families + ch_info
except ISSError:
# Don't want calls to sys.exit to show up as a "bad" error.
raise
except Exception:
e = sys.exc_info()[1]
tbout = cStringIO.StringIO()
Traceback(mail=0, ostream=tbout, with_locals=1)
raise_with_tb(ISSError("%s caught while getting channel info." %
e.__class__.__name__, tbout.getvalue()), sys.exc_info()[2])
###BINARY RPM INFO###
try:
if self.whole_errata and self.start_date:
query = """ select rcp.package_id id, rp.path path
from rhnChannelPackage rcp, rhnPackage rp
left join rhnErrataPackage rep on rp.id = rep.package_id
left join rhnErrata re on rep.errata_id = re.id
where rcp.package_id = rp.id
and rcp.channel_id = :channel_id
"""
else:
query = """
select rcp.package_id id, rp.path path
from rhnChannelPackage rcp, rhnPackage rp
where rcp.package_id = rp.id
and rcp.channel_id = :channel_id
"""
if self.start_date:
if self.whole_errata:
if self.use_rhn_date:
query += """ and
((re.last_modified >= TO_TIMESTAMP(:start_date, 'YYYYMMDDHH24MISS')
and re.last_modified <= TO_TIMESTAMP(:end_date, 'YYYYMMDDHH24MISS')
) or (rep.package_id is NULL
and rp.last_modified >= TO_TIMESTAMP(:start_date, 'YYYYMMDDHH24MISS')
and rp.last_modified <= TO_TIMESTAMP(:end_date, 'YYYYMMDDHH24MISS'))
)
"""
else:
query += """ and
((re.modified >= TO_TIMESTAMP(:start_date, 'YYYYMMDDHH24MISS')
and re.modified <= TO_TIMESTAMP(:end_date, 'YYYYMMDDHH24MISS')
) or (rep.package_id is NULL
and rcp.modified >= TO_TIMESTAMP(:start_date, 'YYYYMMDDHH24MISS')
and rcp.modified <= TO_TIMESTAMP(:end_date, 'YYYYMMDDHH24MISS'))
)
"""
elif self.use_rhn_date:
query += """
and rp.last_modified >= TO_TIMESTAMP(:start_date, 'YYYYMMDDHH24MISS')
and rp.last_modified <= TO_TIMESTAMP(:end_date, 'YYYYMMDDHH24MISS')
"""
else:
query += """
and rcp.modified >= TO_TIMESTAMP(:start_date, 'YYYYMMDDHH24MISS')
and rcp.modified <= TO_TIMESTAMP(:end_date, 'YYYYMMDDHH24MISS')
"""
self.brpm_query = rhnSQL.Statement(query)
brpm_data = rhnSQL.prepare(self.brpm_query)
# self.brpms is a list of binary rpm info. It is a list of dictionaries, where each dictionary
# has 'id' and 'path' as the keys.
self.brpms = []
log2stdout(1, "Gathering binary RPM info...")
for ch in self.channel_ids:
brpm_data.execute(channel_id=ch['channel_id'], **dates)
self.brpms = self.brpms + (brpm_data.fetchall_dict() or [])
except Exception:
e = sys.exc_info()[1]
tbout = cStringIO.StringIO()
Traceback(mail=0, ostream=tbout, with_locals=1)
raise_with_tb(ISSError("%s caught while getting binary rpm info." %
e.__class__.__name__, tbout.getvalue()), sys.exc_info()[2])
###PACKAGE INFO###
# This will grab channel package information for a given channel.
try:
if self.whole_errata and self.start_date:
query = """
select rp.id package_id,
TO_CHAR(rp.last_modified, 'YYYYMMDDHH24MISS') last_modified
from rhnChannelPackage rcp, rhnPackage rp
left join rhnErrataPackage rep on rp.id = rep.package_id
left join rhnErrata re on rep.errata_id = re.id
where rcp.channel_id = :channel_id
and rcp.package_id = rp.id
"""
else:
query = """
select rp.id package_id,
TO_CHAR(rp.last_modified, 'YYYYMMDDHH24MISS') last_modified
from rhnPackage rp, rhnChannelPackage rcp
where rcp.channel_id = :channel_id
and rcp.package_id = rp.id
"""
if self.start_date:
if self.whole_errata:
if self.use_rhn_date:
query += """ and
((re.last_modified >= TO_TIMESTAMP(:start_date, 'YYYYMMDDHH24MISS')
and re.last_modified <= TO_TIMESTAMP(:end_date, 'YYYYMMDDHH24MISS')
) or (rep.package_id is NULL
and rp.last_modified >= TO_TIMESTAMP(:start_date, 'YYYYMMDDHH24MISS')
and rp.last_modified <= TO_TIMESTAMP(:end_date, 'YYYYMMDDHH24MISS'))
)
"""
else:
query += """ and
((re.modified >= TO_TIMESTAMP(:start_date, 'YYYYMMDDHH24MISS')
and re.modified <= TO_TIMESTAMP(:end_date, 'YYYYMMDDHH24MISS')
) or (rep.package_id is NULL
and rcp.modified >= TO_TIMESTAMP(:start_date, 'YYYYMMDDHH24MISS')
and rcp.modified <= TO_TIMESTAMP(:end_date, 'YYYYMMDDHH24MISS'))
)
"""
elif self.use_rhn_date:
query += """
and rp.last_modified >= TO_TIMESTAMP(:start_date, 'YYYYMMDDHH24MISS')
and rp.last_modified <= TO_TIMESTAMP(:end_date, 'YYYYMMDDHH24MISS')
"""
else:
query += """
and (rcp.modified >= TO_TIMESTAMP(:start_date, 'YYYYMMDDHH24MISS')
and rcp.modified <= TO_TIMESTAMP(:end_date, 'YYYYMMDDHH24MISS'))
"""
self.package_query = rhnSQL.Statement(query)
package_data = rhnSQL.prepare(self.package_query)
# self.pkg_info will be a list of dictionaries containing channel package information.
# The keys are 'package_id' and 'last_modified'.
self.pkg_info = []
# This fills in the pkg_info list with channel package information from the channels in
# self.channel_ids.
log2stdout(1, "Gathering package info...")
for channel_id in self.channel_ids:
package_data.execute(channel_id=channel_id['channel_id'], **dates)
a_package = package_data.fetchall_dict() or []
# Don't bother placing None into self.pkg_info.
if a_package:
self.pkg_info = self.pkg_info + a_package
except Exception:
e = sys.exc_info()[1]
tbout = cStringIO.StringIO()
Traceback(mail=0, ostream=tbout, with_locals=1)
raise_with_tb(ISSError("%s caught while getting package info." %
e.__class__.__name__, tbout.getvalue()), sys.exc_info()[2])
###SOURCE PACKAGE INFO###
try:
query = """
select ps.id package_id,
TO_CHAR(ps.last_modified,'YYYYMMDDHH24MISS') last_modified,
ps.source_rpm_id source_rpm_id
from rhnPackageSource ps
"""
if self.start_date:
if self.whole_errata:
query += """
left join rhnErrataFilePackageSource refps on refps.package_id = ps.id
left join rhnErrataFile ref on refps.errata_file_id = ref.id
left join rhnErrata re on ref.errata_id = re.id
"""
if self.use_rhn_date:
query += """ and
((re.last_modified >= TO_TIMESTAMP(:start_date, 'YYYYMMDDHH24MISS')
and re.last_modified <= TO_TIMESTAMP(:end_date, 'YYYYMMDDHH24MISS')
) or (refps.package_id is NULL
and ps.last_modified >= TO_TIMESTAMP(:start_date, 'YYYYMMDDHH24MISS')
and ps.last_modified <= TO_TIMESTAMP(:end_date, 'YYYYMMDDHH24MISS'))
)
"""
else:
query += """ and
((re.modified >= TO_TIMESTAMP(:start_date, 'YYYYMMDDHH24MISS')
and re.modified <= TO_TIMESTAMP(:end_date, 'YYYYMMDDHH24MISS')
) or (refps.package_id is NULL
and ps.modified >= TO_TIMESTAMP(:start_date, 'YYYYMMDDHH24MISS')
and ps.modified <= TO_TIMESTAMP(:end_date, 'YYYYMMDDHH24MISS'))
)
"""
elif self.use_rhn_date:
query += """
where ps.last_modified >= TO_TIMESTAMP(:start_date, 'YYYYMMDDHH24MISS')
and ps.last_modified <= TO_TIMESTAMP(:end_date, 'YYYYMMDDHH24MISS')
"""
else:
query += """
where ps.modified >= TO_TIMESTAMP(:start_date, 'YYYYMMDDHH24MISS')
and ps.modified <= TO_TIMESTAMP(:end_date, 'YYYYMMDDHH24MISS')
"""
self.source_package_query = rhnSQL.Statement(query)
source_package_data = rhnSQL.prepare(self.source_package_query)
source_package_data.execute(**dates)
# self.src_pkg_info is a list of dictionaries containing the source package information.
# The keys for each dictionary are 'package_id', 'last_modified', and 'source_rpm_id'.
self.src_pkg_info = source_package_data.fetchall_dict() or []
# Again, don't bother placing None into the list.
if not self.src_pkg_info:
self.src_pkg_info = []
except Exception:
e = sys.exc_info()[1]
tbout = cStringIO.StringIO()
Traceback(mail=0, ostream=tbout, with_locals=1)
raise_with_tb(ISSError("%s caught while getting source package info." %
e.__class__.__name__, tbout.getvalue()), sys.exc_info()[2])
###ERRATA INFO###
try:
query = """
select e.id errata_id,
TO_CHAR(e.last_modified,'YYYYMMDDHH24MISS') last_modified,
e.advisory_name "advisory-name"
from rhnChannelErrata ce, rhnErrata e
where ce.channel_id = :channel_id
and ce.errata_id = e.id
"""
if self.start_date:
if self.use_rhn_date:
query += """
and e.last_modified >= TO_TIMESTAMP(:start_date, 'YYYYMMDDHH24MISS')
and e.last_modified <= TO_TIMESTAMP(:end_date, 'YYYYMMDDHH24MISS')
"""
else:
query += """
and ce.modified >= TO_TIMESTAMP(:start_date, 'YYYYMMDDHH24MISS')
and ce.modified <= TO_TIMESTAMP(:end_date, 'YYYYMMDDHH24MISS')
"""
self.errata_query = rhnSQL.Statement(query)
errata_data = rhnSQL.prepare(self.errata_query)
# self.errata_info will be a list of dictionaries containing errata info for the channels
# that the user listed. The keys are 'errata_id' and 'last_modified'.
self.errata_info = []
log2stdout(1, "Gathering errata info...")
for channel_id in self.channel_ids:
errata_data.execute(channel_id=channel_id['channel_id'], **dates)
an_errata = errata_data.fetchall_dict() or []
if an_errata:
self.errata_info = self.errata_info + an_errata
except Exception:
e = sys.exc_info()[1]
tbout = cStringIO.StringIO()
Traceback(mail=0, ostream=tbout, with_locals=1)
raise_with_tb(ISSError("%s caught while getting errata info." %
e.__class__.__name__, tbout.getvalue()), sys.exc_info()[2])
###KICKSTART DATA/TREES INFO###
try:
query = """
select kt.id kstree_id, kt.label kickstart_label,
TO_CHAR(kt.last_modified, 'YYYYMMDDHH24MISS') last_modified
from rhnKickstartableTree kt
where kt.channel_id = :channel_id
"""
if self.start_date:
if self.use_rhn_date:
query += """
and kt.last_modified >= TO_TIMESTAMP(:start_date, 'YYYYMMDDHH24MISS')
and kt.last_modified <= TO_TIMESTAMP(:end_date, 'YYYYMMDDHH24MISS')
and kt.org_id is Null
"""
else:
query += """
and kt.modified >= TO_TIMESTAMP(:start_date, 'YYYYMMDDHH24MISS')
and kt.modified <= TO_TIMESTAMP(:end_date, 'YYYYMMDDHH24MISS')
and kt.org_id is Null
"""
self.kickstart_trees_query = rhnSQL.Statement(query)
kickstart_data = rhnSQL.prepare(self.kickstart_trees_query)
self.kickstart_trees = []
log2stdout(1, "Gathering kickstart data...")
for channel_id in self.channel_ids:
kickstart_data.execute(channel_id=channel_id['channel_id'],
**dates)
a_tree = kickstart_data.fetchall_dict() or []
if a_tree:
self.kickstart_trees = self.kickstart_trees + a_tree
except Exception:
e = sys.exc_info()[1]
tbout = cStringIO.StringIO()
Traceback(mail=0, ostream=tbout, with_locals=1)
raise_with_tb(ISSError("%s caught while getting kickstart data info." %
e.__class__.__name__, tbout.getvalue()), sys.exc_info()[2])
###KICKSTART FILES INFO###
try:
query = """
select rktf.relative_filename "relative-path",
c.checksum_type "checksum-type", c.checksum,
rktf.file_size "file-size",
TO_CHAR(rktf.last_modified, 'YYYYMMDDHH24MISS') "last-modified",
rkt.base_path "base-path",
rkt.label "label",
TO_CHAR(rkt.modified, 'YYYYMMDDHH24MISS') "modified"
from rhnKSTreeFile rktf, rhnKickstartableTree rkt,
rhnChecksumView c
where rktf.kstree_id = :kstree_id
and rkt.id = rktf.kstree_id
and rktf.checksum_id = c.id
"""
if self.start_date:
if self.use_rhn_date:
query += """
and rkt.last_modified >= TO_TIMESTAMP(:start_date, 'YYYYMMDDHH24MISS')
and rkt.last_modified <= TO_TIMESTAMP(:end_date, 'YYYYMMDDHH24MISS')
"""
else:
query += """
and rkt.modified >= TO_TIMESTAMP(:start_date, 'YYYYMMDDHH24MISS')
and rkt.modified <= TO_TIMESTAMP(:end_date, 'YYYYMMDDHH24MISS')
"""
self.kickstart_files_query = rhnSQL.Statement(query)
kickstart_files = rhnSQL.prepare(self.kickstart_files_query)
self.kickstart_files = []
log2stdout(1, "Gathering kickstart files info...")
for kstree in self.kickstart_trees:
kickstart_files.execute(kstree_id=kstree['kstree_id'], **dates)
a_file = kickstart_files.fetchall_dict() or []
if a_file:
self.kickstart_files = self.kickstart_files + a_file
except Exception:
e = sys.exc_info()[1]
tbout = cStringIO.StringIO()
Traceback(mail=0, ostream=tbout, with_locals=1)
raise_with_tb(ISSError("%s caught while getting kickstart files info." %
e.__class__.__name__, tbout.getvalue()), sys.exc_info()[2])
    # The close method overrides the parent class's close method. This
    # implementation closes self.outstream, an attribute added by this
    # subclass. See set_filename and _get_xml_writer for more info.
def close(self):
self.outstream.close()
# This is an addition that allows the caller to set the filename for the output stream.
def set_filename(self, filename):
self.filename = filename
# This method overrides the parent class's version of this method. This version allows the output stream to
# be a file, which should have been set prior to this via the set_filename method.
# TODO: Add error-checking. Either give self.outstream a sane default or have it throw an error if it hasn't
# been set yet.
def _get_xml_writer(self):
self.outstream = open(self.filename, "w")
return xmlWriter.XMLWriter(stream=self.outstream)
# The dump_* methods aren't really overrides because they don't preserve the method
# signature, but they are meant as replacements for the methods defined in the base
# class that have the same name. They will set up the file for the dump, collect info
# necessary for the dumps to take place, and then call the base class version of the
# method to do the actual dumping.
def _dump_simple(self, filename, dump_func, startmsg, endmsg, exceptmsg):
try:
print("\n")
log2stdout(1, startmsg)
pb = progress_bar.ProgressBar(self.pb_label,
self.pb_complete,
1,
self.pb_length,
self.pb_char)
pb.printAll(1)
self.set_filename(filename)
dump_func(self)
pb.addTo(1)
pb.printIncrement()
pb.printComplete()
log2stdout(4, endmsg % filename)
except Exception:
e = sys.exc_info()[1]
tbout = cStringIO.StringIO()
Traceback(mail=0, ostream=tbout, with_locals=1)
raise_with_tb(ISSError(exceptmsg % e.__class__.__name__, tbout.getvalue()), sys.exc_info()[2])
def dump_arches(self, rpm_arch_type_only=0):
self._dump_simple(self.fm.getArchesFile(), dumper.XML_Dumper.dump_arches,
"Exporting arches...",
"Arches exported to %s",
"%s caught in dump_arches.")
# This dumps arches_extra
def dump_server_group_type_server_arches(self, rpm_arch_type_only=0, virt_filter=0):
self._dump_simple(self.fm.getArchesExtraFile(),
dumper.XML_Dumper.dump_server_group_type_server_arches,
"Exporting arches extra...",
"Arches Extra exported to %s",
"%s caught in dump_server_group_type_server_arches.")
def dump_blacklist_obsoletes(self):
self._dump_simple(self.fm.getBlacklistsFile(),
dumper.XML_Dumper.dump_blacklist_obsoletes,
"Exporting blacklists...",
"Blacklists exported to %s",
"%s caught in dump_blacklist_obsoletes.")
def dump_channel_families(self):
self._dump_simple(self.fm.getChannelFamiliesFile(),
dumper.XML_Dumper.dump_channel_families,
"Exporting channel families...",
"Channel Families exported to %s",
"%s caught in dump_channel_families.")
def dump_product_names(self):
self._dump_simple(self.fm.getProductNamesFile(),
dumper.XML_Dumper.dump_product_names,
"Exporting product names...",
"Product names exported to %s",
"%s caught in dump_product_names.")
def dump_orgs(self):
self._dump_simple(self.fm.getOrgsFile(),
dumper.XML_Dumper.dump_orgs,
"Exporting orgs...",
"Orgs exported to %s",
"%s caught in dump_orgs.")
def copy_repomd(self, repomds, channel, get_file_func):
if channel['channel_id'] in repomds:
full_filename = os.path.join(CFG.MOUNT_POINT, repomds[channel['channel_id']])
target_filename = get_file_func(channel['label'])
log2email(3, "Need to copy %s to %s" % (full_filename, target_filename))
if self.hardlinks:
os.link(full_filename, target_filename)
else:
shutil.copyfile(full_filename, target_filename)
def dump_channels(self, channel_labels=None, start_date=None, end_date=None,
use_rhn_date=True, whole_errata=False):
try:
print("\n")
log2stdout(1, "Exporting channel info...")
pb = progress_bar.ProgressBar(self.pb_label,
self.pb_complete,
len(self.channel_ids),
self.pb_length,
self.pb_char)
pb.printAll(1)
for channel in self.channel_ids:
self.set_filename(self.fm.getChannelsFile(channel['label']))
dumper.XML_Dumper.dump_channels(self, [channel],
self.start_date, self.end_date,
self.use_rhn_date, self.whole_errata)
log2email(4, "Channel: %s" % channel['label'])
log2email(5, "Channel exported to %s" % self.fm.getChannelsFile(channel['label']))
self.copy_repomd(self.channel_comps, channel, self.fm.getChannelCompsFile)
self.copy_repomd(self.channel_modules, channel, self.fm.getChannelModulesFile)
pb.addTo(1)
pb.printIncrement()
pb.printComplete()
log2stderr(3, "Number of channels exported: %s" % str(len(self.channel_ids)))
except Exception:
e = sys.exc_info()[1]
tbout = cStringIO.StringIO()
Traceback(mail=0, ostream=tbout, with_locals=1)
raise_with_tb(ISSError("%s caught in dump_channels." % e.__class__.__name__,
tbout.getvalue()), sys.exc_info()[2])
def dump_channel_packages_short(self, channel_label=None, last_modified=None, filepath=None,
validate_channels=False, send_headers=False,
open_stream=True):
try:
print("\n")
for ch_id in self.channel_ids:
filepath = self.fm.getChannelPackageShortFile(ch_id['channel_id'])
self.set_filename(filepath)
dumper.XML_Dumper.dump_channel_packages_short(self, ch_id, ch_id['last_modified'], filepath)
except Exception:
e = sys.exc_info()[1]
tbout = cStringIO.StringIO()
Traceback(mail=0, ostream=tbout, with_locals=1)
raise_with_tb(ISSError("%s caught in dump_channel_packages_short." %
e.__class__.__name__, tbout.getvalue()), sys.exc_info()[2])
def dump_packages(self, packages=None):
try:
print("\n")
log2stdout(1, "Exporting packages...")
pb = progress_bar.ProgressBar(self.pb_label,
self.pb_complete,
len(self.pkg_info),
self.pb_length,
self.pb_char)
pb.printAll(1)
for pkg_info in self.pkg_info:
package_name = "rhn-package-" + str(pkg_info['package_id'])
self.set_filename(self.fm.getPackagesFile(package_name))
dumper.XML_Dumper.dump_packages(self, [pkg_info])
log2email(4, "Package: %s" % package_name)
log2email(5, "Package exported to %s" % self.fm.getPackagesFile(package_name))
pb.addTo(1)
pb.printIncrement()
pb.printComplete()
log2stdout(3, "Number of packages exported: %s" % str(len(self.pkg_info)))
except Exception:
e = sys.exc_info()[1]
tbout = cStringIO.StringIO()
Traceback(mail=0, ostream=tbout, with_locals=1)
raise_with_tb(ISSError("%s caught in dump_packages." % e.__class__.__name__,
tbout.getvalue()), sys.exc_info()[2])
def dump_packages_short(self, packages=None):
try:
print("\n")
log2stdout(1, "Exporting short packages...")
pb = progress_bar.ProgressBar(self.pb_label,
self.pb_complete,
len(self.pkg_info),
self.pb_length,
self.pb_char)
pb.printAll(1)
for pkg_info in self.pkg_info:
package_name = "rhn-package-" + str(pkg_info['package_id'])
self.set_filename(self.fm.getShortPackagesFile(package_name))
dumper.XML_Dumper.dump_packages_short(self, [pkg_info])
log2email(4, "Short Package: %s" % package_name)
log2email(5, "Short Package exported to %s" % package_name)
pb.addTo(1)
pb.printIncrement()
pb.printComplete()
log2stdout(3, "Number of short packages exported: %s" % str(len(self.pkg_info)))
except Exception:
e = sys.exc_info()[1]
tbout = cStringIO.StringIO()
Traceback(mail=0, ostream=tbout, with_locals=1)
raise_with_tb(ISSError("%s caught in dump_packages_short." %
e.__class__.__name__, tbout.getvalue()), sys.exc_info()[2])
def dump_source_packages(self, packages=None):
try:
print("\n")
for pkg_info in self.src_pkg_info:
self.set_filename(self.fm.getSourcePackagesFile("rhn-source-package-" + str(pkg_info['package_id'])))
dumper.XML_Dumper.dump_source_packages(self, [pkg_info])
except Exception:
e = sys.exc_info()[1]
tbout = cStringIO.StringIO()
Traceback(mail=0, ostream=tbout, with_locals=1)
raise_with_tb(ISSError("%s caught in dump_source_packages." %
e.__class__.__name__, tbout.getvalue()), sys.exc_info()[2])
def dump_errata(self, errata=None, verify_errata=False):
try:
print("\n")
log2stdout(1, "Exporting errata...")
pb = progress_bar.ProgressBar(self.pb_label,
self.pb_complete,
len(self.errata_info),
self.pb_length,
self.pb_char)
pb.printAll(1)
for errata_info in self.errata_info:
erratum_name = "rhn-erratum-" + str(errata_info['errata_id'])
self.set_filename(self.fm.getErrataFile(erratum_name))
dumper.XML_Dumper.dump_errata(self, [errata_info])
log2email(4, "Erratum: %s" % str(errata_info['advisory-name']))
log2email(5, "Erratum exported to %s" % self.fm.getErrataFile(erratum_name))
pb.addTo(1)
pb.printIncrement()
pb.printComplete()
log2stdout(3, "Number of errata exported: %s" % str(len(self.errata_info)))
except Exception:
e = sys.exc_info()[1]
tbout = cStringIO.StringIO()
Traceback(mail=0, ostream=tbout, with_locals=1)
raise_with_tb(ISSError("%s caught in dump_errata." % e.__class__.__name__,
tbout.getvalue()), sys.exc_info()[2])
def dump_kickstart_data(self):
try:
print("\n")
log2stdout(1, "Exporting kickstart data...")
pb = progress_bar.ProgressBar(self.pb_label,
self.pb_complete,
len(self.kickstart_trees),
self.pb_length,
self.pb_char)
pb.printAll(1)
for kickstart_tree in self.kickstart_trees:
                self.set_filename(self.fm.getKickstartTreeFile(kickstart_tree['kickstart_label']))
dumper.XML_Dumper.dump_kickstartable_trees(self, [kickstart_tree])
log2email(5, "KS Data: %s" % str(kickstart_tree['kickstart_label']))
pb.addTo(1)
pb.printIncrement()
pb.printComplete()
log2stdout(3, "Amount of kickstart data exported: %s" % str(len(self.kickstart_trees)))
except Exception:
e = sys.exc_info()[1]
tbout = cStringIO.StringIO()
Traceback(mail=0, ostream=tbout, with_locals=1)
raise_with_tb(ISSError("%s caught in dump_kickstart_data." %
e.__class__.__name__, tbout.getvalue()), sys.exc_info()[2])
def dump_kickstart_files(self):
try:
print("\n")
log2stdout(1, "Exporting kickstart files...")
pb = progress_bar.ProgressBar(self.pb_label,
self.pb_complete,
len(self.kickstart_files),
self.pb_length,
self.pb_char)
pb.printAll(1)
for kickstart_file in self.kickstart_files:
# get the path to the kickstart files under the satellite's mount point
path_to_files = os.path.join(CFG.MOUNT_POINT,
kickstart_file['base-path'],
kickstart_file['relative-path'])
# Make sure the path actually exists
if not os.path.exists(path_to_files):
raise ISSError("Missing kickstart file under satellite mount-point: %s" % (path_to_files,), "")
# generate the path to the kickstart files under the export directory.
path_to_export_file = self.fm.getKickstartFileFile(
kickstart_file['label'],
kickstart_file['relative-path'])
#os.path.join(self.mp, kickstart_file['base-path'], kickstart_file['relative-path'])
if os.path.exists(path_to_export_file):
# already exists, skip ks file
continue
# Get the dirs to the file under the export directory.
dirs_to_file = os.path.split(path_to_export_file)[0]
# create the directory to the kickstart files under the export directory, if necessary.
if not os.path.exists(dirs_to_file):
os.makedirs(dirs_to_file)
try:
if self.hardlinks:
# Make hardlinks
try:
os.link(path_to_files, path_to_export_file)
except OSError:
pass
else:
# Copy file from satellite to export dir.
shutil.copyfile(path_to_files, path_to_export_file)
except IOError:
e = sys.exc_info()[1]
tbout = cStringIO.StringIO()
Traceback(mail=0, ostream=tbout, with_locals=1)
raise_with_tb(ISSError("Error: Error copying file: %s: %s" %
(path_to_files, e.__class__.__name__), tbout.getvalue()), sys.exc_info()[2])
log2email(5, "Kickstart File: %s" %
os.path.join(kickstart_file['base-path'],
kickstart_file['relative-path']))
pb.addTo(1)
pb.printIncrement()
pb.printComplete()
log2stdout(3, "Number of kickstart files exported: %s" % str(len(self.kickstart_files)))
except ISSError:
raise
except Exception:
e = sys.exc_info()[1]
tbout = cStringIO.StringIO()
Traceback(mail=0, ostream=tbout, with_locals=1)
raise_with_tb(ISSError("%s caught in dump_kickstart_files." %
e.__class__.__name__, tbout.getvalue()), sys.exc_info()[2])
# RPM and SRPM dumping code
def dump_rpms(self):
try:
print("\n")
log2stdout(1, "Exporting binary RPMs...")
pb = progress_bar.ProgressBar(self.pb_label,
self.pb_complete,
len(self.brpms),
self.pb_length,
self.pb_char)
pb.printAll(1)
for rpm in self.brpms:
# generate path to the rpms under the mount point
path_to_rpm = diskImportLib.rpmsPath("rhn-package-%s" % str(rpm['id']), self.mp)
# get the dirs to the rpm
dirs_to_rpm = os.path.split(path_to_rpm)[0]
if (not rpm['path']):
raise ISSError("Error: Missing RPM under the satellite mount point. (Package id: %s)" %
rpm['id'], "")
# get the path to the rpm from under the satellite's mountpoint
satellite_path = os.path.join(CFG.MOUNT_POINT, rpm['path'])
if not os.path.exists(satellite_path):
raise ISSError("Error: Missing RPM under the satellite mount point: %s" % (satellite_path,), "")
# create the directory for the rpm, if necessary.
if not os.path.exists(dirs_to_rpm):
os.makedirs(dirs_to_rpm)
# check if the path to rpm hardlink already exists
if os.path.exists(path_to_rpm):
continue
try:
# copy the file to the path under the mountpoint.
if self.hardlinks:
os.link(satellite_path, path_to_rpm)
else:
shutil.copyfile(satellite_path, path_to_rpm)
except IOError:
e = sys.exc_info()[1]
tbout = cStringIO.StringIO()
Traceback(mail=0, ostream=tbout, with_locals=1)
raise_with_tb(ISSError("Error: Error copying file %s: %s" %
(os.path.join(CFG.MOUNT_POINT, rpm['path']), e.__class__.__name__),
tbout.getvalue()), sys.exc_info()[2])
except OSError:
e = sys.exc_info()[1]
tbout = cStringIO.StringIO()
Traceback(mail=0, ostream=tbout, with_locals=1)
raise_with_tb(ISSError("Error: Could not make hard link %s: %s (different filesystems?)" %
(os.path.join(CFG.MOUNT_POINT, rpm['path']), e.__class__.__name__),
tbout.getvalue()), sys.exc_info()[2])
log2email(5, "RPM: %s" % rpm['path'])
pb.addTo(1)
pb.printIncrement()
pb.printComplete()
log2stdout(3, "Number of RPMs exported: %s" % str(len(self.brpms)))
except ISSError:
raise
except Exception:
e = sys.exc_info()[1]
tbout = cStringIO.StringIO()
Traceback(mail=0, ostream=tbout, with_locals=1)
raise_with_tb(ISSError("%s caught in dump_rpms." % e.__class__.__name__,
tbout.getvalue()), sys.exc_info()[2])
def get_report():
body = dumpEMAIL_LOG()
return body
def print_report():
print("")
print("REPORT:")
report_string = get_report()
sys.stdout.write(str(report_string))
def sendMail():
# Send email summary
body = dumpEMAIL_LOG()
if body:
print("+++ sending log as an email +++")
headers = {
'Subject': 'Spacewalk Management Satellite Export report from %s' % os.uname()[1],
}
#sndr = CFG.get('traceback_mail', 'rhn-satellite')
sndr = 'rhn-satellite@%s' % os.uname()[1]
rhnMail.send(headers, body, sender=sndr)
else:
print("+++ email requested, but there is nothing to send +++")
def handle_error(message, traceback):
log2stderr(-1, "\n" + message)
log2email(-1, traceback)
class ExporterMain:
def __init__(self):
initCFG('server.iss')
# pylint: disable=E1101
self.options = UI()
self.action_deps = ActionDeps(self.options)
self.action_order, self.actions = self.action_deps.get_actions()
if self.options.debug_level:
debug_level = int(self.options.debug_level)
else:
debug_level = int(CFG.DEBUG)
CFG.set("TRACEBACK_MAIL", self.options.traceback_mail or CFG.TRACEBACK_MAIL)
CFG.set("DEBUG", debug_level)
CFG.set("ISSEMAIL", self.options.email)
initEMAIL_LOG()
# This was taken straight from satsync.py.
try:
rhnSQL.initDB()
except SQLConnectError:
print('SQLERROR: There was an error connecting to the Database.')
sys.exit(-1)
except (SQLError, SQLSchemaError):
e = sys.exc_info()[1]
# An SQL error is fatal... crash and burn
exitWithTraceback(e, 'SQL ERROR during xml processing', -1)
# This was cribbed from satsync.py.
if self.options.print_configuration:
CFG.show()
sys.exit(0)
if self.options.list_channels:
self.print_list_channels(self.list_channels())
sys.exit(0)
if self.options.list_orgs:
self.print_orgs(self.list_orgs())
sys.exit(0)
# From this point on everything should assume a list of channels, so it needs to be a list
# even if there's only one entry.
if self.options.all_channels:
channel_dict = self.list_channels()
self.options.channel = []
for pc in channel_dict:
self.options.channel.append(pc)
self.options.channel.extend(channel_dict[pc])
elif self.options.channel:
if not isinstance(self.options.channel, type([])):
self.options.channel = [self.options.channel]
else:
sys.stdout.write("--channel not included!\n")
sys.exit(0)
# Same as above but for orgs
if self.options.all_orgs:
orgs = self.list_orgs()
self.options.org = []
for org in orgs:
self.options.org.append(org['id'])
elif self.options.org:
            if not isinstance(self.options.org, type([])):
self.options.org = [self.options.org]
orgs = {}
for org in self.list_orgs():
orgs[org['name']] = str(org['id'])
using_orgs = []
for org in self.options.org:
# User might have specified org name or org id, try both
if org in list(orgs.values()): # ids
using_orgs.append(org)
elif org in list(orgs.keys()): # names
using_orgs.append(orgs[org])
else:
sys.stdout.write("Org not found: %s\n" % org)
                    sys.exit(-1)
self.options.org = using_orgs
else:
self.options.org = []
self.options.org = [str(x) for x in self.options.org]
# Since everything gets dumped to a directory it wouldn't make
# much sense if it wasn't required.
if self.options.dir:
self.isos_dir = os.path.join(self.options.dir, "satellite-isos")
self.outputdir = self.options.dir
else:
sys.stdout.write("--dir not included!\n")
sys.exit(0)
if self.options.use_sync_date and self.options.use_rhn_date:
sys.stderr.write("--use-rhn-date and --use-sync-date are mutually exclusive.\n")
sys.exit(1)
elif self.options.use_sync_date:
self.options.use_rhn_date = False
else:
self.options.use_rhn_date = True
if self.options.end_date and not self.options.start_date:
sys.stderr.write("--end-date must be used with --start-date.\n")
sys.exit(1)
if self.options.end_date and len(self.options.end_date) < 8:
sys.stdout.write(_("format of %s should be at least YYYYMMDD.\n") % '--end-date')
sys.exit(1)
if self.options.start_date and len(self.options.start_date) < 8:
sys.stdout.write(_("format of %s should be at least YYYYMMDD.\n") % '--start-date')
sys.exit(1)
if self.options.start_date:
if self.options.end_date is None:
self.end_date = time.strftime("%Y%m%d%H%M%S")
else:
self.end_date = self.options.end_date.ljust(14, '0')
self.start_date = self.options.start_date.ljust(14, '0')
print("start date limit: %s" % self.start_date)
print("end date limit: %s" % self.end_date)
else:
self.start_date = None
self.end_date = None
if self.start_date and self.options.whole_errata:
self.whole_errata = self.options.whole_errata
# verify mountpoint
if os.access(self.outputdir, os.F_OK | os.R_OK | os.W_OK):
if os.path.isdir(self.outputdir):
self.dumper = Dumper(self.outputdir,
self.options.channel,
self.options.org,
self.options.hard_links,
start_date=self.start_date,
end_date=self.end_date,
use_rhn_date=self.options.use_rhn_date,
whole_errata=self.options.whole_errata)
self.actionmap = {
'arches': {'dump': self.dumper.dump_arches},
'arches-extra': {'dump': self.dumper.dump_server_group_type_server_arches},
'blacklists': {'dump': self.dumper.dump_blacklist_obsoletes},
'channel-families': {'dump': self.dumper.dump_channel_families},
'channels': {'dump': self.dumper.dump_channels},
'packages': {'dump': self.dumper.dump_packages},
'short': {'dump': self.dumper.dump_packages_short},
#'channel-pkg-short' : {'dump' : self.dumper.dump_channel_packages_short},
#'source-packages' : {'dump' : self.dumper.dump_source_packages},
'errata': {'dump': self.dumper.dump_errata},
'kickstarts': {'dump': [self.dumper.dump_kickstart_data,
self.dumper.dump_kickstart_files]},
'rpms': {'dump': self.dumper.dump_rpms},
'orgs': {'dump': self.dumper.dump_orgs},
'productnames': {'dump': self.dumper.dump_product_names},
}
else:
print("The output directory is not a directory")
sys.exit(-1)
else:
print("can't access output directory")
sys.exit(-1)
@staticmethod
def list_channels():
""" return all available channels
the returned format is dictionary containing base_label as keys and value is list
of labels of child channels
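        For example (hypothetical labels):
            {'base-channel-label': ['child-label-1', 'child-label-2']}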
"""
# The keys for channel_dict are the labels of the base channels.
# The values associated with each key is a list of the labels of
# the child channels whose parent channel is the key.
channel_dict = {}
# Grab some info on base channels. Base channels
# have parent_channel set to null.
base_channel_query = rhnSQL.Statement("""
select id, label
from rhnChannel
where parent_channel is null
""")
base_channel_data = rhnSQL.prepare(base_channel_query)
base_channel_data.execute()
base_channels = base_channel_data.fetchall_dict()
# Grab some info on child channels.
child_channel_query = rhnSQL.Statement("""
select id, label, parent_channel
from rhnChannel
where parent_channel = :id
""")
child_channel_data = rhnSQL.prepare(child_channel_query)
if base_channels:
for ch in base_channels:
base_label = ch['label']
base_id = ch['id']
# If the base channel isn't in channel_dict yet, create
# an empty list for it.
if not base_label in channel_dict:
channel_dict[base_label] = []
# grab the child channel information for this base channel.
child_channel_data.execute(id=base_id)
child_channels = child_channel_data.fetchall_dict()
# If the base channel has some child channels, add them
# to the list associated with the base channel in channel_dict.
# Organizing the labels this way makes it a lot easier to print
# out.
if child_channels:
for child in child_channels:
child_label = child['label']
channel_dict[base_label].append(child_label)
return channel_dict
@staticmethod
def print_list_channels(channel_dict):
""" channel_dict is dictionary containing base_label as keys and value is list
of labels of child channels
"""
if channel_dict:
# Print the legend.
print("Channel List:")
print("B = Base Channel")
print("C = Child Channel")
print("")
base_template = "B %s"
child_template = "C\t%s"
# Print channel information.
for pc in channel_dict.keys():
print(base_template % (pc,))
for cc in channel_dict[pc]:
print(child_template % (cc,))
print(" ")
else:
print("No Channels available for listing.")
@staticmethod
def list_orgs():
"""
Return a list of all orgs.
"""
org_query = rhnSQL.Statement("""
select id, name
from web_customer
""")
org_data = rhnSQL.prepare(org_query)
org_data.execute()
return org_data.fetchall_dict()
@staticmethod
def print_orgs(orgs):
if orgs:
print("Orgs available for export:")
for org in orgs:
print("Id: %s, Name: \'%s\'" % (org['id'], org['name']))
else:
print("No Orgs available for listing.")
def main(self):
# pylint: disable=E1101
try:
for action in self.action_order:
if self.actions[action] != 1:
continue
if not action in self.actionmap:
# If we get here there's a programming error. It means that self.action_order
# contains a action that isn't defined in self.actionmap.
sys.stderr.write("List of actions doesn't have %s.\n" % (action,))
continue
if isinstance(self.actionmap[action]['dump'], type([])):
for dmp in self.actionmap[action]['dump']:
dmp()
else:
self.actionmap[action]['dump']()
                # Now compress the dump data. Some action names differ from the
                # on-disk directory names, so remap them before walking the
                # output tree.
if action == 'rpms':
continue
elif action == 'arches-extra':
action = 'arches'
elif action == 'short':
action = 'packages_short'
elif action == 'channel-families':
action = 'channel_families'
elif action == 'kickstarts':
action = 'kickstart_trees'
elif action == 'productnames':
action = 'product_names'
os_data_dir = os.path.join(self.outputdir, action)
if not os.path.exists(os_data_dir):
continue
for fpath, _dirs, files in os.walk(os_data_dir):
for f in files:
if f.endswith(".xml") or f.endswith(".yaml"):
filepath = os.path.join(fpath, f)
compress_file(filepath)
if self.options.make_isos:
#iso_output = os.path.join(self.isos_dir, self.dump_dir)
iso_output = self.isos_dir
if not os.path.exists(iso_output):
os.makedirs(iso_output)
iss_isos.create_isos(self.outputdir, iso_output,
"rhn-export", self.start_date, self.end_date,
iso_type=self.options.make_isos)
# Generate md5sum digest file for isos
if os.path.exists(iso_output):
f = open(os.path.join(iso_output, 'MD5SUM'), 'w')
for iso_file in os.listdir(iso_output):
if self.options.make_isos != "dvds" and iso_file != "MD5SUM":
md5_val = getFileChecksum('md5', (os.path.join(iso_output, iso_file)))
md5str = "%s %s\n" % (md5_val, iso_file)
f.write(md5str)
f.close()
if self.options.email:
sendMail()
if self.options.print_report:
print_report()
except SystemExit:
sys.exit(0)
except ISSError:
isserror = sys.exc_info()[1]
            # The traceback is generated in the function where the error
            # occurred, to minimize the amount of extra noise that shows up in it.
tb = isserror.tb
msg = isserror.msg
handle_error(msg, tb)
if self.options.email:
sendMail()
if self.options.print_report:
print_report()
sys.exit(-1)
except Exception: # pylint: disable=E0012, W0703
e = sys.exc_info()[1]
# This should catch the vast majority of errors that aren't ISSErrors
tbout = cStringIO.StringIO()
Traceback(mail=0, ostream=tbout, with_locals=1)
msg = "Error: %s caught!" % e.__class__.__name__
handle_error(msg, tbout.getvalue())
if self.options.email:
sendMail()
if self.options.print_report:
print_report()
sys.exit(-1)
def compress_file(f):
"""
Gzip the given file and then remove the file.
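    E.g. compress_file('foo.xml') replaces foo.xml with foo.xml.gz.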
"""
    datafile = open(f, 'rb')
gzipper = gzip.GzipFile(f + '.gz', 'w', 9)
gzipper.write(datafile.read())
gzipper.flush()
# close opened streams
gzipper.close()
datafile.close()
    # remove the old file
os.unlink(f)
if __name__ == "__main__":
em = ExporterMain()
em.main()
|
import os, subprocess
import logging
from autotest.client import test
from autotest.client.shared import error
class perl_IO_Compress(test.test):
"""
Autotest module for testing basic functionality
of perl_IO_Compress
    @author Madhuri Appana <maappana@in.ibm.com>
"""
version = 1
nfail = 0
path = ''
def initialize(self):
"""
Sets the overall failure counter for the test.
"""
self.nfail = 0
        logging.info('\n Test initialized successfully')
def run_once(self, test_path=''):
"""
Trigger test run
"""
try:
os.environ["LTPBIN"] = "%s/shared" %(test_path)
ret_val = subprocess.Popen(['./perl-IO-Compress.sh'], cwd="%s/perl_IO_Compress" %(test_path))
ret_val.communicate()
if ret_val.returncode != 0:
self.nfail += 1
except error.CmdError, e:
self.nfail += 1
logging.error("Test Failed: %s", e)
def postprocess(self):
if self.nfail != 0:
logging.info('\n nfails is non-zero')
raise error.TestError('\nTest failed')
else:
logging.info('\n Test completed successfully ')
|
import os, sys
dirname = os.path.dirname(sys.modules[__name__].__file__)
autotest_dir = os.path.abspath(os.path.join(dirname, "../../../"))
client_dir = os.path.join(autotest_dir, "client")
sys.path.insert(0, client_dir)
import setup_modules
sys.path.pop(0)
setup_modules.setup(base_path=autotest_dir, root_module_name="autotest_lib")
|
XS_INST_NONE = 0
XS_INST_BOOT = (1 << 0)
XS_INST_LOAD = (1 << 1)
XS_POLICY_NONE = 0
XS_POLICY_ACM = (1 << 0)
ACM_LABEL_VM = (1 << 0)
ACM_LABEL_RES = (1 << 1)
XSERR_BASE = 0x1000
XSERR_SUCCESS = 0
XSERR_GENERAL_FAILURE = 1 + XSERR_BASE
XSERR_BAD_XML = 2 + XSERR_BASE # XML is wrong (not according to schema)
XSERR_XML_PROCESSING = 3 + XSERR_BASE
XSERR_POLICY_INCONSISTENT = 4 + XSERR_BASE # i.e., bootstrap name not a VM label
XSERR_FILE_ERROR = 5 + XSERR_BASE
XSERR_BAD_RESOURCE_FORMAT = 6 + XSERR_BASE # badly formatted resource
XSERR_BAD_LABEL_FORMAT = 7 + XSERR_BASE
XSERR_RESOURCE_NOT_LABELED = 8 + XSERR_BASE
XSERR_RESOURCE_ALREADY_LABELED = 9 + XSERR_BASE
XSERR_WRONG_POLICY_TYPE = 10 + XSERR_BASE
XSERR_BOOTPOLICY_INSTALLED = 11 + XSERR_BASE
XSERR_NO_DEFAULT_BOOT_TITLE = 12 + XSERR_BASE
XSERR_POLICY_LOAD_FAILED = 13 + XSERR_BASE
XSERR_POLICY_LOADED = 14 + XSERR_BASE
XSERR_POLICY_TYPE_UNSUPPORTED = 15 + XSERR_BASE
XSERR_BAD_CONFLICTSET = 16 + XSERR_BASE
XSERR_RESOURCE_IN_USE = 17 + XSERR_BASE
XSERR_BAD_POLICY_NAME = 18 + XSERR_BASE
XSERR_VERSION_PREVENTS_UPDATE = 19 + XSERR_BASE
XSERR_BAD_LABEL = 20 + XSERR_BASE
XSERR_VM_WRONG_STATE = 21 + XSERR_BASE
XSERR_POLICY_NOT_LOADED = 22 + XSERR_BASE
XSERR_RESOURCE_ACCESS = 23 + XSERR_BASE
XSERR_HV_OP_FAILED = 24 + XSERR_BASE
XSERR_BOOTPOLICY_INSTALL_ERROR = 25 + XSERR_BASE
XSERR_VM_NOT_AUTHORIZED = 26 + XSERR_BASE
XSERR_VM_IN_CONFLICT = 27 + XSERR_BASE
XSERR_POLICY_HAS_DUPLICATES = 28 + XSERR_BASE
XSERR_LAST = 28 + XSERR_BASE ## KEEP LAST
XSERR_MESSAGES = [
'',
'General Failure',
'XML is malformed',
'Error while processing XML',
'Policy has inconsistencies',
'A file access error occurred',
'The resource format is not valid',
'The label format is not valid',
    'The resource is not labeled',
    'The resource is already labeled',
'The policy type is wrong',
'The system boot policy is installed',
'Could not find the default boot title',
'Loading of the policy failed',
'The policy is loaded',
'The policy type is unsupported',
'There is a bad conflict set',
'The resource is in use',
'The policy has an invalid name',
'The version of the policy prevents an update',
'The label is bad',
    'Operation not permitted - the VM is in the wrong state',
'The policy is not loaded',
'Error accessing resource',
'Operation failed in hypervisor',
'Boot policy installation error',
'VM is not authorized to run',
'VM label conflicts with another VM',
'Duplicate labels or types in policy'
]
def xserr2string(err):
if err == XSERR_SUCCESS:
return "Success"
if err >= XSERR_GENERAL_FAILURE and \
err <= XSERR_LAST:
return XSERR_MESSAGES[err - XSERR_BASE]
return "Unknown XSERR code '%s'." % (hex(err))
ACM_POLICY_ID = 'ACM'
INVALID_POLICY_PREFIX = 'INV_'
INVALID_SSIDREF = 0xFFFFFFFF
XS_INACCESSIBLE_LABEL = '__INACCESSIBLE__'
|
import wadis.node.model.saga as saga
from wadis.node.model.fake import categoryTypeDict
from django.db.models import Q
defaultList = {'id_substance':('exact','1000021')}
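# makeQ() tightens a Django Q object for the data-source table named by
# tuple[0]: it always restricts to 'public' datasets, and when formatQ()
# reports that the caller supplied no explicit restriction on a defaultList
# field, it also applies the default substance id and a 'Primary' composition
# filter. formatQ() walks the Q tree, interpolating the table-name tuple into
# '%'-style placeholders in each (field, value) leaf.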
def makeQ(q, tuple):
defaultFlag = formatQ(q, tuple, True)
q &= Q(**{'id_%s_ds__status__exact' % tuple[0]:'public'})
if defaultFlag:
for field in defaultList:
q &= Q(**{field + '__' + defaultList[field][0]: defaultList[field][1]})
q &= Q(**{'id_%s_ds__composition__exact' % tuple[0]: 'Primary'})
return q
def formatQ(q, tuple, defaultFlag):
for k, c in enumerate(q.children):
if type(c) == Q:
defaultFlag = formatQ(c, tuple, defaultFlag)
else:
if (type(c[0]) == str and c[0][:c[0].rfind('__')] in defaultList) or (type(c[1]) == str and c[1][:c[1].rfind('__')] in defaultList):
defaultFlag = False
var1 = c[0] % tuple if type(c[0]) == str and c[0].count('%') == len(tuple) else c[0]
var2 = c[1] % tuple if type(c[1]) == str and c[1].count('%') == len(tuple) else c[1]
q.children[k] = (var1, var2)
return defaultFlag
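# The helpers below rewrite a flat (variable, operator, value, ...) restriction
# list for an internal query: each retargets the variable to an inner column
# and maps the user-supplied values (InChI strings, InChI keys, or method
# category names) to numeric ids, substituting a sentinel ('0' or -1) for
# unknown values so the restriction still parses but matches nothing.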
def inchi2Id(*restrictionTuple):
restrictions = list(restrictionTuple)
variable, operator, values = restrictions[0], restrictions[1], restrictions[2:]
variable = 'inner.id_substance'
for index, value in enumerate(values):
if value == '(' or value == ')':
continue
value = value.replace('"', '').replace("'", '')
if value in saga.SubstanceDict.byInchi:
values[index] = str(saga.SubstanceDict.byInchi[value].id_substance)
else:
values[index] = '0'
restrictions = [variable, operator] + values
return restrictions
def inchiKey2Id(*restrictionTuple):
restrictions = list(restrictionTuple)
variable, operator, values = restrictions[0], restrictions[1], restrictions[2:]
variable = 'inner.id_substance'
for index, value in enumerate(values):
if value == '(' or value == ')':
continue
value = value.replace('"', '').replace("'", '')
if value in saga.SubstanceDict.byInchiKey:
values[index] = str(saga.SubstanceDict.byInchiKey[value].id_substance)
else:
values[index] = '0'
restrictions = [variable, operator] + values
return restrictions
def methodCategory2Type(*restrictionTuple):
restrictions = list(restrictionTuple)
variable, operator, values = restrictions[0], restrictions[1], restrictions[2:]
variable = 'inner.type'
for index, value in enumerate(values):
if value == '(' or value == ')':
continue
value = value.replace('"', '').replace("'", '')
if value in categoryTypeDict:
values[index] = str(categoryTypeDict[value])
else:
values[index] = str(-1)
restrictions = [variable, operator] + values
return restrictions
|
"""
Cooperative multitasking and asynchronous I/O using generators
multitask allows Python programs to use generators (a.k.a. coroutines)
to perform cooperative multitasking and asynchronous I/O.
Applications written using multitask consist of a set of cooperating
tasks that yield to a shared task manager whenever they perform a
(potentially) blocking operation, such as I/O on a socket or getting
data from a queue. The task manager temporarily suspends the task
(allowing other tasks to run in the meantime) and then restarts it
when the blocking operation is complete. Such an approach is suitable
for applications that would otherwise have to use select() and/or
multiple threads to achieve concurrency.
The functions and classes in the multitask module allow tasks to yield
for I/O operations on sockets and file descriptors, adding/removing
data to/from queues, or sleeping for a specified interval. When
yielding, a task can also specify a timeout. If the operation for
which the task yielded has not completed after the given number of
seconds, the task is restarted, and a Timeout exception is raised at
the point of yielding.
As a very simple example, here's how one could use multitask to allow
two unrelated tasks to run concurrently:
>>> def printer(message):
... while True:
... print message
... yield
...
>>> multitask.add(printer('hello'))
>>> multitask.add(printer('goodbye'))
>>> multitask.run()
hello
goodbye
hello
goodbye
hello
goodbye
[and so on ...]
For a more useful example, here's how one could implement a
multitasking server that can handle multiple concurrent client
connections:
def listener(sock):
while True:
conn, address = (yield multitask.accept(sock))
multitask.add(client_handler(conn))
def client_handler(sock):
while True:
request = (yield multitask.recv(sock, 1024))
if not request:
break
response = handle_request(request)
yield multitask.send(sock, response)
multitask.add(listener(sock))
multitask.run()
Tasks can also yield other tasks, which allows for composition of
tasks and reuse of existing multitasking code. A child task runs
until it either completes or raises an exception. To return output to
its parent, a child task raises StopIteration, passing the output
value(s) to the StopIteration constructor. An unhandled exception
raised within a child task is propagated to its parent. For example:
>>> def parent():
... print (yield return_none())
... print (yield return_one())
... print (yield return_many())
... try:
... yield raise_exception()
... except Exception, e:
... print 'caught exception: %s' % e
...
>>> def return_none():
... yield
... # do nothing
... # or return
... # or raise StopIteration
... # or raise StopIteration(None)
...
>>> def return_one():
... yield
... raise StopIteration(1)
...
>>> def return_many():
... yield
... raise StopIteration(2, 3) # or raise StopIteration((2, 3))
...
>>> def raise_exception():
... yield
... raise RuntimeError('foo')
...
>>> multitask.add(parent())
>>> multitask.run()
None
1
(2, 3)
caught exception: foo
"""
import collections
import errno
from functools import partial
import heapq
import os
import select
import sys
import time
import types
__author__ = 'Christopher Stawarz <cstawarz@csail.mit.edu>'
__version__ = '0.2.0'
__revision__ = int('$Revision: 121 $'.split()[1])
class Timeout(Exception):
'Raised in a yielding task when an operation times out'
pass
class _ChildTask(object):
def __init__(self, parent, task):
self.parent = parent
self.task = task
def send(self, value):
return self.task.send(value)
def throw(self, type, value=None, traceback=None):
return self.task.throw(type, value, traceback)
class YieldCondition(object):
"""
Base class for objects that are yielded by a task to the task
manager and specify the condition(s) under which the task should
be restarted. Only subclasses of this class are useful to
application code.
"""
def __init__(self, timeout=None):
"""
If timeout is None, the task will be suspended indefinitely
until the condition is met. Otherwise, if the condition is
not met within timeout seconds, a Timeout exception will be
raised in the yielding task.
"""
self.task = None
self.handle_expiration = None
if timeout is None:
self.expiration = None
else:
self.expiration = time.time() + float(timeout)
def _expires(self):
return (self.expiration is not None)
class _SleepDelay(YieldCondition):
def __init__(self, seconds):
seconds = float(seconds)
if seconds <= 0.0:
raise ValueError("'seconds' must be greater than 0")
super(_SleepDelay, self).__init__(seconds)
def sleep(seconds):
"""
A task that yields the result of this function will be resumed
after the specified number of seconds have elapsed. For example:
while too_early():
yield sleep(5) # Sleep for five seconds
do_something() # Done sleeping; get back to work
"""
return _SleepDelay(seconds)
class FDReady(YieldCondition):
"""
A task that yields an instance of this class will be suspended
until a specified file descriptor is ready for I/O.
"""
def __init__(self, fd, read=False, write=False, exc=False, timeout=None):
"""
Resume the yielding task when fd is ready for reading,
writing, and/or "exceptional" condition handling. fd can be
any object accepted by select.select() (meaning an integer or
an object with a fileno() method that returns an integer).
Any exception raised by select() due to fd will be re-raised
in the yielding task.
If timeout is not None, a Timeout exception will be raised in
the yielding task if fd is not ready after timeout seconds
have elapsed.
"""
super(FDReady, self).__init__(timeout)
self.fd = (fd if _is_file_descriptor(fd) else fd.fileno())
if not (read or write or exc):
raise ValueError("'read', 'write', and 'exc' cannot all be false")
self.read = read
self.write = write
self.exc = exc
def fileno(self):
'Return the file descriptor on which the yielding task is waiting'
return self.fd
def _add_to_fdsets(self, read_fds, write_fds, exc_fds):
for add, fdset in ((self.read, read_fds),
(self.write, write_fds),
(self.exc, exc_fds)):
if add:
fdset.add(self)
def _remove_from_fdsets(self, read_fds, write_fds, exc_fds):
for fdset in (read_fds, write_fds, exc_fds):
fdset.discard(self)
def _is_file_descriptor(fd):
return isinstance(fd, (int, long))
def readable(fd, timeout=None):
"""
A task that yields the result of this function will be resumed
when fd is readable. If timeout is not None, a Timeout exception
will be raised in the yielding task if fd is not readable after
timeout seconds have elapsed. For example:
try:
yield readable(sock, timeout=5)
data = sock.recv(1024)
except Timeout:
# No data after 5 seconds
"""
return FDReady(fd, read=True, timeout=timeout)
def writable(fd, timeout=None):
"""
A task that yields the result of this function will be resumed
when fd is writable. If timeout is not None, a Timeout exception
will be raised in the yielding task if fd is not writable after
timeout seconds have elapsed. For example:
try:
yield writable(sock, timeout=5)
nsent = sock.send(data)
except Timeout:
# Can't send after 5 seconds
"""
return FDReady(fd, write=True, timeout=timeout)
class FDAction(FDReady):
"""
A task that yields an instance of this class will be suspended
until an I/O operation on a specified file descriptor is complete.
"""
def __init__(self, fd, func, args=(), kwargs={}, read=False, write=False,
exc=False):
"""
Resume the yielding task when fd is ready for reading,
writing, and/or "exceptional" condition handling. fd can be
any object accepted by select.select() (meaning an integer or
an object with a fileno() method that returns an integer).
Any exception raised by select() due to fd will be re-raised
in the yielding task.
The value of the yield expression will be the result of
calling func with the specified args and kwargs (which
presumably performs a read, write, or other I/O operation on
fd). If func raises an exception, it will be re-raised in the
yielding task. Thus, FDAction is really just a convenient
subclass of FDReady that requests that the task manager
perform an I/O operation on the calling task's behalf.
If kwargs contains a timeout argument that is not None, a
Timeout exception will be raised in the yielding task if fd is
not ready after timeout seconds have elapsed.
"""
timeout = kwargs.pop('timeout', None)
super(FDAction, self).__init__(fd, read, write, exc, timeout)
self.func = func
self.args = args
self.kwargs = kwargs
def _eval(self):
return self.func(*(self.args), **(self.kwargs))
def read(fd, *args, **kwargs):
"""
A task that yields the result of this function will be resumed
when fd is readable, and the value of the yield expression will be
the result of reading from fd. If a timeout keyword is given and
is not None, a Timeout exception will be raised in the yielding
task if fd is not readable after timeout seconds have elapsed.
Other arguments will be passed to the read function (os.read() if
fd is an integer, fd.read() otherwise). For example:
try:
data = (yield read(fd, 1024, timeout=5))
except Timeout:
# No data after 5 seconds
"""
func = (partial(os.read, fd) if _is_file_descriptor(fd) else fd.read)
return FDAction(fd, func, args, kwargs, read=True)
def readline(fd, *args, **kwargs):
"""
A task that yields the result of this function will be resumed
when fd is readable, and the value of the yield expression will be
the result of reading a line from fd. If a timeout keyword is
given and is not None, a Timeout exception will be raised in the
yielding task if fd is not readable after timeout seconds have
elapsed. Other arguments will be passed to fd.readline(). For
example:
try:
data = (yield readline(fd, timeout=5))
except Timeout:
# No data after 5 seconds
"""
return FDAction(fd, fd.readline, args, kwargs, read=True)
def write(fd, *args, **kwargs):
"""
A task that yields the result of this function will be resumed
when fd is writable, and the value of the yield expression will be
the result of writing to fd. If a timeout keyword is given and is
not None, a Timeout exception will be raised in the yielding task
if fd is not writable after timeout seconds have elapsed. Other
arguments will be passed to the write function (os.write() if fd
is an integer, fd.write() otherwise). For example:
try:
nbytes = (yield write(fd, data, timeout=5))
except Timeout:
# Can't write after 5 seconds
"""
func = (partial(os.write, fd) if _is_file_descriptor(fd) else fd.write)
return FDAction(fd, func, args, kwargs, write=True)
def accept(sock, *args, **kwargs):
"""
A task that yields the result of this function will be resumed
when sock is readable, and the value of the yield expression will
be the result of accepting a new connection on sock. If a timeout
keyword is given and is not None, a Timeout exception will be
raised in the yielding task if sock is not readable after timeout
seconds have elapsed. Other arguments will be passed to
sock.accept(). For example:
try:
conn, address = (yield accept(sock, timeout=5))
except Timeout:
# No connections after 5 seconds
"""
return FDAction(sock, sock.accept, args, kwargs, read=True)
def recv(sock, *args, **kwargs):
"""
A task that yields the result of this function will be resumed
when sock is readable, and the value of the yield expression will
be the result of receiving from sock. If a timeout keyword is
given and is not None, a Timeout exception will be raised in the
yielding task if sock is not readable after timeout seconds have
elapsed. Other arguments will be passed to sock.recv(). For
example:
try:
data = (yield recv(sock, 1024, timeout=5))
except Timeout:
# No data after 5 seconds
"""
return FDAction(sock, sock.recv, args, kwargs, read=True)
def recvfrom(sock, *args, **kwargs):
"""
A task that yields the result of this function will be resumed
when sock is readable, and the value of the yield expression will
be the result of receiving from sock. If a timeout keyword is
given and is not None, a Timeout exception will be raised in the
yielding task if sock is not readable after timeout seconds have
elapsed. Other arguments will be passed to sock.recvfrom(). For
example:
try:
data, address = (yield recvfrom(sock, 1024, timeout=5))
except Timeout:
# No data after 5 seconds
"""
return FDAction(sock, sock.recvfrom, args, kwargs, read=True)
def send(sock, *args, **kwargs):
"""
A task that yields the result of this function will be resumed
when sock is writable, and the value of the yield expression will
be the result of sending to sock. If a timeout keyword is given
and is not None, a Timeout exception will be raised in the
yielding task if sock is not writable after timeout seconds have
    elapsed. Other arguments will be passed to sock.send(). For
example:
try:
nsent = (yield send(sock, data, timeout=5))
except Timeout:
# Can't send after 5 seconds
"""
return FDAction(sock, sock.send, args, kwargs, write=True)
def sendto(sock, *args, **kwargs):
"""
A task that yields the result of this function will be resumed
when sock is writable, and the value of the yield expression will
be the result of sending to sock. If a timeout keyword is given
and is not None, a Timeout exception will be raised in the
yielding task if sock is not writable after timeout seconds have
    elapsed. Other arguments will be passed to sock.sendto().
For example:
try:
nsent = (yield sendto(sock, data, address, timeout=5))
except Timeout:
# Can't send after 5 seconds
"""
return FDAction(sock, sock.sendto, args, kwargs, write=True)
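# Illustrative sketch (not part of the original module): an echo task built
# from the socket helpers above. `sock` is assumed to be a connected TCP
# socket; the task exits when the peer closes the connection, and a Timeout
# is raised after 30 idle seconds.
def _demo_echo(sock):
    while True:
        data = (yield recv(sock, 1024, timeout=30))
        if not data:
            break
        yield send(sock, data)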
class Queue(object):
"""
A multi-producer, multi-consumer FIFO queue (similar to
Queue.Queue) that can be used for exchanging data between tasks
"""
def __init__(self, contents=(), maxsize=0):
"""
Create a new Queue instance. contents is a sequence (empty by
default) containing the initial contents of the queue. If
maxsize is greater than 0, the queue will hold a maximum of
maxsize items, and put() will block until space is available
in the queue.
"""
self.maxsize = int(maxsize)
self._queue = collections.deque(contents)
def __len__(self):
'Return the number of items in the queue'
return len(self._queue)
def _get(self):
return self._queue.popleft()
def _put(self, item):
self._queue.append(item)
    def empty(self):
        'Return True if the queue is empty, False otherwise'
        return (len(self) == 0)
    def full(self):
        'Return True if the queue is full, False otherwise'
        return ((len(self) >= self.maxsize) if (self.maxsize > 0) else False)
def get(self, timeout=None):
"""
A task that yields the result of this method will be resumed
when an item is available in the queue, and the value of the
yield expression will be the item. If timeout is not None, a
Timeout exception will be raised in the yielding task if an
item is not available after timeout seconds have elapsed. For
example:
try:
item = (yield queue.get(timeout=5))
except Timeout:
# No item available after 5 seconds
"""
return _QueueAction(self, timeout=timeout)
def put(self, item, timeout=None):
"""
A task that yields the result of this method will be resumed
when item has been added to the queue. If timeout is not
None, a Timeout exception will be raised in the yielding task
if no space is available after timeout seconds have elapsed.
For example:
try:
yield queue.put(item, timeout=5)
except Timeout:
# No space available after 5 seconds
"""
return _QueueAction(self, item, timeout=timeout)
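# Illustrative sketch (not part of the original module): a minimal
# producer/consumer pair exchanging data through the Queue above. The
# `_demo_*` names are hypothetical.
def _demo_producer(queue, items):
    for item in items:
        yield queue.put(item)
def _demo_consumer(queue, count):
    for _ in xrange(count):
        item = (yield queue.get(timeout=5))
        print 'consumed: %s' % (item,)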
class _QueueAction(YieldCondition):
NO_ITEM = object()
def __init__(self, queue, item=NO_ITEM, timeout=None):
super(_QueueAction, self).__init__(timeout)
if not isinstance(queue, Queue):
raise TypeError("'queue' must be a Queue instance")
self.queue = queue
self.item = item
class SmartQueue(object):
"""
A multi-producer, multi-consumer FIFO queue (similar to
Queue.Queue) that can be used for exchanging data between tasks.
The difference with Queue is that this implements filtering criteria
on get and allows multiple get to be signalled for the same put.
On the downside, this uses list instead of deque and has lower
performance.
"""
def __init__(self, contents=(), maxsize=0):
"""
Create a new Queue instance. contents is a sequence (empty by
default) containing the initial contents of the queue. If
maxsize is greater than 0, the queue will hold a maximum of
maxsize items, and put() will block until space is available
in the queue.
"""
self.maxsize = int(maxsize)
self._pending = list(contents)
def __len__(self):
'Return the number of items in the queue'
return len(self._pending)
def _get(self, criteria=None):
#self._pending = filter(lambda x: x[1]<=now, self._pending) # remove expired ones
if criteria:
found = filter(lambda x: criteria(x), self._pending) # check any matching criteria
if found:
self._pending.remove(found[0])
return found[0]
else:
return None
else:
return self._pending.pop(0) if self._pending else None
def _put(self, item):
self._pending.append(item)
    def empty(self):
        'Return True if the queue is empty, False otherwise'
        return (len(self) == 0)
    def full(self):
        'Return True if the queue is full, False otherwise'
        return ((len(self) >= self.maxsize) if (self.maxsize > 0) else False)
def get(self, timeout=None, criteria=None):
"""
A task that yields the result of this method will be resumed
when an item is available in the queue and the item matches the
given criteria (a function, usually lambda), and the value of the
yield expression will be the item. If timeout is not None, a
Timeout exception will be raised in the yielding task if an
item is not available after timeout seconds have elapsed. For
example:
try:
            item = (yield queue.get(timeout=5, criteria=lambda x: x.name == 'kundan'))
except Timeout:
# No item available after 5 seconds
"""
return _SmartQueueAction(self, timeout=timeout, criteria=criteria)
def put(self, item, timeout=None):
"""
A task that yields the result of this method will be resumed
when item has been added to the queue. If timeout is not
None, a Timeout exception will be raised in the yielding task
if no space is available after timeout seconds have elapsed.
        TODO: Otherwise, if space is available, the timeout should specify
        how long to keep the item in the queue before discarding it if it
        is not fetched by a get. In that case no exception is raised.
For example:
try:
yield queue.put(item, timeout=5)
except Timeout:
# No space available after 5 seconds
"""
return _SmartQueueAction(self, item, timeout=timeout)
class _SmartQueueAction(YieldCondition):
NO_ITEM = object()
def __init__(self, queue, item=NO_ITEM, timeout=None, criteria=None):
super(_SmartQueueAction, self).__init__(timeout)
if not isinstance(queue, SmartQueue):
raise TypeError("'queue' must be a SmartQueue instance")
self.queue = queue
self.item = item
self.criteria = criteria
        self.expires = (time.time() + timeout) if (timeout is not None) else 0
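# Illustrative sketch (not part of the original module): a criteria-based
# get on SmartQueue. Only items matching the predicate wake this task; the
# dict key and value used here are hypothetical.
def _demo_filtered_get(queue):
    msg = (yield queue.get(timeout=5,
                           criteria=lambda x: x.get('type') == 'invite'))
    print 'got invite: %s' % (msg,)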
class TaskManager(object):
"""
Engine for running a set of cooperatively-multitasking tasks
within a single Python thread
"""
def __init__(self):
"""
Create a new TaskManager instance. Generally, there will only
be one of these per Python process. If you want to run two
existing instances simultaneously, merge them first, then run
one or the other.
"""
self._queue = collections.deque()
self._read_waits = set()
self._write_waits = set()
self._exc_waits = set()
self._queue_waits = collections.defaultdict(self._double_deque)
self._timeouts = []
@staticmethod
def _double_deque():
return (collections.deque(), collections.deque())
def merge(self, other):
"""
Merge this TaskManager with another. After the merge, the two
objects share the same (merged) internal data structures, so
either can be used to manage the combined task set.
"""
if not isinstance(other, TaskManager):
raise TypeError("'other' must be a TaskManager instance")
# Merge the data structures
self._queue.extend(other._queue)
self._read_waits |= other._read_waits
self._write_waits |= other._write_waits
self._exc_waits |= other._exc_waits
self._queue_waits.update(other._queue_waits)
self._timeouts.extend(other._timeouts)
heapq.heapify(self._timeouts)
# Make other reference the merged data structures. This is
# necessary because other's tasks may reference and use other
# (e.g. to add a new task in response to an event).
other._queue = self._queue
other._read_waits = self._read_waits
other._write_waits = self._write_waits
other._exc_waits = self._exc_waits
other._queue_waits = self._queue_waits
other._timeouts = self._timeouts
def add(self, task):
'Add a new task (i.e. a generator instance) to the run queue'
if not isinstance(task, types.GeneratorType):
raise TypeError("'task' must be a generator")
self._enqueue(task)
def _enqueue(self, task, input=None, exc_info=()):
self._queue.append((task, input, exc_info))
def run(self):
"""
Call run_next() repeatedly until there are no tasks that are
currently runnable, waiting for I/O, or waiting to time out.
Note that this method can block indefinitely (e.g. if there
are only I/O waits and no timeouts). If this is unacceptable,
use run_next() instead.
"""
while self.has_runnable() or self.has_io_waits() or self.has_timeouts():
self.run_next()
    def has_runnable(self):
        """
        Return True if there are runnable tasks in the queue, False
        otherwise
        """
        return bool(self._queue)
    def has_io_waits(self):
        """
        Return True if there are tasks waiting for I/O, False
        otherwise
        """
        return bool(self._read_waits or self._write_waits or self._exc_waits)
    def has_timeouts(self):
        """
        Return True if there are tasks with pending timeouts, False
        otherwise
        """
        return bool(self._timeouts)
def run_next(self, timeout=None):
"""
Perform one iteration of the run cycle: check whether any
pending I/O operations can be performed, check whether any
timeouts have expired, then run all currently runnable tasks.
The timeout argument specifies the maximum time to wait for
some task to become runnable. If timeout is None and there
are no currently runnable tasks, but there are tasks waiting
to perform I/O or time out, then this method will block until
at least one of the waiting tasks becomes runnable. To
prevent this method from blocking indefinitely, use timeout to
specify the maximum number of seconds to wait.
If there are runnable tasks in the queue when run_next() is
called, then it will check for I/O readiness using a
non-blocking call to select() (i.e. a poll), and only
already-expired timeouts will be handled. This ensures both
that the task manager is never idle when tasks can be run and
that tasks waiting for I/O never starve.
"""
while self.has_io_waits():
            if self._handle_io_waits(self._fix_run_timeout(timeout)) or \
                    self.has_runnable():
                break
if self.has_timeouts():
self._handle_timeouts(self._fix_run_timeout(timeout))
# Run all tasks currently in the queue
while len(self._queue) > 0:
task, input, exc_info = self._queue.popleft()
try:
if exc_info:
output = task.throw(*exc_info)
else:
output = task.send(input)
except StopIteration, e:
if isinstance(task, _ChildTask):
if not e.args:
output = None
elif len(e.args) == 1:
output = e.args[0]
else:
output = e.args
self._enqueue(task.parent, input=output)
except:
if isinstance(task, _ChildTask):
# Propagate exception to parent
self._enqueue(task.parent, exc_info=sys.exc_info())
else:
# No parent task, so just die
raise
else:
self._handle_task_output(task, output)
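    # Illustrative sketch (not part of the original module): driving the
    # manager manually with run_next() so the caller can interleave other
    # work between iterations.
    #     t = TaskManager()
    #     t.add(some_task())          # `some_task` is hypothetical
    #     while t.has_runnable() or t.has_io_waits() or t.has_timeouts():
    #         t.run_next(timeout=0.1)
    #         do_other_work()         # hypothetical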
def _fix_run_timeout(self, timeout):
if self.has_runnable():
# Don't block if there are tasks in the queue
timeout = 0.0
elif self.has_timeouts():
# If there are timeouts, block only until the first expiration
expiration_timeout = max(0.0, self._timeouts[0][0] - time.time())
if (timeout is None) or (timeout > expiration_timeout):
timeout = expiration_timeout
return timeout
def _handle_io_waits(self, timeout):
# The error handling here is (mostly) borrowed from Twisted
try:
read_ready, write_ready, exc_ready = \
select.select(self._read_waits,
self._write_waits,
self._exc_waits,
timeout)
except (TypeError, ValueError):
self._remove_bad_file_descriptors()
return False
except (select.error, IOError), err:
if err[0] == errno.EINTR:
return False
elif ((err[0] == errno.EBADF) or
((sys.platform == 'win32') and
(err[0] == 10038))): # WSAENOTSOCK
self._remove_bad_file_descriptors()
return False
else:
# Not an error we can handle, so die
raise
else:
for fd in set(read_ready + write_ready + exc_ready):
try:
input = (fd._eval() if isinstance(fd, FDAction) else None)
self._enqueue(fd.task, input=input)
except:
self._enqueue(fd.task, exc_info=sys.exc_info())
fd._remove_from_fdsets(self._read_waits,
self._write_waits,
self._exc_waits)
if fd._expires():
self._remove_timeout(fd)
return True
def _remove_bad_file_descriptors(self):
for fd in (self._read_waits | self._write_waits | self._exc_waits):
try:
select.select([fd], [fd], [fd], 0.0)
except:
# TODO: do not enqueue the exception (socket.error) so that it does not crash
# when closing an already closed socket. See rtmplite issue #28
# self._enqueue(fd.task, exc_info=sys.exc_info())
fd._remove_from_fdsets(self._read_waits,
self._write_waits,
self._exc_waits)
if fd._expires():
self._remove_timeout(fd)
def _add_timeout(self, item, handler):
item.handle_expiration = handler
heapq.heappush(self._timeouts, (item.expiration, item))
def _remove_timeout(self, item):
self._timeouts.remove((item.expiration, item))
heapq.heapify(self._timeouts)
def _handle_timeouts(self, timeout):
if (not self.has_runnable()) and (timeout > 0.0):
time.sleep(timeout)
current_time = time.time()
while self._timeouts and (self._timeouts[0][0] <= current_time):
item = heapq.heappop(self._timeouts)[1]
if isinstance(item, _SleepDelay):
self._enqueue(item.task)
else:
self._enqueue(item.task, exc_info=(Timeout,))
item.handle_expiration()
def _handle_task_output(self, task, output):
if isinstance(output, types.GeneratorType):
self._enqueue(_ChildTask(task, output))
elif isinstance(output, YieldCondition):
output.task = task
if isinstance(output, _SleepDelay):
self._add_timeout(output, None)
elif isinstance(output, FDReady):
self._handle_fdready(task, output)
elif isinstance(output, _QueueAction):
self._handle_queue_action(task, output)
elif isinstance(output, _SmartQueueAction):
self._handle_smart_queue_action(task, output)
else:
# Return any other output as input and send task to
# end of queue
self._enqueue(task, input=output)
def _handle_fdready(self, task, output):
output._add_to_fdsets(self._read_waits,
self._write_waits,
self._exc_waits)
if output._expires():
self._add_timeout(output,
(lambda:
output._remove_from_fdsets(self._read_waits,
self._write_waits,
self._exc_waits)))
def _handle_queue_action(self, task, output):
get_waits, put_waits = self._queue_waits[output.queue]
if output.item is output.NO_ITEM:
# Action is a get
if output.queue.empty():
get_waits.append(output)
if output._expires():
self._add_timeout(output,
(lambda: get_waits.remove(output)))
else:
item = output.queue._get()
self._enqueue(task, input=item)
if put_waits:
action = put_waits.popleft()
output.queue._put(action.item)
self._enqueue(action.task)
if action._expires():
self._remove_timeout(action)
else:
# Action is a put
if output.queue.full():
put_waits.append(output)
if output._expires():
self._add_timeout(output,
(lambda: put_waits.remove(output)))
else:
output.queue._put(output.item)
self._enqueue(task)
if get_waits:
action = get_waits.popleft()
item = output.queue._get()
self._enqueue(action.task, input=item)
if action._expires():
self._remove_timeout(action)
def _handle_smart_queue_action(self, task, output):
get_waits, put_waits = self._queue_waits[output.queue]
if output.item is output.NO_ITEM:
# Action is a get
item = output.queue._get(criteria=output.criteria)
if item is None:
get_waits.append(output)
if output._expires():
self._add_timeout(output,
(lambda: get_waits.remove(output)))
else:
self._enqueue(task, input=item)
if put_waits:
action = put_waits.popleft()
output.queue._put(action.item)
self._enqueue(action.task)
if action._expires():
self._remove_timeout(action)
else:
# Action is a put
if output.queue.full():
put_waits.append(output)
if output._expires():
self._add_timeout(output,
(lambda: put_waits.remove(output)))
else:
output.queue._put(output.item)
self._enqueue(task)
if get_waits:
actions = []
for action in get_waits:
item = output.queue._get(criteria=action.criteria)
if item is not None:
actions.append((action, item))
for action,item in actions:
get_waits.remove(action)
self._enqueue(action.task, input=item)
if action._expires():
self._remove_timeout(action)
_default_task_manager = None
def get_default_task_manager():
'Return the default TaskManager instance'
global _default_task_manager
if _default_task_manager is None:
_default_task_manager = TaskManager()
return _default_task_manager
def add(task):
'Add a task to the default TaskManager instance'
get_default_task_manager().add(task)
def run():
'Run the default TaskManager instance'
get_default_task_manager().run()
if __name__ == '__main__':
if sys.platform == 'win32':
# Make sure WSAStartup() is called
import socket
def printer(name):
for i in xrange(1, 4):
print '%s:\t%d' % (name, i)
yield
t = TaskManager()
t.add(printer('first'))
t.add(printer('second'))
t.add(printer('third'))
queue = Queue()
def receiver():
print 'receiver started'
print 'receiver received: %s' % (yield queue.get())
print 'receiver finished'
def sender():
print 'sender started'
yield queue.put('from sender')
print 'sender finished'
def bad_descriptor():
print 'bad_descriptor running'
try:
yield readable(12)
except:
print 'exception in bad_descriptor:', sys.exc_info()[1]
def sleeper():
print 'sleeper started'
yield sleep(1)
print 'sleeper finished'
def timeout_immediately():
print 'timeout_immediately running'
try:
yield Queue().get(timeout=0)
except Timeout:
print 'timeout_immediately timed out'
t2 = TaskManager()
t2.add(receiver())
t2.add(bad_descriptor())
t2.add(sender())
t2.add(sleeper())
t2.add(timeout_immediately())
def parent():
print 'child returned: %s' % ((yield child()),)
try:
yield child(raise_exc=True)
except:
print 'exception in child:', sys.exc_info()[1]
def child(raise_exc=False):
yield
if raise_exc:
raise RuntimeError('foo')
raise StopIteration(1, 2, 3)
t3 = TaskManager()
t3.add(parent())
t.merge(t2)
t.merge(t3)
t.run()
assert not(t.has_runnable() or t.has_io_waits() or t.has_timeouts())
|
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "todolist.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
|
import k8s
import os
import re
import socket
import subprocess
import sys
import tempfile
def get_service_endpoint(url):
s = api.get(url)
return "%s:%u" % (s.spec.portalIP, s.spec.ports[0].port)
def resolve_values(t, x):
parameters = {p.name: p.get("value", None) for p in t.parameters}
    return re.sub(r"\${([^}]+)}", lambda m: parameters[m.group(1)], x)
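# Example (hypothetical values): with a template parameter NAME whose value
# is "demo", resolve_values(t, "image-${NAME}:latest") returns
# "image-demo:latest".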
def system(cmd, check=True, **kwargs):
print >>sys.stderr, "+ " + cmd
if check:
subprocess.check_call(cmd, shell=True, **kwargs)
else:
subprocess.call(cmd, shell=True, **kwargs)
def download_referenced_images():
images = set()
for t in oapi.get("/namespaces/openshift/templates")._items:
for o in t.objects:
if o.kind == "DeploymentConfig":
                # collect container image names, unless the container is
                # updated via an imageChange trigger
for c in o.spec.template.spec.containers:
for tr in o.spec.triggers:
if "imageChangeParams" in tr and \
c.name in tr.imageChangeParams.containerNames:
break
else:
images.add(resolve_values(t, c.image))
c.imagePullPolicy = "IfNotPresent"
t.kind = "Template"
t.apiVersion = "v1"
oapi.put("/namespaces/openshift/templates/" + t.metadata.name, t)
for i in images:
system("docker pull " + i)
def download_referenced_images_imagestreams(repo):
images = set()
istrmap = {}
for istr in oapi.get("/namespaces/openshift/imagestreams")._items:
for t in istr.spec.tags:
srctag = t.name
if "_from" in t and t._from.kind == "ImageStreamTag":
srctag = t._from.name
istrmap[istr.metadata.name + ":" + t.name] = istr.spec.dockerImageRepository + ":" + srctag
for t in oapi.get("/namespaces/openshift/templates")._items:
for o in t.objects:
if o.kind == "DeploymentConfig":
                # collect container images updated via an imageChange
                # trigger, if the trigger references the openshift namespace
for tr in o.spec.triggers:
if "imageChangeParams" in tr:
oo = tr.imageChangeParams._from
if oo.kind == "ImageStreamTag" and "namespace" in oo \
and oo.namespace == "openshift":
images.add(istrmap[resolve_values(t, oo.name)])
elif o["kind"] == "BuildConfig":
                # collect builder images from the openshift namespace
oo = o.spec.strategy.sourceStrategy._from
if oo.kind == "ImageStreamTag" and oo.namespace == "openshift":
images.add(istrmap[resolve_values(t, oo.name)])
for i in images:
newi = repo + "/" + i.split("/", 1)[1]
if i != newi:
if os.path.exists(i.split("/", 1)[1].split(":")[0]):
system("docker build -t " + newi + " " + i.split("/", 1)[1].split(":")[0])
system("docker push " + newi)
system("docker rmi " + newi)
system("docker rmi " + i, False)
else:
system("docker pull " + i)
system("docker tag " + i + " " + newi)
system("docker push " + newi)
system("docker rmi " + newi)
system("docker rmi " + i, False)
for im in oapi.get("/images")._items:
oapi.delete("/images/" + im.metadata.name)
for istr in oapi.get("/namespaces/openshift/imagestreams")._items:
istr.kind = "ImageStream"
istr.apiVersion = "v1"
istr.metadata = k8s.AttrDict({"name": istr.metadata.name,
"annotations": k8s.AttrDict({"openshift.io/image.insecureRepository": "true"})})
istr.spec.dockerImageRepository = repo + "/" + istr.spec.dockerImageRepository.split("/", 1)[1]
del istr.status
oapi.delete("/namespaces/openshift/imagestreams/" + istr.metadata.name)
oapi.post("/namespaces/openshift/imagestreams", istr)
def download_git_repos():
hostname = socket.gethostname()
uris = {}
for t in oapi.get("/namespaces/openshift/templates")._items:
for o in t.objects:
if o.kind == "BuildConfig":
uri = resolve_values(t, o.spec.source.git.uri)
if uri and not uri.startswith("git://" + hostname):
uris[uri] = "git://%s/%s" % (hostname,
uri.split("://", 1)[1])
for uri in uris:
print uri
root = "/var/lib/git/" + uri.split("://", 1)[1]
if not os.path.exists(root):
system("git clone --bare " + uri + " " + root)
system("chown -R nobody:nobody /var/lib/git")
for t in oapi.get("/namespaces/openshift/templates")._items:
for o in t.objects:
if o.kind == "BuildConfig":
                m = re.match(r"^\${([^}]+)}$", o.spec.source.git.uri)
if not m:
                    raise Exception("BuildConfig git URI is not a template "
                                    "parameter reference: " +
                                    o.spec.source.git.uri)
for p in t.parameters:
if p.name == m.group(1) and "value" in p and \
not p.value.startswith("git://" + hostname):
p.value = uris[p.value]
if o.spec.strategy.type != "Source":
                    raise Exception("unsupported build strategy: " +
                                    o.spec.strategy.type)
env = o.spec.strategy.sourceStrategy.get("env", [])
env = [x for x in env if x.name not in ["http_proxy", "https_proxy"]]
env.append(k8s.AttrDict({"name": "http_proxy",
"value": "http://%s:8080/" % hostname}))
env.append(k8s.AttrDict({"name": "https_proxy",
"value": "http://%s:8080/" % hostname}))
o.spec.strategy.sourceStrategy.env = env
t.kind = "Template"
t.apiVersion = "v1"
oapi.put("/namespaces/openshift/templates/" + t.metadata.name, t)
def main():
url = "https://openshift.example.com:8443"
cert = ("/etc/openshift/master/openshift-master.crt",
"/etc/openshift/master/openshift-master.key")
global api, oapi
api = k8s.API(url + "/api/v1", cert)
oapi = k8s.API(url + "/oapi/v1", cert)
ep = get_service_endpoint("/namespaces/default/services/image-registry")
download_referenced_images()
download_referenced_images_imagestreams(ep)
download_git_repos()
if __name__ == "__main__":
main()
|
DOCUMENTATION = '''
---
module: nxos_vrf
version_added: "2.1"
short_description: Manages global VRF configuration.
description:
- Manages global VRF configuration.
extends_documentation_fragment: nxos
author:
- Jason Edelman (@jedelman8)
- Gabriele Gerbino (@GGabriele)
notes:
- Cisco NX-OS creates the default VRF by itself. Therefore,
you're not allowed to use default as I(vrf) name in this module.
    - C(vrf) name must be no longer than 32 characters.
    - VRF names are not case sensitive in NX-OS. However, the name is stored
      exactly as entered by the user and will not be changed again
      unless the VRF is removed and re-created. For example, C(vrf=NTC) will
      create a VRF named NTC, but running the module again with C(vrf=ntc)
      will not cause a configuration change.
options:
vrf:
description:
- Name of VRF to be managed.
required: true
admin_state:
description:
- Administrative state of the VRF.
required: false
default: up
choices: ['up','down']
vni:
description:
- Specify virtual network identifier. Valid values are Integer
or keyword 'default'.
required: false
default: null
version_added: "2.2"
    rd:
description:
- VPN Route Distinguisher (RD). Valid values are a string in
one of the route-distinguisher formats (ASN2:NN, ASN4:NN, or
IPV4:NN); the keyword 'auto', or the keyword 'default'.
required: false
default: null
version_added: "2.2"
state:
description:
- Manages desired state of the resource.
required: false
default: present
choices: ['present','absent']
description:
description:
- Description of the VRF.
required: false
default: null
'''
EXAMPLES = '''
- nxos_vrf: vrf=ntc username="{{ un }}" password="{{ pwd }}" host="{{ inventory_hostname }}"
'''
RETURN = '''
proposed:
description: k/v pairs of parameters passed into module
returned: always
type: dict
sample: {"admin_state": "Up", "description": "Test test",
"vrf": "ntc"}
existing:
    description: k/v pairs of existing vrf
    returned: always
    type: dict
    sample: {"admin_state": "Up", "description": "Old test",
             "vrf": "old_ntc"}
end_state:
description: k/v pairs of vrf info after module execution
returned: always
type: dict
sample: {"admin_state": "Up", "description": "Test test",
"vrf": "ntc"}
updates:
description: commands sent to the device
returned: always
type: list
sample: ["vrf context ntc", "shutdown"]
changed:
description: check to see if a change was made on the device
returned: always
type: boolean
sample: true
'''
import json
import collections
import re
from ansible.module_utils.basic import get_exception
from ansible.module_utils.netcfg import NetworkConfig, ConfigLine
from ansible.module_utils.shell import ShellError
try:
from ansible.module_utils.nxos import get_module
except ImportError:
from ansible.module_utils.nxos import NetworkModule
def to_list(val):
if isinstance(val, (list, tuple)):
return list(val)
elif val is not None:
return [val]
else:
return list()
class CustomNetworkConfig(NetworkConfig):
def expand_section(self, configobj, S=None):
if S is None:
S = list()
S.append(configobj)
for child in configobj.children:
if child in S:
continue
self.expand_section(child, S)
return S
def get_object(self, path):
for item in self.items:
if item.text == path[-1]:
parents = [p.text for p in item.parents]
if parents == path[:-1]:
return item
def to_block(self, section):
return '\n'.join([item.raw for item in section])
def get_section(self, path):
try:
section = self.get_section_objects(path)
return self.to_block(section)
except ValueError:
return list()
def get_section_objects(self, path):
if not isinstance(path, list):
path = [path]
obj = self.get_object(path)
if not obj:
raise ValueError('path does not exist in config')
return self.expand_section(obj)
def add(self, lines, parents=None):
"""Adds one or lines of configuration
"""
ancestors = list()
offset = 0
obj = None
## global config command
if not parents:
for line in to_list(lines):
item = ConfigLine(line)
item.raw = line
if item not in self.items:
self.items.append(item)
else:
for index, p in enumerate(parents):
try:
i = index + 1
obj = self.get_section_objects(parents[:i])[0]
ancestors.append(obj)
except ValueError:
# add parent to config
offset = index * self.indent
obj = ConfigLine(p)
obj.raw = p.rjust(len(p) + offset)
if ancestors:
obj.parents = list(ancestors)
ancestors[-1].children.append(obj)
self.items.append(obj)
ancestors.append(obj)
# add child objects
for line in to_list(lines):
# check if child already exists
for child in ancestors[-1].children:
if child.text == line:
break
else:
offset = len(parents) * self.indent
item = ConfigLine(line)
item.raw = line.rjust(len(line) + offset)
item.parents = ancestors
ancestors[-1].children.append(item)
self.items.append(item)
def get_network_module(**kwargs):
try:
return get_module(**kwargs)
except NameError:
return NetworkModule(**kwargs)
def get_config(module, include_defaults=False):
config = module.params['config']
if not config:
try:
config = module.get_config()
except AttributeError:
defaults = module.params['include_defaults']
config = module.config.get_config(include_defaults=defaults)
return CustomNetworkConfig(indent=2, contents=config)
def load_config(module, candidate):
config = get_config(module)
commands = candidate.difference(config)
commands = [str(c).strip() for c in commands]
save_config = module.params['save']
result = dict(changed=False)
if commands:
if not module.check_mode:
try:
module.configure(commands)
except AttributeError:
module.config(commands)
if save_config:
try:
module.config.save_config()
except AttributeError:
module.execute(['copy running-config startup-config'])
result['changed'] = True
result['updates'] = commands
return result
def execute_config_command(commands, module):
try:
module.configure(commands)
except ShellError:
clie = get_exception()
module.fail_json(msg='Error sending CLI commands',
error=str(clie), commands=commands)
except AttributeError:
try:
commands.insert(0, 'configure')
module.cli.add_commands(commands, output='config')
module.cli.run_commands()
except ShellError:
clie = get_exception()
module.fail_json(msg='Error sending CLI commands',
error=str(clie), commands=commands)
def get_cli_body_ssh_vrf(module, command, response):
"""Get response for when transport=cli. This is kind of a hack and mainly
needed because these modules were originally written for NX-API. And
not every command supports "| json" when using cli/ssh. As such, we assume
if | json returns an XML string, it is a valid command, but that the
resource doesn't exist yet. Instead, the output will be a raw string
when using multiple |.
"""
command_splitted = command.split('|')
if len(command_splitted) > 2 or 'show run' in command:
body = response
elif 'xml' in response[0] or response[0] == '\n':
body = []
else:
body = [json.loads(response[0])]
return body
def execute_show(cmds, module, command_type=None):
command_type_map = {
'cli_show': 'json',
'cli_show_ascii': 'text'
}
try:
if command_type:
response = module.execute(cmds, command_type=command_type)
else:
response = module.execute(cmds)
except ShellError:
clie = get_exception()
module.fail_json(msg='Error sending {0}'.format(cmds),
error=str(clie))
except AttributeError:
try:
if command_type:
command_type = command_type_map.get(command_type)
module.cli.add_commands(cmds, output=command_type)
response = module.cli.run_commands()
else:
module.cli.add_commands(cmds, raw=True)
response = module.cli.run_commands()
except ShellError:
clie = get_exception()
module.fail_json(msg='Error sending {0}'.format(cmds),
error=str(clie))
return response
def execute_show_command(command, module, command_type='cli_show'):
if module.params['transport'] == 'cli':
if 'show run' not in command:
command += ' | json'
cmds = [command]
response = execute_show(cmds, module)
body = get_cli_body_ssh_vrf(module, command, response)
elif module.params['transport'] == 'nxapi':
cmds = [command]
body = execute_show(cmds, module, command_type=command_type)
return body
def apply_key_map(key_map, table):
new_dict = {}
for key, value in table.items():
new_key = key_map.get(key)
if new_key:
value = table.get(key)
if value:
new_dict[new_key] = str(value)
else:
new_dict[new_key] = value
return new_dict
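# Example (hypothetical values): apply_key_map({'vrf_name': 'vrf'},
# {'vrf_name': 'ntc', 'other': 1}) returns {'vrf': 'ntc'}; keys absent from
# the key map are dropped.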
def get_commands_to_config_vrf(delta, vrf):
commands = []
for param, value in delta.iteritems():
command = ''
if param == 'description':
command = 'description {0}'.format(value)
elif param == 'admin_state':
if value.lower() == 'up':
command = 'no shutdown'
elif value.lower() == 'down':
command = 'shutdown'
elif param == 'rd':
command = 'rd {0}'.format(value)
elif param == 'vni':
command = 'vni {0}'.format(value)
if command:
commands.append(command)
if commands:
commands.insert(0, 'vrf context {0}'.format(vrf))
return commands
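# Example (hypothetical values):
#     get_commands_to_config_vrf({'admin_state': 'down'}, 'ntc')
# returns ['vrf context ntc', 'shutdown'].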
def get_vrf_description(vrf, module):
command_type = 'cli_show_ascii'
    command = (r'show run section vrf | begin ^vrf\scontext\s{0} '
               r'| end ^vrf.*'.format(vrf))
    description = ''
    descr_regex = r".*description\s(?P<descr>[\S+\s]+).*"
body = execute_show_command(command, module, command_type)
try:
body = body[0]
splitted_body = body.split('\n')
except (AttributeError, IndexError):
return description
for element in splitted_body:
if 'description' in element:
match_description = re.match(descr_regex, element,
re.DOTALL)
group_description = match_description.groupdict()
description = group_description["descr"]
return description
def get_value(arg, config, module):
REGEX = re.compile(r'(?:{0}\s)(?P<value>.*)$'.format(arg), re.M)
value = ''
if arg in config:
value = REGEX.search(config).group('value')
return value
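# Example (hypothetical values): with config containing the line "rd 1:1",
# get_value('rd', config, module) returns '1:1', since the regex captures
# everything after the arg name on that line.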
def get_vrf(vrf, module):
command = 'show vrf {0}'.format(vrf)
vrf_key = {
'vrf_name': 'vrf',
'vrf_state': 'admin_state'
}
body = execute_show_command(command, module)
try:
vrf_table = body[0]['TABLE_vrf']['ROW_vrf']
except (TypeError, IndexError):
return {}
parsed_vrf = apply_key_map(vrf_key, vrf_table)
command = 'show run all | section vrf.context.{0}'.format(vrf)
body = execute_show_command(command, module, 'cli_show_ascii')
extra_params = ['vni', 'rd', 'description']
for param in extra_params:
parsed_vrf[param] = get_value(param, body[0], module)
return parsed_vrf
def main():
argument_spec = dict(
vrf=dict(required=True),
description=dict(default=None, required=False),
vni=dict(required=False, type='str'),
rd=dict(required=False, type='str'),
admin_state=dict(default='up', choices=['up', 'down'],
required=False),
state=dict(default='present', choices=['present', 'absent'],
required=False),
include_defaults=dict(default=False),
config=dict(),
save=dict(type='bool', default=False)
)
module = get_network_module(argument_spec=argument_spec,
supports_check_mode=True)
vrf = module.params['vrf']
admin_state = module.params['admin_state'].lower()
description = module.params['description']
rd = module.params['rd']
vni = module.params['vni']
state = module.params['state']
if vrf == 'default':
module.fail_json(msg='cannot use default as name of a VRF')
elif len(vrf) > 32:
module.fail_json(msg='VRF name exceeded max length of 32',
vrf=vrf)
existing = get_vrf(vrf, module)
args = dict(vrf=vrf, description=description, vni=vni,
admin_state=admin_state, rd=rd)
end_state = existing
changed = False
proposed = dict((k, v) for k, v in args.iteritems() if v is not None)
"""Since 'admin_state' is either 'Up' or 'Down' from outputs,
we use the following to make sure right letter case is used so that delta
results will be consistent to the actual configuration."""
if existing:
if existing['admin_state'].lower() == admin_state:
proposed['admin_state'] = existing['admin_state']
delta = dict(set(proposed.iteritems()).difference(existing.iteritems()))
changed = False
end_state = existing
commands = []
if state == 'absent':
if existing:
command = ['no vrf context {0}'.format(vrf)]
commands.extend(command)
elif state == 'present':
if not existing:
command = get_commands_to_config_vrf(delta, vrf)
commands.extend(command)
elif delta:
command = get_commands_to_config_vrf(delta, vrf)
commands.extend(command)
if commands:
if proposed.get('vni'):
if existing.get('vni') and existing.get('vni') != '':
commands.insert(1, 'no vni {0}'.format(existing['vni']))
if module.check_mode:
            module.exit_json(changed=True, commands=commands)
else:
execute_config_command(commands, module)
changed = True
end_state = get_vrf(vrf, module)
if 'configure' in commands:
commands.pop(0)
results = {}
results['proposed'] = proposed
results['existing'] = existing
results['end_state'] = end_state
results['updates'] = commands
results['changed'] = changed
module.exit_json(**results)
if __name__ == '__main__':
main()
|
"""
This is a web-server which integrates with the twisted.internet
infrastructure.
"""
from __future__ import division, absolute_import
import copy
import os
try:
from urllib import quote
except ImportError:
from urllib.parse import quote as _quote
def quote(string, *args, **kwargs):
return _quote(
string.decode('charmap'), *args, **kwargs).encode('charmap')
import zlib
from zope.interface import implementer
from twisted.python.compat import _PY3, networkString, nativeString, intToBytes
if _PY3:
class Copyable:
"""
Fake mixin, until twisted.spread is ported.
"""
else:
from twisted.spread.pb import Copyable, ViewPoint
from twisted.internet import address
from twisted.web import iweb, http, util
from twisted.web.http import unquote
from twisted.python import log, reflect, failure, components
from twisted import copyright
from twisted.web import resource
from twisted.web.error import UnsupportedMethod
from twisted.python.versions import Version
from twisted.python.deprecate import deprecatedModuleAttribute
from twisted.python.compat import escape
NOT_DONE_YET = 1
__all__ = [
'supportedMethods',
'Request',
'Session',
'Site',
'version',
'NOT_DONE_YET',
'GzipEncoderFactory'
]
deprecatedModuleAttribute(
Version("Twisted", 12, 1, 0),
"Please use twisted.web.http.datetimeToString instead",
"twisted.web.server",
"date_time_string")
deprecatedModuleAttribute(
Version("Twisted", 12, 1, 0),
"Please use twisted.web.http.stringToDatetime instead",
"twisted.web.server",
"string_date_time")
date_time_string = http.datetimeToString
string_date_time = http.stringToDatetime
supportedMethods = (b'GET', b'HEAD', b'POST')
def _addressToTuple(addr):
if isinstance(addr, address.IPv4Address):
return ('INET', addr.host, addr.port)
elif isinstance(addr, address.UNIXAddress):
return ('UNIX', addr.name)
else:
return tuple(addr)
@implementer(iweb.IRequest)
class Request(Copyable, http.Request, components.Componentized):
"""
An HTTP request.
@ivar defaultContentType: A C{bytes} giving the default I{Content-Type}
value to send in responses if no other value is set. C{None} disables
the default.
"""
defaultContentType = b"text/html"
site = None
appRootURL = None
__pychecker__ = 'unusednames=issuer'
_inFakeHead = False
_encoder = None
def __init__(self, *args, **kw):
http.Request.__init__(self, *args, **kw)
components.Componentized.__init__(self)
def getStateToCopyFor(self, issuer):
x = self.__dict__.copy()
del x['transport']
# XXX refactor this attribute out; it's from protocol
# del x['server']
del x['channel']
del x['content']
del x['site']
self.content.seek(0, 0)
x['content_data'] = self.content.read()
x['remote'] = ViewPoint(issuer, self)
# Address objects aren't jellyable
x['host'] = _addressToTuple(x['host'])
x['client'] = _addressToTuple(x['client'])
# Header objects also aren't jellyable.
x['requestHeaders'] = list(x['requestHeaders'].getAllRawHeaders())
return x
# HTML generation helpers
def sibLink(self, name):
"""
Return the text that links to a sibling of the requested resource.
"""
if self.postpath:
return (len(self.postpath)*b"../") + name
else:
return name
def childLink(self, name):
"""
Return the text that links to a child of the requested resource.
"""
lpp = len(self.postpath)
if lpp > 1:
return ((lpp-1)*b"../") + name
elif lpp == 1:
return name
else: # lpp == 0
if len(self.prepath) and self.prepath[-1]:
return self.prepath[-1] + b'/' + name
else:
return name
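    # Example (hypothetical): for a request whose postpath is
    # [b'foo', b'bar'], sibLink(b'baz') returns b'../../baz' and
    # childLink(b'baz') returns b'../baz'.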
def process(self):
"""
Process a request.
"""
# get site from channel
self.site = self.channel.site
# set various default headers
self.setHeader(b'server', version)
self.setHeader(b'date', http.datetimeToString())
# Resource Identification
self.prepath = []
self.postpath = list(map(unquote, self.path[1:].split(b'/')))
try:
resrc = self.site.getResourceFor(self)
if resource._IEncodingResource.providedBy(resrc):
encoder = resrc.getEncoder(self)
if encoder is not None:
self._encoder = encoder
self.render(resrc)
except:
self.processingFailed(failure.Failure())
def write(self, data):
"""
Write data to the transport (if not responding to a HEAD request).
@param data: A string to write to the response.
"""
if not self.startedWriting:
# Before doing the first write, check to see if a default
# Content-Type header should be supplied.
modified = self.code != http.NOT_MODIFIED
contentType = self.responseHeaders.getRawHeaders(b'content-type')
if (modified and contentType is None and
self.defaultContentType is not None
):
self.responseHeaders.setRawHeaders(
b'content-type', [self.defaultContentType])
# Only let the write happen if we're not generating a HEAD response by
# faking out the request method. Note, if we are doing that,
# startedWriting will never be true, and the above logic may run
# multiple times. It will only actually change the responseHeaders
# once though, so it's still okay.
if not self._inFakeHead:
if self._encoder:
data = self._encoder.encode(data)
http.Request.write(self, data)
def finish(self):
"""
Override C{http.Request.finish} for possible encoding.
"""
if self._encoder:
data = self._encoder.finish()
if data:
http.Request.write(self, data)
return http.Request.finish(self)
def render(self, resrc):
"""
Ask a resource to render itself.
@param resrc: a L{twisted.web.resource.IResource}.
"""
try:
body = resrc.render(self)
except UnsupportedMethod as e:
allowedMethods = e.allowedMethods
if (self.method == b"HEAD") and (b"GET" in allowedMethods):
# We must support HEAD (RFC 2616, 5.1.1). If the
# resource doesn't, fake it by giving the resource
# a 'GET' request and then return only the headers,
# not the body.
log.msg("Using GET to fake a HEAD request for %s" %
(resrc,))
self.method = b"GET"
self._inFakeHead = True
body = resrc.render(self)
if body is NOT_DONE_YET:
log.msg("Tried to fake a HEAD request for %s, but "
"it got away from me." % resrc)
# Oh well, I guess we won't include the content length.
else:
self.setHeader(b'content-length', intToBytes(len(body)))
self._inFakeHead = False
self.method = b"HEAD"
self.write(b'')
self.finish()
return
            if self.method in supportedMethods:
# We MUST include an Allow header
# (RFC 2616, 10.4.6 and 14.7)
self.setHeader(b'Allow', b', '.join(allowedMethods))
s = ('''Your browser approached me (at %(URI)s) with'''
''' the method "%(method)s". I only allow'''
''' the method%(plural)s %(allowed)s here.''' % {
'URI': escape(nativeString(self.uri)),
'method': nativeString(self.method),
'plural': ((len(allowedMethods) > 1) and 's') or '',
'allowed': ', '.join(
[nativeString(x) for x in allowedMethods])
})
epage = resource.ErrorPage(http.NOT_ALLOWED,
"Method Not Allowed", s)
body = epage.render(self)
else:
epage = resource.ErrorPage(
http.NOT_IMPLEMENTED, "Huh?",
"I don't know how to treat a %s request." %
(escape(self.method.decode("charmap")),))
body = epage.render(self)
# end except UnsupportedMethod
if body == NOT_DONE_YET:
return
if not isinstance(body, bytes):
body = resource.ErrorPage(
http.INTERNAL_SERVER_ERROR,
"Request did not return bytes",
"Request: " + util._PRE(reflect.safe_repr(self)) + "<br />" +
"Resource: " + util._PRE(reflect.safe_repr(resrc)) + "<br />" +
"Value: " + util._PRE(reflect.safe_repr(body))).render(self)
if self.method == b"HEAD":
if len(body) > 0:
# This is a Bad Thing (RFC 2616, 9.4)
log.msg("Warning: HEAD request %s for resource %s is"
" returning a message body."
" I think I'll eat it."
% (self, resrc))
self.setHeader(b'content-length',
intToBytes(len(body)))
self.write(b'')
else:
self.setHeader(b'content-length',
intToBytes(len(body)))
self.write(body)
self.finish()
def processingFailed(self, reason):
log.err(reason)
if self.site.displayTracebacks:
body = (b"<html><head><title>web.Server Traceback"
b" (most recent call last)</title></head>"
b"<body><b>web.Server Traceback"
b" (most recent call last):</b>\n\n" +
util.formatFailure(reason) +
b"\n\n</body></html>\n")
else:
body = (b"<html><head><title>Processing Failed"
b"</title></head><body>"
b"<b>Processing Failed</b></body></html>")
self.setResponseCode(http.INTERNAL_SERVER_ERROR)
self.setHeader(b'content-type', b"text/html")
self.setHeader(b'content-length', intToBytes(len(body)))
self.write(body)
self.finish()
return reason
def view_write(self, issuer, data):
"""Remote version of write; same interface.
"""
self.write(data)
def view_finish(self, issuer):
"""Remote version of finish; same interface.
"""
self.finish()
def view_addCookie(self, issuer, k, v, **kwargs):
"""Remote version of addCookie; same interface.
"""
self.addCookie(k, v, **kwargs)
def view_setHeader(self, issuer, k, v):
"""Remote version of setHeader; same interface.
"""
self.setHeader(k, v)
def view_setLastModified(self, issuer, when):
"""Remote version of setLastModified; same interface.
"""
self.setLastModified(when)
def view_setETag(self, issuer, tag):
"""Remote version of setETag; same interface.
"""
self.setETag(tag)
def view_setResponseCode(self, issuer, code, message=None):
"""
Remote version of setResponseCode; same interface.
"""
self.setResponseCode(code, message)
def view_registerProducer(self, issuer, producer, streaming):
"""Remote version of registerProducer; same interface.
(requires a remote producer.)
"""
self.registerProducer(_RemoteProducerWrapper(producer), streaming)
def view_unregisterProducer(self, issuer):
self.unregisterProducer()
### these calls remain local
session = None
def getSession(self, sessionInterface=None):
# Session management
if not self.session:
cookiename = b"_".join([b'TWISTED_SESSION'] + self.sitepath)
sessionCookie = self.getCookie(cookiename)
if sessionCookie:
try:
self.session = self.site.getSession(sessionCookie)
except KeyError:
pass
# if it still hasn't been set, fix it up.
if not self.session:
self.session = self.site.makeSession()
self.addCookie(cookiename, self.session.uid, path=b'/')
self.session.touch()
if sessionInterface:
return self.session.getComponent(sessionInterface)
return self.session
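    # Illustrative sketch (not part of Twisted itself): typical use from a
    # resource's render method, assuming `request` is this Request.
    #     session = request.getSession()
    #     session.touch()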
def _prePathURL(self, prepath):
port = self.getHost().port
if self.isSecure():
default = 443
else:
default = 80
if port == default:
hostport = ''
else:
hostport = ':%d' % port
prefix = networkString('http%s://%s%s/' % (
self.isSecure() and 's' or '',
nativeString(self.getRequestHostname()),
hostport))
path = b'/'.join([quote(segment, safe=b'') for segment in prepath])
return prefix + path
def prePathURL(self):
return self._prePathURL(self.prepath)
def URLPath(self):
from twisted.python import urlpath
return urlpath.URLPath.fromRequest(self)
def rememberRootURL(self):
"""
Remember the currently-processed part of the URL for later
recalling.
"""
url = self._prePathURL(self.prepath[:-1])
self.appRootURL = url
def getRootURL(self):
"""
Get a previously-remembered URL.
"""
return self.appRootURL
@implementer(iweb._IRequestEncoderFactory)
class GzipEncoderFactory(object):
"""
    @cvar compressLevel: The compression level used by the compressor,
        defaults to 9 (highest).
@since: 12.3
"""
compressLevel = 9
def encoderForRequest(self, request):
"""
Check the headers if the client accepts gzip encoding, and encodes the
request if so.
"""
acceptHeaders = request.requestHeaders.getRawHeaders(
'accept-encoding', [])
supported = ','.join(acceptHeaders).split(',')
if 'gzip' in supported:
encoding = request.responseHeaders.getRawHeaders(
'content-encoding')
if encoding:
encoding = '%s,gzip' % ','.join(encoding)
else:
encoding = 'gzip'
request.responseHeaders.setRawHeaders('content-encoding',
[encoding])
return _GzipEncoder(self.compressLevel, request)
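# Illustrative sketch (not part of this module): enabling gzip for a site by
# wrapping the root resource, assuming `root` provides IResource.
#     from twisted.web.resource import EncodingResourceWrapper
#     wrapped = EncodingResourceWrapper(root, [GzipEncoderFactory()])
#     site = Site(wrapped)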
@implementer(iweb._IRequestEncoder)
class _GzipEncoder(object):
"""
An encoder which supports gzip.
@ivar _zlibCompressor: The zlib compressor instance used to compress the
stream.
@ivar _request: A reference to the originating request.
@since: 12.3
"""
_zlibCompressor = None
def __init__(self, compressLevel, request):
self._zlibCompressor = zlib.compressobj(
compressLevel, zlib.DEFLATED, 16 + zlib.MAX_WBITS)
self._request = request
def encode(self, data):
"""
Write to the request, automatically compressing data on the fly.
"""
if not self._request.startedWriting:
# Remove the content-length header, we can't honor it
# because we compress on the fly.
self._request.responseHeaders.removeHeader(b'content-length')
return self._zlibCompressor.compress(data)
def finish(self):
"""
Finish handling the request, flushing any data from the zlib
buffer.
"""
remain = self._zlibCompressor.flush()
self._zlibCompressor = None
return remain
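# A minimal usage sketch for the encoder machinery above, assuming
# twisted.web.resource.EncodingResourceWrapper (available since Twisted 12.3):
#
#     from twisted.web.resource import EncodingResourceWrapper
#     wrapped = EncodingResourceWrapper(someResource, [GzipEncoderFactory()])
#     site = Site(wrapped)
#
# Responses rendered by someResource (a hypothetical IResource provider) are
# then gzip-compressed whenever the client sends "gzip" in Accept-Encoding.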
class _RemoteProducerWrapper:
def __init__(self, remote):
self.resumeProducing = remote.remoteMethod("resumeProducing")
self.pauseProducing = remote.remoteMethod("pauseProducing")
self.stopProducing = remote.remoteMethod("stopProducing")
class Session(components.Componentized):
"""
A user's session with a system.
This utility class contains no functionality, but is used to
represent a session.
@ivar uid: A unique identifier for the session.
@type uid: L{bytes}
@ivar _reactor: An object providing L{IReactorTime} to use for scheduling
expiration.
@ivar sessionTimeout: timeout of a session, in seconds.
"""
sessionTimeout = 900
_expireCall = None
def __init__(self, site, uid, reactor=None):
"""
Initialize a session with a unique ID for that session.
"""
components.Componentized.__init__(self)
if reactor is None:
from twisted.internet import reactor
self._reactor = reactor
self.site = site
self.uid = uid
self.expireCallbacks = []
self.touch()
self.sessionNamespaces = {}
def startCheckingExpiration(self):
"""
Start expiration tracking.
@return: C{None}
"""
self._expireCall = self._reactor.callLater(
self.sessionTimeout, self.expire)
def notifyOnExpire(self, callback):
"""
Call this callback when the session expires or logs out.
"""
self.expireCallbacks.append(callback)
def expire(self):
"""
Expire/logout of the session.
"""
del self.site.sessions[self.uid]
for c in self.expireCallbacks:
c()
self.expireCallbacks = []
if self._expireCall and self._expireCall.active():
self._expireCall.cancel()
# Break reference cycle.
self._expireCall = None
def touch(self):
"""
Notify session modification.
"""
self.lastModified = self._reactor.seconds()
if self._expireCall is not None:
self._expireCall.reset(self.sessionTimeout)
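# Usage sketch (release_resources is hypothetical): a request handler can
# attach teardown logic to a session's expiration via notifyOnExpire:
#
#     session = request.getSession()
#     session.notifyOnExpire(lambda: release_resources(session.uid))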
version = networkString("TwistedWeb/%s" % (copyright.version,))
class Site(http.HTTPFactory):
"""
A web site: manage log, sessions, and resources.
@ivar counter: increment value used for generating unique session IDs.
@ivar requestFactory: A factory which is called with (channel, queued)
    and creates L{Request} instances. Defaults to L{Request}.
@ivar displayTracebacks: if set, Twisted internal errors are displayed on
    rendered pages. Defaults to C{True}.
@ivar sessionFactory: factory for session objects. Defaults to L{Session}.
@ivar sessionCheckTime: Deprecated. See L{Session.sessionTimeout} instead.
"""
counter = 0
requestFactory = Request
displayTracebacks = True
sessionFactory = Session
sessionCheckTime = 1800
def __init__(self, resource, requestFactory=None, *args, **kwargs):
"""
@param resource: The root of the resource hierarchy. All request
traversal for requests received by this factory will begin at this
resource.
@type resource: L{IResource} provider
@param requestFactory: Override for the default requestFactory.
@type requestFactory: C{callable} or C{class}.
@see: L{twisted.web.http.HTTPFactory.__init__}
"""
http.HTTPFactory.__init__(self, *args, **kwargs)
self.sessions = {}
self.resource = resource
if requestFactory is not None:
self.requestFactory = requestFactory
def _openLogFile(self, path):
from twisted.python import logfile
return logfile.LogFile(os.path.basename(path), os.path.dirname(path))
def __getstate__(self):
d = self.__dict__.copy()
d['sessions'] = {}
return d
def _mkuid(self):
"""
(internal) Generate an opaque, unique ID for a user's session.
"""
from binascii import hexlify
from hashlib import md5
import random
self.counter = self.counter + 1
return hexlify(md5(networkString(
"%s_%s" % (str(random.random()), str(self.counter)))
).digest())
def makeSession(self):
"""
Generate a new Session instance, and store it for future reference.
"""
uid = self._mkuid()
session = self.sessions[uid] = self.sessionFactory(self, uid)
session.startCheckingExpiration()
return session
def getSession(self, uid):
"""
Get a previously generated session.
@param uid: Unique ID of the session.
@type uid: L{bytes}.
@raise: L{KeyError} if the session is not found.
"""
return self.sessions[uid]
def buildProtocol(self, addr):
"""
Generate a channel attached to this site.
"""
channel = http.HTTPFactory.buildProtocol(self, addr)
channel.requestFactory = self.requestFactory
channel.site = self
return channel
isLeaf = 0
def render(self, request):
"""
Redirect because a Site is always a directory.
"""
request.redirect(request.prePathURL() + b'/')
request.finish()
def getChildWithDefault(self, pathEl, request):
"""
Emulate a resource's getChild method.
"""
request.site = self
return self.resource.getChildWithDefault(pathEl, request)
def getResourceFor(self, request):
"""
Get a resource for a request.
This iterates through the resource hierarchy, calling
getChildWithDefault on each resource it finds for a path element,
stopping when it hits an element where isLeaf is true.
"""
request.site = self
# Sitepath is used to determine cookie names between distributed
# servers and disconnected sites.
request.sitepath = copy.copy(request.prepath)
return resource.getChildForRequest(self.resource, request)
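# A minimal serving sketch (File is twisted.web.static.File; the port and
# path are illustrative):
#
#     from twisted.internet import reactor
#     from twisted.web.static import File
#
#     site = Site(File("/srv/www"))
#     reactor.listenTCP(8080, site)
#     reactor.run()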
|
"""
Tests for course wiki
"""
from django.urls import reverse
from mock import patch
from six import text_type
from courseware.tests.tests import LoginEnrollmentTestCase
from openedx.features.enterprise_support.tests.mixins.enterprise import EnterpriseTestConsentRequired
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory
class WikiRedirectTestCase(EnterpriseTestConsentRequired, LoginEnrollmentTestCase, ModuleStoreTestCase):
"""
Tests for wiki course redirection.
"""
shard = 1
def setUp(self):
super(WikiRedirectTestCase, self).setUp()
self.toy = CourseFactory.create(org='edX', course='toy', display_name='2012_Fall')
# Create two accounts
self.student = 'view@test.com'
self.instructor = 'view2@test.com'
self.password = 'foo'
for username, email in [('u1', self.student), ('u2', self.instructor)]:
self.create_account(username, email, self.password)
self.activate_user(email)
self.logout()
@patch.dict("django.conf.settings.FEATURES", {'ALLOW_WIKI_ROOT_ACCESS': True})
def test_wiki_redirect(self):
"""
Test that requesting wiki URLs redirect properly to or out of classes.
An enrolled student going from /courses/edX/toy/2012_Fall/progress
to /wiki/some/fake/wiki/page/ will be redirected to
/courses/edX/toy/2012_Fall/wiki/some/fake/wiki/page/
An unenrolled student going to /courses/edX/toy/2012_Fall/wiki/some/fake/wiki/page/
will be redirected to /wiki/some/fake/wiki/page/
"""
self.login(self.student, self.password)
self.enroll(self.toy)
referer = reverse("progress", kwargs={'course_id': text_type(self.toy.id)})
destination = reverse("wiki:get", kwargs={'path': 'some/fake/wiki/page/'})
redirected_to = referer.replace("progress", "wiki/some/fake/wiki/page/")
resp = self.client.get(destination, HTTP_REFERER=referer)
self.assertEqual(resp.status_code, 302)
self.assertEqual(resp['Location'], redirected_to)
# Now we test that the student will be redirected away from that page if the course doesn't exist
# We do this in the same test because we want to make sure the redirected_to is constructed correctly
# This is a location like /courses/*/wiki/*, but with an invalid course ID
bad_course_wiki_page = redirected_to.replace(self.toy.location.course, "bad_course")
resp = self.client.get(bad_course_wiki_page, HTTP_REFERER=referer)
self.assertEqual(resp.status_code, 302)
self.assertEqual(resp['Location'], destination)
@patch.dict("django.conf.settings.FEATURES", {'ALLOW_WIKI_ROOT_ACCESS': False})
def test_wiki_no_root_access(self):
"""
Test to verify that, normally, wikis cannot be browsed from the /wiki/xxxx/yyy/zz URLs
"""
self.login(self.student, self.password)
self.enroll(self.toy)
referer = reverse("progress", kwargs={'course_id': text_type(self.toy.id)})
destination = reverse("wiki:get", kwargs={'path': 'some/fake/wiki/page/'})
resp = self.client.get(destination, HTTP_REFERER=referer)
self.assertEqual(resp.status_code, 403)
def create_course_page(self, course):
"""
Test that loading the course wiki page creates the wiki page.
The user must be enrolled in the course to see the page.
"""
course_wiki_home = reverse('course_wiki', kwargs={'course_id': text_type(course.id)})
referer = reverse("progress", kwargs={'course_id': text_type(course.id)})
resp = self.client.get(course_wiki_home, follow=True, HTTP_REFERER=referer)
course_wiki_page = referer.replace('progress', 'wiki/' + course.wiki_slug + "/")
ending_location = resp.redirect_chain[-1][0]
self.assertEqual(ending_location, course_wiki_page)
self.assertEqual(resp.status_code, 200)
self.has_course_navigator(resp)
self.assertContains(resp, '<h3 class="entry-title">{}</h3>'.format(course.display_name_with_default))
def has_course_navigator(self, resp):
"""
Ensure that the response has the course navigator.
"""
self.assertContains(resp, "Home")
self.assertContains(resp, "Course")
@patch.dict("django.conf.settings.FEATURES", {'ALLOW_WIKI_ROOT_ACCESS': True})
def test_course_navigator(self):
""""
Test that going from a course page to a wiki page contains the course navigator.
"""
self.login(self.student, self.password)
self.enroll(self.toy)
self.create_course_page(self.toy)
course_wiki_page = reverse('wiki:get', kwargs={'path': self.toy.wiki_slug + '/'})
referer = reverse("courseware", kwargs={'course_id': text_type(self.toy.id)})
resp = self.client.get(course_wiki_page, follow=True, HTTP_REFERER=referer)
self.has_course_navigator(resp)
@patch.dict("django.conf.settings.FEATURES", {'ALLOW_WIKI_ROOT_ACCESS': True})
def test_wiki_not_accessible_when_not_enrolled(self):
"""
Test that going from a course page to a wiki page when not enrolled
redirects a user to the course about page
"""
self.login(self.instructor, self.password)
self.enroll(self.toy)
self.create_course_page(self.toy)
self.logout()
self.login(self.student, self.password)
course_wiki_page = reverse('wiki:get', kwargs={'path': self.toy.wiki_slug + '/'})
referer = reverse("courseware", kwargs={'course_id': text_type(self.toy.id)})
# When not enrolled, we should get a 302
resp = self.client.get(course_wiki_page, follow=False, HTTP_REFERER=referer)
self.assertEqual(resp.status_code, 302)
# and end up at the course about page
resp = self.client.get(course_wiki_page, follow=True, HTTP_REFERER=referer)
target_url, __ = resp.redirect_chain[-1]
self.assertTrue(
target_url.endswith(reverse('about_course', args=[text_type(self.toy.id)]))
)
@patch.dict("django.conf.settings.FEATURES", {'ALLOW_WIKI_ROOT_ACCESS': True})
def test_redirect_when_not_logged_in(self):
"""
Test that attempting to reach a course wiki page when not logged in
redirects the user to the login page
"""
self.logout()
course_wiki_page = reverse('wiki:get', kwargs={'path': self.toy.wiki_slug + '/'})
# When not logged in, we should get a 302
resp = self.client.get(course_wiki_page, follow=False)
self.assertEqual(resp.status_code, 302)
# and end up at the login page
resp = self.client.get(course_wiki_page, follow=True)
target_url, __ = resp.redirect_chain[-1]
self.assertIn(reverse('signin_user'), target_url)
@patch.dict("django.conf.settings.FEATURES", {'ALLOW_WIKI_ROOT_ACCESS': True})
def test_create_wiki_with_long_course_id(self):
"""
Tests that the wiki is successfully created for courses that have
very long course ids.
"""
# Combined course key length is currently capped at 65 characters (see MAX_SUM_KEY_LENGTH
# in /common/static/common/js/components/utils/view_utils.js).
# The below key components combined are exactly 65 characters long.
org = 'a-very-long-org-name'
course = 'a-very-long-course-name'
display_name = 'very-long-display-name'
# This is how wiki_slug is generated in cms/djangoapps/contentstore/views/course.py.
wiki_slug = "{0}.{1}.{2}".format(org, course, display_name)
self.assertEqual(len(org + course + display_name), 65) # sanity check
course = CourseFactory.create(org=org, course=course, display_name=display_name, wiki_slug=wiki_slug)
self.login(self.student, self.password)
self.enroll(course)
self.create_course_page(course)
course_wiki_page = reverse('wiki:get', kwargs={'path': course.wiki_slug + '/'})
referer = reverse("courseware", kwargs={'course_id': text_type(course.id)})
resp = self.client.get(course_wiki_page, follow=True, HTTP_REFERER=referer)
self.assertEqual(resp.status_code, 200)
@patch.dict("django.conf.settings.FEATURES", {'ALLOW_WIKI_ROOT_ACCESS': True})
@patch('openedx.features.enterprise_support.api.enterprise_customer_for_request')
def test_consent_required(self, mock_enterprise_customer_for_request):
"""
Test that enterprise data sharing consent is required when enabled for the various courseware views.
"""
# ENT-924: Temporary solution to replace sensitive SSO usernames.
mock_enterprise_customer_for_request.return_value = None
# Public wikis can be accessed by non-enrolled users, and so direct access is not gated by the consent page
course = CourseFactory.create()
course.allow_public_wiki_access = False
course.save()
# However, for private wikis, enrolled users must pass through the consent gate
# (Unenrolled users are redirected to course/about)
course_id = text_type(course.id)
self.login(self.student, self.password)
self.enroll(course)
for (url, status_code) in (
(reverse('course_wiki', kwargs={'course_id': course_id}), 302),
('/courses/{}/wiki/'.format(course_id), 200),
):
self.verify_consent_required(self.client, url, status_code=status_code)
|
"""Python 'zlib_codec' Codec - zlib compression encoding.
This codec de/encodes from bytes to bytes and is therefore usable with
bytes.transform() and bytes.untransform().
Written by Marc-Andre Lemburg (mal@lemburg.com).
"""
import codecs
import zlib # this codec needs the optional zlib module !
def zlib_encode(input, errors='strict'):
assert errors == 'strict'
return (zlib.compress(input), len(input))
def zlib_decode(input, errors='strict'):
assert errors == 'strict'
return (zlib.decompress(input), len(input))
class Codec(codecs.Codec):
def encode(self, input, errors='strict'):
return zlib_encode(input, errors)
def decode(self, input, errors='strict'):
return zlib_decode(input, errors)
class IncrementalEncoder(codecs.IncrementalEncoder):
def __init__(self, errors='strict'):
assert errors == 'strict'
self.errors = errors
self.compressobj = zlib.compressobj()
def encode(self, input, final=False):
if final:
c = self.compressobj.compress(input)
return c + self.compressobj.flush()
else:
return self.compressobj.compress(input)
def reset(self):
self.compressobj = zlib.compressobj()
class IncrementalDecoder(codecs.IncrementalDecoder):
def __init__(self, errors='strict'):
assert errors == 'strict'
self.errors = errors
self.decompressobj = zlib.decompressobj()
def decode(self, input, final=False):
if final:
c = self.decompressobj.decompress(input)
return c + self.decompressobj.flush()
else:
return self.decompressobj.decompress(input)
def reset(self):
self.decompressobj = zlib.decompressobj()
class StreamWriter(Codec, codecs.StreamWriter):
charbuffertype = bytes
class StreamReader(Codec, codecs.StreamReader):
charbuffertype = bytes
def getregentry():
return codecs.CodecInfo(
name='zlib',
encode=zlib_encode,
decode=zlib_decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
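# A quick round-trip sketch exercising the module-level helpers above:
#
#     data = b"zlib codec round trip" * 10
#     compressed, consumed = zlib_encode(data)
#     assert consumed == len(data)
#     restored, _ = zlib_decode(compressed)
#     assert restored == data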
|
import random
import string
import unittest
import pytest
from airflow import models
from airflow.api.common.experimental import pool as pool_api
from airflow.exceptions import AirflowBadRequest, PoolNotFound
from airflow.models.pool import Pool
from airflow.utils.session import create_session
from tests.test_utils.db import clear_db_pools
class TestPool(unittest.TestCase):
USER_POOL_COUNT = 2
TOTAL_POOL_COUNT = USER_POOL_COUNT + 1 # including default_pool
def setUp(self):
clear_db_pools()
self.pools = [Pool.get_default_pool()]
for i in range(self.USER_POOL_COUNT):
name = f'experimental_{i + 1}'
pool = models.Pool(
pool=name,
slots=i,
description=name,
)
self.pools.append(pool)
with create_session() as session:
session.add_all(self.pools)
def test_get_pool(self):
pool = pool_api.get_pool(name=self.pools[0].pool)
assert pool.pool == self.pools[0].pool
def test_get_pool_non_existing(self):
with pytest.raises(PoolNotFound, match="^Pool 'test' doesn't exist$"):
pool_api.get_pool(name='test')
def test_get_pool_bad_name(self):
for name in ('', ' '):
with pytest.raises(AirflowBadRequest, match="^Pool name shouldn't be empty$"):
pool_api.get_pool(name=name)
def test_get_pools(self):
pools = sorted(pool_api.get_pools(), key=lambda p: p.pool)
assert pools[0].pool == self.pools[0].pool
assert pools[1].pool == self.pools[1].pool
def test_create_pool(self):
pool = pool_api.create_pool(name='foo', slots=5, description='')
assert pool.pool == 'foo'
assert pool.slots == 5
assert pool.description == ''
with create_session() as session:
assert session.query(models.Pool).count() == self.TOTAL_POOL_COUNT + 1
def test_create_pool_existing(self):
pool = pool_api.create_pool(name=self.pools[0].pool, slots=5, description='')
assert pool.pool == self.pools[0].pool
assert pool.slots == 5
assert pool.description == ''
with create_session() as session:
assert session.query(models.Pool).count() == self.TOTAL_POOL_COUNT
def test_create_pool_bad_name(self):
for name in ('', ' '):
with pytest.raises(AirflowBadRequest, match="^Pool name shouldn't be empty$"):
pool_api.create_pool(
name=name,
slots=5,
description='',
)
def test_create_pool_name_too_long(self):
long_name = ''.join(random.choices(string.ascii_lowercase, k=300))
column_length = models.Pool.pool.property.columns[0].type.length
with pytest.raises(
AirflowBadRequest, match="^Pool name can't be more than %d characters$" % column_length
):
pool_api.create_pool(
name=long_name,
slots=5,
description='',
)
def test_create_pool_bad_slots(self):
with pytest.raises(AirflowBadRequest, match="^Bad value for `slots`: foo$"):
pool_api.create_pool(
name='foo',
slots='foo',
description='',
)
def test_delete_pool(self):
pool = pool_api.delete_pool(name=self.pools[-1].pool)
assert pool.pool == self.pools[-1].pool
with create_session() as session:
assert session.query(models.Pool).count() == self.TOTAL_POOL_COUNT - 1
def test_delete_pool_non_existing(self):
with pytest.raises(pool_api.PoolNotFound, match="^Pool 'test' doesn't exist$"):
pool_api.delete_pool(name='test')
def test_delete_pool_bad_name(self):
for name in ('', ' '):
with pytest.raises(AirflowBadRequest, match="^Pool name shouldn't be empty$"):
pool_api.delete_pool(name=name)
def test_delete_default_pool_not_allowed(self):
with pytest.raises(AirflowBadRequest, match="^default_pool cannot be deleted$"):
pool_api.delete_pool(Pool.DEFAULT_POOL_NAME)
|
from proton import Message
from system_test import TestCase, Qdrouterd, main_module, TIMEOUT, unittest, TestTimeout, PollTimeout, Logger
from proton.handlers import MessagingHandler
from proton.reactor import Container, DynamicNodeProperties
from qpid_dispatch_internal.compat import UNICODE
class RouterMultitenantPolicyTest(TestCase):
inter_router_port = None
@classmethod
def setUpClass(cls):
"""Start a router"""
super(RouterMultitenantPolicyTest, cls).setUpClass()
def router(name, connection):
config = [
('router', {'mode': 'interior', 'id': name}),
('listener', {'port': cls.tester.get_port(), 'stripAnnotations': 'no'}),
('listener', {'port': cls.tester.get_port(), 'stripAnnotations': 'no', 'multiTenant': 'yes'}),
('listener', {'port': cls.tester.get_port(), 'stripAnnotations': 'no', 'role': 'route-container'}),
('linkRoute', {'prefix': 'hosted-group-1/link', 'direction': 'in', 'containerId': 'LRC'}),
('linkRoute', {'prefix': 'hosted-group-1/link', 'direction': 'out', 'containerId': 'LRC'}),
('autoLink', {'address': 'hosted-group-1/queue.waypoint', 'containerId': 'ALC', 'direction': 'in'}),
('autoLink', {'address': 'hosted-group-1/queue.waypoint', 'containerId': 'ALC', 'direction': 'out'}),
('autoLink', {'address': 'hosted-group-1/queue.ext', 'containerId': 'ALCE', 'direction': 'in', 'externalAddress': 'EXT'}),
('autoLink', {'address': 'hosted-group-1/queue.ext', 'containerId': 'ALCE', 'direction': 'out', 'externalAddress': 'EXT'}),
('address', {'prefix': 'closest', 'distribution': 'closest'}),
('address', {'prefix': 'spread', 'distribution': 'balanced'}),
('address', {'prefix': 'multicast', 'distribution': 'multicast'}),
('address', {'prefix': 'hosted-group-1/queue', 'waypoint': 'yes'}),
('policy', {'enableVhostPolicy': 'true'}),
('vhost', {'hostname': 'hosted-group-1',
'allowUnknownUser': 'true',
'aliases': '0.0.0.0',
'groups': {
'$default': {
'users': '*',
'maxConnections': 100,
'remoteHosts': '*',
'sources': '*',
'targets': '*',
'allowAnonymousSender': 'true',
'allowWaypointLinks': 'true',
'allowDynamicSource': 'true'
}
}
}),
connection
]
config = Qdrouterd.Config(config)
cls.routers.append(cls.tester.qdrouterd(name, config, wait=True))
cls.routers = []
inter_router_port = cls.tester.get_port()
router('A', ('listener', {'role': 'inter-router', 'port': inter_router_port}))
router('B', ('connector', {'name': 'connectorToA', 'role': 'inter-router', 'port': inter_router_port}))
cls.routers[0].wait_router_connected('B')
cls.routers[1].wait_router_connected('A')
def test_01_one_router_targeted_sender_no_tenant(self):
test = MessageTransferTest(self.routers[0].addresses[0],
self.routers[0].addresses[0],
"anything/addr_01",
"anything/addr_01",
self.routers[0].addresses[0],
"M0anything/addr_01")
test.run()
self.assertIsNone(test.error)
def test_02_one_router_targeted_sender_tenant_on_sender(self):
test = MessageTransferTest(self.routers[0].addresses[1],
self.routers[0].addresses[0],
"addr_02",
"hosted-group-1/addr_02",
self.routers[0].addresses[0],
"M0hosted-group-1/addr_02")
test.run()
self.assertIsNone(test.error)
def test_03_one_router_targeted_sender_tenant_on_receiver(self):
test = MessageTransferTest(self.routers[0].addresses[0],
self.routers[0].addresses[1],
"hosted-group-1/addr_03",
"addr_03",
self.routers[0].addresses[0],
"M0hosted-group-1/addr_03")
test.run()
self.assertIsNone(test.error)
def test_04_one_router_targeted_sender_tenant_on_both(self):
test = MessageTransferTest(self.routers[0].addresses[1],
self.routers[0].addresses[1],
"addr_04",
"addr_04",
self.routers[0].addresses[0],
"M0hosted-group-1/addr_04")
test.run()
self.assertIsNone(test.error)
def test_05_two_router_targeted_sender_no_tenant(self):
test = MessageTransferTest(self.routers[0].addresses[0],
self.routers[1].addresses[0],
"hosted-group-1/addr_05",
"hosted-group-1/addr_05",
self.routers[0].addresses[0],
"M0hosted-group-1/addr_05")
test.run()
self.assertIsNone(test.error)
def test_06_two_router_targeted_sender_tenant_on_sender(self):
test = MessageTransferTest(self.routers[0].addresses[1],
self.routers[1].addresses[0],
"addr_06",
"hosted-group-1/addr_06",
self.routers[0].addresses[0],
"M0hosted-group-1/addr_06")
test.run()
self.assertIsNone(test.error)
def test_07_two_router_targeted_sender_tenant_on_receiver(self):
test = MessageTransferTest(self.routers[0].addresses[0],
self.routers[1].addresses[1],
"hosted-group-1/addr_07",
"addr_07",
self.routers[0].addresses[0],
"M0hosted-group-1/addr_07")
test.run()
self.assertIsNone(test.error)
def test_08_two_router_targeted_sender_tenant_on_both(self):
test = MessageTransferTest(self.routers[0].addresses[1],
self.routers[1].addresses[1],
"addr_08",
"addr_08",
self.routers[0].addresses[0],
"M0hosted-group-1/addr_08")
test.run()
self.assertIsNone(test.error)
def test_09_one_router_anonymous_sender_no_tenant(self):
test = MessageTransferAnonTest(self.routers[0].addresses[0],
self.routers[0].addresses[0],
"anything/addr_09",
"anything/addr_09",
self.routers[0].addresses[0],
"M0anything/addr_09")
test.run()
self.assertIsNone(test.error)
def test_10_one_router_anonymous_sender_tenant_on_sender(self):
test = MessageTransferAnonTest(self.routers[0].addresses[1],
self.routers[0].addresses[0],
"addr_10",
"hosted-group-1/addr_10",
self.routers[0].addresses[0],
"M0hosted-group-1/addr_10")
test.run()
self.assertIsNone(test.error)
def test_11_one_router_anonymous_sender_tenant_on_receiver(self):
test = MessageTransferAnonTest(self.routers[0].addresses[0],
self.routers[0].addresses[1],
"hosted-group-1/addr_11",
"addr_11",
self.routers[0].addresses[0],
"M0hosted-group-1/addr_11")
test.run()
self.assertIsNone(test.error)
def test_12_one_router_anonymous_sender_tenant_on_both(self):
test = MessageTransferAnonTest(self.routers[0].addresses[1],
self.routers[0].addresses[1],
"addr_12",
"addr_12",
self.routers[0].addresses[0],
"M0hosted-group-1/addr_12")
test.run()
self.assertIsNone(test.error)
def test_13_two_router_anonymous_sender_no_tenant(self):
test = MessageTransferAnonTest(self.routers[0].addresses[0],
self.routers[1].addresses[0],
"anything/addr_13",
"anything/addr_13",
self.routers[0].addresses[0],
"M0anything/addr_13")
test.run()
self.assertIsNone(test.error)
def test_14_two_router_anonymous_sender_tenant_on_sender(self):
test = MessageTransferAnonTest(self.routers[0].addresses[1],
self.routers[1].addresses[0],
"addr_14",
"hosted-group-1/addr_14",
self.routers[0].addresses[0],
"M0hosted-group-1/addr_14")
test.run()
self.assertIsNone(test.error)
def test_15_two_router_anonymous_sender_tenant_on_receiver(self):
test = MessageTransferAnonTest(self.routers[0].addresses[0],
self.routers[1].addresses[1],
"hosted-group-1/addr_15",
"addr_15",
self.routers[0].addresses[0],
"M0hosted-group-1/addr_15")
test.run()
self.assertIsNone(test.error)
def test_16_two_router_anonymous_sender_tenant_on_both(self):
test = MessageTransferAnonTest(self.routers[0].addresses[1],
self.routers[1].addresses[1],
"addr_16",
"addr_16",
self.routers[0].addresses[0],
"M0hosted-group-1/addr_16")
test.run()
self.assertIsNone(test.error)
def test_17_one_router_link_route_targeted(self):
test = LinkRouteTest(self.routers[0].addresses[1],
self.routers[0].addresses[2],
"link.addr_17",
"hosted-group-1/link.addr_17",
False,
self.routers[0].addresses[0])
test.run()
self.assertIsNone(test.error)
def test_18_one_router_link_route_targeted_no_tenant(self):
test = LinkRouteTest(self.routers[0].addresses[0],
self.routers[0].addresses[2],
"hosted-group-1/link.addr_18",
"hosted-group-1/link.addr_18",
False,
self.routers[0].addresses[0])
test.run()
self.assertIsNone(test.error)
def test_19_one_router_link_route_dynamic(self):
test = LinkRouteTest(self.routers[0].addresses[1],
self.routers[0].addresses[2],
"link.addr_19",
"hosted-group-1/link.addr_19",
True,
self.routers[0].addresses[0])
test.run()
self.assertIsNone(test.error)
def test_20_one_router_link_route_dynamic_no_tenant(self):
test = LinkRouteTest(self.routers[0].addresses[0],
self.routers[0].addresses[2],
"hosted-group-1/link.addr_20",
"hosted-group-1/link.addr_20",
True,
self.routers[0].addresses[0])
test.run()
self.assertIsNone(test.error)
def test_21_two_router_link_route_targeted(self):
test = LinkRouteTest(self.routers[0].addresses[1],
self.routers[1].addresses[2],
"link.addr_21",
"hosted-group-1/link.addr_21",
False,
self.routers[0].addresses[0])
test.run()
self.assertIsNone(test.error)
def test_22_two_router_link_route_targeted_no_tenant(self):
test = LinkRouteTest(self.routers[0].addresses[0],
self.routers[1].addresses[2],
"hosted-group-1/link.addr_22",
"hosted-group-1/link.addr_22",
False,
self.routers[0].addresses[0])
test.run()
self.assertIsNone(test.error)
def test_23_two_router_link_route_dynamic(self):
test = LinkRouteTest(self.routers[0].addresses[1],
self.routers[1].addresses[2],
"link.addr_23",
"hosted-group-1/link.addr_23",
True,
self.routers[0].addresses[0])
test.run()
self.assertIsNone(test.error)
def test_24_two_router_link_route_dynamic_no_tenant(self):
test = LinkRouteTest(self.routers[0].addresses[0],
self.routers[1].addresses[2],
"hosted-group-1/link.addr_24",
"hosted-group-1/link.addr_24",
True,
self.routers[0].addresses[0])
test.run()
self.assertIsNone(test.error)
def test_25_one_router_anonymous_sender_non_mobile(self):
test = MessageTransferAnonTest(self.routers[0].addresses[1],
self.routers[0].addresses[0],
"_local/addr_25",
"_local/addr_25",
self.routers[0].addresses[0],
"Laddr_25")
test.run()
self.assertIsNone(test.error)
def test_26_one_router_targeted_sender_non_mobile(self):
test = MessageTransferTest(self.routers[0].addresses[1],
self.routers[0].addresses[0],
"_local/addr_26",
"_local/addr_26",
self.routers[0].addresses[0],
"Laddr_26")
test.run()
self.assertIsNone(test.error)
def test_27_two_router_anonymous_sender_non_mobile(self):
test = MessageTransferAnonTest(self.routers[0].addresses[1],
self.routers[1].addresses[0],
"_topo/0/B/addr_27",
"_local/addr_27",
self.routers[1].addresses[0],
"Laddr_27")
test.run()
self.assertIsNone(test.error)
def test_28_two_router_targeted_sender_non_mobile(self):
test = MessageTransferTest(self.routers[0].addresses[1],
self.routers[1].addresses[0],
"_topo/0/B/addr_28",
"_local/addr_28",
self.routers[1].addresses[0],
"Laddr_28")
test.run()
self.assertIsNone(test.error)
def test_29_one_router_waypoint_no_tenant(self):
test = WaypointTest(self.routers[0].addresses[0],
self.routers[0].addresses[2],
"hosted-group-1/queue.waypoint",
"hosted-group-1/queue.waypoint")
test.run()
# Dump the logger output only if there is a test error; otherwise don't bother
if test.error:
test.logger.dump()
self.assertIsNone(test.error)
def test_30_one_router_waypoint(self):
test = WaypointTest(self.routers[0].addresses[1],
self.routers[0].addresses[2],
"queue.waypoint",
"hosted-group-1/queue.waypoint")
test.run()
# Dump the logger output only if there is a test error; otherwise don't bother
if test.error:
test.logger.dump()
self.assertIsNone(test.error)
def test_31_two_router_waypoint_no_tenant(self):
test = WaypointTest(self.routers[0].addresses[0],
self.routers[1].addresses[2],
"hosted-group-1/queue.waypoint",
"hosted-group-1/queue.waypoint")
test.run()
# Dump the logger output only if there is a test error; otherwise don't bother
if test.error:
test.logger.dump()
self.assertIsNone(test.error)
def test_32_two_router_waypoint(self):
test = WaypointTest(self.routers[0].addresses[1],
self.routers[1].addresses[2],
"queue.waypoint",
"hosted-group-1/queue.waypoint")
test.run()
# Dump the logger output only if there is a test error; otherwise don't bother
if test.error:
test.logger.dump()
self.assertIsNone(test.error)
def test_33_one_router_waypoint_no_tenant_external_addr(self):
test = WaypointTest(self.routers[0].addresses[0],
self.routers[0].addresses[2],
"hosted-group-1/queue.ext",
"EXT",
"ALCE")
test.run()
# Dump the logger output only if there is a test error; otherwise don't bother
if test.error:
test.logger.dump()
self.assertIsNone(test.error)
def test_34_one_router_waypoint_external_addr(self):
test = WaypointTest(self.routers[0].addresses[1],
self.routers[0].addresses[2],
"queue.ext",
"EXT",
"ALCE")
test.run()
# Dump the logger output only if there is a test error; otherwise don't bother
if test.error:
test.logger.dump()
self.assertIsNone(test.error)
def test_35_two_router_waypoint_no_tenant_external_addr(self):
test = WaypointTest(self.routers[0].addresses[0],
self.routers[1].addresses[2],
"hosted-group-1/queue.ext",
"EXT",
"ALCE")
test.run()
# Dump the logger output only if there is a test error; otherwise don't bother
if test.error:
test.logger.dump()
self.assertIsNone(test.error)
def test_36_two_router_waypoint_external_addr(self):
test = WaypointTest(self.routers[0].addresses[1],
self.routers[1].addresses[2],
"queue.ext",
"EXT",
"ALCE")
test.run()
# Dump the logger output only if there is a test error; otherwise don't bother
if test.error:
test.logger.dump()
self.assertIsNone(test.error)
class Entity(object):
def __init__(self, status_code, status_description, attrs):
self.status_code = status_code
self.status_description = status_description
self.attrs = attrs
def __getattr__(self, key):
return self.attrs[key]
class RouterProxy(object):
def __init__(self, reply_addr):
self.reply_addr = reply_addr
def response(self, msg):
ap = msg.properties
return Entity(ap['statusCode'], ap['statusDescription'], msg.body)
def read_address(self, name):
ap = {'operation': 'READ', 'type': 'org.apache.qpid.dispatch.router.address', 'name': name}
return Message(properties=ap, reply_to=self.reply_addr)
def query_addresses(self):
ap = {'operation': 'QUERY', 'type': 'org.apache.qpid.dispatch.router.address'}
return Message(properties=ap, reply_to=self.reply_addr)
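# Usage sketch: a handler that owns a dynamic reply receiver can query the
# router's management agent through RouterProxy (the address is illustrative):
#
#     proxy = RouterProxy(reply_receiver.remote_source.address)
#     agent_sender.send(proxy.read_address("M0hosted-group-1/addr_02"))
#     # ...later, when the reply arrives on reply_receiver:
#     entity = proxy.response(event.message)
#     print(entity.status_code, entity.status_description)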
class MessageTransferTest(MessagingHandler):
def __init__(self, sender_host, receiver_host, sender_address, receiver_address, lookup_host, lookup_address):
super(MessageTransferTest, self).__init__()
self.sender_host = sender_host
self.receiver_host = receiver_host
self.sender_address = sender_address
self.receiver_address = receiver_address
self.lookup_host = lookup_host
self.lookup_address = lookup_address
self.sender_conn = None
self.receiver_conn = None
self.lookup_conn = None
self.error = None
self.sender = None
self.receiver = None
self.proxy = None
self.count = 10
self.n_sent = 0
self.n_rcvd = 0
self.n_accepted = 0
self.n_receiver_opened = 0
self.n_sender_opened = 0
def timeout(self):
self.error = "Timeout Expired: n_sent=%d n_rcvd=%d n_accepted=%d n_receiver_opened=%d n_sender_opened=%d" %\
(self.n_sent, self.n_rcvd, self.n_accepted, self.n_receiver_opened, self.n_sender_opened)
self.sender_conn.close()
self.receiver_conn.close()
self.lookup_conn.close()
def on_start(self, event):
self.timer = event.reactor.schedule(TIMEOUT, TestTimeout(self))
self.sender_conn = event.container.connect(self.sender_host)
self.receiver_conn = event.container.connect(self.receiver_host)
self.lookup_conn = event.container.connect(self.lookup_host)
self.reply_receiver = event.container.create_receiver(self.lookup_conn, dynamic=True)
self.agent_sender = event.container.create_sender(self.lookup_conn, "$management")
def send(self):
while self.sender.credit > 0 and self.n_sent < self.count:
self.n_sent += 1
m = Message(body="Message %d of %d" % (self.n_sent, self.count))
self.sender.send(m)
def on_link_opened(self, event):
if event.receiver:
self.n_receiver_opened += 1
else:
self.n_sender_opened += 1
if event.receiver == self.reply_receiver:
self.proxy = RouterProxy(self.reply_receiver.remote_source.address)
self.sender = event.container.create_sender(self.sender_conn, self.sender_address)
self.receiver = event.container.create_receiver(self.receiver_conn, self.receiver_address)
def on_sendable(self, event):
if event.sender == self.sender:
self.send()
def on_message(self, event):
if event.receiver == self.receiver:
self.n_rcvd += 1
if event.receiver == self.reply_receiver:
response = self.proxy.response(event.message)
if response.status_code != 200:
self.error = "Unexpected error code from agent: %d - %s" % (response.status_code, response.status_description)
if self.n_sent != self.count or self.n_rcvd != self.count:
self.error = "Unexpected counts: n_sent=%d n_rcvd=%d n_accepted=%d" % (self.n_sent, self.n_rcvd, self.n_accepted)
self.sender_conn.close()
self.receiver_conn.close()
self.lookup_conn.close()
self.timer.cancel()
def on_accepted(self, event):
if event.sender == self.sender:
self.n_accepted += 1
if self.n_accepted == self.count:
request = self.proxy.read_address(self.lookup_address)
self.agent_sender.send(request)
def run(self):
Container(self).run()
class MessageTransferAnonTest(MessagingHandler):
def __init__(self, sender_host, receiver_host, sender_address, receiver_address, lookup_host, lookup_address):
super(MessageTransferAnonTest, self).__init__()
self.sender_host = sender_host
self.receiver_host = receiver_host
self.sender_address = sender_address
self.receiver_address = receiver_address
self.lookup_host = lookup_host
self.lookup_address = lookup_address
self.sender_conn = None
self.receiver_conn = None
self.lookup_conn = None
self.error = None
self.sender = None
self.receiver = None
self.proxy = None
self.count = 10
self.n_sent = 0
self.n_rcvd = 0
self.n_accepted = 0
self.n_agent_reads = 0
self.n_receiver_opened = 0
self.n_sender_opened = 0
def timeout(self):
self.error = "Timeout Expired: n_sent=%d n_rcvd=%d n_accepted=%d n_agent_reads=%d n_receiver_opened=%d n_sender_opened=%d" %\
(self.n_sent, self.n_rcvd, self.n_accepted, self.n_agent_reads, self.n_receiver_opened, self.n_sender_opened)
self.sender_conn.close()
self.receiver_conn.close()
self.lookup_conn.close()
if self.poll_timer:
self.poll_timer.cancel()
def poll_timeout(self):
self.poll()
def on_start(self, event):
self.timer = event.reactor.schedule(TIMEOUT, TestTimeout(self))
self.poll_timer = None
self.sender_conn = event.container.connect(self.sender_host)
self.receiver_conn = event.container.connect(self.receiver_host)
self.lookup_conn = event.container.connect(self.lookup_host)
self.reply_receiver = event.container.create_receiver(self.lookup_conn, dynamic=True)
self.agent_sender = event.container.create_sender(self.lookup_conn, "$management")
self.receiver = event.container.create_receiver(self.receiver_conn, self.receiver_address)
def send(self):
while self.sender.credit > 0 and self.n_sent < self.count:
self.n_sent += 1
m = Message(body="Message %d of %d" % (self.n_sent, self.count))
m.address = self.sender_address
self.sender.send(m)
def poll(self):
request = self.proxy.read_address(self.lookup_address)
self.agent_sender.send(request)
self.n_agent_reads += 1
def on_link_opened(self, event):
if event.receiver:
self.n_receiver_opened += 1
else:
self.n_sender_opened += 1
if event.receiver == self.reply_receiver:
self.proxy = RouterProxy(self.reply_receiver.remote_source.address)
self.poll()
def on_sendable(self, event):
if event.sender == self.sender:
self.send()
def on_message(self, event):
if event.receiver == self.receiver:
self.n_rcvd += 1
if event.receiver == self.reply_receiver:
response = self.proxy.response(event.message)
if response.status_code == 200 and (response.remoteCount + response.subscriberCount) > 0:
self.sender = event.container.create_sender(self.sender_conn, None)
if self.poll_timer:
self.poll_timer.cancel()
self.poll_timer = None
else:
self.poll_timer = event.reactor.schedule(0.25, PollTimeout(self))
def on_accepted(self, event):
if event.sender == self.sender:
self.n_accepted += 1
if self.n_accepted == self.count:
self.sender_conn.close()
self.receiver_conn.close()
self.lookup_conn.close()
self.timer.cancel()
def run(self):
Container(self).run()
class LinkRouteTest(MessagingHandler):
def __init__(self, first_host, second_host, first_address, second_address, dynamic, lookup_host):
super(LinkRouteTest, self).__init__(prefetch=0)
self.first_host = first_host
self.second_host = second_host
self.first_address = first_address
self.second_address = second_address
self.dynamic = dynamic
self.lookup_host = lookup_host
self.first_conn = None
self.second_conn = None
self.error = None
self.first_sender = None
self.first_receiver = None
self.second_sender = None
self.second_receiver = None
self.poll_timer = None
self.count = 10
self.n_sent = 0
self.n_rcvd = 0
self.n_settled = 0
def timeout(self):
self.error = "Timeout Expired: n_sent=%d n_rcvd=%d n_settled=%d" % (self.n_sent, self.n_rcvd, self.n_settled)
self.first_conn.close()
self.second_conn.close()
self.lookup_conn.close()
if self.poll_timer:
self.poll_timer.cancel()
def poll_timeout(self):
self.poll()
def fail(self, text):
self.error = text
self.second_conn.close()
self.first_conn.close()
self.timer.cancel()
self.lookup_conn.close()
if self.poll_timer:
self.poll_timer.cancel()
def send(self):
while self.first_sender.credit > 0 and self.n_sent < self.count:
self.n_sent += 1
m = Message(body="Message %d of %d" % (self.n_sent, self.count))
self.first_sender.send(m)
def poll(self):
request = self.proxy.read_address("Dhosted-group-1/link")
self.agent_sender.send(request)
def setup_first_links(self, event):
self.first_sender = event.container.create_sender(self.first_conn, self.first_address)
if self.dynamic:
self.first_receiver = event.container.create_receiver(self.first_conn,
dynamic=True,
options=DynamicNodeProperties({"x-opt-qd.address":
UNICODE(self.first_address)}))
else:
self.first_receiver = event.container.create_receiver(self.first_conn, self.first_address)
def on_start(self, event):
self.timer = event.reactor.schedule(TIMEOUT, TestTimeout(self))
self.first_conn = event.container.connect(self.first_host)
self.second_conn = event.container.connect(self.second_host)
self.lookup_conn = event.container.connect(self.lookup_host)
self.reply_receiver = event.container.create_receiver(self.lookup_conn, dynamic=True)
self.agent_sender = event.container.create_sender(self.lookup_conn, "$management")
def on_link_opening(self, event):
if event.sender:
self.second_sender = event.sender
if self.dynamic:
if event.sender.remote_source.dynamic:
event.sender.source.address = self.second_address
event.sender.open()
else:
self.fail("Expected dynamic source on sender")
else:
if event.sender.remote_source.address == self.second_address:
event.sender.source.address = self.second_address
event.sender.open()
else:
self.fail("Incorrect address on incoming sender: got %s, expected %s" %
(event.sender.remote_source.address, self.second_address))
elif event.receiver:
self.second_receiver = event.receiver
if event.receiver.remote_target.address == self.second_address:
event.receiver.target.address = self.second_address
event.receiver.open()
else:
self.fail("Incorrect address on incoming receiver: got %s, expected %s" %
(event.receiver.remote_target.address, self.second_address))
def on_link_opened(self, event):
if event.receiver:
event.receiver.flow(self.count)
if event.receiver == self.reply_receiver:
self.proxy = RouterProxy(self.reply_receiver.remote_source.address)
self.poll()
def on_sendable(self, event):
if event.sender == self.first_sender:
self.send()
def on_message(self, event):
if event.receiver == self.first_receiver:
self.n_rcvd += 1
if event.receiver == self.reply_receiver:
response = self.proxy.response(event.message)
if response.status_code == 200 and (response.remoteCount + response.containerCount) > 0:
if self.poll_timer:
self.poll_timer.cancel()
self.poll_timer = None
self.setup_first_links(event)
else:
self.poll_timer = event.reactor.schedule(0.25, PollTimeout(self))
def on_settled(self, event):
if event.sender == self.first_sender:
self.n_settled += 1
if self.n_settled == self.count:
self.fail(None)
def run(self):
container = Container(self)
container.container_id = 'LRC'
container.run()
class WaypointTest(MessagingHandler):
def __init__(self, first_host, second_host, first_address, second_address, container_id="ALC"):
super(WaypointTest, self).__init__()
self.first_host = first_host
self.second_host = second_host
self.first_address = first_address
self.second_address = second_address
self.container_id = container_id
self.logger = Logger(title="WaypointTest")
self.first_conn = None
self.second_conn = None
self.error = None
self.first_sender = None
self.first_sender_created = False
self.first_sender_link_opened = False
self.first_receiver = None
self.first_receiver_created = False
self.waypoint_sender = None
self.waypoint_receiver = None
self.waypoint_queue = []
self.waypoint_sender_opened = False
self.waypoint_receiver_opened = False
self.firsts_created = False
self.count = 10
self.n_sent = 0
self.n_rcvd = 0
self.n_waypoint_rcvd = 0
self.n_thru = 0
self.outs = None
def timeout(self):
self.error = "Timeout Expired: n_sent=%d n_rcvd=%d n_thru=%d n_waypoint_rcvd=%d" % (self.n_sent, self.n_rcvd, self.n_thru, self.n_waypoint_rcvd)
self.first_conn.close()
self.second_conn.close()
self.logger.dump()
def fail(self, text):
self.error = text
self.second_conn.close()
self.first_conn.close()
self.timer.cancel()
self.outs = "n_sent=%d n_rcvd=%d n_thru=%d n_waypoint_rcvd=%d" % (self.n_sent, self.n_rcvd, self.n_thru, self.n_waypoint_rcvd)
print(self.outs)
def send_client(self):
while self.first_sender.credit > 0 and self.n_sent < self.count:
self.n_sent += 1
m = Message(body="Message %d of %d" % (self.n_sent, self.count))
self.first_sender.send(m)
def send_waypoint(self):
self.logger.log("send_waypoint called")
while self.waypoint_sender.credit > 0 and len(self.waypoint_queue) > 0:
self.n_thru += 1
m = self.waypoint_queue.pop()
self.waypoint_sender.send(m)
self.logger.log("waypoint_sender message sent")
else:
self.logger.log("waypoint_sender did not sent - credit = %s, len(self.waypoint_queue) = %s" % (str(self.waypoint_sender.credit), str(len(self.waypoint_queue))))
def on_start(self, event):
self.timer = event.reactor.schedule(TIMEOUT, TestTimeout(self))
self.first_conn = event.container.connect(self.first_host)
self.second_conn = event.container.connect(self.second_host)
def on_link_flow(self, event):
if event.sender == self.waypoint_sender and self.first_sender_link_opened and not self.first_sender_created:
self.first_sender_created = True
self.first_sender = event.container.create_sender(self.first_conn, self.first_address)
def on_link_opened(self, event):
if event.receiver == self.waypoint_receiver and not self.first_sender_link_opened:
self.first_sender_link_opened = True
def on_link_opening(self, event):
if event.sender and not self.waypoint_sender:
self.waypoint_sender = event.sender
if event.sender.remote_source.address == self.second_address:
event.sender.source.address = self.second_address
event.sender.open()
self.waypoint_sender_opened = True
else:
self.fail("Incorrect address on incoming sender: got %s, expected %s" %
(event.sender.remote_source.address, self.second_address))
elif event.receiver and not self.waypoint_receiver:
self.waypoint_receiver = event.receiver
if event.receiver.remote_target.address == self.second_address:
event.receiver.target.address = self.second_address
event.receiver.open()
self.waypoint_receiver_opened = True
else:
self.fail("Incorrect address on incoming receiver: got %s, expected %s" %
(event.receiver.remote_target.address, self.second_address))
if self.waypoint_sender_opened and self.waypoint_receiver_opened and not self.first_receiver_created:
self.first_receiver_created = True
self.first_receiver = event.container.create_receiver(self.first_conn, self.first_address)
def on_sendable(self, event):
if event.sender == self.first_sender:
self.send_client()
def on_message(self, event):
if event.receiver == self.first_receiver:
self.n_rcvd += 1
if self.n_rcvd == self.count and self.n_thru == self.count:
self.fail(None)
elif event.receiver == self.waypoint_receiver:
self.n_waypoint_rcvd += 1
m = Message(body=event.message.body)
self.waypoint_queue.append(m)
self.send_waypoint()
def run(self):
container = Container(self)
container.container_id = self.container_id
container.run()
if __name__ == '__main__':
unittest.main(main_module())
|
LOCAL_VLAN_ID = -2
FLAT_VLAN_ID = -1
TYPE_IB = 'ib'
VIF_TYPE_DIRECT = 'mlnx_direct'
VIF_TYPE_HOSTDEV = 'hostdev'
VNIC_TYPE = 'vnic_type'
|
"""Constants for the Fronius integration."""
from typing import Final, NamedTuple, TypedDict
from homeassistant.helpers.entity import DeviceInfo
DOMAIN: Final = "fronius"
SolarNetId = str
SOLAR_NET_ID_POWER_FLOW: SolarNetId = "power_flow"
SOLAR_NET_ID_SYSTEM: SolarNetId = "system"
class FroniusConfigEntryData(TypedDict):
"""ConfigEntry for the Fronius integration."""
host: str
is_logger: bool
class FroniusDeviceInfo(NamedTuple):
"""Information about a Fronius inverter device."""
device_info: DeviceInfo
solar_net_id: SolarNetId
unique_id: str
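# Construction sketch (values are illustrative, not taken from a real device):
#
#     entry_data: FroniusConfigEntryData = {"host": "192.0.2.10", "is_logger": True}
#     device = FroniusDeviceInfo(
#         device_info=DeviceInfo(name="Symo 8.2-3-M"),
#         solar_net_id=SolarNetId("1"),
#         unique_id="1234567",
#     )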
|
import requests
import responses
from zerver.lib.cache import cache_delete
from zerver.lib.github import InvalidPlatform, get_latest_github_release_download_link_for_platform
from zerver.lib.test_classes import ZulipTestCase
logger_string = "zerver.lib.github"
class GitHubTestCase(ZulipTestCase):
@responses.activate
def test_get_latest_github_release_download_link_for_platform(self) -> None:
responses.add(
responses.GET,
"https://api.github.com/repos/zulip/zulip-desktop/releases/latest",
json={"tag_name": "v5.4.3"},
status=200,
)
responses.add(
responses.HEAD,
"https://desktop-download.zulip.com/v5.4.3/Zulip-Web-Setup-5.4.3.exe",
status=302,
)
self.assertEqual(
get_latest_github_release_download_link_for_platform("windows"),
"https://desktop-download.zulip.com/v5.4.3/Zulip-Web-Setup-5.4.3.exe",
)
responses.add(
responses.HEAD,
"https://desktop-download.zulip.com/v5.4.3/Zulip-5.4.3-x86_64.AppImage",
status=302,
)
self.assertEqual(
get_latest_github_release_download_link_for_platform("linux"),
"https://desktop-download.zulip.com/v5.4.3/Zulip-5.4.3-x86_64.AppImage",
)
responses.add(
responses.HEAD,
"https://desktop-download.zulip.com/v5.4.3/Zulip-5.4.3-x64.dmg",
status=302,
)
self.assertEqual(
get_latest_github_release_download_link_for_platform("mac"),
"https://desktop-download.zulip.com/v5.4.3/Zulip-5.4.3-x64.dmg",
)
api_url = "https://api.github.com/repos/zulip/zulip-desktop/releases/latest"
responses.replace(responses.GET, api_url, body=requests.RequestException())
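# The download link is cached per platform; clear it so the next call
# hits the (now failing) GitHub API instead of the cached value.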
cache_delete("download_link:windows")
with self.assertLogs(logger_string, level="ERROR") as error_log:
self.assertEqual(
get_latest_github_release_download_link_for_platform("windows"),
"https://github.com/zulip/zulip-desktop/releases/latest",
)
self.assertIn(
f"ERROR:{logger_string}:Unable to fetch the latest release version from GitHub {api_url}",
error_log.output[0],
)
responses.replace(
responses.GET,
"https://api.github.com/repos/zulip/zulip-desktop/releases/latest",
json={"tag_name": "5.4.4"},
status=200,
)
download_link = "https://desktop-download.zulip.com/v5.4.4/Zulip-5.4.4-x86_64.AppImage"
responses.add(responses.HEAD, download_link, status=404)
cache_delete("download_link:linux")
with self.assertLogs(logger_string, level="ERROR") as error_log:
self.assertEqual(
get_latest_github_release_download_link_for_platform("linux"),
"https://github.com/zulip/zulip-desktop/releases/latest",
)
self.assertEqual(
error_log.output,
[f"ERROR:{logger_string}:App download link is broken {download_link}"],
)
with self.assertRaises(InvalidPlatform):
get_latest_github_release_download_link_for_platform("plan9")
|
import multiprocessing
import os
import time
import unittest
import pytest
from mock import patch
from airflow import AirflowException, models, settings
from airflow.executors.sequential_executor import SequentialExecutor
from airflow.jobs import LocalTaskJob
from airflow.models import DAG, TaskInstance as TI
from airflow.operators.dummy_operator import DummyOperator
from airflow.utils import timezone
from airflow.utils.net import get_hostname
from airflow.utils.session import create_session
from airflow.utils.state import State
from tests.test_utils.db import clear_db_runs
from tests.test_utils.mock_executor import MockExecutor
DEFAULT_DATE = timezone.datetime(2016, 1, 1)
TEST_DAG_FOLDER = os.environ['AIRFLOW__CORE__DAGS_FOLDER']
class TestLocalTaskJob(unittest.TestCase):
def setUp(self):
clear_db_runs()
patcher = patch('airflow.jobs.base_job.sleep')
self.addCleanup(patcher.stop)
self.mock_base_job_sleep = patcher.start()
def test_localtaskjob_essential_attr(self):
"""
Check whether essential attributes
of LocalTaskJob can be assigned with
proper values without intervention
"""
dag = DAG(
'test_localtaskjob_essential_attr',
start_date=DEFAULT_DATE,
default_args={'owner': 'owner1'})
with dag:
op1 = DummyOperator(task_id='op1')
dag.clear()
dr = dag.create_dagrun(run_id="test",
state=State.SUCCESS,
execution_date=DEFAULT_DATE,
start_date=DEFAULT_DATE)
ti = dr.get_task_instance(task_id=op1.task_id)
job1 = LocalTaskJob(task_instance=ti,
ignore_ti_state=True,
executor=SequentialExecutor())
essential_attr = ["dag_id", "job_type", "start_date", "hostname"]
check_result_1 = [hasattr(job1, attr) for attr in essential_attr]
self.assertTrue(all(check_result_1))
check_result_2 = [getattr(job1, attr) is not None for attr in essential_attr]
self.assertTrue(all(check_result_2))
@patch('os.getpid')
def test_localtaskjob_heartbeat(self, mock_pid):
session = settings.Session()
dag = DAG(
'test_localtaskjob_heartbeat',
start_date=DEFAULT_DATE,
default_args={'owner': 'owner1'})
with dag:
op1 = DummyOperator(task_id='op1')
dag.clear()
dr = dag.create_dagrun(run_id="test",
state=State.SUCCESS,
execution_date=DEFAULT_DATE,
start_date=DEFAULT_DATE,
session=session)
ti = dr.get_task_instance(task_id=op1.task_id, session=session)
ti.state = State.RUNNING
ti.hostname = "blablabla"
session.commit()
job1 = LocalTaskJob(task_instance=ti,
ignore_ti_state=True,
executor=SequentialExecutor())
self.assertRaises(AirflowException, job1.heartbeat_callback)
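# With a mismatched hostname the heartbeat must raise; now align the
# hostname and pid so the callback passes, then break the pid again.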
mock_pid.return_value = 1
ti.state = State.RUNNING
ti.hostname = get_hostname()
ti.pid = 1
session.merge(ti)
session.commit()
job1.heartbeat_callback(session=None)
mock_pid.return_value = 2
self.assertRaises(AirflowException, job1.heartbeat_callback)
@patch('os.getpid')
def test_heartbeat_failed_fast(self, mock_getpid):
"""
Test that task heartbeat will sleep when it fails fast
"""
mock_getpid.return_value = 1
self.mock_base_job_sleep.side_effect = time.sleep
with create_session() as session:
dagbag = models.DagBag(
dag_folder=TEST_DAG_FOLDER,
include_examples=False,
)
dag_id = 'test_heartbeat_failed_fast'
task_id = 'test_heartbeat_failed_fast_op'
dag = dagbag.get_dag(dag_id)
task = dag.get_task(task_id)
dag.create_dagrun(run_id="test_heartbeat_failed_fast_run",
state=State.RUNNING,
execution_date=DEFAULT_DATE,
start_date=DEFAULT_DATE,
session=session)
ti = TI(task=task, execution_date=DEFAULT_DATE)
ti.refresh_from_db()
ti.state = State.RUNNING
ti.hostname = get_hostname()
ti.pid = 1
session.commit()
job = LocalTaskJob(task_instance=ti, executor=MockExecutor(do_update=False))
job.heartrate = 2
heartbeat_records = []
job.heartbeat_callback = lambda session: heartbeat_records.append(job.latest_heartbeat)
job._execute()
self.assertGreater(len(heartbeat_records), 2)
for i in range(1, len(heartbeat_records)):
time1 = heartbeat_records[i - 1]
time2 = heartbeat_records[i]
# Assert that the difference is small enough
delta = (time2 - time1).total_seconds()
self.assertAlmostEqual(delta, job.heartrate, delta=0.05)
@pytest.mark.xfail(condition=True, reason="This test might be flaky in postgres/mysql")
def test_mark_success_no_kill(self):
"""
Test that ensures that mark_success in the UI doesn't cause
the task to fail, and that the task exits
"""
dagbag = models.DagBag(
dag_folder=TEST_DAG_FOLDER,
include_examples=False,
)
dag = dagbag.dags.get('test_mark_success')
task = dag.get_task('task1')
session = settings.Session()
dag.clear()
dag.create_dagrun(run_id="test",
state=State.RUNNING,
execution_date=DEFAULT_DATE,
start_date=DEFAULT_DATE,
session=session)
ti = TI(task=task, execution_date=DEFAULT_DATE)
ti.refresh_from_db()
job1 = LocalTaskJob(task_instance=ti, ignore_ti_state=True)
process = multiprocessing.Process(target=job1.run)
process.start()
ti.refresh_from_db()
for _ in range(0, 50):
if ti.state == State.RUNNING:
break
time.sleep(0.1)
ti.refresh_from_db()
self.assertEqual(State.RUNNING, ti.state)
ti.state = State.SUCCESS
session.merge(ti)
session.commit()
process.join(timeout=10)
self.assertFalse(process.is_alive())
ti.refresh_from_db()
self.assertEqual(State.SUCCESS, ti.state)
def test_localtaskjob_double_trigger(self):
dagbag = models.DagBag(
dag_folder=TEST_DAG_FOLDER,
include_examples=False,
)
dag = dagbag.dags.get('test_localtaskjob_double_trigger')
task = dag.get_task('test_localtaskjob_double_trigger_task')
session = settings.Session()
dag.clear()
dr = dag.create_dagrun(run_id="test",
state=State.SUCCESS,
execution_date=DEFAULT_DATE,
start_date=DEFAULT_DATE,
session=session)
ti = dr.get_task_instance(task_id=task.task_id, session=session)
ti.state = State.RUNNING
ti.hostname = get_hostname()
ti.pid = 1
session.merge(ti)
session.commit()
ti_run = TI(task=task, execution_date=DEFAULT_DATE)
ti_run.refresh_from_db()
job1 = LocalTaskJob(task_instance=ti_run,
executor=SequentialExecutor())
from airflow.task.task_runner.standard_task_runner import StandardTaskRunner
with patch.object(StandardTaskRunner, 'start', return_value=None) as mock_method:
job1.run()
mock_method.assert_not_called()
ti = dr.get_task_instance(task_id=task.task_id, session=session)
self.assertEqual(ti.pid, 1)
self.assertEqual(ti.state, State.RUNNING)
session.close()
def test_localtaskjob_maintain_heart_rate(self):
dagbag = models.DagBag(
dag_folder=TEST_DAG_FOLDER,
include_examples=False,
)
dag = dagbag.dags.get('test_localtaskjob_double_trigger')
task = dag.get_task('test_localtaskjob_double_trigger_task')
session = settings.Session()
dag.clear()
dag.create_dagrun(run_id="test",
state=State.SUCCESS,
execution_date=DEFAULT_DATE,
start_date=DEFAULT_DATE,
session=session)
ti_run = TI(task=task, execution_date=DEFAULT_DATE)
ti_run.refresh_from_db()
job1 = LocalTaskJob(task_instance=ti_run,
executor=SequentialExecutor())
# this should make sure we only heartbeat once and exit at the second
# loop in _execute()
return_codes = [None, 0]
def multi_return_code():
return return_codes.pop(0)
time_start = time.time()
from airflow.task.task_runner.standard_task_runner import StandardTaskRunner
with patch.object(StandardTaskRunner, 'start', return_value=None) as mock_start:
with patch.object(StandardTaskRunner, 'return_code') as mock_ret_code:
mock_ret_code.side_effect = multi_return_code
job1.run()
self.assertEqual(mock_start.call_count, 1)
self.assertEqual(mock_ret_code.call_count, 2)
time_end = time.time()
self.assertEqual(self.mock_base_job_sleep.call_count, 1)
self.assertEqual(job1.state, State.SUCCESS)
# Consider we have patched sleep call, it should not be sleeping to
# keep up with the heart rate in other unpatched places
#
# We already make sure patched sleep call is only called once
self.assertLess(time_end - time_start, job1.heartrate)
session.close()
def test_mark_failure_on_failure_callback(self):
"""
Test that ensures that mark_failure in the UI fails
the task, and executes on_failure_callback
"""
data = {'called': False}
def check_failure(context):
self.assertEqual(context['dag_run'].dag_id,
'test_mark_failure')
data['called'] = True
dag = DAG(dag_id='test_mark_failure',
start_date=DEFAULT_DATE,
default_args={'owner': 'owner1'})
task = DummyOperator(
task_id='test_state_succeeded1',
dag=dag,
on_failure_callback=check_failure)
session = settings.Session()
dag.clear()
dag.create_dagrun(run_id="test",
state=State.RUNNING,
execution_date=DEFAULT_DATE,
start_date=DEFAULT_DATE,
session=session)
ti = TI(task=task, execution_date=DEFAULT_DATE)
ti.refresh_from_db()
job1 = LocalTaskJob(task_instance=ti,
ignore_ti_state=True,
executor=SequentialExecutor())
from airflow.task.task_runner.standard_task_runner import StandardTaskRunner
job1.task_runner = StandardTaskRunner(job1)
process = multiprocessing.Process(target=job1.run)
process.start()
ti.refresh_from_db()
for _ in range(0, 50):
if ti.state == State.RUNNING:
break
time.sleep(0.1)
ti.refresh_from_db()
self.assertEqual(State.RUNNING, ti.state)
ti.state = State.FAILED
session.merge(ti)
session.commit()
job1.heartbeat_callback(session=None)
self.assertTrue(data['called'])
process.join(timeout=10)
self.assertFalse(process.is_alive())
|
"""
Support for tracking MQTT enabled devices.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/device_tracker.mqtt/
"""
import logging
import voluptuous as vol
from homeassistant.components import mqtt
from homeassistant.core import callback
from homeassistant.const import CONF_DEVICES
from homeassistant.components.mqtt import CONF_QOS
from homeassistant.components.device_tracker import PLATFORM_SCHEMA
import homeassistant.helpers.config_validation as cv
DEPENDENCIES = ['mqtt']
_LOGGER = logging.getLogger(__name__)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(mqtt.SCHEMA_BASE).extend({
vol.Required(CONF_DEVICES): {cv.string: mqtt.valid_subscribe_topic},
})
async def async_setup_scanner(hass, config, async_see, discovery_info=None):
"""Set up the MQTT tracker."""
devices = config[CONF_DEVICES]
qos = config[CONF_QOS]
for dev_id, topic in devices.items():
@callback
def async_message_received(topic, payload, qos, dev_id=dev_id):
"""Handle received MQTT message."""
hass.async_create_task(
async_see(dev_id=dev_id, location_name=payload))
await mqtt.async_subscribe(
hass, topic, async_message_received, qos)
return True
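# Illustrative configuration sketch (an assumption, not taken from this file):
# the PLATFORM_SCHEMA above expects CONF_DEVICES to map device ids to MQTT
# subscribe topics, so a matching YAML entry might look like:
#
#   device_tracker:
#     - platform: mqtt
#       devices:
#         my_phone: location/my_phone
#         my_tablet: location/my_tablet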
|
from pywin.mfc import dialog
import win32ui
import win32con
def MakeDlgTemplate():
style = (win32con.DS_MODALFRAME |
win32con.WS_POPUP |
win32con.WS_VISIBLE |
win32con.WS_CAPTION |
win32con.WS_SYSMENU |
win32con.DS_SETFONT)
cs = (win32con.WS_CHILD |
win32con.WS_VISIBLE)
w = 215
h = 36
dlg = [["Progress bar control example",
(0, 0, w, h),
style,
None,
(8, "MS Sans Serif")],
]
s = win32con.WS_TABSTOP | cs
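    # Each control entry below is [window class, title, control id,
    # (x, y, w, h), style]; 128 (0x0080) is the predefined dialog-template
    # atom for the Button window class.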
dlg.append([128,
"Tick",
win32con.IDOK,
(10, h - 18, 50, 14), s | win32con.BS_DEFPUSHBUTTON])
dlg.append([128,
"Cancel",
win32con.IDCANCEL,
(w - 60, h - 18, 50, 14), s | win32con.BS_PUSHBUTTON])
return dlg
class TestDialog(dialog.Dialog):
def OnInitDialog(self):
rc = dialog.Dialog.OnInitDialog(self)
self.pbar = win32ui.CreateProgressCtrl()
        self.pbar.CreateWindow(win32con.WS_CHILD |
                               win32con.WS_VISIBLE,
                               (10, 10, 310, 24),
                               self, 1001)
# self.pbar.SetStep (5)
self.progress = 0
self.pincr = 5
return rc
def OnOK(self):
# NB: StepIt wraps at the end if you increment past the upper limit!
# self.pbar.StepIt()
self.progress = self.progress + self.pincr
if self.progress > 100:
self.progress = 100
if self.progress <= 100:
self.pbar.SetPos(self.progress)
def demo(modal=0):
    d = TestDialog(MakeDlgTemplate())
    if modal:
        d.DoModal()
    else:
        d.CreateWindow()
if __name__=='__main__':
demo(1)
|
"""
Support for Unifi WAP controllers.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/device_tracker.unifi/
"""
import logging
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from homeassistant.components.device_tracker import (
DOMAIN, PLATFORM_SCHEMA, DeviceScanner)
from homeassistant.const import CONF_HOST, CONF_USERNAME, CONF_PASSWORD
from homeassistant.const import CONF_VERIFY_SSL
REQUIREMENTS = ['pyunifi==2.13']
_LOGGER = logging.getLogger(__name__)
CONF_PORT = 'port'
CONF_SITE_ID = 'site_id'
DEFAULT_HOST = 'localhost'
DEFAULT_PORT = 8443
DEFAULT_VERIFY_SSL = True
NOTIFICATION_ID = 'unifi_notification'
NOTIFICATION_TITLE = 'Unifi Device Tracker Setup'
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Optional(CONF_HOST, default=DEFAULT_HOST): cv.string,
vol.Optional(CONF_SITE_ID, default='default'): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
vol.Required(CONF_USERNAME): cv.string,
vol.Required(CONF_PORT, default=DEFAULT_PORT): cv.port,
vol.Optional(CONF_VERIFY_SSL, default=DEFAULT_VERIFY_SSL): cv.boolean,
})
def get_scanner(hass, config):
"""Set up the Unifi device_tracker."""
from pyunifi.controller import Controller, APIError
host = config[DOMAIN].get(CONF_HOST)
username = config[DOMAIN].get(CONF_USERNAME)
password = config[DOMAIN].get(CONF_PASSWORD)
site_id = config[DOMAIN].get(CONF_SITE_ID)
port = config[DOMAIN].get(CONF_PORT)
verify_ssl = config[DOMAIN].get(CONF_VERIFY_SSL)
try:
ctrl = Controller(host, username, password, port, version='v4',
site_id=site_id, ssl_verify=verify_ssl)
except APIError as ex:
_LOGGER.error("Failed to connect to Unifi: %s", ex)
hass.components.persistent_notification.create(
'Failed to connect to Unifi. '
'Error: {}<br />'
'You will need to restart hass after fixing.'
''.format(ex),
title=NOTIFICATION_TITLE,
notification_id=NOTIFICATION_ID)
return False
return UnifiScanner(ctrl)
class UnifiScanner(DeviceScanner):
"""Provide device_tracker support from Unifi WAP client data."""
def __init__(self, controller):
"""Initialize the scanner."""
self._controller = controller
self._update()
def _update(self):
"""Get the clients from the device."""
from pyunifi.controller import APIError
try:
clients = self._controller.get_clients()
except APIError as ex:
_LOGGER.error("Failed to scan clients: %s", ex)
clients = []
self._clients = {client['mac']: client for client in clients}
def scan_devices(self):
"""Scan for devices."""
self._update()
return self._clients.keys()
def get_device_name(self, mac):
"""Return the name (if known) of the device.
If a name has been set in Unifi, then return that, else
return the hostname if it has been detected.
"""
client = self._clients.get(mac, {})
name = client.get('name') or client.get('hostname')
_LOGGER.debug("Device %s name %s", mac, name)
return name
|
from twisted.internet import reactor
from twisted.spread import pb
from twisted.cred.credentials import UsernamePassword
from pbecho import DefinedError
def success(message):
print "Message received:",message
# reactor.stop()
def failure(error):
t = error.trap(DefinedError)
print "error received:", t
reactor.stop()
def connected(perspective):
perspective.callRemote('echo', "hello world").addCallbacks(success, failure)
perspective.callRemote('error').addCallbacks(success, failure)
print "connected."
factory = pb.PBClientFactory()
reactor.connectTCP("localhost", pb.portno, factory)
factory.login(
UsernamePassword("guest", "guest")).addCallbacks(connected, failure)
reactor.run()
|
"""Tests for Python ops defined in math_grad.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.eager import execution_callbacks
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import gradient_checker_v2
from tensorflow.python.ops import gradients
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
RAISE = execution_callbacks.ExecutionCallback.RAISE
class SquaredDifferenceOpTest(test.TestCase):
def _testGrad(self, left_shape, right_shape):
if len(left_shape) > len(right_shape):
output_shape = left_shape
else:
output_shape = right_shape
l = np.random.randn(*left_shape)
r = np.random.randn(*right_shape)
with self.cached_session(use_gpu=True):
left_tensor = constant_op.constant(l, shape=left_shape)
right_tensor = constant_op.constant(r, shape=right_shape)
output = math_ops.squared_difference(left_tensor, right_tensor)
left_err = gradient_checker.compute_gradient_error(
left_tensor, left_shape, output, output_shape, x_init_value=l)
right_err = gradient_checker.compute_gradient_error(
right_tensor, right_shape, output, output_shape, x_init_value=r)
self.assertLess(left_err, 1e-10)
self.assertLess(right_err, 1e-10)
@test_util.run_deprecated_v1
def testGrad(self):
self._testGrad([1, 2, 3, 2], [3, 2])
self._testGrad([2, 4], [3, 2, 4])
class AbsOpTest(test.TestCase):
def _biasedRandN(self, shape, bias=0.1, sigma=1.0):
"""Returns samples from a normal distribution shifted `bias` away from 0."""
value = np.random.randn(*shape) * sigma
return value + np.sign(value) * bias
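    # e.g. with bias=0.1 a draw of 0.3 becomes 0.4 and -0.3 becomes -0.4,
    # keeping samples away from the non-differentiable point of abs() at 0.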
def _testGrad(self, shape, dtype=None, max_error=None, bias=None, sigma=None):
np.random.seed(7)
if dtype in (dtypes.complex64, dtypes.complex128):
value = math_ops.complex(
self._biasedRandN(
shape, bias=bias, sigma=sigma),
self._biasedRandN(
shape, bias=bias, sigma=sigma))
else:
value = ops.convert_to_tensor(
self._biasedRandN(
shape, bias=bias), dtype=dtype)
with self.cached_session(use_gpu=True):
output = math_ops.abs(value)
error = gradient_checker.compute_gradient_error(
value, shape, output, output.get_shape().as_list())
self.assertLess(error, max_error)
@test_util.run_deprecated_v1
def testComplexAbs(self):
# Bias random test values away from zero to avoid numeric instabilities.
self._testGrad(
[3, 3], dtype=dtypes.float32, max_error=2e-5, bias=0.1, sigma=1.0)
self._testGrad(
[3, 3], dtype=dtypes.complex64, max_error=2e-5, bias=0.1, sigma=1.0)
# Ensure stability near the pole at zero.
self._testGrad(
[3, 3], dtype=dtypes.float32, max_error=100.0, bias=0.0, sigma=0.1)
self._testGrad(
[3, 3], dtype=dtypes.complex64, max_error=100.0, bias=0.0, sigma=0.1)
class MinOrMaxGradientTest(test.TestCase):
@test_util.run_deprecated_v1
def testMinGradient(self):
inputs = constant_op.constant([1.0], dtype=dtypes.float32)
outputs = math_ops.reduce_min(array_ops.concat([inputs, inputs], 0))
with self.cached_session():
error = gradient_checker.compute_gradient_error(inputs, [1], outputs, [])
self.assertLess(error, 1e-4)
@test_util.run_deprecated_v1
def testMaxGradient(self):
inputs = constant_op.constant([1.0], dtype=dtypes.float32)
outputs = math_ops.reduce_max(array_ops.concat([inputs, inputs], 0))
with self.cached_session():
error = gradient_checker.compute_gradient_error(inputs, [1], outputs, [])
self.assertLess(error, 1e-4)
class MaximumOrMinimumGradientTest(test.TestCase):
@test_util.run_deprecated_v1
def testMaximumGradient(self):
inputs = constant_op.constant([1.0, 2.0, 3.0, 4.0], dtype=dtypes.float32)
outputs = math_ops.maximum(inputs, 3.0)
with self.cached_session():
error = gradient_checker.compute_gradient_error(inputs, [4], outputs, [4])
self.assertLess(error, 1e-4)
@test_util.run_deprecated_v1
def testMinimumGradient(self):
inputs = constant_op.constant([1.0, 2.0, 3.0, 4.0], dtype=dtypes.float32)
outputs = math_ops.minimum(inputs, 2.0)
with self.cached_session():
error = gradient_checker.compute_gradient_error(inputs, [4], outputs, [4])
self.assertLess(error, 1e-4)
class ProdGradientTest(test.TestCase):
@test_util.run_deprecated_v1
def testProdGradient(self):
inputs = constant_op.constant([[1., 2.], [3., 4.]],
dtype=dtypes.float32)
outputs = math_ops.reduce_prod(inputs)
with self.cached_session():
error = gradient_checker.compute_gradient_error(
inputs, inputs.get_shape().as_list(),
outputs, outputs.get_shape().as_list())
self.assertLess(error, 1e-4)
@test_util.run_deprecated_v1
def testProdGradientForNegativeAxis(self):
inputs = constant_op.constant([[1., 2.], [3., 4.]],
dtype=dtypes.float32)
outputs = math_ops.reduce_prod(inputs, -1)
with self.cached_session():
error = gradient_checker.compute_gradient_error(
inputs, inputs.get_shape().as_list(),
outputs, outputs.get_shape().as_list())
self.assertLess(error, 1e-4)
@test_util.run_deprecated_v1
def testProdGradientComplex(self):
for dtype in dtypes.complex64, dtypes.complex128:
inputs = constant_op.constant([[1 + 3j, 2 - 1j], [3j, 4]],
dtype=dtype)
outputs = math_ops.reduce_prod(inputs)
with self.cached_session():
error = gradient_checker.compute_gradient_error(
inputs, inputs.get_shape().as_list(),
outputs, outputs.get_shape().as_list())
self.assertLess(error, 1e-4)
@test_util.run_deprecated_v1
def testProdGradientForNegativeAxisComplex(self):
for dtype in dtypes.complex64, dtypes.complex128:
inputs = constant_op.constant([[1 + 3j, 2 - 1j], [3j, 4]],
dtype=dtype)
outputs = math_ops.reduce_prod(inputs, -1)
with self.cached_session():
error = gradient_checker.compute_gradient_error(
inputs, inputs.get_shape().as_list(),
outputs, outputs.get_shape().as_list())
self.assertLess(error, 1e-4)
class SegmentMinOrMaxGradientTest(test.TestCase):
@test_util.run_deprecated_v1
def testSegmentMinGradient(self):
data = constant_op.constant([1.0, 2.0, 3.0], dtype=dtypes.float32)
segment_ids = constant_op.constant([0, 0, 1], dtype=dtypes.int64)
segment_min = math_ops.segment_min(data, segment_ids)
with self.cached_session():
error = gradient_checker.compute_gradient_error(data, [3], segment_min,
[2])
self.assertLess(error, 1e-4)
@test_util.run_deprecated_v1
def testSegmentMaxGradient(self):
data = constant_op.constant([1.0, 2.0, 3.0], dtype=dtypes.float32)
segment_ids = constant_op.constant([0, 0, 1], dtype=dtypes.int64)
segment_max = math_ops.segment_max(data, segment_ids)
with self.cached_session():
error = gradient_checker.compute_gradient_error(data, [3], segment_max,
[2])
self.assertLess(error, 1e-4)
@test_util.run_deprecated_v1
def testSegmentMinGradientWithTies(self):
inputs = constant_op.constant([1.0], dtype=dtypes.float32)
data = array_ops.concat([inputs, inputs], 0)
segment_ids = constant_op.constant([0, 0], dtype=dtypes.int64)
segment_min = math_ops.segment_min(data, segment_ids)
with self.cached_session():
error = gradient_checker.compute_gradient_error(inputs, [1], segment_min,
[1])
self.assertLess(error, 1e-4)
@test_util.run_deprecated_v1
def testSegmentMaxGradientWithTies(self):
inputs = constant_op.constant([1.0], dtype=dtypes.float32)
data = array_ops.concat([inputs, inputs], 0)
segment_ids = constant_op.constant([0, 0], dtype=dtypes.int64)
segment_max = math_ops.segment_max(data, segment_ids)
with self.cached_session():
error = gradient_checker.compute_gradient_error(inputs, [1], segment_max,
[1])
self.assertLess(error, 1e-4)
class FloorModGradientTest(test.TestCase):
@test_util.run_deprecated_v1
def testFloorModGradient(self):
# Making sure the input is not near the discontinuity point where
# x/y == floor(x/y)
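    # (131 / 17 is roughly 7.7, comfortably between integer multiples.)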
ns = constant_op.constant([17.], dtype=dtypes.float32)
inputs = constant_op.constant([131.], dtype=dtypes.float32)
floor_mod = math_ops.floormod(inputs, ns)
with self.cached_session():
error = gradient_checker.compute_gradient_error(inputs, [1],
floor_mod, [1])
self.assertLess(error, 1e-4)
class DivNoNanGradientTest(test.TestCase):
@test_util.run_deprecated_v1
def testBasicGradient(self):
inputs = constant_op.constant(np.arange(-3, 3),
dtype=dtypes.float32)
outputs = math_ops.div_no_nan(inputs, 1 + math_ops.abs(inputs))
with self.cached_session():
error = gradient_checker.compute_gradient_error(
inputs,
inputs.get_shape().as_list(), outputs,
outputs.get_shape().as_list())
self.assertLess(error, 1e-4)
@test_util.run_deprecated_v1
def testGradientWithDenominatorIsZero(self):
x = constant_op.constant(np.arange(-3, 3),
dtype=dtypes.float32)
y = array_ops.zeros_like(x,
dtype=dtypes.float32)
outputs = math_ops.div_no_nan(x, y)
with self.cached_session():
dx, dy = gradients.gradients(outputs, [x, y])
self.assertAllClose(dx.eval(), np.zeros(x.shape.as_list()))
self.assertAllClose(dy.eval(), np.zeros(y.shape.as_list()))
class XlogyTest(test.TestCase):
def _xlogy_gradients(self, x, y):
xlogy_xgrad = self.evaluate(gradients.gradients(math_ops.xlogy(x, y), x)[0])
xlogy_ygrad = self.evaluate(gradients.gradients(math_ops.xlogy(x, y), y)[0])
return xlogy_xgrad, xlogy_ygrad
@test_util.run_deprecated_v1
def testNonZeroValuesGrad(self):
for dtype in [dtypes.float16, dtypes.float32, dtypes.float64]:
x = constant_op.constant(0.1, dtype=dtype)
y = constant_op.constant(3.1, dtype=dtype)
xlogy_xgrad, xlogy_ygrad = self._xlogy_gradients(x, y)
xlogy_expected_xgrad = self.evaluate(math_ops.log(y))
xlogy_expected_ygrad = self.evaluate(x / y)
self.assertAllClose(xlogy_expected_xgrad, xlogy_xgrad)
self.assertAllClose(xlogy_expected_ygrad, xlogy_ygrad)
@test_util.run_deprecated_v1
def testZeroXGrad(self):
for dtype in [dtypes.float16, dtypes.float32, dtypes.float64]:
x = constant_op.constant(0., dtype=dtype)
y = constant_op.constant(3.1, dtype=dtype)
xlogy_xgrad, xlogy_ygrad = self._xlogy_gradients(x, y)
zero = self.evaluate(x)
self.assertAllClose(zero, xlogy_xgrad)
self.assertAllClose(zero, xlogy_ygrad)
@test_util.run_deprecated_v1
def testZeroYGrad(self):
for dtype in [dtypes.float16, dtypes.float32, dtypes.float64]:
x = constant_op.constant(0.1, dtype=dtype)
y = constant_op.constant(0., dtype=dtype)
xlogy_xgrad, xlogy_ygrad = self._xlogy_gradients(x, y)
self.assertAllClose(-np.inf, xlogy_xgrad)
self.assertAllClose(np.inf, xlogy_ygrad)
@test_util.run_deprecated_v1
def testZeroXYGrad(self):
for dtype in [dtypes.float16, dtypes.float32, dtypes.float64]:
x = constant_op.constant(0., dtype=dtype)
y = constant_op.constant(0., dtype=dtype)
xlogy_xgrad, xlogy_ygrad = self._xlogy_gradients(x, y)
zero = self.evaluate(x)
self.assertAllClose(zero, xlogy_xgrad)
self.assertAllClose(zero, xlogy_ygrad)
class XdivyTest(test.TestCase):
def _xdivy_gradients(self, x, y):
xdivy_xgrad = self.evaluate(gradients.gradients(math_ops.xdivy(x, y), x)[0])
xdivy_ygrad = self.evaluate(gradients.gradients(math_ops.xdivy(x, y), y)[0])
return xdivy_xgrad, xdivy_ygrad
@test_util.run_deprecated_v1
def testNonZeroValuesGrad(self):
for dtype in [dtypes.float16, dtypes.float32, dtypes.float64]:
x = constant_op.constant(0.1, dtype=dtype)
y = constant_op.constant(3.1, dtype=dtype)
xdivy_xgrad, xdivy_ygrad = self._xdivy_gradients(x, y)
xdivy_expected_xgrad = self.evaluate(1 / y)
xdivy_expected_ygrad = self.evaluate(-x / y**2)
self.assertAllClose(xdivy_expected_xgrad, xdivy_xgrad)
self.assertAllClose(xdivy_expected_ygrad, xdivy_ygrad)
@test_util.run_deprecated_v1
def testZeroXGrad(self):
for dtype in [dtypes.float16, dtypes.float32, dtypes.float64]:
x = constant_op.constant(0., dtype=dtype)
y = constant_op.constant(3.1, dtype=dtype)
xdivy_xgrad, xdivy_ygrad = self._xdivy_gradients(x, y)
zero = self.evaluate(x)
self.assertAllClose(zero, xdivy_xgrad)
self.assertAllClose(zero, xdivy_ygrad)
@test_util.run_deprecated_v1
def testZeroYGrad(self):
for dtype in [dtypes.float16, dtypes.float32, dtypes.float64]:
x = constant_op.constant(0.1, dtype=dtype)
y = constant_op.constant(0., dtype=dtype)
xdivy_xgrad, xdivy_ygrad = self._xdivy_gradients(x, y)
self.assertAllClose(np.inf, xdivy_xgrad)
self.assertAllClose(-np.inf, xdivy_ygrad)
@test_util.run_deprecated_v1
def testZeroXYGrad(self):
for dtype in [dtypes.float16, dtypes.float32, dtypes.float64]:
x = constant_op.constant(0., dtype=dtype)
y = constant_op.constant(0., dtype=dtype)
xdivy_xgrad, xdivy_ygrad = self._xdivy_gradients(x, y)
zero = self.evaluate(x)
self.assertAllClose(zero, xdivy_xgrad)
self.assertAllClose(zero, xdivy_ygrad)
@test_util.run_all_in_graph_and_eager_modes
class PowGradTest(test.TestCase):
def test_zero_grad_tf_gradients(self):
if context.executing_eagerly():
self.skipTest("tf.gradients not supported in eager.")
x = constant_op.constant([-1., 0., 1.])
g = self.evaluate(gradients.gradients(math_ops.pow(x, 2), x)[0])
self.assertAllClose([-2., 0., 2.], g)
def test_zero_grad_tape(self):
with execution_callbacks.errstate(inf_or_nan=RAISE):
x = constant_op.constant([-1, 0., 1.])
with backprop.GradientTape() as tape:
tape.watch(x)
g = tape.gradient(math_ops.pow(x, 2), x)
g = self.evaluate(g)
self.assertAllClose([-2., 0., 2.], g)
@test_util.run_all_in_graph_and_eager_modes
class NextAfterTest(test.TestCase):
def _nextafter_gradient(self, x1, x2):
with backprop.GradientTape() as tape:
tape.watch(x1)
tape.watch(x2)
y = math_ops.nextafter(x1, x2)
return tape.gradient(y, [x1, x2])
def testBasic(self):
for dtype in [dtypes.float32, dtypes.float64]:
x1 = constant_op.constant(0.1, dtype=dtype)
x2 = constant_op.constant(3.1, dtype=dtype)
dx1, dx2 = self._nextafter_gradient(x1, x2)
expected_dx1 = constant_op.constant(1, dtype=dtype)
expected_dx2 = constant_op.constant(0, dtype=dtype)
self.assertAllClose(expected_dx1, dx1)
self.assertAllClose(expected_dx2, dx2)
def testDynamicShapes(self):
for dtype in [dtypes.float32, dtypes.float64]:
default_x1 = constant_op.constant(0.1, dtype=dtype)
default_x2 = constant_op.constant(3.1, dtype=dtype)
x1 = array_ops.placeholder_with_default(default_x1, shape=None)
x2 = array_ops.placeholder_with_default(default_x2, shape=None)
dx1, dx2 = self._nextafter_gradient(x1, x2)
expected_dx1 = constant_op.constant(1, dtype=dtype)
expected_dx2 = constant_op.constant(0, dtype=dtype)
self.assertAllClose(expected_dx1, dx1)
self.assertAllClose(expected_dx2, dx2)
def testWithGradientChecker(self):
for dtype in [dtypes.float32, dtypes.float64]:
with self.cached_session():
x1 = np.array([-1, 0, 1, 2, 3], dtype=dtype.as_numpy_dtype)
x2 = np.array([2, 2, 2, 2, 2], dtype=dtype.as_numpy_dtype)
err = gradient_checker_v2.max_error(
*gradient_checker_v2.compute_gradient(
lambda x: math_ops.nextafter(x, x2), [x1])) # pylint: disable=cell-var-from-loop
self.assertLess(err, 1e-3)
def testBroadcastingWithGradientChecker(self):
for dtype in [dtypes.float32, dtypes.float64]:
with self.cached_session():
x1 = np.array([-1, 0, 1, 2, 3], dtype=dtype.as_numpy_dtype)
x2 = np.array([2], dtype=dtype.as_numpy_dtype)
err = gradient_checker_v2.max_error(
*gradient_checker_v2.compute_gradient(
lambda x: math_ops.nextafter(x, x2), [x1])) # pylint: disable=cell-var-from-loop
self.assertLess(err, 1e-3)
if __name__ == "__main__":
test.main()
|
from django.contrib import admin
from django.contrib.auth import logout
from django.contrib.redirects.models import Redirect
from django.core.exceptions import MiddlewareNotUsed
from django.core.urlresolvers import reverse
from django.http import (HttpResponse, HttpResponseRedirect,
HttpResponsePermanentRedirect, HttpResponseGone)
from django.utils.cache import get_max_age
from django.template import Template, RequestContext
from django.middleware.csrf import CsrfViewMiddleware, get_token
from mezzanine.conf import settings
from mezzanine.core.models import SitePermission
from mezzanine.utils.cache import (cache_key_prefix, nevercache_token,
cache_get, cache_set, cache_installed)
from mezzanine.utils.device import templates_for_device
from mezzanine.utils.sites import current_site_id, templates_for_host
_deprecated = {
"AdminLoginInterfaceSelector": "AdminLoginInterfaceSelectorMiddleware",
"DeviceAwareUpdateCacheMiddleware": "UpdateCacheMiddleware",
"DeviceAwareFetchFromCacheMiddleware": "FetchFromCacheMiddleware",
}
class _Deprecated(object):
def __init__(self, *args, **kwargs):
from warnings import warn
msg = "mezzanine.core.middleware.%s is deprecated." % self.old
if self.new:
msg += (" Please change the MIDDLEWARE_CLASSES setting to use "
"mezzanine.core.middleware.%s" % self.new)
warn(msg)
for old, new in _deprecated.items():
globals()[old] = type(old, (_Deprecated,), {"old": old, "new": new})
class AdminLoginInterfaceSelectorMiddleware(object):
"""
Checks for a POST from the admin login view and if authentication is
successful and the "site" interface is selected, redirect to the site.
"""
def process_view(self, request, view_func, view_args, view_kwargs):
login_type = request.POST.get("mezzanine_login_interface")
if login_type and not request.user.is_authenticated():
response = view_func(request, *view_args, **view_kwargs)
if request.user.is_authenticated():
if login_type == "admin":
next = request.get_full_path()
else:
next = request.GET.get("next", "/")
return HttpResponseRedirect(next)
else:
return response
return None
class SitePermissionMiddleware(object):
"""
Marks the current user with a ``has_site_permission`` which is
used in place of ``user.is_staff`` to achieve per-site staff
access.
"""
def process_view(self, request, view_func, view_args, view_kwargs):
has_site_permission = False
if request.user.is_superuser:
has_site_permission = True
elif request.user.is_staff:
lookup = {"user": request.user, "sites": current_site_id()}
try:
SitePermission.objects.get(**lookup)
except SitePermission.DoesNotExist:
admin_index = reverse("admin:index")
if request.path.startswith(admin_index):
logout(request)
view_func = admin.site.login
extra_context = {"no_site_permission": True}
return view_func(request, extra_context=extra_context)
else:
has_site_permission = True
request.user.has_site_permission = has_site_permission
class TemplateForDeviceMiddleware(object):
"""
Inserts device-specific templates to the template list.
"""
def process_template_response(self, request, response):
if hasattr(response, "template_name"):
templates = templates_for_device(request, response.template_name)
response.template_name = templates
return response
class TemplateForHostMiddleware(object):
"""
Inserts host-specific templates to the template list.
"""
def process_template_response(self, request, response):
if hasattr(response, "template_name"):
templates = templates_for_host(request, response.template_name)
response.template_name = templates
return response
class UpdateCacheMiddleware(object):
"""
Response phase for Mezzanine's cache middleware. Handles caching
the response, and then performing the second phase of rendering,
for content enclosed by the ``nevercache`` tag.
"""
def process_response(self, request, response):
# Cache the response if all the required conditions are met.
# Response must be marked for updating by the
# ``FetchFromCacheMiddleware`` having a cache get miss, the
# user must not be authenticated, the HTTP status must be OK
        # and the response mustn't include an expiry age, indicating it
# shouldn't be cached.
marked_for_update = getattr(request, "_update_cache", False)
anon = hasattr(request, "user") and not request.user.is_authenticated()
valid_status = response.status_code == 200
timeout = get_max_age(response)
if timeout is None:
timeout = settings.CACHE_MIDDLEWARE_SECONDS
if anon and valid_status and marked_for_update and timeout:
cache_key = cache_key_prefix(request) + request.get_full_path()
_cache_set = lambda r: cache_set(cache_key, r.content, timeout)
if callable(getattr(response, "render", None)):
response.add_post_render_callback(_cache_set)
else:
_cache_set(response)
# Second phase rendering for non-cached template code and
# content. Split on the delimiter the ``nevercache`` tag
# wrapped its contents in, and render only the content
# enclosed by it, to avoid possible template code injection.
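        # Illustrative example (not from the source): with token "T", content
        # like b"<p>cached</p>T{% csrf_token %}T<p>cached</p>" splits into
        # three parts, and only the odd-indexed middle part is re-rendered.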
parts = response.content.split(nevercache_token())
if response["content-type"].startswith("text") and len(parts) > 1:
# Restore csrf token from cookie - check the response
# first as it may be being set for the first time.
csrf_token = None
try:
csrf_token = response.cookies[settings.CSRF_COOKIE_NAME].value
except KeyError:
try:
csrf_token = request.COOKIES[settings.CSRF_COOKIE_NAME]
except KeyError:
pass
if csrf_token:
request.META["CSRF_COOKIE"] = csrf_token
context = RequestContext(request)
for i, part in enumerate(parts):
if i % 2:
part = Template(part).render(context).encode("utf-8")
parts[i] = part
response.content = "".join(parts)
response["Content-Length"] = len(response.content)
# Required to clear out user messages.
request._messages.update(response)
return response
class FetchFromCacheMiddleware(object):
"""
Request phase for Mezzanine cache middleware. Return a response
    from cache if found, otherwise mark the request for updating
the cache in ``UpdateCacheMiddleware``.
If the response is served from cache and process_request returns a valid
response, Django will not execute the next middlewares, but we really
need ``CsrfViewMiddleware`` to run before ``UpdateCacheMiddleware`` to
    make sure a valid csrf token exists.
"""
def process_request(self, request):
if (cache_installed() and request.method == "GET" and
not request.user.is_authenticated()):
cache_key = cache_key_prefix(request) + request.get_full_path()
response = cache_get(cache_key)
if response is None:
request._update_cache = True
else:
csrf_mw_name = "django.middleware.csrf.CsrfViewMiddleware"
if csrf_mw_name in settings.MIDDLEWARE_CLASSES:
csrf_mw = CsrfViewMiddleware()
csrf_mw.process_view(request, lambda x: None, None, None)
get_token(request)
return HttpResponse(response)
class SSLRedirectMiddleware(object):
"""
Handles redirections required for SSL when ``SSL_ENABLED`` is ``True``.
If ``SSL_FORCE_HOST`` is ``True``, and is not the current host,
redirect to it.
Also ensure URLs defined by ``SSL_FORCE_URL_PREFIXES`` are redirect
to HTTPS, and redirect all other URLs to HTTP if on HTTPS.
"""
def process_request(self, request):
settings.use_editable()
force_host = settings.SSL_FORCE_HOST
if force_host and request.get_host().split(":")[0] != force_host:
url = "http://%s%s" % (force_host, request.get_full_path())
return HttpResponsePermanentRedirect(url)
if settings.SSL_ENABLED and not settings.DEV_SERVER:
url = "%s%s" % (request.get_host(), request.get_full_path())
if request.path.startswith(settings.SSL_FORCE_URL_PREFIXES):
if not request.is_secure():
return HttpResponseRedirect("https://%s" % url)
elif request.is_secure() and settings.SSL_FORCED_PREFIXES_ONLY:
return HttpResponseRedirect("http://%s" % url)
class RedirectFallbackMiddleware(object):
"""
Port of Django's ``RedirectFallbackMiddleware`` that uses
Mezzanine's approach for determining the current site.
"""
def __init__(self):
if "django.contrib.redirects" not in settings.INSTALLED_APPS:
raise MiddlewareNotUsed
def process_response(self, request, response):
if response.status_code == 404:
lookup = {
"site_id": current_site_id(),
"old_path": request.get_full_path(),
}
try:
redirect = Redirect.objects.get(**lookup)
except Redirect.DoesNotExist:
pass
else:
if not redirect.new_path:
response = HttpResponseGone()
else:
response = HttpResponseRedirect(redirect.new_path)
return response
|
from __future__ import unicode_literals
import decimal
from django.forms import DecimalField, NumberInput, ValidationError, Widget
from django.test import SimpleTestCase
from django.utils import formats, translation
from . import FormFieldAssertionsMixin
class DecimalFieldTest(FormFieldAssertionsMixin, SimpleTestCase):
def test_decimalfield_1(self):
f = DecimalField(max_digits=4, decimal_places=2)
self.assertWidgetRendersTo(f, '<input id="id_f" step="0.01" type="number" name="f" />')
with self.assertRaisesMessage(ValidationError, "'This field is required.'"):
f.clean('')
with self.assertRaisesMessage(ValidationError, "'This field is required.'"):
f.clean(None)
self.assertEqual(f.clean('1'), decimal.Decimal("1"))
self.assertIsInstance(f.clean('1'), decimal.Decimal)
self.assertEqual(f.clean('23'), decimal.Decimal("23"))
self.assertEqual(f.clean('3.14'), decimal.Decimal("3.14"))
self.assertEqual(f.clean(3.14), decimal.Decimal("3.14"))
self.assertEqual(f.clean(decimal.Decimal('3.14')), decimal.Decimal("3.14"))
with self.assertRaisesMessage(ValidationError, "'Enter a number.'"):
f.clean('NaN')
with self.assertRaisesMessage(ValidationError, "'Enter a number.'"):
f.clean('Inf')
with self.assertRaisesMessage(ValidationError, "'Enter a number.'"):
f.clean('-Inf')
with self.assertRaisesMessage(ValidationError, "'Enter a number.'"):
f.clean('a')
with self.assertRaisesMessage(ValidationError, "'Enter a number.'"):
f.clean('łąść')
self.assertEqual(f.clean('1.0 '), decimal.Decimal("1.0"))
self.assertEqual(f.clean(' 1.0'), decimal.Decimal("1.0"))
self.assertEqual(f.clean(' 1.0 '), decimal.Decimal("1.0"))
with self.assertRaisesMessage(ValidationError, "'Enter a number.'"):
f.clean('1.0a')
with self.assertRaisesMessage(ValidationError, "'Ensure that there are no more than 4 digits in total.'"):
f.clean('123.45')
with self.assertRaisesMessage(ValidationError, "'Ensure that there are no more than 2 decimal places.'"):
f.clean('1.234')
msg = "'Ensure that there are no more than 2 digits before the decimal point.'"
with self.assertRaisesMessage(ValidationError, msg):
f.clean('123.4')
self.assertEqual(f.clean('-12.34'), decimal.Decimal("-12.34"))
with self.assertRaisesMessage(ValidationError, "'Ensure that there are no more than 4 digits in total.'"):
f.clean('-123.45')
self.assertEqual(f.clean('-.12'), decimal.Decimal("-0.12"))
self.assertEqual(f.clean('-00.12'), decimal.Decimal("-0.12"))
self.assertEqual(f.clean('-000.12'), decimal.Decimal("-0.12"))
with self.assertRaisesMessage(ValidationError, "'Ensure that there are no more than 2 decimal places.'"):
f.clean('-000.123')
with self.assertRaisesMessage(ValidationError, "'Ensure that there are no more than 4 digits in total.'"):
f.clean('-000.12345')
with self.assertRaisesMessage(ValidationError, "'Enter a number.'"):
f.clean('--0.12')
self.assertEqual(f.max_digits, 4)
self.assertEqual(f.decimal_places, 2)
self.assertIsNone(f.max_value)
self.assertIsNone(f.min_value)
def test_decimalfield_2(self):
f = DecimalField(max_digits=4, decimal_places=2, required=False)
self.assertIsNone(f.clean(''))
self.assertIsNone(f.clean(None))
self.assertEqual(f.clean('1'), decimal.Decimal("1"))
self.assertEqual(f.max_digits, 4)
self.assertEqual(f.decimal_places, 2)
self.assertIsNone(f.max_value)
self.assertIsNone(f.min_value)
def test_decimalfield_3(self):
f = DecimalField(
max_digits=4, decimal_places=2,
max_value=decimal.Decimal('1.5'),
min_value=decimal.Decimal('0.5')
)
self.assertWidgetRendersTo(f, '<input step="0.01" name="f" min="0.5" max="1.5" type="number" id="id_f" />')
with self.assertRaisesMessage(ValidationError, "'Ensure this value is less than or equal to 1.5.'"):
f.clean('1.6')
with self.assertRaisesMessage(ValidationError, "'Ensure this value is greater than or equal to 0.5.'"):
f.clean('0.4')
self.assertEqual(f.clean('1.5'), decimal.Decimal("1.5"))
self.assertEqual(f.clean('0.5'), decimal.Decimal("0.5"))
self.assertEqual(f.clean('.5'), decimal.Decimal("0.5"))
self.assertEqual(f.clean('00.50'), decimal.Decimal("0.50"))
self.assertEqual(f.max_digits, 4)
self.assertEqual(f.decimal_places, 2)
self.assertEqual(f.max_value, decimal.Decimal('1.5'))
self.assertEqual(f.min_value, decimal.Decimal('0.5'))
def test_decimalfield_4(self):
f = DecimalField(decimal_places=2)
with self.assertRaisesMessage(ValidationError, "'Ensure that there are no more than 2 decimal places.'"):
f.clean('0.00000001')
def test_decimalfield_5(self):
f = DecimalField(max_digits=3)
# Leading whole zeros "collapse" to one digit.
self.assertEqual(f.clean('0000000.10'), decimal.Decimal("0.1"))
# But a leading 0 before the . doesn't count towards max_digits
self.assertEqual(f.clean('0000000.100'), decimal.Decimal("0.100"))
# Only leading whole zeros "collapse" to one digit.
self.assertEqual(f.clean('000000.02'), decimal.Decimal('0.02'))
with self.assertRaisesMessage(ValidationError, "'Ensure that there are no more than 3 digits in total.'"):
f.clean('000000.0002')
self.assertEqual(f.clean('.002'), decimal.Decimal("0.002"))
def test_decimalfield_6(self):
f = DecimalField(max_digits=2, decimal_places=2)
self.assertEqual(f.clean('.01'), decimal.Decimal(".01"))
msg = "'Ensure that there are no more than 0 digits before the decimal point.'"
with self.assertRaisesMessage(ValidationError, msg):
f.clean('1.1')
def test_decimalfield_scientific(self):
f = DecimalField(max_digits=2, decimal_places=2)
self.assertEqual(f.clean('1E+2'), decimal.Decimal('1E+2'))
self.assertEqual(f.clean('1e+2'), decimal.Decimal('1E+2'))
with self.assertRaisesMessage(ValidationError, "Ensure that there are no more"):
f.clean('0.546e+2')
def test_decimalfield_widget_attrs(self):
f = DecimalField(max_digits=6, decimal_places=2)
self.assertEqual(f.widget_attrs(Widget()), {})
self.assertEqual(f.widget_attrs(NumberInput()), {'step': '0.01'})
f = DecimalField(max_digits=10, decimal_places=0)
self.assertEqual(f.widget_attrs(NumberInput()), {'step': '1'})
f = DecimalField(max_digits=19, decimal_places=19)
self.assertEqual(f.widget_attrs(NumberInput()), {'step': '1e-19'})
f = DecimalField(max_digits=20)
self.assertEqual(f.widget_attrs(NumberInput()), {'step': 'any'})
f = DecimalField(max_digits=6, widget=NumberInput(attrs={'step': '0.01'}))
self.assertWidgetRendersTo(f, '<input step="0.01" name="f" type="number" id="id_f" />')
def test_decimalfield_localized(self):
"""
A localized DecimalField's widget renders to a text input without
number input specific attributes.
"""
f = DecimalField(localize=True)
self.assertWidgetRendersTo(f, '<input id="id_f" name="f" type="text" />')
def test_decimalfield_changed(self):
f = DecimalField(max_digits=2, decimal_places=2)
d = decimal.Decimal("0.1")
self.assertFalse(f.has_changed(d, '0.10'))
self.assertTrue(f.has_changed(d, '0.101'))
with translation.override('fr'), self.settings(USE_L10N=True):
f = DecimalField(max_digits=2, decimal_places=2, localize=True)
localized_d = formats.localize_input(d) # -> '0,1' in French
self.assertFalse(f.has_changed(d, localized_d))
|
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('tax', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Voucher',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('number', models.CharField(unique=True, max_length=100)),
('creation_date', models.DateTimeField(auto_now_add=True)),
('start_date', models.DateField(null=True, blank=True)),
('effective_from', models.FloatField(default=0.0)),
('end_date', models.DateField(null=True, blank=True)),
('kind_of', models.PositiveSmallIntegerField(choices=[(0, 'Absolute'), (1, 'Percentage')])),
('value', models.FloatField(default=0.0)),
('active', models.BooleanField(default=True)),
('used_amount', models.PositiveSmallIntegerField(default=0)),
('last_used_date', models.DateTimeField(null=True, blank=True)),
('limit', models.PositiveSmallIntegerField(default=1, null=True, blank=True)),
('creator', models.ForeignKey(to=settings.AUTH_USER_MODEL)),
],
options={
'ordering': ('creation_date', 'number'),
},
),
migrations.CreateModel(
name='VoucherGroup',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=100)),
('creation_date', models.DateTimeField(auto_now_add=True)),
('position', models.PositiveSmallIntegerField(default=10)),
('creator', models.ForeignKey(to=settings.AUTH_USER_MODEL)),
],
options={
'ordering': ('position',),
},
),
migrations.CreateModel(
name='VoucherOptions',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('number_prefix', models.CharField(default=b'', max_length=20, blank=True)),
('number_suffix', models.CharField(default=b'', max_length=20, blank=True)),
('number_length', models.IntegerField(default=5, null=True, blank=True)),
('number_letters', models.CharField(default=b'ABCDEFGHIJKLMNOPQRSTUVWXYZ', max_length=100, blank=True)),
],
),
migrations.AddField(
model_name='voucher',
name='group',
field=models.ForeignKey(related_name='vouchers', to='voucher.VoucherGroup'),
),
migrations.AddField(
model_name='voucher',
name='tax',
field=models.ForeignKey(verbose_name='Tax', blank=True, to='tax.Tax', null=True),
),
]
|
import datetime
import distutils.version
import os
import re
from collections import OrderedDict
class NlxHeader(OrderedDict):
"""
    Representation of the basic information in the 16 kB text header present
    in all Neuralynx files, including the dates opened and closed if given.
"""
HEADER_SIZE = 2 ** 14 # Neuralynx files have a txt header of 16kB
# helper function to interpret boolean keys
def _to_bool(txt):
if txt == 'True':
return True
elif txt == 'False':
return False
else:
            raise Exception('Cannot convert %s to bool' % txt)
# keys that may be present in header which we parse
txt_header_keys = [
('AcqEntName', 'channel_names', None), # used
('FileType', '', None),
('FileVersion', '', None),
('RecordSize', '', None),
('HardwareSubSystemName', '', None),
('HardwareSubSystemType', '', None),
('SamplingFrequency', 'sampling_rate', float), # used
('ADMaxValue', '', None),
('ADBitVolts', 'bit_to_microVolt', None), # used
('NumADChannels', '', None),
('ADChannel', 'channel_ids', None), # used
('InputRange', '', None),
('InputInverted', 'input_inverted', _to_bool), # used
('DSPLowCutFilterEnabled', '', None),
('DspLowCutFrequency', '', None),
('DspLowCutNumTaps', '', None),
('DspLowCutFilterType', '', None),
('DSPHighCutFilterEnabled', '', None),
('DspHighCutFrequency', '', None),
('DspHighCutNumTaps', '', None),
('DspHighCutFilterType', '', None),
('DspDelayCompensation', '', None),
('DspFilterDelay_µs', '', None),
('DisabledSubChannels', '', None),
('WaveformLength', '', int),
('AlignmentPt', '', None),
('ThreshVal', '', None),
('MinRetriggerSamples', '', None),
('SpikeRetriggerTime', '', None),
('DualThresholding', '', None),
(r'Feature \w+ \d+', '', None),
('SessionUUID', '', None),
('FileUUID', '', None),
('CheetahRev', '', None), # only for older versions of Cheetah
('ProbeName', '', None),
('OriginalFileName', '', None),
('TimeCreated', '', None),
('TimeClosed', '', None),
('ApplicationName', '', None), # also include version number when present
('AcquisitionSystem', '', None),
('ReferenceChannel', '', None),
('NLX_Base_Class_Type', '', None) # in version 4 and earlier versions of Cheetah
]
# Filename and datetime may appear in header lines starting with # at
# beginning of header or in later versions as a property. The exact format
# used depends on the application name and its version as well as the
# -FileVersion property.
#
# There are 4 styles understood by this code and the patterns used for parsing
    # the items within each are stored in a dictionary. Each dictionary is then
    # stored in the main dictionary, keyed by an abbreviation for the style.
header_pattern_dicts = {
# BML
'bml': dict(
datetime1_regex=r'## Time Opened: \(m/d/y\): (?P<date>\S+)'
r' At Time: (?P<time>\S+)',
filename_regex=r'## File Name: (?P<filename>\S+)',
datetimeformat='%m/%d/%y %H:%M:%S.%f'),
# Cheetah after version 1 and before version 5
'bv5': dict(
datetime1_regex=r'## Time Opened: \(m/d/y\): (?P<date>\S+)'
r' At Time: (?P<time>\S+)',
filename_regex=r'## File Name: (?P<filename>\S+)',
datetimeformat='%m/%d/%Y %H:%M:%S.%f'),
# Cheetah version 5.4.0
'v5.4.0': dict(
datetime1_regex=r'## Time Opened \(m/d/y\): (?P<date>\S+)'
r' At Time: (?P<time>\S+)',
datetime2_regex=r'## Time Closed \(m/d/y\): (?P<date>\S+)'
r' At Time: (?P<time>\S+)',
filename_regex=r'## File Name: (?P<filename>\S+)',
datetimeformat='%m/%d/%Y %H:%M:%S.%f'),
# Cheetah version 5 before and including v 5.6.4 as well as version 1
'bv5.6.4': dict(
datetime1_regex=r'## Time Opened \(m/d/y\): (?P<date>\S+)'
r' \(h:m:s\.ms\) (?P<time>\S+)',
datetime2_regex=r'## Time Closed \(m/d/y\): (?P<date>\S+)'
r' \(h:m:s\.ms\) (?P<time>\S+)',
filename_regex=r'## File Name (?P<filename>\S+)',
datetimeformat='%m/%d/%Y %H:%M:%S.%f'),
'neuraview2': dict(
datetime1_regex=r'## Date Opened: \(mm/dd/yyy\): (?P<date>\S+)'
r' At Time: (?P<time>\S+)',
datetime2_regex=r'## Date Closed: \(mm/dd/yyy\): (?P<date>\S+)'
r' At Time: (?P<time>\S+)',
filename_regex=r'## File Name: (?P<filename>\S+)',
datetimeformat='%m/%d/%Y %H:%M:%S'),
# Cheetah after v 5.6.4 and default for others such as Pegasus
'def': dict(
datetime1_regex=r'-TimeCreated (?P<date>\S+) (?P<time>\S+)',
datetime2_regex=r'-TimeClosed (?P<date>\S+) (?P<time>\S+)',
filename_regex=r'-OriginalFileName "?(?P<filename>\S+)"?',
datetimeformat='%Y/%m/%d %H:%M:%S')
}
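    # Illustrative example (an assumption, not from this file): a modern header
    # line such as '-TimeCreated 2017/02/16 14:13:58' matches the 'def' style
    # datetime1_regex and parses with its '%Y/%m/%d %H:%M:%S' format.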
def __init__(self, filename):
"""
Factory function to build NlxHeader for a given file.
"""
        super(NlxHeader, self).__init__()
with open(filename, 'rb') as f:
txt_header = f.read(NlxHeader.HEADER_SIZE)
txt_header = txt_header.strip(b'\x00').decode('latin-1')
# must start with 8 # characters
assert txt_header.startswith("########"),\
'Neuralynx files must start with 8 # characters.'
# find keys
for k1, k2, type_ in NlxHeader.txt_header_keys:
pattern = r'-(?P<name>' + k1 + r')\s+(?P<value>[\S ]*)'
matches = re.findall(pattern, txt_header)
for match in matches:
if k2 == '':
name = match[0]
else:
name = k2
value = match[1].rstrip(' ')
if type_ is not None:
value = type_(value)
self[name] = value
        # if channel_ids or channel_names are not in the header, the filename is used
name = os.path.splitext(os.path.basename(filename))[0]
# convert channel ids
if 'channel_ids' in self:
chid_entries = re.findall(r'\w+', self['channel_ids'])
self['channel_ids'] = [int(c) for c in chid_entries]
else:
self['channel_ids'] = ['unknown']
# convert channel names
if 'channel_names' in self:
name_entries = re.findall(r'\w+', self['channel_names'])
if len(name_entries) == 1:
self['channel_names'] = name_entries * len(self['channel_ids'])
assert len(self['channel_names']) == len(self['channel_ids']), \
'Number of channel ids does not match channel names.'
else:
self['channel_names'] = ['unknown'] * len(self['channel_ids'])
# version and application name
# older Cheetah versions with CheetahRev property
if 'CheetahRev' in self:
assert 'ApplicationName' not in self
self['ApplicationName'] = 'Cheetah'
app_version = self['CheetahRev']
# new file version 3.4 does not contain CheetahRev property, but ApplicationName instead
elif 'ApplicationName' in self:
pattern = r'(\S*) "([\S ]*)"'
match = re.findall(pattern, self['ApplicationName'])
assert len(match) == 1, 'impossible to find application name and version'
self['ApplicationName'], app_version = match[0]
# BML Ncs file contain neither property, but 'NLX_Base_Class_Type'
elif 'NLX_Base_Class_Type' in txt_header:
self['ApplicationName'] = 'BML'
app_version = "2.0"
# Neuraview Ncs file contained neither property nor
# NLX_Base_Class_Type information
else:
self['ApplicationName'] = 'Neuraview'
app_version = '2'
self['ApplicationVersion'] = distutils.version.LooseVersion(app_version)
# convert bit_to_microvolt
if 'bit_to_microVolt' in self:
btm_entries = re.findall(r'\S+', self['bit_to_microVolt'])
if len(btm_entries) == 1:
btm_entries = btm_entries * len(self['channel_ids'])
self['bit_to_microVolt'] = [float(e) * 1e6 for e in btm_entries]
assert len(self['bit_to_microVolt']) == len(self['channel_ids']), \
'Number of channel ids does not match bit_to_microVolt conversion factors.'
if 'InputRange' in self:
ir_entries = re.findall(r'\w+', self['InputRange'])
if len(ir_entries) == 1:
                self['InputRange'] = [int(ir_entries[0])] * len(self['channel_ids'])
else:
self['InputRange'] = [int(e) for e in ir_entries]
            assert len(self['InputRange']) == len(self['channel_ids']), \
                'Number of channel ids does not match input range values.'
# Format of datetime depends on app name, app version
# :TODO: this works for current examples but is not likely actually related
# to app version in this manner.
an = self['ApplicationName']
if an == 'Cheetah':
av = self['ApplicationVersion']
if av <= '2': # version 1 uses same as older versions
hpd = NlxHeader.header_pattern_dicts['bv5.6.4']
elif av < '5':
hpd = NlxHeader.header_pattern_dicts['bv5']
elif av <= '5.4.0':
hpd = NlxHeader.header_pattern_dicts['v5.4.0']
elif av <= '5.6.4':
hpd = NlxHeader.header_pattern_dicts['bv5.6.4']
else:
hpd = NlxHeader.header_pattern_dicts['def']
elif an == 'BML':
hpd = NlxHeader.header_pattern_dicts['bml']
av = "2"
elif an == 'Neuraview':
hpd = NlxHeader.header_pattern_dicts['neuraview2']
av = "2"
else:
an = "Unknown"
av = "NA"
hpd = NlxHeader.header_pattern_dicts['def']
# opening time
sr = re.search(hpd['datetime1_regex'], txt_header)
if not sr:
raise IOError("No matching header open date/time for application {} " +
"version {}. Please contact developers.".format(an, av))
else:
dt1 = sr.groupdict()
self['recording_opened'] = datetime.datetime.strptime(
dt1['date'] + ' ' + dt1['time'], hpd['datetimeformat'])
# close time, if available
if 'datetime2_regex' in hpd:
sr = re.search(hpd['datetime2_regex'], txt_header)
if not sr:
raise IOError("No matching header close date/time for application {} " +
"version {}. Please contact developers.".format(an, av))
else:
dt2 = sr.groupdict()
self['recording_closed'] = datetime.datetime.strptime(
dt2['date'] + ' ' + dt2['time'], hpd['datetimeformat'])
def type_of_recording(self):
"""
Determines type of recording in Ncs file with this header.
RETURN:
one of 'PRE4','BML','DIGITALLYNX','DIGITALLYNXSX','UNKNOWN'
"""
if 'NLX_Base_Class_Type' in self:
# older style standard neuralynx acquisition with rounded sampling frequency
if self['NLX_Base_Class_Type'] == 'CscAcqEnt':
return 'PRE4'
# BML style with fractional frequency and microsPerSamp
elif self['NLX_Base_Class_Type'] == 'BmlAcq':
return 'BML'
else:
return 'UNKNOWN'
elif 'HardwareSubSystemType' in self:
# DigitalLynx
if self['HardwareSubSystemType'] == 'DigitalLynx':
return 'DIGITALLYNX'
# DigitalLynxSX
            elif self['HardwareSubSystemType'] == 'DigitalLynxSX':
                return 'DIGITALLYNXSX'
            else:
                return 'UNKNOWN'
elif 'FileType' in self:
if self['FileVersion'] in ['3.3', '3.4']:
return self['AcquisitionSystem'].split()[1].upper()
else:
return 'UNKNOWN'
else:
return 'UNKNOWN'
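# Usage sketch (illustrative; 'example.ncs' is a hypothetical file path):
#
#   hdr = NlxHeader('example.ncs')
#   print(hdr['ApplicationName'], hdr['ApplicationVersion'])
#   print(hdr['channel_ids'], hdr['channel_names'])
#   print(hdr.type_of_recording())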
|
from django.apps import AppConfig
from django.utils.translation import gettext_lazy as _
class WagtailSearchPromotionsAppConfig(AppConfig):
name = 'wagtail.contrib.search_promotions'
label = 'wagtailsearchpromotions'
verbose_name = _("Wagtail search promotions")
|
from __future__ import absolute_import, unicode_literals
import pytest
from case import Mock
from kombu.utils.scheduling import FairCycle, cycle_by_name
class MyEmpty(Exception):
pass
def consume(fun, n):
r = []
for i in range(n):
r.append(fun(Mock(name='callback')))
return r
class test_FairCycle:
def test_cycle(self):
resources = ['a', 'b', 'c', 'd', 'e']
callback = Mock(name='callback')
def echo(r, timeout=None):
return r
# cycle should be ['a', 'b', 'c', 'd', 'e', ... repeat]
cycle = FairCycle(echo, resources, MyEmpty)
for i in range(len(resources)):
assert cycle.get(callback) == resources[i]
for i in range(len(resources)):
assert cycle.get(callback) == resources[i]
def test_cycle_breaks(self):
resources = ['a', 'b', 'c', 'd', 'e']
def echo(r, callback):
if r == 'c':
raise MyEmpty(r)
return r
cycle = FairCycle(echo, resources, MyEmpty)
assert consume(cycle.get, len(resources)) == [
'a', 'b', 'd', 'e', 'a',
]
assert consume(cycle.get, len(resources)) == [
'b', 'd', 'e', 'a', 'b',
]
cycle2 = FairCycle(echo, ['c', 'c'], MyEmpty)
with pytest.raises(MyEmpty):
consume(cycle2.get, 3)
def test_cycle_no_resources(self):
cycle = FairCycle(None, [], MyEmpty)
cycle.pos = 10
with pytest.raises(MyEmpty):
cycle._next()
def test__repr__(self):
assert repr(FairCycle(lambda x: x, [1, 2, 3], MyEmpty))
def test_round_robin_cycle():
it = cycle_by_name('round_robin')(['A', 'B', 'C'])
assert it.consume(3) == ['A', 'B', 'C']
it.rotate('B')
assert it.consume(3) == ['A', 'C', 'B']
it.rotate('A')
assert it.consume(3) == ['C', 'B', 'A']
it.rotate('A')
assert it.consume(3) == ['C', 'B', 'A']
it.rotate('C')
assert it.consume(3) == ['B', 'A', 'C']
def test_priority_cycle():
it = cycle_by_name('priority')(['A', 'B', 'C'])
assert it.consume(3) == ['A', 'B', 'C']
it.rotate('B')
assert it.consume(3) == ['A', 'B', 'C']
it.rotate('A')
assert it.consume(3) == ['A', 'B', 'C']
it.rotate('A')
assert it.consume(3) == ['A', 'B', 'C']
it.rotate('C')
assert it.consume(3) == ['A', 'B', 'C']
def test_sorted_cycle():
it = cycle_by_name('sorted')(['B', 'C', 'A'])
assert it.consume(3) == ['A', 'B', 'C']
it.rotate('B')
assert it.consume(3) == ['A', 'B', 'C']
it.rotate('A')
assert it.consume(3) == ['A', 'B', 'C']
it.rotate('A')
assert it.consume(3) == ['A', 'B', 'C']
it.rotate('C')
assert it.consume(3) == ['A', 'B', 'C']
|
from __future__ import unicode_literals
from django.db import migrations, models
import autoslug.fields
import mozillians.groups.templatetags.helpers
class Migration(migrations.Migration):
dependencies = [
('groups', '0015_groupmembership_needs_renewal'),
]
operations = [
migrations.AlterField(
model_name='groupalias',
name='url',
field=autoslug.fields.AutoSlugField(editable=False, populate_from=b'name', blank=True, unique=True, slugify=mozillians.groups.templatetags.helpers.slugify),
),
migrations.AlterField(
model_name='skillalias',
name='url',
field=autoslug.fields.AutoSlugField(editable=False, populate_from=b'name', blank=True, unique=True, slugify=mozillians.groups.templatetags.helpers.slugify),
),
]
|
from __future__ import unicode_literals
from unittest import expectedFailure
import warnings
from django.core.exceptions import ImproperlyConfigured
from django.core.urlresolvers import reverse
from django import forms
from django.test import TestCase, override_settings
from django.test.client import RequestFactory
from django.utils.deprecation import RemovedInDjango20Warning
from django.views.generic.base import View
from django.views.generic.edit import FormMixin, ModelFormMixin, CreateView
from . import views
from .models import Artist, Author
from .test_forms import AuthorForm
class FormMixinTests(TestCase):
def test_initial_data(self):
""" Test instance independence of initial data dict (see #16138) """
initial_1 = FormMixin().get_initial()
initial_1['foo'] = 'bar'
initial_2 = FormMixin().get_initial()
self.assertNotEqual(initial_1, initial_2)
def test_get_prefix(self):
""" Test prefix can be set (see #18872) """
test_string = 'test'
rf = RequestFactory()
get_request = rf.get('/')
class TestFormMixin(FormMixin):
request = get_request
default_kwargs = TestFormMixin().get_form_kwargs()
self.assertEqual(None, default_kwargs.get('prefix'))
set_mixin = TestFormMixin()
set_mixin.prefix = test_string
set_kwargs = set_mixin.get_form_kwargs()
self.assertEqual(test_string, set_kwargs.get('prefix'))
def test_get_form(self):
class TestFormMixin(FormMixin):
request = RequestFactory().get('/')
self.assertIsInstance(
TestFormMixin().get_form(forms.Form), forms.Form,
'get_form() should use provided form class.'
)
class FormClassTestFormMixin(TestFormMixin):
form_class = forms.Form
self.assertIsInstance(
FormClassTestFormMixin().get_form(), forms.Form,
'get_form() should fallback to get_form_class() if none is provided.'
)
def test_get_form_missing_form_class_default_value(self):
with warnings.catch_warnings(record=True) as w:
class MissingDefaultValue(FormMixin):
request = RequestFactory().get('/')
form_class = forms.Form
def get_form(self, form_class):
return form_class(**self.get_form_kwargs())
self.assertEqual(len(w), 1)
self.assertEqual(w[0].category, RemovedInDjango20Warning)
self.assertEqual(
str(w[0].message),
'`generic_views.test_edit.MissingDefaultValue.get_form` method '
'must define a default value for its `form_class` argument.'
)
self.assertIsInstance(
MissingDefaultValue().get_form(), forms.Form,
)
@override_settings(ROOT_URLCONF='generic_views.urls')
class BasicFormTests(TestCase):
def test_post_data(self):
res = self.client.post('/contact/', {'name': "Me", 'message': "Hello"})
self.assertRedirects(res, 'http://testserver/list/authors/')
class ModelFormMixinTests(TestCase):
def test_get_form(self):
form_class = views.AuthorGetQuerySetFormView().get_form_class()
self.assertEqual(form_class._meta.model, Author)
def test_get_form_checks_for_object(self):
mixin = ModelFormMixin()
mixin.request = RequestFactory().get('/')
self.assertEqual({'initial': {}, 'prefix': None},
mixin.get_form_kwargs())
@override_settings(ROOT_URLCONF='generic_views.urls')
class CreateViewTests(TestCase):
def test_create(self):
res = self.client.get('/edit/authors/create/')
self.assertEqual(res.status_code, 200)
self.assertIsInstance(res.context['form'], forms.ModelForm)
self.assertIsInstance(res.context['view'], View)
self.assertNotIn('object', res.context)
self.assertNotIn('author', res.context)
self.assertTemplateUsed(res, 'generic_views/author_form.html')
res = self.client.post('/edit/authors/create/',
{'name': 'Randall Munroe', 'slug': 'randall-munroe'})
self.assertEqual(res.status_code, 302)
self.assertRedirects(res, 'http://testserver/list/authors/')
self.assertQuerysetEqual(Author.objects.all(), ['<Author: Randall Munroe>'])
def test_create_invalid(self):
res = self.client.post('/edit/authors/create/',
{'name': 'A' * 101, 'slug': 'randall-munroe'})
self.assertEqual(res.status_code, 200)
self.assertTemplateUsed(res, 'generic_views/author_form.html')
self.assertEqual(len(res.context['form'].errors), 1)
self.assertEqual(Author.objects.count(), 0)
def test_create_with_object_url(self):
res = self.client.post('/edit/artists/create/',
{'name': 'Rene Magritte'})
self.assertEqual(res.status_code, 302)
artist = Artist.objects.get(name='Rene Magritte')
self.assertRedirects(res, 'http://testserver/detail/artist/%d/' % artist.pk)
self.assertQuerysetEqual(Artist.objects.all(), ['<Artist: Rene Magritte>'])
def test_create_with_redirect(self):
res = self.client.post('/edit/authors/create/redirect/',
{'name': 'Randall Munroe', 'slug': 'randall-munroe'})
self.assertEqual(res.status_code, 302)
self.assertRedirects(res, 'http://testserver/edit/authors/create/')
self.assertQuerysetEqual(Author.objects.all(), ['<Author: Randall Munroe>'])
def test_create_with_interpolated_redirect(self):
res = self.client.post('/edit/authors/create/interpolate_redirect/',
{'name': 'Randall Munroe', 'slug': 'randall-munroe'})
self.assertQuerysetEqual(Author.objects.all(), ['<Author: Randall Munroe>'])
self.assertEqual(res.status_code, 302)
pk = Author.objects.all()[0].pk
self.assertRedirects(res, 'http://testserver/edit/author/%d/update/' % pk)
def test_create_with_special_properties(self):
res = self.client.get('/edit/authors/create/special/')
self.assertEqual(res.status_code, 200)
self.assertIsInstance(res.context['form'], views.AuthorForm)
self.assertNotIn('object', res.context)
self.assertNotIn('author', res.context)
self.assertTemplateUsed(res, 'generic_views/form.html')
res = self.client.post('/edit/authors/create/special/',
{'name': 'Randall Munroe', 'slug': 'randall-munroe'})
self.assertEqual(res.status_code, 302)
obj = Author.objects.get(slug='randall-munroe')
self.assertRedirects(res, reverse('author_detail', kwargs={'pk': obj.pk}))
self.assertQuerysetEqual(Author.objects.all(), ['<Author: Randall Munroe>'])
def test_create_without_redirect(self):
try:
self.client.post('/edit/authors/create/naive/',
{'name': 'Randall Munroe', 'slug': 'randall-munroe'})
self.fail('Should raise exception -- No redirect URL provided, and no get_absolute_url provided')
except ImproperlyConfigured:
pass
def test_create_restricted(self):
res = self.client.post('/edit/authors/create/restricted/',
{'name': 'Randall Munroe', 'slug': 'randall-munroe'})
self.assertEqual(res.status_code, 302)
self.assertRedirects(res, 'http://testserver/accounts/login/?next=/edit/authors/create/restricted/')
def test_create_view_with_restricted_fields(self):
class MyCreateView(CreateView):
model = Author
fields = ['name']
self.assertEqual(list(MyCreateView().get_form_class().base_fields),
['name'])
def test_create_view_all_fields(self):
class MyCreateView(CreateView):
model = Author
fields = '__all__'
self.assertEqual(list(MyCreateView().get_form_class().base_fields),
['name', 'slug'])
def test_create_view_without_explicit_fields(self):
class MyCreateView(CreateView):
model = Author
message = (
"Using ModelFormMixin (base class of MyCreateView) without the "
"'fields' attribute is prohibited."
)
with self.assertRaisesMessage(ImproperlyConfigured, message):
MyCreateView().get_form_class()
def test_define_both_fields_and_form_class(self):
class MyCreateView(CreateView):
model = Author
form_class = AuthorForm
fields = ['name']
message = "Specifying both 'fields' and 'form_class' is not permitted."
with self.assertRaisesMessage(ImproperlyConfigured, message):
MyCreateView().get_form_class()
@override_settings(ROOT_URLCONF='generic_views.urls')
class UpdateViewTests(TestCase):
def test_update_post(self):
a = Author.objects.create(
name='Randall Munroe',
slug='randall-munroe',
)
res = self.client.get('/edit/author/%d/update/' % a.pk)
self.assertEqual(res.status_code, 200)
self.assertIsInstance(res.context['form'], forms.ModelForm)
self.assertEqual(res.context['object'], Author.objects.get(pk=a.pk))
self.assertEqual(res.context['author'], Author.objects.get(pk=a.pk))
self.assertTemplateUsed(res, 'generic_views/author_form.html')
# Modification with both POST and PUT (browser compatible)
res = self.client.post('/edit/author/%d/update/' % a.pk,
{'name': 'Randall Munroe (xkcd)', 'slug': 'randall-munroe'})
self.assertEqual(res.status_code, 302)
self.assertRedirects(res, 'http://testserver/list/authors/')
self.assertQuerysetEqual(Author.objects.all(), ['<Author: Randall Munroe (xkcd)>'])
@expectedFailure
def test_update_put(self):
a = Author.objects.create(
name='Randall Munroe',
slug='randall-munroe',
)
res = self.client.get('/edit/author/%d/update/' % a.pk)
self.assertEqual(res.status_code, 200)
self.assertTemplateUsed(res, 'generic_views/author_form.html')
res = self.client.put('/edit/author/%d/update/' % a.pk,
{'name': 'Randall Munroe (author of xkcd)', 'slug': 'randall-munroe'})
# Here is the expected failure. PUT data is not processed in any special
# way by Django, so the request is equivalent to a POST without data;
# hence the form will be invalid and redisplayed with errors (status
# code 200). See also #12635.
self.assertEqual(res.status_code, 302)
self.assertRedirects(res, 'http://testserver/list/authors/')
self.assertQuerysetEqual(Author.objects.all(), ['<Author: Randall Munroe (author of xkcd)>'])
def test_update_invalid(self):
a = Author.objects.create(
name='Randall Munroe',
slug='randall-munroe',
)
res = self.client.post('/edit/author/%d/update/' % a.pk,
{'name': 'A' * 101, 'slug': 'randall-munroe'})
self.assertEqual(res.status_code, 200)
self.assertTemplateUsed(res, 'generic_views/author_form.html')
self.assertEqual(len(res.context['form'].errors), 1)
self.assertQuerysetEqual(Author.objects.all(), ['<Author: Randall Munroe>'])
def test_update_with_object_url(self):
a = Artist.objects.create(name='Rene Magritte')
res = self.client.post('/edit/artists/%d/update/' % a.pk,
{'name': 'Rene Magritte'})
self.assertEqual(res.status_code, 302)
self.assertRedirects(res, 'http://testserver/detail/artist/%d/' % a.pk)
self.assertQuerysetEqual(Artist.objects.all(), ['<Artist: Rene Magritte>'])
def test_update_with_redirect(self):
a = Author.objects.create(
name='Randall Munroe',
slug='randall-munroe',
)
res = self.client.post('/edit/author/%d/update/redirect/' % a.pk,
{'name': 'Randall Munroe (author of xkcd)', 'slug': 'randall-munroe'})
self.assertEqual(res.status_code, 302)
self.assertRedirects(res, 'http://testserver/edit/authors/create/')
self.assertQuerysetEqual(Author.objects.all(), ['<Author: Randall Munroe (author of xkcd)>'])
def test_update_with_interpolated_redirect(self):
a = Author.objects.create(
name='Randall Munroe',
slug='randall-munroe',
)
res = self.client.post('/edit/author/%d/update/interpolate_redirect/' % a.pk,
{'name': 'Randall Munroe (author of xkcd)', 'slug': 'randall-munroe'})
self.assertQuerysetEqual(Author.objects.all(), ['<Author: Randall Munroe (author of xkcd)>'])
self.assertEqual(res.status_code, 302)
pk = Author.objects.all()[0].pk
self.assertRedirects(res, 'http://testserver/edit/author/%d/update/' % pk)
def test_update_with_special_properties(self):
a = Author.objects.create(
name='Randall Munroe',
slug='randall-munroe',
)
res = self.client.get('/edit/author/%d/update/special/' % a.pk)
self.assertEqual(res.status_code, 200)
self.assertIsInstance(res.context['form'], views.AuthorForm)
self.assertEqual(res.context['object'], Author.objects.get(pk=a.pk))
self.assertEqual(res.context['thingy'], Author.objects.get(pk=a.pk))
self.assertNotIn('author', res.context)
self.assertTemplateUsed(res, 'generic_views/form.html')
res = self.client.post('/edit/author/%d/update/special/' % a.pk,
{'name': 'Randall Munroe (author of xkcd)', 'slug': 'randall-munroe'})
self.assertEqual(res.status_code, 302)
self.assertRedirects(res, 'http://testserver/detail/author/%d/' % a.pk)
self.assertQuerysetEqual(Author.objects.all(), ['<Author: Randall Munroe (author of xkcd)>'])
def test_update_without_redirect(self):
a = Author.objects.create(
name='Randall Munroe',
slug='randall-munroe',
)
# Should raise exception -- No redirect URL provided, and no
# get_absolute_url provided
with self.assertRaises(ImproperlyConfigured):
self.client.post('/edit/author/%d/update/naive/' % a.pk,
{'name': 'Randall Munroe (author of xkcd)', 'slug': 'randall-munroe'})
def test_update_get_object(self):
a = Author.objects.create(
pk=1,
name='Randall Munroe',
slug='randall-munroe',
)
res = self.client.get('/edit/author/update/')
self.assertEqual(res.status_code, 200)
self.assertIsInstance(res.context['form'], forms.ModelForm)
self.assertIsInstance(res.context['view'], View)
self.assertEqual(res.context['object'], Author.objects.get(pk=a.pk))
self.assertEqual(res.context['author'], Author.objects.get(pk=a.pk))
self.assertTemplateUsed(res, 'generic_views/author_form.html')
# Modification with both POST and PUT (browser compatible)
res = self.client.post('/edit/author/update/',
{'name': 'Randall Munroe (xkcd)', 'slug': 'randall-munroe'})
self.assertEqual(res.status_code, 302)
self.assertRedirects(res, 'http://testserver/list/authors/')
self.assertQuerysetEqual(Author.objects.all(), ['<Author: Randall Munroe (xkcd)>'])
@override_settings(ROOT_URLCONF='generic_views.urls')
class DeleteViewTests(TestCase):
def test_delete_by_post(self):
a = Author.objects.create(**{'name': 'Randall Munroe', 'slug': 'randall-munroe'})
res = self.client.get('/edit/author/%d/delete/' % a.pk)
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['object'], Author.objects.get(pk=a.pk))
self.assertEqual(res.context['author'], Author.objects.get(pk=a.pk))
self.assertTemplateUsed(res, 'generic_views/author_confirm_delete.html')
# Deletion with POST
res = self.client.post('/edit/author/%d/delete/' % a.pk)
self.assertEqual(res.status_code, 302)
self.assertRedirects(res, 'http://testserver/list/authors/')
self.assertQuerysetEqual(Author.objects.all(), [])
def test_delete_by_delete(self):
# Deletion with browser compatible DELETE method
a = Author.objects.create(**{'name': 'Randall Munroe', 'slug': 'randall-munroe'})
res = self.client.delete('/edit/author/%d/delete/' % a.pk)
self.assertEqual(res.status_code, 302)
self.assertRedirects(res, 'http://testserver/list/authors/')
self.assertQuerysetEqual(Author.objects.all(), [])
def test_delete_with_redirect(self):
a = Author.objects.create(**{'name': 'Randall Munroe', 'slug': 'randall-munroe'})
res = self.client.post('/edit/author/%d/delete/redirect/' % a.pk)
self.assertEqual(res.status_code, 302)
self.assertRedirects(res, 'http://testserver/edit/authors/create/')
self.assertQuerysetEqual(Author.objects.all(), [])
def test_delete_with_interpolated_redirect(self):
a = Author.objects.create(**{'name': 'Randall Munroe', 'slug': 'randall-munroe'})
res = self.client.post('/edit/author/%d/delete/interpolate_redirect/' % a.pk)
self.assertEqual(res.status_code, 302)
self.assertRedirects(res, 'http://testserver/edit/authors/create/?deleted=%d' % a.pk)
self.assertQuerysetEqual(Author.objects.all(), [])
def test_delete_with_special_properties(self):
a = Author.objects.create(**{'name': 'Randall Munroe', 'slug': 'randall-munroe'})
res = self.client.get('/edit/author/%d/delete/special/' % a.pk)
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['object'], Author.objects.get(pk=a.pk))
self.assertEqual(res.context['thingy'], Author.objects.get(pk=a.pk))
self.assertNotIn('author', res.context)
self.assertTemplateUsed(res, 'generic_views/confirm_delete.html')
res = self.client.post('/edit/author/%d/delete/special/' % a.pk)
self.assertEqual(res.status_code, 302)
self.assertRedirects(res, 'http://testserver/list/authors/')
self.assertQuerysetEqual(Author.objects.all(), [])
def test_delete_without_redirect(self):
a = Author.objects.create(
name='Randall Munroe',
slug='randall-munroe',
)
# Should raise exception -- No redirect URL provided, and no
# get_absolute_url provided
with self.assertRaises(ImproperlyConfigured):
self.client.post('/edit/author/%d/delete/naive/' % a.pk)
|
import warnings
import random
import json
import jinja2
import numpy
import re
import os
from ._server import serve
from .utils import deprecated, get_id, write_ipynb_local_js
from .mplexporter import Exporter
from .mpld3renderer import MPLD3Renderer
from . import urls
__all__ = ["fig_to_html", "fig_to_dict", "fig_to_d3",
"display_d3", "display",
"show_d3", "show",
"enable_notebook", "disable_notebook",
"save_html", "save_json"]
SIMPLE_HTML = jinja2.Template("""
<script type="text/javascript" src="{{ d3_url }}"></script>
<script type="text/javascript" src="{{ mpld3_url }}"></script>
<style>
{{ extra_css }}
</style>
<div id={{ figid }}></div>
<script type="text/javascript">
!function(mpld3){
{{ extra_js }}
mpld3.draw_figure({{ figid }}, {{ figure_json }});
}(mpld3);
</script>
""")
REQUIREJS_HTML = jinja2.Template("""
<style>
{{ extra_css }}
</style>
<div id={{ figid }}></div>
<script type="text/javascript">
if(typeof(window.mpld3) !== "undefined" && window.mpld3._mpld3IsLoaded){
!function (mpld3){
{{ extra_js }}
mpld3.draw_figure({{ figid }}, {{ figure_json }});
}(mpld3);
}else{
require.config({paths: {d3: "{{ d3_url[:-3] }}"}});
require(["d3"], function(d3){
window.d3 = d3;
$.getScript("{{ mpld3_url }}", function(){
{{ extra_js }}
mpld3.draw_figure({{ figid }}, {{ figure_json }});
});
});
}
</script>
""")
GENERAL_HTML = jinja2.Template("""
<style>
{{ extra_css }}
</style>
<div id={{ figid }}></div>
<script>
function mpld3_load_lib(url, callback){
var s = document.createElement('script');
s.src = url;
s.async = true;
s.onreadystatechange = s.onload = callback;
s.onerror = function(){console.warn("failed to load library " + url);};
document.getElementsByTagName("head")[0].appendChild(s);
}
if(typeof(mpld3) !== "undefined" && mpld3._mpld3IsLoaded){
// already loaded: just create the figure
!function(mpld3){
{{ extra_js }}
mpld3.draw_figure({{ figid }}, {{ figure_json }});
}(mpld3);
}else if(typeof define === "function" && define.amd){
// require.js is available: use it to load d3/mpld3
require.config({paths: {d3: "{{ d3_url[:-3] }}"}});
require(["d3"], function(d3){
window.d3 = d3;
mpld3_load_lib("{{ mpld3_url }}", function(){
{{ extra_js }}
mpld3.draw_figure({{ figid }}, {{ figure_json }});
});
});
}else{
// require.js not available: dynamically load d3 & mpld3
mpld3_load_lib("{{ d3_url }}", function(){
mpld3_load_lib("{{ mpld3_url }}", function(){
{{ extra_js }}
mpld3.draw_figure({{ figid }}, {{ figure_json }});
})
});
}
</script>
""")
TEMPLATE_DICT = {"simple": SIMPLE_HTML,
"notebook": REQUIREJS_HTML,
"general": GENERAL_HTML}
class NumpyEncoder(json.JSONEncoder):
""" Special json encoder for numpy types """
def default(self, obj):
if isinstance(obj, (numpy.int_, numpy.intc, numpy.intp, numpy.int8,
numpy.int16, numpy.int32, numpy.int64, numpy.uint8,
numpy.uint16,numpy.uint32, numpy.uint64)):
return int(obj)
elif isinstance(obj, (numpy.float_, numpy.float16, numpy.float32,
numpy.float64)):
return float(obj)
return json.JSONEncoder.default(self, obj)
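# A minimal usage sketch for NumpyEncoder (the data dict below is
# hypothetical); without the cls hook, json.dumps raises TypeError on
# numpy scalar types:
#
#     json.dumps({'x': numpy.int64(3)}, cls=NumpyEncoder)  # -> '{"x": 3}'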
def fig_to_dict(fig, **kwargs):
"""Output json-serializable dictionary representation of the figure
Parameters
----------
fig : matplotlib figure
The figure to display
**kwargs :
Additional keyword arguments passed to mplexporter.Exporter
Returns
-------
fig_dict : dict
the Python dictionary representation of the figure, which is
directly convertible to json using the standard json package.
See Also
--------
:func:`save_json`: save json representation of a figure to file
:func:`save_html` : save html representation of a figure to file
:func:`fig_to_html` : output html representation of the figure
:func:`show` : launch a local server and show a figure in a browser
:func:`display` : embed figure within the IPython notebook
:func:`enable_notebook` : automatically embed figures in IPython notebook
"""
renderer = MPLD3Renderer()
Exporter(renderer, close_mpl=False, **kwargs).run(fig)
fig, figure_dict, extra_css, extra_js = renderer.finished_figures[0]
return figure_dict
def fig_to_html(fig, d3_url=None, mpld3_url=None, no_extras=False,
template_type="general", figid=None, use_http=False, **kwargs):
"""Output html representation of the figure
Parameters
----------
fig : matplotlib figure
The figure to display
d3_url : string (optional)
The URL of the d3 library. If not specified, a standard web path
will be used.
mpld3_url : string (optional)
The URL of the mpld3 library. If not specified, a standard web path
will be used.
no_extras : boolean
If true, remove any extra javascript or CSS. The output will then be
similar to embedding the output of :func:`fig_to_dict` directly in
a web page.
template_type : string
string specifying the type of HTML template to use. Options are:
``"simple"``
suitable for a simple html page with one figure. Will
fail if require.js is available on the page.
``"notebook"``
assumes require.js and jquery are available.
``"general"``
more complicated, but works both in and out of the
notebook, whether or not require.js and jquery are available
figid : string (optional)
The html/css id of the figure div, which must not contain spaces.
If not specified, a random id will be generated.
use_http : boolean (optional)
If true, use http:// instead of https:// for d3_url and mpld3_url.
**kwargs :
Additional keyword arguments passed to mplexporter.Exporter
Returns
-------
fig_html : string
the HTML representation of the figure
See Also
--------
:func:`save_json`: save json representation of a figure to file
:func:`save_html` : save html representation of a figure to file
:func:`fig_to_dict` : output dictionary representation of the figure
:func:`show` : launch a local server and show a figure in a browser
:func:`display` : embed figure within the IPython notebook
:func:`enable_notebook` : automatically embed figures in IPython notebook
"""
template = TEMPLATE_DICT[template_type]
# TODO: allow fig to be a list of figures?
d3_url = d3_url or urls.D3_URL
mpld3_url = mpld3_url or urls.MPLD3_URL
if use_http:
d3_url = d3_url.replace('https://', 'http://')
mpld3_url = mpld3_url.replace('https://', 'http://')
if figid is None:
figid = 'fig_' + get_id(fig) + str(int(random.random() * 1E10))
elif re.search(r'\s', figid):
raise ValueError("figid must not contain spaces")
renderer = MPLD3Renderer()
Exporter(renderer, close_mpl=False, **kwargs).run(fig)
fig, figure_json, extra_css, extra_js = renderer.finished_figures[0]
if no_extras:
extra_css = ""
extra_js = ""
return template.render(figid=json.dumps(figid),
d3_url=d3_url,
mpld3_url=mpld3_url,
figure_json=json.dumps(figure_json, cls=NumpyEncoder),
extra_css=extra_css,
extra_js=extra_js)
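# A minimal usage sketch for the two converters above (figure contents are
# hypothetical; assumes matplotlib is importable):
#
#     import matplotlib.pyplot as plt
#     fig, ax = plt.subplots()
#     ax.plot([1, 2, 3], [4, 5, 6])
#     html = fig_to_html(fig, template_type='simple', figid='fig_example')
#     fig_dict = fig_to_dict(fig)  # json-serializable dict of the same figure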
def display(fig=None, closefig=True, local=False, **kwargs):
"""Display figure in IPython notebook via the HTML display hook
Parameters
----------
fig : matplotlib figure
The figure to display (grabs current figure if missing)
closefig : boolean (default: True)
If true, close the figure so that the IPython matplotlib mode will not
display the png version of the figure.
local : boolean (optional, default=False)
if True, then copy the d3 & mpld3 libraries to a location visible to
the notebook server, and source them from there. See Notes below.
**kwargs :
additional keyword arguments are passed through to :func:`fig_to_html`.
Returns
-------
fig_d3 : IPython.display.HTML object
the IPython HTML rich display of the figure.
Notes
-----
Known issues: using ``local=True`` may not work correctly in certain cases:
- In IPython < 2.0, ``local=True`` may fail if the current working
directory is changed within the notebook (e.g. with the %cd command).
- In IPython 2.0+, ``local=True`` may fail if a url prefix is added
(e.g. by setting NotebookApp.base_url).
See Also
--------
:func:`show` : launch a local server and show a figure in a browser
:func:`enable_notebook` : automatically embed figures in IPython notebook
"""
# import here, in case users don't have requirements installed
from IPython.display import HTML
import matplotlib.pyplot as plt
if local:
if 'mpld3_url' in kwargs or 'd3_url' in kwargs:
warnings.warn(
"display: specified urls are ignored when local=True")
kwargs['d3_url'], kwargs['mpld3_url'] = write_ipynb_local_js()
if fig is None:
fig = plt.gcf()
if closefig:
plt.close(fig)
return HTML(fig_to_html(fig, **kwargs))
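# A minimal sketch (assumes an active IPython notebook session); extra
# keyword arguments pass through to fig_to_html:
#
#     display()  # grabs plt.gcf(), closes it, returns an HTML rich object
#     display(fig, closefig=False, template_type='notebook')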
def show(fig=None, ip='127.0.0.1', port=8888, n_retries=50,
local=True, open_browser=True, http_server=None, **kwargs):
"""Open figure in a web browser
Similar behavior to plt.show(). This opens the D3 visualization of the
specified figure in the web browser. On most platforms, the browser
will open automatically.
Parameters
----------
fig : matplotlib figure
The figure to display. If not specified, the current active figure
will be used.
ip : string, default = '127.0.0.1'
the ip address used for the local server
port : int, default = 8888
the port number to use for the local server. If already in use,
a nearby open port will be found (see n_retries)
n_retries : int, default = 50
the maximum number of ports to try when locating an empty port.
local : bool, default = True
if True, use the local d3 & mpld3 javascript versions, within the
js/ folder. If False, use the standard urls.
open_browser : bool (optional)
if True (default), then open a web browser to the given HTML
http_server : class (optional)
optionally specify an HTTPServer class to use for showing the
figure. The default is Python's basic HTTPServer.
**kwargs :
additional keyword arguments are passed through to :func:`fig_to_html`
See Also
--------
:func:`display` : embed figure within the IPython notebook
:func:`enable_notebook` : automatically embed figures in IPython notebook
"""
if local:
kwargs['mpld3_url'] = '/mpld3.js'
kwargs['d3_url'] = '/d3.js'
files = {'/mpld3.js': ["text/javascript",
open(urls.MPLD3_LOCAL, 'r').read()],
'/d3.js': ["text/javascript",
open(urls.D3_LOCAL, 'r').read()]}
else:
files = None
if fig is None:
# import here, in case matplotlib.use(...) is called by user
import matplotlib.pyplot as plt
fig = plt.gcf()
html = fig_to_html(fig, **kwargs)
serve(html, ip=ip, port=port, n_retries=n_retries, files=files,
open_browser=open_browser, http_server=http_server)
def enable_notebook(local=False, **kwargs):
"""Enable the automatic display of figures in the IPython Notebook.
This function should be used with the inline Matplotlib backend
that ships with IPython, which can be enabled with `%pylab inline`
or `%matplotlib inline`. This works by adding an HTML formatter
for Figure objects; the existing SVG/PNG formatters will remain
enabled.
Parameters
----------
local : boolean (optional, default=False)
if True, then copy the d3 & mpld3 libraries to a location visible to
the notebook server, and source them from there. See Notes below.
**kwargs :
all keyword parameters are passed through to :func:`fig_to_html`
Notes
-----
Known issues: using ``local=True`` may not work correctly in certain cases:
- In IPython < 2.0, ``local=True`` may fail if the current working
directory is changed within the notebook (e.g. with the %cd command).
- In IPython 2.0+, ``local=True`` may fail if a url prefix is added
(e.g. by setting NotebookApp.base_url).
See Also
--------
:func:`disable_notebook` : undo the action of enable_notebook
:func:`display` : embed figure within the IPython notebook
:func:`show` : launch a local server and show a figure in a browser
"""
try:
from IPython.core.getipython import get_ipython
from matplotlib.figure import Figure
except ImportError:
raise ImportError('This feature requires IPython 1.0+ and Matplotlib')
if local:
if 'mpld3_url' in kwargs or 'd3_url' in kwargs:
warnings.warn(
"enable_notebook: specified urls are ignored when local=True")
kwargs['d3_url'], kwargs['mpld3_url'] = write_ipynb_local_js()
ip = get_ipython()
formatter = ip.display_formatter.formatters['text/html']
formatter.for_type(Figure,
lambda fig, kwds=kwargs: fig_to_html(fig, **kwds))
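# A minimal sketch: in a notebook session with the inline backend active,
# enable_notebook() registers the formatter above, and any extra keyword
# arguments are forwarded to fig_to_html for every displayed figure, e.g.
#
#     enable_notebook(no_extras=True)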
def disable_notebook():
"""Disable the automatic display of figures in the IPython Notebook.
See Also
--------
:func:`enable_notebook` : automatically embed figures in IPython notebook
"""
try:
from IPython.core.getipython import get_ipython
from matplotlib.figure import Figure
except ImportError:
raise ImportError('This feature requires IPython 1.0+ and Matplotlib')
ip = get_ipython()
formatter = ip.display_formatter.formatters['text/html']
formatter.type_printers.pop(Figure, None)
def save_html(fig, fileobj, **kwargs):
"""Save a matplotlib figure to an html file
Parameters
----------
fig : matplotlib Figure instance
The figure to write to file.
fileobj : filename or file object
The filename or file-like object in which to write the HTML
representation of the figure.
**kwargs :
additional keyword arguments will be passed to :func:`fig_to_html`
See Also
--------
:func:`save_json`: save json representation of a figure to file
:func:`fig_to_html` : output html representation of the figure
:func:`fig_to_dict` : output dictionary representation of the figure
"""
if isinstance(fileobj, str):
fileobj = open(fileobj, 'w')
if not hasattr(fileobj, 'write'):
raise ValueError("fileobj should be a filename or a writable file")
fileobj.write(fig_to_html(fig, **kwargs))
def save_json(fig, fileobj, **kwargs):
"""Save a matplotlib figure to a json file.
Note that any plugins which depend on generated HTML will not be included
in the JSON encoding.
Parameters
----------
fig : matplotlib Figure instance
The figure to write to file.
fileobj : filename or file object
The filename or file-like object in which to write the JSON
representation of the figure.
**kwargs :
additional keyword arguments will be passed to :func:`fig_to_dict`
See Also
--------
:func:`save_html` : save html representation of a figure to file
:func:`fig_to_html` : output html representation of the figure
:func:`fig_to_dict` : output dictionary representation of the figure
"""
if isinstance(fileobj, str):
fileobj = open(fileobj, 'w')
if not hasattr(fileobj, 'write'):
raise ValueError("fileobj should be a filename or a writable file")
json.dump(fig_to_dict(fig, **kwargs), fileobj, cls=NumpyEncoder)
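# A minimal sketch of the save helpers (file names are hypothetical):
#
#     save_html(fig, 'figure.html')   # standalone HTML via fig_to_html
#     with open('figure.json', 'w') as f:
#         save_json(fig, f)           # JSON via fig_to_dict + NumpyEncoder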
show_d3 = deprecated(show, "mpld3.show_d3", "mpld3.show")
fig_to_d3 = deprecated(fig_to_html, "mpld3.fig_to_d3", "mpld3.fig_to_html")
display_d3 = deprecated(display, "mpld3.display_d3", "mpld3.display")
|
"""This test covers the workflow for a sharding merge.
We start with 3 shards: -40, 40-80, and 80-. We then merge -40 and 40-80
into -80.
Note this test is just testing the full workflow, not corner cases or error
cases. These are mostly done by the other resharding tests.
"""
import logging
import unittest
from vtdb import keyrange_constants
import base_sharding
import environment
import tablet
import utils
shard_0_master = tablet.Tablet()
shard_0_replica = tablet.Tablet()
shard_0_rdonly = tablet.Tablet()
shard_1_master = tablet.Tablet()
shard_1_replica = tablet.Tablet()
shard_1_rdonly = tablet.Tablet()
shard_2_master = tablet.Tablet()
shard_2_replica = tablet.Tablet()
shard_2_rdonly = tablet.Tablet()
shard_dest_master = tablet.Tablet()
shard_dest_replica = tablet.Tablet()
shard_dest_rdonly = tablet.Tablet()
all_tablets = [shard_0_master, shard_0_replica, shard_0_rdonly,
shard_1_master, shard_1_replica, shard_1_rdonly,
shard_2_master, shard_2_replica, shard_2_rdonly,
shard_dest_master, shard_dest_replica, shard_dest_rdonly]
def setUpModule():
try:
environment.topo_server().setup()
setup_procs = [t.init_mysql() for t in all_tablets]
utils.Vtctld().start()
utils.wait_procs(setup_procs)
except:
tearDownModule()
raise
def tearDownModule():
utils.required_teardown()
if utils.options.skip_teardown:
return
teardown_procs = [t.teardown_mysql() for t in all_tablets]
utils.wait_procs(teardown_procs, raise_on_error=False)
environment.topo_server().teardown()
utils.kill_sub_processes()
utils.remove_tmp_files()
for t in all_tablets:
t.remove_tree()
class TestMergeSharding(unittest.TestCase, base_sharding.BaseShardingTest):
# create_schema will create the same schema on the keyspace
# then insert some values
def _create_schema(self):
if base_sharding.keyspace_id_type == keyrange_constants.KIT_BYTES:
t = 'varbinary(64)'
else:
t = 'bigint(20) unsigned'
# Note that the primary key columns are not defined first on purpose to test
# that a reordered column list is correctly used everywhere in vtworker.
create_table_template = '''create table %s(
msg varchar(64),
custom_ksid_col ''' + t + ''' not null,
id bigint not null,
parent_id bigint not null,
primary key (parent_id, id),
index by_msg (msg)
) Engine=InnoDB'''
create_view_template = (
'create view %s'
'(id, msg, custom_ksid_col) as select id, msg, custom_ksid_col '
'from %s')
utils.run_vtctl(['ApplySchema',
'-sql=' + create_table_template % ('resharding1'),
'test_keyspace'],
auto_log=True)
utils.run_vtctl(['ApplySchema',
'-sql=' + create_table_template % ('resharding2'),
'test_keyspace'],
auto_log=True)
utils.run_vtctl(['ApplySchema',
'-sql=' + create_view_template % ('view1', 'resharding1'),
'test_keyspace'],
auto_log=True)
def _insert_startup_values(self):
# row covered by shard -40 (should be merged).
self._insert_value(shard_0_master, 'resharding1', 1, 'msg1',
0x1000000000000000)
# row covered by shard 40-80 (should be merged).
self._insert_value(shard_1_master, 'resharding1', 2, 'msg2',
0x5000000000000000)
# row covered by shard 80- (must not be merged).
self._insert_value(shard_2_master, 'resharding1', 3, 'msg3',
0xD000000000000000)
def _check_startup_values(self):
# check first two values are in the right shard
self._check_value(shard_dest_master, 'resharding1', 1, 'msg1',
0x1000000000000000)
self._check_value(shard_dest_replica, 'resharding1', 1, 'msg1',
0x1000000000000000)
self._check_value(shard_dest_rdonly, 'resharding1', 1, 'msg1',
0x1000000000000000)
self._check_value(shard_dest_master, 'resharding1', 2, 'msg2',
0x5000000000000000)
self._check_value(shard_dest_replica, 'resharding1', 2, 'msg2',
0x5000000000000000)
self._check_value(shard_dest_rdonly, 'resharding1', 2, 'msg2',
0x5000000000000000)
def _insert_lots(self, count, base=0):
if count > 10000:
self.fail('bad count passed in, only support up to 10000')
for i in xrange(count):
self._insert_value(shard_0_master, 'resharding1', 1000000 + base + i,
'msg-range0-%d' % i, 0x2000000000000000 + base + i)
self._insert_value(shard_1_master, 'resharding1', 1010000 + base + i,
'msg-range1-%d' % i, 0x6000000000000000 + base + i)
# _check_lots returns the percentage of the expected values that are present.
def _check_lots(self, count, base=0):
found = 0
for i in xrange(count):
if self._is_value_present_and_correct(shard_dest_replica, 'resharding1',
1000000 + base + i,
'msg-range0-%d' % i,
0x2000000000000000 + base + i):
found += 1
if self._is_value_present_and_correct(shard_dest_replica, 'resharding1',
1010000 + base + i,
'msg-range1-%d' % i,
0x6000000000000000 + base + i):
found += 1
percent = found * 100 / count / 2
logging.debug('I have %d%% of the data', percent)
return percent
def _check_lots_timeout(self, count, threshold, timeout, base=0):
while True:
value = self._check_lots(count, base=base)
if value >= threshold:
return value
timeout = utils.wait_step('waiting for %d%% of the data' % threshold,
timeout, sleep_time=1)
def test_merge_sharding(self):
utils.run_vtctl(['CreateKeyspace',
'--sharding_column_name', 'custom_ksid_col',
'--sharding_column_type', base_sharding.keyspace_id_type,
'test_keyspace'])
shard_0_master.init_tablet('replica', 'test_keyspace', '-40')
shard_0_replica.init_tablet('replica', 'test_keyspace', '-40')
shard_0_rdonly.init_tablet('rdonly', 'test_keyspace', '-40')
shard_1_master.init_tablet('replica', 'test_keyspace', '40-80')
shard_1_replica.init_tablet('replica', 'test_keyspace', '40-80')
shard_1_rdonly.init_tablet('rdonly', 'test_keyspace', '40-80')
shard_2_master.init_tablet('replica', 'test_keyspace', '80-')
shard_2_replica.init_tablet('replica', 'test_keyspace', '80-')
shard_2_rdonly.init_tablet('rdonly', 'test_keyspace', '80-')
# rebuild and check SrvKeyspace
utils.run_vtctl(['RebuildKeyspaceGraph', 'test_keyspace'], auto_log=True)
ks = utils.run_vtctl_json(['GetSrvKeyspace', 'test_nj', 'test_keyspace'])
self.assertEqual(ks['sharding_column_name'], 'custom_ksid_col')
# create databases so vttablet can start behaving normally
for t in [shard_0_master, shard_0_replica, shard_0_rdonly,
shard_1_master, shard_1_replica, shard_1_rdonly,
shard_2_master, shard_2_replica, shard_2_rdonly]:
t.create_db('vt_test_keyspace')
t.start_vttablet(wait_for_state=None,
binlog_use_v3_resharding_mode=False)
# won't be serving, no replication state
for t in [shard_0_master, shard_0_replica, shard_0_rdonly,
shard_1_master, shard_1_replica, shard_1_rdonly,
shard_2_master, shard_2_replica, shard_2_rdonly]:
t.wait_for_vttablet_state('NOT_SERVING')
# reparent to make the tablets work
utils.run_vtctl(['InitShardMaster', '-force', 'test_keyspace/-40',
shard_0_master.tablet_alias], auto_log=True)
utils.run_vtctl(['InitShardMaster', '-force', 'test_keyspace/40-80',
shard_1_master.tablet_alias], auto_log=True)
utils.run_vtctl(['InitShardMaster', '-force', 'test_keyspace/80-',
shard_2_master.tablet_alias], auto_log=True)
# create the tables
self._create_schema()
self._insert_startup_values()
# run a health check on source replicas so they respond to discovery
# (for binlog players) and on the source rdonlys (for workers)
for t in [shard_0_replica, shard_1_replica]:
utils.run_vtctl(['RunHealthCheck', t.tablet_alias])
for t in [shard_0_rdonly, shard_1_rdonly]:
utils.run_vtctl(['RunHealthCheck', t.tablet_alias])
# create the merge shards
shard_dest_master.init_tablet('replica', 'test_keyspace', '-80')
shard_dest_replica.init_tablet('replica', 'test_keyspace', '-80')
shard_dest_rdonly.init_tablet('rdonly', 'test_keyspace', '-80')
# start vttablet on the destination shard (no db created,
# so they're all not serving)
for t in [shard_dest_master, shard_dest_replica, shard_dest_rdonly]:
t.start_vttablet(wait_for_state=None,
binlog_use_v3_resharding_mode=False)
for t in [shard_dest_master, shard_dest_replica, shard_dest_rdonly]:
t.wait_for_vttablet_state('NOT_SERVING')
utils.run_vtctl(['InitShardMaster', '-force', 'test_keyspace/-80',
shard_dest_master.tablet_alias], auto_log=True)
utils.run_vtctl(['RebuildKeyspaceGraph', 'test_keyspace'],
auto_log=True)
utils.check_srv_keyspace(
'test_nj', 'test_keyspace',
'Partitions(master): -40 40-80 80-\n'
'Partitions(rdonly): -40 40-80 80-\n'
'Partitions(replica): -40 40-80 80-\n',
keyspace_id_type=base_sharding.keyspace_id_type,
sharding_column_name='custom_ksid_col')
# copy the schema
utils.run_vtctl(['CopySchemaShard', shard_0_rdonly.tablet_alias,
'test_keyspace/-80'], auto_log=True)
# copy the data (will also start filtered replication), reset source
# Run vtworker as daemon for the following SplitClone commands.
worker_proc, worker_port, worker_rpc_port = utils.run_vtworker_bg(
['--cell', 'test_nj', '--command_display_interval', '10ms',
'--use_v3_resharding_mode=false'],
auto_log=True)
# Initial clone (online).
workerclient_proc = utils.run_vtworker_client_bg(
['SplitClone',
'--offline=false',
'--chunk_count', '10',
'--min_rows_per_chunk', '1',
'--min_healthy_rdonly_tablets', '1',
'test_keyspace/-80'],
worker_rpc_port)
utils.wait_procs([workerclient_proc])
self.verify_reconciliation_counters(worker_port, 'Online', 'resharding1',
2, 0, 0, 0)
# Reset vtworker such that we can run the next command.
workerclient_proc = utils.run_vtworker_client_bg(['Reset'], worker_rpc_port)
utils.wait_procs([workerclient_proc])
# Modify the destination shard. SplitClone will revert the changes.
# Delete row 1 (provokes an insert).
shard_dest_master.mquery('vt_test_keyspace',
'delete from resharding1 where id=1', write=True)
# Update row 2 (provokes an update).
shard_dest_master.mquery(
'vt_test_keyspace', "update resharding1 set msg='msg-not-2' where id=2",
write=True)
# Insert row 0 (provokes a delete).
self._insert_value(shard_dest_master, 'resharding1', 0, 'msg0',
0x5000000000000000)
workerclient_proc = utils.run_vtworker_client_bg(
['SplitClone',
'--chunk_count', '10',
'--min_rows_per_chunk', '1',
'--min_healthy_rdonly_tablets', '1',
'test_keyspace/-80'],
worker_rpc_port)
utils.wait_procs([workerclient_proc])
# Change tablets, which were taken offline, back to rdonly.
utils.run_vtctl(['ChangeSlaveType', shard_0_rdonly.tablet_alias,
'rdonly'], auto_log=True)
utils.run_vtctl(['ChangeSlaveType', shard_1_rdonly.tablet_alias,
'rdonly'], auto_log=True)
self.verify_reconciliation_counters(worker_port, 'Online', 'resharding1',
1, 1, 1, 0)
self.verify_reconciliation_counters(worker_port, 'Offline', 'resharding1',
0, 0, 0, 2)
# Terminate worker daemon because it is no longer needed.
utils.kill_sub_process(worker_proc, soft=True)
# check the startup values are in the right place
self._check_startup_values()
# check the schema too
utils.run_vtctl(['ValidateSchemaKeyspace', 'test_keyspace'], auto_log=True)
# check binlog player variables
self.check_destination_master(shard_dest_master,
['test_keyspace/-40', 'test_keyspace/40-80'])
# check that binlog server exported the stats vars
self.check_binlog_server_vars(shard_0_replica, horizontal=True)
self.check_binlog_server_vars(shard_1_replica, horizontal=True)
# testing filtered replication: insert a bunch of data on shard 0 and 1,
# check we get most of it after a few seconds, wait for binlog server
# timeout, check we get all of it.
logging.debug('Inserting lots of data on source shards')
self._insert_lots(1000)
logging.debug('Checking 80 percent of data is sent quickly')
v = self._check_lots_timeout(1000, 80, 10)
if v != 100:
# small optimization: only do this check if we don't have all the data
# already anyway.
logging.debug('Checking all data goes through eventually')
self._check_lots_timeout(1000, 100, 30)
self.check_binlog_player_vars(shard_dest_master,
['test_keyspace/-40', 'test_keyspace/40-80'],
seconds_behind_master_max=30)
self.check_binlog_server_vars(shard_0_replica, horizontal=True,
min_statements=1000, min_transactions=1000)
self.check_binlog_server_vars(shard_1_replica, horizontal=True,
min_statements=1000, min_transactions=1000)
# use vtworker to compare the data (after health-checking the destination
# rdonly tablets so discovery works)
utils.run_vtctl(['RunHealthCheck', shard_dest_rdonly.tablet_alias])
logging.debug('Running vtworker SplitDiff on first half')
utils.run_vtworker(['-cell', 'test_nj',
'--use_v3_resharding_mode=false',
'SplitDiff',
'--exclude_tables', 'unrelated',
'--min_healthy_rdonly_tablets', '1',
'--source_uid', '0',
'test_keyspace/-80'],
auto_log=True)
utils.run_vtctl(['ChangeSlaveType', shard_0_rdonly.tablet_alias, 'rdonly'],
auto_log=True)
utils.run_vtctl(['ChangeSlaveType', shard_dest_rdonly.tablet_alias,
'rdonly'], auto_log=True)
logging.debug('Running vtworker SplitDiff on second half')
utils.run_vtworker(['-cell', 'test_nj',
'--use_v3_resharding_mode=false',
'SplitDiff',
'--exclude_tables', 'unrelated',
'--min_healthy_rdonly_tablets', '1',
'--source_uid', '1',
'test_keyspace/-80'],
auto_log=True)
utils.run_vtctl(['ChangeSlaveType', shard_1_rdonly.tablet_alias, 'rdonly'],
auto_log=True)
utils.run_vtctl(['ChangeSlaveType', shard_dest_rdonly.tablet_alias,
'rdonly'], auto_log=True)
# get status for the destination master tablet, make sure we have it all
self.check_running_binlog_player(shard_dest_master, 3000, 1000)
# check destination master query service is not running
utils.check_tablet_query_service(self, shard_dest_master, False, False)
stream_health = utils.run_vtctl_json(['VtTabletStreamHealth',
'-count', '1',
shard_dest_master.tablet_alias])
logging.debug('Got health: %s', str(stream_health))
self.assertIn('realtime_stats', stream_health)
self.assertNotIn('serving', stream_health)
# check the destination master is healthy, even though its query
# service is not running (if not healthy this would raise an exception)
shard_dest_master.get_healthz()
# now serve rdonly from the split shards
utils.run_vtctl(['MigrateServedTypes', 'test_keyspace/-80', 'rdonly'],
auto_log=True)
utils.check_srv_keyspace('test_nj', 'test_keyspace',
'Partitions(master): -40 40-80 80-\n'
'Partitions(rdonly): -80 80-\n'
'Partitions(replica): -40 40-80 80-\n',
keyspace_id_type=base_sharding.keyspace_id_type,
sharding_column_name='custom_ksid_col')
# now serve replica from the split shards
utils.run_vtctl(['MigrateServedTypes', 'test_keyspace/-80', 'replica'],
auto_log=True)
utils.check_srv_keyspace('test_nj', 'test_keyspace',
'Partitions(master): -40 40-80 80-\n'
'Partitions(rdonly): -80 80-\n'
'Partitions(replica): -80 80-\n',
keyspace_id_type=base_sharding.keyspace_id_type,
sharding_column_name='custom_ksid_col')
# now serve master from the split shards
utils.run_vtctl(['MigrateServedTypes', 'test_keyspace/-80', 'master'],
auto_log=True)
utils.check_srv_keyspace('test_nj', 'test_keyspace',
'Partitions(master): -80 80-\n'
'Partitions(rdonly): -80 80-\n'
'Partitions(replica): -80 80-\n',
keyspace_id_type=base_sharding.keyspace_id_type,
sharding_column_name='custom_ksid_col')
utils.check_tablet_query_service(self, shard_0_master, False, True)
utils.check_tablet_query_service(self, shard_1_master, False, True)
# check the binlog players are gone now
self.check_no_binlog_player(shard_dest_master)
# kill the original tablets in the original shards
tablet.kill_tablets([shard_0_master, shard_0_replica, shard_0_rdonly,
shard_1_master, shard_1_replica, shard_1_rdonly])
for t in [shard_0_replica, shard_0_rdonly,
shard_1_replica, shard_1_rdonly]:
utils.run_vtctl(['DeleteTablet', t.tablet_alias], auto_log=True)
for t in [shard_0_master, shard_1_master]:
utils.run_vtctl(['DeleteTablet', '-allow_master', t.tablet_alias],
auto_log=True)
# delete the original shards
utils.run_vtctl(['DeleteShard', 'test_keyspace/-40'], auto_log=True)
utils.run_vtctl(['DeleteShard', 'test_keyspace/40-80'], auto_log=True)
# rebuild the serving graph, all mentions of the old shards should be gone
utils.run_vtctl(['RebuildKeyspaceGraph', 'test_keyspace'], auto_log=True)
# kill everything else
tablet.kill_tablets([shard_2_master, shard_2_replica, shard_2_rdonly,
shard_dest_master, shard_dest_replica,
shard_dest_rdonly])
if __name__ == '__main__':
utils.main()
|
__version__ = "0.0.6"
|
"""Reset connections on a certain service group."""
import click
import SoftLayer
from SoftLayer.CLI import environment
from SoftLayer.CLI import loadbal
@click.command()
@click.argument('identifier')
@environment.pass_env
def cli(env, identifier):
"""Reset connections on a certain service group."""
mgr = SoftLayer.LoadBalancerManager(env.client)
loadbal_id, group_id = loadbal.parse_id(identifier)
mgr.reset_service_group(loadbal_id, group_id)
env.fout('Load balancer service group connections are being reset!')
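# Usage sketch (the id values are hypothetical): the IDENTIFIER argument
# bundles the load balancer id and the service group id, which
# loadbal.parse_id() splits apart, e.g. an identifier like '12345:6789'.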
|
from __future__ import unicode_literals
from django.db import migrations, models
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
('auth', '0006_require_contenttypes_0002'),
('regions', '0001_initial'),
('workshops', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Profile',
fields=[
('user', models.OneToOneField(primary_key=True, serialize=False, to=settings.AUTH_USER_MODEL, related_name='profile')),
('slug', models.CharField(max_length=100, unique=True)),
('mobile', models.CharField(max_length=10)),
('interested_locations', models.ManyToManyField(to='regions.Location')),
('interested_sections', models.ManyToManyField(to='workshops.WorkshopSections')),
],
options={
'db_table': 'user_profile',
'verbose_name': 'UserProfile',
'verbose_name_plural': 'UserProfiles',
},
),
migrations.CreateModel(
name='UserType',
fields=[
('id', models.AutoField(primary_key=True, auto_created=True, verbose_name='ID', serialize=False)),
('slug', models.CharField(max_length=100, verbose_name='slug')),
('display_name', models.CharField(max_length=300, verbose_name='Display Name')),
('active', models.BooleanField(default=1)),
],
options={
'db_table': 'users_type',
'ordering': ('-id',),
'verbose_name_plural': 'UserTypes',
'verbose_name': 'UserType',
},
),
migrations.AddField(
model_name='profile',
name='usertype',
field=models.ManyToManyField(to='profiles.UserType'),
),
]
|
import logging
from django.core.urlresolvers import reverse
from rest_framework import decorators, permissions
from rest_framework.renderers import JSONRenderer
from rest_framework.response import Response
from redis import ConnectionError
from readthedocs.core.utils.tasks import TaskNoPermission
from readthedocs.core.utils.tasks import get_public_task_data
from readthedocs.oauth import tasks
log = logging.getLogger(__name__)
SUCCESS_STATES = ('SUCCESS',)
FAILURE_STATES = ('FAILURE', 'REVOKED',)
FINISHED_STATES = SUCCESS_STATES + FAILURE_STATES
STARTED_STATES = ('RECEIVED', 'STARTED', 'RETRY') + FINISHED_STATES
def get_status_data(task_name, state, data, error=None):
data = {
'name': task_name,
'data': data,
'started': state in STARTED_STATES,
'finished': state in FINISHED_STATES,
'success': state in SUCCESS_STATES,
}
if error is not None and isinstance(error, Exception):
data['error'] = error.message
return data
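# A minimal sketch of the state mapping above (task name is hypothetical):
#
#     get_status_data('sync_remote_repositories', 'STARTED', {})
#     # -> {'name': 'sync_remote_repositories', 'data': {},
#     #     'started': True, 'finished': False, 'success': False}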
@decorators.api_view(['GET'])
@decorators.permission_classes((permissions.AllowAny,))
@decorators.renderer_classes((JSONRenderer,))
def job_status(request, task_id):
try:
task_name, state, public_data, error = get_public_task_data(request, task_id)
except (TaskNoPermission, ConnectionError):
return Response(
get_status_data('unknown', 'PENDING', {}))
return Response(
get_status_data(task_name, state, public_data, error))
@decorators.api_view(['POST'])
@decorators.permission_classes((permissions.IsAuthenticated,))
@decorators.renderer_classes((JSONRenderer,))
def sync_remote_repositories(request):
result = tasks.sync_remote_repositories.delay(
user_id=request.user.id)
task_id = result.task_id
return Response({
'task_id': task_id,
'url': reverse('api_job_status', kwargs={'task_id': task_id}),
})
|
'''
This decoder stacks on top of the 'spi' PD and decodes the
Analog Devices AD5626 protocol.
'''
from .pd import Decoder
|
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import json
from ansible.compat.tests.mock import patch
from ansible.modules.network.cnos import cnos_interface
from units.modules.utils import set_module_args
from .cnos_module import TestCnosModule, load_fixture
class TestCnosEthernetModule(TestCnosModule):
module = cnos_interface
def setUp(self):
super(TestCnosEthernetModule, self).setUp()
self.mock_run_cnos_commands = patch('ansible.module_utils.network.cnos.cnos.run_cnos_commands')
self.run_cnos_commands = self.mock_run_cnos_commands.start()
def tearDown(self):
super(TestCnosEthernetModule, self).tearDown()
self.mock_run_cnos_commands.stop()
def load_fixtures(self, commands=None, transport='cli'):
self.run_cnos_commands.return_value = [load_fixture('cnos_ethernet_config.cfg')]
def test_ethernet_channelgroup(self):
set_module_args({'username': 'admin', 'password': 'pass',
'host': '10.241.107.39', 'deviceType': 'g8272_cnos',
'outputfile': 'test.log', 'interfaceOption': 'ethernet', 'interfaceRange': '33',
'interfaceArg1': 'channel-group', 'interfaceArg2': '33', 'interfaceArg3': 'on'})
result = self.execute_module(changed=True)
file = open('Anil.txt', "a")
file.write(str(result))
file.close()
expected_result = 'Interface Configuration is Accomplished'
self.assertEqual(result['msg'], expected_result)
def test_cnos_ethernet_lacp(self):
set_module_args({'username': 'admin', 'password': 'pass',
'host': '10.241.107.39', 'deviceType': 'g8272_cnos',
'outputfile': 'test.log', 'interfaceOption': 'ethernet', 'interfaceRange': '33',
'interfaceArg1': 'lacp', 'interfaceArg2': 'port-priority', 'interfaceArg3': '33'})
result = self.execute_module(changed=True)
expected_result = 'Interface Configuration is Accomplished'
self.assertEqual(result['msg'], expected_result)
def test_cnos_ethernet_duplex(self):
set_module_args({'username': 'admin', 'password': 'pass',
'host': '10.241.107.39', 'deviceType': 'g8272_cnos',
'outputfile': 'test.log', 'interfaceOption': 'ethernet', 'interfaceRange': '33',
'interfaceArg1': 'duplex', 'interfaceArg2': 'auto'})
result = self.execute_module(changed=True)
expected_result = 'Interface Configuration is Accomplished'
self.assertEqual(result['msg'], expected_result)
def test_cnos_ethernet_mtu(self):
set_module_args({'username': 'admin', 'password': 'pass',
'host': '10.241.107.39', 'deviceType': 'g8272_cnos',
'outputfile': 'test.log', 'interfaceOption': 'ethernet', 'interfaceRange': '33',
'interfaceArg1': 'mtu', 'interfaceArg2': '1300'})
result = self.execute_module(changed=True)
expected_result = 'Interface Configuration is Accomplished'
self.assertEqual(result['msg'], expected_result)
def test_cnos_ethernet_spanningtree(self):
set_module_args({'username': 'admin', 'password': 'pass',
'host': '10.241.107.39', 'deviceType': 'g8272_cnos',
'outputfile': 'test.log', 'interfaceOption': 'ethernet', 'interfaceRange': '33',
'interfaceArg1': 'spanning-tree', 'interfaceArg2': 'mst',
'interfaceArg3': '33-35', 'interfaceArg4': 'cost',
'interfaceArg5': '33'})
result = self.execute_module(changed=True)
expected_result = 'Interface Configuration is Accomplished'
self.assertEqual(result['msg'], expected_result)
def test_cnos_ethernet_ip(self):
set_module_args({'username': 'admin', 'password': 'pass',
'host': '10.241.107.39', 'deviceType': 'g8272_cnos',
'outputfile': 'test.log', 'interfaceOption': 'ethernet', 'interfaceRange': '33',
'interfaceArg1': 'ip', 'interfaceArg2': 'port',
'interfaceArg3': 'anil'})
result = self.execute_module(changed=True)
expected_result = 'Interface Configuration is Accomplished'
self.assertEqual(result['msg'], expected_result)
|
"""Send the results of a query to the configured music player as a playlist.
"""
from __future__ import division, absolute_import, print_function
from beets.plugins import BeetsPlugin
from beets.ui import Subcommand
from beets import config
from beets import ui
from beets import util
from os.path import relpath
from tempfile import NamedTemporaryFile
ARGS_MARKER = '$args'
class PlayPlugin(BeetsPlugin):
def __init__(self):
super(PlayPlugin, self).__init__()
config['play'].add({
'command': None,
'use_folders': False,
'relative_to': None,
'raw': False,
# Backwards compatibility. See #1803 and line 74
'warning_threshold': -2,
'warning_treshold': 100,
})
def commands(self):
play_command = Subcommand(
'play',
help=u'send music to a player as a playlist'
)
play_command.parser.add_album_option()
play_command.parser.add_option(
u'-A', u'--args',
action='store',
help=u'add additional arguments to the command',
)
play_command.func = self.play_music
return [play_command]
def play_music(self, lib, opts, args):
"""Execute query, create temporary playlist and execute player
command passing that playlist, at request insert optional arguments.
"""
command_str = config['play']['command'].get()
if not command_str:
command_str = util.open_anything()
use_folders = config['play']['use_folders'].get(bool)
relative_to = config['play']['relative_to'].get()
raw = config['play']['raw'].get(bool)
warning_threshold = config['play']['warning_threshold'].get(int)
# We use -2 as a default value for warning_threshold to detect if it is
# set or not. We can't use a falsey value because it would have an
# actual meaning in the configuration of this plugin, and we do not use
# -1 because some people might use it as a value to obtain no warning,
# which wouldn't be that bad of a practice.
if warning_threshold == -2:
# if warning_threshold has not been set by user, look for
# warning_treshold, to preserve backwards compatibility. See #1803.
# warning_treshold has the correct default value of 100.
warning_threshold = config['play']['warning_treshold'].get(int)
if relative_to:
relative_to = util.normpath(relative_to)
# Add optional arguments to the player command.
if opts.args:
if ARGS_MARKER in command_str:
command_str = command_str.replace(ARGS_MARKER, opts.args)
else:
command_str = u"{} {}".format(command_str, opts.args)
else:
# Don't include the marker in the command.
command_str = command_str.replace(" " + ARGS_MARKER, "")
# Perform search by album and add folders rather than tracks to
# playlist.
if opts.album:
selection = lib.albums(ui.decargs(args))
paths = []
sort = lib.get_default_album_sort()
for album in selection:
if use_folders:
paths.append(album.item_dir())
else:
paths.extend(item.path
for item in sort.sort(album.items()))
item_type = 'album'
# Perform item query and add tracks to playlist.
else:
selection = lib.items(ui.decargs(args))
paths = [item.path for item in selection]
if relative_to:
paths = [relpath(path, relative_to) for path in paths]
item_type = 'track'
item_type += 's' if len(selection) > 1 else ''
if not selection:
ui.print_(ui.colorize('text_warning',
u'No {0} to play.'.format(item_type)))
return
# Warn user before playing any huge playlists.
if warning_threshold and len(selection) > warning_threshold:
ui.print_(ui.colorize(
'text_warning',
u'You are about to queue {0} {1}.'.format(
len(selection), item_type)))
if ui.input_options((u'Continue', u'Abort')) == 'a':
return
ui.print_(u'Playing {0} {1}.'.format(len(selection), item_type))
if raw:
open_args = paths
else:
open_args = [self._create_tmp_playlist(paths)]
self._log.debug(u'executing command: {} {!r}', command_str, open_args)
try:
util.interactive_open(open_args, command_str)
except OSError as exc:
raise ui.UserError(
"Could not play the query: {0}".format(exc))
def _create_tmp_playlist(self, paths_list):
"""Create a temporary .m3u file. Return the filename.
"""
m3u = NamedTemporaryFile('wb', suffix='.m3u', delete=False)
for item in paths_list:
m3u.write(item + b'\n')
m3u.close()
return m3u.name
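# A minimal sketch of the $args handling in play_music (command string and
# query are hypothetical): with `command: mpv $args` configured and
# `beet play -A --shuffle some_query`, ARGS_MARKER is replaced in place to
# give `mpv --shuffle`; without -A, the ' $args' fragment is stripped from
# the command. The playlist (or raw paths) are then handed to
# util.interactive_open().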
|
import ConfigParser
import os
from .kernel import core
DEBUG_MODE = os.environ.get('DRD_DEBUG','0') == '1'
PROJECT_NAME = os.environ.get('DRD_JOB','hf2')
USER_NAME = os.environ.get('USER','general')
SHOT_AUDIO_CONTAINER = os.environ.get('SHOT_AUDIO_CONTAINER','ShotAudio_v2')
SORT_VERSIONS_BY_DATE = True
MEDIA_MODE = 'video'
PLAY_MODE = 'selection'
APP_BROWSER = 'firefox'
PATH_CONFIG = '/tmp/reviewTool2.%s.conf' % USER_NAME
PATH_HELPDOCS = 'http://prodwiki/mediawiki/index.php/RnD:HF2Projects:ReviewTool2'
PATH_TEMP = os.path.join( os.environ.get('DRD_TEMP','/Local/tmp'), 'reviewTool' )
PATH_ICONCACHE = os.path.join( PATH_TEMP, 'icon_cache' )
PATH_SHARE = os.environ.get('DRD_SHARE','/drd/jobs/%s/wip/users/%s' % (PROJECT_NAME,USER_NAME) )
FILETYPE_SEQUENCE = ['jpg','png','exr','iff']
FILETYPE_MOVIE = ['mov','mp4']
FILETYPE_AUDIO = ['wav']
SHOTGUN_SERVER = os.environ.get('DRD_SG_SERVER', 'http://shotgun-sandbox' if DEBUG_MODE else 'http://shotgun-api' )
SHOTGUN_VERSION = os.environ.get('DRD_SG_VERSION', 'api2')
SHOTGUN_USER = os.environ.get('DRD_SG_USER', 'reviewTool' )
SHOTGUN_KEY = os.environ.get('DRD_SG_KEY', 'b9fc6d4614201bfe62a20e64da83defc2e58892c')
SHOTGUN_DEPARTMENT_TYPE = os.environ.get('DRD_SG_DEPT_TYPE','CustomNonProjectEntity03')
if ( not os.path.exists( PATH_TEMP ) ):
os.makedirs( PATH_TEMP )
if ( not os.path.exists( PATH_ICONCACHE ) ):
os.makedirs( PATH_ICONCACHE )
if ( not os.path.exists( PATH_SHARE ) ):
os.mkdir( PATH_SHARE )
REVIEW_TYPE_ORDER = [
'creative',
'technical',
''
]
DEPARTMENT_FILTERS = {
'lens': { 'name': 'Lensing', 'order': 0, 'enabled': True },
'anim': { 'name': 'Animation', 'order': 1, 'enabled': True },
'light': { 'name': 'Lighting', 'order': 2, 'enabled': True },
'crowd': { 'name': 'Crowd', 'order': 3, 'enabled': True },
'moedit': { 'name': 'MoEdit', 'order': 4, 'enabled': True },
'rnd': { 'name': 'RnD', 'order': 5, 'enabled': False },
'flo': { 'name': 'Final Layout', 'order': 6, 'enabled': False },
'comp': { 'name': 'Compositing', 'order': 7, 'enabled': False },
'edit': { 'name': 'Editing', 'order': 8, 'enabled': False },
'fx': { 'name': 'FX', 'order': 9, 'enabled': False },
'art': { 'name': 'Art', 'order': 10, 'enabled': False },
'model': { 'name': 'Model', 'order': 11, 'enabled': False },
'previs': { 'name': 'Previs', 'order': 12, 'enabled': False },
'rig': { 'name': 'Rig', 'order': 13, 'enabled': False },
'skydome': { 'name': 'Skydome', 'order': 14, 'enabled': False },
'surface': { 'name': 'Surface', 'order': 15, 'enabled': False },
'visdev': { 'name': 'Vis Dev', 'order': 16, 'enabled': False },
'charfx': { 'name': 'Char FX', 'order': 17, 'enabled': False },
'mocap': { 'name': 'Mo Cap', 'order': 18, 'enabled': False },
'bulkedit': { 'name': 'Bulk Edit', 'order': 19, 'enabled': False },
'charfinal': { 'name': 'Char Finaling', 'order': 20, 'enabled': False },
'stereo': { 'name': 'Stereo', 'order': 21, 'enabled': False }
}
SHOT_FILTERS = {
'ip': { 'name': 'In Progress', 'order': 0, 'enabled': True, 'icon': 'img/shot_status/ip.png' },
'edp': { 'name': 'Edit Prep', 'order': 1, 'enabled': False, 'icon': 'img/shot_status/edp.png' },
'hld': { 'name': 'Hold', 'order': 2, 'enabled': True, 'icon': 'img/shot_status/hld.png' },
'fin': { 'name': 'Final', 'order': 3, 'enabled': True, 'icon': 'img/shot_status/fin.png' },
}
VERSION_FILTERS = {
'dap': { 'name': 'Dir Approved', 'order': 0, 'enabled': True, 'icon': 'img/render_status/dap.png' },
'pdirev': { 'name': 'Pending Dir Review', 'order': 1, 'enabled': True, 'icon': 'img/render_status/pdirev.png' },
'fcomp': { 'name': 'Fix Complete', 'order': 2, 'enabled': True, 'icon': 'img/render_status/fcomp.png' },
'fix': { 'name': 'Fix Required', 'order': 3, 'enabled': True, 'icon': 'img/render_status/fix.png' },
'techap': { 'name': 'Tech Approved', 'order': 4, 'enabled': True, 'icon': 'img/render_status/techap.png' },
'nfr': { 'name': 'Not For Review', 'order': 5, 'enabled': True, 'icon': 'img/render_status/nfr.png' },
'apr': { 'name': 'Approved', 'order': 6, 'enabled': True, 'icon': 'img/render_status/apr.png' },
'rev': { 'name': 'Pending Review', 'order': 7, 'enabled': True, 'icon': 'img/render_status/rev.png' },
'vwd': { 'name': 'Viewed', 'order': 8, 'enabled': True, 'icon': 'img/render_status/vwd.png' }
}
def audioFileTypes():
"""
Returns the different audio file type
filters based on the global settings
:return <str>:
"""
return 'Audio Files (*.%s)' % (' *.'.join(FILETYPE_AUDIO))
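# e.g. with the default FILETYPE_AUDIO = ['wav'] above, this returns the
# Qt-style file-dialog filter string 'Audio Files (*.wav)'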
def compareDepartment(a,b):
"""
Compares the two departments based on the
current department filter ordering information
:return <int>: 1 || 0 || -1
"""
aorder = DEPARTMENT_FILTERS.get(a,{}).get('order',100000)
border = DEPARTMENT_FILTERS.get(b,{}).get('order',100000)
return cmp(aorder,border)
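# e.g. compareDepartment('anim', 'light') compares orders 1 and 2 and returns
# -1, so 'anim' sorts before 'light'; unknown keys fall back to order 100000
# and sort last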
def departmentFilters():
"""
Returns the current department filters
:return <dict> { <str> key: <dict> { <str>: <variant> value, .. }, .. }:
"""
return DEPARTMENT_FILTERS
def departmentLabels(depts):
"""
    Returns a list of the user-friendly labels for the
    input department keys
:return <list> [ <str>, .. ]:
"""
return [ DEPARTMENT_FILTERS.get(key,{}).get('name',key) for key in depts ]
def departmentOrder( dept ):
"""
    Returns the order number for the input department
key based on the current department filter ordering
information
:return <int>:
"""
return DEPARTMENT_FILTERS.get(dept,{}).get('order',100000)
def departments():
"""
Returns a list of the department keys, sorted
alphabetically
:return <list> [ <str>, .. ]:
"""
keys = DEPARTMENT_FILTERS.keys()
keys.sort()
return keys
def desktopPath( relpath ):
"""
    Returns the desktop filepath based on the input relative path
    :param relpath:
:type <str>:
:return <str>:
"""
return os.path.join(os.path.expanduser('~/Desktop'),relpath)
def enableDepartment( dept, state ):
"""
    Sets the enabled state for the given department to the input state
:param dept:
:type <str>:
:param state:
:type <bool>:
:return <bool>:
"""
dept = str(dept)
    if ( dept not in DEPARTMENT_FILTERS ):
return False
DEPARTMENT_FILTERS[dept]['enabled'] = state
return True
def enabledDepartments():
"""
Returns a list of all the department keys that are currently set to
enabled, sorted by the current filter order.
:return <list> [ <str>, .. ]:
"""
keys = [ key for key in DEPARTMENT_FILTERS if DEPARTMENT_FILTERS[key]['enabled'] ]
keys.sort(compareDepartment)
return keys
def enabledShots():
keys = [ key for key in SHOT_FILTERS if SHOT_FILTERS[key]['enabled'] ]
keys.sort()
return keys
def iconCachePath( relpath ):
"""
    Returns a joining of the input relative
    path with the tool's icon cache location
:return <str>:
"""
return os.path.join( PATH_ICONCACHE, relpath )
def orderedDepartments():
"""
Returns a list of the department keys, sorted by the
current filter order.
:return <list> [ <str>, .. ]
"""
keys = DEPARTMENT_FILTERS.keys()
keys.sort(compareDepartment)
return keys
def restore():
"""
Restores the current settings from the config file
:return <dict> { <str> key: <str> value, .. }
"""
# restore review tool settings
options = {}
parser = ConfigParser.ConfigParser()
if ( not DEBUG_MODE ):
try:
parser.read(PATH_CONFIG)
except:
core.warn( 'Could not read the settings from %s' % PATH_CONFIG )
return options
else:
parser.read(PATH_CONFIG)
# restore the saved options
for section in parser.sections():
for option in parser.options(section):
options['%s::%s' % (section,option)] = parser.get(section,option)
# restore settings level options
filters = { 'DEPARTMENT_FILTER': DEPARTMENT_FILTERS, 'SHOT_FILTER': SHOT_FILTERS, 'VERSION_FILTER': VERSION_FILTERS }
for key, value in options.items():
section, option = key.split('::')
if ( section in filters ):
enabled, order = value.split('|')
filters[section][option]['enabled'] = enabled == 'True'
filters[section][option]['order'] = int(order)
# return the restored settings
return options
def save(options = None):
    """
    Saves the current settings to the config file
    :param options:
    :type <dict> { <str> key: <variant> value, .. }:
    :return <bool>: success
    """
    # avoid the shared mutable default argument; this function adds keys to options
    if ( options is None ):
        options = {}
    # record settings options
filters = { 'DEPARTMENT_FILTER': DEPARTMENT_FILTERS, 'SHOT_FILTER': SHOT_FILTERS, 'VERSION_FILTER': VERSION_FILTERS }
for section, filter in filters.items():
for option, settings in filter.items():
options['%s::%s' % (section,option) ] = '%s|%s' % (settings['enabled'],settings['order'])
# save the config file
parser = ConfigParser.ConfigParser()
# add the items to the parser
for key, value in options.items():
section, option = key.split('::')
if ( not parser.has_section(section) ):
parser.add_section(section)
parser.set( section, option, str(value) )
# save the config settings
f = open(PATH_CONFIG,'w')
if ( not DEBUG_MODE ):
try:
parser.write(f)
except:
f.close()
core.warn( 'Could not save the settings out to %s' % PATH_CONFIG )
return False
else:
parser.write(f)
f.close()
return True
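# A sketch of the round trip (assuming the default PATH_CONFIG): save() writes
# one section per filter group with 'enabled|order' values, e.g.
#   [DEPARTMENT_FILTER]
#   anim = True|1
# and restore() splits that value back into the boolean 'enabled' flag and the
# integer 'order' for the matching filter entry.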
def sharePath( relpath ):
"""
    Generates a shared path location based on the input relative
    path location
:param relpath:
:type <str>:
:return <str>:
"""
return os.path.join(PATH_SHARE,relpath)
def shotFilters():
"""
Returns the current shot filters
:return <dict> { <str> key: <dict> { <str>: <variant> value, .. }, .. }:
"""
return SHOT_FILTERS
def tempPath( relpath ):
"""
    Returns a joining of the input relative
path with the tool's temporary path location
:return <str>:
"""
return os.path.join( PATH_TEMP, relpath )
def versionFilters():
"""
Returns the current version filters
:return <dict> { <str> key: <dict> { <str>: <variant> value, .. }, .. }:
"""
return VERSION_FILTERS
def versionOrder( status ):
"""
    Returns the order based on the input status
:return <int>:
"""
return VERSION_FILTERS.get(status,{}).get('order',10000)
def videoFileTypes():
"""
Returns the different video file type
filters based on the global settings
:return <str>:
"""
return 'Image Sequence Types (*.%s);;Movie Types (*.%s)' % ( ' *.'.join(FILETYPE_SEQUENCE), ' *.'.join(FILETYPE_MOVIE) )
|
import copy
from fs.errors import ResourceNotFoundError
import logging
import os
import sys
from lxml import etree
from path import path
from pkg_resources import resource_string
from xblock.fields import Scope, String, Boolean, List
from xmodule.editing_module import EditingDescriptor
from xmodule.html_checker import check_html
from xmodule.stringify import stringify_children
from xmodule.x_module import XModule
from xmodule.xml_module import XmlDescriptor, name_to_pathname
import textwrap
from xmodule.contentstore.content import StaticContent
from xblock.core import XBlock
log = logging.getLogger("edx.courseware")
class HtmlFields(object):
display_name = String(
display_name="显示名称",
help="此名称出现在页面顶部的水平导航中",
scope=Scope.settings,
# it'd be nice to have a useful default but it screws up other things; so,
# use display_name_with_default for those
default="Text"
)
data = String(help="Html contents to display for this module", default=u"", scope=Scope.content)
source_code = String(help="Source code for LaTeX documents. This feature is not well-supported.", scope=Scope.settings)
use_latex_compiler = Boolean(
help="Enable LaTeX templates?",
default=False,
scope=Scope.settings
)
class HtmlModule(HtmlFields, XModule):
js = {
'coffee': [
resource_string(__name__, 'js/src/javascript_loader.coffee'),
resource_string(__name__, 'js/src/collapsible.coffee'),
resource_string(__name__, 'js/src/html/display.coffee')
],
'js': [
resource_string(__name__, 'js/src/html/imageModal.js'),
resource_string(__name__, 'js/common_static/js/vendor/draggabilly.pkgd.js')
]
}
js_module_name = "HTMLModule"
css = {'scss': [resource_string(__name__, 'css/html/display.scss')]}
def get_html(self):
if self.system.anonymous_student_id:
return self.data.replace("%%USER_ID%%", self.system.anonymous_student_id)
return self.data
class HtmlDescriptor(HtmlFields, XmlDescriptor, EditingDescriptor):
"""
Module for putting raw html in a course
"""
mako_template = "widgets/html-edit.html"
module_class = HtmlModule
filename_extension = "xml"
template_dir_name = "html"
js = {'coffee': [resource_string(__name__, 'js/src/html/edit.coffee')]}
js_module_name = "HTMLEditingDescriptor"
css = {'scss': [resource_string(__name__, 'css/editor/edit.scss'), resource_string(__name__, 'css/html/edit.scss')]}
    # VS[compat] TODO (cpennington): Delete this method once all fall 2012 courses
# are being edited in the cms
@classmethod
def backcompat_paths(cls, path):
if path.endswith('.html.xml'):
path = path[:-9] + '.html' # backcompat--look for html instead of xml
if path.endswith('.html.html'):
path = path[:-5] # some people like to include .html in filenames..
candidates = []
while os.sep in path:
candidates.append(path)
_, _, path = path.partition(os.sep)
# also look for .html versions instead of .xml
nc = []
for candidate in candidates:
if candidate.endswith('.xml'):
nc.append(candidate[:-4] + '.html')
return candidates + nc
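    # Worked example (hypothetical input): backcompat_paths('course/stuff/intro.html.xml')
    # first rewrites the path to 'course/stuff/intro.html', then returns
    # ['course/stuff/intro.html', 'stuff/intro.html'] by peeling off one leading
    # directory at a time; no extra '.html' variants are added since no
    # candidate still ends in '.xml'.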
@classmethod
def filter_templates(cls, template, course):
"""
Filter template that contains 'latex' from templates.
Show them only if use_latex_compiler is set to True in
course settings.
"""
        return ('latex' not in template['template_id'] or course.use_latex_compiler)
def get_context(self):
"""
an override to add in specific rendering context, in this case we need to
add in a base path to our c4x content addressing scheme
"""
_context = EditingDescriptor.get_context(self)
# Add some specific HTML rendering context when editing HTML modules where we pass
# the root /c4x/ url for assets. This allows client-side substitutions to occur.
_context.update({
'base_asset_url': StaticContent.get_base_url_path_for_course_assets(self.location) + '/',
'enable_latex_compiler': self.use_latex_compiler,
})
return _context
# NOTE: html descriptors are special. We do not want to parse and
# export them ourselves, because that can break things (e.g. lxml
# adds body tags when it exports, but they should just be html
# snippets that will be included in the middle of pages.
@classmethod
def load_definition(cls, xml_object, system, location):
'''Load a descriptor from the specified xml_object:
If there is a filename attribute, load it as a string, and
log a warning if it is not parseable by etree.HTMLParser.
If there is not a filename attribute, the definition is the body
of the xml_object, without the root tag (do not want <html> in the
middle of a page)
'''
filename = xml_object.get('filename')
if filename is None:
definition_xml = copy.deepcopy(xml_object)
cls.clean_metadata_from_xml(definition_xml)
return {'data': stringify_children(definition_xml)}, []
else:
# html is special. cls.filename_extension is 'xml', but
# if 'filename' is in the definition, that means to load
# from .html
# 'filename' in html pointers is a relative path
# (not same as 'html/blah.html' when the pointer is in a directory itself)
pointer_path = "{category}/{url_path}".format(
category='html',
url_path=name_to_pathname(location.name)
)
base = path(pointer_path).dirname()
# log.debug("base = {0}, base.dirname={1}, filename={2}".format(base, base.dirname(), filename))
filepath = "{base}/{name}.html".format(base=base, name=filename)
# log.debug("looking for html file for {0} at {1}".format(location, filepath))
# VS[compat]
# TODO (cpennington): If the file doesn't exist at the right path,
# give the class a chance to fix it up. The file will be written out
# again in the correct format. This should go away once the CMS is
# online and has imported all current (fall 2012) courses from xml
if not system.resources_fs.exists(filepath):
candidates = cls.backcompat_paths(filepath)
# log.debug("candidates = {0}".format(candidates))
for candidate in candidates:
if system.resources_fs.exists(candidate):
filepath = candidate
break
try:
with system.resources_fs.open(filepath) as file:
html = file.read().decode('utf-8')
# Log a warning if we can't parse the file, but don't error
if not check_html(html) and len(html) > 0:
msg = "Couldn't parse html in {0}, content = {1}".format(filepath, html)
log.warning(msg)
system.error_tracker("Warning: " + msg)
definition = {'data': html}
# TODO (ichuang): remove this after migration
# for Fall 2012 LMS migration: keep filename (and unmangled filename)
definition['filename'] = [filepath, filename]
return definition, []
except (ResourceNotFoundError) as err:
msg = 'Unable to load file contents at path {0}: {1} '.format(
filepath, err)
# add more info and re-raise
raise Exception(msg), None, sys.exc_info()[2]
# TODO (vshnayder): make export put things in the right places.
def definition_to_xml(self, resource_fs):
''' Write <html filename="" [meta-attrs="..."]> to filename.xml, and the html
string to filename.html.
'''
# Write html to file, return an empty tag
pathname = name_to_pathname(self.url_name)
filepath = u'{category}/{pathname}.html'.format(
category=self.category,
pathname=pathname
)
resource_fs.makedir(os.path.dirname(filepath), recursive=True, allow_recreate=True)
with resource_fs.open(filepath, 'w') as filestream:
html_data = self.data.encode('utf-8')
filestream.write(html_data)
# write out the relative name
relname = path(pathname).basename()
elt = etree.Element('html')
elt.set("filename", relname)
return elt
@property
def non_editable_metadata_fields(self):
non_editable_fields = super(HtmlDescriptor, self).non_editable_metadata_fields
non_editable_fields.append(HtmlDescriptor.use_latex_compiler)
return non_editable_fields
class AboutFields(object):
display_name = String(
help="Display name for this module",
scope=Scope.settings,
default="overview",
)
data = String(
help="Html contents to display for this module",
default="",
scope=Scope.content
)
@XBlock.tag("detached")
class AboutModule(AboutFields, HtmlModule):
"""
Overriding defaults but otherwise treated as HtmlModule.
"""
pass
@XBlock.tag("detached")
class AboutDescriptor(AboutFields, HtmlDescriptor):
"""
These pieces of course content are treated as HtmlModules but we need to overload where the templates are located
in order to be able to create new ones
"""
template_dir_name = "about"
module_class = AboutModule
class StaticTabFields(object):
"""
The overrides for Static Tabs
"""
display_name = String(
display_name="显示名称",
help="此名称出现在页面顶部的水平导航中.",
scope=Scope.settings,
default="Empty",
)
data = String(
default=textwrap.dedent("""\
            <p>Here you can add additional pages to your courseware. Click the 'Edit' button to begin editing.</p>
"""),
scope=Scope.content,
help="HTML for the additional pages"
)
@XBlock.tag("detached")
class StaticTabModule(StaticTabFields, HtmlModule):
"""
Supports the field overrides
"""
pass
@XBlock.tag("detached")
class StaticTabDescriptor(StaticTabFields, HtmlDescriptor):
"""
These pieces of course content are treated as HtmlModules but we need to overload where the templates are located
in order to be able to create new ones
"""
template_dir_name = None
module_class = StaticTabModule
class CourseInfoFields(object):
"""
Field overrides
"""
items = List(
help="List of course update items",
default=[],
scope=Scope.content
)
data = String(
help="Html contents to display for this module",
default="<ol></ol>",
scope=Scope.content
)
@XBlock.tag("detached")
class CourseInfoModule(CourseInfoFields, HtmlModule):
"""
Just to support xblock field overrides
"""
# statuses
STATUS_VISIBLE = 'visible'
STATUS_DELETED = 'deleted'
@XBlock.tag("detached")
class CourseInfoDescriptor(CourseInfoFields, HtmlDescriptor):
"""
These pieces of course content are treated as HtmlModules but we need to overload where the templates are located
in order to be able to create new ones
"""
template_dir_name = None
module_class = CourseInfoModule
|
from spack import *
class RNetwork(RPackage):
"""Tools to create and modify network objects. The network class can
represent a range of relational data types, and supports
arbitrary vertex/edge/graph attributes."""
homepage = "https://statnet.org"
url = "https://cran.r-project.org/src/contrib/network_1.13.0.tar.gz"
list_url = "https://cran.r-project.org/src/contrib/Archive/network"
version('1.13.0', 'd0b967d6f1aad43b6479d72f29b705de')
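# Hypothetical usage: with this file placed in a Spack package repository as
# r-network/package.py, the package installs via `spack install r-network`.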
|
"""
Function action
============================================================================
The :class:`Function` action wraps a callable, optionally with some
default keyword argument values. On execution, the execution data
(commonly containing the recognition extras) are combined with the
default argument values (if present) to form the arguments with which
the callable will be called.
Simple usage::
>>> def func(count):
... print "count:", count
...
>>> action = Function(func)
>>> action.execute({"count": 2})
count: 2
True
>>> # Additional keyword arguments are ignored:
>>> action.execute({"count": 2, "flavor": "vanilla"})
count: 2
True
Usage with default arguments::
>>> def func(count, flavor):
... print "count:", count
... print "flavor:", flavor
...
>>> # The Function object can be given default argument values:
>>> action = Function(func, flavor="spearmint")
>>> action.execute({"count": 2})
count: 2
flavor: spearmint
True
    >>> # Arguments given at execution time override the default values:
>>> action.execute({"count": 2, "flavor": "vanilla"})
count: 2
flavor: vanilla
True
Class reference
----------------------------------------------------------------------------
"""
from inspect import getargspec
from .action_base import ActionBase, ActionError
class Function(ActionBase):
""" Call a function with extra keyword arguments. """
def __init__(self, function, **defaults):
"""
Constructor arguments:
- *function* (callable) --
the function to call when this action is executed
- defaults --
default keyword-values for the arguments with which
the function will be called
"""
ActionBase.__init__(self)
self._function = function
self._defaults = defaults
self._str = function.__name__
(args, varargs, varkw, defaults) = getargspec(self._function)
if varkw: self._filter_keywords = False
else: self._filter_keywords = True
self._valid_keywords = set(args)
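    # e.g. for def func(count): varkw is None, so _filter_keywords is True and
    # _valid_keywords is {'count'}; an execution dict like
    # {"count": 2, "flavor": "vanilla"} is then filtered down to {"count": 2}
    # in _execute() below, matching the doctests in the module docstring.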
def _execute(self, data=None):
arguments = dict(self._defaults)
if isinstance(data, dict):
arguments.update(data)
if self._filter_keywords:
invalid_keywords = set(arguments.keys()) - self._valid_keywords
for key in invalid_keywords:
del arguments[key]
try:
self._function(**arguments)
except Exception, e:
self._log.exception("Exception from function %s:"
% self._function.__name__)
raise ActionError("%s: %s" % (self, e))
|
import glob
import os
import subprocess
import sys
def RunCommand(commandLine):
#print ' '.join(commandLine)
return subprocess.call(commandLine)
for filename in glob.glob(os.path.join('Bin', '*.raw')):
os.remove(filename)
for arch in ('ia32', 'x64'):
for debugType in (None, 'port80', 'serial'):
output = os.path.join('Bin', 'ResetVector')
output += '.' + arch
if debugType is not None:
output += '.' + debugType
output += '.raw'
commandLine = (
'nasm',
'-D', 'ARCH_%s' % arch.upper(),
'-D', 'DEBUG_%s' % str(debugType).upper(),
'-o', output,
'Vtf0.nasmb',
)
ret = RunCommand(commandLine)
print '\tASM\t' + output
if ret != 0: sys.exit(ret)
commandLine = (
'python',
'Tools/FixupForRawSection.py',
output,
)
print '\tFIXUP\t' + output
ret = RunCommand(commandLine)
if ret != 0: sys.exit(ret)
|
"""Support for Zigbee sensors."""
from binascii import hexlify
import logging
import voluptuous as vol
from homeassistant.components import zigbee
from homeassistant.const import TEMP_CELSIUS
from homeassistant.helpers.entity import Entity
from . import PLATFORM_SCHEMA
_LOGGER = logging.getLogger(__name__)
CONF_TYPE = "type"
CONF_MAX_VOLTS = "max_volts"
DEFAULT_VOLTS = 1.2
TYPES = ["analog", "temperature"]
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_TYPE): vol.In(TYPES),
vol.Optional(CONF_MAX_VOLTS, default=DEFAULT_VOLTS): vol.Coerce(float),
}
)
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the ZigBee platform.
Uses the 'type' config value to work out which type of ZigBee sensor we're
dealing with and instantiates the relevant classes to handle it.
"""
typ = config.get(CONF_TYPE)
try:
sensor_class, config_class = TYPE_CLASSES[typ]
except KeyError:
_LOGGER.exception("Unknown ZigBee sensor type: %s", typ)
return
add_entities([sensor_class(hass, config_class(config))], True)
class ZigBeeTemperatureSensor(Entity):
"""Representation of XBee Pro temperature sensor."""
def __init__(self, hass, config):
"""Initialize the sensor."""
self._config = config
self._temp = None
@property
def name(self):
"""Return the name of the sensor."""
return self._config.name
@property
def state(self):
"""Return the state of the sensor."""
return self._temp
@property
def unit_of_measurement(self):
"""Return the unit of measurement the value is expressed in."""
return TEMP_CELSIUS
def update(self):
"""Get the latest data."""
try:
self._temp = zigbee.DEVICE.get_temperature(self._config.address)
except zigbee.ZIGBEE_TX_FAILURE:
_LOGGER.warning(
"Transmission failure when attempting to get sample from "
"ZigBee device at address: %s",
hexlify(self._config.address),
)
except zigbee.ZIGBEE_EXCEPTION as exc:
_LOGGER.exception("Unable to get sample from ZigBee device: %s", exc)
TYPE_CLASSES = {
"temperature": (ZigBeeTemperatureSensor, zigbee.ZigBeeConfig),
"analog": (zigbee.ZigBeeAnalogIn, zigbee.ZigBeeAnalogInConfig),
}
|
import os
import sys
import re
DEFAULT_DOMAIN = 'vsphere.local'
DEFAULT_USER = 'administrator'
DEFAULT_PASSWORD = 'vmware'
DEFAULT_PORT = '389'
print 'running on %s' % (os.name)
if(os.name == 'nt'):
build = 'c:\\PROGRA~1\\vmware\\cis\\vmdird\\'
vdcpromo = build+'vdcpromo.exe'
vdcmerge = build+ 'vdcmerge.exe'
vdcsplit = build+ 'vdcsplit.exe'
vdcsetupldu = build+'vdcsetupldu.exe'
data_store = '"C:\\Documents and Settings\\All Users\\Application Data\\VMware\\cis\\data\\vmdird\\*"'
else:
vdcfirstboot='/usr/lib/vmware-vmdir/bin/vmdir_fbcore.py'
kdcfirstboot='/usr/lib/vmware-vmkdc/firstboot/vmkdc-firstboot.py'
afdfirstboot='/usr/lib/vmware-vmafd/firstboot/vmafd-firstboot.py'
vdcpromo = '/usr/lib/vmware-vmdir/bin/vdcpromo'
vdcmerge = '/usr/lib/vmware-vmdir/bin/vdcmerge'
vdcsplit = '/usr/lib/vmware-vmdir/bin/vdcsplit'
#vdcmerge = '/home/zhaog/workspaces/lotus/main/vmdir/build/tools/vdcmerge/.libs/vdcmerge'
#vdcsplit = '/home/zhaog/workspaces/lotus/main/vmdir/build/tools/vdcsplit/.libs/vdcsplit'
vdcpass = '/home/zhaog/workspaces/lotus/main/vmdir/build/tools/vdcpass/.libs/vdcpass'
vdcsetupldu = '/home/zhaog/workspaces/lotus/main/vmdir/build/tools/vdcsetupldu/vdcsetupldu'
lwsm = '/opt/likewise/bin/lwsm'
data_store = '/storage/db/vmware-vmdir/*'
def reset_rpms():
print 'remove afd'
os.system('rpm -e vmware-afd')
print 'remove vmkdc'
os.system('rpm -e vmware-kdc')
print 'remove vmdir'
os.system('rpm -e vmware-directory')
print 'install vmdir'
os.system('rpm --nodeps -i /mnt/hgfs/workspaces/lotus/main/vmdir/build/stage/RPMS/x86_64/vmware-directory-1.0.0-00000.x86_64.rpm ')
print 'install vmkdc'
os.system('rpm --nodeps -i /mnt/hgfs/workspaces/lotus/main/vmkdc/build/stage/RPMS/x86_64/vmware-kdc-1.0.0-00000.x86_64.rpm')
print 'install vmafd'
os.system('rpm --nodeps -i /mnt/hgfs/workspaces/lotus/main/vmafd/build/stage/RPMS/x86_64/vmware-afd-1.0.0-00000.x86_64.rpm')
def reset_service():
print 'resetting service...'
print 'cleaning up database...'
if(os.name == 'nt'):
print 'stopping service'
os.system('net stop VMWareDirectoryService')
os.system('%s %s' % ('del /Q ', data_store))
else:
os.system('%s %s' % ('rm', data_store))
print 'restarting vmdir...'
if(os.name == 'nt'):
os.system('net start VMWareDirectoryService')
else:
os.system('%s %s %s' % (lwsm, 'restart', 'vmdir'))
def vdc_firstboot():
cmd='python '+vdcfirstboot
print cmd
os.system(cmd)
cmd='python '+kdcfirstboot
print cmd
os.system(cmd)
def init_source():
reset_rpms()
reset_service()
vdc_firstboot()
def vdc_promo(domain):
print 'vdcpromoing...'
cmd='%s -d %s -u %s -w %s -i 1' % (vdcpromo, domain, DEFAULT_USER, DEFAULT_PASSWORD)
print cmd
os.system(cmd)
def find_rdn(dn):
m = re.match(r'cn=(.+?),', dn)
return m.group(1)
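# e.g. find_rdn('cn=administrator,cn=users,dc=vsphere,dc=local') returns
# 'administrator': the non-greedy group captures up to the first comma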
def add_user(l, dn):
modlist = [('objectClass', 'vmIdentity-User'),
('vmIdentity-Account', find_rdn(dn)),
('cn',find_rdn(dn))]
l.add_s(dn, modlist)
def add_userwithpassword(l, dn, password):
modlist = [('objectClass', 'user'),
('sAMAccountName', find_rdn(dn)),
('cn',find_rdn(dn)),
('userpassword', password),
('userPrincipalName', find_rdn(dn)+'@vsphere.local')]
l.add_s(dn, modlist)
def add_container(l, dn):
modlist = [('objectClass', 'vmIdentity-Container'),
('cn', find_rdn(dn))]
l.add_s(dn, modlist)
def set_password(source_uri, admin_dn, admin_password, user_dn, user_password ):
    cmdline = '%s -h %s -u %s -w %s -U %s -W %s' % (vdcpass, source_uri, admin_dn, admin_password, user_dn, user_password)
print cmdline
os.system(cmdline)
def change_password(source_uri, user_dn, old_password, new_password):
    cmdline = '%s -h %s -u %s -w %s -W %s' % (vdcpass, source_uri, user_dn, old_password, new_password)
print cmdline
os.system(cmdline)
|
from suds import *
from suds.client import Client
from sys import exit
from optparse import OptionParser
from aviary.util import *
wsdl = 'file:/var/lib/condor/aviary/services/job/aviary-job.wsdl'
cmds = ['holdJob', 'releaseJob', 'removeJob', 'suspendJob', 'continueJob']
parser = build_basic_parser('Control job state remotely via SOAP.','http://localhost:39090/services/job/')
parser.add_option('--cmd', action="store", choices=(cmds), dest='cmd', help=str(cmds))
parser.add_option('--cproc', action="store", dest='cproc', help="a cluster.proc id like '1.0' or '5.3'")
(opts,args) = parser.parse_args()
if opts.cmd is None:
print 'One of these commands must be supplied', cmds
parser.print_help()
exit(1)
if opts.cproc is None:
print 'You must provide a cluster.proc job id'
parser.print_help()
exit(1)
client = create_suds_client(opts,wsdl,None)
opts.url += opts.cmd
client.set_options(location=opts.url)
jobId = client.factory.create('ns0:JobID')
jobId.job = opts.cproc
try:
    func = getattr(client.service, opts.cmd, None)
    if not callable(func):
        print 'Unknown command:', opts.cmd
        exit(1)
    result = func(jobId, "test")
except Exception, e:
print "unable to access scheduler at: ", opts.url
print e
exit(1)
if result.code != "OK":
print result.code,":", result.text
else:
print opts.cmd, 'succeeded'
|
import six
from st2common.runners.base_action import Action
class PacksTransformationAction(Action):
def run(self, packs_status):
"""
:param packs_status: Result from packs.download action.
:type: packs_status: ``dict``
"""
packs = []
for pack_name, status in six.iteritems(packs_status):
if 'success' in status.lower():
packs.append(pack_name)
return packs
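# A sketch of the expected shapes (hypothetical data): given
#   packs_status = {'sensu': 'Success.', 'slack': 'Failed to download.'}
# run() returns ['sensu'], keeping only packs whose status string contains
# 'success' (case-insensitive).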
|
"""This module is deprecated. Please use `airflow.providers.amazon.aws.operators.sagemaker_tuning`."""
import warnings
from airflow.providers.amazon.aws.operators.sagemaker_tuning import SageMakerTuningOperator # noqa
warnings.warn(
"This module is deprecated. Please use `airflow.providers.amazon.aws.operators.sagemaker_tuning`.",
DeprecationWarning, stacklevel=2
)
|
from AlgorithmImports import *
class CustomConsolidatorRegressionAlgorithm(QCAlgorithm):
'''Custom Consolidator Regression Algorithm shows some examples of how to build custom
consolidators in Python.'''
def Initialize(self):
self.SetStartDate(2013,10,4)
self.SetEndDate(2013,10,11)
self.SetCash(100000)
self.AddEquity("SPY", Resolution.Minute)
#Create 5 day QuoteBarConsolidator; set consolidated function; add to subscription manager
fiveDayConsolidator = QuoteBarConsolidator(timedelta(days=5))
fiveDayConsolidator.DataConsolidated += self.OnQuoteBarDataConsolidated
self.SubscriptionManager.AddConsolidator("SPY", fiveDayConsolidator)
#Create a 3:10PM custom quote bar consolidator
timedConsolidator = DailyTimeQuoteBarConsolidator(time(hour=15, minute=10))
timedConsolidator.DataConsolidated += self.OnQuoteBarDataConsolidated
self.SubscriptionManager.AddConsolidator("SPY", timedConsolidator)
#Create our entirely custom 2 day quote bar consolidator
self.customConsolidator = CustomQuoteBarConsolidator(timedelta(days=2))
self.customConsolidator.DataConsolidated += (self.OnQuoteBarDataConsolidated)
self.SubscriptionManager.AddConsolidator("SPY", self.customConsolidator)
#Create an indicator and register a consolidator to it
self.movingAverage = SimpleMovingAverage(5)
self.customConsolidator2 = CustomQuoteBarConsolidator(timedelta(hours=1))
self.RegisterIndicator("SPY", self.movingAverage, self.customConsolidator2)
def OnQuoteBarDataConsolidated(self, sender, bar):
'''Function assigned to be triggered by consolidators.
Designed to post debug messages to show how the examples work, including
which consolidator is posting, as well as its values.
        If using an inherited class and not overriding OnDataConsolidated,
we expect to see the super C# class as the sender type.
Using sender.Period only works when all consolidators have a Period value.
'''
consolidatorInfo = str(type(sender)) + str(sender.Period)
self.Debug("Bar Type: " + consolidatorInfo)
self.Debug("Bar Range: " + bar.Time.ctime() + " - " + bar.EndTime.ctime())
self.Debug("Bar value: " + str(bar.Close))
def OnData(self, slice):
test = slice.get_Values()
if self.customConsolidator.Consolidated and slice.ContainsKey("SPY"):
data = slice['SPY']
if self.movingAverage.IsReady:
if data.Value > self.movingAverage.Current.Price:
self.SetHoldings("SPY", .5)
                else:
self.SetHoldings("SPY", 0)
class DailyTimeQuoteBarConsolidator(QuoteBarConsolidator):
'''A custom QuoteBar consolidator that inherits from C# class QuoteBarConsolidator.
This class shows an example of building on top of an existing consolidator class, it is important
to note that this class can leverage the functions of QuoteBarConsolidator but its private fields
(_period, _workingbar, etc.) are separate from this Python. For that reason if we want Scan() to work
we must overwrite the function with our desired Scan function and trigger OnDataConsolidated().
For this particular example we implemented the scan method to trigger a consolidated bar
at closeTime everyday'''
def __init__(self, closeTime):
self.closeTime = closeTime
self.workingBar = None
def Update(self, data):
'''Updates this consolidator with the specified data'''
        #If we don't have a bar yet, create one
if self.workingBar is None:
self.workingBar = QuoteBar(data.Time,data.Symbol,data.Bid,data.LastBidSize,
data.Ask,data.LastAskSize)
#Update bar using QuoteBarConsolidator's AggregateBar()
self.AggregateBar(self.workingBar, data)
def Scan(self, time):
'''Scans this consolidator to see if it should emit a bar due yet'''
        #If it's our desired bar end time, take the steps to emit and reset the bar
if time.hour == self.closeTime.hour and time.minute == self.closeTime.minute:
#Set end time
self.workingBar.EndTime = time
#Emit event using QuoteBarConsolidator's OnDataConsolidated()
self.OnDataConsolidated(self.workingBar)
#Reset the working bar to None
self.workingBar = None
class CustomQuoteBarConsolidator(PythonConsolidator):
'''A custom quote bar consolidator that inherits from PythonConsolidator and implements
the IDataConsolidator interface, it must implement all of IDataConsolidator. Reference
PythonConsolidator.cs and DataConsolidatorPythonWrapper.py for more information.
This class shows how to implement a consolidator from scratch in Python, this gives us more
freedom to determine the behavior of the consolidator but can't leverage any of the built in
functions of an inherited class.
For this example we implemented a Quotebar from scratch'''
def __init__(self, period):
#IDataConsolidator required vars for all consolidators
self.Consolidated = None #Most recently consolidated piece of data.
self.WorkingData = None #Data being currently consolidated
self.InputType = QuoteBar #The type consumed by this consolidator
self.OutputType = QuoteBar #The type produced by this consolidator
#Consolidator Variables
self.Period = period
def Update(self, data):
'''Updates this consolidator with the specified data'''
        #If we don't have a bar yet, create one
if self.WorkingData is None:
self.WorkingData = QuoteBar(data.Time,data.Symbol,data.Bid,data.LastBidSize,
data.Ask,data.LastAskSize,self.Period)
#Update bar using QuoteBar's update()
self.WorkingData.Update(data.Value, data.Bid.Close, data.Ask.Close, 0,
data.LastBidSize, data.LastAskSize)
def Scan(self, time):
'''Scans this consolidator to see if it should emit a bar due to time passing'''
if self.Period is not None and self.WorkingData is not None:
if time - self.WorkingData.Time >= self.Period:
#Trigger the event handler with a copy of self and the data
self.OnDataConsolidated(self, self.WorkingData)
#Set the most recent consolidated piece of data and then clear the workingData
self.Consolidated = self.WorkingData
self.WorkingData = None
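# Timing sketch for Scan() above (hypothetical): with Period=timedelta(days=2),
# a working bar whose Time is 2013-10-07 00:00 is emitted on the first Scan()
# call at or after 2013-10-09 00:00; Consolidated is then set and WorkingData
# cleared, so the next Update() starts a fresh bar.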
|
from collections import OrderedDict
from typing import Dict, Generic, Optional, TypeVar
TCacheKey = TypeVar("TCacheKey")
TCacheValue = TypeVar("TCacheValue")
class LRUCache(Generic[TCacheKey, TCacheValue]):
def __init__(self, num: int):
# TODO: fix type after dropping py36
self.cache: Dict[TCacheKey, TCacheValue] = OrderedDict()
self.num = num
def get(self, key: TCacheKey) -> Optional[TCacheValue]:
if key not in self.cache:
return None
# noinspection PyUnresolvedReferences
self.cache.move_to_end(key)
return self.cache[key]
def set(self, key: TCacheKey, value: TCacheValue) -> None:
self.cache[key] = value
# noinspection PyUnresolvedReferences
self.cache.move_to_end(key)
if len(self.cache) > self.num:
# noinspection PyArgumentList
self.cache.popitem(last=False)
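# A minimal usage sketch (not part of the original module):
if __name__ == "__main__":
    cache: LRUCache[str, int] = LRUCache(num=2)
    cache.set("a", 1)
    cache.set("b", 2)
    cache.get("a")          # touch "a" so it becomes most recently used
    cache.set("c", 3)       # evicts "b", the least recently used key
    assert cache.get("b") is None
    assert cache.get("a") == 1 and cache.get("c") == 3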
|
import config
from install_package import InstallPackage
import os
import re
import shutil
import sys
import utils
BASENAME = "ExposureRender"
GIT_REPO = "http://code.google.com/p/exposure-render"
dependencies = ['CMake', 'Qt', 'VTK_QT_58']
class ExposureRender(InstallPackage):
def __init__(self):
self.source_dir = os.path.join(config.archive_dir, BASENAME)
self.build_dir = os.path.join(config.build_dir, '%s' %
(BASENAME,))
self.inst_dir = os.path.join(config.inst_dir, BASENAME)
def get(self):
if os.path.exists(self.source_dir):
utils.output("Exposure Render already checked out, skipping step.")
else:
utils.goto_archive()
ret = os.system("hg clone %s %s" % (GIT_REPO, BASENAME))
if ret != 0:
utils.error("Could not clone Exposure Render repository. Fix and try again.")
os.chdir(self.source_dir)
ret = os.system("hg update") #TODO: is this required?
if ret != 0:
utils.error("Could not update Exposure Render. Fix and try again.")
def configure(self):
if os.path.exists(
os.path.join(self.build_dir, 'CMakeFiles/cmake.check_cache')):
utils.output("Exposure Render build already configured.")
return
if not os.path.exists(self.build_dir):
os.mkdir(self.build_dir)
QT_MOC_EXECUTABLE = os.path.join(config.QT_BIN, 'moc.exe')
QT_QMAKE_EXECUTABLE = os.path.join(config.QT_BIN, 'qmake.exe')
QT_UIC_EXECUTABLE = os.path.join(config.QT_BIN, 'uic.exe')
#if not os.path.exists(QT_MOC_EXECUTABLE):
# print "Qt MOC executable not found, aborting!"
# return;
cmake_params = \
"-DBUILD_SHARED_LIBS=ON " \
"-DCMAKE_BUILD_TYPE=RelWithDebInfo " \
"-DCMAKE_INSTALL_PREFIX=%s " \
"-DPYTHON_INCLUDE_DIR=%s " \
"-DPYTHON_LIBRARY=%s " \
"-DQT_MOC_EXECUTABLE=%s " \
"-DQT_QMAKE_EXECUTABLE=%s " \
"-DQT_UIC_EXECUTABLE=%s " \
"-DVTK_DIR:PATH=%s" \
% (self.inst_dir,
config.PYTHON_INCLUDE_PATH,
config.PYTHON_LIBRARY,
QT_MOC_EXECUTABLE,
QT_QMAKE_EXECUTABLE,
QT_UIC_EXECUTABLE,
config.VTK_DIR)
ret = utils.cmake_command(self.build_dir, os.path.join(self.source_dir, 'Source'), cmake_params)
if ret != 0:
utils.error("Could not configure Exposure Render. Fix and try again.")
def build(self):
posix_file = os.path.join(self.build_dir, config.BUILD_TARGET,
'libvtkErCorePython.so') #TODO: check whether this is the correct file to test on
nt_file = os.path.join(self.build_dir, config.BUILD_TARGET,
'vtkErCorePythonD.dll')
if utils.file_exists(posix_file, nt_file):
utils.output("Exposure Render already built. Skipping build step.")
else:
os.chdir(self.build_dir)
ret = utils.make_command('ErGUI.sln')
if ret != 0:
utils.error("Error building Exposure Render. Fix and try again.")
def install(self):
posix_file = os.path.join(self.inst_dir, 'bin/ErGUI')
nt_file = os.path.join(self.inst_dir, 'bin', 'ErGUI.exe')
if utils.file_exists(posix_file, nt_file):
utils.output("Exposure Render already installed. Skipping install step.")
else:
ret = utils.make_command('ErGUI.sln', install=True)
if ret != 0:
utils.error("Could not install Exposure Render. Fix and try again.")
def clean_build(self):
utils.output("Removing build and installation directories.")
if os.path.exists(self.build_dir):
shutil.rmtree(self.build_dir)
self.clean_install()
def clean_install(self):
utils.output("Removing installation directory.")
if os.path.exists(self.inst_dir):
shutil.rmtree(self.inst_dir)
def get_installed_version(self):
#TODO: implement
return ''
|